Merge remote-tracking branch 'origin/release/os/4.3' into my_merge_branch

commit 5ac1e50135

38  .ci/dev/regression/Jenkinsfile  (vendored)
@@ -8,9 +8,10 @@ pipeline {
    options { timestamps() }

    environment {
        DOCKER_TAG_TO_USE = "${UUID.randomUUID().toString().toLowerCase().subSequence(0, 12)}"
        DOCKER_TAG_TO_USE = "${env.GIT_COMMIT.subSequence(0, 8)}"
        EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
        BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
        ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
    }

    stages {
@@ -21,37 +22,32 @@ pipeline {
                "-Dkubenetize=true " +
                "-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
                "-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
                "-Ddocker.provided.tag=\"\${DOCKER_TAG_TO_USE}\"" +
                " clean pushBuildImage"
                "-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\"" +
                " clean pushBuildImage preAllocateForParallelRegressionTest --stacktrace"
            }
            sh "kubectl auth can-i get pods"
        }
    }

    stage('Corda Pull Request - Run Tests') {
    stage('Unit and Integration Tests') {
        steps {
            sh "./gradlew " +
                "-DbuildId=\"\${BUILD_ID}\" " +
                "-Dkubenetize=true " +
                "-Ddocker.tag=\"\${DOCKER_TAG_TO_USE}\"" +
                " allParallelUnitAndIntegrationTest"
        }
    }
    stage('Slow Integration Tests') {
        steps {
            sh "./gradlew " +
                "-DbuildId=\"\${BUILD_ID}\" " +
                "-Dkubenetize=true " +
                "-Ddocker.tag=\"\${DOCKER_TAG_TO_USE}\"" +
                " allParallelSlowIntegrationTest"
        }
    stage('Regression Test') {
        steps {
            sh "./gradlew " +
                "-DbuildId=\"\${BUILD_ID}\" " +
                "-Dkubenetize=true " +
                "-Ddocker.run.tag=\"\${DOCKER_TAG_TO_USE}\" " +
                "-Dartifactory.username=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
                "-Dartifactory.password=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
                "-Dgit.branch=\"\${GIT_BRANCH}\" " +
                "-Dgit.target.branch=\"\${GIT_BRANCH}\" " +
                " deAllocateForParallelRegressionTest parallelRegressionTest --stacktrace"
        }
    }

    post {
        always {
            archiveArtifacts artifacts: '**/pod-logs/**/*.log', fingerprint: false
            junit '**/build/test-results-xml/**/*.xml'
        }
        cleanup {
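The environment change above swaps a random UUID for a prefix of the commit hash, so the docker tag is reproducible everywhere it is evaluated within the same build. A minimal illustration (not part of this commit; the SHA below is hypothetical):

    public class DockerTagExample {
        public static void main(String[] args) {
            String gitCommit = "5ac1e50135aabbccddeeff00112233445566aabb"; // hypothetical full SHA
            // New scheme: the first 8 characters of the commit, identical across stages.
            System.out.println(gitCommit.subSequence(0, 8)); // 5ac1e501
            // Old scheme: a fresh random value on every evaluation.
            System.out.println(java.util.UUID.randomUUID().toString().toLowerCase().subSequence(0, 12));
        }
    }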
3  .idea/codeStyles/Project.xml  (generated)

@@ -32,6 +32,9 @@
    <option name="IF_RPAREN_ON_NEW_LINE" value="false" />
    <option name="CODE_STYLE_DEFAULTS" value="KOTLIN_OFFICIAL" />
  </JetCodeStyleSettings>
  <MarkdownNavigatorCodeStyleSettings>
    <option name="RIGHT_MARGIN" value="72" />
  </MarkdownNavigatorCodeStyleSettings>
  <editorconfig>
    <option name="ENABLED" value="false" />
  </editorconfig>
8  Jenkinsfile  (vendored)

@@ -1,4 +1,3 @@
import static com.r3.build.BuildControl.killAllExistingBuildsForJob
@Library('existing-build-control')
import static com.r3.build.BuildControl.killAllExistingBuildsForJob

@@ -12,6 +11,7 @@ pipeline {
        DOCKER_TAG_TO_USE = "${env.GIT_COMMIT.subSequence(0, 8)}"
        EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
        BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
        ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
    }

    stages {
@@ -36,7 +36,11 @@ pipeline {
            sh "./gradlew " +
                "-DbuildId=\"\${BUILD_ID}\" " +
                "-Dkubenetize=true " +
                "-Ddocker.run.tag=\"\${DOCKER_TAG_TO_USE}\"" +
                "-Ddocker.run.tag=\"\${DOCKER_TAG_TO_USE}\" " +
                "-Dartifactory.username=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
                "-Dartifactory.password=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
                "-Dgit.branch=\"\${GIT_BRANCH}\" " +
                "-Dgit.target.branch=\"\${CHANGE_TARGET}\" " +
                " deAllocateForAllParallelIntegrationTest allParallelIntegrationTest --stacktrace"
        }
    }
147  buildSrc/src/main/groovy/net/corda/testing/Artifactory.java  (Normal file)

@@ -0,0 +1,147 @@
package net.corda.testing;

import okhttp3.*;
import org.apache.commons.compress.utils.IOUtils;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;

/**
 * Used by TestArtifacts
 */
public class Artifactory {

    //<editor-fold desc="Statics">
    private static final Logger LOG = LoggerFactory.getLogger(Artifactory.class);

    private static String authorization() {
        return Credentials.basic(Properties.getUsername(), Properties.getPassword());
    }

    /**
     * Construct the URL in a style that Artifactory prefers.
     *
     * @param baseUrl e.g. https://software.r3.com/artifactory/corda-releases/net/corda/corda/
     * @param theTag e.g. 4.3-RC0
     * @param artifact e.g. corda
     * @param extension e.g. jar
     * @return full URL to artifact.
     */
    private static String getFullUrl(@NotNull final String baseUrl,
                                     @NotNull final String theTag,
                                     @NotNull final String artifact,
                                     @NotNull final String extension) {
        return baseUrl + "/" + theTag + "/" + getFileName(artifact, extension, theTag);
    }

    /**
     * @param artifact e.g. corda
     * @param extension e.g. jar
     * @param theTag e.g. 4.3
     * @return e.g. corda-4.3.jar
     */
    static String getFileName(@NotNull final String artifact,
                              @NotNull final String extension,
                              @Nullable final String theTag) {
        StringBuilder sb = new StringBuilder().append(artifact);
        if (theTag != null) {
            sb.append("-").append(theTag);
        }
        sb.append(".").append(extension);
        return sb.toString();
    }
    //</editor-fold>
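For illustration only (not part of this commit): tracing getFullUrl()/getFileName() with values that appear later in this diff in TestDurationArtifacts. The tag value is hypothetical.

    public class ArtifactoryUrlExample {
        public static void main(String[] args) {
            String base = "https://software.r3.com/artifactory/corda-test-results/net/corda";
            String tag = "corda-release-os-4-3"; // hypothetical branch tag
            String artifact = "tests-durations"; // TestDurationArtifacts.ARTIFACT
            // getFileName(artifact, "zip", tag) -> "tests-durations-corda-release-os-4-3.zip"
            // getFullUrl(base, tag, artifact, "zip") -> base + "/" + tag + "/" + that file name
            System.out.println(base + "/" + tag + "/" + artifact + "-" + tag + ".zip");
        }
    }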
    /**
     * Get the unit tests, synchronous get.
     * See https://www.jfrog.com/confluence/display/RTF/Artifactory+REST+API#ArtifactoryRESTAPI-RetrieveLatestArtifact
     *
     * @return true if successful, false otherwise.
     */
    boolean get(@NotNull final String baseUrl,
                @NotNull final String theTag,
                @NotNull final String artifact,
                @NotNull final String extension,
                @NotNull final OutputStream outputStream) {
        final String url = getFullUrl(baseUrl, theTag, artifact, extension);
        final Request request = new Request.Builder()
                .addHeader("Authorization", authorization())
                .url(url)
                .build();

        final OkHttpClient client = new OkHttpClient();

        try (Response response = client.newCall(request).execute()) {
            handleResponse(response);
            if (response.body() != null) {
                outputStream.write(response.body().bytes());
            } else {
                LOG.warn("Response body was empty");
            }
        } catch (IOException e) {
            LOG.warn("Unable to execute GET via REST");
            LOG.debug("Exception", e);
            return false;
        }

        LOG.warn("Ok. REST GET successful");

        return true;
    }
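A usage sketch only (not in this commit); it mirrors how loadTests() in TestDurationArtifacts, further down this diff, drives get(). The tag value is hypothetical, and the caller is assumed to live in the same package since get() is package-private.

    package net.corda.testing;

    import java.io.ByteArrayOutputStream;

    class ArtifactoryGetExample {
        static byte[] fetch() {
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            boolean ok = new Artifactory().get(
                    "https://software.r3.com/artifactory/corda-test-results/net/corda",
                    "corda-release-os-4-3", // hypothetical branch tag
                    "tests-durations", "zip", out);
            return ok ? out.toByteArray() : new byte[0];
        }
    }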
    /**
     * Post an artifact, synchronous PUT
     * See https://www.jfrog.com/confluence/display/RTF/Artifactory+REST+API#ArtifactoryRESTAPI-DeployArtifact
     *
     * @return true if successful
     */
    boolean put(@NotNull final String baseUrl,
                @NotNull final String theTag,
                @NotNull final String artifact,
                @NotNull final String extension,
                @NotNull final InputStream inputStream) {
        final MediaType contentType = MediaType.parse("application/zip, application/octet-stream");
        final String url = getFullUrl(baseUrl, theTag, artifact, extension);

        final OkHttpClient client = new OkHttpClient();

        byte[] bytes;

        try {
            bytes = IOUtils.toByteArray(inputStream);
        } catch (IOException e) {
            LOG.warn("Unable to execute PUT tests via REST: ", e);
            return false;
        }

        final Request request = new Request.Builder()
                .addHeader("Authorization", authorization())
                .url(url)
                .put(RequestBody.create(contentType, bytes))
                .build();

        try (Response response = client.newCall(request).execute()) {
            handleResponse(response);
        } catch (IOException e) {
            LOG.warn("Unable to execute PUT via REST: ", e);
            return false;
        }

        return true;
    }

    private void handleResponse(@NotNull final Response response) throws IOException {
        if (response.isSuccessful()) return;

        LOG.warn("Bad response from server: {}", response.toString());
        LOG.warn(response.toString());
        if (response.code() == 401) {
            throw new IOException("Not authorized - incorrect credentials?");
        }

        throw new IOException(response.message());
    }
}
@@ -5,20 +5,29 @@ package net.corda.testing;
import groovy.lang.Tuple2;
import org.gradle.api.tasks.TaskAction;
import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class BucketingAllocator {

    private List<Tuple2<TestLister, Object>> sources = new ArrayList<>();
    private static final Logger LOG = LoggerFactory.getLogger(BucketingAllocator.class);
    private final List<TestsForForkContainer> forkContainers;
    private final Supplier<List<Tuple2<String, Double>>> timedTestsProvider;
    private final Supplier<Tests> timedTestsProvider;
    private List<Tuple2<TestLister, Object>> sources = new ArrayList<>();


    public BucketingAllocator(Integer forkCount, Supplier<List<Tuple2<String, Double>>> timedTestsProvider) {
    public BucketingAllocator(Integer forkCount, Supplier<Tests> timedTestsProvider) {
        this.forkContainers = IntStream.range(0, forkCount).mapToObj(TestsForForkContainer::new).collect(Collectors.toList());
        this.timedTestsProvider = timedTestsProvider;
    }
@@ -33,9 +42,9 @@ public class BucketingAllocator {

    @TaskAction
    public void generateTestPlan() {
        List<Tuple2<String, Double>> allTestsFromCSV = timedTestsProvider.get();
        Tests allTestsFromFile = timedTestsProvider.get();
        List<Tuple2<String, Object>> allDiscoveredTests = getTestsOnClasspathOfTestingTasks();
        List<TestBucket> matchedTests = matchClasspathTestsToCSV(allTestsFromCSV, allDiscoveredTests);
        List<TestBucket> matchedTests = matchClasspathTestsToFile(allTestsFromFile, allDiscoveredTests);

        //use greedy algo - for each testbucket find the currently smallest container and add to it
        allocateTestsToForks(matchedTests);
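The comment above names the allocation strategy, but allocateTestsToForks() itself is not among the hunks shown. A minimal sketch (an assumption, not the commit's code) of the greedy step it describes, using the TestsForForkContainer API visible later in this file:

    // Sketch only: each bucket goes to the fork with the smallest running duration.
    // Buckets arrive sorted longest-first (see matchClasspathTestsToFile below),
    // which is the usual greedy heuristic for balanced bin packing.
    private void allocateTestsToForksSketch(List<TestBucket> matchedTests) {
        for (TestBucket bucket : matchedTests) {
            TestsForForkContainer smallest = forkContainers.stream()
                    .min(Comparator.comparing(TestsForForkContainer::getCurrentDuration))
                    .orElseThrow(IllegalStateException::new);
            smallest.addBucket(bucket); // addBucket() also bumps the fork's running total
        }
    }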
@@ -44,15 +53,31 @@ public class BucketingAllocator {
        printSummary();
    }

    static String getDuration(long nanos) {
        long t = TimeUnit.NANOSECONDS.toMinutes(nanos);
        if (t > 0) {
            return t + " mins";
        }
        t = TimeUnit.NANOSECONDS.toSeconds(nanos);
        if (t > 0) {
            return t + " secs";
        }
        t = TimeUnit.NANOSECONDS.toMillis(nanos);
        if (t > 0) {
            return t + " ms";
        }
        return nanos + " ns";
    }
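For illustration only (not in the commit): getDuration() reports the largest whole unit and truncates downwards. Same package (net.corda.testing) is assumed, since the method is package-private.

    public class GetDurationExample {
        public static void main(String[] args) {
            System.out.println(BucketingAllocator.getDuration(90_000_000_000L)); // "1 mins" (1.5 min truncated)
            System.out.println(BucketingAllocator.getDuration(5_500_000_000L));  // "5 secs"
            System.out.println(BucketingAllocator.getDuration(2_500_000L));      // "2 ms"
            System.out.println(BucketingAllocator.getDuration(999L));            // "999 ns"
        }
    }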
    private void printSummary() {
        forkContainers.forEach(container -> {
            System.out.println("####### TEST PLAN SUMMARY ( " + container.forkIdx + " ) #######");
            System.out.println("Duration: " + container.getCurrentDuration());
            System.out.println("Duration: " + getDuration(container.getCurrentDuration()));
            System.out.println("Number of tests: " + container.testsForFork.stream().mapToInt(b -> b.foundTests.size()).sum());
            System.out.println("Tests to Run: ");
            container.testsForFork.forEach(tb -> {
                System.out.println(tb.testName);
                tb.foundTests.forEach(ft -> System.out.println("\t" + ft.getFirst() + ", " + ft.getSecond()));
                tb.foundTests.forEach(ft -> System.out.println("\t" + ft.getFirst() + ", " + getDuration(ft.getSecond())));
            });
        });
    }
@@ -64,12 +89,23 @@ public class BucketingAllocator {
        });
    }

    private List<TestBucket> matchClasspathTestsToCSV(List<Tuple2<String, Double>> allTestsFromCSV, @NotNull List<Tuple2<String, Object>> allDiscoveredTests) {
    List<TestsForForkContainer> getForkContainers() {
        return forkContainers;
    }

    private List<TestBucket> matchClasspathTestsToFile(@NotNull final Tests tests,
                                                       @NotNull final List<Tuple2<String, Object>> allDiscoveredTests) {
        // Note that this does not preserve the order of tests with known and unknown durations, as we
        // always return a duration from 'tests.startsWith'.
        return allDiscoveredTests.stream().map(tuple -> {
            String testName = tuple.getFirst();
            Object task = tuple.getSecond();
            //2DO [can this filtering algorithm be improved - the test names are sorted, it should be possible to do something using binary search]
            List<Tuple2<String, Double>> matchingTests = allTestsFromCSV.stream().filter(testFromCSV -> testFromCSV.getFirst().startsWith(testName)).collect(Collectors.toList());
            final String testName = tuple.getFirst();
            final Object task = tuple.getSecond();

            // If the gradle task is distributing by class rather than method, then 'testName' will be the className
            // and not className.testName
            // No matter which it is, we return the mean test duration as the duration value if not found.
            final List<Tuple2<String, Long>> matchingTests = tests.startsWith(testName);

            return new TestBucket(task, testName, matchingTests);
        }).sorted(Comparator.comparing(TestBucket::getDuration).reversed()).collect(Collectors.toList());
    }
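Tests.startsWith() is not among the hunks shown in this diff. A hypothetical sketch, consistent with the comments above (prefix match over recorded names, mean duration as the fallback), purely to make the matching step concrete; it assumes the imports already present in this file:

    // Hypothetical (the real Tests.startsWith is not shown here): return every
    // recorded test whose full name begins with the prefix; if none match, return
    // a single entry weighted at the mean duration so unseen tests still count.
    static List<Tuple2<String, Long>> startsWithSketch(List<Tuple2<String, Long>> recorded,
                                                       String prefix, long meanNanos) {
        List<Tuple2<String, Long>> matches = recorded.stream()
                .filter(t -> t.getFirst().startsWith(prefix))
                .collect(Collectors.toList());
        return matches.isEmpty()
                ? Collections.singletonList(new Tuple2<>(prefix, meanNanos))
                : matches;
    }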
@@ -85,18 +121,20 @@ public class BucketingAllocator {
    public static class TestBucket {
        final Object testTask;
        final String testName;
        final List<Tuple2<String, Double>> foundTests;
        final Double duration;
        final List<Tuple2<String, Long>> foundTests;
        final long durationNanos;

        public TestBucket(Object testTask, String testName, List<Tuple2<String, Double>> foundTests) {
        public TestBucket(@NotNull final Object testTask,
                          @NotNull final String testName,
                          @NotNull final List<Tuple2<String, Long>> foundTests) {
            this.testTask = testTask;
            this.testName = testName;
            this.foundTests = foundTests;
            duration = Math.max(foundTests.stream().mapToDouble(tp -> Math.max(tp.getSecond(), 1)).sum(), 1);
            this.durationNanos = foundTests.stream().mapToLong(tp -> Math.max(tp.getSecond(), 1)).sum();
        }

        public Double getDuration() {
            return duration;
        public long getDuration() {
            return durationNanos;
        }

        @Override
@@ -105,17 +143,16 @@ public class BucketingAllocator {
                    "testTask=" + testTask +
                    ", nameWithAsterix='" + testName + '\'' +
                    ", foundTests=" + foundTests +
                    ", duration=" + duration +
                    ", durationNanos=" + durationNanos +
                    '}';
        }
    }

    public static class TestsForForkContainer {
        private Double runningDuration = 0.0;
        private final Integer forkIdx;

        private final List<TestBucket> testsForFork = Collections.synchronizedList(new ArrayList<>());
        private final Map<Object, List<TestBucket>> frozenTests = new HashMap<>();
        private long runningDuration = 0L;

        public TestsForForkContainer(Integer forkIdx) {
            this.forkIdx = forkIdx;
@@ -123,10 +160,10 @@ public class BucketingAllocator {

        public void addBucket(TestBucket tb) {
            this.testsForFork.add(tb);
            this.runningDuration = runningDuration + tb.duration;
            this.runningDuration = this.runningDuration + tb.durationNanos;
        }

        public Double getCurrentDuration() {
        public Long getCurrentDuration() {
            return runningDuration;
        }

@@ -154,6 +191,4 @@ public class BucketingAllocator {
                    '}';
        }
    }

}
@@ -1,39 +1,19 @@
package net.corda.testing;

import groovy.lang.Tuple2;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVRecord;
import org.gradle.api.DefaultTask;
import org.gradle.api.tasks.TaskAction;
import org.gradle.api.tasks.testing.Test;

import javax.inject.Inject;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Objects;
import java.util.function.Supplier;
import java.util.stream.Collectors;

public class BucketingAllocatorTask extends DefaultTask {
    private static final String DEFAULT_TESTING_TEST_TIMES_CSV = "testing/test-times.csv";
    private final BucketingAllocator allocator;

    @Inject
    public BucketingAllocatorTask(Integer forkCount) {
        Supplier<List<Tuple2<String, Double>>> defaultTestCSV = () -> {
            try {
                FileReader csvSource = new FileReader(new File(BucketingAllocatorTask.this.getProject().getRootDir(), DEFAULT_TESTING_TEST_TIMES_CSV));
                return fromCSV(csvSource);
            } catch (IOException e) {
                return Collections.emptyList();
            }
        };
        this.allocator = new BucketingAllocator(forkCount, defaultTestCSV);
        this.allocator = new BucketingAllocator(forkCount, TestDurationArtifacts.getTestsSupplier());
    }

    public void addSource(TestLister source, Test testTask) {
@@ -49,21 +29,4 @@ public class BucketingAllocatorTask extends DefaultTask {
    public void allocate() {
        allocator.generateTestPlan();
    }


    public static List<Tuple2<String, Double>> fromCSV(Reader reader) throws IOException {
        String name = "Test Name";
        String duration = "Duration(ms)";
        List<CSVRecord> records = CSVFormat.DEFAULT.withHeader().parse(reader).getRecords();
        return records.stream().map(record -> {
            try {
                String testName = record.get(name);
                String testDuration = record.get(duration);
                return new Tuple2<>(testName, Math.max(Double.parseDouble(testDuration), 1));
            } catch (IllegalArgumentException | IllegalStateException e) {
                return null;
            }
        }).filter(Objects::nonNull).sorted(Comparator.comparing(Tuple2::getFirst)).collect(Collectors.toList());
    }

}
@@ -13,6 +13,8 @@ import org.gradle.api.tasks.testing.Test
 */
class DistributedTesting implements Plugin<Project> {

    public static final String GRADLE_GROUP = "Distributed Testing";

    static def getPropertyAsInt(Project proj, String property, Integer defaultValue) {
        return proj.hasProperty(property) ? Integer.parseInt(proj.property(property).toString()) : defaultValue
    }
@@ -20,6 +22,7 @@ class DistributedTesting implements Plugin<Project> {
    @Override
    void apply(Project project) {
        if (System.getProperty("kubenetize") != null) {
            Properties.setRootProjectType(project.rootProject.name)

            Integer forks = getPropertyAsInt(project, "dockerForks", 1)

@@ -30,6 +33,9 @@ class DistributedTesting implements Plugin<Project> {
            String tagToUseForRunningTests = System.getProperty(ImageBuilding.PROVIDE_TAG_FOR_RUNNING_PROPERTY)
            String tagToUseForBuilding = System.getProperty(ImageBuilding.PROVIDE_TAG_FOR_RUNNING_PROPERTY)
            BucketingAllocatorTask globalAllocator = project.tasks.create("bucketingAllocator", BucketingAllocatorTask, forks)
            globalAllocator.group = GRADLE_GROUP
            globalAllocator.description = "Allocates tests to buckets"

            Set<String> requestedTaskNames = project.gradle.startParameter.taskNames.toSet()
            def requestedTasks = requestedTaskNames.collect { project.tasks.findByPath(it) }
@@ -41,14 +47,14 @@ class DistributedTesting implements Plugin<Project> {
            //4. after each completed test write its name to a file to keep track of what finished for restart purposes
            project.subprojects { Project subProject ->
                subProject.tasks.withType(Test) { Test task ->
                    println "Evaluating ${task.getPath()}"
                    project.logger.info("Evaluating ${task.getPath()}")
                    if (task in requestedTasks && !task.hasProperty("ignoreForDistribution")) {
                        println "Modifying ${task.getPath()}"
                        project.logger.info "Modifying ${task.getPath()}"
                        ListTests testListerTask = createTestListingTasks(task, subProject)
                        globalAllocator.addSource(testListerTask, task)
                        Test modifiedTestTask = modifyTestTaskForParallelExecution(subProject, task, globalAllocator)
                    } else {
                        println "Skipping modification of ${task.getPath()} as it's not scheduled for execution"
                        project.logger.info "Skipping modification of ${task.getPath()} as it's not scheduled for execution"
                    }
                    if (!task.hasProperty("ignoreForDistribution")) {
                        //this is what enables execution of a single test suite - for example node:parallelTest would execute all unit tests in node, node:parallelIntegrationTest would do the same for integration tests
@@ -73,10 +79,12 @@ class DistributedTesting implements Plugin<Project> {
            userGroups.forEach { testGrouping ->

                //for each "group" (ie: test, integrationTest) within the grouping find all the Test tasks which have the same name.
                List<Test> groups = ((ParallelTestGroup) testGrouping).groups.collect { allTestTasksGroupedByType.get(it) }.flatten()
                List<Test> testTasksToRunInGroup = ((ParallelTestGroup) testGrouping).groups.collect {
                    allTestTasksGroupedByType.get(it)
                }.flatten()

                //join up these test tasks into a single set of tasks to invoke (node:test, node:integrationTest...)
                String superListOfTasks = groups.collect { it.path }.join(" ")
                String superListOfTasks = testTasksToRunInGroup.collect { it.path }.join(" ")

                //generate a preAllocate / deAllocate task which allows you to "pre-book" a node during the image building phase
                //this prevents time lost to cloud provider node spin up time (assuming image build time > provider spin up time)
@@ -88,6 +96,8 @@ class DistributedTesting implements Plugin<Project> {
                }

                def userDefinedParallelTask = project.rootProject.tasks.create("userDefined" + testGrouping.name.capitalize(), KubesTest) {
                    group = GRADLE_GROUP

                    if (!tagToUseForRunningTests) {
                        dependsOn imagePushTask
                    }
@@ -108,6 +118,7 @@ class DistributedTesting implements Plugin<Project> {
                    }
                }
                def reportOnAllTask = project.rootProject.tasks.create("userDefinedReports${testGrouping.name.capitalize()}", KubesReporting) {
                    group = GRADLE_GROUP
                    dependsOn userDefinedParallelTask
                    destinationDir new File(project.rootProject.getBuildDir(), "userDefinedReports${testGrouping.name.capitalize()}")
                    doFirst {
@@ -117,15 +128,25 @@ class DistributedTesting implements Plugin<Project> {
                        reportOn(userDefinedParallelTask.testOutput)
                    }
                }

                // Task to zip up test results, and upload them to somewhere (Artifactory).
                def zipTask = TestDurationArtifacts.createZipTask(project.rootProject, testGrouping.name, userDefinedParallelTask)

                userDefinedParallelTask.finalizedBy(reportOnAllTask)
                testGrouping.dependsOn(userDefinedParallelTask)
                zipTask.dependsOn(userDefinedParallelTask)
                testGrouping.dependsOn(zipTask)
            }
        }

        // Added only so that we can manually run zipTask on the command line as a test.
        TestDurationArtifacts.createZipTask(project.rootProject, "zipTask", null)
                .setDescription("Zip task that can be run locally for testing");
    }

    private List<Task> generatePreAllocateAndDeAllocateTasksForGrouping(Project project, ParallelTestGroup testGrouping) {
        PodAllocator allocator = new PodAllocator(project.getLogger())
        Task preAllocateTask = project.rootProject.tasks.create("preAllocateFor" + testGrouping.name.capitalize()) {
            group = GRADLE_GROUP
            doFirst {
                String dockerTag = System.getProperty(ImageBuilding.PROVIDE_TAG_FOR_BUILDING_PROPERTY)
                if (dockerTag == null) {
@@ -142,6 +163,7 @@ class DistributedTesting implements Plugin<Project> {
        }

        Task deAllocateTask = project.rootProject.tasks.create("deAllocateFor" + testGrouping.name.capitalize()) {
            group = GRADLE_GROUP
            doFirst {
                String dockerTag = System.getProperty(ImageBuilding.PROVIDE_TAG_FOR_RUNNING_PROPERTY)
                if (dockerTag == null) {
@@ -160,6 +182,7 @@ class DistributedTesting implements Plugin<Project> {
        def capitalizedTaskName = task.getName().capitalize()

        KubesTest createdParallelTestTask = projectContainingTask.tasks.create("parallel" + capitalizedTaskName, KubesTest) {
            group = GRADLE_GROUP + " Parallel Test Tasks"
            if (!providedTag) {
                dependsOn imageBuildingTask
            }
@@ -232,6 +255,7 @@ class DistributedTesting implements Plugin<Project> {
        //determine all the tests which are present in this test task.
        //this list will then be shared between the various worker forks
        def createdListTask = subProject.tasks.create("listTestsFor" + capitalizedTaskName, ListTests) {
            group = GRADLE_GROUP
            //the convention is that a testing task is backed by a sourceSet with the same name
            dependsOn subProject.getTasks().getByName("${taskName}Classes")
            doFirst {
@@ -242,6 +266,7 @@ class DistributedTesting implements Plugin<Project> {

        //convenience task to utilize the output of the test listing task to display to local console, useful for debugging missing tests
        def createdPrintTask = subProject.tasks.create("printTestsFor" + capitalizedTaskName) {
            group = GRADLE_GROUP
            dependsOn createdListTask
            doLast {
                createdListTask.getTestsForFork(
@@ -104,8 +104,7 @@ public class KubesTest extends DefaultTask {
        }

        List<Future<KubePodResult>> futures = IntStream.range(0, numberOfPods).mapToObj(i -> {
            String potentialPodName = (taskToExecuteName + "-" + stableRunId + random + i).toLowerCase();
            String podName = potentialPodName.substring(0, Math.min(potentialPodName.length(), 62));
            String podName = generatePodName(stableRunId, random, i);
            return submitBuild(NAMESPACE, numberOfPods, i, podName, printOutput, 3);
        }).collect(Collectors.toList());

@@ -125,6 +124,16 @@ public class KubesTest extends DefaultTask {
        }).collect(Collectors.toList());
    }

    @NotNull
    private String generatePodName(String stableRunId, String random, int i) {
        int magicMaxLength = 63;
        String provisionalName = taskToExecuteName.toLowerCase() + "-" + stableRunId + "-" + random + "-" + i;
        //length = 100
        //100-63 = 37
        //subString(37, 100) -> string of 63 characters
        return provisionalName.substring(Math.max(provisionalName.length() - magicMaxLength, 0));
    }
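Kubernetes rejects object names longer than 63 characters (a DNS label), which is what magicMaxLength encodes. An illustration (not part of this commit) of why the method keeps the tail of the name: truncating from the front preserves the distinguishing "-<runId>-<random>-<i>" suffix that the old substring(0, 62) could cut off.

    import java.util.Collections;

    public class PodNameExample {
        public static void main(String[] args) {
            // Hypothetical 98-character provisional name.
            String longTaskName = String.join("", Collections.nCopies(10, "verylong")) + "task";
            String provisionalName = (longTaskName + "-run123-abcd-7").toLowerCase();
            int magicMaxLength = 63;
            String podName = provisionalName.substring(
                    Math.max(provisionalName.length() - magicMaxLength, 0));
            System.out.println(podName.length()); // 63, and it still ends in "-run123-abcd-7"
        }
    }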
    @NotNull
    private KubernetesClient getKubernetesClient() {
        io.fabric8.kubernetes.client.Config config = new io.fabric8.kubernetes.client.ConfigBuilder()
@@ -396,10 +405,20 @@ public class KubesTest extends DefaultTask {
    }

    private String[] getBuildCommand(int numberOfPods, int podIdx) {
        final String gitBranch = " -Dgit.branch=" + Properties.getGitBranch();
        final String gitTargetBranch = " -Dgit.target.branch=" + Properties.getTargetGitBranch();
        final String artifactoryUsername = " -Dartifactory.username=" + Properties.getUsername() + " ";
        final String artifactoryPassword = " -Dartifactory.password=" + Properties.getPassword() + " ";

        String shellScript = "(let x=1 ; while [ ${x} -ne 0 ] ; do echo \"Waiting for DNS\" ; curl services.gradle.org > /dev/null 2>&1 ; x=$? ; sleep 1 ; done ) && "
                + " cd /tmp/source && " +
                "(let y=1 ; while [ ${y} -ne 0 ] ; do echo \"Preparing build directory\" ; ./gradlew testClasses integrationTestClasses --parallel 2>&1 ; y=$? ; sleep 1 ; done ) && " +
                "(./gradlew -D" + ListTests.DISTRIBUTION_PROPERTY + "=" + distribution.name() + " -Dkubenetize -PdockerFork=" + podIdx + " -PdockerForks=" + numberOfPods + " " + fullTaskToExecutePath + " " + getLoggingLevel() + " 2>&1) ; " +
                "(./gradlew -D" + ListTests.DISTRIBUTION_PROPERTY + "=" + distribution.name() +
                gitBranch +
                gitTargetBranch +
                artifactoryUsername +
                artifactoryPassword +
                "-Dkubenetize -PdockerFork=" + podIdx + " -PdockerForks=" + numberOfPods + " " + fullTaskToExecutePath + " " + getLoggingLevel() + " 2>&1) ; " +
                "let rs=$? ; sleep 10 ; exit ${rs}";
        return new String[]{"bash", "-c", shellScript};
    }
@@ -1,8 +1,9 @@
package net.corda.testing;

import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.api.model.PodBuilder;
import io.fabric8.kubernetes.api.model.Quantity;
import io.fabric8.kubernetes.api.model.batch.Job;
import io.fabric8.kubernetes.api.model.batch.JobBuilder;
import io.fabric8.kubernetes.client.Config;
import io.fabric8.kubernetes.client.ConfigBuilder;
import io.fabric8.kubernetes.client.DefaultKubernetesClient;
@@ -48,15 +49,15 @@ public class PodAllocator {

        KubernetesClient client = new DefaultKubernetesClient(config);

        List<Pod> podsToRequest = IntStream.range(0, number).mapToObj(i -> buildPod("pa-" + prefix + i, coresPerPod, memoryPerPod)).collect(Collectors.toList());
        podsToRequest.forEach(requestedPod -> {
            String msg = "PreAllocating " + requestedPod.getMetadata().getName();
        List<Job> podsToRequest = IntStream.range(0, number).mapToObj(i -> buildJob("pa-" + prefix + i, coresPerPod, memoryPerPod)).collect(Collectors.toList());
        podsToRequest.forEach(requestedJob -> {
            String msg = "PreAllocating " + requestedJob.getMetadata().getName();
            if (logger instanceof org.gradle.api.logging.Logger) {
                ((org.gradle.api.logging.Logger) logger).quiet(msg);
            } else {
                logger.info(msg);
            }
            client.pods().inNamespace(KubesTest.NAMESPACE).create(requestedPod);
            client.batch().jobs().inNamespace(KubesTest.NAMESPACE).create(requestedJob);
        });
    }

@@ -69,40 +70,36 @@ public class PodAllocator {
                .withWebsocketPingInterval(CONNECTION_TIMEOUT)
                .build();
        KubernetesClient client = new DefaultKubernetesClient(config);
        Stream<Pod> podsToDelete = client.pods().inNamespace(KubesTest.NAMESPACE).list()
        Stream<Job> jobsToDelete = client.batch().jobs().inNamespace(KubesTest.NAMESPACE).list()
                .getItems()
                .stream()
                .sorted(Comparator.comparing(p -> p.getMetadata().getName()))
                .filter(foundPod -> foundPod.getMetadata().getName().contains(prefix));

        List<CompletableFuture<Pod>> deleteFutures = podsToDelete.map(pod -> {
            CompletableFuture<Pod> result = new CompletableFuture<>();
            Watch watch = client.pods().inNamespace(pod.getMetadata().getNamespace()).withName(pod.getMetadata().getName()).watch(new Watcher<Pod>() {
        List<CompletableFuture<Job>> deleteFutures = jobsToDelete.map(job -> {
            CompletableFuture<Job> result = new CompletableFuture<>();
            Watch watch = client.batch().jobs().inNamespace(job.getMetadata().getNamespace()).withName(job.getMetadata().getName()).watch(new Watcher<Job>() {
                @Override
                public void eventReceived(Action action, Pod resource) {
                public void eventReceived(Action action, Job resource) {
                    if (action == Action.DELETED) {
                        result.complete(resource);
                        String msg = "Successfully deleted pod " + pod.getMetadata().getName();
                        if (logger instanceof org.gradle.api.logging.Logger) {
                            ((org.gradle.api.logging.Logger) logger).lifecycle(msg);
                        } else {
                            logger.info(msg);
                        }
                        String msg = "Successfully deleted job " + job.getMetadata().getName();
                        logger.info(msg);
                    }
                }

                @Override
                public void onClose(KubernetesClientException cause) {
                    String message = "Failed to delete pod " + pod.getMetadata().getName();
                    String message = "Failed to delete job " + job.getMetadata().getName();
                    if (logger instanceof org.gradle.api.logging.Logger) {
                        ((org.gradle.api.logging.Logger) logger).quiet(message);
                        ((org.gradle.api.logging.Logger) logger).error(message);
                    } else {
                        logger.info(message);
                    }
                    result.completeExceptionally(cause);
                }
            });
            client.pods().delete(pod);
            client.batch().jobs().delete(job);
            return result;
        }).collect(Collectors.toList());

@@ -114,8 +111,14 @@ public class PodAllocator {
    }


    Pod buildPod(String podName, Integer coresPerPod, Integer memoryPerPod) {
        return new PodBuilder().withNewMetadata().withName(podName).endMetadata()
    Job buildJob(String podName, Integer coresPerPod, Integer memoryPerPod) {
        return new JobBuilder().withNewMetadata().withName(podName).endMetadata()
                .withNewSpec()
                .withTtlSecondsAfterFinished(10)
                .withNewTemplate()
                .withNewMetadata()
                .withName(podName + "-pod")
                .endMetadata()
                .withNewSpec()
                .addNewContainer()
                .withImage("busybox:latest")
@@ -129,6 +132,8 @@ public class PodAllocator {
                .endContainer()
                .withRestartPolicy("Never")
                .endSpec()
                .endTemplate()
                .endSpec()
                .build();
    }
91  buildSrc/src/main/groovy/net/corda/testing/Properties.java  (Normal file)

@@ -0,0 +1,91 @@
package net.corda.testing;

import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * A single class to hold some of the properties we need to get from the command line
 * in order to store test results in Artifactory.
 */
public class Properties {
    private static final Logger LOG = LoggerFactory.getLogger(Properties.class);

    private static String ROOT_PROJECT_TYPE = "corda"; // corda or enterprise

    /**
     * Get the Corda type. Used in the tag names when we store in Artifactory.
     *
     * @return either 'corda' or 'enterprise'
     */
    static String getRootProjectType() {
        return ROOT_PROJECT_TYPE;
    }

    /**
     * Set the Corda (repo) type - either enterprise, or corda (open-source).
     * Used in the tag names when we store in Artifactory.
     *
     * @param rootProjectType the corda repo type.
     */
    static void setRootProjectType(@NotNull final String rootProjectType) {
        ROOT_PROJECT_TYPE = rootProjectType;
    }

    /**
     * Get property with logging
     *
     * @param key property to get
     * @return empty string, or trimmed value
     */
    @NotNull
    static String getProperty(@NotNull final String key) {
        final String value = System.getProperty(key, "").trim();
        if (value.isEmpty()) {
            LOG.debug("Property '{}' not set", key);
        } else {
            LOG.debug("Ok. Property '{}' is set", key);
        }
        return value;
    }

    /**
     * Get Artifactory username
     *
     * @return the username
     */
    static String getUsername() {
        return getProperty("artifactory.username");
    }

    /**
     * Get Artifactory password
     *
     * @return the password
     */
    static String getPassword() {
        return getProperty("artifactory.password");
    }

    /**
     * The current branch/tag
     *
     * @return the current branch
     */
    @NotNull
    static String getGitBranch() {
        return getProperty("git.branch").replace('/', '-');
    }

    /**
     * @return the branch that this branch was likely checked out from.
     */
    @NotNull
    static String getTargetGitBranch() {
        return getProperty("git.target.branch").replace('/', '-');
    }

    static boolean getPublishJunitTests() {
        return ! getProperty("publish.junit").isEmpty();
    }
}
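An illustration only (not in the commit; the branch value is hypothetical) of how the sanitized branch name flows through Properties when Gradle is launched with -Dgit.branch=release/os/4.3. Same package is assumed, since getGitBranch() is package-private.

    public class PropertiesExample {
        public static void main(String[] args) {
            System.setProperty("git.branch", "release/os/4.3");
            // getGitBranch() replaces '/' with '-', giving "release-os-4.3",
            // which is safe to embed in an Artifactory path segment.
            System.out.println(Properties.getGitBranch());
        }
    }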
@@ -0,0 +1,430 @@
package net.corda.testing;

import groovy.lang.Tuple2;
import org.apache.commons.compress.archivers.ArchiveEntry;
import org.apache.commons.compress.archivers.ArchiveException;
import org.apache.commons.compress.archivers.ArchiveInputStream;
import org.apache.commons.compress.archivers.ArchiveStreamFactory;
import org.apache.commons.compress.utils.IOUtils;
import org.gradle.api.Project;
import org.gradle.api.Task;
import org.gradle.api.tasks.bundling.Zip;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.NamedNodeMap;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpression;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.nio.file.FileSystems;
import java.nio.file.FileVisitResult;
import java.nio.file.FileVisitor;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.PathMatcher;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.function.BiFunction;
import java.util.function.Supplier;

/**
 * Get or put test artifacts to/from a REST endpoint. The expected format is a zip file of junit XML files.
 * See https://www.jfrog.com/confluence/display/RTF/Artifactory+REST+API
 */
public class TestDurationArtifacts {
    private static final String EXTENSION = "zip";
    private static final String BASE_URL = "https://software.r3.com/artifactory/corda-test-results/net/corda";
    private static final Logger LOG = LoggerFactory.getLogger(TestDurationArtifacts.class);
    private static final String ARTIFACT = "tests-durations";
    // The one and only set of tests information. We load these at the start of a build, and update them and save them at the end.
    static Tests tests = new Tests();

    // Artifactory API
    private final Artifactory artifactory = new Artifactory();

    /**
     * Write out the test durations as a CSV file.
     * Reload the tests from artifactory and update with the latest run.
     *
     * @param project project that we are attaching the test to.
     * @param name basename for the test.
     * @return the csv task
     */
    private static Task createCsvTask(@NotNull final Project project, @NotNull final String name) {
        return project.getTasks().create("createCsvFromXmlFor" + capitalize(name), Task.class, t -> {
            t.setGroup(DistributedTesting.GRADLE_GROUP);
            t.setDescription("Create csv from all discovered junit xml files");

            // Parse all the junit results and write them to a csv file.
            t.doFirst(task -> {
                project.getLogger().warn("About to create CSV file and zip it");

                // Reload the test object from artifactory
                loadTests();
                // Get the list of junit xml artifacts
                final List<Path> testXmlFiles = getTestXmlFiles(project.getBuildDir().getAbsoluteFile().toPath());
                project.getLogger().warn("Found {} xml junit files", testXmlFiles.size());

                // Read test xml files for tests and duration and add them to the `Tests` object
                // This adjusts the runCount and overall average duration for existing tests.
                for (Path testResult : testXmlFiles) {
                    try {
                        final List<Tuple2<String, Long>> unitTests = fromJunitXml(new FileInputStream(testResult.toFile()));

                        // Add the non-zero duration tests to build up an average.
                        unitTests.stream()
                                .filter(t2 -> t2.getSecond() > 0L)
                                .forEach(unitTest -> tests.addDuration(unitTest.getFirst(), unitTest.getSecond()));

                        final long meanDurationForTests = tests.getMeanDurationForTests();

                        // Add the zero duration tests using the mean value so they are fairly distributed over the pods in the next run.
                        // If we used 'zero' they would all be added to the smallest bucket.
                        unitTests.stream()
                                .filter(t2 -> t2.getSecond() <= 0L)
                                .forEach(unitTest -> tests.addDuration(unitTest.getFirst(), meanDurationForTests));

                    } catch (FileNotFoundException ignored) {
                    }
                }

                // Write the test file to disk.
                try {
                    final FileWriter writer = new FileWriter(new File(project.getRootDir(), ARTIFACT + ".csv"));
                    tests.write(writer);
                    LOG.warn("Written tests csv file with {} tests", tests.size());
                } catch (IOException ignored) {
                }
            });
        });
    }
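A worked illustration (hypothetical numbers, not from the repository) of the zero-duration handling above: if two known tests ran for 4s and 2s, the mean is 3s; counting a brand-new test as 3s lets the greedy allocator spread new tests across forks instead of piling them all into the currently smallest bucket.

    public class MeanFillExample {
        public static void main(String[] args) {
            long[] knownNanos = {4_000_000_000L, 2_000_000_000L};
            long mean = (knownNanos[0] + knownNanos[1]) / knownNanos.length; // 3_000_000_000
            long unknownTestWeight = mean; // used instead of 0 for unseen tests
            System.out.println(unknownTestWeight);
        }
    }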
    @NotNull
    static String capitalize(@NotNull final String str) {
        return str.substring(0, 1).toUpperCase() + str.substring(1); // groovy has this as an extension method
    }

    /**
     * Discover junit xml files, zip them, and upload to artifactory.
     *
     * @param project root project
     * @param name task name that we're 'extending'
     * @return gradle task
     */
    @NotNull
    private static Task createJunitZipTask(@NotNull final Project project, @NotNull final String name) {
        return project.getTasks().create("zipJunitXmlFilesAndUploadFor" + capitalize(name), Zip.class, z -> {
            z.setGroup(DistributedTesting.GRADLE_GROUP);
            z.setDescription("Zip junit files and upload to artifactory");

            z.getArchiveFileName().set(Artifactory.getFileName("junit", EXTENSION, getBranchTag()));
            z.getDestinationDirectory().set(project.getRootDir());
            z.setIncludeEmptyDirs(false);
            z.from(project.getRootDir(), task -> task.include("**/build/test-results-xml/**/*.xml", "**/build/test-results/**/*.xml"));
            z.doLast(task -> {
                try (FileInputStream inputStream = new FileInputStream(new File(z.getArchiveFileName().get()))) {
                    new Artifactory().put(BASE_URL, getBranchTag(), "junit", EXTENSION, inputStream);
                } catch (Exception ignored) {
                }
            });
        });
    }

    /**
     * Zip and upload test-duration csv files to artifactory
     *
     * @param project root project that we're attaching the task to
     * @param name the task name we're 'extending'
     * @return gradle task
     */
    @NotNull
    private static Task createCsvZipAndUploadTask(@NotNull final Project project, @NotNull final String name) {
        return project.getTasks().create("zipCsvFilesAndUploadFor" + capitalize(name), Zip.class, z -> {
            z.setGroup(DistributedTesting.GRADLE_GROUP);
            z.setDescription("Zips test duration csv and uploads to artifactory");

            z.getArchiveFileName().set(Artifactory.getFileName(ARTIFACT, EXTENSION, getBranchTag()));
            z.getDestinationDirectory().set(project.getRootDir());
            z.setIncludeEmptyDirs(false);

            // There's only one csv, but glob it anyway.
            z.from(project.getRootDir(), task -> task.include("**/" + ARTIFACT + ".csv"));

            // ...base class method zips the CSV...

            z.doLast(task -> {
                // We've now created the one csv file containing the tests and their mean durations,
                // this task has zipped it, so we now just upload it.
                project.getLogger().warn("SAVING tests");
                project.getLogger().warn("Attempting to upload {}", z.getArchiveFileName().get());
                try (FileInputStream inputStream = new FileInputStream(new File(z.getArchiveFileName().get()))) {
                    if (!new TestDurationArtifacts().put(getBranchTag(), inputStream)) {
                        project.getLogger().warn("Could not upload zip of tests");
                    } else {
                        project.getLogger().warn("SAVED tests");
                    }
                } catch (Exception e) {
                    project.getLogger().warn("Problem trying to upload: {} {}", z.getArchiveFileName().get(), e.toString());
                }
            });
        });
    }

    /**
     * Create the Gradle Zip task to gather test information
     *
     * @param project project to attach this task to
     * @param name name of the task
     * @param task a task that we depend on when creating the csv so Gradle produces the correct task graph.
     * @return a reference to the created task.
     */
    @NotNull
    public static Task createZipTask(@NotNull final Project project, @NotNull final String name, @Nullable final Task task) {
        final Task csvTask = createCsvTask(project, name);

        if (Properties.getPublishJunitTests()) {
            final Task zipJunitTask = createJunitZipTask(project, name);
            csvTask.dependsOn(zipJunitTask);
        }

        if (task != null) {
            csvTask.dependsOn(task);
        }
        final Task zipCsvTask = createCsvZipAndUploadTask(project, name);
        zipCsvTask.dependsOn(csvTask); // we have to create the csv before we can zip it.

        return zipCsvTask;
    }

    static List<Path> getTestXmlFiles(@NotNull final Path rootDir) {
        List<Path> paths = new ArrayList<>();
        List<PathMatcher> matchers = new ArrayList<>();
        matchers.add(FileSystems.getDefault().getPathMatcher("glob:**/test-results-xml/**/*.xml"));
        try {
            Files.walkFileTree(rootDir, new FileVisitor<Path>() {
                @Override
                public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
                    return FileVisitResult.CONTINUE;
                }

                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
                    for (PathMatcher matcher : matchers) {
                        if (matcher.matches(file)) {
                            paths.add(file);
                            break;
                        }
                    }

                    return FileVisitResult.CONTINUE;
                }

                @Override
                public FileVisitResult visitFileFailed(Path file, IOException exc) {
                    return FileVisitResult.CONTINUE;
                }

                @Override
                public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
                    return FileVisitResult.CONTINUE;
                }
            });
        } catch (IOException e) {
            LOG.warn("Could not walk tree and get all test xml files: {}", e.toString());
        }
        return paths;
    }

    /**
     * Unzip test results in memory and return test names and durations.
     * Assumes the input stream contains only csv files of the correct format.
     *
     * @param tests reference to the Tests object to be populated.
     * @param zippedInputStream stream containing zipped result file(s)
     */
    static void addTestsFromZippedCsv(@NotNull final Tests tests,
                                      @NotNull final InputStream zippedInputStream) {
        // We need this because ArchiveStream requires the `mark` functionality which is supported in buffered streams.
        final BufferedInputStream bufferedInputStream = new BufferedInputStream(zippedInputStream);
        try (ArchiveInputStream archiveInputStream = new ArchiveStreamFactory().createArchiveInputStream(bufferedInputStream)) {
            ArchiveEntry e;
            while ((e = archiveInputStream.getNextEntry()) != null) {
                if (e.isDirectory()) continue;

                // We seem to need to take a copy of the original input stream (as positioned by the ArchiveEntry), because
                // the XML parsing closes the stream after it has finished. This has the side effect of only parsing the first
                // entry in the archive.
                ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
                IOUtils.copy(archiveInputStream, outputStream);
                ByteArrayInputStream byteInputStream = new ByteArrayInputStream(outputStream.toByteArray());

                // Read the tests from the (csv) stream
                final InputStreamReader reader = new InputStreamReader(byteInputStream);

                // Add the tests to the Tests object
                tests.addTests(Tests.read(reader));
            }
        } catch (ArchiveException | IOException e) {
            LOG.warn("Problem unzipping XML test results");
        }

        LOG.debug("Discovered {} tests", tests.size());
    }

    /**
     * For a given stream, return the testcase names and durations.
     * <p>
     * NOTE: the input stream will be closed by this method.
     *
     * @param inputStream an InputStream, closed once parsed
     * @return a list of test names and their durations in nanos.
     */
    @NotNull
    static List<Tuple2<String, Long>> fromJunitXml(@NotNull final InputStream inputStream) {
        final DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance();
        final List<Tuple2<String, Long>> results = new ArrayList<>();

        try {
            final DocumentBuilder builder = dbFactory.newDocumentBuilder();
            final Document document = builder.parse(inputStream);
            document.getDocumentElement().normalize();
            final XPathFactory xPathfactory = XPathFactory.newInstance();
            final XPath xpath = xPathfactory.newXPath();
            final XPathExpression expression = xpath.compile("//testcase");
            final NodeList nodeList = (NodeList) expression.evaluate(document, XPathConstants.NODESET);

            final BiFunction<NamedNodeMap, String, String> get =
                    (a, k) -> a.getNamedItem(k) != null ? a.getNamedItem(k).getNodeValue() : "";

            for (int i = 0; i < nodeList.getLength(); i++) {
                final Node item = nodeList.item(i);
                final NamedNodeMap attributes = item.getAttributes();
                final String testName = get.apply(attributes, "name");
                final String testDuration = get.apply(attributes, "time");
                final String testClassName = get.apply(attributes, "classname");

                // If the test doesn't have a duration (it should), we return zero.
                if (!(testName.isEmpty() || testClassName.isEmpty())) {
                    final long nanos = !testDuration.isEmpty() ? (long) (Double.parseDouble(testDuration) * 1_000_000_000.0) : 0L;
                    results.add(new Tuple2<>(testClassName + "." + testName, nanos));
                } else {
                    LOG.warn("Bad test in junit xml: name={} className={}", testName, testClassName);
                }
            }
        } catch (ParserConfigurationException | IOException | XPathExpressionException | SAXException e) {
            return Collections.emptyList();
        }

        return results;
    }
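For illustration only (sample data, not from the repository): seconds in the junit 'time' attribute become nanoseconds in the returned tuple. Same package is assumed, since fromJunitXml() is package-private.

    import java.io.ByteArrayInputStream;

    public class FromJunitXmlExample {
        public static void main(String[] args) {
            String xml = "<testsuite>"
                    + "<testcase classname=\"net.corda.SomeTest\" name=\"someMethod\" time=\"1.5\"/>"
                    + "</testsuite>";
            // Expect one tuple: ("net.corda.SomeTest.someMethod", 1500000000)
            System.out.println(TestDurationArtifacts.fromJunitXml(
                    new ByteArrayInputStream(xml.getBytes())));
        }
    }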
    /**
     * A supplier of tests.
     * <p>
     * We get them from Artifactory and then parse the test xml files to get the duration.
     *
     * @return a supplier of test results
     */
    @NotNull
    static Supplier<Tests> getTestsSupplier() {
        return TestDurationArtifacts::loadTests;
    }

    /**
     * we need to prepend the project type so that we have a unique tag for artifactory
     *
     * @return
     */
    static String getBranchTag() {
        return (Properties.getRootProjectType() + "-" + Properties.getGitBranch()).replace('.', '-');
    }

    /**
     * we need to prepend the project type so that we have a unique tag for artifactory
     *
     * @return
     */
    static String getTargetBranchTag() {
        return (Properties.getRootProjectType() + "-" + Properties.getTargetGitBranch()).replace('.', '-');
    }
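An illustration (hypothetical branch value, not in the commit) of how the Artifactory tag is composed by the two methods above:

    public class BranchTagExample {
        public static void main(String[] args) {
            String rootProjectType = "corda";                       // Properties.getRootProjectType()
            String gitBranch = "release/os/4.3".replace('/', '-');  // Properties.getGitBranch() -> "release-os-4.3"
            System.out.println((rootProjectType + "-" + gitBranch).replace('.', '-'));
            // prints: corda-release-os-4-3
        }
    }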
    /**
     * Load the tests from Artifactory, in-memory. No temp file used. Existing test data is cleared.
     *
     * @return a reference to the loaded tests.
     */
    static Tests loadTests() {
        LOG.warn("LOADING previous test runs from Artifactory");
        tests.clear();
        try {
            final TestDurationArtifacts testArtifacts = new TestDurationArtifacts();
            ByteArrayOutputStream outputStream = new ByteArrayOutputStream();

            // Try getting artifacts for our branch, if not, try the target branch.
            if (!testArtifacts.get(getBranchTag(), outputStream)) {
                outputStream = new ByteArrayOutputStream();
                LOG.warn("Could not get tests from Artifactory for tag {}, trying {}", getBranchTag(), getTargetBranchTag());
                if (!testArtifacts.get(getTargetBranchTag(), outputStream)) {
                    LOG.warn("Could not get any tests from Artifactory");
                    return tests;
                }
            }

            ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
            addTestsFromZippedCsv(tests, inputStream);
            LOG.warn("Got {} tests from Artifactory", tests.size());
            return tests;
        } catch (Exception e) { // was IOException
            LOG.warn(e.toString());
            LOG.warn("Could not get tests from Artifactory");
            return tests;
        }
    }

    /**
     * Get tests for the specified tag in the outputStream
     *
     * @param theTag tag for tests
     * @param outputStream stream of zipped xml files
     * @return false if we fail to get the tests
     */
    private boolean get(@NotNull final String theTag, @NotNull final OutputStream outputStream) {
        return artifactory.get(BASE_URL, theTag, ARTIFACT, "zip", outputStream);
    }

    /**
     * Upload the supplied tests
     *
     * @param theTag tag for tests
     * @param inputStream stream of zipped xml files.
     * @return true if we succeed
     */
    private boolean put(@NotNull final String theTag, @NotNull final InputStream inputStream) {
        return artifactory.put(BASE_URL, theTag, ARTIFACT, EXTENSION, inputStream);
    }
}
199  buildSrc/src/main/groovy/net/corda/testing/Tests.java  (Normal file)

@@ -0,0 +1,199 @@
package net.corda.testing;

import groovy.lang.Tuple2;
import groovy.lang.Tuple3;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;
import org.apache.commons.csv.CSVRecord;
import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.io.Reader;
import java.io.Writer;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;

public class Tests {
    final static String TEST_NAME = "Test Name";
    final static String MEAN_DURATION_NANOS = "Mean Duration Nanos";
    final static String NUMBER_OF_RUNS = "Number of runs";
    private static final Logger LOG = LoggerFactory.getLogger(Tests.class);
    // test name -> (mean duration, number of runs)
    private final Map<String, Tuple2<Long, Long>> tests = new HashMap<>();
    // If we don't have any tests from which to get a mean, use this.
    static long DEFAULT_MEAN_NANOS = 1000L;

    private static Tuple2<Long, Long> DEFAULT_MEAN_TUPLE = new Tuple2<>(DEFAULT_MEAN_NANOS, 0L);
    // mean, count
    private Tuple2<Long, Long> meanForTests = DEFAULT_MEAN_TUPLE;

    /**
     * Read tests, mean duration and runs from a csv file.
     *
     * @param reader a reader
     * @return list of tests, or an empty list if none or we have a problem.
     */
    public static List<Tuple3<String, Long, Long>> read(Reader reader) {
        try {
            List<CSVRecord> records = CSVFormat.DEFAULT.withHeader().parse(reader).getRecords();
            return records.stream().map(record -> {
                try {
                    final String testName = record.get(TEST_NAME);
                    final long testDuration = Long.parseLong(record.get(MEAN_DURATION_NANOS));
                    final long testRuns = Long.parseLong(record.get(NUMBER_OF_RUNS)); // can't see how we would have zero tbh.
                    return new Tuple3<>(testName, testDuration, Math.max(testRuns, 1));
                } catch (IllegalArgumentException | IllegalStateException e) {
                    return null;
                }
            }).filter(Objects::nonNull).sorted(Comparator.comparing(Tuple3::getFirst)).collect(Collectors.toList());
        } catch (IOException ignored) {
        }
        return Collections.emptyList();
    }
private static Tuple2<Long, Long> recalculateMean(@NotNull final Tuple2<Long, Long> previous, long nanos) {
|
||||
final long total = previous.getFirst() * previous.getSecond() + nanos;
|
||||
final long count = previous.getSecond() + 1;
|
||||
return new Tuple2<>(total / count, count);
|
||||
}
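    // A quick worked example of the running mean above, using the same integer
    // arithmetic (these numbers reappear in TestsTest.addingTestChangesMeanDuration):
    //   recalculateMean((mean=100, runs=4), 600)
    //     total  = 100 * 4 + 600 = 1000
    //     count  = 4 + 1         = 5
    //     result = (1000 / 5, 5) = (200, 5)   // integer division truncates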

    /**
     * Write a csv file of test name, duration, runs.
     *
     * @param writer a writer
     * @return true if no problems.
     */
    public boolean write(@NotNull final Writer writer) {
        boolean ok = true;
        final CSVPrinter printer;
        try {
            printer = new CSVPrinter(writer,
                    CSVFormat.DEFAULT.withHeader(TEST_NAME, MEAN_DURATION_NANOS, NUMBER_OF_RUNS));
            for (String key : tests.keySet()) {
                printer.printRecord(key, tests.get(key).getFirst(), tests.get(key).getSecond());
            }

            printer.flush();
        } catch (IOException e) {
            ok = false;
        }
        return ok;
    }

    /**
     * Add tests, and also (re)calculate the mean test duration.
     * e.g. addTests(read(reader));
     *
     * @param testsCollection tests, typically from a csv file.
     */
    public void addTests(@NotNull final List<Tuple3<String, Long, Long>> testsCollection) {
        testsCollection.forEach(t -> this.tests.put(t.getFirst(), new Tuple2<>(t.getSecond(), t.getThird())));

        // Calculate the mean test time.
        if (tests.size() > 0) {
            long total = 0;
            for (String testName : this.tests.keySet()) total += tests.get(testName).getFirst();
            meanForTests = new Tuple2<>(total / this.tests.size(), 1L);
        }
    }

    /**
     * Get the known mean duration of a test.
     *
     * @param testName the test name
     * @return duration in nanos.
     */
    public long getDuration(@NotNull final String testName) {
        return tests.getOrDefault(testName, meanForTests).getFirst();
    }

    /**
     * Add test information. Recalculates the mean test duration if the test already exists.
     *
     * @param testName      name of the test
     * @param durationNanos duration
     */
    public void addDuration(@NotNull final String testName, long durationNanos) {
        final Tuple2<Long, Long> current = tests.getOrDefault(testName, new Tuple2<>(0L, 0L));

        tests.put(testName, recalculateMean(current, durationNanos));

        LOG.debug("Recorded test '{}', mean={} ns, runs={}", testName, tests.get(testName).getFirst(), tests.get(testName).getSecond());

        meanForTests = recalculateMean(meanForTests, durationNanos);
    }

    /**
     * Do we have any test information?
     *
     * @return false if no tests info
     */
    public boolean isEmpty() {
        return tests.isEmpty();
    }

    /**
     * How many tests do we have?
     *
     * @return the number of tests we have information for
     */
    public int size() {
        return tests.size();
    }

    /**
     * Return all tests (and their durations) that begin with (or are equal to) `testPrefix`.
     * If not present we just return the mean test duration so that the test is fairly distributed.
     *
     * @param testPrefix could be just the classname, or the entire classname + testname.
     * @return list of matching tests
     */
    @NotNull
    List<Tuple2<String, Long>> startsWith(@NotNull final String testPrefix) {
        List<Tuple2<String, Long>> results = this.tests.keySet().stream()
                .filter(t -> t.startsWith(testPrefix))
                .map(t -> new Tuple2<>(t, getDuration(t)))
                .collect(Collectors.toList());
        // We don't know if the testPrefix is a classname or classname.methodname (exact match).
        if (results == null || results.isEmpty()) {
            LOG.warn("In {} previously executed tests, could not find any starting with {}", tests.size(), testPrefix);
            results = Arrays.asList(new Tuple2<>(testPrefix, getMeanDurationForTests()));
        }
        return results;
    }

    /**
     * How many times has this test been run? Every call to addDuration increments the current value.
     *
     * @param testName the test name
     * @return the number of times the test name has been run.
     */
    public long getRunCount(@NotNull final String testName) {
        return tests.getOrDefault(testName, new Tuple2<>(0L, 0L)).getSecond();
    }

    /**
     * Return the mean duration for a unit test to run.
     *
     * @return mean duration in nanos.
     */
    public long getMeanDurationForTests() {
        return meanForTests.getFirst();
    }

    /**
     * Clear all tests.
     */
    void clear() {
        tests.clear();
        meanForTests = DEFAULT_MEAN_TUPLE;
    }
}
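A minimal round-trip sketch of the write/read pair above (the TestsTest cases further down exercise the same path); the wrapper class here is illustrative only:

    import java.io.StringReader;
    import java.io.StringWriter;

    class TestsRoundTripSketch {
        static void roundTrip() {
            Tests original = new Tests();
            original.addDuration("com.corda.testA", 55L);

            StringWriter writer = new StringWriter();
            original.write(writer);  // CSV: "Test Name,Mean Duration Nanos,Number of runs" plus one row

            Tests reloaded = new Tests();
            reloaded.addTests(Tests.read(new StringReader(writer.toString())));
            // reloaded.getDuration("com.corda.testA") == 55L
        }
    }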
@ -0,0 +1,63 @@
package net.corda.testing;

import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

public class PropertiesTest {
    private static String username = "me";
    private static String password = "me";
    private static String cordaType = "corda-project";
    private static String branch = "mine";
    private static String targetBranch = "master";

    @Before
    public void setUp() {
        System.setProperty("git.branch", branch);
        System.setProperty("git.target.branch", targetBranch);
        System.setProperty("artifactory.username", username);
        System.setProperty("artifactory.password", password);
    }

    @After
    public void tearDown() {
        System.setProperty("git.branch", "");
        System.setProperty("git.target.branch", "");
        System.setProperty("artifactory.username", "");
        System.setProperty("artifactory.password", "");
    }

    @Test
    public void cordaType() {
        Properties.setRootProjectType(cordaType);
        Assert.assertEquals(cordaType, Properties.getRootProjectType());
    }

    @Test
    public void getUsername() {
        Assert.assertEquals(username, Properties.getUsername());
    }

    @Test
    public void getPassword() {
        Assert.assertEquals(password, Properties.getPassword());
    }

    @Test
    public void getGitBranch() {
        Assert.assertEquals(branch, Properties.getGitBranch());
    }

    @Test
    public void getTargetGitBranch() {
        Assert.assertEquals(targetBranch, Properties.getTargetGitBranch());
    }

    @Test
    public void getPublishJunitTests() {
        Assert.assertFalse(Properties.getPublishJunitTests());
        System.setProperty("publish.junit", "true");
        Assert.assertTrue(Properties.getPublishJunitTests());
    }
}
@ -0,0 +1,323 @@
package net.corda.testing;

import groovy.lang.Tuple2;
import org.apache.commons.compress.archivers.ArchiveException;
import org.apache.commons.compress.archivers.ArchiveOutputStream;
import org.apache.commons.compress.archivers.ArchiveStreamFactory;
import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
import org.jetbrains.annotations.NotNull;
import org.junit.Assert;
import org.junit.Test;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.StringWriter;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;

public class TestDurationArtifactsTest {
    final static String CLASSNAME = "FAKE";

    String getXml(List<Tuple2<String, Long>> tests) {
        StringBuilder sb = new StringBuilder();
        sb.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
                "<testsuites disabled=\"\" errors=\"\" failures=\"\" name=\"\" tests=\"\" time=\"\">\n" +
                "    <testsuite disabled=\"\" errors=\"\" failures=\"\" hostname=\"\" id=\"\"\n" +
                "               name=\"\" package=\"\" skipped=\"\" tests=\"\" time=\"\" timestamp=\"\">\n" +
                "        <properties>\n" +
                "            <property name=\"\" value=\"\"/>\n" +
                "        </properties>\n");

        for (Tuple2<String, Long> test : tests) {
            Double d = ((double) test.getSecond()) / 1_000_000_000.0;
            sb.append("        <testcase assertions=\"\" classname=\"" + CLASSNAME + "\" name=\""
                    + test.getFirst() + "\" status=\"\" time=\"" + d.toString() + "\">\n" +
                    "            <skipped/>\n" +
                    "            <error message=\"\" type=\"\"/>\n" +
                    "            <failure message=\"\" type=\"\"/>\n" +
                    "            <system-out/>\n" +
                    "            <system-err/>\n" +
                    "        </testcase>\n");
        }

        sb.append("        <system-out/>\n" +
                "        <system-err/>\n" +
                "    </testsuite>\n" +
                "</testsuites>");
        return sb.toString();
    }

    String getXmlWithNoTime(List<Tuple2<String, Long>> tests) {
        StringBuilder sb = new StringBuilder();
        sb.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
                "<testsuites disabled=\"\" errors=\"\" failures=\"\" name=\"\" tests=\"\" time=\"\">\n" +
                "    <testsuite disabled=\"\" errors=\"\" failures=\"\" hostname=\"\" id=\"\"\n" +
                "               name=\"\" package=\"\" skipped=\"\" tests=\"\" time=\"\" timestamp=\"\">\n" +
                "        <properties>\n" +
                "            <property name=\"\" value=\"\"/>\n" +
                "        </properties>\n");

        for (Tuple2<String, Long> test : tests) {
            Double d = ((double) test.getSecond()) / 1_000_000_000.0;
            sb.append("        <testcase assertions=\"\" classname=\"" + CLASSNAME + "\" name=\""
                    + test.getFirst() + "\" status=\"\" time=\"\">\n" +
                    "            <skipped/>\n" +
                    "            <error message=\"\" type=\"\"/>\n" +
                    "            <failure message=\"\" type=\"\"/>\n" +
                    "            <system-out/>\n" +
                    "            <system-err/>\n" +
                    "        </testcase>\n");
        }

        sb.append("        <system-out/>\n" +
                "        <system-err/>\n" +
                "    </testsuite>\n" +
                "</testsuites>");
        return sb.toString();
    }

    @Test
    public void fromJunitXml() {
        List<Tuple2<String, Long>> tests = new ArrayList<>();
        tests.add(new Tuple2<>("TEST-A", 111_000_000_000L));
        tests.add(new Tuple2<>("TEST-B", 222_200_000_000L));
        final String xml = getXml(tests);

        List<Tuple2<String, Long>> results
                = TestDurationArtifacts.fromJunitXml(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));

        Assert.assertNotNull(results);

        Assert.assertFalse("Should have results", results.isEmpty());
        Assert.assertEquals(results.size(), 2);
        Assert.assertEquals(CLASSNAME + "." + "TEST-A", results.get(0).getFirst());
        Assert.assertEquals(111_000_000_000L, results.get(0).getSecond().longValue());
        Assert.assertEquals(CLASSNAME + "." + "TEST-B", results.get(1).getFirst());
        Assert.assertEquals(222_200_000_000L, results.get(1).getSecond().longValue());
    }

    @Test
    public void fromJunitXmlWithZeroDuration() {
        // We do return zero values.
        List<Tuple2<String, Long>> tests = new ArrayList<>();
        tests.add(new Tuple2<>("TEST-A", 0L));
        tests.add(new Tuple2<>("TEST-B", 0L));
        final String xml = getXml(tests);

        List<Tuple2<String, Long>> results
                = TestDurationArtifacts.fromJunitXml(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));

        Assert.assertNotNull(results);

        Assert.assertFalse("Should have results", results.isEmpty());
        Assert.assertEquals(results.size(), 2);
        Assert.assertEquals(CLASSNAME + "." + "TEST-A", results.get(0).getFirst());
        Assert.assertEquals(0L, results.get(0).getSecond().longValue());
        Assert.assertEquals(CLASSNAME + "." + "TEST-B", results.get(1).getFirst());
        Assert.assertEquals(0L, results.get(1).getSecond().longValue());
    }

    @Test
    public void fromJunitXmlWithNoDuration() {
        // We do return zero values.
        List<Tuple2<String, Long>> tests = new ArrayList<>();
        tests.add(new Tuple2<>("TEST-A", 0L));
        tests.add(new Tuple2<>("TEST-B", 0L));
        final String xml = getXmlWithNoTime(tests);

        List<Tuple2<String, Long>> results
                = TestDurationArtifacts.fromJunitXml(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));

        Assert.assertNotNull(results);

        Assert.assertFalse("Should have results", results.isEmpty());
        Assert.assertEquals(2, results.size());
        Assert.assertEquals(CLASSNAME + "." + "TEST-A", results.get(0).getFirst());
        Assert.assertEquals(0L, results.get(0).getSecond().longValue());
        Assert.assertEquals(CLASSNAME + "." + "TEST-B", results.get(1).getFirst());
        Assert.assertEquals(0L, results.get(1).getSecond().longValue());
    }

    @Test
    public void canCreateZipFile() throws IOException {
        Tests outputTests = new Tests();
        final String testA = "com.corda.testA";
        final String testB = "com.corda.testB";
        outputTests.addDuration(testA, 55L);
        outputTests.addDuration(testB, 33L);

        StringWriter writer = new StringWriter();
        outputTests.write(writer);
        String csv = writer.toString();

        ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
        try (ZipOutputStream outputStream = new ZipOutputStream(byteStream, StandardCharsets.UTF_8)) {
            ZipEntry entry = new ZipEntry("tests.csv");
            outputStream.putNextEntry(entry);
            outputStream.write(csv.getBytes(StandardCharsets.UTF_8));
            outputStream.closeEntry();
        }
        Assert.assertNotEquals(0, byteStream.toByteArray().length);

        ByteArrayInputStream inputStream = new ByteArrayInputStream(byteStream.toByteArray());
        Tests tests = new Tests();
        Assert.assertTrue(tests.isEmpty());

        TestDurationArtifacts.addTestsFromZippedCsv(tests, inputStream);

        Assert.assertFalse(tests.isEmpty());
        Assert.assertEquals(2, tests.size());
        Assert.assertEquals(55L, tests.getDuration(testA));
        Assert.assertEquals(33L, tests.getDuration(testB));

        Assert.assertEquals(44L, tests.getMeanDurationForTests());
    }

    void putIntoArchive(@NotNull final ArchiveOutputStream outputStream,
                        @NotNull final String fileName,
                        @NotNull final String content) throws IOException {
        ZipArchiveEntry entry = new ZipArchiveEntry(fileName);
        outputStream.putArchiveEntry(entry);
        outputStream.write(content.getBytes(StandardCharsets.UTF_8));
        outputStream.closeArchiveEntry();
    }

    String write(@NotNull final Tests tests) {
        StringWriter writer = new StringWriter();
        tests.write(writer);
        return writer.toString();
    }

    @Test
    public void canCreateZipFileContainingMultipleFiles() throws IOException, ArchiveException {
        // Currently we don't have two csvs in the zip file, but test anyway.

        Tests outputTests = new Tests();
        final String testA = "com.corda.testA";
        final String testB = "com.corda.testB";
        final String testC = "com.corda.testC";
        outputTests.addDuration(testA, 55L);
        outputTests.addDuration(testB, 33L);

        String csv = write(outputTests);

        Tests otherTests = new Tests();
        otherTests.addDuration(testA, 55L);
        otherTests.addDuration(testB, 33L);
        otherTests.addDuration(testC, 22L);
        String otherCsv = write(otherTests);

        ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
        try (ArchiveOutputStream outputStream =
                     new ArchiveStreamFactory("UTF-8").createArchiveOutputStream(ArchiveStreamFactory.ZIP, byteStream)) {
            putIntoArchive(outputStream, "tests1.csv", csv);
            putIntoArchive(outputStream, "tests2.csv", otherCsv);
            outputStream.flush();
        }

        Assert.assertNotEquals(0, byteStream.toByteArray().length);
        ByteArrayInputStream inputStream = new ByteArrayInputStream(byteStream.toByteArray());

        Tests tests = new Tests();
        Assert.assertTrue(tests.isEmpty());

        TestDurationArtifacts.addTestsFromZippedCsv(tests, inputStream);

        Assert.assertFalse(tests.isEmpty());
        Assert.assertEquals(3, tests.size());
        Assert.assertEquals((55 + 33 + 22) / 3, tests.getMeanDurationForTests());
    }

    //  Uncomment to test a file.
    //  Run a build to generate some test files, create a zip:
    //  zip ~/tests.zip $(find . -name "*.xml" -type f | grep test-results)
//    @Test
//    public void testZipFile() throws FileNotFoundException {
//        File f = new File(System.getProperty("tests.zip", "/tests.zip"));
//        List<Tuple2<String, Long>> results = BucketingAllocatorTask.fromZippedXml(new BufferedInputStream(new FileInputStream(f)));
//        Assert.assertFalse("Should have results", results.isEmpty());
//        System.out.println("Results = " + results.size());
//        System.out.println(results.toString());
//    }

    @Test
    public void branchNamesDoNotHaveDirectoryDelimiters() {
        // We use the branch name in file and artifact tagging, so '/' would confuse things,
        // so make sure when we retrieve the property we strip them out.

        final String expected = "release/os/4.3";
        final String key = "git.branch";
        final String cordaType = "corda";
        Properties.setRootProjectType(cordaType);
        System.setProperty(key, expected);

        Assert.assertEquals(expected, System.getProperty(key));
        Assert.assertNotEquals(expected, Properties.getGitBranch());
        Assert.assertEquals("release-os-4.3", Properties.getGitBranch());
    }

    @Test
    public void getTestsFromArtifactory() {
        String artifactory_password = System.getenv("ARTIFACTORY_PASSWORD");
        String artifactory_username = System.getenv("ARTIFACTORY_USERNAME");
        String git_branch = System.getenv("CORDA_GIT_BRANCH");
        String git_target_branch = System.getenv("CORDA_GIT_TARGET_BRANCH");

        if (artifactory_password == null ||
                artifactory_username == null ||
                git_branch == null ||
                git_target_branch == null
        ) {
            System.out.println("Skipping test - set env vars to run this test");
            return;
        }

        System.setProperty("git.branch", git_branch);
        System.setProperty("git.target.branch", git_target_branch);
        System.setProperty("artifactory.password", artifactory_password);
        System.setProperty("artifactory.username", artifactory_username);
        Assert.assertTrue(TestDurationArtifacts.tests.isEmpty());
        TestDurationArtifacts.loadTests();
        Assert.assertFalse(TestDurationArtifacts.tests.isEmpty());
    }

    @Test
    public void tryAndWalkForTestXmlFiles() {
        final String xmlRoot = System.getenv("JUNIT_XML_ROOT");
        if (xmlRoot == null) {
            System.out.println("Set JUNIT_XML_ROOT to run this test");
            return;
        }

        List<Path> testXmlFiles = TestDurationArtifacts.getTestXmlFiles(Paths.get(xmlRoot));
        Assert.assertFalse(testXmlFiles.isEmpty());

        for (Path testXmlFile : testXmlFiles.stream().sorted().collect(Collectors.toList())) {
            // System.out.println(testXmlFile.toString());
        }

        System.out.println("\n\nTESTS\n\n");
        for (Path testResult : testXmlFiles) {
            try {
                final List<Tuple2<String, Long>> unitTests = TestDurationArtifacts.fromJunitXml(new FileInputStream(testResult.toFile()));
                for (Tuple2<String, Long> unitTest : unitTests) {
                    System.out.println(unitTest.getFirst() + " --> " + BucketingAllocator.getDuration(unitTest.getSecond()));
                }
            } catch (FileNotFoundException e) {
                e.printStackTrace();
            }
        }
    }
}
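One detail worth noting from the fromJunitXml tests above: the JUnit XML time attribute is fractional seconds, while durations are stored in nanos; getXml() divides by 1e9, so the parser has to invert that. A sketch of the implied conversion:

    double seconds = Double.parseDouble("111.0");     // from <testcase ... time="111.0">
    long nanos = (long) (seconds * 1_000_000_000.0);  // 111_000_000_000L, as the test asserts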
145
buildSrc/src/test/groovy/net/corda/testing/TestsTest.java
Normal file
@ -0,0 +1,145 @@
package net.corda.testing;

import org.junit.Assert;
import org.junit.Test;

import java.io.StringReader;
import java.io.StringWriter;

public class TestsTest {
    @Test
    public void read() {
        final Tests tests = new Tests();
        Assert.assertTrue(tests.isEmpty());

        final String s = Tests.TEST_NAME + "," + Tests.MEAN_DURATION_NANOS + "," + Tests.NUMBER_OF_RUNS + '\n'
                + "hello,100,4\n";
        tests.addTests(Tests.read(new StringReader(s)));

        Assert.assertFalse(tests.isEmpty());
        Assert.assertEquals((long) tests.getDuration("hello"), 100);
    }

    @Test
    public void write() {
        final StringWriter writer = new StringWriter();
        final Tests tests = new Tests();
        Assert.assertTrue(tests.isEmpty());
        tests.addDuration("hello", 100);
        tests.write(writer);
        Assert.assertFalse(tests.isEmpty());

        final StringReader reader = new StringReader(writer.toString());
        final Tests otherTests = new Tests();
        otherTests.addTests(Tests.read(reader));

        Assert.assertFalse(tests.isEmpty());
        Assert.assertFalse(otherTests.isEmpty());
        Assert.assertEquals(tests.size(), otherTests.size());
        Assert.assertEquals(tests.getDuration("hello"), otherTests.getDuration("hello"));
    }

    @Test
    public void addingTestChangesMeanDuration() {
        final Tests tests = new Tests();
        final String s = Tests.TEST_NAME + "," + Tests.MEAN_DURATION_NANOS + "," + Tests.NUMBER_OF_RUNS + '\n'
                + "hello,100,4\n";
        tests.addTests(Tests.read(new StringReader(s)));

        Assert.assertFalse(tests.isEmpty());
        // 400 total for 4 tests
        Assert.assertEquals((long) tests.getDuration("hello"), 100);

        // 1000 total for 5 tests = 200 mean
        tests.addDuration("hello", 600);
        Assert.assertEquals((long) tests.getDuration("hello"), 200);
    }

    @Test
    public void addTests() {
        final Tests tests = new Tests();
        Assert.assertTrue(tests.isEmpty());

        final String s = Tests.TEST_NAME + "," + Tests.MEAN_DURATION_NANOS + "," + Tests.NUMBER_OF_RUNS + '\n'
                + "hello,100,4\n"
                + "goodbye,200,4\n";

        tests.addTests(Tests.read(new StringReader(s)));
        Assert.assertFalse(tests.isEmpty());
        Assert.assertEquals(tests.size(), 2);
    }

    @Test
    public void getDuration() {
        final Tests tests = new Tests();
        Assert.assertTrue(tests.isEmpty());

        final String s = Tests.TEST_NAME + "," + Tests.MEAN_DURATION_NANOS + "," + Tests.NUMBER_OF_RUNS + '\n'
                + "hello,100,4\n"
                + "goodbye,200,4\n";

        tests.addTests(Tests.read(new StringReader(s)));
        Assert.assertFalse(tests.isEmpty());
        Assert.assertEquals(tests.size(), 2);

        Assert.assertEquals(100L, tests.getDuration("hello"));
        Assert.assertEquals(200L, tests.getDuration("goodbye"));
    }

    @Test
    public void addTestInfo() {
        final Tests tests = new Tests();
        Assert.assertTrue(tests.isEmpty());

        final String s = Tests.TEST_NAME + "," + Tests.MEAN_DURATION_NANOS + "," + Tests.NUMBER_OF_RUNS + '\n'
                + "hello,100,4\n"
                + "goodbye,200,4\n";

        tests.addTests(Tests.read(new StringReader(s)));
        Assert.assertFalse(tests.isEmpty());
        Assert.assertEquals(2, tests.size());

        tests.addDuration("foo", 55);
        tests.addDuration("bar", 33);
        Assert.assertEquals(4, tests.size());

        tests.addDuration("bar", 56);
        Assert.assertEquals(4, tests.size());
    }

    @Test
    public void addingNewDurationUpdatesRunCount() {
        final Tests tests = new Tests();
        Assert.assertTrue(tests.isEmpty());

        final String s = Tests.TEST_NAME + "," + Tests.MEAN_DURATION_NANOS + "," + Tests.NUMBER_OF_RUNS + '\n'
                + "hello,100,4\n"
                + "goodbye,200,4\n";

        tests.addTests(Tests.read(new StringReader(s)));
        Assert.assertFalse(tests.isEmpty());
        Assert.assertEquals(2, tests.size());

        tests.addDuration("foo", 55);

        Assert.assertEquals(0, tests.getRunCount("bar"));

        tests.addDuration("bar", 33);
        Assert.assertEquals(4, tests.size());

        tests.addDuration("bar", 56);
        Assert.assertEquals(2, tests.getRunCount("bar"));
        Assert.assertEquals(4, tests.size());

        tests.addDuration("bar", 56);
        tests.addDuration("bar", 56);
        Assert.assertEquals(4, tests.getRunCount("bar"));

        Assert.assertEquals(4, tests.getRunCount("hello"));
        tests.addDuration("hello", 22);
        tests.addDuration("hello", 22);
        tests.addDuration("hello", 22);
        Assert.assertEquals(7, tests.getRunCount("hello"));
    }
}
@ -2,12 +2,13 @@ package net.corda.testing;

import org.hamcrest.collection.IsIterableContainingInAnyOrder;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;

import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.Stream;

@ -17,8 +18,8 @@ public class BucketingAllocatorTest {

    @Test
    public void shouldAlwaysBucketTestsEvenIfNotInTimedFile() {

        BucketingAllocator bucketingAllocator = new BucketingAllocator(1, Collections::emptyList);
        Tests tests = new Tests();
        BucketingAllocator bucketingAllocator = new BucketingAllocator(1, () -> tests);

        Object task = new Object();
        bucketingAllocator.addSource(() -> Arrays.asList("SomeTestingClass", "AnotherTestingClass"), task);

@ -28,13 +29,41 @@ public class BucketingAllocatorTest {

        Assert.assertThat(testsForForkAndTestTask, IsIterableContainingInAnyOrder.containsInAnyOrder("SomeTestingClass", "AnotherTestingClass"));

        List<BucketingAllocator.TestsForForkContainer> forkContainers = bucketingAllocator.getForkContainers();
        Assert.assertEquals(1, forkContainers.size());
        // There aren't any known tests, so it will use the default instead.
        Assert.assertEquals(Tests.DEFAULT_MEAN_NANOS, tests.getMeanDurationForTests());
        Assert.assertEquals(2 * tests.getMeanDurationForTests(), forkContainers.get(0).getCurrentDuration().longValue());
    }

    @Test
    public void shouldAlwaysBucketTestsEvenIfNotInTimedFileAndUseMeanValue() {
        final Tests tests = new Tests();
        tests.addDuration("someRandomTestNameToForceMeanValue", 1_000_000_000);

        BucketingAllocator bucketingAllocator = new BucketingAllocator(1, () -> tests);

        Object task = new Object();
        List<String> testNames = Arrays.asList("SomeTestingClass", "AnotherTestingClass");

        bucketingAllocator.addSource(() -> testNames, task);

        bucketingAllocator.generateTestPlan();
        List<String> testsForForkAndTestTask = bucketingAllocator.getTestsForForkAndTestTask(0, task);

        Assert.assertThat(testsForForkAndTestTask, IsIterableContainingInAnyOrder.containsInAnyOrder(testNames.toArray()));

        List<BucketingAllocator.TestsForForkContainer> forkContainers = bucketingAllocator.getForkContainers();
        Assert.assertEquals(1, forkContainers.size());
        Assert.assertEquals(testNames.size() * tests.getMeanDurationForTests(), forkContainers.get(0).getCurrentDuration().longValue());
    }

    @Test
    public void shouldAllocateTestsAcrossForksEvenIfNoMatchingTestsFound() {

        BucketingAllocator bucketingAllocator = new BucketingAllocator(2, Collections::emptyList);
        Tests tests = new Tests();
        tests.addDuration("SomeTestingClass", 1_000_000_000);
        tests.addDuration("AnotherTestingClass", 2222);
        BucketingAllocator bucketingAllocator = new BucketingAllocator(2, () -> tests);

        Object task = new Object();
        bucketingAllocator.addSource(() -> Arrays.asList("SomeTestingClass", "AnotherTestingClass"), task);

@ -49,6 +78,101 @@ public class BucketingAllocatorTest {
        List<String> allTests = Stream.of(testsForForkOneAndTestTask, testsForForkTwoAndTestTask).flatMap(Collection::stream).collect(Collectors.toList());

        Assert.assertThat(allTests, IsIterableContainingInAnyOrder.containsInAnyOrder("SomeTestingClass", "AnotherTestingClass"));
    }

    @Test
    public void shouldAllocateTestsAcrossForksEvenIfNoMatchingTestsFoundAndUseExisitingValues() {
        Tests tests = new Tests();
        tests.addDuration("SomeTestingClass", 1_000_000_000L);
        tests.addDuration("AnotherTestingClass", 3_000_000_000L);
        BucketingAllocator bucketingAllocator = new BucketingAllocator(2, () -> tests);

        Object task = new Object();
        bucketingAllocator.addSource(() -> Arrays.asList("YetAnotherTestingClass", "SomeTestingClass", "AnotherTestingClass"), task);

        bucketingAllocator.generateTestPlan();
        List<String> testsForForkOneAndTestTask = bucketingAllocator.getTestsForForkAndTestTask(0, task);
        List<String> testsForForkTwoAndTestTask = bucketingAllocator.getTestsForForkAndTestTask(1, task);

        Assert.assertThat(testsForForkOneAndTestTask.size(), is(1));
        Assert.assertThat(testsForForkTwoAndTestTask.size(), is(2));

        List<String> allTests = Stream.of(testsForForkOneAndTestTask, testsForForkTwoAndTestTask).flatMap(Collection::stream).collect(Collectors.toList());

        Assert.assertThat(allTests, IsIterableContainingInAnyOrder.containsInAnyOrder("YetAnotherTestingClass", "SomeTestingClass", "AnotherTestingClass"));

        List<BucketingAllocator.TestsForForkContainer> forkContainers = bucketingAllocator.getForkContainers();
        Assert.assertEquals(2, forkContainers.size());
        // Internally, we should have sorted the tests by decreasing size, so the largest would be added to the first bucket.
        Assert.assertEquals(TimeUnit.SECONDS.toNanos(3), forkContainers.get(0).getCurrentDuration().longValue());

        // At this point, the second bucket is empty. We also know that the test average is 2s (1+3)/2.
        // So we should put SomeTestingClass (1s) into this bucket, AND then put the 'unknown' test 'YetAnotherTestingClass'
        // into this bucket, using the mean duration = 2s, resulting in 3s.
        Assert.assertEquals(TimeUnit.SECONDS.toNanos(3), forkContainers.get(1).getCurrentDuration().longValue());
    }

    @Test
    public void testBucketAllocationForSeveralTestsDistributedByClassName() {
        Tests tests = new Tests();
        tests.addDuration("SmallTestingClass", 1_000_000_000L);
        tests.addDuration("LargeTestingClass", 3_000_000_000L);
        tests.addDuration("MediumTestingClass", 2_000_000_000L);
        // Gives a nice mean of 2s.
        Assert.assertEquals(TimeUnit.SECONDS.toNanos(2), tests.getMeanDurationForTests());

        BucketingAllocator bucketingAllocator = new BucketingAllocator(4, () -> tests);

        List<String> testNames = Arrays.asList(
                "EvenMoreTestingClass",
                "YetAnotherTestingClass",
                "AndYetAnotherTestingClass",
                "OhYesAnotherTestingClass",
                "MediumTestingClass",
                "SmallTestingClass",
                "LargeTestingClass");

        Object task = new Object();
        bucketingAllocator.addSource(() -> testNames, task);

        // does not preserve order of known tests and unknown tests....
        bucketingAllocator.generateTestPlan();

        List<String> testsForFork0 = bucketingAllocator.getTestsForForkAndTestTask(0, task);
        List<String> testsForFork1 = bucketingAllocator.getTestsForForkAndTestTask(1, task);
        List<String> testsForFork2 = bucketingAllocator.getTestsForForkAndTestTask(2, task);
        List<String> testsForFork3 = bucketingAllocator.getTestsForForkAndTestTask(3, task);

        Assert.assertThat(testsForFork0.size(), is(1));
        Assert.assertThat(testsForFork1.size(), is(2));
        Assert.assertThat(testsForFork2.size(), is(2));
        Assert.assertThat(testsForFork3.size(), is(2));

        // This must be true as it is the largest value.
        Assert.assertTrue(testsForFork0.contains("LargeTestingClass"));

        List<String> allTests = Stream.of(testsForFork0, testsForFork1, testsForFork2, testsForFork3)
                .flatMap(Collection::stream).collect(Collectors.toList());

        Assert.assertThat(allTests, IsIterableContainingInAnyOrder.containsInAnyOrder(testNames.toArray()));

        List<BucketingAllocator.TestsForForkContainer> forkContainers = bucketingAllocator.getForkContainers();
        Assert.assertEquals(4, forkContainers.size());

        long totalDuration = forkContainers.stream().mapToLong(c -> c.getCurrentDuration()).sum();
        Assert.assertEquals(tests.getMeanDurationForTests() * testNames.size(), totalDuration);

        Assert.assertEquals(TimeUnit.SECONDS.toNanos(3), forkContainers.get(0).getCurrentDuration().longValue());
        Assert.assertEquals(TimeUnit.SECONDS.toNanos(4), forkContainers.get(1).getCurrentDuration().longValue());
        Assert.assertEquals(TimeUnit.SECONDS.toNanos(4), forkContainers.get(2).getCurrentDuration().longValue());
        Assert.assertEquals(TimeUnit.SECONDS.toNanos(3), forkContainers.get(3).getCurrentDuration().longValue());
    }

    @Test
    public void durationToString() {
        Assert.assertEquals("1 mins", BucketingAllocator.getDuration(60_000_000_000L));
        Assert.assertEquals("4 secs", BucketingAllocator.getDuration(4_000_000_000L));
        Assert.assertEquals("400 ms", BucketingAllocator.getDuration(400_000_000L));
        Assert.assertEquals("400000 ns", BucketingAllocator.getDuration(400_000L));
    }
}
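The arithmetic behind testBucketAllocationForSeveralTestsDistributedByClassName, spelled out; the fork-filling order is internal to BucketingAllocator, the per-fork totals are what the test asserts:

    // Known: Large=3s, Medium=2s, Small=1s -> mean 2s; the four unknown classes are priced at the 2s mean.
    // Per-fork totals: fork0 = 3s (holds LargeTestingClass, the largest single duration),
    // fork1 = 4s, fork2 = 4s, fork3 = 3s.
    // Sum = 14s = 2s mean * 7 classes, which is exactly the totalDuration assertion.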
@ -687,6 +687,8 @@
<ID>LongParameterList:Driver.kt$DriverParameters$( isDebug: Boolean, driverDirectory: Path, portAllocation: PortAllocation, debugPortAllocation: PortAllocation, systemProperties: Map<String, String>, useTestClock: Boolean, startNodesInProcess: Boolean, waitForAllNodesToFinish: Boolean, notarySpecs: List<NotarySpec>, extraCordappPackagesToScan: List<String>, jmxPolicy: JmxPolicy, networkParameters: NetworkParameters, cordappsForAllNodes: Set<TestCordapp>? )</ID>
<ID>LongParameterList:DriverDSL.kt$DriverDSL$( defaultParameters: NodeParameters = NodeParameters(), providedName: CordaX500Name? = defaultParameters.providedName, rpcUsers: List<User> = defaultParameters.rpcUsers, verifierType: VerifierType = defaultParameters.verifierType, customOverrides: Map<String, Any?> = defaultParameters.customOverrides, startInSameProcess: Boolean? = defaultParameters.startInSameProcess, maximumHeapSize: String = defaultParameters.maximumHeapSize )</ID>
<ID>LongParameterList:DriverDSL.kt$DriverDSL$( defaultParameters: NodeParameters = NodeParameters(), providedName: CordaX500Name? = defaultParameters.providedName, rpcUsers: List<User> = defaultParameters.rpcUsers, verifierType: VerifierType = defaultParameters.verifierType, customOverrides: Map<String, Any?> = defaultParameters.customOverrides, startInSameProcess: Boolean? = defaultParameters.startInSameProcess, maximumHeapSize: String = defaultParameters.maximumHeapSize, logLevelOverride: String? = defaultParameters.logLevelOverride )</ID>
<ID>LongParameterList:DriverDSLImpl.kt$( isDebug: Boolean = DriverParameters().isDebug, driverDirectory: Path = DriverParameters().driverDirectory, portAllocation: PortAllocation = DriverParameters().portAllocation, debugPortAllocation: PortAllocation = DriverParameters().debugPortAllocation, systemProperties: Map<String, String> = DriverParameters().systemProperties, useTestClock: Boolean = DriverParameters().useTestClock, startNodesInProcess: Boolean = DriverParameters().startNodesInProcess, extraCordappPackagesToScan: List<String> = @Suppress("DEPRECATION") DriverParameters().extraCordappPackagesToScan, waitForAllNodesToFinish: Boolean = DriverParameters().waitForAllNodesToFinish, notarySpecs: List<NotarySpec> = DriverParameters().notarySpecs, jmxPolicy: JmxPolicy = DriverParameters().jmxPolicy, networkParameters: NetworkParameters = DriverParameters().networkParameters, compatibilityZone: CompatibilityZoneParams? = null, notaryCustomOverrides: Map<String, Any?> = DriverParameters().notaryCustomOverrides, inMemoryDB: Boolean = DriverParameters().inMemoryDB, cordappsForAllNodes: Collection<TestCordappInternal>? = null, dsl: DriverDSLImpl.() -> A )</ID>
<ID>LongParameterList:DriverDSLImpl.kt$DriverDSLImpl.Companion$( config: NodeConfig, quasarJarPath: String, debugPort: Int?, overriddenSystemProperties: Map<String, String>, maximumHeapSize: String, logLevelOverride: String?, vararg extraCmdLineFlag: String )</ID>
<ID>LongParameterList:DummyFungibleContract.kt$DummyFungibleContract$(inputs: List<State>, outputs: List<State>, tx: LedgerTransaction, issueCommand: CommandWithParties<Commands.Issue>, currency: Currency, issuer: PartyAndReference)</ID>
<ID>LongParameterList:IRS.kt$FloatingRatePaymentEvent$(date: LocalDate = this.date, accrualStartDate: LocalDate = this.accrualStartDate, accrualEndDate: LocalDate = this.accrualEndDate, dayCountBasisDay: DayCountBasisDay = this.dayCountBasisDay, dayCountBasisYear: DayCountBasisYear = this.dayCountBasisYear, fixingDate: LocalDate = this.fixingDate, notional: Amount<Currency> = this.notional, rate: Rate = this.rate)</ID>
<ID>LongParameterList:IRS.kt$InterestRateSwap$(floatingLeg: FloatingLeg, fixedLeg: FixedLeg, calculation: Calculation, common: Common, oracle: Party, notary: Party)</ID>
@ -723,6 +725,8 @@
<ID>LongParameterList:ParametersUtilities.kt$( notaries: List<NotaryInfo> = emptyList(), minimumPlatformVersion: Int = 1, modifiedTime: Instant = Instant.now(), maxMessageSize: Int = 10485760, // TODO: Make this configurable and consistence across driver, bootstrapper, demobench and NetworkMapServer maxTransactionSize: Int = maxMessageSize * 50, whitelistedContractImplementations: Map<String, List<AttachmentId>> = emptyMap(), epoch: Int = 1, eventHorizon: Duration = 30.days, packageOwnership: Map<String, PublicKey> = emptyMap() )</ID>
<ID>LongParameterList:PersistentUniquenessProvider.kt$PersistentUniquenessProvider$( states: List<StateRef>, txId: SecureHash, callerIdentity: Party, requestSignature: NotarisationRequestSignature, timeWindow: TimeWindow?, references: List<StateRef> )</ID>
<ID>LongParameterList:PhysicalLocationStructures.kt$WorldCoordinate$(screenWidth: Double, screenHeight: Double, topLatitude: Double, bottomLatitude: Double, leftLongitude: Double, rightLongitude: Double)</ID>
<ID>LongParameterList:ProcessUtilities.kt$ProcessUtilities$( arguments: List<String>, classPath: List<String> = defaultClassPath, workingDirectory: Path? = null, jdwpPort: Int? = null, extraJvmArguments: List<String> = emptyList(), maximumHeapSize: String? = null )</ID>
<ID>LongParameterList:ProcessUtilities.kt$ProcessUtilities$( className: String, arguments: List<String>, classPath: List<String> = defaultClassPath, workingDirectory: Path? = null, jdwpPort: Int? = null, extraJvmArguments: List<String> = emptyList(), maximumHeapSize: String? = null )</ID>
<ID>LongParameterList:QueryCriteria.kt$QueryCriteria.FungibleAssetQueryCriteria$( participants: List<AbstractParty>? = this.participants, owner: List<AbstractParty>? = this.owner, quantity: ColumnPredicate<Long>? = this.quantity, issuer: List<AbstractParty>? = this.issuer, issuerRef: List<OpaqueBytes>? = this.issuerRef, status: Vault.StateStatus = this.status, contractStateTypes: Set<Class<out ContractState>>? = this.contractStateTypes )</ID>
<ID>LongParameterList:QueryCriteria.kt$QueryCriteria.FungibleAssetQueryCriteria$( participants: List<AbstractParty>? = this.participants, owner: List<AbstractParty>? = this.owner, quantity: ColumnPredicate<Long>? = this.quantity, issuer: List<AbstractParty>? = this.issuer, issuerRef: List<OpaqueBytes>? = this.issuerRef, status: Vault.StateStatus = this.status, contractStateTypes: Set<Class<out ContractState>>? = this.contractStateTypes, relevancyStatus: Vault.RelevancyStatus = this.relevancyStatus )</ID>
<ID>LongParameterList:QueryCriteria.kt$QueryCriteria.LinearStateQueryCriteria$( participants: List<AbstractParty>? = this.participants, uuid: List<UUID>? = this.uuid, externalId: List<String>? = this.externalId, status: Vault.StateStatus = this.status, contractStateTypes: Set<Class<out ContractState>>? = this.contractStateTypes, relevancyStatus: Vault.RelevancyStatus = this.relevancyStatus )</ID>
@ -730,6 +734,7 @@
<ID>LongParameterList:QueryCriteria.kt$QueryCriteria.VaultQueryCriteria$( status: Vault.StateStatus = Vault.StateStatus.UNCONSUMED, contractStateTypes: Set<Class<out ContractState>>? = null, stateRefs: List<StateRef>? = null, notary: List<AbstractParty>? = null, softLockingCondition: SoftLockingCondition? = null, timeCondition: TimeCondition? = null, relevancyStatus: Vault.RelevancyStatus = Vault.RelevancyStatus.ALL, constraintTypes: Set<Vault.ConstraintInfo.Type> = emptySet(), constraints: Set<Vault.ConstraintInfo> = emptySet(), participants: List<AbstractParty>? = null, externalIds: List<UUID> = emptyList() )</ID>
<ID>LongParameterList:QueryCriteria.kt$QueryCriteria.VaultQueryCriteria$( status: Vault.StateStatus = this.status, contractStateTypes: Set<Class<out ContractState>>? = this.contractStateTypes, stateRefs: List<StateRef>? = this.stateRefs, notary: List<AbstractParty>? = this.notary, softLockingCondition: SoftLockingCondition? = this.softLockingCondition, timeCondition: TimeCondition? = this.timeCondition )</ID>
<ID>LongParameterList:RPCClient.kt$RPCClient$( rpcOpsClass: Class<I>, username: String, password: String, externalTrace: Trace? = null, impersonatedActor: Actor? = null, targetLegalIdentity: CordaX500Name? = null )</ID>
<ID>LongParameterList:RPCDriver.kt$( isDebug: Boolean = false, driverDirectory: Path = Paths.get("build") / "rpc-driver" / getTimestampAsDirectoryName(), portAllocation: PortAllocation = globalPortAllocation, debugPortAllocation: PortAllocation = globalDebugPortAllocation, systemProperties: Map<String, String> = emptyMap(), useTestClock: Boolean = false, startNodesInProcess: Boolean = false, waitForNodesToFinish: Boolean = false, extraCordappPackagesToScan: List<String> = emptyList(), notarySpecs: List<NotarySpec> = emptyList(), externalTrace: Trace? = null, @Suppress("DEPRECATION") jmxPolicy: JmxPolicy = JmxPolicy(), networkParameters: NetworkParameters = testNetworkParameters(), notaryCustomOverrides: Map<String, Any?> = emptyMap(), inMemoryDB: Boolean = true, cordappsForAllNodes: Collection<TestCordappInternal>? = null, dsl: RPCDriverDSL.() -> A )</ID>
<ID>LongParameterList:RPCDriver.kt$RPCDriverDSL$( rpcUser: User = rpcTestUser, nodeLegalName: CordaX500Name = fakeNodeLegalName, configuration: RPCServerConfiguration = RPCServerConfiguration.DEFAULT, listOps: List<I>, brokerHandle: RpcBrokerHandle, queueDrainTimeout: Duration = 5.seconds )</ID>
<ID>LongParameterList:RPCDriver.kt$RPCDriverDSL$( rpcUser: User = rpcTestUser, nodeLegalName: CordaX500Name = fakeNodeLegalName, configuration: RPCServerConfiguration = RPCServerConfiguration.DEFAULT, ops: I, brokerHandle: RpcBrokerHandle, queueDrainTimeout: Duration = 5.seconds )</ID>
<ID>LongParameterList:RPCDriver.kt$RPCDriverDSL$( rpcUser: User = rpcTestUser, nodeLegalName: CordaX500Name = fakeNodeLegalName, maxFileSize: Int = MAX_MESSAGE_SIZE, maxBufferedBytesPerClient: Long = 10L * MAX_MESSAGE_SIZE, configuration: RPCServerConfiguration = RPCServerConfiguration.DEFAULT, ops: I, queueDrainTimeout: Duration = 5.seconds )</ID>
@ -1744,6 +1749,7 @@
<ID>MaxLineLength:ConfigUtilities.kt$ // TODO Move this to KeyStoreConfigHelpers. fun NodeConfiguration.configureWithDevSSLCertificate(cryptoService: CryptoService? = null)</ID>
<ID>MaxLineLength:ConfigUtilities.kt$// Problems: // - Forces you to have a primary constructor with all fields of name and type matching the configuration file structure. // - Encourages weak bean-like types. // - Cannot support a many-to-one relationship between configuration file structures and configuration domain type. This is essential for versioning of the configuration files. // - It's complicated and based on reflection, meaning problems with it are typically found at runtime. // - It doesn't support validation errors in a structured way. If something goes wrong, it throws exceptions, which doesn't support good usability practices like displaying all the errors at once. fun <T : Any> Config.parseAs(clazz: KClass<T>, onUnknownKeys: ((Set<String>, logger: Logger) -> Unit) = UnknownConfigKeysPolicy.FAIL::handle, nestedPath: String? = null, baseDirectory: Path? = null): T</ID>
<ID>MaxLineLength:ConfigUtilities.kt$// TODO Move this to KeyStoreConfigHelpers. fun MutualSslConfiguration.configureDevKeyAndTrustStores(myLegalName: CordaX500Name, signingCertificateStore: FileBasedCertificateStoreSupplier, certificatesDirectory: Path, cryptoService: CryptoService? = null)</ID>
<ID>MaxLineLength:ConfigUtilities.kt$ConfigHelper$return ConfigFactory.parseMap(toProperties().filterKeys { (it as String).startsWith(CORDA_PROPERTY_PREFIX) }.mapKeys { (it.key as String).removePrefix(CORDA_PROPERTY_PREFIX) })</ID>
<ID>MaxLineLength:ConfigUtilities.kt$ConfigHelper$val smartDevMode = CordaSystemUtils.isOsMac() || (CordaSystemUtils.isOsWindows() && !CordaSystemUtils.getOsName().toLowerCase().contains("server"))</ID>
<ID>MaxLineLength:ConfigUtilities.kt$fun Any?.toConfigValue(): ConfigValue</ID>
<ID>MaxLineLength:ConfigUtilities.kt$inline fun <reified T : Any> Config.parseAs(noinline onUnknownKeys: ((Set<String>, logger: Logger) -> Unit) = UnknownConfigKeysPolicy.FAIL::handle): T</ID>
@ -2056,8 +2062,9 @@
<ID>MaxLineLength:DriverDSLImpl.kt$DriverDSLImpl$private</ID>
<ID>MaxLineLength:DriverDSLImpl.kt$DriverDSLImpl$val flowOverrideConfig = FlowOverrideConfig(parameters.flowOverrides.map { FlowOverride(it.key.canonicalName, it.value.canonicalName) })</ID>
<ID>MaxLineLength:DriverDSLImpl.kt$DriverDSLImpl$val jdbcUrl = "jdbc:h2:mem:persistence${inMemoryCounter.getAndIncrement()};DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=100"</ID>
<ID>MaxLineLength:DriverDSLImpl.kt$DriverDSLImpl$val process = startOutOfProcessNode(config, quasarJarPath, debugPort, systemProperties, parameters.maximumHeapSize, parameters.logLevelOverride)</ID>
<ID>MaxLineLength:DriverDSLImpl.kt$DriverDSLImpl.Companion$private operator fun Config.plus(property: Pair<String, Any>)</ID>
<ID>MaxLineLength:DriverDSLImpl.kt$DriverDSLImpl.Companion${ log.info("Starting out-of-process Node ${config.corda.myLegalName.organisation}, " + "debug port is " + (debugPort ?: "not enabled")) // Write node.conf writeConfig(config.corda.baseDirectory, "node.conf", config.typesafe.toNodeOnly()) val systemProperties = mutableMapOf( "name" to config.corda.myLegalName, "visualvm.display.name" to "corda-${config.corda.myLegalName}" ) debugPort?.let { systemProperties += "log4j2.level" to "debug" systemProperties += "log4j2.debug" to "true" } systemProperties += inheritFromParentProcess() systemProperties += overriddenSystemProperties // See experimental/quasar-hook/README.md for how to generate. val excludePattern = "x(antlr**;bftsmart**;ch**;co.paralleluniverse**;com.codahale**;com.esotericsoftware**;" + "com.fasterxml**;com.google**;com.ibm**;com.intellij**;com.jcabi**;com.nhaarman**;com.opengamma**;" + "com.typesafe**;com.zaxxer**;de.javakaffee**;groovy**;groovyjarjarantlr**;groovyjarjarasm**;io.atomix**;" + "io.github**;io.netty**;jdk**;joptsimple**;junit**;kotlin**;net.bytebuddy**;net.i2p**;org.apache**;" + "org.assertj**;org.bouncycastle**;org.codehaus**;org.crsh**;org.dom4j**;org.fusesource**;org.h2**;" + "org.hamcrest**;org.hibernate**;org.jboss**;org.jcp**;org.joda**;org.junit**;org.mockito**;org.objectweb**;" + "org.objenesis**;org.slf4j**;org.w3c**;org.xml**;org.yaml**;reflectasm**;rx**;org.jolokia**;" + "com.lmax**;picocli**;liquibase**;com.github.benmanes**;org.json**;org.postgresql**;nonapi.io.github.classgraph**;)" val extraJvmArguments = systemProperties.removeResolvedClasspath().map { "-D${it.key}=${it.value}" } + "-javaagent:$quasarJarPath=$excludePattern" val loggingLevel = when { logLevelOverride != null -> logLevelOverride debugPort == null -> "INFO" else -> "DEBUG" } val arguments = mutableListOf( "--base-directory=${config.corda.baseDirectory}", "--logging-level=$loggingLevel", "--no-local-shell").also { it.addAll(extraCmdLineFlag) }.toList() // The following dependencies are excluded from the classpath of the created JVM, so that the environment resembles a real one as close as possible. // These are either classes that will be added as attachments to the node (i.e. samples, finance, opengamma etc.) or irrelevant testing libraries (test, corda-mock etc.). // TODO: There is pending work to fix this issue without custom blacklisting. See: https://r3-cev.atlassian.net/browse/CORDA-2164. val exclude = listOf("samples", "finance", "integrationTest", "test", "corda-mock", "com.opengamma.strata") val cp = ProcessUtilities.defaultClassPath.filterNot { cpEntry -> exclude.any { token -> cpEntry.contains("${File.separatorChar}$token") } || cpEntry.endsWith("-tests.jar") } return ProcessUtilities.startJavaProcess( className = "net.corda.node.Corda", // cannot directly get class for this, so just use string arguments = arguments, jdwpPort = debugPort, extraJvmArguments = extraJvmArguments, workingDirectory = config.corda.baseDirectory, maximumHeapSize = maximumHeapSize, classPath = cp, environmentVariables = environmentVariables ) }</ID>
<ID>MaxLineLength:DriverDSLImpl.kt$DriverDSLImpl.Companion${ log.info("Starting out-of-process Node ${config.corda.myLegalName.organisation}, debug port is " + (debugPort ?: "not enabled")) // Write node.conf writeConfig(config.corda.baseDirectory, "node.conf", config.typesafe.toNodeOnly()) val systemProperties = mutableMapOf( "name" to config.corda.myLegalName, "visualvm.display.name" to "corda-${config.corda.myLegalName}" ) debugPort?.let { systemProperties += "log4j2.level" to "debug" systemProperties += "log4j2.debug" to "true" } systemProperties += inheritFromParentProcess() systemProperties += overriddenSystemProperties // See experimental/quasar-hook/README.md for how to generate. val excludePattern = "x(antlr**;bftsmart**;ch**;co.paralleluniverse**;com.codahale**;com.esotericsoftware**;" + "com.fasterxml**;com.google**;com.ibm**;com.intellij**;com.jcabi**;com.nhaarman**;com.opengamma**;" + "com.typesafe**;com.zaxxer**;de.javakaffee**;groovy**;groovyjarjarantlr**;groovyjarjarasm**;io.atomix**;" + "io.github**;io.netty**;jdk**;joptsimple**;junit**;kotlin**;net.bytebuddy**;net.i2p**;org.apache**;" + "org.assertj**;org.bouncycastle**;org.codehaus**;org.crsh**;org.dom4j**;org.fusesource**;org.h2**;" + "org.hamcrest**;org.hibernate**;org.jboss**;org.jcp**;org.joda**;org.junit**;org.mockito**;org.objectweb**;" + "org.objenesis**;org.slf4j**;org.w3c**;org.xml**;org.yaml**;reflectasm**;rx**;org.jolokia**;" + "com.lmax**;picocli**;liquibase**;com.github.benmanes**;org.json**;org.postgresql**;nonapi.io.github.classgraph**;)" val extraJvmArguments = systemProperties.removeResolvedClasspath().map { "-D${it.key}=${it.value}" } + "-javaagent:$quasarJarPath=$excludePattern" val loggingLevel = when { logLevelOverride != null -> logLevelOverride debugPort == null -> "INFO" else -> "DEBUG" } val arguments = mutableListOf( "--base-directory=${config.corda.baseDirectory}", "--logging-level=$loggingLevel", "--no-local-shell").also { it += extraCmdLineFlag }.toList() // The following dependencies are excluded from the classpath of the created JVM, so that the environment resembles a real one as close as possible. // These are either classes that will be added as attachments to the node (i.e. samples, finance, opengamma etc.) or irrelevant testing libraries (test, corda-mock etc.). // TODO: There is pending work to fix this issue without custom blacklisting. See: https://r3-cev.atlassian.net/browse/CORDA-2164. val exclude = listOf("samples", "finance", "integrationTest", "test", "corda-mock", "com.opengamma.strata") val cp = ProcessUtilities.defaultClassPath.filterNot { cpEntry -> exclude.any { token -> cpEntry.contains("${File.separatorChar}$token") } || cpEntry.endsWith("-tests.jar") } return ProcessUtilities.startJavaProcess( className = "net.corda.node.Corda", // cannot directly get class for this, so just use string arguments = arguments, jdwpPort = debugPort, extraJvmArguments = extraJvmArguments, workingDirectory = config.corda.baseDirectory, maximumHeapSize = maximumHeapSize, classPath = cp ) }</ID>
<ID>MaxLineLength:DriverDSLImpl.kt$InternalDriverDSL$ fun <A> pollUntilNonNull(pollName: String, pollInterval: Duration = DEFAULT_POLL_INTERVAL, warnCount: Int = DEFAULT_WARN_COUNT, check: () -> A?): CordaFuture<A></ID>
<ID>MaxLineLength:DriverDSLImpl.kt$InternalDriverDSL$ fun pollUntilTrue(pollName: String, pollInterval: Duration = DEFAULT_POLL_INTERVAL, warnCount: Int = DEFAULT_WARN_COUNT, check: () -> Boolean): CordaFuture<Unit></ID>
<ID>MaxLineLength:DriverDSLImpl.kt$fun DriverDSL.startNode(providedName: CordaX500Name, devMode: Boolean, parameters: NodeParameters = NodeParameters()): CordaFuture<NodeHandle></ID>
|
||||
@ -3834,6 +3841,7 @@
|
||||
<ID>SpreadOperator:DemoBench.kt$DemoBench.Companion$(DemoBench::class.java, *args)</ID>
|
||||
<ID>SpreadOperator:DevCertificatesTest.kt$DevCertificatesTest$(*oldX509Certificates)</ID>
|
||||
<ID>SpreadOperator:DockerInstantiator.kt$DockerInstantiator$(*it.toTypedArray())</ID>
|
||||
<ID>SpreadOperator:DriverDSLImpl.kt$DriverDSLImpl$( config, quasarJarPath, debugPort, systemProperties, "512m", null, *extraCmdLineFlag )</ID>
|
||||
<ID>SpreadOperator:DummyContract.kt$DummyContract.Companion$( /* INPUTS */ *priors.toTypedArray(), /* COMMAND */ Command(cmd, priorState.owner.owningKey), /* OUTPUT */ StateAndContract(state, PROGRAM_ID) )</ID>
|
||||
<ID>SpreadOperator:DummyContract.kt$DummyContract.Companion$(*items)</ID>
|
||||
<ID>SpreadOperator:DummyContractV2.kt$DummyContractV2.Companion$( /* INPUTS */ *priors.toTypedArray(), /* COMMAND */ Command(cmd, priorState.owners.map { it.owningKey }), /* OUTPUT */ StateAndContract(state, DummyContractV2.PROGRAM_ID) )</ID>
|
||||
|
@ -88,9 +88,6 @@ Unreleased
|
||||
Note that it is the responsibility of the client application to handle RPC reconnection if this happens.
|
||||
See :ref:`setting_jvm_args` and :ref:`memory_usage_and_tuning` for further details.
|
||||
|
||||
* Environment variables and system properties can now be provided with underscore separators instead of dots. Neither is case-sensitive.
|
||||
See :ref:`overriding config values <corda_configuration_file_overriding_config>` for more information.
|
||||
|
||||
.. _changelog_v4.1:
|
||||
|
||||
Version 4.1
|
||||
|
@ -39,8 +39,6 @@ To alter this behaviour, the ``on-unknown-config-keys`` command-line argument ca
|
||||
Overriding values from node.conf
|
||||
--------------------------------
|
||||
|
||||
.. _corda_configuration_file_overriding_config:
|
||||
|
||||
Environment variables
|
||||
For example: ``${NODE_TRUST_STORE_PASSWORD}`` would be replaced by the contents of the environment variable ``NODE_TRUST_STORE_PASSWORD`` (see the :ref:`hiding-sensitive-data` section).
|
||||
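A minimal sketch (not node code) of how this substitution behaves, using the same Typesafe Config library the node parses its configuration with; with the default resolve options, an undefined ``${...}`` substitution falls back to a system environment variable of the same name:

import com.typesafe.config.ConfigFactory

fun main() {
    // Requires NODE_TRUST_STORE_PASSWORD to be set in the environment, otherwise
    // resolve() throws ConfigException.UnresolvedSubstitution.
    val config = ConfigFactory
            .parseString("""trustStorePassword = ${"$"}{NODE_TRUST_STORE_PASSWORD}""")
            .resolve() // undefined substitutions fall back to environment variables
    println(config.getString("trustStorePassword"))
}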
|
||||
@ -56,11 +54,6 @@ JVM options
|
||||
.. note:: If the same field is overridden by both an environment variable and a system property, the system property
|
||||
takes precedence.
|
||||
|
||||
.. note:: Underscores can be used instead of dots. For example, overriding the ``p2pAddress`` with an environment variable can be done
|
||||
by specifying ``CORDA_P2PADDRESS=host:port``. Variables and properties are not case-sensitive. Corda will warn you if a variable
|
||||
prefixed with ``CORDA`` cannot be mapped to a valid property. Shadowing occurs when two properties
|
||||
of the same type with the same key are defined. For example, having ``CORDA_P2PADDRESS=host:port`` and ``corda_p2paddress=host1:port1``
|
||||
will raise an exception on startup. This is to prevent hard-to-spot mistakes.
|
||||
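The normalisation rule the note describes can be stated as a tiny hypothetical helper (an illustration, not the node's actual implementation):

fun normaliseKey(key: String): String = key.replace("_", ".").toLowerCase()

fun main() {
    check(normaliseKey("CORDA_P2PADDRESS") == "corda.p2paddress")
    // The two spellings collapse to the same key, which is exactly the
    // shadowing case rejected at startup.
    check(normaliseKey("corda_p2paddress") == normaliseKey("CORDA.P2PADDRESS"))
}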
Configuration file fields
|
||||
-------------------------
|
||||
|
||||
|
@ -63,10 +63,10 @@ Hash constraints migration
|
||||
|
||||
.. note:: These instructions only apply to CorDapp Contract JARs (unless otherwise stated).
|
||||
|
||||
Corda 4.0
|
||||
Corda |corda_version|
|
||||
~~~~~~~~~
|
||||
|
||||
Corda 4.0 requires some additional steps to consume and evolve pre-existing on-ledger **hash** constrained states:
|
||||
Corda |corda_version| requires some additional steps to consume and evolve pre-existing on-ledger **hash** constrained states:
|
||||
|
||||
1. All Corda Nodes in the same CZ or business network that may encounter a transaction chain with a hash constrained state must be started using
|
||||
relaxed hash constraint checking mode as described in :ref:`relax_hash_constraints_checking_ref`.
|
||||
@ -106,13 +106,6 @@ Corda 4.0 requires some additional steps to consume and evolve pre-existing on-l
|
||||
Please also ensure that the original unsigned contracts CorDapp is removed from the ``/cordapps`` folder (this will already be present in the
|
||||
node's attachments store) to ensure the lookup code in step 2 retrieves the correct signed contract CorDapp JAR.
|
||||
|
||||
Later releases
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
The next version of Corda will provide automatic transition of *hash constrained* states. This means that signed CorDapps running on a Corda 4.x node will
|
||||
automatically propagate any pre-existing on-ledger *hash-constrained* states (and generate *signature-constrained* outputs) when the system property
|
||||
to break constraints is set.
|
||||
|
||||
.. _cz_whitelisted_constraint_migration:
|
||||
|
||||
CZ whitelisted constraints migration
|
||||
@ -120,17 +113,31 @@ CZ whitelisted constraints migration
|
||||
|
||||
.. note:: These instructions only apply to CorDapp Contract JARs (unless otherwise stated).
|
||||
|
||||
Corda 4.0
|
||||
Corda |corda_version|
|
||||
~~~~~~~~~
|
||||
|
||||
Corda 4.0 requires some additional steps to consume and evolve pre-existing on-ledger **CZ whitelisted** constrained states:
|
||||
Corda |corda_version| requires some additional steps to consume and evolve pre-existing on-ledger **CZ whitelisted** constrained states:
|
||||
|
||||
1. As the original developer of the CorDapp, the first step is to sign the latest version of the JAR that was released (see :doc:`cordapp-build-systems`).
|
||||
The key used for signing will be used to sign all subsequent releases, so it should be stored appropriately. The JAR can be signed by multiple keys owned
|
||||
by different parties and it will be expressed as a ``CompositeKey`` in the ``SignatureAttachmentConstraint`` (See :doc:`api-core-types`).
|
||||
|
||||
2. Any flow that builds transactions using this CorDapp will automatically transition states to use the ``SignatureAttachmentConstraint`` if
|
||||
no other constraint is specified. Therefore, there are two ways to alter the existing code.
|
||||
2. The new Corda 4 signed CorDapp JAR must be registered with the CZ network operator (as whitelisted in the network parameters which are distributed
|
||||
to all nodes in that CZ). The CZ network operator should check that the JAR is signed and not allow any more versions of it to be whitelisted in the future.
|
||||
From now on the development organisation that signed the JAR is responsible for signing new versions.
|
||||
|
||||
The process of CZ network CorDapp whitelisting depends on how the Corda network is configured:
|
||||
|
||||
- if using a hosted CZ network (such as `The Corda Network <https://docs.corda.net/head/corda-network/index.html>`_ or
|
||||
`UAT Environment <https://docs.corda.net/head/corda-network/UAT.html>`_ ) running an Identity Operator (formerly known as Doorman) and
|
||||
Network Map Service, you should manually send the hashes of the two JARs to the CZ network operator and request these be added using
|
||||
their network parameter update process.
|
||||
|
||||
- if using a local network created using the Network Bootstrapper tool, please follow the instructions in
|
||||
:ref:`Updating the contract whitelist for bootstrapped networks <bootstrapper_updating_whitelisted_contracts>` to add both CorDapp Contract JAR hashes.
|
||||
|
||||
3. Any flow that builds transactions using this CorDapp will automatically transition states to use the ``SignatureAttachmentConstraint`` if
|
||||
no other constraint is specified and the CorDapp continues to be whitelisted. Therefore, there are two ways to alter the existing code.
|
||||
|
||||
* Do not specify a constraint
|
||||
* Explicitly add a Signature Constraint
|
||||
@ -165,6 +172,6 @@ The code below details how to explicitly add a Signature Constraint:
|
||||
// Set the Signature constraint on the new state to migrate away from the WhitelistConstraint.
|
||||
.addOutputState(outputState, myContract, new SignatureAttachmentConstraint(ownersKey))
|
||||
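For reference, a Kotlin sketch of the same call; ``outputState``, ``myContract`` and ``ownersKey`` are the placeholders from the Java snippet above, wrapped in a hypothetical helper so the snippet is self-contained:

import net.corda.core.contracts.ContractState
import net.corda.core.contracts.SignatureAttachmentConstraint
import net.corda.core.identity.Party
import net.corda.core.transactions.TransactionBuilder
import java.security.PublicKey

fun buildMigrationTx(notary: Party, outputState: ContractState, myContract: String, ownersKey: PublicKey): TransactionBuilder =
        TransactionBuilder(notary)
                // Set the Signature constraint on the new state to migrate away from the WhitelistConstraint.
                .addOutputState(outputState, myContract, SignatureAttachmentConstraint(ownersKey))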
|
||||
3. As a node operator you need to add the new signed version of the contracts CorDapp to the ``/cordapps`` folder together with the latest version of the flows jar.
|
||||
4. As a node operator you need to add the new signed version of the contracts CorDapp to the ``/cordapps`` folder together with the latest version of the flows jar.
|
||||
Please also ensure that the original unsigned contracts CorDapp is removed from the ``/cordapps`` folder (this will already be present in the
|
||||
node's attachments store) to ensure the lookup code in step 3 retrieves the correct signed contract CorDapp JAR.
|
@ -91,8 +91,12 @@ By default, the node database has the following tables:
|
||||
+-----------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| NODE_CONTRACT_UPGRADES | STATE_REF, CONTRACT_CLASS_NAME |
|
||||
+-----------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| NODE_HASH_TO_KEY | PK_HASH, PUBLIC_KEY |
|
||||
+-----------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| NODE_IDENTITIES | PK_HASH, IDENTITY_VALUE |
|
||||
+-----------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| NODE_IDENTITIES_NO_CERT | PK_HASH, NAME |
|
||||
+-----------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| NODE_INFOS | NODE_INFO_ID, NODE_INFO_HASH, PLATFORM_VERSION, SERIAL |
|
||||
+-----------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| NODE_INFO_HOSTS | HOST_NAME, PORT, NODE_INFO_ID, HOSTS_ID |
|
||||
|
@ -202,6 +202,7 @@ dependencies {
|
||||
|
||||
// BFT-Smart dependencies
|
||||
compile 'com.github.bft-smart:library:master-v1.1-beta-g6215ec8-87'
|
||||
compile 'commons-codec:commons-codec:1.13'
|
||||
|
||||
// Java Atomix: RAFT library
|
||||
compile 'io.atomix.copycat:copycat-client:1.2.3'
|
||||
|
@ -1,87 +0,0 @@
|
||||
package net.corda.node
|
||||
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.node.logging.logFile
|
||||
import net.corda.testing.driver.DriverParameters
|
||||
import net.corda.testing.driver.NodeParameters
|
||||
import net.corda.testing.driver.driver
|
||||
import net.corda.testing.driver.internal.incrementalPortAllocation
|
||||
import org.assertj.core.api.Assertions.assertThatThrownBy
|
||||
import org.junit.Test
|
||||
import org.junit.jupiter.api.assertThrows
|
||||
|
||||
class NodeConfigParsingTests {
|
||||
|
||||
@Test
|
||||
fun `config is overriden by underscore variable`() {
|
||||
val portAllocator = incrementalPortAllocation()
|
||||
val sshPort = portAllocator.nextPort()
|
||||
|
||||
driver(DriverParameters(
|
||||
environmentVariables = mapOf("corda_sshd_port" to sshPort.toString()),
|
||||
startNodesInProcess = false,
|
||||
portAllocation = portAllocator)) {
|
||||
val hasSsh = startNode().get()
|
||||
.logFile()
|
||||
.readLines()
|
||||
.filter { it.contains("SSH server listening on port") }
|
||||
.any { it.contains(sshPort.toString()) }
|
||||
assert(hasSsh)
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `config is overriden by case insensitive underscore variable`() {
|
||||
val portAllocator = incrementalPortAllocation()
|
||||
val sshPort = portAllocator.nextPort()
|
||||
|
||||
driver(DriverParameters(
|
||||
environmentVariables = mapOf("cORDa_sShD_pOrt" to sshPort.toString()),
|
||||
startNodesInProcess = false,
|
||||
portAllocation = portAllocator)) {
|
||||
val hasSsh = startNode().get()
|
||||
.logFile()
|
||||
.readLines()
|
||||
.filter { it.contains("SSH server listening on port") }
|
||||
.any { it.contains(sshPort.toString()) }
|
||||
assert(hasSsh)
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `config is overriden by case insensitive dot variable`() {
|
||||
val portAllocator = incrementalPortAllocation()
|
||||
val sshPort = portAllocator.nextPort()
|
||||
|
||||
driver(DriverParameters(
|
||||
environmentVariables = mapOf("cOrda.sShD.pOrt" to sshPort.toString()),
|
||||
startNodesInProcess = false,
|
||||
portAllocation = portAllocator)) {
|
||||
val hasSsh = startNode(NodeParameters()).get()
|
||||
.logFile()
|
||||
.readLines()
|
||||
.filter { it.contains("SSH server listening on port") }
|
||||
.any { it.contains(sshPort.toString()) }
|
||||
assert(hasSsh)
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `shadowing is forbidden`() {
|
||||
val portAllocator = incrementalPortAllocation()
|
||||
val sshPort = portAllocator.nextPort()
|
||||
|
||||
driver(DriverParameters(
|
||||
environmentVariables = mapOf(
|
||||
"cOrda_sShD_POrt" to sshPort.toString(),
|
||||
"cOrda.sShD.pOrt" to sshPort.toString()),
|
||||
startNodesInProcess = false,
|
||||
portAllocation = portAllocator,
|
||||
notarySpecs = emptyList())) {
|
||||
|
||||
assertThatThrownBy {
|
||||
startNode().getOrThrow()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -24,4 +24,4 @@ class NodeStartupPerformanceTests {
|
||||
println(times.map { it / 1_000_000.0 })
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,7 +1,6 @@
|
||||
package net.corda.node.services.config
|
||||
|
||||
import com.typesafe.config.Config
|
||||
import com.typesafe.config.ConfigException
|
||||
import com.typesafe.config.ConfigFactory
|
||||
import com.typesafe.config.ConfigParseOptions
|
||||
import net.corda.cliutils.CordaSystemUtils
|
||||
@ -9,7 +8,6 @@ import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.core.internal.createDirectories
|
||||
import net.corda.core.internal.div
|
||||
import net.corda.core.internal.exists
|
||||
import net.corda.node.internal.Node
|
||||
import net.corda.nodeapi.internal.DEV_CA_KEY_STORE_PASS
|
||||
import net.corda.nodeapi.internal.config.FileBasedCertificateStoreSupplier
|
||||
import net.corda.nodeapi.internal.config.MutualSslConfiguration
|
||||
@ -21,10 +19,7 @@ import net.corda.nodeapi.internal.installDevNodeCaCertPath
|
||||
import net.corda.nodeapi.internal.loadDevCaTrustStore
|
||||
import net.corda.nodeapi.internal.registerDevP2pCertificates
|
||||
import org.slf4j.LoggerFactory
|
||||
import java.lang.IllegalStateException
|
||||
import java.nio.file.Path
|
||||
import kotlin.reflect.KClass
|
||||
import kotlin.reflect.full.memberProperties
|
||||
|
||||
fun configOf(vararg pairs: Pair<String, Any?>): Config = ConfigFactory.parseMap(mapOf(*pairs))
|
||||
operator fun Config.plus(overrides: Map<String, Any?>): Config = ConfigFactory.parseMap(overrides).withFallback(this)
|
||||
@ -67,69 +62,9 @@ object ConfigHelper {
|
||||
return finalConfig
|
||||
}
|
||||
|
||||
private fun <T: Any> getCaseSensitivePropertyPath(target : KClass<T>?, path : List<String>) : String {
|
||||
|
||||
require(path.isNotEmpty()) { "Path to config property cannot be empty." }
|
||||
|
||||
val lookFor = path.first()
|
||||
target?.memberProperties?.forEach {
|
||||
if (it.name.toLowerCase() == lookFor.toLowerCase()) {
|
||||
return if (path.size > 1)
|
||||
"${it.name}." +
|
||||
getCaseSensitivePropertyPath(
|
||||
(it.getter.returnType.classifier as KClass<*>),
|
||||
path.subList(1, path.size))
|
||||
else
|
||||
it.name
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
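To see what this lookup does, here is a self-contained restatement against hypothetical config classes (requires kotlin-reflect):

import kotlin.reflect.KClass
import kotlin.reflect.full.memberProperties

data class ExampleRpcSettings(val address: String = "")
data class ExampleNodeConfig(val p2pAddress: String = "", val rpcSettings: ExampleRpcSettings = ExampleRpcSettings())

// Same algorithm as getCaseSensitivePropertyPath above: walk the class's
// properties, matching each path segment case-insensitively, and rebuild
// the path from the declared (case-sensitive) property names.
fun caseSensitivePath(target: KClass<*>?, path: List<String>): String {
    require(path.isNotEmpty()) { "Path to config property cannot be empty." }
    val lookFor = path.first().toLowerCase()
    target?.memberProperties?.forEach { prop ->
        if (prop.name.toLowerCase() == lookFor) {
            return if (path.size > 1)
                "${prop.name}." + caseSensitivePath(prop.getter.returnType.classifier as KClass<*>, path.drop(1))
            else prop.name
        }
    }
    return ""
}

fun main() {
    check(caseSensitivePath(ExampleNodeConfig::class, listOf("p2paddress")) == "p2pAddress")
    check(caseSensitivePath(ExampleNodeConfig::class, listOf("rpcsettings", "address")) == "rpcSettings.address")
}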
|
||||
/*
|
||||
* Returns only the "corda"-prefixed entries of this Config, normalising underscore-separated keys and dropping keys that cannot be mapped to a known config property.
|
||||
*/
|
||||
private fun Config.cordaEntriesOnly(): Config {
|
||||
|
||||
val cordaPropOccurrences = mutableSetOf<String>()
|
||||
val badKeyConversions = mutableSetOf<String>()
|
||||
|
||||
return ConfigFactory.parseMap(
|
||||
toProperties()
|
||||
.mapKeys {
|
||||
val newKey = (it.key as String)
|
||||
.replace("_", ".")
|
||||
.toLowerCase()
|
||||
|
||||
if (newKey.contains(CORDA_PROPERTY_PREFIX) && cordaPropOccurrences.contains(newKey)) {
|
||||
throw ShadowingException(it.key.toString(), newKey)
|
||||
}
|
||||
|
||||
cordaPropOccurrences.add(newKey)
|
||||
newKey.let { key ->
|
||||
if (!key.contains(CORDA_PROPERTY_PREFIX))
|
||||
return@let key
|
||||
|
||||
val nodeConfKey = key.removePrefix(CORDA_PROPERTY_PREFIX)
|
||||
val configPath = getCaseSensitivePropertyPath(
|
||||
NodeConfigurationImpl::class,
|
||||
nodeConfKey.split(".")
|
||||
)
|
||||
|
||||
if (nodeConfKey.length != configPath.length) {
|
||||
Node.printWarning(
|
||||
"${it.key} (property or environment variable) cannot be mapped to an existing Corda" +
|
||||
" config property and thus won't be used as a config override!" +
|
||||
" It won't be passed as a config override! If that was the intention " +
|
||||
" double check the spelling and ensure there is such config key.")
|
||||
badKeyConversions.add(configPath)
|
||||
}
|
||||
CORDA_PROPERTY_PREFIX + configPath
|
||||
}
|
||||
}.filterKeys { it.startsWith(CORDA_PROPERTY_PREFIX) }
|
||||
.mapKeys { it.key.removePrefix(CORDA_PROPERTY_PREFIX) }
|
||||
.filterKeys { !badKeyConversions.contains(it) })
|
||||
return ConfigFactory.parseMap(toProperties().filterKeys { (it as String).startsWith(CORDA_PROPERTY_PREFIX) }.mapKeys { (it.key as String).removePrefix(CORDA_PROPERTY_PREFIX) })
|
||||
}
|
||||
}
|
||||
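The surviving one-line implementation keeps only the ``corda``-prefixed keys and strips the prefix. A standalone sketch of that behaviour (the ``"corda."`` prefix value is an assumption here):

import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory

fun cordaEntriesOnly(properties: Map<String, String>, prefix: String = "corda."): Config =
        ConfigFactory.parseMap(
                properties.filterKeys { it.startsWith(prefix) }
                        .mapKeys { it.key.removePrefix(prefix) })

fun main() {
    val config = cordaEntriesOnly(mapOf("corda.p2pAddress" to "localhost:10002", "java.version" to "8"))
    check(config.getString("p2pAddress") == "localhost:10002")
    check(!config.hasPath("java.version"))
}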
|
||||
|
@ -1,7 +0,0 @@
|
||||
package net.corda.node.services.config
|
||||
|
||||
import com.typesafe.config.ConfigException
|
||||
|
||||
class ShadowingException(definedProperty : String, convertedProperty : String)
|
||||
: ConfigException(
|
||||
"Environment variable $definedProperty is shadowing another property transformed to $convertedProperty")
|
@ -27,7 +27,8 @@ object SerializerFactoryBuilder {
|
||||
Float::class,
|
||||
Int::class,
|
||||
Long::class,
|
||||
Short::class
|
||||
Short::class,
|
||||
Void::class
|
||||
).associate {
|
||||
klazz -> klazz.javaObjectType to klazz.javaPrimitiveType
|
||||
}) as Map<Class<*>, Class<*>>
|
||||
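A self-contained restatement of the association above, which also shows why ``Void::class`` belongs in the list (its boxed form ``java.lang.Void`` maps to the primitive ``void`` type):

val primitiveByBoxed: Map<Class<*>, Class<*>?> = listOf(
        Boolean::class, Byte::class, Char::class, Double::class,
        Float::class, Int::class, Long::class, Short::class, Void::class
).associate { klazz -> klazz.javaObjectType to klazz.javaPrimitiveType }

fun main() {
    check(primitiveByBoxed[java.lang.Integer::class.java] == java.lang.Integer.TYPE)
    check(primitiveByBoxed[java.lang.Void::class.java] == java.lang.Void.TYPE)
}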
|
@ -199,8 +199,7 @@ fun <A> driver(defaultParameters: DriverParameters = DriverParameters(), dsl: Dr
|
||||
networkParameters = defaultParameters.networkParameters,
|
||||
notaryCustomOverrides = defaultParameters.notaryCustomOverrides,
|
||||
inMemoryDB = defaultParameters.inMemoryDB,
|
||||
cordappsForAllNodes = uncheckedCast(defaultParameters.cordappsForAllNodes),
|
||||
environmentVariables = defaultParameters.environmentVariables
|
||||
cordappsForAllNodes = uncheckedCast(defaultParameters.cordappsForAllNodes)
|
||||
),
|
||||
coerce = { it },
|
||||
dsl = dsl
|
||||
@ -256,46 +255,10 @@ data class DriverParameters(
|
||||
val networkParameters: NetworkParameters = testNetworkParameters(notaries = emptyList()),
|
||||
val notaryCustomOverrides: Map<String, Any?> = emptyMap(),
|
||||
val inMemoryDB: Boolean = true,
|
||||
val cordappsForAllNodes: Collection<TestCordapp>? = null,
|
||||
val environmentVariables : Map<String, String> = emptyMap()
|
||||
val cordappsForAllNodes: Collection<TestCordapp>? = null
|
||||
) {
|
||||
constructor(cordappsForAllNodes: Collection<TestCordapp>) : this(isDebug = false, cordappsForAllNodes = cordappsForAllNodes)
|
||||
|
||||
constructor(
|
||||
isDebug: Boolean = false,
|
||||
driverDirectory: Path = Paths.get("build") / "node-driver" / getTimestampAsDirectoryName(),
|
||||
portAllocation: PortAllocation = incrementalPortAllocation(),
|
||||
debugPortAllocation: PortAllocation = incrementalPortAllocation(),
|
||||
systemProperties: Map<String, String> = emptyMap(),
|
||||
useTestClock: Boolean = false,
|
||||
startNodesInProcess: Boolean = false,
|
||||
waitForAllNodesToFinish: Boolean = false,
|
||||
notarySpecs: List<NotarySpec> = listOf(NotarySpec(DUMMY_NOTARY_NAME)),
|
||||
extraCordappPackagesToScan: List<String> = emptyList(),
|
||||
@Suppress("DEPRECATION") jmxPolicy: JmxPolicy = JmxPolicy(),
|
||||
networkParameters: NetworkParameters = testNetworkParameters(notaries = emptyList()),
|
||||
notaryCustomOverrides: Map<String, Any?> = emptyMap(),
|
||||
inMemoryDB: Boolean = true,
|
||||
cordappsForAllNodes: Collection<TestCordapp>? = null
|
||||
) : this(
|
||||
isDebug,
|
||||
driverDirectory,
|
||||
portAllocation,
|
||||
debugPortAllocation,
|
||||
systemProperties,
|
||||
useTestClock,
|
||||
startNodesInProcess,
|
||||
waitForAllNodesToFinish,
|
||||
notarySpecs,
|
||||
extraCordappPackagesToScan,
|
||||
jmxPolicy,
|
||||
networkParameters,
|
||||
notaryCustomOverrides,
|
||||
inMemoryDB,
|
||||
cordappsForAllNodes,
|
||||
environmentVariables = emptyMap()
|
||||
)
|
||||
|
||||
constructor(
|
||||
isDebug: Boolean = false,
|
||||
driverDirectory: Path = Paths.get("build") / "node-driver" / getTimestampAsDirectoryName(),
|
||||
@ -410,7 +373,6 @@ data class DriverParameters(
|
||||
fun withNotaryCustomOverrides(notaryCustomOverrides: Map<String, Any?>): DriverParameters = copy(notaryCustomOverrides = notaryCustomOverrides)
|
||||
fun withInMemoryDB(inMemoryDB: Boolean): DriverParameters = copy(inMemoryDB = inMemoryDB)
|
||||
fun withCordappsForAllNodes(cordappsForAllNodes: Collection<TestCordapp>?): DriverParameters = copy(cordappsForAllNodes = cordappsForAllNodes)
|
||||
fun withEnvironmentVariables(variables : Map<String, String>): DriverParameters = copy(environmentVariables = variables)
|
||||
|
||||
fun copy(
|
||||
isDebug: Boolean,
|
||||
@ -471,40 +433,4 @@ data class DriverParameters(
|
||||
notaryCustomOverrides = emptyMap(),
|
||||
cordappsForAllNodes = cordappsForAllNodes
|
||||
)
|
||||
|
||||
@Suppress("LongParameterList")
|
||||
fun copy(
|
||||
isDebug: Boolean,
|
||||
driverDirectory: Path,
|
||||
portAllocation: PortAllocation,
|
||||
debugPortAllocation: PortAllocation,
|
||||
systemProperties: Map<String, String>,
|
||||
useTestClock: Boolean,
|
||||
startNodesInProcess: Boolean,
|
||||
waitForAllNodesToFinish: Boolean,
|
||||
notarySpecs: List<NotarySpec>,
|
||||
extraCordappPackagesToScan: List<String>,
|
||||
jmxPolicy: JmxPolicy,
|
||||
networkParameters: NetworkParameters,
|
||||
notaryCustomOverrides: Map<String, Any?>,
|
||||
inMemoryDB: Boolean,
|
||||
cordappsForAllNodes: Collection<TestCordapp>?
|
||||
) = this.copy(
|
||||
isDebug = isDebug,
|
||||
driverDirectory = driverDirectory,
|
||||
portAllocation = portAllocation,
|
||||
debugPortAllocation = debugPortAllocation,
|
||||
systemProperties = systemProperties,
|
||||
useTestClock = useTestClock,
|
||||
startNodesInProcess = startNodesInProcess,
|
||||
waitForAllNodesToFinish = waitForAllNodesToFinish,
|
||||
notarySpecs = notarySpecs,
|
||||
extraCordappPackagesToScan = extraCordappPackagesToScan,
|
||||
jmxPolicy = jmxPolicy,
|
||||
networkParameters = networkParameters,
|
||||
notaryCustomOverrides = notaryCustomOverrides,
|
||||
inMemoryDB = inMemoryDB,
|
||||
cordappsForAllNodes = cordappsForAllNodes,
|
||||
environmentVariables = emptyMap()
|
||||
)
|
||||
}
|
@ -91,8 +91,7 @@ class DriverDSLImpl(
|
||||
val networkParameters: NetworkParameters,
|
||||
val notaryCustomOverrides: Map<String, Any?>,
|
||||
val inMemoryDB: Boolean,
|
||||
val cordappsForAllNodes: Collection<TestCordappInternal>?,
|
||||
val environmentVariables : Map<String, String>
|
||||
val cordappsForAllNodes: Collection<TestCordappInternal>?
|
||||
) : InternalDriverDSL {
|
||||
|
||||
private var _executorService: ScheduledExecutorService? = null
|
||||
@ -289,11 +288,9 @@ class DriverDSLImpl(
|
||||
} else {
|
||||
startOutOfProcessMiniNode(
|
||||
config,
|
||||
arrayOf(
|
||||
"initial-registration",
|
||||
"--network-root-truststore=${rootTruststorePath.toAbsolutePath()}",
|
||||
"--network-root-truststore-password=$rootTruststorePassword"
|
||||
)
|
||||
"initial-registration",
|
||||
"--network-root-truststore=${rootTruststorePath.toAbsolutePath()}",
|
||||
"--network-root-truststore-password=$rootTruststorePassword"
|
||||
).map { config }
|
||||
}
|
||||
}
|
||||
@ -453,7 +450,7 @@ class DriverDSLImpl(
|
||||
} else {
|
||||
// TODO The config we use here uses a hardcoded p2p port which changes when the node is run properly
|
||||
// This causes two node info files to be generated.
|
||||
startOutOfProcessMiniNode(config, arrayOf("generate-node-info")).map {
|
||||
startOutOfProcessMiniNode(config, "generate-node-info").map {
|
||||
// Once done we have to read the signed node info file that's been generated
|
||||
val nodeInfoFile = config.corda.baseDirectory.list { paths ->
|
||||
paths.filter { it.fileName.toString().startsWith(NodeInfoFilesCopier.NODE_INFO_FILE_NAME_PREFIX) }.findFirst().get()
|
||||
@ -539,7 +536,7 @@ class DriverDSLImpl(
|
||||
* Start the node with the given command-line flags, which are expected to run the node for some one-off task; once
|
||||
* the task completes, the node terminates.
|
||||
*/
|
||||
private fun startOutOfProcessMiniNode(config: NodeConfig, extraCmdLineFlag: Array<String> = emptyArray()): CordaFuture<Unit> {
|
||||
private fun startOutOfProcessMiniNode(config: NodeConfig, vararg extraCmdLineFlag: String): CordaFuture<Unit> {
|
||||
val debugPort = if (isDebug) debugPortAllocation.nextPort() else null
|
||||
val process = startOutOfProcessNode(
|
||||
config,
|
||||
@ -548,8 +545,7 @@ class DriverDSLImpl(
|
||||
systemProperties,
|
||||
"512m",
|
||||
null,
|
||||
environmentVariables,
|
||||
extraCmdLineFlag
|
||||
*extraCmdLineFlag
|
||||
)
|
||||
|
||||
return poll(executorService, "$extraCmdLineFlag (${config.corda.myLegalName})") {
|
||||
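The change above turns an ``Array<String>`` parameter into a Kotlin ``vararg``; callers then forward it with the spread operator (``*``). A minimal sketch of the pattern:

fun startWithFlags(vararg flags: String): String = flags.joinToString(" ")

fun forward(vararg extraCmdLineFlag: String): String = startWithFlags(*extraCmdLineFlag)

fun main() {
    check(forward("initial-registration", "--no-local-shell") == "initial-registration --no-local-shell")
}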
@ -606,15 +602,7 @@ class DriverDSLImpl(
|
||||
nodeFuture
|
||||
} else {
|
||||
val debugPort = if (isDebug) debugPortAllocation.nextPort() else null
|
||||
val process = startOutOfProcessNode(
|
||||
config,
|
||||
quasarJarPath,
|
||||
debugPort,
|
||||
systemProperties,
|
||||
parameters.maximumHeapSize,
|
||||
parameters.logLevelOverride,
|
||||
environmentVariables
|
||||
)
|
||||
val process = startOutOfProcessNode(config, quasarJarPath, debugPort, systemProperties, parameters.maximumHeapSize, parameters.logLevelOverride)
|
||||
|
||||
// Destroy the child process when the parent exits. This is needed even when `waitForAllNodesToFinish` is
|
||||
// true because we don't want orphaned processes in the case that the parent process is terminated by the
|
||||
@ -738,7 +726,6 @@ class DriverDSLImpl(
|
||||
}
|
||||
}
|
||||
|
||||
@Suppress("LongParameterList")
|
||||
private fun startOutOfProcessNode(
|
||||
config: NodeConfig,
|
||||
quasarJarPath: String,
|
||||
@ -746,11 +733,9 @@ class DriverDSLImpl(
|
||||
overriddenSystemProperties: Map<String, String>,
|
||||
maximumHeapSize: String,
|
||||
logLevelOverride: String?,
|
||||
environmentVariables : Map<String,String>,
|
||||
extraCmdLineFlag: Array<String> = emptyArray()
|
||||
): Process {
|
||||
log.info("Starting out-of-process Node ${config.corda.myLegalName.organisation}, " +
|
||||
"debug port is " + (debugPort ?: "not enabled"))
|
||||
vararg extraCmdLineFlag: String
|
||||
): Process {
|
||||
log.info("Starting out-of-process Node ${config.corda.myLegalName.organisation}, debug port is " + (debugPort ?: "not enabled"))
|
||||
// Write node.conf
|
||||
writeConfig(config.corda.baseDirectory, "node.conf", config.typesafe.toNodeOnly())
|
||||
|
||||
@ -789,7 +774,7 @@ class DriverDSLImpl(
|
||||
"--base-directory=${config.corda.baseDirectory}",
|
||||
"--logging-level=$loggingLevel",
|
||||
"--no-local-shell").also {
|
||||
it.addAll(extraCmdLineFlag)
|
||||
it += extraCmdLineFlag
|
||||
}.toList()
|
||||
|
||||
// The following dependencies are excluded from the classpath of the created JVM, so that the environment resembles a real one as close as possible.
|
||||
@ -807,8 +792,7 @@ class DriverDSLImpl(
|
||||
extraJvmArguments = extraJvmArguments,
|
||||
workingDirectory = config.corda.baseDirectory,
|
||||
maximumHeapSize = maximumHeapSize,
|
||||
classPath = cp,
|
||||
environmentVariables = environmentVariables
|
||||
classPath = cp
|
||||
)
|
||||
}
|
||||
|
||||
@ -1029,8 +1013,7 @@ fun <DI : DriverDSL, D : InternalDriverDSL, A> genericDriver(
|
||||
networkParameters = defaultParameters.networkParameters,
|
||||
notaryCustomOverrides = defaultParameters.notaryCustomOverrides,
|
||||
inMemoryDB = defaultParameters.inMemoryDB,
|
||||
cordappsForAllNodes = uncheckedCast(defaultParameters.cordappsForAllNodes),
|
||||
environmentVariables = defaultParameters.environmentVariables
|
||||
cordappsForAllNodes = uncheckedCast(defaultParameters.cordappsForAllNodes)
|
||||
)
|
||||
)
|
||||
val shutdownHook = addShutdownHook(driverDsl::shutdown)
|
||||
@ -1107,7 +1090,6 @@ class SplitCompatibilityZoneParams(
|
||||
override fun config() : NetworkServicesConfig = config
|
||||
}
|
||||
|
||||
@Suppress("LongParameterList")
|
||||
fun <A> internalDriver(
|
||||
isDebug: Boolean = DriverParameters().isDebug,
|
||||
driverDirectory: Path = DriverParameters().driverDirectory,
|
||||
@ -1125,7 +1107,6 @@ fun <A> internalDriver(
|
||||
notaryCustomOverrides: Map<String, Any?> = DriverParameters().notaryCustomOverrides,
|
||||
inMemoryDB: Boolean = DriverParameters().inMemoryDB,
|
||||
cordappsForAllNodes: Collection<TestCordappInternal>? = null,
|
||||
environmentVariables: Map<String, String> = emptyMap(),
|
||||
dsl: DriverDSLImpl.() -> A
|
||||
): A {
|
||||
return genericDriver(
|
||||
@ -1145,8 +1126,7 @@ fun <A> internalDriver(
|
||||
networkParameters = networkParameters,
|
||||
notaryCustomOverrides = notaryCustomOverrides,
|
||||
inMemoryDB = inMemoryDB,
|
||||
cordappsForAllNodes = cordappsForAllNodes,
|
||||
environmentVariables = environmentVariables
|
||||
cordappsForAllNodes = cordappsForAllNodes
|
||||
),
|
||||
coerce = { it },
|
||||
dsl = dsl
|
||||
|
@ -7,29 +7,17 @@ import java.time.ZonedDateTime
|
||||
import java.time.format.DateTimeFormatter
|
||||
|
||||
object ProcessUtilities {
|
||||
@Suppress("LongParameterList")
|
||||
inline fun <reified C : Any> startJavaProcess(
|
||||
arguments: List<String>,
|
||||
classPath: List<String> = defaultClassPath,
|
||||
workingDirectory: Path? = null,
|
||||
jdwpPort: Int? = null,
|
||||
extraJvmArguments: List<String> = emptyList(),
|
||||
maximumHeapSize: String? = null,
|
||||
environmentVariables: Map<String, String> = emptyMap()
|
||||
maximumHeapSize: String? = null
|
||||
): Process {
|
||||
return startJavaProcess(
|
||||
C::class.java.name,
|
||||
arguments,
|
||||
classPath,
|
||||
workingDirectory,
|
||||
jdwpPort,
|
||||
extraJvmArguments,
|
||||
maximumHeapSize,
|
||||
environmentVariables
|
||||
)
|
||||
return startJavaProcess(C::class.java.name, arguments, classPath, workingDirectory, jdwpPort, extraJvmArguments, maximumHeapSize)
|
||||
}
|
||||
|
||||
@Suppress("LongParameterList")
|
||||
fun startJavaProcess(
|
||||
className: String,
|
||||
arguments: List<String>,
|
||||
@ -37,8 +25,7 @@ object ProcessUtilities {
|
||||
workingDirectory: Path? = null,
|
||||
jdwpPort: Int? = null,
|
||||
extraJvmArguments: List<String> = emptyList(),
|
||||
maximumHeapSize: String? = null,
|
||||
environmentVariables: Map<String,String> = emptyMap()
|
||||
maximumHeapSize: String? = null
|
||||
): Process {
|
||||
val command = mutableListOf<String>().apply {
|
||||
add(javaPath)
|
||||
@ -51,7 +38,6 @@ object ProcessUtilities {
|
||||
}
|
||||
return ProcessBuilder(command).apply {
|
||||
inheritIO()
|
||||
environment().putAll(environmentVariables)
|
||||
environment()["CLASSPATH"] = classPath.joinToString(File.pathSeparator)
|
||||
if (workingDirectory != null) {
|
||||
// Timestamp may be handy if the same process started, killed and then re-started. Without timestamp
|
||||
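A minimal sketch of the launch technique used here, assuming (as the code above does) that the child JVM picks up its classpath from the ``CLASSPATH`` environment variable when no ``-cp`` flag is given; this sidesteps OS limits on command-line length for very large classpaths:

import java.io.File

fun launchChildJvm(mainClassName: String, arguments: List<String>, classPath: List<String>): Process {
    val javaPath = System.getProperty("java.home") + File.separator + "bin" + File.separator + "java"
    return ProcessBuilder(listOf(javaPath, mainClassName) + arguments).apply {
        inheritIO() // share stdin/stdout/stderr with the parent, as startJavaProcess does
        environment()["CLASSPATH"] = classPath.joinToString(File.pathSeparator)
    }.start()
}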
|
@ -104,7 +104,6 @@ val fakeNodeLegalName = CordaX500Name(organisation = "Not:a:real:name", locality
|
||||
private val globalPortAllocation = incrementalPortAllocation()
|
||||
private val globalDebugPortAllocation = incrementalPortAllocation()
|
||||
|
||||
@Suppress("LongParameterList")
|
||||
fun <A> rpcDriver(
|
||||
isDebug: Boolean = false,
|
||||
driverDirectory: Path = Paths.get("build") / "rpc-driver" / getTimestampAsDirectoryName(),
|
||||
@ -122,7 +121,6 @@ fun <A> rpcDriver(
|
||||
notaryCustomOverrides: Map<String, Any?> = emptyMap(),
|
||||
inMemoryDB: Boolean = true,
|
||||
cordappsForAllNodes: Collection<TestCordappInternal>? = null,
|
||||
environmentVariables: Map<String, String> = emptyMap(),
|
||||
dsl: RPCDriverDSL.() -> A
|
||||
): A {
|
||||
return genericDriver(
|
||||
@ -143,8 +141,7 @@ fun <A> rpcDriver(
|
||||
networkParameters = networkParameters,
|
||||
notaryCustomOverrides = notaryCustomOverrides,
|
||||
inMemoryDB = inMemoryDB,
|
||||
cordappsForAllNodes = cordappsForAllNodes,
|
||||
environmentVariables = environmentVariables
|
||||
cordappsForAllNodes = cordappsForAllNodes
|
||||
), externalTrace
|
||||
),
|
||||
coerce = { it },
|
||||
|
@ -1,45 +0,0 @@
|
||||
package net.corda.testing.node;
|
||||
|
||||
import net.corda.testing.driver.PortAllocation;
|
||||
import org.jetbrains.annotations.NotNull;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.RandomAccessFile;
|
||||
import java.nio.MappedByteBuffer;
|
||||
import java.nio.channels.FileChannel;
|
||||
|
||||
public class PortAllocationRunner {
|
||||
|
||||
public static void main(@NotNull String[] args) throws IOException {
|
||||
/*
|
||||
* each JVM will be launched with [spinnerFile, reportingIndex, iterCount]
|
||||
*/
|
||||
int reportingIndex = Integer.parseInt(args[1]);
|
||||
|
||||
RandomAccessFile spinnerBackingFile = new RandomAccessFile(args[0], "rw");
|
||||
MappedByteBuffer spinnerBuffer = spinnerBackingFile.getChannel().map(FileChannel.MapMode.READ_WRITE, 0, 16);
|
||||
|
||||
/*
|
||||
* notify the launching process, via the reporting index we were allocated on launch, that we are ready to start
|
||||
*/
|
||||
spinnerBuffer.putShort(reportingIndex, ((short) 10));
|
||||
|
||||
/*
|
||||
* wait for parent process to notify us that all waiting processes are good to go
|
||||
* do not Thread.sleep, as we want as much overlap as possible between the ports selected by the spawned processes;
|
||||
* with sleeping, it's frequently the case that one process completes selection before another wakes up, resulting in sequential ranges rather than overlapping ones
|
||||
*/
|
||||
while (spinnerBuffer.getShort(0) != 8) {
|
||||
}
|
||||
|
||||
/*
|
||||
* allocate ports and print out for later consumption by the spawning test
|
||||
*/
|
||||
PortAllocation pa = PortAllocation.getDefaultAllocator();
|
||||
int iterCount = Integer.parseInt(args[2]);
|
||||
for (int i = 0; i < iterCount; i++) {
|
||||
System.out.println(pa.nextPort());
|
||||
}
|
||||
}
|
||||
|
||||
}
|
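The deleted runner synchronised across JVMs through a memory-mapped file. A sketch of that handshake from the child's side (the file path and offsets are illustrative; 10 means "ready" at the child's reporting index, 8 at offset 0 means "go"):

import java.io.RandomAccessFile
import java.nio.channels.FileChannel

fun main() {
    val spinnerBuffer = RandomAccessFile("/tmp/spinner", "rw")
            .channel.map(FileChannel.MapMode.READ_WRITE, 0, 16)
    spinnerBuffer.putShort(1, 10) // report readiness at our reporting index
    while (spinnerBuffer.getShort(0) != 8.toShort()) {
        // busy-wait, not sleep, so that all children start allocating at the same instant
    }
    println("go signal received")
}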
@ -1,113 +0,0 @@
|
||||
package net.corda.testing.driver
|
||||
|
||||
import net.corda.core.utilities.Try
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.testing.node.PortAllocationRunner
|
||||
import org.assertj.core.util.Files
|
||||
import org.hamcrest.CoreMatchers.`is`
|
||||
import org.hamcrest.core.IsNot.not
|
||||
import org.hamcrest.number.OrderingComparison
|
||||
import org.junit.Assert
|
||||
import org.junit.Assume.assumeFalse
|
||||
import org.junit.Test
|
||||
import java.io.RandomAccessFile
|
||||
import java.nio.channels.FileChannel
|
||||
import java.util.concurrent.TimeUnit
|
||||
import kotlin.streams.toList
|
||||
|
||||
class PortAllocationTest {
|
||||
|
||||
companion object {
|
||||
val logger = contextLogger()
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `should allocate a port whilst cycling back round if exceeding start of ephemeral range`() {
|
||||
val startingPoint = PortAllocation.DEFAULT_START_PORT
|
||||
val portAllocator = PortAllocation.defaultAllocator
|
||||
|
||||
var previous = portAllocator.nextPort()
|
||||
(0 until 50_000).forEach { _ ->
|
||||
val next = portAllocator.nextPort()
|
||||
Assert.assertThat(next, `is`(not(previous)))
|
||||
Assert.assertThat(next, `is`(OrderingComparison.lessThan(PortAllocation.FIRST_EPHEMERAL_PORT)))
|
||||
|
||||
if (next == startingPoint) {
|
||||
Assert.assertThat(previous, `is`(PortAllocation.FIRST_EPHEMERAL_PORT - 1))
|
||||
} else {
|
||||
Assert.assertTrue(next >= previous + 1)
|
||||
}
|
||||
previous = next
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout = 120_000)
|
||||
fun `should support multiprocess port allocation`() {
|
||||
assumeFalse(System.getProperty("os.name").toLowerCase().contains("windows"))
|
||||
|
||||
logger.info("Starting multiprocess port allocation test")
|
||||
val spinnerFile = Files.newTemporaryFile().also { it.deleteOnExit() }.absolutePath
|
||||
val iterCount = 8_000 // Default port range is 10000-30000; since we will have 2 processes, we want to make sure there is enough legroom
|
||||
// If we roll over, we may well receive ports that were already given to a different process
|
||||
val process1 = buildJvmProcess(spinnerFile, 1, iterCount)
|
||||
val process2 = buildJvmProcess(spinnerFile, 2, iterCount)
|
||||
|
||||
logger.info("Started child processes")
|
||||
|
||||
val processes = listOf(process1, process2)
|
||||
|
||||
val spinnerBackingFile = RandomAccessFile(spinnerFile, "rw")
|
||||
logger.info("Mapped spinner file at: $spinnerFile")
|
||||
val spinnerBuffer = spinnerBackingFile.channel.map(FileChannel.MapMode.READ_WRITE, 0, 512)
|
||||
logger.info("Created spinner buffer")
|
||||
|
||||
var timeWaited = 0L
|
||||
|
||||
while (spinnerBuffer.getShort(1) != 10.toShort() && spinnerBuffer.getShort(2) != 10.toShort() && timeWaited < 60_000) {
|
||||
logger.info("Waiting to childProcesses to report back. waited ${timeWaited}ms")
|
||||
Thread.sleep(1000)
|
||||
timeWaited += 1000
|
||||
}
|
||||
|
||||
//GO!
|
||||
logger.info("Instructing child processes to start allocating ports")
|
||||
spinnerBuffer.putShort(0, 8)
|
||||
logger.info("Waiting for child processes to terminate")
|
||||
val terminationStatuses = processes.parallelStream().map { if (it.waitFor(1, TimeUnit.MINUTES)) "OK" else "STILL RUNNING" }.toList()
|
||||
logger.info("child processes terminated: $terminationStatuses")
|
||||
|
||||
fun List<String>.setOfPorts(): Set<Int> {
|
||||
// May include warnings when ports are busy
|
||||
return map { Try.on { Integer.parseInt(it) } }.filter { it.isSuccess }.map { it.getOrThrow() }.toSet()
|
||||
}
|
||||
|
||||
val lines1 = process1.inputStream.reader().readLines()
|
||||
val portsAllocated1 = lines1.setOfPorts()
|
||||
val lines2 = process2.inputStream.reader().readLines()
|
||||
val portsAllocated2 = lines2.setOfPorts()
|
||||
|
||||
logger.info("child process out captured")
|
||||
|
||||
Assert.assertThat(lines1.joinToString(), portsAllocated1.size, `is`(iterCount))
|
||||
Assert.assertThat(lines2.joinToString(), portsAllocated2.size, `is`(iterCount))
|
||||
|
||||
//there should be no overlap between the outputs as each process should have been allocated a unique set of ports
|
||||
val intersect = portsAllocated1.intersect(portsAllocated2)
|
||||
Assert.assertThat(intersect.joinToString(), intersect, `is`(emptySet()))
|
||||
}
|
||||
|
||||
private fun buildJvmProcess(spinnerFile: String, reportingIndex: Int, iterCount: Int): Process {
|
||||
val separator = System.getProperty("file.separator")
|
||||
val classpath = System.getProperty("java.class.path")
|
||||
val path = (System.getProperty("java.home")
|
||||
+ separator + "bin" + separator + "java")
|
||||
val processBuilder = ProcessBuilder(path, "-cp",
|
||||
classpath,
|
||||
PortAllocationRunner::class.java.name,
|
||||
spinnerFile,
|
||||
reportingIndex.toString(),
|
||||
iterCount.toString())
|
||||
|
||||
return processBuilder.start()
|
||||
}
|
||||
}
|
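The first test pins down the allocator's wrap-around behaviour. A sketch of an allocator with that property (the bounds follow the test's comment, 10000-30000, and are not a confirmed Corda constant):

import java.util.concurrent.atomic.AtomicInteger

class CyclingPortAllocator(private val startPort: Int = 10_000, private val firstEphemeralPort: Int = 30_000) {
    private val counter = AtomicInteger(0)
    // Ports increase monotonically and cycle back to startPort instead of
    // crossing into the ephemeral range.
    fun nextPort(): Int = startPort + counter.getAndIncrement() % (firstEphemeralPort - startPort)
}

fun main() {
    val allocator = CyclingPortAllocator()
    repeat(20_000) { allocator.nextPort() }
    check(allocator.nextPort() == 10_000) // wrapped around exactly once
}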
File diff suppressed because it is too large