Mirror of https://github.com/corda/corda.git (synced 2024-12-19 21:17:58 +00:00)

commit 0488a5ac5d
Merge remote-tracking branch 'origin/release/os/4.3' into merge_4.3_to_4.4

# Conflicts:
#	constants.properties
.ci/dev/integration/Jenkinsfile (vendored, new file, 59 lines)
@@ -0,0 +1,59 @@
import static com.r3.build.BuildControl.killAllExistingBuildsForJob
@Library('existing-build-control')
import static com.r3.build.BuildControl.killAllExistingBuildsForJob

killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())

pipeline {
    agent { label 'k8s' }
    options { timestamps() }

    environment {
        DOCKER_TAG_TO_USE = "${UUID.randomUUID().toString().toLowerCase().subSequence(0, 12)}"
        EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
        BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
    }

    stages {
        stage('Corda - Generate Build Image') {
            steps {
                withCredentials([string(credentialsId: 'container_reg_passwd', variable: 'DOCKER_PUSH_PWD')]) {
                    sh "./gradlew " +
                            "-Dkubenetize=true " +
                            "-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
                            "-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
                            "-Ddocker.provided.tag=\"\${DOCKER_TAG_TO_USE}\"" +
                            " clean pushBuildImage"
                }
                sh "kubectl auth can-i get pods"
            }
        }

        stage('Corda - Run Tests') {
            stage('Integration Tests') {
                steps {
                    sh "./gradlew " +
                            "-DbuildId=\"\${BUILD_ID}\" " +
                            "-Dkubenetize=true " +
                            "-Ddocker.tag=\"\${DOCKER_TAG_TO_USE}\"" +
                            " allParallelIntegrationTest"
                    if (env.CHANGE_ID) {
                        pullRequest.createStatus(status: 'success',
                                context: 'continuous-integration/jenkins/pr-merge/integrationTest',
                                description: 'Integration Tests Passed',
                                targetUrl: "${env.JOB_URL}/testResults")
                    }
                }
            }
        }
    }

    post {
        always {
            junit '**/build/test-results-xml/**/*.xml'
        }
        cleanup {
            deleteDir() /* clean up our workspace */
        }
    }
}
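The DOCKER_TAG_TO_USE value above is derived from a random UUID. As a minimal illustration (not part of the commit), the same JDK calls in Java show the shape of the resulting 12-character tag:

import java.util.UUID;

public class DockerTagSketch {
    public static void main(String[] args) {
        // Mirrors the Jenkinsfile's environment block: random UUID, lower-cased,
        // cut to its first 12 characters. subSequence() cuts by position, so the
        // UUID's first dash (index 8) survives, e.g. "f3a91c2b-44d".
        String tag = UUID.randomUUID().toString().toLowerCase().subSequence(0, 12).toString();
        System.out.println(tag);
    }
}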
.ci/dev/regression/Jenkinsfile (vendored, new file, 61 lines)
@@ -0,0 +1,61 @@
@Library('existing-build-control')
import static com.r3.build.BuildControl.killAllExistingBuildsForJob

killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())

pipeline {
    agent { label 'k8s' }
    options { timestamps() }

    environment {
        DOCKER_TAG_TO_USE = "${UUID.randomUUID().toString().toLowerCase().subSequence(0, 12)}"
        EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
        BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
    }

    stages {
        stage('Corda Pull Request - Generate Build Image') {
            steps {
                withCredentials([string(credentialsId: 'container_reg_passwd', variable: 'DOCKER_PUSH_PWD')]) {
                    sh "./gradlew " +
                            "-Dkubenetize=true " +
                            "-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
                            "-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
                            "-Ddocker.provided.tag=\"\${DOCKER_TAG_TO_USE}\"" +
                            " clean pushBuildImage"
                }
                sh "kubectl auth can-i get pods"
            }
        }

        stage('Corda Pull Request - Run Tests') {
            stage('Unit and Integration Tests') {
                steps {
                    sh "./gradlew " +
                            "-DbuildId=\"\${BUILD_ID}\" " +
                            "-Dkubenetize=true " +
                            "-Ddocker.tag=\"\${DOCKER_TAG_TO_USE}\"" +
                            " allParallelUnitAndIntegrationTest"
                }
            }
            stage('Slow Integration Tests') {
                steps {
                    sh "./gradlew " +
                            "-DbuildId=\"\${BUILD_ID}\" " +
                            "-Dkubenetize=true " +
                            "-Ddocker.tag=\"\${DOCKER_TAG_TO_USE}\"" +
                            " allParallelSlowIntegrationTest"
                }
            }
        }
    }

    post {
        always {
            junit '**/build/test-results-xml/**/*.xml'
        }
        cleanup {
            deleteDir() /* clean up our workspace */
        }
    }
}
.ci/dev/unit/Jenkinsfile (vendored, new file, 59 lines)
@@ -0,0 +1,59 @@
import static com.r3.build.BuildControl.killAllExistingBuildsForJob
@Library('existing-build-control')
import static com.r3.build.BuildControl.killAllExistingBuildsForJob

killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())

pipeline {
    agent { label 'k8s' }
    options { timestamps() }

    environment {
        DOCKER_TAG_TO_USE = "${UUID.randomUUID().toString().toLowerCase().subSequence(0, 12)}"
        EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
        BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
    }

    stages {
        stage('Corda Pull Request - Generate Build Image') {
            steps {
                withCredentials([string(credentialsId: 'container_reg_passwd', variable: 'DOCKER_PUSH_PWD')]) {
                    sh "./gradlew " +
                            "-Dkubenetize=true " +
                            "-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
                            "-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
                            "-Ddocker.provided.tag=\"\${DOCKER_TAG_TO_USE}\"" +
                            " clean pushBuildImage"
                }
                sh "kubectl auth can-i get pods"
            }
        }

        stage('Corda Pull Request - Run Tests') {
            stage('Unit Tests') {
                steps {
                    sh "./gradlew " +
                            "-DbuildId=\"\${BUILD_ID}\" " +
                            "-Dkubenetize=true " +
                            "-Ddocker.tag=\"\${DOCKER_TAG_TO_USE}\"" +
                            " allParallelUnitTest"
                    if (env.CHANGE_ID) {
                        pullRequest.createStatus(status: 'success',
                                context: 'continuous-integration/jenkins/pr-merge/unitTest',
                                description: 'Unit Tests Passed',
                                targetUrl: "${env.JOB_URL}/testResults")
                    }
                }
            }
        }
    }

    post {
        always {
            junit '**/build/test-results-xml/**/*.xml'
        }
        cleanup {
            deleteDir() /* clean up our workspace */
        }
    }
}
.idea/codeStyles/Project.xml (generated, 181 lines removed)
@@ -40,187 +40,6 @@
       <option name="EXTENDS_LIST_WRAP" value="0" />
       <option name="ASSIGNMENT_WRAP" value="0" />
       <option name="WRAP_ON_TYPING" value="0" />
-      <option name="arrangementSettings" />
-      <option name="forceArrangeMenuAvailable" value="false" />
-      <option name="CODE_STYLE_DEFAULTS" value="KOTLIN_OFFICIAL" />
-      <option name="RIGHT_MARGIN" value="140" />
-      <option name="LINE_COMMENT_AT_FIRST_COLUMN" value="true" />
-      <option name="BLOCK_COMMENT_AT_FIRST_COLUMN" value="true" />
-      <option name="LINE_COMMENT_ADD_SPACE" value="false" />
-      <option name="KEEP_LINE_BREAKS" value="true" />
-      <option name="KEEP_FIRST_COLUMN_COMMENT" value="true" />
-      <option name="KEEP_CONTROL_STATEMENT_IN_ONE_LINE" value="true" />
-      <option name="KEEP_BLANK_LINES_IN_DECLARATIONS" value="1" />
-      <option name="KEEP_BLANK_LINES_IN_CODE" value="1" />
-      <option name="KEEP_BLANK_LINES_BETWEEN_PACKAGE_DECLARATION_AND_HEADER" value="2" />
-      <option name="KEEP_BLANK_LINES_BEFORE_RBRACE" value="0" />
-      <option name="BLANK_LINES_BEFORE_PACKAGE" value="0" />
-      <option name="BLANK_LINES_AFTER_PACKAGE" value="1" />
-      <option name="BLANK_LINES_BEFORE_IMPORTS" value="1" />
-      <option name="BLANK_LINES_AFTER_IMPORTS" value="1" />
-      <option name="BLANK_LINES_AROUND_CLASS" value="1" />
-      <option name="BLANK_LINES_AROUND_FIELD" value="0" />
-      <option name="BLANK_LINES_AROUND_METHOD" value="1" />
-      <option name="BLANK_LINES_BEFORE_METHOD_BODY" value="0" />
-      <option name="BLANK_LINES_AROUND_FIELD_IN_INTERFACE" value="0" />
-      <option name="BLANK_LINES_AROUND_METHOD_IN_INTERFACE" value="1" />
-      <option name="BLANK_LINES_AFTER_CLASS_HEADER" value="0" />
-      <option name="BLANK_LINES_AFTER_ANONYMOUS_CLASS_HEADER" value="0" />
-      <option name="BLANK_LINES_BEFORE_CLASS_END" value="0" />
-      <option name="BRACE_STYLE" value="1" />
-      <option name="CLASS_BRACE_STYLE" value="1" />
-      <option name="METHOD_BRACE_STYLE" value="1" />
-      <option name="LAMBDA_BRACE_STYLE" value="1" />
-      <option name="USE_FLYING_GEESE_BRACES" value="false" />
-      <option name="DO_NOT_INDENT_TOP_LEVEL_CLASS_MEMBERS" value="false" />
-      <option name="ELSE_ON_NEW_LINE" value="false" />
-      <option name="WHILE_ON_NEW_LINE" value="false" />
-      <option name="CATCH_ON_NEW_LINE" value="false" />
-      <option name="FINALLY_ON_NEW_LINE" value="false" />
-      <option name="INDENT_CASE_FROM_SWITCH" value="true" />
-      <option name="CASE_STATEMENT_ON_NEW_LINE" value="true" />
-      <option name="INDENT_BREAK_FROM_CASE" value="true" />
-      <option name="SPECIAL_ELSE_IF_TREATMENT" value="true" />
-      <option name="ALIGN_MULTILINE_CHAINED_METHODS" value="false" />
-      <option name="ALIGN_MULTILINE_PARAMETERS" value="true" />
-      <option name="ALIGN_MULTILINE_PARAMETERS_IN_CALLS" value="false" />
-      <option name="ALIGN_MULTILINE_RESOURCES" value="true" />
-      <option name="ALIGN_MULTILINE_FOR" value="true" />
-      <option name="INDENT_WHEN_CASES" value="true" />
-      <option name="ALIGN_MULTILINE_BINARY_OPERATION" value="false" />
-      <option name="ALIGN_MULTILINE_ASSIGNMENT" value="false" />
-      <option name="ALIGN_MULTILINE_TERNARY_OPERATION" value="false" />
-      <option name="ALIGN_MULTILINE_THROWS_LIST" value="false" />
-      <option name="ALIGN_THROWS_KEYWORD" value="false" />
-      <option name="ALIGN_MULTILINE_EXTENDS_LIST" value="false" />
-      <option name="ALIGN_MULTILINE_METHOD_BRACKETS" value="false" />
-      <option name="ALIGN_MULTILINE_PARENTHESIZED_EXPRESSION" value="false" />
-      <option name="ALIGN_MULTILINE_ARRAY_INITIALIZER_EXPRESSION" value="false" />
-      <option name="ALIGN_GROUP_FIELD_DECLARATIONS" value="false" />
-      <option name="ALIGN_CONSECUTIVE_VARIABLE_DECLARATIONS" value="false" />
-      <option name="ALIGN_CONSECUTIVE_ASSIGNMENTS" value="false" />
-      <option name="ALIGN_SUBSEQUENT_SIMPLE_METHODS" value="false" />
-      <option name="SPACE_AROUND_ASSIGNMENT_OPERATORS" value="true" />
-      <option name="SPACE_AROUND_LOGICAL_OPERATORS" value="true" />
-      <option name="SPACE_AROUND_EQUALITY_OPERATORS" value="true" />
-      <option name="SPACE_AROUND_RELATIONAL_OPERATORS" value="true" />
-      <option name="SPACE_AROUND_BITWISE_OPERATORS" value="true" />
-      <option name="SPACE_AROUND_ADDITIVE_OPERATORS" value="true" />
-      <option name="SPACE_AROUND_MULTIPLICATIVE_OPERATORS" value="true" />
-      <option name="SPACE_AROUND_SHIFT_OPERATORS" value="true" />
-      <option name="SPACE_AROUND_UNARY_OPERATOR" value="false" />
-      <option name="SPACE_AROUND_LAMBDA_ARROW" value="true" />
-      <option name="SPACE_AROUND_METHOD_REF_DBL_COLON" value="false" />
-      <option name="SPACE_AFTER_COMMA" value="true" />
-      <option name="SPACE_AFTER_COMMA_IN_TYPE_ARGUMENTS" value="true" />
-      <option name="SPACE_BEFORE_COMMA" value="false" />
-      <option name="SPACE_AFTER_SEMICOLON" value="true" />
-      <option name="SPACE_BEFORE_SEMICOLON" value="false" />
-      <option name="SPACE_WITHIN_PARENTHESES" value="false" />
-      <option name="SPACE_WITHIN_METHOD_CALL_PARENTHESES" value="false" />
-      <option name="SPACE_WITHIN_EMPTY_METHOD_CALL_PARENTHESES" value="false" />
-      <option name="SPACE_WITHIN_METHOD_PARENTHESES" value="false" />
-      <option name="SPACE_WITHIN_EMPTY_METHOD_PARENTHESES" value="false" />
-      <option name="SPACE_WITHIN_IF_PARENTHESES" value="false" />
-      <option name="SPACE_WITHIN_WHILE_PARENTHESES" value="false" />
-      <option name="SPACE_WITHIN_FOR_PARENTHESES" value="false" />
-      <option name="SPACE_WITHIN_TRY_PARENTHESES" value="false" />
-      <option name="SPACE_WITHIN_CATCH_PARENTHESES" value="false" />
-      <option name="SPACE_WITHIN_SWITCH_PARENTHESES" value="false" />
-      <option name="SPACE_WITHIN_SYNCHRONIZED_PARENTHESES" value="false" />
-      <option name="SPACE_WITHIN_CAST_PARENTHESES" value="false" />
-      <option name="SPACE_WITHIN_BRACKETS" value="false" />
-      <option name="SPACE_WITHIN_BRACES" value="false" />
-      <option name="SPACE_WITHIN_ARRAY_INITIALIZER_BRACES" value="false" />
-      <option name="SPACE_WITHIN_EMPTY_ARRAY_INITIALIZER_BRACES" value="false" />
-      <option name="SPACE_AFTER_TYPE_CAST" value="true" />
-      <option name="SPACE_BEFORE_METHOD_CALL_PARENTHESES" value="false" />
-      <option name="SPACE_BEFORE_METHOD_PARENTHESES" value="false" />
-      <option name="SPACE_BEFORE_IF_PARENTHESES" value="true" />
-      <option name="SPACE_BEFORE_WHILE_PARENTHESES" value="true" />
-      <option name="SPACE_BEFORE_FOR_PARENTHESES" value="true" />
-      <option name="SPACE_BEFORE_TRY_PARENTHESES" value="true" />
-      <option name="SPACE_BEFORE_CATCH_PARENTHESES" value="true" />
-      <option name="SPACE_BEFORE_SWITCH_PARENTHESES" value="true" />
-      <option name="SPACE_BEFORE_SYNCHRONIZED_PARENTHESES" value="true" />
-      <option name="SPACE_BEFORE_CLASS_LBRACE" value="true" />
-      <option name="SPACE_BEFORE_METHOD_LBRACE" value="true" />
-      <option name="SPACE_BEFORE_IF_LBRACE" value="true" />
-      <option name="SPACE_BEFORE_ELSE_LBRACE" value="true" />
-      <option name="SPACE_BEFORE_WHILE_LBRACE" value="true" />
-      <option name="SPACE_BEFORE_FOR_LBRACE" value="true" />
-      <option name="SPACE_BEFORE_DO_LBRACE" value="true" />
-      <option name="SPACE_BEFORE_SWITCH_LBRACE" value="true" />
-      <option name="SPACE_BEFORE_TRY_LBRACE" value="true" />
-      <option name="SPACE_BEFORE_CATCH_LBRACE" value="true" />
-      <option name="SPACE_BEFORE_FINALLY_LBRACE" value="true" />
-      <option name="SPACE_BEFORE_SYNCHRONIZED_LBRACE" value="true" />
-      <option name="SPACE_BEFORE_ARRAY_INITIALIZER_LBRACE" value="false" />
-      <option name="SPACE_BEFORE_ANNOTATION_ARRAY_INITIALIZER_LBRACE" value="false" />
-      <option name="SPACE_BEFORE_ELSE_KEYWORD" value="true" />
-      <option name="SPACE_BEFORE_WHILE_KEYWORD" value="true" />
-      <option name="SPACE_BEFORE_CATCH_KEYWORD" value="true" />
-      <option name="SPACE_BEFORE_FINALLY_KEYWORD" value="true" />
-      <option name="SPACE_BEFORE_QUEST" value="true" />
-      <option name="SPACE_AFTER_QUEST" value="true" />
-      <option name="SPACE_BEFORE_COLON" value="true" />
-      <option name="SPACE_AFTER_COLON" value="true" />
-      <option name="SPACE_BEFORE_TYPE_PARAMETER_LIST" value="false" />
-      <option name="CALL_PARAMETERS_WRAP" value="0" />
-      <option name="PREFER_PARAMETERS_WRAP" value="false" />
-      <option name="CALL_PARAMETERS_LPAREN_ON_NEXT_LINE" value="false" />
-      <option name="CALL_PARAMETERS_RPAREN_ON_NEXT_LINE" value="false" />
-      <option name="METHOD_PARAMETERS_WRAP" value="0" />
-      <option name="METHOD_PARAMETERS_LPAREN_ON_NEXT_LINE" value="false" />
-      <option name="METHOD_PARAMETERS_RPAREN_ON_NEXT_LINE" value="false" />
-      <option name="RESOURCE_LIST_WRAP" value="0" />
-      <option name="RESOURCE_LIST_LPAREN_ON_NEXT_LINE" value="false" />
-      <option name="RESOURCE_LIST_RPAREN_ON_NEXT_LINE" value="false" />
-      <option name="EXTENDS_LIST_WRAP" value="0" />
-      <option name="THROWS_LIST_WRAP" value="0" />
-      <option name="EXTENDS_KEYWORD_WRAP" value="0" />
-      <option name="THROWS_KEYWORD_WRAP" value="0" />
-      <option name="METHOD_CALL_CHAIN_WRAP" value="1" />
-      <option name="WRAP_FIRST_METHOD_IN_CALL_CHAIN" value="false" />
-      <option name="PARENTHESES_EXPRESSION_LPAREN_WRAP" value="false" />
-      <option name="PARENTHESES_EXPRESSION_RPAREN_WRAP" value="false" />
-      <option name="BINARY_OPERATION_WRAP" value="0" />
-      <option name="BINARY_OPERATION_SIGN_ON_NEXT_LINE" value="false" />
-      <option name="TERNARY_OPERATION_WRAP" value="0" />
-      <option name="TERNARY_OPERATION_SIGNS_ON_NEXT_LINE" value="false" />
-      <option name="MODIFIER_LIST_WRAP" value="false" />
-      <option name="KEEP_SIMPLE_BLOCKS_IN_ONE_LINE" value="false" />
-      <option name="KEEP_SIMPLE_METHODS_IN_ONE_LINE" value="false" />
-      <option name="KEEP_SIMPLE_LAMBDAS_IN_ONE_LINE" value="false" />
-      <option name="KEEP_SIMPLE_CLASSES_IN_ONE_LINE" value="false" />
-      <option name="KEEP_MULTIPLE_EXPRESSIONS_IN_ONE_LINE" value="false" />
-      <option name="FOR_STATEMENT_WRAP" value="0" />
-      <option name="FOR_STATEMENT_LPAREN_ON_NEXT_LINE" value="false" />
-      <option name="FOR_STATEMENT_RPAREN_ON_NEXT_LINE" value="false" />
-      <option name="ARRAY_INITIALIZER_WRAP" value="0" />
-      <option name="ARRAY_INITIALIZER_LBRACE_ON_NEXT_LINE" value="false" />
-      <option name="ARRAY_INITIALIZER_RBRACE_ON_NEXT_LINE" value="false" />
-      <option name="ASSIGNMENT_WRAP" value="0" />
-      <option name="PLACE_ASSIGNMENT_SIGN_ON_NEXT_LINE" value="false" />
-      <option name="LABELED_STATEMENT_WRAP" value="2" />
-      <option name="WRAP_COMMENTS" value="false" />
-      <option name="ASSERT_STATEMENT_WRAP" value="0" />
-      <option name="ASSERT_STATEMENT_COLON_ON_NEXT_LINE" value="false" />
-      <option name="IF_BRACE_FORCE" value="0" />
-      <option name="DOWHILE_BRACE_FORCE" value="0" />
-      <option name="WHILE_BRACE_FORCE" value="0" />
-      <option name="FOR_BRACE_FORCE" value="0" />
-      <option name="WRAP_LONG_LINES" value="false" />
-      <option name="METHOD_ANNOTATION_WRAP" value="2" />
-      <option name="CLASS_ANNOTATION_WRAP" value="2" />
-      <option name="FIELD_ANNOTATION_WRAP" value="2" />
-      <option name="PARAMETER_ANNOTATION_WRAP" value="0" />
-      <option name="VARIABLE_ANNOTATION_WRAP" value="0" />
-      <option name="SPACE_BEFORE_ANOTATION_PARAMETER_LIST" value="false" />
-      <option name="SPACE_WITHIN_ANNOTATION_PARENTHESES" value="false" />
-      <option name="ENUM_CONSTANTS_WRAP" value="0" />
-      <option name="FORCE_REARRANGE_MODE" value="0" />
-      <option name="WRAP_ON_TYPING" value="0" />
     </codeStyleSettings>
   </code_scheme>
 </component>
.idea/codeStyles/codeStyleConfig.xml (generated, 1 line added)
@@ -1,5 +1,6 @@
 <component name="ProjectCodeStyleConfiguration">
   <state>
     <option name="USE_PER_PROJECT_SETTINGS" value="true" />
+    <option name="PREFERRED_PROJECT_CODE_STYLE" value="Default" />
   </state>
 </component>
Jenkinsfile (vendored, 52 lines changed)
@@ -1,4 +1,7 @@
-killall_jobs()
+@Library('existing-build-control')
+import static com.r3.build.BuildControl.killAllExistingBuildsForJob
+
+killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())

 pipeline {
     agent { label 'k8s' }
@@ -35,48 +38,27 @@ pipeline {
                             "-Ddocker.tag=\"\${DOCKER_TAG_TO_USE}\"" +
                             " allParallelIntegrationTest"
                 }
-                post {
-                    always {
-                        junit '**/build/test-results-xml/**/*.xml'
-                    }
-                }
-            }
-//        stage('Unit Tests') {
-//            steps {
-//                sh "./gradlew " +
-//                        "-DbuildId=\"\${BUILD_ID}\" " +
-//                        "-Dkubenetize=true " +
-//                        "-Ddocker.tag=\"\${DOCKER_TAG_TO_USE}\"" +
-//                        " allParallelUnitTest"
-//            }
-//            post {
-//                always {
-//                    junit '**/build/test-results-xml/**/*.xml'
-//                }
-//            }
-//        }
-    }
-}
-
-@NonCPS
-def killall_jobs() {
-    def jobname = env.JOB_NAME
-    def buildnum = env.BUILD_NUMBER.toInteger()
-
-    def job = Jenkins.instance.getItemByFullName(jobname)
-    for (build in job.builds) {
-        if (!build.isBuilding()) {
-            continue;
-        }
-
-        if (buildnum == build.getNumber().toInteger()) {
-            continue
-        }
-
-        echo "Killing task = ${build}"
-        build.doStop();
-    }
-}
+            }
+            stage('Unit Tests') {
+                steps {
+                    sh "./gradlew " +
+                            "-DbuildId=\"\${BUILD_ID}\" " +
+                            "-Dkubenetize=true " +
+                            "-Ddocker.tag=\"\${DOCKER_TAG_TO_USE}\"" +
+                            " allParallelUnitTest"
+                }
+            }
+        }
+    }
+
+    post {
+        always {
+            junit '**/build/test-results-xml/**/*.xml'
+        }
+        cleanup {
+            deleteDir() /* clean up our workspace */
+        }
+    }
+}
README.md
@@ -1,5 +1,5 @@
 <p align="center">
-  <img src="https://www.corda.net/wp-content/uploads/2016/11/fg005_corda_b.png" alt="Corda" width="500">
+  <img src="https://www.corda.net/wp-content/themes/corda/assets/images/crda-logo-big.svg" alt="Corda" width="500">
 </p>

 <a href="https://ci-master.corda.r3cev.com/viewType.html?buildTypeId=Corda_CordaBuild&tab=buildTypeStatusDiv&guest=1"><img src="https://ci.corda.r3cev.com/app/rest/builds/buildType:Corda_CordaBuild/statusIcon"/></a> [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
build.gradle (42 lines changed)
@@ -1,8 +1,10 @@
 import net.corda.testing.DistributedTesting
+import net.corda.testing.ImageBuilding
+import net.corda.testing.Distribution
 import net.corda.testing.ParallelTestGroup

-import static org.gradle.api.JavaVersion.VERSION_1_8
 import static org.gradle.api.JavaVersion.VERSION_11
+import static org.gradle.api.JavaVersion.VERSION_1_8

 buildscript {
     // For sharing constants between builds
@@ -25,10 +27,12 @@ buildscript {
     if (JavaVersion.current() == JavaVersion.VERSION_11) {
         ext.quasar_version = constants.getProperty("quasarVersion11")
         ext.quasar_classifier = constants.getProperty("quasarClassifier11")
+        ext.jdkClassifier = constants.getProperty("jdkClassifier11")
     }
     else {
         ext.quasar_version = constants.getProperty("quasarVersion")
         ext.quasar_classifier = constants.getProperty("quasarClassifier")
+        ext.jdkClassifier = constants.getProperty("jdkClassifier")
     }
     ext.quasar_exclusions = [
             'co.paralleluniverse**',
@@ -121,8 +125,7 @@ buildscript {
         // has been compiled by a more recent version of the Java Runtime (class file version 55.0)
         ext.fontawesomefx_commons_version = '11.0'
         ext.fontawesomefx_fontawesome_version = '4.7.0-11'
-    }
-    else {
+    } else {
         ext.fontawesomefx_commons_version = '8.15'
         ext.fontawesomefx_fontawesome_version = '4.7.0-5'
     }
@@ -245,7 +248,7 @@ allprojects {

     jacoco {
         // JDK11 official support (https://github.com/jacoco/jacoco/releases/tag/v0.8.3)
         toolVersion = "0.8.3"
     }

     tasks.withType(JavaCompile) {
@@ -316,6 +319,12 @@ allprojects {
         }
     }

+    jar {
+        // JDK11 built and published artifacts to include classifier
+        if (jdkClassifier != null)
+            archiveClassifier = jdkClassifier
+    }
+
     group 'net.corda'
     version "$corda_release_version"

@@ -496,7 +505,8 @@ bintrayConfig {
             'corda-common-configuration-parsing',
             'corda-common-validation',
             'corda-common-logging',
-            'corda-tools-network-builder'
+            'corda-tools-network-builder',
+            'corda-tools-checkpoint-agent'
     ]
     license {
         name = 'Apache-2.0'
@@ -588,25 +598,37 @@ buildScan {

 task allParallelIntegrationTest(type: ParallelTestGroup) {
     testGroups "integrationTest"
-    numberOfShards 15
+    numberOfShards 10
     streamOutput false
     coresPerFork 6
     memoryInGbPerFork 10
+    distribute Distribution.CLASS
+}
+task allParallelSlowIntegrationTest(type: ParallelTestGroup) {
+    testGroups "slowIntegrationTest"
+    numberOfShards 4
+    streamOutput false
+    coresPerFork 6
+    memoryInGbPerFork 10
+    distribute Distribution.CLASS
 }
 task allParallelUnitTest(type: ParallelTestGroup) {
     testGroups "test"
-    numberOfShards 15
+    numberOfShards 10
     streamOutput false
-    coresPerFork 3
+    coresPerFork 5
     memoryInGbPerFork 6
+    distribute Distribution.CLASS
 }
 task allParallelUnitAndIntegrationTest(type: ParallelTestGroup) {
     testGroups "test", "integrationTest"
-    numberOfShards 20
-    streamOutput false
+    numberOfShards 15
+    streamOutput true
     coresPerFork 6
     memoryInGbPerFork 10
+    distribute Distribution.CLASS
 }
+apply plugin: ImageBuilding
 apply plugin: DistributedTesting
@@ -39,5 +39,8 @@ dependencies {
     compile 'commons-codec:commons-codec:1.13'
     compile "io.github.classgraph:classgraph:$class_graph_version"
     compile "com.bmuschko:gradle-docker-plugin:5.0.0"
+    compile 'org.apache.commons:commons-csv:1.1'
+    compile group: 'org.jetbrains', name: 'annotations', version: '13.0'
     testCompile "junit:junit:$junit_version"
+    testCompile group: 'org.hamcrest', name: 'hamcrest-all', version: '1.3'
 }
BucketingAllocator.java (new file, 164 lines)
@@ -0,0 +1,164 @@
package net.corda.testing;

//Why Java?! because sometimes types are useful.

import groovy.lang.Tuple2;
import org.gradle.api.tasks.TaskAction;
import org.jetbrains.annotations.NotNull;

import java.util.*;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class BucketingAllocator {

    private List<Tuple2<TestLister, Object>> sources = new ArrayList<>();
    private final List<TestsForForkContainer> forkContainers;
    private final Supplier<List<Tuple2<String, Double>>> timedTestsProvider;

    public BucketingAllocator(Integer forkCount, Supplier<List<Tuple2<String, Double>>> timedTestsProvider) {
        this.forkContainers = IntStream.range(0, forkCount).mapToObj(TestsForForkContainer::new).collect(Collectors.toList());
        this.timedTestsProvider = timedTestsProvider;
    }

    public void addSource(TestLister source, Object testTask) {
        sources.add(new Tuple2<>(source, testTask));
    }

    public List<String> getTestsForForkAndTestTask(Integer fork, Object testTask) {
        return forkContainers.get(fork).getTestsForTask(testTask);
    }

    @TaskAction
    public void generateTestPlan() {
        List<Tuple2<String, Double>> allTestsFromCSV = timedTestsProvider.get();
        List<Tuple2<String, Object>> allDiscoveredTests = getTestsOnClasspathOfTestingTasks();
        List<TestBucket> matchedTests = matchClasspathTestsToCSV(allTestsFromCSV, allDiscoveredTests);

        //use greedy algo - for each testbucket find the currently smallest container and add to it
        allocateTestsToForks(matchedTests);
        forkContainers.forEach(TestsForForkContainer::freeze);

        printSummary();
    }

    private void printSummary() {
        forkContainers.forEach(container -> {
            System.out.println("####### TEST PLAN SUMMARY ( " + container.forkIdx + " ) #######");
            System.out.println("Duration: " + container.getCurrentDuration());
            System.out.println("Number of tests: " + container.testsForFork.stream().mapToInt(b -> b.foundTests.size()).sum());
            System.out.println("Tests to Run: ");
            container.testsForFork.forEach(tb -> {
                System.out.println(tb.nameWithAsterix);
                tb.foundTests.forEach(ft -> System.out.println("\t" + ft.getFirst() + ", " + ft.getSecond()));
            });
        });
    }

    private void allocateTestsToForks(@NotNull List<TestBucket> matchedTests) {
        matchedTests.forEach(matchedTestBucket -> {
            TestsForForkContainer smallestContainer = Collections.min(forkContainers, Comparator.comparing(TestsForForkContainer::getCurrentDuration));
            smallestContainer.addBucket(matchedTestBucket);
        });
    }

    private List<TestBucket> matchClasspathTestsToCSV(List<Tuple2<String, Double>> allTestsFromCSV, @NotNull List<Tuple2<String, Object>> allDiscoveredTests) {
        return allDiscoveredTests.stream().map(tuple -> {
            String testName = tuple.getFirst();
            Object task = tuple.getSecond();
            String noAsterixName = testName.substring(0, testName.length() - 1);
            //2DO [can this filtering algorithm be improved - the test names are sorted, it should be possible to do something using binary search]
            List<Tuple2<String, Double>> matchingTests = allTestsFromCSV.stream().filter(testFromCSV -> testFromCSV.getFirst().startsWith(noAsterixName)).collect(Collectors.toList());
            return new TestBucket(task, testName, noAsterixName, matchingTests);
        }).sorted(Comparator.comparing(TestBucket::getDuration).reversed()).collect(Collectors.toList());
    }

    private List<Tuple2<String, Object>> getTestsOnClasspathOfTestingTasks() {
        return sources.stream().map(source -> {
            TestLister lister = source.getFirst();
            Object testTask = source.getSecond();
            return lister.getAllTestsDiscovered().stream().map(test -> new Tuple2<>(test, testTask)).collect(Collectors.toList());
        }).flatMap(Collection::stream).collect(Collectors.toList());
    }

    public static class TestBucket {
        final Object testTask;
        final String nameWithAsterix;
        final String nameWithoutAsterix;
        final List<Tuple2<String, Double>> foundTests;
        final Double duration;

        public TestBucket(Object testTask, String nameWithAsterix, String nameWithoutAsterix, List<Tuple2<String, Double>> foundTests) {
            this.testTask = testTask;
            this.nameWithAsterix = nameWithAsterix;
            this.nameWithoutAsterix = nameWithoutAsterix;
            this.foundTests = foundTests;
            duration = Math.max(foundTests.stream().mapToDouble(tp -> Math.max(tp.getSecond(), 10)).sum(), 10);
        }

        public Double getDuration() {
            return duration;
        }

        @Override
        public String toString() {
            return "TestBucket{" +
                    "testTask=" + testTask +
                    ", nameWithAsterix='" + nameWithAsterix + '\'' +
                    ", nameWithoutAsterix='" + nameWithoutAsterix + '\'' +
                    ", foundTests=" + foundTests +
                    ", duration=" + duration +
                    '}';
        }
    }

    public static class TestsForForkContainer {
        private Double runningDuration = 0.0;
        private final Integer forkIdx;
        private final List<TestBucket> testsForFork = Collections.synchronizedList(new ArrayList<>());
        private final Map<Object, List<TestBucket>> frozenTests = new HashMap<>();

        public TestsForForkContainer(Integer forkIdx) {
            this.forkIdx = forkIdx;
        }

        public void addBucket(TestBucket tb) {
            this.testsForFork.add(tb);
            this.runningDuration = runningDuration + tb.duration;
        }

        public Double getCurrentDuration() {
            return runningDuration;
        }

        public void freeze() {
            testsForFork.forEach(tb -> {
                frozenTests.computeIfAbsent(tb.testTask, i -> new ArrayList<>()).add(tb);
            });
        }

        public List<String> getTestsForTask(Object task) {
            return frozenTests.getOrDefault(task, Collections.emptyList()).stream().map(it -> it.nameWithAsterix).collect(Collectors.toList());
        }

        public List<TestBucket> getBucketsForFork() {
            return new ArrayList<>(testsForFork);
        }

        @Override
        public String toString() {
            return "TestsForForkContainer{" +
                    "runningDuration=" + runningDuration +
                    ", forkIdx=" + forkIdx +
                    ", testsForFork=" + testsForFork +
                    ", frozenTests=" + frozenTests +
                    '}';
        }
    }
}
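The allocator above is a greedy longest-processing-time scheduler: buckets are sorted by recorded duration, longest first, and each is handed to the fork with the smallest running total. A minimal sketch of driving it (not part of the commit; the com.example names and timings are invented) uses a lambda for the single-method TestLister interface, exactly as the unit test further down does:

import groovy.lang.Tuple2;
import java.util.Arrays;
import java.util.List;
import java.util.function.Supplier;

public class AllocatorSketch {
    public static void main(String[] args) {
        // Hypothetical timings: SlowTest dominates, so the greedy pass should
        // place it alone on one fork and the two quick tests together on the other.
        Supplier<List<Tuple2<String, Double>>> timings = () -> Arrays.asList(
                new Tuple2<>("com.example.SlowTest.heavy", 90_000.0),
                new Tuple2<>("com.example.QuickTestA.fast", 1_000.0),
                new Tuple2<>("com.example.QuickTestB.fast", 1_200.0));

        BucketingAllocator allocator = new BucketingAllocator(2, timings);

        Object testTask = new Object(); // stands in for the Gradle Test task
        allocator.addSource(() -> Arrays.asList(
                "com.example.SlowTest*", "com.example.QuickTestA*", "com.example.QuickTestB*"), testTask);

        allocator.generateTestPlan();
        System.out.println("fork 0: " + allocator.getTestsForForkAndTestTask(0, testTask));
        System.out.println("fork 1: " + allocator.getTestsForForkAndTestTask(1, testTask));
    }
}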
BucketingAllocatorTask.java (new file, 69 lines)
@@ -0,0 +1,69 @@
package net.corda.testing;

import groovy.lang.Tuple2;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVRecord;
import org.gradle.api.DefaultTask;
import org.gradle.api.tasks.TaskAction;
import org.gradle.api.tasks.testing.Test;

import javax.inject.Inject;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Objects;
import java.util.function.Supplier;
import java.util.stream.Collectors;

public class BucketingAllocatorTask extends DefaultTask {
    private static final String DEFAULT_TESTING_TEST_TIMES_CSV = "testing/test-times.csv";
    private final BucketingAllocator allocator;

    @Inject
    public BucketingAllocatorTask(Integer forkCount) {
        Supplier<List<Tuple2<String, Double>>> defaultTestCSV = () -> {
            try {
                FileReader csvSource = new FileReader(new File(BucketingAllocatorTask.this.getProject().getRootDir(), DEFAULT_TESTING_TEST_TIMES_CSV));
                return fromCSV(csvSource);
            } catch (IOException e) {
                return Collections.emptyList();
            }
        };
        this.allocator = new BucketingAllocator(forkCount, defaultTestCSV);
    }

    public void addSource(TestLister source, Test testTask) {
        allocator.addSource(source, testTask);
        this.dependsOn(source);
    }

    public List<String> getTestsForForkAndTestTask(Integer fork, Test testTask) {
        return allocator.getTestsForForkAndTestTask(fork, testTask);
    }

    @TaskAction
    public void allocate() {
        allocator.generateTestPlan();
    }

    public static List<Tuple2<String, Double>> fromCSV(Reader reader) throws IOException {
        String name = "Test Name";
        String duration = "Duration(ms)";
        List<CSVRecord> records = CSVFormat.DEFAULT.withHeader().parse(reader).getRecords();
        return records.stream().map(record -> {
            try {
                String testName = record.get(name);
                String testDuration = record.get(duration);
                return new Tuple2<>(testName, Math.max(Double.parseDouble(testDuration), 10));
            } catch (IllegalArgumentException | IllegalStateException e) {
                return null;
            }
        }).filter(Objects::nonNull).sorted(Comparator.comparing(Tuple2::getFirst)).collect(Collectors.toList());
    }
}
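fromCSV() expects a header row with the exact column names Test Name and Duration(ms); rows that cannot be parsed are mapped to null and filtered out, and every duration is floored at 10. A small sketch (not from the commit; the rows are invented) feeds it an in-memory CSV:

import groovy.lang.Tuple2;
import java.io.IOException;
import java.io.StringReader;
import java.util.List;

public class CsvSketch {
    public static void main(String[] args) throws IOException {
        // Headers must match the constants inside fromCSV(); the rows are invented.
        String csv = "Test Name,Duration(ms)\n"
                + "com.example.QuickTest.fast,250\n"
                + "com.example.SlowTest.heavy,90000\n"
                + "bad-row-without-duration\n"; // dropped: record.get() throws, mapped to null

        List<Tuple2<String, Double>> timings = BucketingAllocatorTask.fromCSV(new StringReader(csv));
        // Durations are floored at 10 and the result is sorted by test name.
        timings.forEach(t -> System.out.println(t.getFirst() + " -> " + t.getSecond()));
    }
}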
DistributedTesting.groovy
@@ -10,6 +10,7 @@ import org.gradle.api.tasks.testing.Test
 */
 class DistributedTesting implements Plugin<Project> {

+
     static def getPropertyAsInt(Project proj, String property, Integer defaultValue) {
         return proj.hasProperty(property) ? Integer.parseInt(proj.property(property).toString()) : defaultValue
     }
@@ -17,10 +18,16 @@ class DistributedTesting implements Plugin<Project> {
     @Override
     void apply(Project project) {
         if (System.getProperty("kubenetize") != null) {
+
+            def forks = getPropertyAsInt(project, "dockerForks", 1)
+
             ensureImagePluginIsApplied(project)
             ImageBuilding imagePlugin = project.plugins.getPlugin(ImageBuilding)
             DockerPushImage imageBuildingTask = imagePlugin.pushTask
             String providedTag = System.getProperty("docker.tag")
+            BucketingAllocatorTask globalAllocator = project.tasks.create("bucketingAllocator", BucketingAllocatorTask, forks)
+
+            def requestedTasks = project.gradle.startParameter.taskNames.collect { project.tasks.findByPath(it) }
+
             //in each subproject
             //1. add the task to determine all tests within the module
@@ -28,9 +35,19 @@ class DistributedTesting implements Plugin<Project> {
             //3. KubesTest will invoke these test tasks in a parallel fashion on a remote k8s cluster
             project.subprojects { Project subProject ->
                 subProject.tasks.withType(Test) { Test task ->
-                    ListTests testListerTask = createTestListingTasks(task, subProject)
-                    Test modifiedTestTask = modifyTestTaskForParallelExecution(subProject, task, testListerTask)
-                    KubesTest parallelTestTask = generateParallelTestingTask(subProject, task, imageBuildingTask, providedTag)
+                    println "Evaluating ${task.getPath()}"
+                    if (task in requestedTasks && !task.hasProperty("ignoreForDistribution")) {
+                        println "Modifying ${task.getPath()}"
+                        ListTests testListerTask = createTestListingTasks(task, subProject)
+                        globalAllocator.addSource(testListerTask, task)
+                        Test modifiedTestTask = modifyTestTaskForParallelExecution(subProject, task, globalAllocator)
+                    } else {
+                        println "Skipping modification of ${task.getPath()} as it's not scheduled for execution"
+                    }
+                    if (!task.hasProperty("ignoreForDistribution")) {
+                        KubesTest parallelTestTask = generateParallelTestingTask(subProject, task, imageBuildingTask, providedTag)
+                    }
                 }
             }

@@ -62,6 +79,7 @@ class DistributedTesting implements Plugin<Project> {
             taskToExecuteName = testGrouping.groups.join("And")
             memoryGbPerFork = testGrouping.gbOfMemory
             numberOfCoresPerFork = testGrouping.coresToUse
+            distribution = testGrouping.distribution
             doFirst {
                 dockerTag = dockerTag = providedTag ? ImageBuilding.registryName + ":" + providedTag : (imageBuildingTask.imageName.get() + ":" + imageBuildingTask.tag.get())
             }
@@ -101,24 +119,21 @@ class DistributedTesting implements Plugin<Project> {
         return createdParallelTestTask as KubesTest
     }

-    private Test modifyTestTaskForParallelExecution(Project subProject, Test task, ListTests testListerTask) {
-        subProject.logger.info("modifying task: ${task.getPath()} to depend on task ${testListerTask.getPath()}")
+    private Test modifyTestTaskForParallelExecution(Project subProject, Test task, BucketingAllocatorTask globalAllocator) {
+        subProject.logger.info("modifying task: ${task.getPath()} to depend on task ${globalAllocator.getPath()}")
         def reportsDir = new File(new File(subProject.rootProject.getBuildDir(), "test-reports"), subProject.name + "-" + task.name)
         task.configure {
-            dependsOn testListerTask
+            dependsOn globalAllocator
             binResultsDir new File(reportsDir, "binary")
             reports.junitXml.destination new File(reportsDir, "xml")
             maxHeapSize = "6g"
             doFirst {
                 filter {
                     def fork = getPropertyAsInt(subProject, "dockerFork", 0)
-                    def forks = getPropertyAsInt(subProject, "dockerForks", 1)
-                    def shuffleSeed = 42
-                    subProject.logger.info("requesting tests to include in testing task ${task.getPath()} (${fork}, ${forks}, ${shuffleSeed})")
-                    List<String> includes = testListerTask.getTestsForFork(
+                    subProject.logger.info("requesting tests to include in testing task ${task.getPath()} (idx: ${fork})")
+                    List<String> includes = globalAllocator.getTestsForForkAndTestTask(
                         fork,
-                        forks,
-                        shuffleSeed)
+                        task)
                     subProject.logger.info "got ${includes.size()} tests to include into testing task ${task.getPath()}"

                     if (includes.size() == 0) {
ImageBuilding.groovy
@@ -48,6 +48,8 @@ class ImageBuilding implements Plugin<Project> {
             if (!mavenDir.exists()) {
                 mavenDir.mkdirs()
             }
+
+            logger.info("Will use: ${gradleDir.absolutePath} for caching gradle artifacts")
         }

         dependsOn buildDockerImageForSource
KubesTest.groovy
@@ -39,6 +39,8 @@ class KubesTest extends DefaultTask {
     int numberOfPods = 20
     int timeoutInMinutesForPodToStart = 60

+    Distribution distribution = Distribution.METHOD
+
     @TaskAction
     void runTestsOnKubes() {

@@ -78,7 +80,8 @@ class KubesTest extends DefaultTask {
         }

         List<CompletableFuture<KubePodResult>> futures = IntStream.range(0, numberOfPods).mapToObj({ i ->
-            String podName = (taskToExecuteName + "-" + stableRunId + suffix + i).toLowerCase()
+            String potentialPodName = (taskToExecuteName + "-" + stableRunId + suffix + i).toLowerCase()
+            String podName = potentialPodName.substring(0, Math.min(potentialPodName.size(), 62))
             runBuild(client, namespace, numberOfPods, i, podName, printOutput, 3)
         }).collect(Collectors.toList())
         this.testOutput = Collections.synchronizedList(futures.collect { it -> it.get().binaryResults }.flatten())
@@ -115,10 +118,11 @@ class KubesTest extends DefaultTask {
         CompletableFuture<KubePodResult> waiter = new CompletableFuture<>()
         ExecListener execListener = buildExecListenerForPod(podName, errChannelStream, waiter, result)
         stdOutIs.connect(stdOutOs)
+        String[] buildCommand = getBuildCommand(numberOfPods, podIdx)
         ExecWatch execWatch = client.pods().inNamespace(namespace).withName(podName)
                 .writingOutput(stdOutOs)
                 .writingErrorChannel(errChannelStream)
-                .usingListener(execListener).exec(getBuildCommand(numberOfPods, podIdx))
+                .usingListener(execListener).exec(buildCommand)

         startLogPumping(outputFile, stdOutIs, podIdx, printOutput)
         KubePodResult execResult = waiter.join()
@@ -183,20 +187,21 @@ class KubesTest extends DefaultTask {
     ExecListener buildExecListenerForPod(podName, errChannelStream, CompletableFuture<KubePodResult> waitingFuture, KubePodResult result) {

         new ExecListener() {
+            final Long start = System.currentTimeMillis()
             @Override
             void onOpen(Response response) {
-                project.logger.lifecycle("Build started on pod " + podName)
+                project.logger.lifecycle("Build started on pod $podName")
             }

             @Override
             void onFailure(Throwable t, Response response) {
-                project.logger.lifecycle("Received error from rom pod " + podName)
+                project.logger.lifecycle("Received error from rom pod $podName")
                 waitingFuture.completeExceptionally(t)
             }

             @Override
             void onClose(int code, String reason) {
-                project.logger.lifecycle("Received onClose() from pod " + podName + " with returnCode=" + code)
+                project.logger.lifecycle("Received onClose() from pod ${podName}, build took: ${(System.currentTimeMillis() - start) / 1000} seconds")
                 try {
                     def errChannelContents = errChannelStream.toString()
                     Status status = Serialization.unmarshal(errChannelContents, Status.class);
@@ -277,7 +282,7 @@ class KubesTest extends DefaultTask {
                 "let x=1 ; while [ \${x} -ne 0 ] ; do echo \"Waiting for DNS\" ; curl services.gradle.org > /dev/null 2>&1 ; x=\$? ; sleep 1 ; done ; " +
                 "cd /tmp/source ; " +
                 "let y=1 ; while [ \${y} -ne 0 ] ; do echo \"Preparing build directory\" ; ./gradlew testClasses integrationTestClasses --parallel 2>&1 ; y=\$? ; sleep 1 ; done ;" +
-                "./gradlew -Dkubenetize -PdockerFork=" + podIdx + " -PdockerForks=" + numberOfPods + " $fullTaskToExecutePath --info 2>&1 ;" +
+                "./gradlew -D${ListTests.DISTRIBUTION_PROPERTY}=${distribution.name()} -Dkubenetize -PdockerFork=" + podIdx + " -PdockerForks=" + numberOfPods + " $fullTaskToExecutePath --info 2>&1 ;" +
                 "let rs=\$? ; sleep 10 ; exit \${rs}"]
     }
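The pod-name change in the second hunk exists because Kubernetes object names must fit in a DNS-1123 label, which caps them at 63 characters; the commit truncates the generated name to 62. A standalone Java rendering of the same guard (the helper name is ours, not the commit's):

public class PodNameSketch {
    // Same truncation as KubesTest: cap the generated pod name at 62 characters
    // so it stays inside the 63-character Kubernetes DNS-1123 label limit.
    static String podName(String taskToExecuteName, String stableRunId, String suffix, int i) {
        String potential = (taskToExecuteName + "-" + stableRunId + suffix + i).toLowerCase();
        return potential.substring(0, Math.min(potential.length(), 62));
    }

    public static void main(String[] args) {
        // Invented inputs, long enough to trigger the cap.
        String name = podName("allparallelunitandintegrationtestallparallelunitandintegrationtest", "4f2a", "-", 7);
        System.out.println(name.length()); // 62
    }
}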
@ -37,10 +37,17 @@ class ListShufflerAndAllocator {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
class ListTests extends DefaultTask {
|
interface TestLister {
|
||||||
|
+    List<String> getAllTestsDiscovered()
+}
+
+class ListTests extends DefaultTask implements TestLister {
+
+    public static final String DISTRIBUTION_PROPERTY = "distributeBy"
 
     FileCollection scanClassPath
     List<String> allTests
 
+    Distribution distribution = System.getProperty(DISTRIBUTION_PROPERTY) ? Distribution.valueOf(System.getProperty(DISTRIBUTION_PROPERTY)) : Distribution.METHOD
+
     def getTestsForFork(int fork, int forks, Integer seed) {
         def gitSha = new BigInteger(project.hasProperty("corda_revision") ? project.property("corda_revision").toString() : "0", 36)
@@ -51,24 +58,53 @@ class ListTests extends DefaultTask {
         return new ListShufflerAndAllocator(allTests).getTestsForFork(fork, forks, seedToUse)
     }
 
+    @Override
+    public List<String> getAllTestsDiscovered() {
+        return new ArrayList<>(allTests)
+    }
+
     @TaskAction
     def discoverTests() {
-        Collection<String> results = new ClassGraph()
-                .enableClassInfo()
-                .enableMethodInfo()
-                .ignoreClassVisibility()
-                .ignoreMethodVisibility()
-                .enableAnnotationInfo()
-                .overrideClasspath(scanClassPath)
-                .scan()
-                .getClassesWithMethodAnnotation("org.junit.Test")
-                .collect { c -> (c.getSubclasses() + Collections.singletonList(c)) }
-                .flatten()
-                .collect { ClassInfo c ->
-                    c.getMethodInfo().filter { m -> m.hasAnnotation("org.junit.Test") }.collect { m -> c.name + "." + m.name + "*" }
-                }.flatten()
-                .toSet()
-
-        this.allTests = results.stream().sorted().collect(Collectors.toList())
+        switch (distribution) {
+            case Distribution.METHOD:
+                Collection<String> results = new ClassGraph()
+                        .enableClassInfo()
+                        .enableMethodInfo()
+                        .ignoreClassVisibility()
+                        .ignoreMethodVisibility()
+                        .enableAnnotationInfo()
+                        .overrideClasspath(scanClassPath)
+                        .scan()
+                        .getClassesWithMethodAnnotation("org.junit.Test")
+                        .collect { c -> (c.getSubclasses() + Collections.singletonList(c)) }
+                        .flatten()
+                        .collect { ClassInfo c ->
+                            c.getMethodInfo().filter { m -> m.hasAnnotation("org.junit.Test") }.collect { m -> c.name + "." + m.name + "*" }
+                        }.flatten()
+                        .toSet()
+                this.allTests = results.stream().sorted().collect(Collectors.toList())
+                break
+            case Distribution.CLASS:
+                Collection<String> results = new ClassGraph()
+                        .enableClassInfo()
+                        .enableMethodInfo()
+                        .ignoreClassVisibility()
+                        .ignoreMethodVisibility()
+                        .enableAnnotationInfo()
+                        .overrideClasspath(scanClassPath)
+                        .scan()
+                        .getClassesWithMethodAnnotation("org.junit.Test")
+                        .collect { c -> (c.getSubclasses() + Collections.singletonList(c)) }
+                        .flatten()
+                        .collect { ClassInfo c -> c.name + "*" }.flatten()
+                        .toSet()
+                this.allTests = results.stream().sorted().collect(Collectors.toList())
+                break
+        }
     }
 }
+
+public enum Distribution {
+    CLASS, METHOD
+}
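As a usage sketch, not taken from this commit: the allocation mode is read from the ``distributeBy`` system property (``DISTRIBUTION_PROPERTY`` above), so a CI invocation could switch to class-level distribution along these lines, where ``someTestTask`` stands in for whichever task the build wires to ``ListTests``::

    ./gradlew -DdistributeBy=CLASS someTestTask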
@@ -1,9 +1,9 @@
 package net.corda.testing
 
 import org.gradle.api.DefaultTask
-import org.gradle.api.tasks.TaskAction
 
 class ParallelTestGroup extends DefaultTask {
+
+    Distribution distribution = Distribution.METHOD
 
     List<String> groups = new ArrayList<>()
     int shardCount = 20
@@ -11,20 +11,24 @@ class ParallelTestGroup extends DefaultTask {
     int gbOfMemory = 4
     boolean printToStdOut = true
 
-    void numberOfShards(int shards){
+    void numberOfShards(int shards) {
         this.shardCount = shards
     }
 
-    void coresPerFork(int cores){
+    void distribute(Distribution dist) {
+        this.distribution = dist
+    }
+
+    void coresPerFork(int cores) {
         this.coresToUse = cores
     }
 
-    void memoryInGbPerFork(int gb){
+    void memoryInGbPerFork(int gb) {
         this.gbOfMemory = gb
     }
 
     // when this is false, only containers with "failed" exit codes will be printed to stdout
-    void streamOutput(boolean print){
+    void streamOutput(boolean print) {
         this.printToStdOut = print
     }
@@ -0,0 +1,54 @@
+package net.corda.testing;
+
+import org.hamcrest.collection.IsIterableContainingInAnyOrder;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.hamcrest.CoreMatchers.is;
+
+public class BucketingAllocatorTest {
+
+    @Test
+    public void shouldAlwaysBucketTestsEvenIfNotInTimedFile() {
+        BucketingAllocator bucketingAllocator = new BucketingAllocator(1, Collections::emptyList);
+
+        Object task = new Object();
+        bucketingAllocator.addSource(() -> Arrays.asList("SomeTestingClass*", "AnotherTestingClass*"), task);
+
+        bucketingAllocator.generateTestPlan();
+        List<String> testsForForkAndTestTask = bucketingAllocator.getTestsForForkAndTestTask(0, task);
+
+        Assert.assertThat(testsForForkAndTestTask, IsIterableContainingInAnyOrder.containsInAnyOrder("SomeTestingClass*", "AnotherTestingClass*"));
+    }
+
+    @Test
+    public void shouldAllocateTestsAcrossForksEvenIfNoMatchingTestsFound() {
+        BucketingAllocator bucketingAllocator = new BucketingAllocator(2, Collections::emptyList);
+
+        Object task = new Object();
+        bucketingAllocator.addSource(() -> Arrays.asList("SomeTestingClass*", "AnotherTestingClass*"), task);
+
+        bucketingAllocator.generateTestPlan();
+        List<String> testsForForkOneAndTestTask = bucketingAllocator.getTestsForForkAndTestTask(0, task);
+        List<String> testsForForkTwoAndTestTask = bucketingAllocator.getTestsForForkAndTestTask(1, task);
+
+        Assert.assertThat(testsForForkOneAndTestTask.size(), is(1));
+        Assert.assertThat(testsForForkTwoAndTestTask.size(), is(1));
+
+        List<String> allTests = Stream.of(testsForForkOneAndTestTask, testsForForkTwoAndTestTask).flatMap(Collection::stream).collect(Collectors.toList());
+
+        Assert.assertThat(allTests, IsIterableContainingInAnyOrder.containsInAnyOrder("SomeTestingClass*", "AnotherTestingClass*"));
+    }
+}
@@ -14,7 +14,7 @@ import org.assertj.core.api.Assertions.assertThat
 import org.junit.Before
 import org.junit.Test
 
-class FlowsExecutionModeTests : NodeBasedTest(listOf("net.corda.finance.contracts", CashSchemaV1::class.packageName)) {
+class FlowsExecutionModeTests : NodeBasedTest(emptyList()) {
 
     private val rpcUser = User("user1", "test", permissions = setOf(Permissions.all()))
     private lateinit var node: NodeWithInfo
@@ -17,6 +17,7 @@ import net.corda.core.context.Trace
 import net.corda.core.context.Trace.InvocationId
 import net.corda.core.identity.CordaX500Name
 import net.corda.core.internal.*
+import net.corda.core.internal.messaging.InternalCordaRPCOps
 import net.corda.core.messaging.CordaRPCOps
 import net.corda.core.messaging.RPCOps
 import net.corda.core.serialization.SerializationContext
@@ -286,8 +287,12 @@ class RPCClientProxyHandler(
     }
 
     private fun produceMethodFullyQualifiedName(method: Method) : String {
-        // For CordaRPCOps send method only - for backwards compatibility
-        return if (CordaRPCOps::class.java == rpcOpsClass) {
+        /*
+         * Until version 4.3, RPC calls did not include class names.
+         * Up to that version, only CordaRPCOps and InternalCordaRPCOps were supported.
+         * So, for these two classes only the method name is sent across the wire, to preserve backwards compatibility.
+         */
+        return if (CordaRPCOps::class.java == rpcOpsClass || InternalCordaRPCOps::class.java == rpcOpsClass) {
             method.name
         } else {
             rpcOpsClass.name + CLASS_METHOD_DIVIDER + method.name
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<Configuration status="info" packages="net.corda.common.logging">
+<Configuration status="info" packages="net.corda.common.logging" shutdownHook="disable">
 
     <Properties>
         <Property name="log-path">${sys:log-path:-logs}</Property>
@@ -3,7 +3,7 @@
 # their own projects. So don't get fancy with syntax!
 
 cordaVersion=4.4-SNAPSHOT
-gradlePluginsVersion=5.0.3
+gradlePluginsVersion=5.0.4
 kotlinVersion=1.2.71
 java8MinUpdateVersion=171
 # ***************************************************************#
@@ -17,6 +17,8 @@ quasarVersion=0.7.10
 quasarClassifier=jdk8
 # Quasar version to use with Java 11:
 quasarVersion11=0.8.0
+# Specify a classifier for Java 11 built artifacts
+jdkClassifier11=jdk11
 proguardVersion=6.1.1
 bouncycastleVersion=1.60
 classgraphVersion=4.8.41
@@ -22,6 +22,9 @@ dependencies {
 jar.enabled = false
 
 test {
+    ext {
+        ignoreForDistribution = true
+    }
     filter {
         // Running this class is the whole point, so include it explicitly.
         includeTestsMatching "net.corda.deterministic.data.GenerateData"
@@ -15,6 +15,8 @@ object CordappResolver {
     private val logger = loggerFor<CordappResolver>()
     private val cordappClasses: ConcurrentHashMap<String, Set<Cordapp>> = ConcurrentHashMap()
 
+    private val insideInMemoryTest: Boolean by lazy { insideInMemoryTest() }
+
     // TODO Use the StackWalker API once we migrate to Java 9+
     private var cordappResolver: () -> Cordapp? = {
         Exception().stackTrace
@@ -25,9 +27,12 @@ object CordappResolver {
                 ?.single()
     }
 
-    /*
+    /**
      * Associates class names with CorDapps or logs a warning when a CorDapp is already registered for a given class.
      * This could happen when trying to run different versions of the same CorDapp on the same node.
+     *
+     * @throws IllegalStateException when multiple CorDapps are registered for the same contract class,
+     * since this can lead to undefined behaviour.
      */
     @Synchronized
     fun register(cordapp: Cordapp) {
@@ -39,12 +44,30 @@ object CordappResolver {
 
         notAlreadyRegisteredClasses.forEach { cordappClasses[it] = setOf(cordapp) }
 
-        for ((className, registeredCordapps) in alreadyRegistered) {
-            if (registeredCordapps.any { it.jarHash == cordapp.jarHash }) continue
-            if (className in contractClasses) {
-                logger.error("ATTENTION: More than one CorDapp installed on the node for contract $className. Please remove the previous version when upgrading to a new version.")
+        for ((registeredClassName, registeredCordapps) in alreadyRegistered) {
+            val duplicateCordapps = registeredCordapps.filter { it.jarHash == cordapp.jarHash }.toSet()
+
+            if (duplicateCordapps.isNotEmpty()) {
+                logger.warnOnce("The CorDapp (name: ${cordapp.info.shortName}, file: ${cordapp.name}) " +
+                        "is installed multiple times on the node. The following files correspond to the exact same content: " +
+                        "${duplicateCordapps.map { it.name }}")
+                continue
             }
-            cordappClasses[className] = registeredCordapps + cordapp
+            // During in-memory tests, the spawned nodes share the same CordappResolver, so detected conflicts can be spurious.
+            if (registeredClassName in contractClasses && !insideInMemoryTest) {
+                throw IllegalStateException("More than one CorDapp installed on the node for contract $registeredClassName. " +
+                        "Please remove the previous version when upgrading to a new version.")
+            }
+
+            cordappClasses[registeredClassName] = registeredCordapps + cordapp
+        }
+    }
+
+    private fun insideInMemoryTest(): Boolean {
+        return Exception().stackTrace.any {
+            it.className.startsWith("net.corda.testing.node.internal.InternalMockNetwork") ||
+                    it.className.startsWith("net.corda.testing.node.internal.InProcessNode") ||
+                    it.className.startsWith("net.corda.testing.node.MockServices")
         }
     }
@@ -2,9 +2,11 @@ package net.corda.core.internal.cordapp
 
 import net.corda.core.crypto.SecureHash
 import org.assertj.core.api.Assertions.assertThat
+import org.assertj.core.api.Assertions.assertThatThrownBy
 import org.junit.After
 import org.junit.Before
 import org.junit.Test
+import java.lang.IllegalStateException
 import kotlin.test.assertEquals
 
 class CordappResolverTest {
@@ -49,19 +51,42 @@ class CordappResolverTest {
     }
 
     @Test
-    fun `when different cordapps are registered for the same class, the resolver returns null`() {
+    fun `when different cordapps are registered for the same (non-contract) class, the resolver returns null`() {
         CordappResolver.register(CordappImpl.TEST_INSTANCE.copy(
-                contractClassNames = listOf(javaClass.name),
+                contractClassNames = listOf("ContractClass1"),
                 minimumPlatformVersion = 3,
                 targetPlatformVersion = 222,
                 jarHash = SecureHash.randomSHA256()
         ))
         CordappResolver.register(CordappImpl.TEST_INSTANCE.copy(
-                contractClassNames = listOf(javaClass.name),
+                contractClassNames = listOf("ContractClass2"),
                 minimumPlatformVersion = 2,
                 targetPlatformVersion = 456,
                 jarHash = SecureHash.randomSHA256()
        ))
        assertThat(CordappResolver.currentCordapp).isNull()
    }
+
+    @Test
+    fun `when different cordapps are registered for the same (contract) class, the resolver throws an exception`() {
+        val firstCordapp = CordappImpl.TEST_INSTANCE.copy(
+                contractClassNames = listOf(javaClass.name),
+                minimumPlatformVersion = 3,
+                targetPlatformVersion = 222,
+                jarHash = SecureHash.randomSHA256()
+        )
+        val secondCordapp = CordappImpl.TEST_INSTANCE.copy(
+                contractClassNames = listOf(javaClass.name),
+                minimumPlatformVersion = 2,
+                targetPlatformVersion = 456,
+                jarHash = SecureHash.randomSHA256()
+        )
+
+        CordappResolver.register(firstCordapp)
+        assertThatThrownBy { CordappResolver.register(secondCordapp) }
+                .isInstanceOf(IllegalStateException::class.java)
+                .hasMessageContaining("More than one CorDapp installed on the node for contract ${javaClass.name}. " +
+                        "Please remove the previous version when upgrading to a new version.")
+    }
 }
@@ -184,7 +184,6 @@
   <ID>ComplexMethod:NodeVaultService.kt$NodeVaultService$private fun makeUpdates(batch: Iterable<CoreTransaction>, statesToRecord: StatesToRecord, previouslySeen: Boolean): List<Vault.Update<ContractState>></ID>
   <ID>ComplexMethod:ObjectDiffer.kt$ObjectDiffer$fun diff(a: Any?, b: Any?): DiffTree?</ID>
   <ID>ComplexMethod:Obligation.kt$Obligation$override fun verify(tx: LedgerTransaction)</ID>
-  <ID>ComplexMethod:PersistentNetworkMapCache.kt$PersistentNetworkMapCache$override fun addNodes(nodes: List<NodeInfo>)</ID>
   <ID>ComplexMethod:QuasarInstrumentationHook.kt$QuasarInstrumentationHookAgent.Companion$@JvmStatic fun premain(argumentsString: String?, instrumentation: Instrumentation)</ID>
   <ID>ComplexMethod:RPCClientProxyHandler.kt$RPCClientProxyHandler$ private fun close(notify: Boolean = true)</ID>
   <ID>ComplexMethod:RPCClientProxyHandler.kt$RPCClientProxyHandler$// The handler for Artemis messages. private fun artemisMessageHandler(message: ClientMessage)</ID>
@@ -3021,18 +3020,6 @@
   <ID>MaxLineLength:PersistentIdentityMigrationNewTable.kt$PersistentIdentityMigrationNewTable$throw PersistentIdentitiesMigrationException("Cannot migrate persistent identities as liquibase failed to provide a suitable database connection")</ID>
   <ID>MaxLineLength:PersistentIdentityMigrationNewTableTest.kt$PersistentIdentityMigrationNewTableTest$session.save(PersistentIdentityService.PersistentPublicKeyHashToCertificate(it.owningKey.hash.toString(), it.certPath.encoded))</ID>
   <ID>MaxLineLength:PersistentIdentityMigrationNewTableTest.kt$PersistentIdentityMigrationNewTableTest$val identityService = makeTestIdentityService(PersistentIdentityMigrationNewTableTest.dummyNotary.identity, BOB_IDENTITY, ALICE_IDENTITY)</ID>
-  <ID>MaxLineLength:PersistentIdentityService.kt$PersistentIdentityService$ @Throws(CertificateExpiredException::class, CertificateNotYetValidException::class, InvalidAlgorithmParameterException::class) private fun verifyAndRegisterIdentity(trustAnchor: TrustAnchor, identity: PartyAndCertificate): PartyAndCertificate?</ID>
-  <ID>MaxLineLength:PersistentIdentityService.kt$PersistentIdentityService$// Allows us to eliminate keys we know belong to others by using the cache contents that might have been seen during other identity activity. // Concentrating activity on the identity cache works better than spreading checking across identity and key management, because we cache misses too. fun stripNotOurKeys(keys: Iterable<PublicKey>): Iterable<PublicKey></ID>
-  <ID>MaxLineLength:PersistentIdentityService.kt$PersistentIdentityService$@Throws(UnknownAnonymousPartyException::class) override</ID>
-  <ID>MaxLineLength:PersistentIdentityService.kt$PersistentIdentityService$fun loadIdentities(identities: Collection<PartyAndCertificate> = emptySet(), confidentialIdentities: Collection<PartyAndCertificate> = emptySet())</ID>
-  <ID>MaxLineLength:PersistentIdentityService.kt$PersistentIdentityService$log.warn("Certificate validation failed for ${identity.name} against trusted root ${trustAnchor.trustedCert.subjectX500Principal}.")</ID>
-  <ID>MaxLineLength:PersistentIdentityService.kt$PersistentIdentityService${ // If there is no entry in the legal keyToPartyAndCert table then the party must be a confidential identity so we perform // a lookup in the keyToName table. If an entry for that public key exists, then we attempt look up the associated node's // PartyAndCertificate. val name = keyToName[party.owningKey.toStringShort()] if (name != null) { // This should never return null as this node would not be able to communicate with the node providing a confidential // identity unless its NodeInfo/PartyAndCertificate were available. wellKnownPartyFromX500Name(name) } else { null } }</ID>
-  <ID>MaxLineLength:PersistentIdentityService.kt$PersistentIdentityService${ // This should never return null as this node would not be able to communicate with the node providing a confidential // identity unless its NodeInfo/PartyAndCertificate were available. wellKnownPartyFromX500Name(name) }</ID>
-  <ID>MaxLineLength:PersistentIdentityService.kt$PersistentIdentityService${ // Update the three tables as necessary. We definitely store the public key and map it to a party and we optionally update // the public key to external ID mapping table. This block will only ever be reached when registering keys generated on // other because when a node generates its own keys "registerKeyToParty" is automatically called by KeyManagementService.freshKey. registerKeyToParty(publicKey, party) hashToKey[publicKeyHash] = publicKey if (externalId != null) { registerKeyToExternalId(publicKey, externalId) } }</ID>
-  <ID>MaxLineLength:PersistentIdentityService.kt$PersistentIdentityService.Companion$fun createHashToKeyMap(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<String, PublicKey, PersistentHashToPublicKey, String></ID>
-  <ID>MaxLineLength:PersistentIdentityService.kt$PersistentIdentityService.Companion$fun createKeyToPartyAndCertMap(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<String, PartyAndCertificate, PersistentPublicKeyHashToCertificate, String></ID>
-  <ID>MaxLineLength:PersistentIdentityService.kt$PersistentIdentityService.Companion$fun createKeyToX500Map(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<String, CordaX500Name, PersistentPublicKeyHashToParty, String></ID>
-  <ID>MaxLineLength:PersistentIdentityService.kt$PersistentIdentityService.Companion$fun createX500ToKeyMap(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<CordaX500Name, String, PersistentPartyToPublicKeyHash, String></ID>
   <ID>MaxLineLength:PersistentIdentityServiceTests.kt$PersistentIdentityServiceTests$listOf("Organisation A", "Organisation B", "Organisation C") .map { getTestPartyAndCertificate(CordaX500Name(organisation = it, locality = "London", country = "GB"), generateKeyPair().public) }</ID>
   <ID>MaxLineLength:PersistentIdentityServiceTests.kt$PersistentIdentityServiceTests$val alicente = getTestPartyAndCertificate(CordaX500Name(organisation = "Alicente Worldwide", locality = "London", country = "GB"), generateKeyPair().public)</ID>
   <ID>MaxLineLength:PersistentMap.kt$PersistentMap$ExplicitRemoval<K, V, E, EK> : RemovalListener</ID>
@@ -3042,6 +3029,7 @@
   <ID>MaxLineLength:PersistentNetworkMapCache.kt$PersistentNetworkMapCache$"SELECT n FROM ${NodeInfoSchemaV1.PersistentNodeInfo::class.java.name} n JOIN n.legalIdentitiesAndCerts l WHERE l.name = :name"</ID>
   <ID>MaxLineLength:PersistentNetworkMapCache.kt$PersistentNetworkMapCache$"SELECT n FROM ${NodeInfoSchemaV1.PersistentNodeInfo::class.java.name} n JOIN n.legalIdentitiesAndCerts l WHERE l.owningKeyHash = :owningKeyHash"</ID>
   <ID>MaxLineLength:PersistentNetworkMapCache.kt$PersistentNetworkMapCache$val info = findByIdentityKey(session, nodeInfo.legalIdentitiesAndCerts.first().owningKey).singleOrNull { it.serial == nodeInfo.serial }</ID>
+  <ID>MaxLineLength:PersistentNetworkMapCache.kt$PersistentNetworkMapCache$val newNodes = mutableListOf<NodeInfo>() val updatedNodes = mutableListOf<Pair<NodeInfo, NodeInfo>>() nodes.map { it to getNodesByLegalIdentityKey(it.legalIdentities.first().owningKey).firstOrNull() } .forEach { (node, previousNode) -> when { previousNode == null -> { logger.info("No previous node found for ${node.legalIdentities.first().name}") if (verifyAndRegisterIdentities(node)) { newNodes.add(node) } } previousNode.serial > node.serial -> { logger.info("Discarding older nodeInfo for ${node.legalIdentities.first().name}") } previousNode != node -> { logger.info("Previous node was found for ${node.legalIdentities.first().name} as: $previousNode") // TODO We should be adding any new identities as well if (verifyIdentities(node)) { updatedNodes.add(node to previousNode) } } else -> logger.info("Previous node was identical to incoming one - doing nothing") } } /** * This algorithm protects against database failure (eg. attempt to persist a nodeInfo entry larger than permissible by the * database X500Name) without sacrificing performance incurred by attempting to flush nodeInfo's individually. * Upon database transaction failure, the list of new nodeInfo's is split in half, and then each half is persisted independently. * This continues recursively until all valid nodeInfo's are persisted, and failed ones reported as warnings. */ recursivelyUpdateNodes(newNodes.map { nodeInfo -> Pair(nodeInfo, MapChange.Added(nodeInfo)) } + updatedNodes.map { (nodeInfo, previousNodeInfo) -> Pair(nodeInfo, MapChange.Modified(nodeInfo, previousNodeInfo)) })</ID>
   <ID>MaxLineLength:PersistentNetworkMapCacheTest.kt$PersistentNetworkMapCacheTest$private val charlieNetMapCache = PersistentNetworkMapCache(TestingNamedCacheFactory(), database, InMemoryIdentityService(trustRoot = DEV_ROOT_CA.certificate))</ID>
   <ID>MaxLineLength:PersistentScheduledFlowRepository.kt$PersistentScheduledFlowRepository$private</ID>
   <ID>MaxLineLength:PersistentScheduledFlowRepository.kt$PersistentScheduledFlowRepository$return Pair(StateRef(SecureHash.parse(txId), index), ScheduledStateRef(StateRef(SecureHash.parse(txId), index), scheduledStateRecord.scheduledAt))</ID>
@@ -4224,7 +4212,7 @@
   <ID>TooGenericExceptionCaught:InternalUtils.kt$ex: Exception</ID>
   <ID>TooGenericExceptionCaught:InternalUtils.kt$th: Throwable</ID>
   <ID>TooGenericExceptionCaught:IssueCash.kt$IssueCash$e: Exception</ID>
-  <ID>TooGenericExceptionCaught:JVMAgentUtil.kt$JVMAgentUtil$e: Exception</ID>
+  <ID>TooGenericExceptionCaught:JVMAgentUtil.kt$JVMAgentUtil$e: Throwable</ID>
   <ID>TooGenericExceptionCaught:JacksonSupport.kt$JacksonSupport.PartyDeserializer$e: Exception</ID>
   <ID>TooGenericExceptionCaught:JacksonSupport.kt$JacksonSupport.PublicKeyDeserializer$e: Exception</ID>
   <ID>TooGenericExceptionCaught:JacksonSupport.kt$JacksonSupport.SecureHashDeserializer$e: Exception</ID>
@@ -4393,7 +4381,7 @@
   <ID>TooManyFunctions:P2PMessagingClient.kt$P2PMessagingClient : SingletonSerializeAsTokenMessagingServiceAddressToArtemisQueueResolver</ID>
   <ID>TooManyFunctions:PathUtils.kt$net.corda.core.internal.PathUtils.kt</ID>
   <ID>TooManyFunctions:Perceivable.kt$net.corda.finance.contracts.universal.Perceivable.kt</ID>
-  <ID>TooManyFunctions:PersistentIdentityService.kt$PersistentIdentityService : SingletonSerializeAsTokenIdentityService</ID>
+  <ID>TooManyFunctions:PersistentIdentityService.kt$PersistentIdentityService : SingletonSerializeAsTokenIdentityServiceInternal</ID>
   <ID>TooManyFunctions:PersistentNetworkMapCache.kt$PersistentNetworkMapCache : NetworkMapCacheInternalSingletonSerializeAsToken</ID>
   <ID>TooManyFunctions:PortfolioApi.kt$PortfolioApi</ID>
   <ID>TooManyFunctions:PropertyDescriptor.kt$net.corda.serialization.internal.amqp.PropertyDescriptor.kt</ID>
@@ -19,8 +19,8 @@ function generateTestnetConfig() {
     MY_P2P_PORT=${MY_P2P_PORT} \
     MY_RPC_PORT=${MY_RPC_PORT} \
     MY_RPC_ADMIN_PORT=${MY_RPC_ADMIN_PORT} \
-    NETWORKMAP_URL='https://map.testnet.corda.network' \
-    DOORMAN_URL='https://doorman.testnet.corda.network' \
+    NETWORKMAP_URL='https://netmap.testnet.r3.com' \
+    DOORMAN_URL='https://doorman.testnet.r3.com/' \
     java -jar config-exporter.jar "TEST-NET-COMBINE" "node.conf" "/opt/corda/starting-node.conf" "${CONFIG_FOLDER}/node.conf"
 }
 
@@ -62,7 +62,7 @@ function downloadTestnetCerts() {
     : ${COUNTRY:? '$COUNTRY (the country used when registering for Testnet) must be set as environment variable'}
     curl -L -d "{\"x500Name\":{\"locality\":\"${LOCALITY}\", \"country\":\"${COUNTRY}\"}, \"configType\": \"INSTALLSCRIPT\", \"include\": { \"systemdServices\": false, \"cordapps\": false, \"cordaJar\": false, \"cordaWebserverJar\": false, \"scripts\": false} }" \
     -H 'Content-Type: application/json' \
-    -X POST "https://testnet.corda.network/api/user/node/generate/one-time-key/redeem/$ONE_TIME_DOWNLOAD_KEY" \
+    -X POST "https://onboarder.prod.ws.r3.com/api/user/node/generate/one-time-key/redeem/$ONE_TIME_DOWNLOAD_KEY" \
     -o "${CERTIFICATES_FOLDER}/certs.zip"
     fi
     rm -rf ${CERTIFICATES_FOLDER}/*.jks
@@ -28,6 +28,11 @@ This prevents configuration errors when mixing keys containing ``.`` wrapped wit
 ``"dataSourceProperties.dataSourceClassName" = "val"`` in `Reference.conf`_ would not be overwritten by the property
 ``dataSourceProperties.dataSourceClassName = "val2"`` in *node.conf*.
 
+.. warning:: If a property is defined twice, the last definition takes precedence. The library currently used for parsing
+   HOCON does not provide a way to catch duplicates when parsing files and will silently override values for the same key.
+   For example, having ``key=initialValue`` defined first in node.conf and ``key=overridingValue`` further down the file
+   will result in the value being ``overridingValue``.
+
 By default the node will fail to start in presence of unknown property keys.
 To alter this behaviour, the ``on-unknown-config-keys`` command-line argument can be set to ``IGNORE`` (default is ``FAIL``).
|
|||||||
|
|
||||||
java -Dcorda.rpcSettings.ssl.keyStorePassword=mypassword -jar node.jar
|
java -Dcorda.rpcSettings.ssl.keyStorePassword=mypassword -jar node.jar
|
||||||
|
|
||||||
|
.. note:: If the same field is overriden by both an environment variable and system property, the system property
|
||||||
|
takes precedence.
|
||||||
|
|
||||||
Configuration file fields
|
Configuration file fields
|
||||||
-------------------------
|
-------------------------
|
||||||
|
|
||||||
|
@@ -25,6 +25,11 @@ If you need to switch to synchronous logging (e.g. for debugging/testing purpose
 by adding ``-DLog4jContextSelector=org.apache.logging.log4j.core.selector.ClassLoaderContextSelector`` to the node's
 command line or to the ``jvmArgs`` section of the node configuration (see :doc:`corda-configuration-file`).
 
+.. warning:: Ensure that ``shutdownHook="disable"`` is set if you are overriding the log4j2 configuration file,
+   otherwise logs will not be flushed properly on shutdown and loss may occur. The option is set in the ``Configuration``
+   tag of the log4j configuration file, for example ``<Configuration ... shutdownHook="disable">``. This is needed because
+   Corda overrides the default log4j2 shutdown logic in order to make sure it gets shut down correctly.
+
 Example
 +++++++
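To illustrate the warning above, a minimal sketch of a custom log4j2 file with the shutdown hook disabled (the appender and logger shown are placeholders, not Corda's shipped configuration)::

    <?xml version="1.0" encoding="UTF-8"?>
    <Configuration status="info" shutdownHook="disable">
        <Appenders>
            <Console name="console" target="SYSTEM_OUT"/>
        </Appenders>
        <Loggers>
            <Root level="info">
                <AppenderRef ref="console"/>
            </Root>
        </Loggers>
    </Configuration>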
@@ -83,7 +83,7 @@ class AMQPBridgeManager(config: MutualSslConfiguration,
     }
 
     private fun withMDC(block: () -> Unit) {
-        val oldMDC = MDC.getCopyOfContextMap()
+        val oldMDC = MDC.getCopyOfContextMap() ?: emptyMap<String, String>()
         try {
             MDC.put("queueName", queueName)
             MDC.put("targets", targets.joinToString(separator = ";") { it.toString() })
@@ -46,7 +46,7 @@ internal class ConnectionStateMachine(private val serverMode: Boolean,
     }
 
     private fun withMDC(block: () -> Unit) {
-        val oldMDC = MDC.getCopyOfContextMap()
+        val oldMDC = MDC.getCopyOfContextMap() ?: emptyMap<String, String>()
         try {
             MDC.put("serverMode", serverMode.toString())
             MDC.put("localLegalName", localLegalName)
@@ -41,7 +41,7 @@ internal class EventProcessor(channel: Channel,
     }
 
     private fun withMDC(block: () -> Unit) {
-        val oldMDC = MDC.getCopyOfContextMap()
+        val oldMDC = MDC.getCopyOfContextMap() ?: emptyMap<String, String>()
         try {
             MDC.put("serverMode", serverMode.toString())
             MDC.put("localLegalName", localLegalName)
@@ -49,7 +49,7 @@ internal class AMQPChannelHandler(private val serverMode: Boolean,
     private var badCert: Boolean = false
 
     private fun withMDC(block: () -> Unit) {
-        val oldMDC = MDC.getCopyOfContextMap()
+        val oldMDC = MDC.getCopyOfContextMap() ?: emptyMap<String, String>()
         try {
             MDC.put("serverMode", serverMode.toString())
             MDC.put("remoteAddress", remoteAddress.toString())
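The four hunks above apply the same fix: ``MDC.getCopyOfContextMap()`` returns null when no diagnostic context exists on the current thread, so the saved map is defaulted to an empty one before it is later restored. A minimal Kotlin sketch of the save/mutate/restore pattern (the restore step is assumed here, as it falls outside the visible hunks)::

    import org.slf4j.MDC

    fun withMDC(queueName: String, block: () -> Unit) {
        // getCopyOfContextMap() returns null when the MDC is empty; default to an
        // empty map so that restoring the context later never passes null.
        val oldMDC = MDC.getCopyOfContextMap() ?: emptyMap<String, String>()
        try {
            MDC.put("queueName", queueName)
            block()
        } finally {
            MDC.setContextMap(oldMDC)   // restore the caller's diagnostic context
        }
    }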
@@ -36,7 +36,10 @@ capsule {
 
 task buildCordaJAR(type: FatCapsule, dependsOn: project(':node').tasks.jar) {
     applicationClass 'net.corda.node.Corda'
-    archiveName "corda-${corda_release_version}.jar"
+    archiveBaseName = 'corda'
+    archiveVersion = corda_release_version
+    archiveClassifier = jdkClassifier
+    archiveName = archiveFileName.get()
     applicationSource = files(
         project(':node').configurations.runtimeClasspath,
         project(':node').tasks.jar,
@@ -43,7 +43,8 @@ class AddressBindingFailureTests {
         driver(DriverParameters(startNodesInProcess = false,
                                 notarySpecs = listOf(NotarySpec(notaryName)),
                                 notaryCustomOverrides = mapOf("p2pAddress" to address.toString()),
-                                portAllocation = portAllocation)
+                                portAllocation = portAllocation,
+                                cordappsForAllNodes = emptyList())
         ) {} }.isInstanceOfSatisfying(IllegalStateException::class.java) { error ->
 
             assertThat(error.message).contains("Unable to start notaries")
@@ -56,7 +57,11 @@ class AddressBindingFailureTests {
         ServerSocket(0).use { socket ->
 
             val address = InetSocketAddress("localhost", socket.localPort).toNetworkHostAndPort()
-            driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), inMemoryDB = false, portAllocation = portAllocation)) {
+            driver(DriverParameters(startNodesInProcess = true,
+                                    notarySpecs = emptyList(),
+                                    inMemoryDB = false,
+                                    portAllocation = portAllocation,
+                                    cordappsForAllNodes = emptyList())) {
 
                 assertThatThrownBy { startNode(customOverrides = overrides(address)).getOrThrow() }.isInstanceOfSatisfying(AddressBindingException::class.java) { exception ->
                     assertThat(exception.addresses).contains(address).withFailMessage("Expected addresses to contain $address but was ${exception.addresses}.")
@@ -1,5 +1,6 @@
 package net.corda.node.services.network
 
+import net.corda.core.identity.CordaX500Name
 import net.corda.core.node.NodeInfo
 import net.corda.core.utilities.NetworkHostAndPort
 import net.corda.node.internal.schemas.NodeInfoSchemaV1
@@ -20,6 +21,17 @@ class PersistentNetworkMapCacheTest {
     private companion object {
         val ALICE = TestIdentity(ALICE_NAME, 70)
         val BOB = TestIdentity(BOB_NAME, 80)
+        val CHARLIE = TestIdentity(CHARLIE_NAME, 90)
+
+        val LONG_X500_NAME = CordaX500Name(
+                commonName = "AB123456789012345678901234567890123456789012345678901234567890",
+                organisationUnit = "AB123456789012345678901234567890123456789012345678901234567890",
+                organisation = "Long Plc",
+                locality = "AB123456789012345678901234567890123456789012345678901234567890",
+                state = "AB123456789012345678901234567890123456789012345678901234567890",
+                country = "IT")
+        val LONG_PLC = TestIdentity(LONG_X500_NAME, 95)
+        val LONGER_PLC = TestIdentity(LONG_X500_NAME.copy(organisation = "Longer Plc"), 96)
     }
 
     @Rule
@@ -100,6 +112,53 @@ class PersistentNetworkMapCacheTest {
         assertThat(nodeInfos).hasSize(2)
     }
 
+    @Test
+    fun `negative test - attempt to insert invalid node info`() {
+        charlieNetMapCache.addNode(createNodeInfo(listOf(LONG_PLC)))
+        assertThat(charlieNetMapCache.allNodes).hasSize(0)
+    }
+
+    @Test
+    fun `negative test - attempt to update existing node with invalid node info`() {
+        charlieNetMapCache.addNode(createNodeInfo(listOf(ALICE)))
+        val aliceUpdate = TestIdentity(LONG_X500_NAME, ALICE.keyPair)
+        charlieNetMapCache.addNode(createNodeInfo(listOf(aliceUpdate)))
+        assertThat(charlieNetMapCache.allNodes).hasSize(1)
+        assertThat(charlieNetMapCache.getNodeByLegalName(ALICE_NAME)).isNotNull
+        assertThat(charlieNetMapCache.getNodeByLegalName(LONG_X500_NAME)).isNull()
+    }
+
+    @Test
+    fun `negative test - insert two valid node infos and one invalid one`() {
+        charlieNetMapCache.addNodes(listOf(createNodeInfo(listOf(ALICE)),
+                                           createNodeInfo(listOf(BOB)),
+                                           createNodeInfo(listOf(LONG_PLC))))
+        assertThat(charlieNetMapCache.allNodes).hasSize(2)
+        assertThat(charlieNetMapCache.allNodes.flatMap { it.legalIdentities }).isEqualTo(listOf(ALICE.party, BOB.party))
+    }
+
+    @Test
+    fun `negative test - insert three valid node infos and two invalid ones`() {
+        charlieNetMapCache.addNodes(listOf(createNodeInfo(listOf(LONG_PLC)),
+                                           createNodeInfo(listOf(ALICE)),
+                                           createNodeInfo(listOf(BOB)),
+                                           createNodeInfo(listOf(CHARLIE)),
+                                           createNodeInfo(listOf(LONGER_PLC))))
+        assertThat(charlieNetMapCache.allNodes).hasSize(3)
+        assertThat(charlieNetMapCache.allNodes.flatMap { it.legalIdentities }).isEqualTo(listOf(ALICE.party, BOB.party, CHARLIE.party))
+    }
+
+    @Test
+    fun `negative test - insert one valid node info then attempt to add one invalid node info and update the existing valid nodeinfo`() {
+        charlieNetMapCache.addNode(createNodeInfo(listOf(ALICE)))
+        val aliceUpdate = TestIdentity(LONG_X500_NAME, ALICE.keyPair)
+        charlieNetMapCache.addNodes(listOf(createNodeInfo(listOf(aliceUpdate)),
+                                           createNodeInfo(listOf(LONGER_PLC)), createNodeInfo(listOf(BOB))))
+        assertThat(charlieNetMapCache.allNodes).hasSize(2)
+        assertThat(charlieNetMapCache.getNodeByLegalName(ALICE_NAME)).isNotNull
+        assertThat(charlieNetMapCache.getNodeByLegalName(BOB_NAME)).isNotNull
+    }
+
     private fun createNodeInfo(identities: List<TestIdentity>,
                                address: NetworkHostAndPort = NetworkHostAndPort("localhost", portCounter++)): NodeInfo {
         return NodeInfo(
@@ -5,14 +5,21 @@ import org.slf4j.LoggerFactory
 import java.sql.SQLException
 
 /**
- * If a thread dies because it can't connect to the database, the node ends up in an inconsistent state.
+ * Cater for all types of unrecoverable [VirtualMachineError] in which the node may end up in an inconsistent state.
  * Fail fast and hard.
  */
-class DbExceptionHandler(private val parentHandler: Thread.UncaughtExceptionHandler? = null) : Thread.UncaughtExceptionHandler {
+class GeneralExceptionHandler(private val parentHandler: Thread.UncaughtExceptionHandler? = null) : Thread.UncaughtExceptionHandler {
 
     override fun uncaughtException(t: Thread?, e: Throwable?) {
 
+        // fail fast with minimal overhead and no further processing
+        if (e is VirtualMachineError) {
+            System.err.println("${e.message}")
+            Runtime.getRuntime().halt(1)
+        }
         // the error is a database connection issue - pull the rug
-        if (e is Error && e.cause is SQLException) {
+        else if (e is Error && e.cause is SQLException) {
             errorAndTerminate("Thread ${t!!.name} failed due to database connection error. This is unrecoverable, terminating node.", e)
         }
@@ -462,11 +462,10 @@ open class Node(configuration: NodeConfiguration,
     }
 
     /**
-     * Register a default exception handler for all threads that terminates the process if the database connection goes away and
-     * cannot be recovered.
+     * Register a default exception handler for all threads that terminates the process due to an unrecoverable Virtual Machine error.
      */
     private fun registerDefaultExceptionHandler() {
-        Thread.setDefaultUncaughtExceptionHandler(DbExceptionHandler(Thread.getDefaultUncaughtExceptionHandler()))
+        Thread.setDefaultUncaughtExceptionHandler(GeneralExceptionHandler(Thread.getDefaultUncaughtExceptionHandler()))
     }
 
     /**
@@ -0,0 +1,22 @@
+package net.corda.node.services.api
+
+import net.corda.core.identity.PartyAndCertificate
+import net.corda.core.node.services.IdentityService
+import net.corda.core.utilities.contextLogger
+import java.security.InvalidAlgorithmParameterException
+import java.security.cert.CertificateExpiredException
+import java.security.cert.CertificateNotYetValidException
+
+interface IdentityServiceInternal : IdentityService {
+    private companion object {
+        val log = contextLogger()
+    }
+
+    /** This method exists so it can be mocked with doNothing, rather than having to make up a possibly invalid return value. */
+    fun justVerifyAndRegisterIdentity(identity: PartyAndCertificate, isNewRandomIdentity: Boolean = false) {
+        verifyAndRegisterIdentity(identity, isNewRandomIdentity)
+    }
+
+    @Throws(CertificateExpiredException::class, CertificateNotYetValidException::class, InvalidAlgorithmParameterException::class)
+    fun verifyAndRegisterIdentity(identity: PartyAndCertificate, isNewRandomIdentity: Boolean): PartyAndCertificate?
+}
@@ -11,6 +11,7 @@ import net.corda.core.node.services.IdentityService
 import net.corda.core.serialization.SingletonSerializeAsToken
 import net.corda.core.utilities.contextLogger
 import net.corda.core.utilities.trace
+import net.corda.node.services.api.IdentityServiceInternal
 import net.corda.node.services.persistence.WritablePublicKeyToOwningIdentityCache
 import net.corda.nodeapi.internal.crypto.X509Utilities
 import net.corda.nodeapi.internal.crypto.x509Certificates
@@ -32,7 +33,7 @@ import kotlin.collections.LinkedHashSet
 class InMemoryIdentityService(
         identities: List<PartyAndCertificate> = emptyList(),
         override val trustRoot: X509Certificate
-) : SingletonSerializeAsToken(), IdentityService {
+) : SingletonSerializeAsToken(), IdentityServiceInternal {
     companion object {
         private val log = contextLogger()
     }
@@ -60,6 +61,11 @@ class InMemoryIdentityService(
         return verifyAndRegisterIdentity(trustAnchor, identity)
     }
 
+    @Throws(CertificateExpiredException::class, CertificateNotYetValidException::class, InvalidAlgorithmParameterException::class)
+    override fun verifyAndRegisterIdentity(identity: PartyAndCertificate, isNewRandomIdentity: Boolean): PartyAndCertificate? {
+        return verifyAndRegisterIdentity(trustAnchor, identity)
+    }
+
     @Throws(CertificateExpiredException::class, CertificateNotYetValidException::class, InvalidAlgorithmParameterException::class)
     private fun verifyAndRegisterIdentity(trustAnchor: TrustAnchor, identity: PartyAndCertificate): PartyAndCertificate? {
         // Validate the chain first, before we do anything clever with it
@@ -82,12 +88,12 @@ class InMemoryIdentityService(
             val firstPath = X509Utilities.buildCertPath(identityCertChain.slice(idx until identityCertChain.size))
             verifyAndRegisterIdentity(trustAnchor, PartyAndCertificate(firstPath))
         }
-        return registerIdentity(identity)
+        return registerIdentity(identity, false)
     }
 
-    private fun registerIdentity(identity: PartyAndCertificate): PartyAndCertificate? {
+    private fun registerIdentity(identity: PartyAndCertificate, isNewRandomIdentity: Boolean): PartyAndCertificate? {
         val identityCertChain = identity.certPath.x509Certificates
-        log.trace { "Registering identity $identity" }
+        log.trace { "Registering identity $identity isNewRandomIdentity=${isNewRandomIdentity}" }
         keyToPartyAndCerts[identity.owningKey] = identity
         // Always keep the first party we registered, as that's the well known identity
         nameToKey.computeIfAbsent(identity.name) {identity.owningKey}
@ -13,6 +13,7 @@ import net.corda.core.serialization.SingletonSerializeAsToken
|
|||||||
import net.corda.core.utilities.MAX_HASH_HEX_SIZE
|
import net.corda.core.utilities.MAX_HASH_HEX_SIZE
|
||||||
import net.corda.core.utilities.contextLogger
|
import net.corda.core.utilities.contextLogger
|
||||||
import net.corda.core.utilities.debug
|
import net.corda.core.utilities.debug
|
||||||
|
import net.corda.node.services.api.IdentityServiceInternal
|
||||||
import net.corda.node.services.keys.BasicHSMKeyManagementService
|
import net.corda.node.services.keys.BasicHSMKeyManagementService
|
||||||
import net.corda.node.services.persistence.PublicKeyHashToExternalId
|
import net.corda.node.services.persistence.PublicKeyHashToExternalId
|
||||||
import net.corda.node.services.persistence.WritablePublicKeyToOwningIdentityCache
|
import net.corda.node.services.persistence.WritablePublicKeyToOwningIdentityCache
|
||||||
@ -43,7 +44,7 @@ import kotlin.streams.toList
|
|||||||
* cached for efficient lookup.
|
* cached for efficient lookup.
|
||||||
*/
|
*/
|
||||||
@ThreadSafe
|
@ThreadSafe
|
||||||
class PersistentIdentityService(cacheFactory: NamedCacheFactory) : SingletonSerializeAsToken(), IdentityService {
|
class PersistentIdentityService(cacheFactory: NamedCacheFactory) : SingletonSerializeAsToken(), IdentityServiceInternal {
|
||||||
|
|
||||||
companion object {
|
companion object {
|
||||||
private val log = contextLogger()
|
private val log = contextLogger()
|
||||||
@@ -56,7 +57,8 @@ class PersistentIdentityService(cacheFactory: NamedCacheFactory) : SingletonSeri
 const val IDENTITY_COLUMN_NAME = "identity_value"
 const val NAME_COLUMN_NAME = "name"

-fun createKeyToPartyAndCertMap(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<String, PartyAndCertificate, PersistentPublicKeyHashToCertificate, String> {
+fun createKeyToPartyAndCertMap(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<String, PartyAndCertificate,
+        PersistentPublicKeyHashToCertificate, String> {
     return AppendOnlyPersistentMap(
         cacheFactory = cacheFactory,
         name = "PersistentIdentityService_keyToPartyAndCert",
@@ -74,7 +76,8 @@ class PersistentIdentityService(cacheFactory: NamedCacheFactory) : SingletonSeri
     )
 }

-fun createX500ToKeyMap(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<CordaX500Name, String, PersistentPartyToPublicKeyHash, String> {
+fun createX500ToKeyMap(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<CordaX500Name, String,
+        PersistentPartyToPublicKeyHash, String> {
     return AppendOnlyPersistentMap(
         cacheFactory = cacheFactory,
         name = "PersistentIdentityService_nameToKey",
@@ -89,7 +92,8 @@ class PersistentIdentityService(cacheFactory: NamedCacheFactory) : SingletonSeri
     )
 }

-fun createKeyToX500Map(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<String, CordaX500Name, PersistentPublicKeyHashToParty, String> {
+fun createKeyToX500Map(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<String, CordaX500Name,
+        PersistentPublicKeyHashToParty, String> {
     return AppendOnlyPersistentMap(
         cacheFactory = cacheFactory,
         name = "PersistentIdentityService_keyToName",
@@ -106,7 +110,8 @@ class PersistentIdentityService(cacheFactory: NamedCacheFactory) : SingletonSeri
     persistentEntityClass = PersistentPublicKeyHashToParty::class.java)
 }

-fun createHashToKeyMap(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<String, PublicKey, PersistentHashToPublicKey, String> {
+fun createHashToKeyMap(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<String, PublicKey, PersistentHashToPublicKey,
+        String> {
     return AppendOnlyPersistentMap(
         cacheFactory = cacheFactory,
         name = "PersistentIdentityService_hashToKey",
@@ -207,7 +212,8 @@ class PersistentIdentityService(cacheFactory: NamedCacheFactory) : SingletonSeri
 notaryIdentityCache.addAll(notaryIdentities)
 }

-fun loadIdentities(identities: Collection<PartyAndCertificate> = emptySet(), confidentialIdentities: Collection<PartyAndCertificate> = emptySet()) {
+fun loadIdentities(identities: Collection<PartyAndCertificate> = emptySet(), confidentialIdentities: Collection<PartyAndCertificate> =
+        emptySet()) {
     identities.forEach {
         val key = mapToKey(it)
         keyToPartyAndCert.addWithDuplicatesAllowed(key, it, false)
@@ -222,7 +228,14 @@ class PersistentIdentityService(cacheFactory: NamedCacheFactory) : SingletonSeri

 @Throws(CertificateExpiredException::class, CertificateNotYetValidException::class, InvalidAlgorithmParameterException::class)
 override fun verifyAndRegisterIdentity(identity: PartyAndCertificate): PartyAndCertificate? {
-    return verifyAndRegisterIdentity(trustAnchor, identity)
+    return verifyAndRegisterIdentity(identity, false)
+}
+
+@Throws(CertificateExpiredException::class, CertificateNotYetValidException::class, InvalidAlgorithmParameterException::class)
+override fun verifyAndRegisterIdentity(identity: PartyAndCertificate, isNewRandomIdentity: Boolean): PartyAndCertificate? {
+    return database.transaction {
+        verifyAndRegisterIdentity(trustAnchor, identity, isNewRandomIdentity)
+    }
 }

 /**
@@ -230,16 +243,17 @@ class PersistentIdentityService(cacheFactory: NamedCacheFactory) : SingletonSeri
 *
 * @param trustAnchor The trust anchor that will verify the identity's validity
 * @param identity The identity to verify
-* @param isNewRandomIdentity true if the identity will not have been registered before (e.g. because it is randomly generated by ourselves).
+* @param isNewRandomIdentity true if identity will not have been registered before (e.g. because it is randomly generated by us)
 */
 @Throws(CertificateExpiredException::class, CertificateNotYetValidException::class, InvalidAlgorithmParameterException::class)
-private fun verifyAndRegisterIdentity(trustAnchor: TrustAnchor, identity: PartyAndCertificate): PartyAndCertificate? {
+private fun verifyAndRegisterIdentity(trustAnchor: TrustAnchor, identity: PartyAndCertificate, isNewRandomIdentity: Boolean = false):
+        PartyAndCertificate? {
     // Validate the chain first, before we do anything clever with it
     val identityCertChain = identity.certPath.x509Certificates
     try {
         identity.verify(trustAnchor)
     } catch (e: CertPathValidatorException) {
         log.warn("Certificate validation failed for ${identity.name} against trusted root ${trustAnchor.trustedCert.subjectX500Principal}.")
         log.warn("Certificate path :")
         identityCertChain.reversed().forEachIndexed { index, certificate ->
             val space = (0 until index).joinToString("") { " " }
@@ -249,24 +263,32 @@ class PersistentIdentityService(cacheFactory: NamedCacheFactory) : SingletonSeri
 }
 // Ensure we record the first identity of the same name, first
 val wellKnownCert = identityCertChain.single { CertRole.extract(it)?.isWellKnown ?: false }
-if (wellKnownCert != identity.certificate) {
+if (wellKnownCert != identity.certificate && !isNewRandomIdentity) {
     val idx = identityCertChain.lastIndexOf(wellKnownCert)
     val firstPath = X509Utilities.buildCertPath(identityCertChain.slice(idx until identityCertChain.size))
     verifyAndRegisterIdentity(trustAnchor, PartyAndCertificate(firstPath))
 }
-return registerIdentity(identity)
+return registerIdentity(identity, isNewRandomIdentity)
 }
-private fun registerIdentity(identity: PartyAndCertificate): PartyAndCertificate? {
+private fun registerIdentity(identity: PartyAndCertificate, isNewRandomIdentity: Boolean): PartyAndCertificate? {
 log.debug { "Registering identity $identity" }
 val identityCertChain = identity.certPath.x509Certificates
 val key = mapToKey(identity)
-return database.transaction {
-    keyToPartyAndCert.addWithDuplicatesAllowed(key, identity, false)
-    nameToKey.addWithDuplicatesAllowed(identity.name, key, false)
-    keyToName.addWithDuplicatesAllowed(key, identity.name, false)
-    val parentId = identityCertChain[1].publicKey.toStringShort()
-    keyToPartyAndCert[parentId]
+if (isNewRandomIdentity) {
+    // Because this is supposed to be new and random, there's no way we have it in the database already, so skip this check
+    keyToPartyAndCert[key] = identity
+    val parentId = identityCertChain[1].publicKey.toStringShort()
+    return keyToPartyAndCert[parentId]
+} else {
+    return database.transaction {
+        keyToPartyAndCert.addWithDuplicatesAllowed(key, identity, false)
+        nameToKey.addWithDuplicatesAllowed(identity.name, key, false)
+        keyToName.addWithDuplicatesAllowed(key, identity.name, false)
+        val parentId = identityCertChain[1].publicKey.toStringShort()
+        keyToPartyAndCert[parentId]
+    }
 }
 }
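The two branches above are the substance of this change: a brand-new random confidential identity cannot already be in the database, so the duplicate-tolerant transactional path is skipped in favour of a direct cache write. A minimal, self-contained sketch of that split, with a plain map standing in for the persistent keyToPartyAndCert table (all names here are illustrative, not the real Corda API):

```kotlin
class ToyIdentityRegistry {
    private val keyToIdentity = HashMap<String, String>()

    fun register(key: String, identity: String, isNewRandomIdentity: Boolean): String? {
        if (isNewRandomIdentity) {
            // Freshly generated and random: it cannot already exist, so skip duplicate handling.
            keyToIdentity[key] = identity
        } else {
            // General path: tolerate re-registration of an identical entry, reject conflicts.
            val previous = keyToIdentity.putIfAbsent(key, identity)
            check(previous == null || previous == identity) { "conflicting identity for $key" }
        }
        return keyToIdentity[key]
    }
}

fun main() {
    val registry = ToyIdentityRegistry()
    println(registry.register("k1", "PartyA", isNewRandomIdentity = true))  // fast path
    println(registry.register("k2", "PartyB", isNewRandomIdentity = false)) // general path
    println(registry.register("k2", "PartyB", isNewRandomIdentity = false)) // duplicate tolerated
}
```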
@@ -305,13 +327,13 @@ class PersistentIdentityService(cacheFactory: NamedCacheFactory) : SingletonSeri
 // If we cannot find it then we perform a lookup on the public key to X500 name table
 val legalIdentity = super.wellKnownPartyFromAnonymous(party)
 if (legalIdentity == null) {
-    // If there is no entry in the legal keyToPartyAndCert table then the party must be a confidential identity so we perform
-    // a lookup in the keyToName table. If an entry for that public key exists, then we attempt look up the associated node's
-    // PartyAndCertificate.
+    // If there is no entry in the legal keyToPartyAndCert table then the party must be a confidential identity so we
+    // perform a lookup in the keyToName table. If an entry for that public key exists, then we attempt look up the
+    // associated node's PartyAndCertificate.
     val name = keyToName[party.owningKey.toStringShort()]
     if (name != null) {
-        // This should never return null as this node would not be able to communicate with the node providing a confidential
-        // identity unless its NodeInfo/PartyAndCertificate were available.
+        // This should never return null as this node would not be able to communicate with the node providing a
+        // confidential identity unless its NodeInfo/PartyAndCertificate were available.
         wellKnownPartyFromX500Name(name)
     } else {
         null
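The rewrapped comments describe a two-step resolution: consult the well-known identity tables first, and only fall back to the confidential-identity keyToName table when that fails. A self-contained sketch of the fallback, with plain maps standing in for the persistent tables:

```kotlin
val keyToWellKnownParty = mapOf("key-A" to "PartyA")
val keyToName = mapOf("key-B" to "O=PartyB, L=London, C=GB")
val nameToWellKnownParty = mapOf("O=PartyB, L=London, C=GB" to "PartyB")

fun wellKnownPartyFromAnonymous(owningKey: String): String? {
    // Step 1: the key may belong directly to a well-known identity.
    keyToWellKnownParty[owningKey]?.let { return it }
    // Step 2: otherwise treat it as a confidential identity, resolve its X.500 name,
    // then look up the node that owns that name.
    val name = keyToName[owningKey] ?: return null
    return nameToWellKnownParty[name]
}

fun main() {
    println(wellKnownPartyFromAnonymous("key-A")) // PartyA (well known)
    println(wellKnownPartyFromAnonymous("key-B")) // PartyB (via confidential identity)
    println(wellKnownPartyFromAnonymous("key-X")) // null (unknown key)
}
```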
@@ -332,12 +354,14 @@ class PersistentIdentityService(cacheFactory: NamedCacheFactory) : SingletonSeri
 }

 @Throws(UnknownAnonymousPartyException::class)
-override fun assertOwnership(party: Party, anonymousParty: AnonymousParty) = database.transaction { super.assertOwnership(party, anonymousParty) }
+override fun assertOwnership(party: Party, anonymousParty: AnonymousParty) = database.transaction { super.assertOwnership(party,
+        anonymousParty) }

 lateinit var ourNames: Set<CordaX500Name>

-// Allows us to eliminate keys we know belong to others by using the cache contents that might have been seen during other identity activity.
-// Concentrating activity on the identity cache works better than spreading checking across identity and key management, because we cache misses too.
+// Allows us to eliminate keys we know belong to others by using the cache contents that might have been seen during other identity
+// activity. Concentrating activity on the identity cache works better than spreading checking across identity and key management,
+// because we cache misses too.
 fun stripNotOurKeys(keys: Iterable<PublicKey>): Iterable<PublicKey> {
     return keys.filter { (@Suppress("DEPRECATION") certificateFromKey(it))?.name in ourNames }
 }
@@ -351,7 +375,8 @@ class PersistentIdentityService(cacheFactory: NamedCacheFactory) : SingletonSeri
 if (existingEntryForKey == null) {
     // Update the three tables as necessary. We definitely store the public key and map it to a party and we optionally update
     // the public key to external ID mapping table. This block will only ever be reached when registering keys generated on
-    // other because when a node generates its own keys "registerKeyToParty" is automatically called by KeyManagementService.freshKey.
+    // other because when a node generates its own keys "registerKeyToParty" is automatically called by
+    // KeyManagementService.freshKey.
     registerKeyToParty(publicKey, party)
     hashToKey[publicKeyHash] = publicKey
     if (externalId != null) {
@@ -5,6 +5,7 @@ import net.corda.core.identity.PartyAndCertificate
 import net.corda.core.internal.CertRole
 import net.corda.core.node.services.IdentityService
 import net.corda.core.utilities.days
+import net.corda.node.services.api.IdentityServiceInternal
 import net.corda.nodeapi.internal.crypto.CertificateType
 import net.corda.nodeapi.internal.crypto.ContentSignerBuilder
 import net.corda.nodeapi.internal.crypto.X509Utilities
@@ -44,7 +45,12 @@ fun freshCertificate(identityService: IdentityService,
         window)
 val ourCertPath = X509Utilities.buildCertPath(ourCertificate, issuer.certPath.x509Certificates)
 val anonymisedIdentity = PartyAndCertificate(ourCertPath)
-identityService.verifyAndRegisterIdentity(anonymisedIdentity)
+if (identityService is IdentityServiceInternal) {
+    identityService.justVerifyAndRegisterIdentity(anonymisedIdentity, true)
+} else {
+    identityService.verifyAndRegisterIdentity(anonymisedIdentity)
+}
 return anonymisedIdentity
 }
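freshCertificate now branches on the runtime type of the identity service: the node-internal implementation gets the fast registration path with isNewRandomIdentity = true, while any other implementation keeps the public verification path. The shape of that dispatch, reduced to a runnable toy (the interfaces below are simplified stand-ins, not the real Corda types):

```kotlin
interface ToyIdentityService {
    fun verifyAndRegisterIdentity(identity: String) = println("full verification of $identity")
}

interface ToyIdentityServiceInternal : ToyIdentityService {
    fun justVerifyAndRegisterIdentity(identity: String, isNewRandomIdentity: Boolean) =
            println("fast-path registration of $identity (isNewRandomIdentity=$isNewRandomIdentity)")
}

fun register(service: ToyIdentityService, identity: String) {
    if (service is ToyIdentityServiceInternal) {
        // The key pair was just generated locally, so skip the duplicate checks.
        service.justVerifyAndRegisterIdentity(identity, true)
    } else {
        service.verifyAndRegisterIdentity(identity)
    }
}

fun main() {
    register(object : ToyIdentityService {}, "anonymous-1")
    register(object : ToyIdentityServiceInternal {}, "anonymous-2")
}
```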
@@ -34,6 +34,7 @@ import rx.subjects.PublishSubject
 import java.security.PublicKey
 import java.util.*
 import javax.annotation.concurrent.ThreadSafe
+import javax.persistence.PersistenceException

 /** Database-based network map cache. */
 @ThreadSafe
@@ -184,20 +185,44 @@ open class PersistentNetworkMapCache(cacheFactory: NamedCacheFactory,
         else -> logger.info("Previous node was identical to incoming one - doing nothing")
     }
 }
-database.transaction {
-    updatedNodes.forEach { (node, previousNode) ->
-        //updated
-        updateInfoDB(node, session)
-        changePublisher.onNext(MapChange.Modified(node, previousNode))
-    }
-    newNodes.forEach { node ->
-        //new
-        updateInfoDB(node, session)
-        changePublisher.onNext(MapChange.Added(node))
-    }
-}
+/**
+ * This algorithm protects against database failure (eg. attempt to persist a nodeInfo entry larger than permissible by the
+ * database X500Name) without sacrificing performance incurred by attempting to flush nodeInfo's individually.
+ * Upon database transaction failure, the list of new nodeInfo's is split in half, and then each half is persisted independently.
+ * This continues recursively until all valid nodeInfo's are persisted, and failed ones reported as warnings.
+ */
+recursivelyUpdateNodes(newNodes.map { nodeInfo -> Pair(nodeInfo, MapChange.Added(nodeInfo)) } +
+        updatedNodes.map { (nodeInfo, previousNodeInfo) -> Pair(nodeInfo, MapChange.Modified(nodeInfo, previousNodeInfo)) })
 }
 }
+
+private fun recursivelyUpdateNodes(nodeUpdates: List<Pair<NodeInfo, MapChange>>) {
+    try {
+        persistNodeUpdates(nodeUpdates)
+    } catch (e: PersistenceException) {
+        if (nodeUpdates.isNotEmpty()) {
+            when {
+                nodeUpdates.size > 1 -> {
+                    // persist first half
+                    val nodeUpdatesLow = nodeUpdates.subList(0, (nodeUpdates.size / 2))
+                    recursivelyUpdateNodes(nodeUpdatesLow)
+                    // persist second half
+                    val nodeUpdatesHigh = nodeUpdates.subList((nodeUpdates.size / 2), nodeUpdates.size)
+                    recursivelyUpdateNodes(nodeUpdatesHigh)
+                }
+                else -> logger.warn("Failed to add or update node with info: ${nodeUpdates.single()}")
+            }
+        }
+    }
+}
+
+private fun persistNodeUpdates(nodeUpdates: List<Pair<NodeInfo, MapChange>>) {
+    database.transaction {
+        nodeUpdates.forEach { (nodeInfo, change) ->
+            updateInfoDB(nodeInfo, session)
+            changePublisher.onNext(change)
+        }
+    }
+}
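The doc comment above describes a divide-and-conquer flush: persist the whole batch in one transaction, and on failure bisect until the offending entries are isolated and everything else is saved. A self-contained sketch of the strategy, where an oversized-string check stands in for the real database constraint violation:

```kotlin
class ToyPersistenceException(msg: String) : Exception(msg)

fun persist(batch: List<String>) {
    // Stand-in for database.transaction { ... }: fail the whole batch on any bad entry.
    if (batch.any { it.length > 8 }) throw ToyPersistenceException("entry too large")
    batch.forEach { println("persisted: $it") }
}

fun recursivelyPersist(batch: List<String>) {
    try {
        persist(batch)
    } catch (e: ToyPersistenceException) {
        when {
            batch.size > 1 -> {
                // Bisect and retry each half independently.
                recursivelyPersist(batch.subList(0, batch.size / 2))
                recursivelyPersist(batch.subList(batch.size / 2, batch.size))
            }
            batch.isNotEmpty() -> println("WARN: failed to persist: ${batch.single()}")
        }
    }
}

fun main() {
    // Only the oversized entry is dropped; the other three are persisted.
    recursivelyPersist(listOf("alice", "bob", "an-absurdly-long-name", "carol"))
}
```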
@@ -151,8 +151,9 @@ class CheckpointDumper(private val checkpointStorage: CheckpointStorage, private

 private fun checkpointAgentRunning(): Boolean {
     val agentProperties = getJvmAgentProperties(log)
+    val pattern = "(.+)?checkpoint-agent(-.+)?\\.jar.*".toRegex()
     return agentProperties.values.any { value ->
-        value is String && value.contains("checkpoint-agent.jar")
+        value is String && value.contains(pattern)
     }
 }
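The new regex also matches versioned or relocated agent jars such as checkpoint-agent-4.4.jar, which the plain substring check missed. A quick runnable demonstration (the paths are made up):

```kotlin
fun main() {
    val pattern = "(.+)?checkpoint-agent(-.+)?\\.jar.*".toRegex()
    listOf(
            "/opt/agents/checkpoint-agent.jar",             // plain name: matched before and after
            "/opt/agents/checkpoint-agent-4.4.jar",         // versioned name: only the regex matches
            "checkpoint-agent-4.4.jar=instrumentType=read", // agent arguments appended
            "/opt/agents/other-agent.jar"                   // unrelated agent: no match
    ).forEach { println("$it -> ${it.contains(pattern)}") }
}
```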
@@ -14,6 +14,7 @@ import net.corda.core.context.Trace.InvocationId
 import net.corda.core.identity.CordaX500Name
 import net.corda.core.internal.LifeCycle
 import net.corda.core.internal.NamedCacheFactory
+import net.corda.core.internal.messaging.InternalCordaRPCOps
 import net.corda.core.messaging.CordaRPCOps
 import net.corda.core.messaging.RPCOps
 import net.corda.core.serialization.SerializationContext
@@ -158,7 +159,12 @@ class RPCServer(
 opsList.forEach { ops ->
     listOfApplicableInterfacesRec(ops.javaClass).toSet().forEach { interfaceClass ->
         val groupedMethods = with(interfaceClass) {
-            if (interfaceClass == CordaRPCOps::class.java) {
+            /*
+             * Until version 4.3, rpc calls did not include class names.
+             * Up to this version, only CordaRPCOps and InternalCordaRPCOps were supported.
+             * So, for these classes methods are registered without their class name as well to preserve backwards compatibility.
+             */
+            if (interfaceClass == CordaRPCOps::class.java || interfaceClass == InternalCordaRPCOps::class.java) {
                 methods.groupBy { it.name }
             } else {
                 methods.groupBy { interfaceClass.name + CLASS_METHOD_DIVIDER + it.name }
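The comment captures the compatibility rule: interfaces that pre-4.3 clients already know are registered under bare method names, while newer interfaces get class-qualified names. A sketch of the two grouping schemes over reflected methods (the "#" divider is an assumption; the real value is the CLASS_METHOD_DIVIDER constant defined elsewhere in the RPC code):

```kotlin
const val DIVIDER = "#" // assumed stand-in for CLASS_METHOD_DIVIDER

interface LegacyOps { fun networkMapSnapshot(): List<String> }
interface NewerOps { fun dumpCheckpoints() }

fun registrationKeys(interfaceClass: Class<*>, legacy: Boolean): Set<String> =
        if (legacy) {
            // Pre-4.3 clients send bare method names.
            interfaceClass.methods.groupBy { it.name }.keys
        } else {
            // Newer clients send "fully.qualified.Interface#method".
            interfaceClass.methods.groupBy { interfaceClass.name + DIVIDER + it.name }.keys
        }

fun main() {
    println(registrationKeys(LegacyOps::class.java, legacy = true))
    println(registrationKeys(NewerOps::class.java, legacy = false))
}
```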
@@ -16,8 +16,8 @@ object JVMAgentUtil {
 return try {
     val vm = VirtualMachine.attach(jvmPid)
     return vm.agentProperties
-} catch (e: Exception) {
-    log.warn("Unable to determine whether checkpoint agent is running: ${e.message}.\n" +
+} catch (e: Throwable) {
+    log.warn("Unable to determine whether agent is running: ${e.message}.\n" +
         "You may need to pass in -Djdk.attach.allowAttachSelf=true if running on a Java 9 or later VM")
     Properties()
 }
@@ -10,12 +10,16 @@ import kotlin.concurrent.thread
 */
 @Synchronized
 fun errorAndTerminate(message: String, e: Throwable?) {
-    thread {
-        val log = LoggerFactory.getLogger("errorAndTerminate")
-        log.error(message, e)
-    }
+    try {
+        thread {
+            val log = LoggerFactory.getLogger("errorAndTerminate")
+            log.error(message, e)
+        }

         // give the logger a chance to flush the error message before killing the node
         Thread.sleep(10.seconds.toMillis())
-    Runtime.getRuntime().halt(1)
+    }
+    finally {
+        Runtime.getRuntime().halt(1)
+    }
 }
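Wrapping the body in try/finally guarantees that halt(1) is reached even if spawning the logging thread or the sleep itself throws; before the change an exception there would have left the process running. A sketch of the guarantee (println stands in for Runtime.getRuntime().halt(1) so the example is safe to run):

```kotlin
fun terminateSafely(log: (String) -> Unit) {
    try {
        log("fatal error, shutting down") // may itself throw
        Thread.sleep(100)                 // give the logger a chance to flush
    } finally {
        println("halt(1) reached unconditionally") // Runtime.getRuntime().halt(1) in the real code
    }
}

fun main() {
    try {
        terminateSafely { throw IllegalStateException("logger broken") }
    } catch (e: IllegalStateException) {
        println("the exception escaped, but only after the halt line had run")
    }
}
```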
@@ -13,6 +13,8 @@
             <constraints nullable="false"/>
         </column>
     </createTable>
+    <addPrimaryKey columnNames="pk_hash" constraintName="node_identities_no_cert_pkey"
+                   tableName="node_identities_no_cert"/>
 </changeSet>

 </databaseChangeLog>
@@ -2,6 +2,7 @@ package net.corda.node.services.network

 import net.corda.core.crypto.Crypto
 import net.corda.core.crypto.sha256
+import net.corda.core.identity.CordaX500Name
 import net.corda.core.internal.sign
 import net.corda.core.serialization.serialize
 import net.corda.core.utilities.seconds
@@ -71,6 +72,26 @@ class NetworkMapClientTest {
     assertEquals(nodeInfo2, networkMapClient.getNodeInfo(nodeInfoHash2))
 }

+@Test
+fun `negative test - registered invalid node is added to the network map`() {
+    val invalidLongNodeName = CordaX500Name(
+            commonName = "AB123456789012345678901234567890123456789012345678901234567890",
+            organisationUnit = "AB123456789012345678901234567890123456789012345678901234567890",
+            organisation = "Long Plc",
+            locality = "AB123456789012345678901234567890123456789012345678901234567890",
+            state = "AB123456789012345678901234567890123456789012345678901234567890",
+            country = "IT")
+
+    val (nodeInfo, signedNodeInfo) = createNodeInfoAndSigned(invalidLongNodeName)
+
+    networkMapClient.publish(signedNodeInfo)
+
+    val nodeInfoHash = nodeInfo.serialize().sha256()
+
+    assertThat(networkMapClient.getNetworkMap().payload.nodeInfoHashes).containsExactly(nodeInfoHash)
+    assertEquals(nodeInfo, networkMapClient.getNodeInfo(nodeInfoHash))
+}
+
 @Test
 fun `errors return a meaningful error message`() {
     val nodeInfoBuilder = TestNodeInfoBuilder()
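The padded fields in the test are each 62 characters, so the name passes CordaX500Name's per-field validation (assumed here to cap most fields at 64 characters) while still producing an unusually long distinguished name, the kind of entry the recursive persistence change above is designed to survive. A quick length check:

```kotlin
fun main() {
    val field = "AB123456789012345678901234567890123456789012345678901234567890"
    println(field.length) // 62: just under an assumed 64-character per-field cap
}
```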
@@ -100,4 +100,3 @@ include 'core-deterministic:testing:verifier'
 include 'serialization-deterministic'

 include 'tools:checkpoint-agent'
-findProject(':tools:checkpoint-agent')?.name = 'checkpoint-agent'
@@ -1,4 +1,4 @@
 FROM stefanotestingcr.azurecr.io/buildbase:latest
 COPY . /tmp/source
-CMD ls /tmp/gradle && cd /tmp/source && GRADLE_USER_HOME=/tmp/gradle ./gradlew clean testClasses integrationTestClasses --parallel --info
+CMD cd /tmp/source && GRADLE_USER_HOME=/tmp/gradle ./gradlew clean testClasses integrationTestClasses --parallel --info
@@ -2,7 +2,7 @@ FROM ubuntu:18.04
 ENV GRADLE_USER_HOME=/tmp/gradle
 RUN mkdir /tmp/gradle && mkdir -p /home/root/.m2/repository

-RUN apt-get update && apt-get install -y curl && \
+RUN apt-get update && apt-get install -y curl libatomic1 && \
     curl -O https://d3pxv6yz143wms.cloudfront.net/8.222.10.1/java-1.8.0-amazon-corretto-jdk_8.222.10-1_amd64.deb && \
     apt-get install -y java-common && dpkg -i java-1.8.0-amazon-corretto-jdk_8.222.10-1_amd64.deb && \
     apt-get clean && \
3224	testing/test-times.csv	Normal file
(File diff suppressed because it is too large)
@@ -26,7 +26,10 @@ capsule {

 task buildWebserverJar(type: FatCapsule, dependsOn: project(':node').tasks.jar) {
     applicationClass 'net.corda.webserver.WebServer'
-    archiveName "corda-testserver-${corda_release_version}.jar"
+    archiveBaseName = 'corda-testserver'
+    archiveVersion = corda_release_version
+    archiveClassifier = jdkClassifier
+    archiveName = archiveFileName.get()
     applicationSource = files(
         project(':testing:testserver').configurations.runtimeClasspath,
         project(':testing:testserver').tasks.jar,
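This swaps the deprecated archiveName setter for Gradle's lazy archive properties, assembling the same file name from archiveBaseName, archiveVersion and archiveClassifier (jdkClassifier is a property defined by this build, not a Gradle built-in). The equivalent shape in Gradle Kotlin DSL, as a sketch with assumed values:

```kotlin
// build.gradle.kts sketch; the version and classifier values are assumed, not taken from the build.
tasks.named<Jar>("buildWebserverJar") {
    archiveBaseName.set("corda-testserver")
    archiveVersion.set("4.4-SNAPSHOT") // corda_release_version in the real build
    archiveClassifier.set("jdk8")      // jdkClassifier in the real build
    // archiveFileName is then derived lazily as corda-testserver-4.4-SNAPSHOT-jdk8.jar
}
```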
@@ -25,6 +25,8 @@ repositories {

 apply plugin: 'kotlin'
 apply plugin: 'idea'
+apply plugin: 'net.corda.plugins.publish-utils'
+apply plugin: 'com.jfrog.artifactory'

 description 'A javaagent to allow hooking into Kryo checkpoints'

@@ -49,7 +51,7 @@ dependencies {
 }

 jar {
-    archiveName = "${project.name}.jar"
+    archiveBaseName = "${project.name}"
     manifest {
         attributes(
             'Premain-Class': 'net.corda.tools.CheckpointAgent',
@@ -62,3 +64,7 @@ jar {
     }
     from { configurations.compile.collect { it.isDirectory() ? it : zipTree(it) } }
 }
+
+publish {
+    name 'corda-tools-checkpoint-agent'
+}
@@ -17,13 +17,15 @@ capsule {

 task buildExplorerJAR(type: FatCapsule, dependsOn: project(':tools:explorer').tasks.jar) {
     applicationClass 'net.corda.explorer.Main'
-    archiveName "node-explorer-${corda_release_version}.jar"
+    archiveBaseName = 'node-explorer'
+    archiveVersion = corda_release_version
+    archiveClassifier = jdkClassifier
+    archiveName = archiveFileName.get()
     applicationSource = files(
         project(':tools:explorer').configurations.runtimeClasspath,
         project(':tools:explorer').tasks.jar,
         project(':tools:explorer').sourceSets.main.java.outputDir.toString() + '/ExplorerCaplet.class'
     )
-    classifier 'fat'

     capsuleManifest {
         applicationVersion = corda_release_version
@@ -73,7 +73,7 @@ processResources {

 shadowJar {
     baseName = 'network-builder'
-    classifier = null
+    archiveClassifier = jdkClassifier
     version = null
     zip64 true
 }
@@ -83,12 +83,11 @@ assemble.dependsOn buildNetworkBuilder

 artifacts {
     publish shadowJar {
-        classifier = ""
+        archiveClassifier = jdkClassifier
     }
 }

 jar {
-    classifier "ignore"
     enabled = false
 }

@@ -35,12 +35,12 @@ assemble.dependsOn buildShellCli

 artifacts {
     publish shadowJar {
-        classifier = ""
+        archiveClassifier = jdkClassifier
     }
 }

 jar {
-    classifier "ignore"
+    archiveClassifier = "ignore"
     enabled = false
 }