Mirror of https://github.com/corda/corda.git
Razvan/merge 4.3 into 4.4 (#5494)
* NOTICK: Corda 4.3-RC01. Created first release candidate of Corda 4.3 - RC01.
* CORDA-3141: Add GracefulReconnect callbacks which allow logic to be performed when RPC disconnects unexpectedly (#5430). Also removed potential for growing stack trace on reconnects.
* CORDA-2050 Upgrade Corda to Java 11 (compatibility mode) (#5356). Upgrade Corda to run with Java 11 (compatibility mode) - see https://github.com/corda/corda/pull/5356
* ENT-4198 Adding legal text. Signed-off-by: Ed Prosser <edward.prosser@r3.com>
* TM-29 new baseline for 4.3 since new debt has been added with the last few commits (#5487)
* TM-23 compileAll task to compile all code (#5490)
* Add simple compileAll task to be used by warning check
* lazy configure compileAll
* TM-32 Merge OS 4.3 into 4.4
* TM-32 fixed detekt issue
* Downgrade Dokka back to 0.9.17 due to failing docs_builder.
* add ability to group test types together (#5459)
* add ability to group test types together
* add ability to specify podCount for use in parallel testing
* remove compiler xml
* add Jenkinsfile to enable scanning
* trigger build
* add ability to specify what docker tag to use from outside of the build
* fix docker work dir
* fix pipeline syntax issues
* use environment rather than `def`
* move agent restrictor outside of stages block
* use steps block
* more pipeline syntax fixes
* even more pipeline syntax fixes
* even more pipeline syntax fixes
* add kubenetize as property to image build
* move clear of docker image to end of build rather than start to prevent colocated builds
* escape dollar on docker image remove command
* attempt to kill all existing jobs
* fix compile issue due to killall_jobs
* fix compile issue due to killall_jobs pt2
* fix spelling
* make all variables environment variables
* add logic to delete images locally after pushing
* wrap testing phase with try / finally so that junit reports are always evaluated
* change the behaviour around post build actions
* break implicit link between testing phase and image building phase, allowing testing to occur without a rebuild and push of image
* prepend registry name to provided tag
* allow tasks to specify whether they wish to stream output from containers
* add timestamps directive to Jenkinsfile to have timing info on output
* make KubesTest resilient against transient pod failures in k8s
* increase CPU request
* add logic to allow specifying container resource requests
* attempt to run unit and integration tests in parallel
* change unit tests to use 3 cores to allow co-location on 8c machines
* join grouped tests together to give pod meaningful name
* add step to renew token with GKE
* change renew step to use pods instead of nodes
* fix bug where memory request is not correctly passed to pod
* disable unit tests for now
* [CORDA-2368] Added exception handling for missing files that displays appropriate messages rather than defaulting to file names. (#5472)
* NOTIK Minor adjustments to Detekt rules to reflect current working practises (#5498)
* Minor adjustments to rules to reflect current working practises (including IntelliJ code style alignment)
* Adjust another rule in line with existing code style.
* rebaseline with changed detekt ruleset
* rebaseline with NodeStartup changes
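The net effect of the CI changes in this merge is easiest to see as the two Gradle invocations the new Jenkinsfile makes (condensed from the Jenkinsfile diff below; BUILD_ID and DOCKER_TAG_TO_USE are Jenkins environment bindings, and the image-build step takes further credential arguments that are elided here):

    # build the testing image and push it to the registry under the generated tag
    ./gradlew ... -Ddocker.provided.tag="${DOCKER_TAG_TO_USE}" clean pushBuildImage

    # fan the integration tests out across Kubernetes pods using that image
    ./gradlew -DbuildId="${BUILD_ID}" -Dkubenetize=true -Ddocker.tag="${DOCKER_TAG_TO_USE}" allParallelIntegrationTest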
parent 74e8b6e468
commit 28852ce47d

183 .idea/codeStyles/Project.xml (generated)
@ -27,6 +27,7 @@
|
||||
</editorconfig>
|
||||
<codeStyleSettings language="kotlin">
|
||||
<option name="CODE_STYLE_DEFAULTS" value="KOTLIN_OFFICIAL" />
|
||||
<option name="RIGHT_MARGIN" value="140" />
|
||||
<option name="KEEP_BLANK_LINES_IN_DECLARATIONS" value="1" />
|
||||
<option name="KEEP_BLANK_LINES_IN_CODE" value="1" />
|
||||
<option name="KEEP_BLANK_LINES_BEFORE_RBRACE" value="0" />
|
||||
@ -38,6 +39,188 @@
|
||||
<option name="METHOD_PARAMETERS_RPAREN_ON_NEXT_LINE" value="false" />
|
||||
<option name="EXTENDS_LIST_WRAP" value="0" />
|
||||
<option name="ASSIGNMENT_WRAP" value="0" />
|
||||
<option name="WRAP_ON_TYPING" value="0" />
|
||||
<option name="arrangementSettings" />
|
||||
<option name="forceArrangeMenuAvailable" value="false" />
|
||||
<option name="CODE_STYLE_DEFAULTS" value="KOTLIN_OFFICIAL" />
|
||||
<option name="RIGHT_MARGIN" value="140" />
|
||||
<option name="LINE_COMMENT_AT_FIRST_COLUMN" value="true" />
|
||||
<option name="BLOCK_COMMENT_AT_FIRST_COLUMN" value="true" />
|
||||
<option name="LINE_COMMENT_ADD_SPACE" value="false" />
|
||||
<option name="KEEP_LINE_BREAKS" value="true" />
|
||||
<option name="KEEP_FIRST_COLUMN_COMMENT" value="true" />
|
||||
<option name="KEEP_CONTROL_STATEMENT_IN_ONE_LINE" value="true" />
|
||||
<option name="KEEP_BLANK_LINES_IN_DECLARATIONS" value="1" />
|
||||
<option name="KEEP_BLANK_LINES_IN_CODE" value="1" />
|
||||
<option name="KEEP_BLANK_LINES_BETWEEN_PACKAGE_DECLARATION_AND_HEADER" value="2" />
|
||||
<option name="KEEP_BLANK_LINES_BEFORE_RBRACE" value="0" />
|
||||
<option name="BLANK_LINES_BEFORE_PACKAGE" value="0" />
|
||||
<option name="BLANK_LINES_AFTER_PACKAGE" value="1" />
|
||||
<option name="BLANK_LINES_BEFORE_IMPORTS" value="1" />
|
||||
<option name="BLANK_LINES_AFTER_IMPORTS" value="1" />
|
||||
<option name="BLANK_LINES_AROUND_CLASS" value="1" />
|
||||
<option name="BLANK_LINES_AROUND_FIELD" value="0" />
|
||||
<option name="BLANK_LINES_AROUND_METHOD" value="1" />
|
||||
<option name="BLANK_LINES_BEFORE_METHOD_BODY" value="0" />
|
||||
<option name="BLANK_LINES_AROUND_FIELD_IN_INTERFACE" value="0" />
|
||||
<option name="BLANK_LINES_AROUND_METHOD_IN_INTERFACE" value="1" />
|
||||
<option name="BLANK_LINES_AFTER_CLASS_HEADER" value="0" />
|
||||
<option name="BLANK_LINES_AFTER_ANONYMOUS_CLASS_HEADER" value="0" />
|
||||
<option name="BLANK_LINES_BEFORE_CLASS_END" value="0" />
|
||||
<option name="BRACE_STYLE" value="1" />
|
||||
<option name="CLASS_BRACE_STYLE" value="1" />
|
||||
<option name="METHOD_BRACE_STYLE" value="1" />
|
||||
<option name="LAMBDA_BRACE_STYLE" value="1" />
|
||||
<option name="USE_FLYING_GEESE_BRACES" value="false" />
|
||||
<option name="DO_NOT_INDENT_TOP_LEVEL_CLASS_MEMBERS" value="false" />
|
||||
<option name="ELSE_ON_NEW_LINE" value="false" />
|
||||
<option name="WHILE_ON_NEW_LINE" value="false" />
|
||||
<option name="CATCH_ON_NEW_LINE" value="false" />
|
||||
<option name="FINALLY_ON_NEW_LINE" value="false" />
|
||||
<option name="INDENT_CASE_FROM_SWITCH" value="true" />
|
||||
<option name="CASE_STATEMENT_ON_NEW_LINE" value="true" />
|
||||
<option name="INDENT_BREAK_FROM_CASE" value="true" />
|
||||
<option name="SPECIAL_ELSE_IF_TREATMENT" value="true" />
|
||||
<option name="ALIGN_MULTILINE_CHAINED_METHODS" value="false" />
|
||||
<option name="ALIGN_MULTILINE_PARAMETERS" value="true" />
|
||||
<option name="ALIGN_MULTILINE_PARAMETERS_IN_CALLS" value="false" />
|
||||
<option name="ALIGN_MULTILINE_RESOURCES" value="true" />
|
||||
<option name="ALIGN_MULTILINE_FOR" value="true" />
|
||||
<option name="INDENT_WHEN_CASES" value="true" />
|
||||
<option name="ALIGN_MULTILINE_BINARY_OPERATION" value="false" />
|
||||
<option name="ALIGN_MULTILINE_ASSIGNMENT" value="false" />
|
||||
<option name="ALIGN_MULTILINE_TERNARY_OPERATION" value="false" />
|
||||
<option name="ALIGN_MULTILINE_THROWS_LIST" value="false" />
|
||||
<option name="ALIGN_THROWS_KEYWORD" value="false" />
|
||||
<option name="ALIGN_MULTILINE_EXTENDS_LIST" value="false" />
|
||||
<option name="ALIGN_MULTILINE_METHOD_BRACKETS" value="false" />
|
||||
<option name="ALIGN_MULTILINE_PARENTHESIZED_EXPRESSION" value="false" />
|
||||
<option name="ALIGN_MULTILINE_ARRAY_INITIALIZER_EXPRESSION" value="false" />
|
||||
<option name="ALIGN_GROUP_FIELD_DECLARATIONS" value="false" />
|
||||
<option name="ALIGN_CONSECUTIVE_VARIABLE_DECLARATIONS" value="false" />
|
||||
<option name="ALIGN_CONSECUTIVE_ASSIGNMENTS" value="false" />
|
||||
<option name="ALIGN_SUBSEQUENT_SIMPLE_METHODS" value="false" />
|
||||
<option name="SPACE_AROUND_ASSIGNMENT_OPERATORS" value="true" />
|
||||
<option name="SPACE_AROUND_LOGICAL_OPERATORS" value="true" />
|
||||
<option name="SPACE_AROUND_EQUALITY_OPERATORS" value="true" />
|
||||
<option name="SPACE_AROUND_RELATIONAL_OPERATORS" value="true" />
|
||||
<option name="SPACE_AROUND_BITWISE_OPERATORS" value="true" />
|
||||
<option name="SPACE_AROUND_ADDITIVE_OPERATORS" value="true" />
|
||||
<option name="SPACE_AROUND_MULTIPLICATIVE_OPERATORS" value="true" />
|
||||
<option name="SPACE_AROUND_SHIFT_OPERATORS" value="true" />
|
||||
<option name="SPACE_AROUND_UNARY_OPERATOR" value="false" />
|
||||
<option name="SPACE_AROUND_LAMBDA_ARROW" value="true" />
|
||||
<option name="SPACE_AROUND_METHOD_REF_DBL_COLON" value="false" />
|
||||
<option name="SPACE_AFTER_COMMA" value="true" />
|
||||
<option name="SPACE_AFTER_COMMA_IN_TYPE_ARGUMENTS" value="true" />
|
||||
<option name="SPACE_BEFORE_COMMA" value="false" />
|
||||
<option name="SPACE_AFTER_SEMICOLON" value="true" />
|
||||
<option name="SPACE_BEFORE_SEMICOLON" value="false" />
|
||||
<option name="SPACE_WITHIN_PARENTHESES" value="false" />
|
||||
<option name="SPACE_WITHIN_METHOD_CALL_PARENTHESES" value="false" />
|
||||
<option name="SPACE_WITHIN_EMPTY_METHOD_CALL_PARENTHESES" value="false" />
|
||||
<option name="SPACE_WITHIN_METHOD_PARENTHESES" value="false" />
|
||||
<option name="SPACE_WITHIN_EMPTY_METHOD_PARENTHESES" value="false" />
|
||||
<option name="SPACE_WITHIN_IF_PARENTHESES" value="false" />
|
||||
<option name="SPACE_WITHIN_WHILE_PARENTHESES" value="false" />
|
||||
<option name="SPACE_WITHIN_FOR_PARENTHESES" value="false" />
|
||||
<option name="SPACE_WITHIN_TRY_PARENTHESES" value="false" />
|
||||
<option name="SPACE_WITHIN_CATCH_PARENTHESES" value="false" />
|
||||
<option name="SPACE_WITHIN_SWITCH_PARENTHESES" value="false" />
|
||||
<option name="SPACE_WITHIN_SYNCHRONIZED_PARENTHESES" value="false" />
|
||||
<option name="SPACE_WITHIN_CAST_PARENTHESES" value="false" />
|
||||
<option name="SPACE_WITHIN_BRACKETS" value="false" />
|
||||
<option name="SPACE_WITHIN_BRACES" value="false" />
|
||||
<option name="SPACE_WITHIN_ARRAY_INITIALIZER_BRACES" value="false" />
|
||||
<option name="SPACE_WITHIN_EMPTY_ARRAY_INITIALIZER_BRACES" value="false" />
|
||||
<option name="SPACE_AFTER_TYPE_CAST" value="true" />
|
||||
<option name="SPACE_BEFORE_METHOD_CALL_PARENTHESES" value="false" />
|
||||
<option name="SPACE_BEFORE_METHOD_PARENTHESES" value="false" />
|
||||
<option name="SPACE_BEFORE_IF_PARENTHESES" value="true" />
|
||||
<option name="SPACE_BEFORE_WHILE_PARENTHESES" value="true" />
|
||||
<option name="SPACE_BEFORE_FOR_PARENTHESES" value="true" />
|
||||
<option name="SPACE_BEFORE_TRY_PARENTHESES" value="true" />
|
||||
<option name="SPACE_BEFORE_CATCH_PARENTHESES" value="true" />
|
||||
<option name="SPACE_BEFORE_SWITCH_PARENTHESES" value="true" />
|
||||
<option name="SPACE_BEFORE_SYNCHRONIZED_PARENTHESES" value="true" />
|
||||
<option name="SPACE_BEFORE_CLASS_LBRACE" value="true" />
|
||||
<option name="SPACE_BEFORE_METHOD_LBRACE" value="true" />
|
||||
<option name="SPACE_BEFORE_IF_LBRACE" value="true" />
|
||||
<option name="SPACE_BEFORE_ELSE_LBRACE" value="true" />
|
||||
<option name="SPACE_BEFORE_WHILE_LBRACE" value="true" />
|
||||
<option name="SPACE_BEFORE_FOR_LBRACE" value="true" />
|
||||
<option name="SPACE_BEFORE_DO_LBRACE" value="true" />
|
||||
<option name="SPACE_BEFORE_SWITCH_LBRACE" value="true" />
|
||||
<option name="SPACE_BEFORE_TRY_LBRACE" value="true" />
|
||||
<option name="SPACE_BEFORE_CATCH_LBRACE" value="true" />
|
||||
<option name="SPACE_BEFORE_FINALLY_LBRACE" value="true" />
|
||||
<option name="SPACE_BEFORE_SYNCHRONIZED_LBRACE" value="true" />
|
||||
<option name="SPACE_BEFORE_ARRAY_INITIALIZER_LBRACE" value="false" />
|
||||
<option name="SPACE_BEFORE_ANNOTATION_ARRAY_INITIALIZER_LBRACE" value="false" />
|
||||
<option name="SPACE_BEFORE_ELSE_KEYWORD" value="true" />
|
||||
<option name="SPACE_BEFORE_WHILE_KEYWORD" value="true" />
|
||||
<option name="SPACE_BEFORE_CATCH_KEYWORD" value="true" />
|
||||
<option name="SPACE_BEFORE_FINALLY_KEYWORD" value="true" />
|
||||
<option name="SPACE_BEFORE_QUEST" value="true" />
|
||||
<option name="SPACE_AFTER_QUEST" value="true" />
|
||||
<option name="SPACE_BEFORE_COLON" value="true" />
|
||||
<option name="SPACE_AFTER_COLON" value="true" />
|
||||
<option name="SPACE_BEFORE_TYPE_PARAMETER_LIST" value="false" />
|
||||
<option name="CALL_PARAMETERS_WRAP" value="0" />
|
||||
<option name="PREFER_PARAMETERS_WRAP" value="false" />
|
||||
<option name="CALL_PARAMETERS_LPAREN_ON_NEXT_LINE" value="false" />
|
||||
<option name="CALL_PARAMETERS_RPAREN_ON_NEXT_LINE" value="false" />
|
||||
<option name="METHOD_PARAMETERS_WRAP" value="0" />
|
||||
<option name="METHOD_PARAMETERS_LPAREN_ON_NEXT_LINE" value="false" />
|
||||
<option name="METHOD_PARAMETERS_RPAREN_ON_NEXT_LINE" value="false" />
|
||||
<option name="RESOURCE_LIST_WRAP" value="0" />
|
||||
<option name="RESOURCE_LIST_LPAREN_ON_NEXT_LINE" value="false" />
|
||||
<option name="RESOURCE_LIST_RPAREN_ON_NEXT_LINE" value="false" />
|
||||
<option name="EXTENDS_LIST_WRAP" value="0" />
|
||||
<option name="THROWS_LIST_WRAP" value="0" />
|
||||
<option name="EXTENDS_KEYWORD_WRAP" value="0" />
|
||||
<option name="THROWS_KEYWORD_WRAP" value="0" />
|
||||
<option name="METHOD_CALL_CHAIN_WRAP" value="1" />
|
||||
<option name="WRAP_FIRST_METHOD_IN_CALL_CHAIN" value="false" />
|
||||
<option name="PARENTHESES_EXPRESSION_LPAREN_WRAP" value="false" />
|
||||
<option name="PARENTHESES_EXPRESSION_RPAREN_WRAP" value="false" />
|
||||
<option name="BINARY_OPERATION_WRAP" value="0" />
|
||||
<option name="BINARY_OPERATION_SIGN_ON_NEXT_LINE" value="false" />
|
||||
<option name="TERNARY_OPERATION_WRAP" value="0" />
|
||||
<option name="TERNARY_OPERATION_SIGNS_ON_NEXT_LINE" value="false" />
|
||||
<option name="MODIFIER_LIST_WRAP" value="false" />
|
||||
<option name="KEEP_SIMPLE_BLOCKS_IN_ONE_LINE" value="false" />
|
||||
<option name="KEEP_SIMPLE_METHODS_IN_ONE_LINE" value="false" />
|
||||
<option name="KEEP_SIMPLE_LAMBDAS_IN_ONE_LINE" value="false" />
|
||||
<option name="KEEP_SIMPLE_CLASSES_IN_ONE_LINE" value="false" />
|
||||
<option name="KEEP_MULTIPLE_EXPRESSIONS_IN_ONE_LINE" value="false" />
|
||||
<option name="FOR_STATEMENT_WRAP" value="0" />
|
||||
<option name="FOR_STATEMENT_LPAREN_ON_NEXT_LINE" value="false" />
|
||||
<option name="FOR_STATEMENT_RPAREN_ON_NEXT_LINE" value="false" />
|
||||
<option name="ARRAY_INITIALIZER_WRAP" value="0" />
|
||||
<option name="ARRAY_INITIALIZER_LBRACE_ON_NEXT_LINE" value="false" />
|
||||
<option name="ARRAY_INITIALIZER_RBRACE_ON_NEXT_LINE" value="false" />
|
||||
<option name="ASSIGNMENT_WRAP" value="0" />
|
||||
<option name="PLACE_ASSIGNMENT_SIGN_ON_NEXT_LINE" value="false" />
|
||||
<option name="LABELED_STATEMENT_WRAP" value="2" />
|
||||
<option name="WRAP_COMMENTS" value="false" />
|
||||
<option name="ASSERT_STATEMENT_WRAP" value="0" />
|
||||
<option name="ASSERT_STATEMENT_COLON_ON_NEXT_LINE" value="false" />
|
||||
<option name="IF_BRACE_FORCE" value="0" />
|
||||
<option name="DOWHILE_BRACE_FORCE" value="0" />
|
||||
<option name="WHILE_BRACE_FORCE" value="0" />
|
||||
<option name="FOR_BRACE_FORCE" value="0" />
|
||||
<option name="WRAP_LONG_LINES" value="false" />
|
||||
<option name="METHOD_ANNOTATION_WRAP" value="2" />
|
||||
<option name="CLASS_ANNOTATION_WRAP" value="2" />
|
||||
<option name="FIELD_ANNOTATION_WRAP" value="2" />
|
||||
<option name="PARAMETER_ANNOTATION_WRAP" value="0" />
|
||||
<option name="VARIABLE_ANNOTATION_WRAP" value="0" />
|
||||
<option name="SPACE_BEFORE_ANOTATION_PARAMETER_LIST" value="false" />
|
||||
<option name="SPACE_WITHIN_ANNOTATION_PARENTHESES" value="false" />
|
||||
<option name="ENUM_CONSTANTS_WRAP" value="0" />
|
||||
<option name="FORCE_REARRANGE_MODE" value="0" />
|
||||
<option name="WRAP_ON_TYPING" value="0" />
|
||||
</codeStyleSettings>
|
||||
</code_scheme>
|
||||
</component>
|
1 .idea/codeStyles/codeStyleConfig.xml (generated)

@@ -1,6 +1,5 @@
<component name="ProjectCodeStyleConfiguration">
  <state>
    <option name="USE_PER_PROJECT_SETTINGS" value="true" />
    <option name="PREFERRED_PROJECT_CODE_STYLE" value="Default" />
  </state>
</component>
52 Jenkinsfile (vendored)

@@ -2,6 +2,8 @@ killall_jobs()

pipeline {
    agent { label 'k8s' }
    options { timestamps() }

    environment {
        DOCKER_TAG_TO_USE = "${UUID.randomUUID().toString().toLowerCase().subSequence(0, 12)}"
        EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
@@ -9,7 +11,7 @@ pipeline {
    }

    stages {
        stage('Corda Pull Request Integration Tests - Generate Build Image') {
        stage('Corda Pull Request - Generate Build Image') {
            steps {
                withCredentials([string(credentialsId: 'container_reg_passwd', variable: 'DOCKER_PUSH_PWD')]) {
                    sh "./gradlew " +
@@ -19,26 +21,42 @@ pipeline {
                            "-Ddocker.provided.tag=\"\${DOCKER_TAG_TO_USE}\"" +
                            " clean pushBuildImage"
                }
            }
        }
        stage('Corda Pull Request Integration Tests - Run Integration Tests') {
            steps {
                withCredentials([string(credentialsId: 'container_reg_passwd', variable: 'DOCKER_PUSH_PWD')]) {
                    sh "./gradlew " +
                            "-DbuildId=\"\${BUILD_ID}\" " +
                            "-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
                            "-Dkubenetize=true " +
                            "-Ddocker.tag=\"\${DOCKER_TAG_TO_USE}\"" +
                            " allParallelIntegrationTest"
                }
                junit '**/build/test-results-xml/**/*.xml'
                sh "kubectl auth can-i get pods"
            }
        }

        stage('Clear testing images') {
            steps {
                sh """docker rmi -f \$(docker images | grep \${DOCKER_TAG_TO_USE} | awk '{print \$3}') || echo \"there were no images to delete\""""
        stage('Corda Pull Request - Run Tests') {
            parallel {
                stage('Integration Tests') {
                    steps {
                        sh "./gradlew " +
                                "-DbuildId=\"\${BUILD_ID}\" " +
                                "-Dkubenetize=true " +
                                "-Ddocker.tag=\"\${DOCKER_TAG_TO_USE}\"" +
                                " allParallelIntegrationTest"
                    }
                    post {
                        always {
                            junit '**/build/test-results-xml/**/*.xml'
                        }
                    }
                }
//                stage('Unit Tests') {
//                    steps {
//                        sh "./gradlew " +
//                                "-DbuildId=\"\${BUILD_ID}\" " +
//                                "-Dkubenetize=true " +
//                                "-Ddocker.tag=\"\${DOCKER_TAG_TO_USE}\"" +
//                                " allParallelUnitTest"
//                    }
//                    post {
//                        always {
//                            junit '**/build/test-results-xml/**/*.xml'
//                        }
//                    }
//                }
            }

        }
    }
}
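A note on the two tag properties above, based on the ImageBuilding and DistributedTesting plugin changes later in this diff: -Ddocker.provided.tag fixes the tag under which pushBuildImage tags and pushes the freshly built image, while -Ddocker.tag points the generated parallel test tasks at an image that already exists in the registry; when it is set they drop their dependency on the image-building task and use <registry>:<tag> directly. A hypothetical local workflow that reuses one image across several test runs could therefore look like this (my-feature-tag is an illustrative placeholder, not a value from this commit):

    ./gradlew -Dkubenetize=true -Ddocker.provided.tag=my-feature-tag clean pushBuildImage      # once
    ./gradlew -Dkubenetize=true -Ddocker.tag=my-feature-tag allParallelIntegrationTest         # repeat as needed, no image rebuild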
121 build.gradle
@ -1,4 +1,8 @@
|
||||
import net.corda.testing.DistributedTesting
|
||||
import net.corda.testing.ParallelTestGroup
|
||||
|
||||
import static org.gradle.api.JavaVersion.VERSION_1_8
|
||||
import static org.gradle.api.JavaVersion.VERSION_11
|
||||
|
||||
buildscript {
|
||||
// For sharing constants between builds
|
||||
@ -17,7 +21,15 @@ buildscript {
|
||||
ext.warnings_as_errors = project.hasProperty("compilation.warningsAsErrors") ? project.property("compilation.warningsAsErrors").toBoolean() : false
|
||||
|
||||
ext.quasar_group = 'co.paralleluniverse'
|
||||
ext.quasar_version = constants.getProperty("quasarVersion")
|
||||
// Set version of Quasar according to version of Java used:
|
||||
if (JavaVersion.current() == JavaVersion.VERSION_11) {
|
||||
ext.quasar_version = constants.getProperty("quasarVersion11")
|
||||
ext.quasar_classifier = constants.getProperty("quasarClassifier11")
|
||||
}
|
||||
else {
|
||||
ext.quasar_version = constants.getProperty("quasarVersion")
|
||||
ext.quasar_classifier = constants.getProperty("quasarClassifier")
|
||||
}
|
||||
ext.quasar_exclusions = [
|
||||
'co.paralleluniverse**',
|
||||
'groovy**',
|
||||
@ -96,7 +108,6 @@ buildscript {
|
||||
ext.docker_compose_rule_version = '0.35.0'
|
||||
ext.selenium_version = '3.141.59'
|
||||
ext.ghostdriver_version = '2.1.0'
|
||||
ext.eaagentloader_version = '1.0.3'
|
||||
ext.proguard_version = constants.getProperty('proguardVersion')
|
||||
ext.jsch_version = '0.1.55'
|
||||
ext.protonj_version = '0.33.0' // Override Artemis version
|
||||
@ -106,8 +117,8 @@ buildscript {
|
||||
ext.picocli_version = '3.9.6'
|
||||
ext.commons_io_version = '2.6'
|
||||
ext.controlsfx_version = '8.40.15'
|
||||
ext.fontawesomefx_commons_version = '8.15'
|
||||
ext.fontawesomefx_fontawesome_version = '4.7.0-5'
|
||||
ext.fontawesomefx_commons_version = '11.0'
|
||||
ext.fontawesomefx_fontawesome_version = '4.7.0-11'
|
||||
|
||||
// Name of the IntelliJ SDK created for the deterministic Java rt.jar.
|
||||
// ext.deterministic_idea_sdk = '1.8 (Deterministic)'
|
||||
@ -179,8 +190,17 @@ apply plugin: 'com.jfrog.artifactory'
|
||||
// with the run configurations. It also doesn't realise that the project is a Java 8 project and misconfigures
|
||||
// the resulting import. This fixes it.
|
||||
apply plugin: 'java'
|
||||
sourceCompatibility = 1.8
|
||||
targetCompatibility = 1.8
|
||||
|
||||
println "Java version: " + JavaVersion.current()
|
||||
sourceCompatibility = VERSION_1_8
|
||||
if (JavaVersion.current() == JavaVersion.VERSION_1_8)
|
||||
targetCompatibility = VERSION_1_8
|
||||
else
|
||||
targetCompatibility = VERSION_11
|
||||
println "Java source compatibility: " + sourceCompatibility
|
||||
println "Java target compatibility: " + targetCompatibility
|
||||
println "Quasar version: " + quasar_version
|
||||
println "Quasar classifier: " + quasar_classifier
|
||||
|
||||
allprojects {
|
||||
apply plugin: 'kotlin'
|
||||
@ -210,8 +230,16 @@ allprojects {
|
||||
nugetconfEnabled = false
|
||||
}
|
||||
}
|
||||
sourceCompatibility = 1.8
|
||||
targetCompatibility = 1.8
|
||||
sourceCompatibility = VERSION_1_8
|
||||
if (JavaVersion.current() == JavaVersion.VERSION_1_8)
|
||||
targetCompatibility = VERSION_1_8
|
||||
else
|
||||
targetCompatibility = VERSION_11
|
||||
|
||||
jacoco {
|
||||
// JDK11 official support (https://github.com/jacoco/jacoco/releases/tag/v0.8.3)
|
||||
toolVersion = "0.8.3"
|
||||
}
|
||||
|
||||
tasks.withType(JavaCompile) {
|
||||
options.compilerArgs << "-Xlint:unchecked" << "-Xlint:deprecation" << "-Xlint:-options" << "-parameters"
|
||||
@ -235,6 +263,10 @@ allprojects {
|
||||
}
|
||||
}
|
||||
|
||||
tasks.register('compileAll') { task ->
|
||||
task.dependsOn tasks.withType(AbstractCompile)
|
||||
}
|
||||
|
||||
tasks.withType(Jar) { task ->
|
||||
// Includes War and Ear
|
||||
manifest {
|
||||
@ -248,6 +280,7 @@ allprojects {
|
||||
|
||||
tasks.withType(Test) {
|
||||
forkEvery = 10
|
||||
ignoreFailures = project.hasProperty('tests.ignoreFailures') ? project.property('tests.ignoreFailures').toBoolean() : false
|
||||
failFast = project.hasProperty('tests.failFast') ? project.property('tests.failFast').toBoolean() : false
|
||||
|
||||
// Prevent the project from creating temporary files outside of the build directory.
|
||||
@ -333,6 +366,10 @@ allprojects {
|
||||
if (!JavaVersion.current().java8Compatible)
|
||||
throw new GradleException("Corda requires Java 8, please upgrade to at least 1.8.0_$java8_minUpdateVersion")
|
||||
|
||||
configurations {
|
||||
detekt
|
||||
}
|
||||
|
||||
// Required for building out the fat JAR.
|
||||
dependencies {
|
||||
compile project(':node')
|
||||
@ -352,6 +389,7 @@ dependencies {
|
||||
runtime project(':finance:contracts')
|
||||
runtime project(':webserver')
|
||||
testCompile project(':test-utils')
|
||||
detekt 'io.gitlab.arturbosch.detekt:detekt-cli:1.0.1'
|
||||
}
|
||||
|
||||
jar {
|
||||
@ -380,6 +418,26 @@ task jacocoRootReport(type: org.gradle.testing.jacoco.tasks.JacocoReport) {
|
||||
}
|
||||
}
|
||||
|
||||
task detekt(type: JavaExec) {
|
||||
main = "io.gitlab.arturbosch.detekt.cli.Main"
|
||||
classpath = configurations.detekt
|
||||
def input = "$projectDir"
|
||||
def config = "$projectDir/detekt-config.yml"
|
||||
def baseline = "$projectDir/detekt-baseline.xml"
|
||||
def params = ['-i', input, '-c', config, '-b', baseline]
|
||||
args(params)
|
||||
}
|
||||
|
||||
task detektBaseline(type: JavaExec) {
|
||||
main = "io.gitlab.arturbosch.detekt.cli.Main"
|
||||
classpath = configurations.detekt
|
||||
def input = "$projectDir"
|
||||
def config = "$projectDir/detekt-config.yml"
|
||||
def baseline = "$projectDir/detekt-baseline.xml"
|
||||
def params = ['-i', input, '-c', config, '-b', baseline, '--create-baseline']
|
||||
args(params)
|
||||
}
|
||||
|
||||
tasks.withType(Test) {
|
||||
reports.html.destination = file("${reporting.baseDir}/${name}")
|
||||
}
|
||||
@ -521,32 +579,27 @@ buildScan {
|
||||
termsOfServiceAgree = 'yes'
|
||||
}
|
||||
|
||||
task allParallelIntegrationTest(type: ParallelTestGroup) {
|
||||
testGroups "integrationTest"
|
||||
numberOfShards 15
|
||||
streamOutput false
|
||||
coresPerFork 6
|
||||
memoryInGbPerFork 10
|
||||
}
|
||||
task allParallelUnitTest(type: ParallelTestGroup) {
|
||||
testGroups "test"
|
||||
numberOfShards 15
|
||||
streamOutput false
|
||||
coresPerFork 3
|
||||
memoryInGbPerFork 6
|
||||
}
|
||||
task allParallelUnitAndIntegrationTest(type: ParallelTestGroup) {
|
||||
testGroups "test", "integrationTest"
|
||||
numberOfShards 20
|
||||
streamOutput false
|
||||
coresPerFork 6
|
||||
memoryInGbPerFork 10
|
||||
}
|
||||
apply plugin: DistributedTesting
|
||||
|
||||
configurations {
|
||||
detekt
|
||||
}
|
||||
|
||||
dependencies {
|
||||
detekt 'io.gitlab.arturbosch.detekt:detekt-cli:1.0.1'
|
||||
}
|
||||
|
||||
task detekt(type: JavaExec) {
|
||||
main = "io.gitlab.arturbosch.detekt.cli.Main"
|
||||
classpath = configurations.detekt
|
||||
def input = "$projectDir"
|
||||
def config = "$projectDir/detekt-config.yml"
|
||||
def baseline = "$projectDir/detekt-baseline.xml"
|
||||
def params = ['-i', input, '-c', config, '-b', baseline]
|
||||
args(params)
|
||||
}
|
||||
|
||||
task detektBaseline(type: JavaExec) {
|
||||
main = "io.gitlab.arturbosch.detekt.cli.Main"
|
||||
classpath = configurations.detekt
|
||||
def input = "$projectDir"
|
||||
def config = "$projectDir/detekt-config.yml"
|
||||
def baseline = "$projectDir/detekt-baseline.xml"
|
||||
def params = ['-i', input, '-c', config, '-b', baseline, '--create-baseline']
|
||||
args(params)
|
||||
}
|
||||
|
@ -35,6 +35,7 @@ dependencies {
|
||||
compile gradleApi()
|
||||
compile "io.fabric8:kubernetes-client:4.4.1"
|
||||
compile 'org.apache.commons:commons-compress:1.19'
|
||||
compile 'org.apache.commons:commons-lang3:3.9'
|
||||
compile 'commons-codec:commons-codec:1.13'
|
||||
compile "io.github.classgraph:classgraph:$class_graph_version"
|
||||
compile "com.bmuschko:gradle-docker-plugin:5.0.0"
|
||||
|
@ -1,10 +1,8 @@
|
||||
package net.corda.testing
|
||||
|
||||
|
||||
import com.bmuschko.gradle.docker.tasks.image.DockerPushImage
|
||||
import org.gradle.api.Plugin
|
||||
import org.gradle.api.Project
|
||||
import org.gradle.api.Task
|
||||
import org.gradle.api.tasks.testing.Test
|
||||
|
||||
/**
|
||||
@ -22,6 +20,7 @@ class DistributedTesting implements Plugin<Project> {
|
||||
ensureImagePluginIsApplied(project)
|
||||
ImageBuilding imagePlugin = project.plugins.getPlugin(ImageBuilding)
|
||||
DockerPushImage imageBuildingTask = imagePlugin.pushTask
|
||||
String providedTag = System.getProperty("docker.tag")
|
||||
|
||||
//in each subproject
|
||||
//1. add the task to determine all tests within the module
|
||||
@ -31,7 +30,7 @@ class DistributedTesting implements Plugin<Project> {
|
||||
subProject.tasks.withType(Test) { Test task ->
|
||||
ListTests testListerTask = createTestListingTasks(task, subProject)
|
||||
Test modifiedTestTask = modifyTestTaskForParallelExecution(subProject, task, testListerTask)
|
||||
KubesTest parallelTestTask = generateParallelTestingTask(subProject, task, imageBuildingTask)
|
||||
KubesTest parallelTestTask = generateParallelTestingTask(subProject, task, imageBuildingTask, providedTag)
|
||||
}
|
||||
}
|
||||
|
||||
@ -45,55 +44,57 @@ class DistributedTesting implements Plugin<Project> {
|
||||
//first step is to create a single task which will invoke all the submodule tasks for each grouping
|
||||
//ie allParallelTest will invoke [node:test, core:test, client:rpc:test ... etc]
|
||||
//ie allIntegrationTest will invoke [node:integrationTest, core:integrationTest, client:rpc:integrationTest ... etc]
|
||||
createGroupedParallelTestTasks(allKubesTestingTasksGroupedByType, project, imageBuildingTask)
|
||||
Set<ParallelTestGroup> userGroups = new HashSet<>(project.tasks.withType(ParallelTestGroup))
|
||||
|
||||
Collection<ParallelTestGroup> userDefinedGroups = userGroups.forEach { testGrouping ->
|
||||
List<KubesTest> groups = ((ParallelTestGroup) testGrouping).groups.collect {
|
||||
allKubesTestingTasksGroupedByType.get(it)
|
||||
}.flatten()
|
||||
String superListOfTasks = groups.collect { it.fullTaskToExecutePath }.join(" ")
|
||||
|
||||
def userDefinedParallelTask = project.rootProject.tasks.create("userDefined" + testGrouping.name.capitalize(), KubesTest) {
|
||||
if (!providedTag) {
|
||||
dependsOn imageBuildingTask
|
||||
}
|
||||
numberOfPods = testGrouping.getShardCount()
|
||||
printOutput = testGrouping.printToStdOut
|
||||
fullTaskToExecutePath = superListOfTasks
|
||||
taskToExecuteName = testGrouping.groups.join("And")
|
||||
memoryGbPerFork = testGrouping.gbOfMemory
|
||||
numberOfCoresPerFork = testGrouping.coresToUse
|
||||
doFirst {
|
||||
dockerTag = dockerTag = providedTag ? ImageBuilding.registryName + ":" + providedTag : (imageBuildingTask.imageName.get() + ":" + imageBuildingTask.tag.get())
|
||||
}
|
||||
}
|
||||
def reportOnAllTask = project.rootProject.tasks.create("userDefinedReports${testGrouping.name.capitalize()}", KubesReporting) {
|
||||
dependsOn userDefinedParallelTask
|
||||
destinationDir new File(project.rootProject.getBuildDir(), "userDefinedReports${testGrouping.name.capitalize()}")
|
||||
doFirst {
|
||||
destinationDir.deleteDir()
|
||||
shouldPrintOutput = !testGrouping.printToStdOut
|
||||
podResults = userDefinedParallelTask.containerResults
|
||||
reportOn(userDefinedParallelTask.testOutput)
|
||||
}
|
||||
}
|
||||
userDefinedParallelTask.finalizedBy(reportOnAllTask)
|
||||
testGrouping.dependsOn(userDefinedParallelTask)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private List<Task> createGroupedParallelTestTasks(Map<String, List<KubesTest>> allKubesTestingTasksGroupedByType, Project project, DockerPushImage imageBuildingTask) {
|
||||
allKubesTestingTasksGroupedByType.entrySet().collect { entry ->
|
||||
def taskType = entry.key
|
||||
def allTasksOfType = entry.value
|
||||
def allParallelTask = project.rootProject.tasks.create("allParallel" + taskType.capitalize(), KubesTest) {
|
||||
dependsOn imageBuildingTask
|
||||
printOutput = true
|
||||
fullTaskToExecutePath = allTasksOfType.collect { task -> task.fullTaskToExecutePath }.join(" ")
|
||||
taskToExecuteName = taskType
|
||||
doFirst {
|
||||
dockerTag = imageBuildingTask.imageName.get() + ":" + imageBuildingTask.tag.get()
|
||||
}
|
||||
}
|
||||
|
||||
//second step is to create a task to use the reports output by the parallel test task
|
||||
def reportOnAllTask = project.rootProject.tasks.create("reportAllParallel${taskType.capitalize()}", KubesReporting) {
|
||||
dependsOn allParallelTask
|
||||
destinationDir new File(project.rootProject.getBuildDir(), "allResults${taskType.capitalize()}")
|
||||
doFirst {
|
||||
destinationDir.deleteDir()
|
||||
podResults = allParallelTask.containerResults
|
||||
reportOn(allParallelTask.testOutput)
|
||||
}
|
||||
}
|
||||
|
||||
//invoke this report task after parallel testing
|
||||
allParallelTask.finalizedBy(reportOnAllTask)
|
||||
project.logger.info "Created task: ${allParallelTask.getPath()} to enable testing on kubernetes for tasks: ${allParallelTask.fullTaskToExecutePath}"
|
||||
project.logger.info "Created task: ${reportOnAllTask.getPath()} to generate test html output for task ${allParallelTask.getPath()}"
|
||||
return allParallelTask
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
private KubesTest generateParallelTestingTask(Project projectContainingTask, Test task, DockerPushImage imageBuildingTask) {
|
||||
private KubesTest generateParallelTestingTask(Project projectContainingTask, Test task, DockerPushImage imageBuildingTask, String providedTag) {
|
||||
def taskName = task.getName()
|
||||
def capitalizedTaskName = task.getName().capitalize()
|
||||
|
||||
KubesTest createdParallelTestTask = projectContainingTask.tasks.create("parallel" + capitalizedTaskName, KubesTest) {
|
||||
dependsOn imageBuildingTask
|
||||
if (!providedTag) {
|
||||
dependsOn imageBuildingTask
|
||||
}
|
||||
printOutput = true
|
||||
fullTaskToExecutePath = task.getPath()
|
||||
taskToExecuteName = taskName
|
||||
doFirst {
|
||||
dockerTag = imageBuildingTask.imageName.get() + ":" + imageBuildingTask.tag.get()
|
||||
dockerTag = providedTag ? ImageBuilding.registryName + ":" + providedTag : (imageBuildingTask.imageName.get() + ":" + imageBuildingTask.tag.get())
|
||||
}
|
||||
}
|
||||
projectContainingTask.logger.info "Created task: ${createdParallelTestTask.getPath()} to enable testing on kubernetes for task: ${task.getPath()}"
|
||||
|
@ -1,10 +1,7 @@
|
||||
package net.corda.testing
|
||||
|
||||
import com.bmuschko.gradle.docker.DockerRegistryCredentials
|
||||
import com.bmuschko.gradle.docker.tasks.container.DockerCreateContainer
|
||||
import com.bmuschko.gradle.docker.tasks.container.DockerLogsContainer
|
||||
import com.bmuschko.gradle.docker.tasks.container.DockerStartContainer
|
||||
import com.bmuschko.gradle.docker.tasks.container.DockerWaitContainer
|
||||
import com.bmuschko.gradle.docker.tasks.container.*
|
||||
import com.bmuschko.gradle.docker.tasks.image.*
|
||||
import org.gradle.api.GradleException
|
||||
import org.gradle.api.Plugin
|
||||
@ -16,6 +13,7 @@ import org.gradle.api.Project
|
||||
*/
|
||||
class ImageBuilding implements Plugin<Project> {
|
||||
|
||||
public static final String registryName = "stefanotestingcr.azurecr.io/testing"
|
||||
DockerPushImage pushTask
|
||||
|
||||
@Override
|
||||
@ -25,7 +23,7 @@ class ImageBuilding implements Plugin<Project> {
|
||||
registryCredentialsForPush.username.set("stefanotestingcr")
|
||||
registryCredentialsForPush.password.set(System.getProperty("docker.push.password") ? System.getProperty("docker.push.password") : "")
|
||||
|
||||
DockerPullImage pullTask = project.tasks.create("pullBaseImage", DockerPullImage){
|
||||
DockerPullImage pullTask = project.tasks.create("pullBaseImage", DockerPullImage) {
|
||||
repository = "stefanotestingcr.azurecr.io/buildbase"
|
||||
tag = "latest"
|
||||
doFirst {
|
||||
@ -83,33 +81,41 @@ class ImageBuilding implements Plugin<Project> {
|
||||
targetContainerId createBuildContainer.getContainerId()
|
||||
}
|
||||
|
||||
|
||||
DockerTagImage tagBuildImageResult = project.tasks.create('tagBuildImageResult', DockerTagImage) {
|
||||
dependsOn commitBuildImageResult
|
||||
imageId = commitBuildImageResult.getImageId()
|
||||
tag = System.getProperty("docker.provided.tag") ? System.getProperty("docker.provided.tag") : "${UUID.randomUUID().toString().toLowerCase().subSequence(0, 12)}"
|
||||
repository = "stefanotestingcr.azurecr.io/testing"
|
||||
tag = System.getProperty("docker.provided.tag") ? System.getProperty("docker.provided.tag") : "${UUID.randomUUID().toString().toLowerCase().subSequence(0, 12)}"
|
||||
repository = registryName
|
||||
}
|
||||
|
||||
if (System.getProperty("docker.tag")) {
|
||||
DockerPushImage pushBuildImage = project.tasks.create('pushBuildImage', DockerPushImage) {
|
||||
doFirst {
|
||||
registryCredentials = registryCredentialsForPush
|
||||
}
|
||||
imageName = "stefanotestingcr.azurecr.io/testing"
|
||||
tag = System.getProperty("docker.tag")
|
||||
DockerPushImage pushBuildImage = project.tasks.create('pushBuildImage', DockerPushImage) {
|
||||
dependsOn tagBuildImageResult
|
||||
doFirst {
|
||||
registryCredentials = registryCredentialsForPush
|
||||
}
|
||||
this.pushTask = pushBuildImage
|
||||
} else {
|
||||
DockerPushImage pushBuildImage = project.tasks.create('pushBuildImage', DockerPushImage) {
|
||||
dependsOn tagBuildImageResult
|
||||
doFirst {
|
||||
registryCredentials = registryCredentialsForPush
|
||||
}
|
||||
imageName = "stefanotestingcr.azurecr.io/testing"
|
||||
tag = tagBuildImageResult.tag
|
||||
}
|
||||
this.pushTask = pushBuildImage
|
||||
imageName = registryName
|
||||
tag = tagBuildImageResult.tag
|
||||
}
|
||||
this.pushTask = pushBuildImage
|
||||
|
||||
|
||||
DockerRemoveContainer deleteContainer = project.tasks.create('deleteBuildContainer', DockerRemoveContainer) {
|
||||
dependsOn pushBuildImage
|
||||
targetContainerId createBuildContainer.getContainerId()
|
||||
}
|
||||
DockerRemoveImage deleteTaggedImage = project.tasks.create('deleteTaggedImage', DockerRemoveImage) {
|
||||
dependsOn pushBuildImage
|
||||
force = true
|
||||
targetImageId commitBuildImageResult.getImageId()
|
||||
}
|
||||
DockerRemoveImage deleteBuildImage = project.tasks.create('deleteBuildImage', DockerRemoveImage) {
|
||||
dependsOn deleteContainer, deleteTaggedImage
|
||||
force = true
|
||||
targetImageId buildDockerImageForSource.getImageId()
|
||||
}
|
||||
if (System.getProperty("docker.keep.image") == null) {
|
||||
pushBuildImage.finalizedBy(deleteContainer, deleteBuildImage, deleteTaggedImage)
|
||||
}
|
||||
}
|
||||
}
|
@ -16,7 +16,6 @@ import java.util.concurrent.CompletableFuture
|
||||
import java.util.concurrent.ExecutorService
|
||||
import java.util.concurrent.Executors
|
||||
import java.util.concurrent.TimeUnit
|
||||
import java.util.function.Consumer
|
||||
import java.util.stream.Collectors
|
||||
import java.util.stream.IntStream
|
||||
|
||||
@ -29,6 +28,8 @@ class KubesTest extends DefaultTask {
|
||||
String fullTaskToExecutePath
|
||||
String taskToExecuteName
|
||||
Boolean printOutput = false
|
||||
Integer numberOfCoresPerFork = 4
|
||||
Integer memoryGbPerFork = 6
|
||||
public volatile List<File> testOutput = Collections.emptyList()
|
||||
public volatile List<KubePodResult> containerResults = Collections.emptyList()
|
||||
|
||||
@ -38,7 +39,6 @@ class KubesTest extends DefaultTask {
|
||||
int numberOfPods = 20
|
||||
int timeoutInMinutesForPodToStart = 60
|
||||
|
||||
|
||||
@TaskAction
|
||||
void runTestsOnKubes() {
|
||||
|
||||
@ -53,7 +53,7 @@ class KubesTest extends DefaultTask {
|
||||
|
||||
def currentUser = System.getProperty("user.name") ? System.getProperty("user.name") : "UNKNOWN_USER"
|
||||
|
||||
String stableRunId = new BigInteger(64, new Random(buildId.hashCode() + currentUser.hashCode())).toString(36).toLowerCase()
|
||||
String stableRunId = new BigInteger(64, new Random(buildId.hashCode() + currentUser.hashCode() + taskToExecuteName.hashCode())).toString(36).toLowerCase()
|
||||
String suffix = new BigInteger(64, new Random()).toString(36).toLowerCase()
|
||||
|
||||
io.fabric8.kubernetes.client.Config config = new io.fabric8.kubernetes.client.ConfigBuilder()
|
||||
@ -77,164 +77,179 @@ class KubesTest extends DefaultTask {
|
||||
//it's possible that a pod is being deleted by the original build, this can lead to race conditions
|
||||
}
|
||||
|
||||
|
||||
List<CompletableFuture<KubePodResult>> podCreationFutures = IntStream.range(0, numberOfPods).mapToObj({ i ->
|
||||
CompletableFuture.supplyAsync({
|
||||
File outputFile = Files.createTempFile("container", ".log").toFile()
|
||||
String podName = (taskToExecuteName + "-" + stableRunId + suffix + i).toLowerCase()
|
||||
Pod podRequest = buildPod(podName)
|
||||
project.logger.lifecycle("created pod: " + podName)
|
||||
Pod createdPod = client.pods().inNamespace(namespace).create(podRequest)
|
||||
Runtime.getRuntime().addShutdownHook({
|
||||
println "Deleting pod: " + podName
|
||||
client.pods().delete(createdPod)
|
||||
})
|
||||
CompletableFuture<Void> waiter = new CompletableFuture<Void>()
|
||||
KubePodResult result = new KubePodResult(createdPod, waiter, outputFile)
|
||||
startBuildAndLogging(client, namespace, numberOfPods, i, podName, printOutput, waiter, { int resultCode ->
|
||||
println podName + " has completed with resultCode=$resultCode"
|
||||
result.setResultCode(resultCode)
|
||||
}, outputFile)
|
||||
|
||||
return result
|
||||
}, executorService)
|
||||
List<CompletableFuture<KubePodResult>> futures = IntStream.range(0, numberOfPods).mapToObj({ i ->
|
||||
String podName = (taskToExecuteName + "-" + stableRunId + suffix + i).toLowerCase()
|
||||
runBuild(client, namespace, numberOfPods, i, podName, printOutput, 3)
|
||||
}).collect(Collectors.toList())
|
||||
|
||||
def binaryFileFutures = podCreationFutures.collect { creationFuture ->
|
||||
return creationFuture.thenComposeAsync({ podResult ->
|
||||
return podResult.waiter.thenApply {
|
||||
project.logger.lifecycle("Successfully terminated log streaming for " + podResult.createdPod.getMetadata().getName())
|
||||
println "Gathering test results from ${podResult.createdPod.metadata.name}"
|
||||
def binaryResults = downloadTestXmlFromPod(client, namespace, podResult.createdPod)
|
||||
project.logger.lifecycle("deleting: " + podResult.createdPod.getMetadata().getName())
|
||||
client.resource(podResult.createdPod).delete()
|
||||
return binaryResults
|
||||
}
|
||||
}, singleThreadedExecutor)
|
||||
}
|
||||
|
||||
def allFilesDownloadedFuture = CompletableFuture.allOf(*binaryFileFutures.toArray(new CompletableFuture[0])).thenApply {
|
||||
def allBinaryFiles = binaryFileFutures.collect { future ->
|
||||
Collection<File> binaryFiles = future.get()
|
||||
return binaryFiles
|
||||
}.flatten()
|
||||
this.testOutput = Collections.synchronizedList(allBinaryFiles)
|
||||
return allBinaryFiles
|
||||
}
|
||||
|
||||
allFilesDownloadedFuture.get()
|
||||
this.containerResults = podCreationFutures.collect { it -> it.get() }
|
||||
this.testOutput = Collections.synchronizedList(futures.collect { it -> it.get().binaryResults }.flatten())
|
||||
this.containerResults = futures.collect { it -> it.get() }
|
||||
}
|
||||
|
||||
void startBuildAndLogging(KubernetesClient client,
|
||||
String namespace,
|
||||
int numberOfPods,
|
||||
int podIdx,
|
||||
String podName,
|
||||
boolean printOutput,
|
||||
CompletableFuture<Void> waiter,
|
||||
Consumer<Integer> resultSetter,
|
||||
File outputFileForContainer) {
|
||||
try {
|
||||
project.logger.lifecycle("Waiting for pod " + podName + " to start before executing build")
|
||||
client.pods().inNamespace(namespace).withName(podName).waitUntilReady(timeoutInMinutesForPodToStart, TimeUnit.MINUTES)
|
||||
project.logger.lifecycle("pod " + podName + " has started, executing build")
|
||||
Watch eventWatch = client.pods().inNamespace(namespace).withName(podName).watch(new Watcher<Pod>() {
|
||||
@Override
|
||||
void eventReceived(Watcher.Action action, Pod resource) {
|
||||
project.logger.lifecycle("[StatusChange] pod " + resource.getMetadata().getName() + " " + action.name())
|
||||
}
|
||||
CompletableFuture<KubePodResult> runBuild(KubernetesClient client,
|
||||
String namespace,
|
||||
int numberOfPods,
|
||||
int podIdx,
|
||||
String podName,
|
||||
boolean printOutput,
|
||||
int numberOfRetries) {
|
||||
|
||||
@Override
|
||||
void onClose(KubernetesClientException cause) {
|
||||
}
|
||||
})
|
||||
CompletableFuture<KubePodResult> toReturn = new CompletableFuture<KubePodResult>()
|
||||
|
||||
def stdOutOs = new PipedOutputStream()
|
||||
def stdOutIs = new PipedInputStream(4096)
|
||||
ByteArrayOutputStream errChannelStream = new ByteArrayOutputStream();
|
||||
executorService.submit({
|
||||
int tryCount = 0
|
||||
Pod createdPod = null
|
||||
while (tryCount < numberOfRetries) {
|
||||
try {
|
||||
Pod podRequest = buildPod(podName)
|
||||
project.logger.lifecycle("requesting pod: " + podName)
|
||||
createdPod = client.pods().inNamespace(namespace).create(podRequest)
|
||||
project.logger.lifecycle("scheduled pod: " + podName)
|
||||
File outputFile = Files.createTempFile("container", ".log").toFile()
|
||||
attachStatusListenerToPod(client, namespace, podName)
|
||||
schedulePodForDeleteOnShutdown(podName, client, createdPod)
|
||||
waitForPodToStart(podName, client, namespace)
|
||||
def stdOutOs = new PipedOutputStream()
|
||||
def stdOutIs = new PipedInputStream(4096)
|
||||
ByteArrayOutputStream errChannelStream = new ByteArrayOutputStream();
|
||||
KubePodResult result = new KubePodResult(createdPod, null, outputFile)
|
||||
CompletableFuture<KubePodResult> waiter = new CompletableFuture<>()
|
||||
ExecListener execListener = buildExecListenerForPod(podName, errChannelStream, waiter, result)
|
||||
stdOutIs.connect(stdOutOs)
|
||||
ExecWatch execWatch = client.pods().inNamespace(namespace).withName(podName)
|
||||
.writingOutput(stdOutOs)
|
||||
.writingErrorChannel(errChannelStream)
|
||||
.usingListener(execListener).exec(getBuildCommand(numberOfPods, podIdx))
|
||||
|
||||
def terminatingListener = new ExecListener() {
|
||||
|
||||
@Override
|
||||
void onOpen(Response response) {
|
||||
project.logger.lifecycle("Build started on pod " + podName)
|
||||
}
|
||||
|
||||
@Override
|
||||
void onFailure(Throwable t, Response response) {
|
||||
project.logger.lifecycle("Received error from pod " + podName)
|
||||
waiter.completeExceptionally(t)
|
||||
}
|
||||
|
||||
@Override
|
||||
void onClose(int code, String reason) {
|
||||
project.logger.lifecycle("Received onClose() from pod " + podName + " with returnCode=" + code)
|
||||
startLogPumping(outputFile, stdOutIs, podIdx, printOutput)
|
||||
KubePodResult execResult = waiter.join()
|
||||
project.logger.lifecycle("build has ended on pod ${podName} (${podIdx}/${numberOfPods})")
|
||||
project.logger.lifecycle "Gathering test results from ${execResult.createdPod.metadata.name}"
|
||||
def binaryResults = downloadTestXmlFromPod(client, namespace, execResult.createdPod)
|
||||
project.logger.lifecycle("deleting: " + execResult.createdPod.getMetadata().getName())
|
||||
client.resource(execResult.createdPod).delete()
|
||||
result.binaryResults = binaryResults
|
||||
toReturn.complete(result)
|
||||
break
|
||||
} catch (Exception e) {
|
||||
logger.error("Encountered error during testing cycle on pod ${podName} (${podIdx}/${numberOfPods})", e)
|
||||
try {
|
||||
def errChannelContents = errChannelStream.toString()
|
||||
println errChannelContents
|
||||
Status status = Serialization.unmarshal(errChannelContents, Status.class);
|
||||
resultSetter.accept(status.details?.causes?.first()?.message?.toInteger() ? status.details?.causes?.first()?.message?.toInteger() : 0)
|
||||
waiter.complete()
|
||||
} catch (Exception e) {
|
||||
waiter.completeExceptionally(e)
|
||||
if (createdPod) {
|
||||
client.pods().delete(createdPod)
|
||||
while (client.pods().inNamespace(namespace).list().getItems().find { p -> p.metadata.name == podName }) {
|
||||
logger.warn("pod ${podName} has not been deleted, waiting 1s")
|
||||
Thread.sleep(1000)
|
||||
}
|
||||
}
|
||||
} catch (Exception ignored) {
|
||||
}
|
||||
tryCount++
|
||||
logger.lifecycle("will retry ${podName} another ${numberOfRetries - tryCount} times")
|
||||
}
|
||||
}
|
||||
if (tryCount >= numberOfRetries) {
|
||||
toReturn.completeExceptionally(new RuntimeException("Failed to build in pod ${podName} (${podIdx}/${numberOfPods}) within retry limit"))
|
||||
}
|
||||
})
|
||||
return toReturn
|
||||
}
|
||||
|
||||
stdOutIs.connect(stdOutOs)
|
||||
|
||||
ExecWatch execWatch = client.pods().inNamespace(namespace).withName(podName)
|
||||
.writingOutput(stdOutOs)
|
||||
.writingErrorChannel(errChannelStream)
|
||||
.usingListener(terminatingListener).exec(getBuildCommand(numberOfPods, podIdx))
|
||||
|
||||
project.logger.lifecycle("Pod: " + podName + " has started ")
|
||||
|
||||
Thread loggingThread = new Thread({ ->
|
||||
BufferedWriter out = null
|
||||
BufferedReader br = null
|
||||
try {
|
||||
out = new BufferedWriter(new FileWriter(outputFileForContainer))
|
||||
br = new BufferedReader(new InputStreamReader(stdOutIs))
|
||||
String line
|
||||
while ((line = br.readLine()) != null) {
|
||||
def toWrite = ("${taskToExecuteName}/Container" + podIdx + ": " + line).trim()
|
||||
if (printOutput) {
|
||||
project.logger.lifecycle(toWrite)
|
||||
}
|
||||
out.println(toWrite)
|
||||
void startLogPumping(File outputFile, stdOutIs, podIdx, boolean printOutput) {
|
||||
Thread loggingThread = new Thread({ ->
|
||||
BufferedWriter out = null
|
||||
BufferedReader br = null
|
||||
try {
|
||||
out = new BufferedWriter(new FileWriter(outputFile))
|
||||
br = new BufferedReader(new InputStreamReader(stdOutIs))
|
||||
String line
|
||||
while ((line = br.readLine()) != null) {
|
||||
def toWrite = ("${taskToExecuteName}/Container" + podIdx + ": " + line).trim()
|
||||
if (printOutput) {
|
||||
project.logger.lifecycle(toWrite)
|
||||
}
|
||||
} catch (IOException ignored) {
|
||||
out.println(toWrite)
|
||||
}
|
||||
finally {
|
||||
out?.close()
|
||||
br?.close()
|
||||
}
|
||||
})
|
||||
} catch (IOException ignored) {
|
||||
}
|
||||
finally {
|
||||
out?.close()
|
||||
br?.close()
|
||||
}
|
||||
})
|
||||
|
||||
loggingThread.setDaemon(true)
|
||||
loggingThread.start()
|
||||
} catch (InterruptedException ignored) {
|
||||
throw new GradleException("Could not get slot on cluster within timeout")
|
||||
loggingThread.setDaemon(true)
|
||||
loggingThread.start()
|
||||
}
|
||||
|
||||
ExecListener buildExecListenerForPod(podName, errChannelStream, CompletableFuture<KubePodResult> waitingFuture, KubePodResult result) {
|
||||
|
||||
new ExecListener() {
|
||||
@Override
|
||||
void onOpen(Response response) {
|
||||
project.logger.lifecycle("Build started on pod " + podName)
|
||||
}
|
||||
|
||||
@Override
|
||||
void onFailure(Throwable t, Response response) {
|
||||
project.logger.lifecycle("Received error from pod " + podName)
|
||||
waitingFuture.completeExceptionally(t)
|
||||
}
|
||||
|
||||
@Override
|
||||
void onClose(int code, String reason) {
|
||||
project.logger.lifecycle("Received onClose() from pod " + podName + " with returnCode=" + code)
|
||||
try {
|
||||
def errChannelContents = errChannelStream.toString()
|
||||
Status status = Serialization.unmarshal(errChannelContents, Status.class);
|
||||
result.resultCode = status.details?.causes?.first()?.message?.toInteger() ? status.details?.causes?.first()?.message?.toInteger() : 0
|
||||
waitingFuture.complete(result)
|
||||
} catch (Exception e) {
|
||||
waitingFuture.completeExceptionally(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void schedulePodForDeleteOnShutdown(String podName, client, Pod createdPod) {
|
||||
project.logger.info("attaching shutdown hook for pod ${podName}")
|
||||
Runtime.getRuntime().addShutdownHook({
|
||||
println "Deleting pod: " + podName
|
||||
client.pods().delete(createdPod)
|
||||
})
|
||||
}
|
||||
|
||||
Watch attachStatusListenerToPod(KubernetesClient client, String namespace, String podName) {
|
||||
client.pods().inNamespace(namespace).withName(podName).watch(new Watcher<Pod>() {
|
||||
@Override
|
||||
void eventReceived(Watcher.Action action, Pod resource) {
|
||||
project.logger.lifecycle("[StatusChange] pod ${resource.getMetadata().getName()} ${action.name()} (${resource.status.phase})")
|
||||
}
|
||||
|
||||
@Override
|
||||
void onClose(KubernetesClientException cause) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
void waitForPodToStart(String podName, KubernetesClient client, String namespace) {
|
||||
project.logger.lifecycle("Waiting for pod " + podName + " to start before executing build")
|
||||
client.pods().inNamespace(namespace).withName(podName).waitUntilReady(timeoutInMinutesForPodToStart, TimeUnit.MINUTES)
|
||||
project.logger.lifecycle("pod " + podName + " has started, executing build")
|
||||
}
|
||||
|
||||
Pod buildPod(String podName) {
|
||||
return new PodBuilder().withNewMetadata().withName(podName).endMetadata()
|
||||
.withNewSpec()
|
||||
.addNewVolume()
|
||||
.withName("gradlecache")
|
||||
.withNewHostPath()
|
||||
.withPath("/gradle")
|
||||
.withPath("/tmp/gradle")
|
||||
.withType("DirectoryOrCreate")
|
||||
.endHostPath()
|
||||
.endVolume()
|
||||
.addNewContainer()
|
||||
.withImage(dockerTag)
|
||||
.withCommand("bash")
|
||||
//max container life time is 30min
|
||||
.withArgs("-c", "sleep 1800")
|
||||
.withArgs("-c", "sleep 3600")
|
||||
.addNewEnv()
|
||||
.withName("DRIVER_NODE_MEMORY")
|
||||
.withValue("1024m")
|
||||
@ -243,8 +258,8 @@ class KubesTest extends DefaultTask {
|
||||
.endEnv()
|
||||
.withName(podName)
|
||||
.withNewResources()
|
||||
.addToRequests("cpu", new Quantity("2"))
|
||||
.addToRequests("memory", new Quantity("6Gi"))
|
||||
.addToRequests("cpu", new Quantity("${numberOfCoresPerFork}"))
|
||||
.addToRequests("memory", new Quantity("${memoryGbPerFork}Gi"))
|
||||
.endResources()
|
||||
.addNewVolumeMount()
|
||||
.withName("gradlecache")
|
||||
@ -276,7 +291,7 @@ class KubesTest extends DefaultTask {
|
||||
tempDir.toFile().mkdirs()
|
||||
}
|
||||
|
||||
project.logger.lifecycle("saving to " + podName + " results to: " + tempDir.toAbsolutePath().toFile().getAbsolutePath())
|
||||
project.logger.lifecycle("Saving " + podName + " results to: " + tempDir.toAbsolutePath().toFile().getAbsolutePath())
|
||||
client.pods()
|
||||
.inNamespace(namespace)
|
||||
.withName(podName)
|
||||
|
@@ -0,0 +1,41 @@
package net.corda.testing

import org.gradle.api.DefaultTask
import org.gradle.api.tasks.TaskAction

class ParallelTestGroup extends DefaultTask {

    List<String> groups = new ArrayList<>()
    int shardCount = 20
    int coresToUse = 4
    int gbOfMemory = 4
    boolean printToStdOut = true

    void numberOfShards(int shards) {
        this.shardCount = shards
    }

    void coresPerFork(int cores) {
        this.coresToUse = cores
    }

    void memoryInGbPerFork(int gb) {
        this.gbOfMemory = gb
    }

    // when this is false, only containers with "failed" exit codes will be printed to stdout
    void streamOutput(boolean print) {
        this.printToStdOut = print
    }

    void testGroups(String... group) {
        testGroups(group.toList())
    }

    void testGroups(List<String> group) {
        group.forEach {
            groups.add(it)
        }
    }

}
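For context, the root build.gradle diff earlier on this page consumes this task type as follows (copied from that diff); the DistributedTesting plugin then turns each declared group into a userDefined<Name> KubesTest task with a matching KubesReporting task:

    task allParallelUnitAndIntegrationTest(type: ParallelTestGroup) {
        testGroups "test", "integrationTest"
        numberOfShards 20
        streamOutput false
        coresPerFork 6
        memoryInGbPerFork 10
    }
    apply plugin: DistributedTesting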
@@ -3,6 +3,8 @@ package net.corda.testing;
import io.fabric8.kubernetes.api.model.Pod;

import java.io.File;
import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.CompletableFuture;

public class KubePodResult {
@@ -11,6 +13,7 @@ public class KubePodResult {
    private final CompletableFuture<Void> waiter;
    private volatile Integer resultCode = 255;
    private final File output;
    private volatile Collection<File> binaryResults = Collections.emptyList();

    KubePodResult(Pod createdPod, CompletableFuture<Void> waiter, File output) {
        this.createdPod = createdPod;
@@ -16,6 +16,7 @@

package net.corda.testing;

import org.apache.commons.compress.utils.IOUtils;
import org.gradle.api.DefaultTask;
import org.gradle.api.GradleException;
import org.gradle.api.Transformer;
@@ -33,6 +34,8 @@ import org.gradle.internal.operations.BuildOperationExecutor;

import javax.inject.Inject;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
@@ -49,6 +52,7 @@ public class KubesReporting extends DefaultTask {
    private File destinationDir = new File(getProject().getBuildDir(), "test-reporting");
    private List<Object> results = new ArrayList<Object>();
    List<KubePodResult> podResults = new ArrayList<>();
    boolean shouldPrintOutput = true;

    public KubesReporting() {
        //force this task to always run, as it's responsible for parsing exit codes
@@ -147,12 +151,17 @@ public class KubesReporting extends DefaultTask {

        if (!containersWithNonZeroReturnCodes.isEmpty()) {
            String reportUrl = new ConsoleRenderer().asClickableFileUrl(new File(destinationDir, "index.html"));

            String containerOutputs = containersWithNonZeroReturnCodes.stream().map(KubePodResult::getOutput).map(file -> new ConsoleRenderer().asClickableFileUrl(file)).reduce("",
                    (s, s2) -> s + "\n" + s2
            );

            String message = "remote build failed, check test report at " + reportUrl + "\n and container outputs at " + containerOutputs;
            if (shouldPrintOutput) {
                containersWithNonZeroReturnCodes.forEach(container -> {
                    try {
                        System.out.println("\n##### CONTAINER OUTPUT START #####");
                        IOUtils.copy(new FileInputStream(container.getOutput()), System.out);
                        System.out.println("##### CONTAINER OUTPUT END #####\n");
                    } catch (IOException ignored) {
                    }
                });
            }
            String message = "remote build failed, check test report at " + reportUrl;
            throw new GradleException(message);
        }
    } else {
@ -1,3 +1,17 @@
|
||||
// JDK 11 JavaFX
|
||||
plugins {
|
||||
id 'org.openjfx.javafxplugin' version '0.0.7' apply false
|
||||
}
|
||||
|
||||
if (JavaVersion.current().isJava9Compatible()) {
|
||||
apply plugin: 'org.openjfx.javafxplugin'
|
||||
javafx {
|
||||
version = "11.0.2"
|
||||
modules = ['javafx.controls',
|
||||
'javafx.fxml'
|
||||
]
|
||||
}
|
||||
}
|
||||
apply plugin: 'kotlin'
|
||||
apply plugin: 'net.corda.plugins.quasar-utils'
|
||||
apply plugin: 'net.corda.plugins.publish-utils'
|
||||
|
@ -0,0 +1,19 @@
|
||||
package net.corda.client.jfx.utils;
|
||||
|
||||
import javafx.collections.ObservableList;
|
||||
import kotlin.jvm.functions.Function1;
|
||||
import kotlin.jvm.functions.Function2;
|
||||
import org.jetbrains.annotations.NotNull;
|
||||
|
||||
// Java 9 introduces a new abstract method that we need to override (without using the explicit Kotlin `override` keyword) to be backwards compatible
|
||||
// https://docs.oracle.com/javase/9/docs/api/javafx/collections/transformation/TransformationList.html#getViewIndex-int-
|
||||
public class AggregatedList<A,E,K> extends AbstractAggregatedList<A,E,K> {
|
||||
@SuppressWarnings("unchecked")
|
||||
public AggregatedList(@NotNull ObservableList<? extends E> list, @NotNull Function1<E,K> toKey, @NotNull Function2<K,ObservableList<E>,A> assemble) {
|
||||
super(list, toKey, assemble);
|
||||
}
|
||||
|
||||
public int getViewIndex(int i) {
|
||||
return 0;
|
||||
}
|
||||
}
|
@ -0,0 +1,16 @@
|
||||
package net.corda.client.jfx.utils;
|
||||
|
||||
import javafx.collections.ObservableList;
|
||||
import org.jetbrains.annotations.NotNull;
|
||||
|
||||
// Java 9 introduces a new abstract method that we need to override (without using the explicit Kotlin `override` keyword) to be backwards compatible
|
||||
// https://docs.oracle.com/javase/9/docs/api/javafx/collections/transformation/TransformationList.html#getViewIndex-int-
|
||||
public class ConcatenatedList<A> extends AbstractConcatenatedList<A> {
|
||||
public ConcatenatedList(@NotNull ObservableList<ObservableList<A>> sourceList) {
|
||||
super(sourceList);
|
||||
}
|
||||
|
||||
public int getViewIndex(int i) {
|
||||
return 0;
|
||||
}
|
||||
}
|
@ -0,0 +1,17 @@
|
||||
package net.corda.client.jfx.utils;
|
||||
|
||||
import javafx.collections.ObservableList;
|
||||
import org.jetbrains.annotations.NotNull;
|
||||
|
||||
// Java 9 introduces a new abstract method that we need to override (without using the explicit Kotlin `override` keyword) to be backwards compatible
|
||||
// https://docs.oracle.com/javase/9/docs/api/javafx/collections/transformation/TransformationList.html#getViewIndex-int-
|
||||
public class FlattenedList extends AbstractFlattenedList {
|
||||
@SuppressWarnings("unchecked")
|
||||
public FlattenedList(@NotNull ObservableList sourceList) {
|
||||
super(sourceList);
|
||||
}
|
||||
|
||||
public int getViewIndex(int i) {
|
||||
return 0;
|
||||
}
|
||||
}
|
@ -0,0 +1,17 @@
|
||||
package net.corda.client.jfx.utils;
|
||||
|
||||
import javafx.collections.ObservableList;
|
||||
import kotlin.jvm.functions.Function1;
|
||||
import org.jetbrains.annotations.NotNull;
|
||||
|
||||
// Java 9 introduces a new abstract method that we need to override (without using the explicit Kotlin `override` keyword) to be backwards compatible
|
||||
// https://docs.oracle.com/javase/9/docs/api/javafx/collections/transformation/TransformationList.html#getViewIndex-int-
|
||||
public class MappedList<A,B> extends AbstractMappedList<A,B> {
|
||||
public MappedList(@NotNull ObservableList<A> list, @NotNull Function1<A,B> function) {
|
||||
super(list, function);
|
||||
}
|
||||
|
||||
public int getViewIndex(int i) {
|
||||
return 0;
|
||||
}
|
||||
}
|
@ -0,0 +1,16 @@
|
||||
package net.corda.client.jfx.utils;
|
||||
|
||||
import javafx.collections.ObservableList;
|
||||
import org.jetbrains.annotations.NotNull;
|
||||
|
||||
// Java 9 introduces a new abstract method that we need to override (without using the explicit Kotlin `override` keyword) to be backwards compatible
|
||||
// https://docs.oracle.com/javase/9/docs/api/javafx/collections/transformation/TransformationList.html#getViewIndex-int-
|
||||
public class ReplayedList<A> extends AbstractReplayedList<A> {
|
||||
public ReplayedList(@NotNull ObservableList<A> sourceList) {
|
||||
super(sourceList);
|
||||
}
|
||||
|
||||
public int getViewIndex(int i) {
|
||||
return 0;
|
||||
}
|
||||
}
|
@ -63,7 +63,7 @@ object Models {
|
||||
*/
|
||||
private val dependencyGraph = HashMap<KClass<*>, MutableSet<KClass<*>>>()
|
||||
|
||||
fun <M : Any> initModel(klass: KClass<M>) = modelStore.getOrPut(klass) { klass.java.newInstance() }
|
||||
fun <M : Any> initModel(klass: KClass<M>) = modelStore.getOrPut(klass) { klass.java.getDeclaredConstructor().newInstance() }
|
||||
fun <M : Any> get(klass: KClass<M>, origin: KClass<*>): M {
|
||||
dependencyGraph.getOrPut(origin) { mutableSetOf() }.add(klass)
|
||||
val model = initModel(klass)
|
||||
|
@ -1,6 +1,7 @@
|
||||
package net.corda.client.jfx.model
|
||||
|
||||
import javafx.beans.property.SimpleObjectProperty
|
||||
import net.corda.client.rpc.CordaRPCClientConfiguration
|
||||
import net.corda.client.rpc.internal.ReconnectingCordaRPCOps
|
||||
import net.corda.core.contracts.ContractState
|
||||
import net.corda.core.flows.StateMachineRunId
|
||||
@ -71,7 +72,7 @@ class NodeMonitorModel : AutoCloseable {
|
||||
* TODO provide an unsubscribe mechanism
|
||||
*/
|
||||
fun register(nodeHostAndPort: NetworkHostAndPort, username: String, password: String) {
|
||||
rpc = ReconnectingCordaRPCOps(nodeHostAndPort, username, password)
|
||||
rpc = ReconnectingCordaRPCOps(nodeHostAndPort, username, password, CordaRPCClientConfiguration.DEFAULT)
|
||||
|
||||
proxyObservable.value = rpc
|
||||
|
||||
|
@ -6,7 +6,7 @@ import javafx.collections.ObservableList
|
||||
import javafx.collections.transformation.TransformationList
|
||||
|
||||
/**
|
||||
* Given an [ObservableList]<[E]> and a grouping key [K], [AggregatedList] groups the elements by the key into a fresh
|
||||
* Given an [ObservableList]<[E]> and a grouping key [K], [AbstractAggregatedList] groups the elements by the key into a fresh
|
||||
* [ObservableList]<[E]> for each group and exposes the groups as an observable list of [A]s by calling [assemble] on each.
|
||||
*
|
||||
* Changes done to elements of the input list are reflected in the observable list of the respective group, whereas
|
||||
@ -36,8 +36,8 @@ import javafx.collections.transformation.TransformationList
|
||||
* @param toKey Function to extract the key from an element.
|
||||
* @param assemble Function to assemble the aggregation into the exposed [A].
|
||||
*/
|
||||
class AggregatedList<A, E : Any, K : Any>(
|
||||
list: ObservableList<out E>,
|
||||
abstract class AbstractAggregatedList<A, E : Any, K : Any>(
|
||||
val list: ObservableList<out E>,
|
||||
val toKey: (E) -> K,
|
||||
val assemble: (K, ObservableList<E>) -> A
|
||||
) : TransformationList<A, E>(list) {
|
@ -7,13 +7,14 @@ import javafx.collections.transformation.TransformationList
|
||||
import java.util.*
|
||||
|
||||
/**
|
||||
* [ConcatenatedList] takes a list of lists and concatenates them. Any change to the underlying lists or the outer list
|
||||
* [AbstractConcatenatedList] takes a list of lists and concatenates them. Any change to the underlying lists or the outer list
|
||||
* is propagated as expected.
|
||||
*/
|
||||
class ConcatenatedList<A>(sourceList: ObservableList<ObservableList<A>>) : TransformationList<A, ObservableList<A>>(sourceList) {
|
||||
abstract class AbstractConcatenatedList<A>(sourceList: ObservableList<ObservableList<A>>) : TransformationList<A, ObservableList<A>>(sourceList) {
|
||||
|
||||
// A wrapper for input lists so we hash differently even if a list is reused in the input.
|
||||
@VisibleForTesting
|
||||
internal class WrappedObservableList<A>(
|
||||
class WrappedObservableList<A>(
|
||||
val observableList: ObservableList<A>
|
||||
)
|
||||
// First let's clarify some concepts as it's easy to confuse which list we're handling where.
|
||||
@ -37,9 +38,9 @@ class ConcatenatedList<A>(sourceList: ObservableList<ObservableList<A>>) : Trans
|
||||
// Note that similar to 'nestedIndexOffsets', 'startingOffsetOf' also isn't a one-to-one mapping because of
|
||||
// potentially several empty nested lists.
|
||||
@VisibleForTesting
|
||||
internal val indexMap = HashMap<WrappedObservableList<out A>, Pair<Int, ListChangeListener<A>>>()
|
||||
val indexMap = HashMap<WrappedObservableList<out A>, Pair<Int, ListChangeListener<A>>>()
|
||||
@VisibleForTesting
|
||||
internal val nestedIndexOffsets = ArrayList<Int>(sourceList.size)
|
||||
val nestedIndexOffsets = ArrayList<Int>(sourceList.size)
|
||||
|
||||
init {
|
||||
var offset = 0
|
@ -8,11 +8,10 @@ import javafx.collections.transformation.TransformationList
|
||||
import java.util.*
|
||||
|
||||
/**
|
||||
* [FlattenedList] flattens the passed in list of [ObservableValue]s so that changes in individual updates to the values
|
||||
* [AbstractFlattenedList] flattens the passed in list of [ObservableValue]s so that changes in individual updates to the values
|
||||
* are reflected in the exposed list as expected.
|
||||
*/
|
||||
class FlattenedList<A>(val sourceList: ObservableList<out ObservableValue<out A>>) : TransformationList<A, ObservableValue<out A>>(sourceList) {
|
||||
|
||||
abstract class AbstractFlattenedList<A>(val sourceList: ObservableList<out ObservableValue<out A>>) : TransformationList<A, ObservableValue<out A>>(sourceList) {
|
||||
/**
|
||||
* We maintain an ObservableValue->index map. This is needed because we need the ObservableValue's index in order to
|
||||
* propagate a change and if the listener closure captures the index at the time of the call to
|
@ -10,7 +10,8 @@ import java.util.*
|
||||
* when an element is inserted or updated.
|
||||
* Use this instead of [EasyBind.map] to trade off memory vs CPU, or if (god forbid) the mapped function is side-effecting.
|
||||
*/
|
||||
class MappedList<A, B>(list: ObservableList<A>, val function: (A) -> B) : TransformationList<B, A>(list) {
|
||||
abstract class AbstractMappedList<A, B>(list: ObservableList<A>, val function: (A) -> B) : TransformationList<B, A>(list) {
|
||||
|
||||
private val backingList = ArrayList<B>(list.size)
|
||||
|
||||
init {
|
@ -9,7 +9,7 @@ import java.util.*
|
||||
* This list type just replays changes propagated from the underlying source list. Used for testing changes and backing a
|
||||
* non-backed observable
|
||||
*/
|
||||
class ReplayedList<A>(sourceList: ObservableList<A>) : TransformationList<A, A>(sourceList) {
|
||||
abstract class AbstractReplayedList<A>(sourceList: ObservableList<A>) : TransformationList<A, A>(sourceList) {
|
||||
|
||||
val replayedList = ArrayList<A>(sourceList)
|
||||
|
@ -143,14 +143,16 @@ fun <A, B> ObservableList<out A>.foldObservable(initial: B, folderFunction: (B,
|
||||
* val people: ObservableList<Person> = (..)
|
||||
* val heights: ObservableList<Long> = people.map(Person::height).flatten()
|
||||
*/
|
||||
fun <A> ObservableList<out ObservableValue<out A>>.flatten(): ObservableList<A> = FlattenedList(this)
|
||||
@Suppress("UNCHECKED_CAST")
|
||||
fun <A> ObservableList<out ObservableValue<out A>>.flatten(): ObservableList<A> = FlattenedList(this) as ObservableList<A>
|
||||
|
||||
/**
|
||||
* data class Person(val height: ObservableValue<Long>)
|
||||
* val people: List<Person> = listOf(alice, bob)
|
||||
* val heights: ObservableList<Long> = people.map(Person::height).sequence()
|
||||
*/
|
||||
fun <A> Collection<ObservableValue<out A>>.sequence(): ObservableList<A> = FlattenedList(FXCollections.observableArrayList(this))
|
||||
@Suppress("UNCHECKED_CAST")
|
||||
fun <A> Collection<ObservableValue<out A>>.sequence(): ObservableList<A> = FlattenedList(FXCollections.observableArrayList(this)) as ObservableList<A>
|
||||
|
||||
/**
|
||||
* data class Person(val height: Long)
|
||||
@ -173,9 +175,12 @@ fun <K, A> ObservableList<out A>.associateBy(toKey: (A) -> K): ObservableMap<K,
|
||||
* val people: ObservableList<Person> = (..)
|
||||
* val heightToNames: ObservableMap<Long, ObservableList<String>> = people.associateByAggregation(Person::height) { name, person -> person.name }
|
||||
*/
|
||||
@Suppress("UNCHECKED_CAST")
|
||||
fun <K : Any, A : Any, B> ObservableList<out A>.associateByAggregation(toKey: (A) -> K, assemble: (K, A) -> B): ObservableMap<K, ObservableList<B>> {
|
||||
return AssociatedList(AggregatedList(this, toKey) { key, members -> Pair(key, members) }, { it.first }) { key, pair ->
|
||||
pair.second.map { assemble(key, it) }
|
||||
@Suppress("UNCHECKED_CAST")
|
||||
val sourceList = AggregatedList(this, toKey) { key, members -> Pair(key, members) } as AggregatedList<Pair<K, ObservableList<A>>, A, K>
|
||||
return AssociatedList(sourceList, { (it as Pair<K,ObservableList<A>>).first }) { key, pair ->
|
||||
(pair as Pair<K,ObservableList<A>>).second.map { assemble(key, it) }
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -16,7 +16,8 @@ class FlattenedListTest {
|
||||
@Before
|
||||
fun setup() {
|
||||
sourceList = FXCollections.observableArrayList(SimpleObjectProperty(1234))
|
||||
flattenedList = FlattenedList(sourceList)
|
||||
@Suppress("UNCHECKED_CAST")
|
||||
flattenedList = FlattenedList(sourceList) as ObservableList<Int>
|
||||
replayedList = ReplayedList(flattenedList)
|
||||
}
|
||||
|
||||
|
@ -98,6 +98,9 @@ dependencies {
|
||||
smokeTestImplementation "junit:junit:$junit_version"
|
||||
smokeTestRuntimeOnly "org.junit.vintage:junit-vintage-engine:${junit_vintage_version}"
|
||||
smokeTestRuntimeOnly "org.junit.platform:junit-platform-launcher:${junit_platform_version}"
|
||||
|
||||
// JDK11: required by Quasar at run-time
|
||||
smokeTestRuntimeOnly "com.esotericsoftware:kryo:4.0.2"
|
||||
}
|
||||
|
||||
task integrationTest(type: Test) {
|
||||
|
@ -3,6 +3,7 @@ package net.corda.client.rpcreconnect
|
||||
import net.corda.client.rpc.CordaRPCClient
|
||||
import net.corda.client.rpc.CordaRPCClientConfiguration
|
||||
import net.corda.client.rpc.CordaRPCClientTest
|
||||
import net.corda.client.rpc.GracefulReconnect
|
||||
import net.corda.client.rpc.internal.ReconnectingCordaRPCOps
|
||||
import net.corda.core.messaging.startTrackedFlow
|
||||
import net.corda.core.utilities.NetworkHostAndPort
|
||||
@ -30,6 +31,8 @@ class CordaRPCClientReconnectionTest {
|
||||
|
||||
private val portAllocator = incrementalPortAllocation()
|
||||
|
||||
private val gracefulReconnect = GracefulReconnect()
|
||||
|
||||
companion object {
|
||||
val rpcUser = User("user1", "test", permissions = setOf(Permissions.all()))
|
||||
}
|
||||
@ -53,7 +56,7 @@ class CordaRPCClientReconnectionTest {
|
||||
maxReconnectAttempts = 5
|
||||
))
|
||||
|
||||
(client.start(rpcUser.username, rpcUser.password, gracefulReconnect = true).proxy as ReconnectingCordaRPCOps).use {
|
||||
(client.start(rpcUser.username, rpcUser.password, gracefulReconnect = gracefulReconnect).proxy as ReconnectingCordaRPCOps).use {
|
||||
val rpcOps = it
|
||||
val networkParameters = rpcOps.networkParameters
|
||||
val cashStatesFeed = rpcOps.vaultTrack(Cash.State::class.java)
|
||||
@ -68,7 +71,7 @@ class CordaRPCClientReconnectionTest {
|
||||
val networkParametersAfterCrash = rpcOps.networkParameters
|
||||
assertThat(networkParameters).isEqualTo(networkParametersAfterCrash)
|
||||
assertTrue {
|
||||
latch.await(2, TimeUnit.SECONDS)
|
||||
latch.await(20, TimeUnit.SECONDS)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -93,7 +96,7 @@ class CordaRPCClientReconnectionTest {
|
||||
maxReconnectAttempts = 5
|
||||
))
|
||||
|
||||
(client.start(rpcUser.username, rpcUser.password, gracefulReconnect = true).proxy as ReconnectingCordaRPCOps).use {
|
||||
(client.start(rpcUser.username, rpcUser.password, gracefulReconnect = gracefulReconnect).proxy as ReconnectingCordaRPCOps).use {
|
||||
val rpcOps = it
|
||||
val cashStatesFeed = rpcOps.vaultTrack(Cash.State::class.java)
|
||||
val subscription = cashStatesFeed.updates.subscribe { latch.countDown() }
|
||||
@ -133,7 +136,7 @@ class CordaRPCClientReconnectionTest {
|
||||
maxReconnectAttempts = 5
|
||||
))
|
||||
|
||||
(client.start(rpcUser.username, rpcUser.password, gracefulReconnect = true).proxy as ReconnectingCordaRPCOps).use {
|
||||
(client.start(rpcUser.username, rpcUser.password, gracefulReconnect = gracefulReconnect).proxy as ReconnectingCordaRPCOps).use {
|
||||
val rpcOps = it
|
||||
val networkParameters = rpcOps.networkParameters
|
||||
val cashStatesFeed = rpcOps.vaultTrack(Cash.State::class.java)
|
||||
|
@ -41,8 +41,20 @@ class CordaRPCConnection private constructor(
|
||||
|
||||
companion object {
|
||||
@CordaInternal
|
||||
internal fun createWithGracefulReconnection(username: String, password: String, addresses: List<NetworkHostAndPort>): CordaRPCConnection {
|
||||
return CordaRPCConnection(null, ReconnectingCordaRPCOps(addresses, username, password))
|
||||
internal fun createWithGracefulReconnection(
|
||||
username: String,
|
||||
password: String,
|
||||
addresses: List<NetworkHostAndPort>,
|
||||
rpcConfiguration: CordaRPCClientConfiguration,
|
||||
gracefulReconnect: GracefulReconnect
|
||||
): CordaRPCConnection {
|
||||
return CordaRPCConnection(null, ReconnectingCordaRPCOps(
|
||||
addresses,
|
||||
username,
|
||||
password,
|
||||
rpcConfiguration,
|
||||
gracefulReconnect
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
@ -241,6 +253,20 @@ open class CordaRPCClientConfiguration @JvmOverloads constructor(
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* GracefulReconnect provides the opportunity to perform certain logic when the RPC encounters a connection disconnect
|
||||
* during communication with the node.
|
||||
*
|
||||
* NOTE: The callbacks provided may be executed on a separate thread to that which called the RPC command.
|
||||
*
|
||||
* @param onDisconnect implement this callback to perform logic when the RPC connection is unexpectedly disconnected
|
||||
* @param onReconnect implement this callback to perform logic when the RPC has reconnected after an unexpected disconnect
|
||||
*/
|
||||
class GracefulReconnect(val onDisconnect: () -> Unit = {}, val onReconnect: () -> Unit = {}) {
|
||||
constructor(onDisconnect: Runnable, onReconnect: Runnable ) :
|
||||
this(onDisconnect = { onDisconnect.run() }, onReconnect = { onReconnect.run() })
|
||||
}
|
||||
|
||||
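The reconnection tests later in this change show the intended call pattern for the class above: build a GracefulReconnect carrying the two callbacks and pass it to CordaRPCClient.start. A minimal Kotlin sketch, assuming a reachable node and valid RPC credentials — nodeAddress, username and password are placeholders:

    val gracefulReconnect = GracefulReconnect(
            onDisconnect = { println("RPC disconnected, pausing submissions") },
            onReconnect = { println("RPC reconnected, resuming") }
    )
    CordaRPCClient(nodeAddress)
            .start(username, password, gracefulReconnect = gracefulReconnect)
            .use { connection ->
                val rpcOps = connection.proxy   // retries transparently across disconnects
                println(rpcOps.networkParameters)
            }

As the note above warns, the callbacks may fire on a different thread from the one issuing the RPC calls.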
/**
|
||||
* An RPC client connects to the specified server and allows you to make calls to the server that perform various
|
||||
* useful tasks. Please see the Client RPC section of docs.corda.net to learn more about how this API works. A brief
|
||||
@ -371,11 +397,11 @@ class CordaRPCClient private constructor(
|
||||
*
|
||||
* @param username The username to authenticate with.
|
||||
* @param password The password to authenticate with.
|
||||
* @param gracefulReconnect whether the connection will reconnect gracefully.
|
||||
* @param gracefulReconnect a [GracefulReconnect] instance containing callback logic to run when the RPC is disconnected or reconnected unexpectedly
|
||||
* @throws RPCException if the server version is too low or if the server isn't reachable within a reasonable timeout.
|
||||
*/
|
||||
@JvmOverloads
|
||||
fun start(username: String, password: String, gracefulReconnect: Boolean = false): CordaRPCConnection {
|
||||
fun start(username: String, password: String, gracefulReconnect: GracefulReconnect? = null): CordaRPCConnection {
|
||||
return start(username, password, null, null, gracefulReconnect)
|
||||
}
|
||||
|
||||
@ -388,11 +414,11 @@ class CordaRPCClient private constructor(
|
||||
* @param username The username to authenticate with.
|
||||
* @param password The password to authenticate with.
|
||||
* @param targetLegalIdentity in case of multi-identity RPC endpoint specific legal identity to which the calls must be addressed.
|
||||
* @param gracefulReconnect whether the connection will reconnect gracefully.
|
||||
* @param gracefulReconnect a [GracefulReconnect] instance containing callback logic to run when the RPC is disconnected or reconnected unexpectedly
|
||||
* @throws RPCException if the server version is too low or if the server isn't reachable within a reasonable timeout.
|
||||
*/
|
||||
@JvmOverloads
|
||||
fun start(username: String, password: String, targetLegalIdentity: CordaX500Name, gracefulReconnect: Boolean = false): CordaRPCConnection {
|
||||
fun start(username: String, password: String, targetLegalIdentity: CordaX500Name, gracefulReconnect: GracefulReconnect? = null): CordaRPCConnection {
|
||||
return start(username, password, null, null, targetLegalIdentity, gracefulReconnect)
|
||||
}
|
||||
|
||||
@ -406,11 +432,11 @@ class CordaRPCClient private constructor(
|
||||
* @param password The password to authenticate with.
|
||||
* @param externalTrace external [Trace] for correlation.
|
||||
* @param impersonatedActor the actor on behalf of which all the invocations will be made.
|
||||
* @param gracefulReconnect whether the connection will reconnect gracefully.
|
||||
* @param gracefulReconnect a [GracefulReconnect] instance containing callback logic to run when the RPC is disconnected or reconnected unexpectedly
|
||||
* @throws RPCException if the server version is too low or if the server isn't reachable within a reasonable timeout.
|
||||
*/
|
||||
@JvmOverloads
|
||||
fun start(username: String, password: String, externalTrace: Trace?, impersonatedActor: Actor?, gracefulReconnect: Boolean = false): CordaRPCConnection {
|
||||
fun start(username: String, password: String, externalTrace: Trace?, impersonatedActor: Actor?, gracefulReconnect: GracefulReconnect? = null): CordaRPCConnection {
|
||||
return start(username, password, externalTrace, impersonatedActor, null, gracefulReconnect)
|
||||
}
|
||||
|
||||
@ -425,19 +451,21 @@ class CordaRPCClient private constructor(
|
||||
* @param externalTrace external [Trace] for correlation.
|
||||
* @param impersonatedActor the actor on behalf of which all the invocations will be made.
|
||||
* @param targetLegalIdentity in case of multi-identity RPC endpoint specific legal identity to which the calls must be addressed.
|
||||
* @param gracefulReconnect whether the connection will reconnect gracefully.
|
||||
* @param gracefulReconnect a [GracefulReconnect] instance containing callback logic to run when the RPC is disconnected or reconnected unexpectedly.
|
||||
* Note that when using graceful reconnect the values for [CordaRPCClientConfiguration.connectionMaxRetryInterval] and
|
||||
* [CordaRPCClientConfiguration.maxReconnectAttempts] will be overridden in order to manage the reconnects.
|
||||
* @throws RPCException if the server version is too low or if the server isn't reachable within a reasonable timeout.
|
||||
*/
|
||||
@JvmOverloads
|
||||
fun start(username: String, password: String, externalTrace: Trace?, impersonatedActor: Actor?, targetLegalIdentity: CordaX500Name?, gracefulReconnect: Boolean = false): CordaRPCConnection {
|
||||
fun start(username: String, password: String, externalTrace: Trace?, impersonatedActor: Actor?, targetLegalIdentity: CordaX500Name?, gracefulReconnect: GracefulReconnect? = null): CordaRPCConnection {
|
||||
val addresses = if (haAddressPool.isEmpty()) {
|
||||
listOf(hostAndPort!!)
|
||||
} else {
|
||||
haAddressPool
|
||||
}
|
||||
|
||||
return if (gracefulReconnect) {
|
||||
CordaRPCConnection.createWithGracefulReconnection(username, password, addresses)
|
||||
return if (gracefulReconnect != null) {
|
||||
CordaRPCConnection.createWithGracefulReconnection(username, password, addresses, configuration, gracefulReconnect)
|
||||
} else {
|
||||
CordaRPCConnection(getRpcClient().start(InternalCordaRPCOps::class.java, username, password, externalTrace, impersonatedActor, targetLegalIdentity))
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
package net.corda.client.rpc.internal
|
||||
|
||||
import net.corda.client.rpc.*
|
||||
import net.corda.client.rpc.internal.ReconnectingCordaRPCOps.ReconnectingRPCConnection.CurrentState.*
|
||||
import net.corda.client.rpc.reconnect.CouldNotStartFlowException
|
||||
import net.corda.core.flows.StateMachineRunId
|
||||
import net.corda.core.internal.div
|
||||
@ -11,12 +12,14 @@ import net.corda.core.messaging.ClientRpcSslOptions
|
||||
import net.corda.core.messaging.CordaRPCOps
|
||||
import net.corda.core.messaging.DataFeed
|
||||
import net.corda.core.messaging.FlowHandle
|
||||
import net.corda.core.utilities.*
|
||||
import net.corda.core.utilities.NetworkHostAndPort
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.core.utilities.debug
|
||||
import net.corda.core.utilities.seconds
|
||||
import net.corda.nodeapi.exceptions.RejectedCommandException
|
||||
import org.apache.activemq.artemis.api.core.ActiveMQConnectionTimedOutException
|
||||
import org.apache.activemq.artemis.api.core.ActiveMQSecurityException
|
||||
import org.apache.activemq.artemis.api.core.ActiveMQUnBlockedException
|
||||
import rx.Observable
|
||||
import java.lang.reflect.InvocationHandler
|
||||
import java.lang.reflect.InvocationTargetException
|
||||
import java.lang.reflect.Method
|
||||
@ -52,22 +55,25 @@ class ReconnectingCordaRPCOps private constructor(
|
||||
nodeHostAndPort: NetworkHostAndPort,
|
||||
username: String,
|
||||
password: String,
|
||||
rpcConfiguration: CordaRPCClientConfiguration,
|
||||
sslConfiguration: ClientRpcSslOptions? = null,
|
||||
classLoader: ClassLoader? = null,
|
||||
observersPool: ExecutorService? = null
|
||||
) : this(
|
||||
ReconnectingRPCConnection(listOf(nodeHostAndPort), username, password, sslConfiguration, classLoader),
|
||||
ReconnectingRPCConnection(listOf(nodeHostAndPort), username, password, rpcConfiguration, sslConfiguration, classLoader),
|
||||
observersPool ?: Executors.newCachedThreadPool(),
|
||||
observersPool != null)
|
||||
constructor(
|
||||
nodeHostAndPorts: List<NetworkHostAndPort>,
|
||||
username: String,
|
||||
password: String,
|
||||
rpcConfiguration: CordaRPCClientConfiguration,
|
||||
gracefulReconnect: GracefulReconnect? = null,
|
||||
sslConfiguration: ClientRpcSslOptions? = null,
|
||||
classLoader: ClassLoader? = null,
|
||||
observersPool: ExecutorService? = null
|
||||
) : this(
|
||||
ReconnectingRPCConnection(nodeHostAndPorts, username, password, sslConfiguration, classLoader),
|
||||
ReconnectingRPCConnection(nodeHostAndPorts, username, password, rpcConfiguration, sslConfiguration, classLoader, gracefulReconnect),
|
||||
observersPool ?: Executors.newCachedThreadPool(),
|
||||
observersPool != null)
|
||||
private companion object {
|
||||
@ -116,43 +122,59 @@ class ReconnectingCordaRPCOps private constructor(
|
||||
val nodeHostAndPorts: List<NetworkHostAndPort>,
|
||||
val username: String,
|
||||
val password: String,
|
||||
val rpcConfiguration: CordaRPCClientConfiguration,
|
||||
val sslConfiguration: ClientRpcSslOptions? = null,
|
||||
val classLoader: ClassLoader?
|
||||
val classLoader: ClassLoader?,
|
||||
val gracefulReconnect: GracefulReconnect? = null
|
||||
) : RPCConnection<CordaRPCOps> {
|
||||
private var currentRPCConnection: CordaRPCConnection? = null
|
||||
enum class CurrentState {
|
||||
UNCONNECTED, CONNECTED, CONNECTING, CLOSED, DIED
|
||||
}
|
||||
private var currentState = CurrentState.UNCONNECTED
|
||||
|
||||
private var currentState = UNCONNECTED
|
||||
|
||||
init {
|
||||
current
|
||||
}
|
||||
private val current: CordaRPCConnection
|
||||
@Synchronized get() = when (currentState) {
|
||||
CurrentState.UNCONNECTED -> connect()
|
||||
CurrentState.CONNECTED -> currentRPCConnection!!
|
||||
CurrentState.CLOSED -> throw IllegalArgumentException("The ReconnectingRPCConnection has been closed.")
|
||||
CurrentState.CONNECTING, CurrentState.DIED -> throw IllegalArgumentException("Illegal state: $currentState ")
|
||||
UNCONNECTED -> connect()
|
||||
CONNECTED -> currentRPCConnection!!
|
||||
CLOSED -> throw IllegalArgumentException("The ReconnectingRPCConnection has been closed.")
|
||||
CONNECTING, DIED -> throw IllegalArgumentException("Illegal state: $currentState ")
|
||||
}
|
||||
/**
|
||||
* Called on external error.
|
||||
* Will block until the connection is established again.
|
||||
*/
|
||||
|
||||
@Synchronized
|
||||
fun reconnectOnError(e: Throwable) {
|
||||
val previousConnection = currentRPCConnection
|
||||
currentState = CurrentState.DIED
|
||||
private fun doReconnect(e: Throwable, previousConnection: CordaRPCConnection?) {
|
||||
if (previousConnection != currentRPCConnection) {
|
||||
// We've already done this, skip
|
||||
return
|
||||
}
|
||||
// First one to get here gets to do all the reconnect logic, including calling onDisconnect and onReconnect. This makes sure
|
||||
// that they're only called once per reconnect.
|
||||
currentState = DIED
|
||||
gracefulReconnect?.onDisconnect?.invoke()
|
||||
//TODO - handle error cases
|
||||
log.error("Reconnecting to ${this.nodeHostAndPorts} due to error: ${e.message}")
|
||||
log.debug("", e)
|
||||
connect()
|
||||
previousConnection?.forceClose()
|
||||
gracefulReconnect?.onReconnect?.invoke()
|
||||
}
|
||||
/**
|
||||
* Called on external error.
|
||||
* Will block until the connection is established again.
|
||||
*/
|
||||
fun reconnectOnError(e: Throwable) {
|
||||
val previousConnection = currentRPCConnection
|
||||
doReconnect(e, previousConnection)
|
||||
}
|
||||
@Synchronized
|
||||
private fun connect(): CordaRPCConnection {
|
||||
currentState = CurrentState.CONNECTING
|
||||
currentState = CONNECTING
|
||||
currentRPCConnection = establishConnectionWithRetry()
|
||||
currentState = CurrentState.CONNECTED
|
||||
currentState = CONNECTED
|
||||
return currentRPCConnection!!
|
||||
}
|
||||
|
||||
@ -161,7 +183,7 @@ class ReconnectingCordaRPCOps private constructor(
|
||||
log.info("Connecting to: $attemptedAddress")
|
||||
try {
|
||||
return CordaRPCClient(
|
||||
attemptedAddress, CordaRPCClientConfiguration(connectionMaxRetryInterval = retryInterval, maxReconnectAttempts = 1), sslConfiguration, classLoader
|
||||
attemptedAddress, rpcConfiguration.copy(connectionMaxRetryInterval = retryInterval, maxReconnectAttempts = 1), sslConfiguration, classLoader
|
||||
).start(username, password).also {
|
||||
// Check connection is truly operational before returning it.
|
||||
require(it.proxy.nodeInfo().legalIdentitiesAndCerts.isNotEmpty()) {
|
||||
@ -204,63 +226,70 @@ class ReconnectingCordaRPCOps private constructor(
|
||||
get() = current.serverProtocolVersion
|
||||
@Synchronized
|
||||
override fun notifyServerAndClose() {
|
||||
currentState = CurrentState.CLOSED
|
||||
currentState = CLOSED
|
||||
currentRPCConnection?.notifyServerAndClose()
|
||||
}
|
||||
@Synchronized
|
||||
override fun forceClose() {
|
||||
currentState = CurrentState.CLOSED
|
||||
currentState = CLOSED
|
||||
currentRPCConnection?.forceClose()
|
||||
}
|
||||
@Synchronized
|
||||
override fun close() {
|
||||
currentState = CurrentState.CLOSED
|
||||
currentState = CLOSED
|
||||
currentRPCConnection?.close()
|
||||
}
|
||||
}
|
||||
private class ErrorInterceptingHandler(val reconnectingRPCConnection: ReconnectingRPCConnection, val observersPool: ExecutorService) : InvocationHandler {
|
||||
private fun Method.isStartFlow() = name.startsWith("startFlow") || name.startsWith("startTrackedFlow")
|
||||
override fun invoke(proxy: Any, method: Method, args: Array<out Any>?): Any? {
|
||||
val result: Any? = try {
|
||||
log.debug { "Invoking RPC $method..." }
|
||||
method.invoke(reconnectingRPCConnection.proxy, *(args ?: emptyArray())).also {
|
||||
log.debug { "RPC $method invoked successfully." }
|
||||
}
|
||||
} catch (e: InvocationTargetException) {
|
||||
fun retry() = if (method.isStartFlow()) {
|
||||
// Don't retry flows
|
||||
throw CouldNotStartFlowException(e.targetException)
|
||||
} else {
|
||||
this.invoke(proxy, method, args)
|
||||
}
|
||||
when (e.targetException) {
|
||||
is RejectedCommandException -> {
|
||||
log.error("Node is being shutdown. Operation ${method.name} rejected. Retrying when node is up...", e)
|
||||
reconnectingRPCConnection.reconnectOnError(e)
|
||||
this.invoke(proxy, method, args)
|
||||
|
||||
private fun checkIfIsStartFlow(method: Method, e: InvocationTargetException) {
|
||||
if (method.isStartFlow()) {
|
||||
// Don't retry flows
|
||||
throw CouldNotStartFlowException(e.targetException)
|
||||
}
|
||||
}
|
||||
|
||||
private fun doInvoke(method: Method, args: Array<out Any>?): Any? {
|
||||
// will stop looping when [method.invoke] succeeds
|
||||
while (true) {
|
||||
try {
|
||||
log.debug { "Invoking RPC $method..." }
|
||||
return method.invoke(reconnectingRPCConnection.proxy, *(args ?: emptyArray())).also {
|
||||
log.debug { "RPC $method invoked successfully." }
|
||||
}
|
||||
is ConnectionFailureException -> {
|
||||
log.error("Failed to perform operation ${method.name}. Connection dropped. Retrying....", e)
|
||||
reconnectingRPCConnection.reconnectOnError(e)
|
||||
retry()
|
||||
}
|
||||
is RPCException -> {
|
||||
log.error("Failed to perform operation ${method.name}. RPCException. Retrying....", e)
|
||||
reconnectingRPCConnection.reconnectOnError(e)
|
||||
Thread.sleep(1000) // TODO - explain why this sleep is necessary
|
||||
retry()
|
||||
}
|
||||
else -> {
|
||||
log.error("Failed to perform operation ${method.name}. Unknown error. Retrying....", e)
|
||||
reconnectingRPCConnection.reconnectOnError(e)
|
||||
retry()
|
||||
} catch (e: InvocationTargetException) {
|
||||
when (e.targetException) {
|
||||
is RejectedCommandException -> {
|
||||
log.error("Node is being shutdown. Operation ${method.name} rejected. Retrying when node is up...", e)
|
||||
reconnectingRPCConnection.reconnectOnError(e)
|
||||
}
|
||||
is ConnectionFailureException -> {
|
||||
log.error("Failed to perform operation ${method.name}. Connection dropped. Retrying....", e)
|
||||
reconnectingRPCConnection.reconnectOnError(e)
|
||||
checkIfIsStartFlow(method, e)
|
||||
}
|
||||
is RPCException -> {
|
||||
log.error("Failed to perform operation ${method.name}. RPCException. Retrying....", e)
|
||||
reconnectingRPCConnection.reconnectOnError(e)
|
||||
Thread.sleep(1000) // TODO - explain why this sleep is necessary
|
||||
checkIfIsStartFlow(method, e)
|
||||
}
|
||||
else -> {
|
||||
log.error("Failed to perform operation ${method.name}. Unknown error. Retrying....", e)
|
||||
reconnectingRPCConnection.reconnectOnError(e)
|
||||
checkIfIsStartFlow(method, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
override fun invoke(proxy: Any, method: Method, args: Array<out Any>?): Any? {
|
||||
return when (method.returnType) {
|
||||
DataFeed::class.java -> {
|
||||
// Intercept the data feed methods and returned a ReconnectingObservable instance
|
||||
val initialFeed: DataFeed<Any, Any?> = uncheckedCast(result)
|
||||
// Intercept the data feed methods and return a ReconnectingObservable instance
|
||||
val initialFeed: DataFeed<Any, Any?> = uncheckedCast(doInvoke(method, args))
|
||||
val observable = ReconnectingObservable(reconnectingRPCConnection, observersPool, initialFeed) {
|
||||
// This handles reconnecting and creates new feeds.
|
||||
uncheckedCast(this.invoke(reconnectingRPCConnection.proxy, method, args))
|
||||
@ -268,10 +297,11 @@ class ReconnectingCordaRPCOps private constructor(
|
||||
initialFeed.copy(updates = observable)
|
||||
}
|
||||
// TODO - add handlers for Observable return types.
|
||||
else -> result
|
||||
else -> doInvoke(method, args)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
override fun close() {
|
||||
if (!userPool) observersPool.shutdown()
|
||||
retryFlowsPool.shutdown()
|
||||
|
@ -5,7 +5,8 @@ package net.corda.common.logging
|
||||
/**
|
||||
* constants in this file are generated by gradle
|
||||
* to change this file, edit src/main/template/kotlin/net/corda/common/logging/Constants.kt
|
||||
* the generated file does not need to be committed to source control (originally added to source control for ease of use)
|
||||
* the generated file does not need to be committed to source control
|
||||
* (originally added to source control for ease of use)
|
||||
*/
|
||||
|
||||
internal const val CURRENT_MAJOR_RELEASE = "4.4-SNAPSHOT"
|
@ -5,7 +5,8 @@ package net.corda.common.logging
|
||||
/**
|
||||
* constants in this file are generated by gradle
|
||||
* to change this file, edit src/main/template/kotlin/net/corda/common/logging/Constants.kt
|
||||
* the generated file does not need to be committed to source control (originally added to source control for ease of use)
|
||||
* the generated file does not need to be committed to source control
|
||||
* (originally added to source control for ease of use)
|
||||
*/
|
||||
|
||||
internal const val CURRENT_MAJOR_RELEASE = "@corda_release_version@"
|
@ -12,7 +12,11 @@ java8MinUpdateVersion=171
|
||||
# ***************************************************************#
|
||||
platformVersion=5
|
||||
guavaVersion=28.0-jre
|
||||
# Quasar version to use with Java 8:
|
||||
quasarVersion=0.7.10
|
||||
quasarClassifier=jdk8
|
||||
# Quasar version to use with Java 11:
|
||||
quasarVersion11=0.8.0
|
||||
proguardVersion=6.1.1
|
||||
bouncycastleVersion=1.60
|
||||
classgraphVersion=4.8.41
|
||||
|
@ -8,6 +8,9 @@ apply plugin: 'idea'
|
||||
|
||||
evaluationDependsOn(":core")
|
||||
|
||||
// required by DJVM and Avian JVM (for running inside the SGX enclave) which only supports Java 8.
|
||||
targetCompatibility = VERSION_1_8
|
||||
|
||||
def javaHome = System.getProperty('java.home')
|
||||
def jarBaseName = "corda-${project.name}".toString()
|
||||
|
||||
@ -69,8 +72,12 @@ task predeterminise(type: ProGuardTask) {
|
||||
injars patchCore
|
||||
outjars file("$buildDir/proguard/pre-deterministic-${project.version}.jar")
|
||||
|
||||
libraryjars file("$javaHome/lib/rt.jar")
|
||||
libraryjars file("$javaHome/lib/jce.jar")
|
||||
if (JavaVersion.current().isJava9Compatible()) {
|
||||
libraryjars "$javaHome/jmods"
|
||||
} else {
|
||||
libraryjars "$javaHome/lib/rt.jar"
|
||||
libraryjars "$javaHome/lib/jce.jar"
|
||||
}
|
||||
configurations.compileClasspath.forEach {
|
||||
if (originalJar != it) {
|
||||
libraryjars it, filter: '!META-INF/versions/**'
|
||||
@ -118,8 +125,12 @@ task determinise(type: ProGuardTask) {
|
||||
injars jarFilter
|
||||
outjars file("$buildDir/proguard/$jarBaseName-${project.version}.jar")
|
||||
|
||||
libraryjars file("$javaHome/lib/rt.jar")
|
||||
libraryjars file("$javaHome/lib/jce.jar")
|
||||
if (JavaVersion.current().isJava9Compatible()) {
|
||||
libraryjars "$javaHome/jmods"
|
||||
} else {
|
||||
libraryjars "$javaHome/lib/rt.jar"
|
||||
libraryjars "$javaHome/lib/jce.jar"
|
||||
}
|
||||
configurations.deterministicLibraries.forEach {
|
||||
libraryjars it, filter: '!META-INF/versions/**'
|
||||
}
|
||||
@ -145,6 +156,8 @@ task determinise(type: ProGuardTask) {
|
||||
}
|
||||
|
||||
import net.corda.gradle.jarfilter.MetaFixerTask
|
||||
import static org.gradle.api.JavaVersion.VERSION_1_8
|
||||
|
||||
task metafix(type: MetaFixerTask) {
|
||||
outputDir file("$buildDir/libs")
|
||||
jars determinise
|
||||
|
@ -0,0 +1,61 @@
|
||||
package net.corda.deterministic;
|
||||
|
||||
import java.security.Provider;
|
||||
import java.security.SecureRandom;
|
||||
import java.security.SecureRandomSpi;
|
||||
import java.security.Security;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
/**
|
||||
* Temporarily restore Sun's [SecureRandom] provider.
|
||||
* This is ONLY for allowing us to generate test data, e.g. signatures.
|
||||
*
|
||||
* JDK11 upgrade: rewritten in Java to gain access to private internal JDK classes via module directives (not available to Kotlin compiler):
|
||||
* sun.security.provider.SecureRandom()
|
||||
*/
|
||||
public class CheatingSecurityProvider extends Provider implements AutoCloseable {
|
||||
|
||||
private static AtomicInteger counter = new AtomicInteger();
|
||||
|
||||
@SuppressWarnings("deprecation") // JDK11: should replace with Provider(String name, double version, String info) (since 9)
|
||||
public CheatingSecurityProvider() {
|
||||
super("Cheat-" + counter.getAndIncrement(), 1.8, "Cheat security provider");
|
||||
putService(new CheatingSecureRandomService(this));
|
||||
assertEquals(1, Security.insertProviderAt(this, 1));
|
||||
}
|
||||
|
||||
public void close() {
|
||||
Security.removeProvider(getName());
|
||||
}
|
||||
|
||||
private class SunSecureRandom extends SecureRandom {
|
||||
public SunSecureRandom() {
|
||||
// JDK11 upgrade: rewritten in Java to gain access to private internal JDK classes via open module directive
|
||||
super(new sun.security.provider.SecureRandom(), null);
|
||||
}
|
||||
}
|
||||
|
||||
private class CheatingSecureRandomService extends Provider.Service {
|
||||
|
||||
public CheatingSecureRandomService(Provider provider) {
|
||||
super(provider, "SecureRandom", "CheatingPRNG", CheatingSecureRandomSpi.class.getName(), null, null);
|
||||
}
|
||||
|
||||
private SecureRandomSpi instance = new CheatingSecureRandomSpi();
|
||||
|
||||
public Object newInstance(Object constructorParameter){
|
||||
return instance;
|
||||
}
|
||||
}
|
||||
|
||||
private class CheatingSecureRandomSpi extends SecureRandomSpi {
|
||||
|
||||
private SecureRandom secureRandom = new SunSecureRandom();
|
||||
|
||||
public void engineSetSeed(byte[] seed) { secureRandom.setSeed(seed); }
|
||||
public void engineNextBytes(byte[] bytes) { secureRandom.nextBytes(bytes); }
|
||||
public byte[] engineGenerateSeed(int numBytes) { return secureRandom.generateSeed(numBytes); }
|
||||
}
|
||||
}
|
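A minimal sketch of how test code might use the provider above; the seed and byte-array sizes are arbitrary. Because the provider inserts itself at position 1 with a SecureRandom service named "CheatingPRNG", the algorithm resolves through the normal JCA lookup for as long as the provider stays open:

    CheatingSecurityProvider().use {
        // Resolves to the CheatingSecureRandomSpi registered by the provider above.
        val random = java.security.SecureRandom.getInstance("CheatingPRNG")
        val seed: ByteArray = random.generateSeed(32)
        random.nextBytes(ByteArray(16))
        check(seed.size == 32)
    }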
@ -1,44 +0,0 @@
|
||||
package net.corda.deterministic
|
||||
|
||||
import org.junit.Assert.*
|
||||
import java.security.Provider
|
||||
import java.security.SecureRandom
|
||||
import java.security.SecureRandomSpi
|
||||
import java.security.Security
|
||||
import java.util.concurrent.atomic.AtomicInteger
|
||||
|
||||
/**
|
||||
* Temporarily restore Sun's [SecureRandom] provider.
|
||||
* This is ONLY for allowing us to generate test data, e.g. signatures.
|
||||
*/
|
||||
class CheatingSecurityProvider : Provider("Cheat-${counter.getAndIncrement()}", 1.8, "Cheat security provider"), AutoCloseable {
|
||||
private companion object {
|
||||
private val counter = AtomicInteger()
|
||||
}
|
||||
|
||||
init {
|
||||
putService(CheatingSecureRandomService(this))
|
||||
assertEquals(1, Security.insertProviderAt(this, 1))
|
||||
}
|
||||
|
||||
override fun close() {
|
||||
Security.removeProvider(name)
|
||||
}
|
||||
|
||||
private class SunSecureRandom : SecureRandom(sun.security.provider.SecureRandom(), null)
|
||||
|
||||
private class CheatingSecureRandomService(provider: Provider)
|
||||
: Provider.Service(provider, "SecureRandom", "CheatingPRNG", CheatingSecureRandomSpi::javaClass.name, null, null) {
|
||||
|
||||
private val instance: SecureRandomSpi = CheatingSecureRandomSpi()
|
||||
override fun newInstance(constructorParameter: Any?) = instance
|
||||
}
|
||||
|
||||
private class CheatingSecureRandomSpi : SecureRandomSpi() {
|
||||
private val secureRandom: SecureRandom = SunSecureRandom()
|
||||
|
||||
override fun engineSetSeed(seed: ByteArray) = secureRandom.setSeed(seed)
|
||||
override fun engineNextBytes(bytes: ByteArray) = secureRandom.nextBytes(bytes)
|
||||
override fun engineGenerateSeed(numBytes: Int): ByteArray = secureRandom.generateSeed(numBytes)
|
||||
}
|
||||
}
|
@ -57,7 +57,7 @@ class AttachmentsClassLoaderSerializationTests {
|
||||
SecureHash.zeroHash,
|
||||
{ attachmentTrustCalculator.calculate(it) }) { classLoader ->
|
||||
val contractClass = Class.forName(ISOLATED_CONTRACT_CLASS_NAME, true, classLoader)
|
||||
val contract = contractClass.newInstance() as Contract
|
||||
val contract = contractClass.getDeclaredConstructor().newInstance() as Contract
|
||||
assertEquals("helloworld", contract.declaredField<Any?>("magicString").value)
|
||||
|
||||
val txt = IOUtils.toString(classLoader.getResourceAsStream("file1.txt"), Charsets.UTF_8.name())
|
||||
|
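The same reflection change recurs throughout this diff: Class.newInstance() is deprecated from JDK 9 because it can propagate checked constructor exceptions without declaring them, so instantiation now goes through the declared no-arg constructor instead. A minimal Kotlin illustration of the two forms; the class name and class loader here are placeholders, not taken from this change:

    import net.corda.core.contracts.Contract

    val classLoader: ClassLoader = Thread.currentThread().contextClassLoader   // placeholder
    val contractClass = Class.forName("com.example.DummyContract", true, classLoader)

    // Pre-JDK 9 style, now deprecated:
    //   val contract = contractClass.newInstance() as Contract
    // Replacement used across this change; exceptions thrown by the constructor
    // surface wrapped in InvocationTargetException rather than unchecked:
    val contract = contractClass.getDeclaredConstructor().newInstance() as Contract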
@ -90,7 +90,7 @@ class AttachmentsClassLoaderTests {
|
||||
|
||||
val classloader = createClassloader(isolatedId)
|
||||
val contractClass = Class.forName(ISOLATED_CONTRACT_CLASS_NAME, true, classloader)
|
||||
val contract = contractClass.newInstance() as Contract
|
||||
val contract = contractClass.getDeclaredConstructor().newInstance() as Contract
|
||||
assertEquals("helloworld", contract.declaredField<Any?>("magicString").value)
|
||||
}
|
||||
|
||||
|
@ -1,3 +1,5 @@
|
||||
import static org.gradle.api.JavaVersion.VERSION_1_8
|
||||
|
||||
apply plugin: 'kotlin'
|
||||
apply plugin: 'kotlin-jpa'
|
||||
apply plugin: 'net.corda.plugins.quasar-utils'
|
||||
@ -7,6 +9,11 @@ apply plugin: 'com.jfrog.artifactory'
|
||||
|
||||
description 'Corda core'
|
||||
|
||||
evaluationDependsOn(':node:capsule')
|
||||
|
||||
// required by DJVM and Avian JVM (for running inside the SGX enclave) which only supports Java 8.
|
||||
targetCompatibility = VERSION_1_8
|
||||
|
||||
configurations {
|
||||
integrationTestCompile.extendsFrom testCompile
|
||||
integrationTestRuntimeOnly.extendsFrom testRuntimeOnly
|
||||
@ -71,6 +78,9 @@ dependencies {
|
||||
|
||||
compile group: "io.github.classgraph", name: "classgraph", version: class_graph_version
|
||||
|
||||
// JDK11: required by Quasar at run-time
|
||||
testRuntimeOnly "com.esotericsoftware:kryo:4.0.2"
|
||||
|
||||
testCompile "com.nhaarman:mockito-kotlin:$mockito_kotlin_version"
|
||||
testCompile "org.mockito:mockito-core:$mockito_version"
|
||||
testCompile "org.assertj:assertj-core:$assertj_version"
|
||||
|
@ -9,6 +9,7 @@ import org.bouncycastle.asn1.ASN1ObjectIdentifier
|
||||
import java.security.Provider
|
||||
|
||||
@KeepForDJVM
|
||||
@Suppress("DEPRECATION") // JDK11: should replace with Provider(String name, double version, String info) (since 9)
|
||||
class CordaSecurityProvider : Provider(PROVIDER_NAME, 0.1, "$PROVIDER_NAME security provider wrapper") {
|
||||
companion object {
|
||||
const val PROVIDER_NAME = "Corda"
|
||||
|
@ -35,7 +35,7 @@ object ContractUpgradeFlow {
|
||||
// DOCEND 1
|
||||
@Suspendable
|
||||
override fun call(): Void? {
|
||||
val upgrade = upgradedContractClass.newInstance()
|
||||
val upgrade = upgradedContractClass.getDeclaredConstructor().newInstance()
|
||||
if (upgrade.legacyContract != stateAndRef.state.contract) {
|
||||
throw FlowException("The contract state cannot be upgraded using provided UpgradedContract.")
|
||||
}
|
||||
|
@ -41,6 +41,7 @@ fun checkMinimumPlatformVersion(minimumPlatformVersion: Int, requiredMinPlatform
|
||||
}
|
||||
}
|
||||
|
||||
// JDK11: revisit (JDK 9+ uses different numbering scheme: see https://docs.oracle.com/javase/9/docs/api/java/lang/Runtime.Version.html)
|
||||
@Throws(NumberFormatException::class)
|
||||
fun getJavaUpdateVersion(javaVersion: String): Long = javaVersion.substringAfter("_").substringBefore("-").toLong()
|
||||
|
||||
|
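A worked example of the parsing above, for the kind of pre-JDK 9 version string this helper expects (the java8MinUpdateVersion property earlier in this change is compared against the same value); the sample string itself is illustrative:

    // "1.8.0_171-b11".substringAfter("_")   -> "171-b11"
    //                .substringBefore("-")  -> "171"
    //                .toLong()              -> 171L
    check(getJavaUpdateVersion("1.8.0_171-b11") == 171L)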
@ -352,6 +352,7 @@ class DeclaredField<T>(clazz: Class<*>, name: String, private val receiver: Any?
|
||||
val name: String = javaField.name
|
||||
|
||||
private fun <RESULT> Field.accessible(action: Field.() -> RESULT): RESULT {
|
||||
@Suppress("DEPRECATION") // JDK11: isAccessible() should be replaced with canAccess() (since 9)
|
||||
val accessible = isAccessible
|
||||
isAccessible = true
|
||||
try {
|
||||
@ -393,16 +394,17 @@ fun <K, V> Iterable<Pair<K, V>>.toMultiMap(): Map<K, List<V>> = this.groupBy({ i
|
||||
val Class<*>.location: URL get() = protectionDomain.codeSource.location
|
||||
|
||||
/** Convenience method to get the package name of a class literal. */
|
||||
val KClass<*>.packageName: String get() = java.packageName
|
||||
val Class<*>.packageName: String get() = requireNotNull(this.packageNameOrNull) { "$this not defined inside a package" }
|
||||
val KClass<*>.packageName: String get() = java.packageName_
|
||||
// re-defined to prevent clash with Java 9 Class.packageName: https://docs.oracle.com/javase/9/docs/api/java/lang/Class.html#getPackageName--
|
||||
val Class<*>.packageName_: String get() = requireNotNull(this.packageNameOrNull) { "$this not defined inside a package" }
|
||||
val Class<*>.packageNameOrNull: String? // This intentionally does not go via `package` as that code path is slow and contended and just ends up doing this.
|
||||
get() {
|
||||
val name = this.getName()
|
||||
val name = this.name
|
||||
val i = name.lastIndexOf('.')
|
||||
if (i != -1) {
|
||||
return name.substring(0, i)
|
||||
return if (i != -1) {
|
||||
name.substring(0, i)
|
||||
} else {
|
||||
return null
|
||||
null
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -366,7 +366,7 @@ class Verifier(val ltx: LedgerTransaction, private val transactionClassLoader: C
|
||||
|
||||
val contractInstances: List<Contract> = contractClasses.map { (contractClassName, contractClass) ->
|
||||
try {
|
||||
contractClass.newInstance()
|
||||
contractClass.getDeclaredConstructor().newInstance()
|
||||
} catch (e: Exception) {
|
||||
throw TransactionVerificationException.ContractCreationError(ltx.id, contractClassName, e)
|
||||
}
|
||||
|
@ -133,7 +133,7 @@ data class ContractUpgradeWireTransaction(
|
||||
|
||||
private fun upgradedContract(className: ContractClassName, classLoader: ClassLoader): UpgradedContract<ContractState, ContractState> = try {
|
||||
@Suppress("UNCHECKED_CAST")
|
||||
classLoader.loadClass(className).asSubclass(UpgradedContract::class.java).newInstance() as UpgradedContract<ContractState, ContractState>
|
||||
classLoader.loadClass(className).asSubclass(UpgradedContract::class.java).getDeclaredConstructor().newInstance() as UpgradedContract<ContractState, ContractState>
|
||||
} catch (e: Exception) {
|
||||
throw TransactionVerificationException.ContractCreationError(id, className, e)
|
||||
}
|
||||
|
@ -0,0 +1,133 @@
|
||||
package net.corda.core.internal;
|
||||
|
||||
import net.corda.core.crypto.Crypto;
|
||||
import net.i2p.crypto.eddsa.EdDSAEngine;
|
||||
import net.i2p.crypto.eddsa.EdDSAPublicKey;
|
||||
import org.junit.Test;
|
||||
import sun.security.util.BitArray;
|
||||
import sun.security.util.ObjectIdentifier;
|
||||
import sun.security.x509.AlgorithmId;
|
||||
import sun.security.x509.X509Key;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.math.BigInteger;
|
||||
import java.security.InvalidKeyException;
|
||||
import java.security.KeyPair;
|
||||
import java.security.SignatureException;
|
||||
import java.util.Random;
|
||||
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
/**
|
||||
* JDK11 upgrade: rewritten in Java to gain access to private internal JDK classes via module directives (not available to Kotlin compiler):
|
||||
* import sun.security.util.BitArray;
|
||||
* import sun.security.util.ObjectIdentifier;
|
||||
* import sun.security.x509.AlgorithmId;
|
||||
* import sun.security.x509.X509Key;
|
||||
*/
|
||||
public class X509EdDSAEngineTest {
|
||||
|
||||
private static long SEED = 20170920L;
|
||||
private static int TEST_DATA_SIZE = 2000;
|
||||
|
||||
// offset into an EdDSA header indicating where the key header and actual key start
|
||||
// in the underlying byte array
|
||||
private static int keyHeaderStart = 9;
|
||||
private static int keyStart = 12;
|
||||
|
||||
private X509Key toX509Key(EdDSAPublicKey publicKey) throws IOException, InvalidKeyException {
|
||||
byte[] internals = publicKey.getEncoded();
|
||||
|
||||
// key size in the header includes the count unused bits at the end of the key
|
||||
// [keyHeaderStart + 2] but NOT the key header ID [keyHeaderStart] so the
|
||||
// actual length of the key blob is size - 1
|
||||
int keySize = (internals[keyHeaderStart + 1]) - 1;
|
||||
|
||||
byte[] key = new byte[keySize];
|
||||
System.arraycopy(internals, keyStart, key, 0, keySize);
|
||||
|
||||
// 1.3.101.112 is the EdDSA (Ed25519) OID
|
||||
return new TestX509Key(new AlgorithmId(new ObjectIdentifier("1.3.101.112")), new BitArray(keySize * 8, key));
|
||||
}
|
||||
|
||||
class TestX509Key extends X509Key {
|
||||
TestX509Key(AlgorithmId algorithmId, BitArray key) throws InvalidKeyException {
|
||||
this.algid = algorithmId;
|
||||
this.setKey(key);
|
||||
this.encode();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Put the X509EdDSA engine through basic tests to verify that the functions are hooked up correctly.
|
||||
*/
|
||||
@Test
|
||||
public void SignAndVerify() throws InvalidKeyException, SignatureException {
|
||||
X509EdDSAEngine engine = new X509EdDSAEngine();
|
||||
KeyPair keyPair = Crypto.deriveKeyPairFromEntropy(Crypto.EDDSA_ED25519_SHA512, BigInteger.valueOf(SEED));
|
||||
EdDSAPublicKey publicKey = (EdDSAPublicKey) keyPair.getPublic();
|
||||
byte[] randomBytes = new byte[TEST_DATA_SIZE];
|
||||
new Random(SEED).nextBytes(randomBytes);
|
||||
engine.initSign(keyPair.getPrivate());
|
||||
engine.update(randomBytes[0]);
|
||||
engine.update(randomBytes, 1, randomBytes.length - 1);
|
||||
|
||||
// Now verify the signature
|
||||
byte[] signature = engine.sign();
|
||||
|
||||
engine.initVerify(publicKey);
|
||||
engine.update(randomBytes);
|
||||
assertTrue(engine.verify(signature));
|
||||
}
|
||||
|
||||
/**
|
||||
* Verify that signing with an X509Key wrapped EdDSA key works.
|
||||
*/
|
||||
@Test
|
||||
public void SignAndVerifyWithX509Key() throws InvalidKeyException, SignatureException, IOException {
|
||||
X509EdDSAEngine engine = new X509EdDSAEngine();
|
||||
KeyPair keyPair = Crypto.deriveKeyPairFromEntropy(Crypto.EDDSA_ED25519_SHA512, BigInteger.valueOf(SEED + 1));
|
||||
X509Key publicKey = toX509Key((EdDSAPublicKey) keyPair.getPublic());
|
||||
byte[] randomBytes = new byte[TEST_DATA_SIZE];
|
||||
new Random(SEED + 1).nextBytes(randomBytes);
|
||||
engine.initSign(keyPair.getPrivate());
|
||||
engine.update(randomBytes[0]);
|
||||
engine.update(randomBytes, 1, randomBytes.length - 1);
|
||||
|
||||
// Now verify the signature
|
||||
byte[] signature = engine.sign();
|
||||
|
||||
engine.initVerify(publicKey);
|
||||
engine.update(randomBytes);
|
||||
assertTrue(engine.verify(signature));
|
||||
}
|
||||
|
||||
/**
|
||||
* Verify that signing with an X509Key wrapped EdDSA key succeeds when using the underlying EdDSAEngine.
|
||||
*/
|
||||
@Test
|
||||
public void SignAndVerifyWithX509KeyAndOldEngineFails() throws InvalidKeyException, SignatureException, IOException {
|
||||
X509EdDSAEngine engine = new X509EdDSAEngine();
|
||||
KeyPair keyPair = Crypto.deriveKeyPairFromEntropy(Crypto.EDDSA_ED25519_SHA512, BigInteger.valueOf(SEED + 1));
|
||||
X509Key publicKey = toX509Key((EdDSAPublicKey) keyPair.getPublic());
|
||||
byte[] randomBytes = new byte[TEST_DATA_SIZE];
|
||||
new Random(SEED + 1).nextBytes(randomBytes);
|
||||
engine.initSign(keyPair.getPrivate());
|
||||
engine.update(randomBytes[0]);
|
||||
engine.update(randomBytes, 1, randomBytes.length - 1);
|
||||
|
||||
// Now verify the signature
|
||||
byte[] signature = engine.sign();
|
||||
engine.initVerify(publicKey);
|
||||
engine.update(randomBytes);
|
||||
engine.verify(signature);
|
||||
}
|
||||
|
||||
/** Verify will fail if the input public key cannot be converted to EdDSA public key. */
|
||||
@Test(expected = InvalidKeyException.class)
|
||||
public void verifyWithNonSupportedKeyTypeFails() throws InvalidKeyException {
|
||||
EdDSAEngine engine = new EdDSAEngine();
|
||||
KeyPair keyPair = Crypto.deriveKeyPairFromEntropy(Crypto.ECDSA_SECP256K1_SHA256, BigInteger.valueOf(SEED));
|
||||
engine.initVerify(keyPair.getPublic());
|
||||
}
|
||||
}
|
@ -1,125 +0,0 @@
|
||||
package net.corda.core.internal
|
||||
|
||||
import net.corda.core.crypto.Crypto
|
||||
import net.i2p.crypto.eddsa.EdDSAEngine
|
||||
import net.i2p.crypto.eddsa.EdDSAPublicKey
|
||||
import org.junit.Test
|
||||
import sun.security.util.BitArray
|
||||
import sun.security.util.ObjectIdentifier
|
||||
import sun.security.x509.AlgorithmId
|
||||
import sun.security.x509.X509Key
|
||||
import java.math.BigInteger
|
||||
import java.security.InvalidKeyException
|
||||
import java.util.*
|
||||
import kotlin.test.assertFailsWith
|
||||
import kotlin.test.assertTrue
|
||||
|
||||
class TestX509Key(algorithmId: AlgorithmId, key: BitArray) : X509Key() {
|
||||
init {
|
||||
this.algid = algorithmId
|
||||
this.setKey(key)
|
||||
this.encode()
|
||||
}
|
||||
}
|
||||
|
||||
class X509EdDSAEngineTest {
|
||||
companion object {
|
||||
private const val SEED = 20170920L
|
||||
private const val TEST_DATA_SIZE = 2000
|
||||
|
||||
// offset into an EdDSA header indicating where the key header and actual key start
|
||||
// in the underlying byte array
|
||||
private const val keyHeaderStart = 9
|
||||
private const val keyStart = 12
|
||||
|
||||
private fun toX509Key(publicKey: EdDSAPublicKey): X509Key {
|
||||
val internals = publicKey.encoded
|
||||
|
||||
// key size in the header includes the count unused bits at the end of the key
|
||||
// [keyHeaderStart + 2] but NOT the key header ID [keyHeaderStart] so the
|
||||
// actual length of the key blob is size - 1
|
||||
val keySize = (internals[keyHeaderStart + 1].toInt()) - 1
|
||||
|
||||
val key = ByteArray(keySize)
|
||||
System.arraycopy(internals, keyStart, key, 0, keySize)
|
||||
|
||||
// 1.3.101.102 is the EdDSA OID
|
||||
return TestX509Key(AlgorithmId(ObjectIdentifier("1.3.101.112")), BitArray(keySize * 8, key))
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Put the X509EdDSA engine through basic tests to verify that the functions are hooked up correctly.
|
||||
*/
|
||||
@Test
|
||||
fun `sign and verify`() {
|
||||
val engine = X509EdDSAEngine()
|
||||
val keyPair = Crypto.deriveKeyPairFromEntropy(Crypto.EDDSA_ED25519_SHA512, BigInteger.valueOf(SEED))
|
||||
val publicKey = keyPair.public as EdDSAPublicKey
|
||||
val randomBytes = ByteArray(TEST_DATA_SIZE)
|
||||
Random(SEED).nextBytes(randomBytes)
|
||||
engine.initSign(keyPair.private)
|
||||
engine.update(randomBytes[0])
|
||||
engine.update(randomBytes, 1, randomBytes.size - 1)
|
||||
|
||||
// Now verify the signature
|
||||
val signature = engine.sign()
|
||||
|
||||
engine.initVerify(publicKey)
|
||||
engine.update(randomBytes)
|
||||
assertTrue { engine.verify(signature) }
|
||||
}
|
||||
|
||||
/**
|
||||
* Verify that signing with an X509Key wrapped EdDSA key works.
|
||||
*/
|
||||
@Test
|
||||
fun `sign and verify with X509Key`() {
|
||||
val engine = X509EdDSAEngine()
|
||||
val keyPair = Crypto.deriveKeyPairFromEntropy(Crypto.EDDSA_ED25519_SHA512, BigInteger.valueOf(SEED + 1))
|
||||
val publicKey = toX509Key(keyPair.public as EdDSAPublicKey)
|
||||
val randomBytes = ByteArray(TEST_DATA_SIZE)
|
||||
Random(SEED + 1).nextBytes(randomBytes)
|
||||
engine.initSign(keyPair.private)
|
||||
engine.update(randomBytes[0])
|
||||
engine.update(randomBytes, 1, randomBytes.size - 1)
|
||||
|
||||
// Now verify the signature
|
||||
val signature = engine.sign()
|
||||
|
||||
engine.initVerify(publicKey)
|
||||
engine.update(randomBytes)
|
||||
assertTrue { engine.verify(signature) }
|
||||
}
|
||||
|
||||
/**
|
||||
* Verify that signing with an X509Key wrapped EdDSA key fails when using the underlying EdDSAEngine.
|
||||
*/
|
||||
@Test
|
||||
fun `sign and verify with X509Key and old engine fails`() {
|
||||
val engine = EdDSAEngine()
|
||||
val keyPair = Crypto.deriveKeyPairFromEntropy(Crypto.EDDSA_ED25519_SHA512, BigInteger.valueOf(SEED + 1))
|
||||
val publicKey = toX509Key(keyPair.public as EdDSAPublicKey)
|
||||
val randomBytes = ByteArray(TEST_DATA_SIZE)
|
||||
Random(SEED + 1).nextBytes(randomBytes)
|
||||
engine.initSign(keyPair.private)
|
||||
engine.update(randomBytes[0])
|
||||
engine.update(randomBytes, 1, randomBytes.size - 1)
|
||||
|
||||
// Now verify the signature
|
||||
val signature = engine.sign()
|
||||
assertFailsWith<InvalidKeyException> {
|
||||
engine.initVerify(publicKey)
|
||||
engine.update(randomBytes)
|
||||
engine.verify(signature)
|
||||
}
|
||||
}
|
||||
|
||||
/** Verify will fail if the input public key cannot be converted to EdDSA public key. */
|
||||
@Test
|
||||
fun `verify with non-supported key type fails`() {
|
||||
val engine = EdDSAEngine()
|
||||
val keyPair = Crypto.deriveKeyPairFromEntropy(Crypto.ECDSA_SECP256K1_SHA256, BigInteger.valueOf(SEED))
|
||||
assertFailsWith<InvalidKeyException> { engine.initVerify(keyPair.public) }
|
||||
}
|
||||
}
|
5469
detekt-baseline.xml
File diff suppressed because one or more lines are too long
@ -84,7 +84,7 @@ complexity:
|
||||
ComplexMethod:
|
||||
active: true
|
||||
threshold: 10
|
||||
ignoreSingleWhenExpression: false
|
||||
ignoreSingleWhenExpression: true
|
||||
LabeledExpression:
|
||||
active: false
|
||||
LargeClass:
|
||||
@ -92,7 +92,7 @@ complexity:
|
||||
threshold: 150
|
||||
LongMethod:
|
||||
active: true
|
||||
threshold: 20
|
||||
threshold: 120
|
||||
LongParameterList:
|
||||
active: true
|
||||
threshold: 6
|
||||
@ -135,7 +135,7 @@ empty-blocks:
|
||||
EmptyForBlock:
|
||||
active: true
|
||||
EmptyFunctionBlock:
|
||||
active: true
|
||||
active: false
|
||||
EmptyIfBlock:
|
||||
active: true
|
||||
EmptyInitBlock:
|
||||
@ -240,7 +240,7 @@ naming:
|
||||
VariableNaming:
|
||||
active: true
|
||||
variablePattern: '[a-z][A-Za-z0-9]*'
|
||||
privateVariablePattern: '(_)?[a-z][A-Za-z0-9]*'
|
||||
privateVariablePattern: '(_)?[A-Za-z][_A-Za-z0-9]*'
|
||||
excludeClassPattern: '$^'
|
||||
|
||||
performance:
|
||||
@ -321,7 +321,7 @@ style:
|
||||
ignoreEnums: false
|
||||
MaxLineLength:
|
||||
active: true
|
||||
maxLineLength: 120
|
||||
maxLineLength: 140
|
||||
excludePackageStatements: false
|
||||
excludeImportStatements: false
|
||||
MayBeConst:
|
||||
@ -331,7 +331,7 @@ style:
|
||||
NestedClassesVisibility:
|
||||
active: false
|
||||
NewLineAtEndOfFile:
|
||||
active: true
|
||||
active: false
|
||||
NoTabs:
|
||||
active: false
|
||||
OptionalAbstractKeyword:
|
||||
|
@ -59,6 +59,7 @@ task buildDockerFolder(dependsOn: [":node:capsule:buildCordaJAR", shadowJar]) {
|
||||
from "src/bash/generate-config.sh"
|
||||
from "src/docker/DockerfileAL"
|
||||
from "src/docker/Dockerfile"
|
||||
from "src/docker/Dockerfile11"
|
||||
rename(cordaJar.name, "corda.jar")
|
||||
rename(shadowJar.archivePath.name, "config-exporter.jar")
|
||||
}
|
||||
@ -67,8 +68,8 @@ task buildDockerFolder(dependsOn: [":node:capsule:buildCordaJAR", shadowJar]) {
|
||||
|
||||
final String runTime = LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyyMMddHHmmss"))
|
||||
final String suffix = project.version.toString().toLowerCase().contains("snapshot") ? runTime : "RELEASE"
|
||||
final zuluBuildTags = ["corda/corda-zulu-${project.version.toString().toLowerCase()}:${suffix}", "corda/corda-zulu-${project.version.toString().toLowerCase()}:latest"]
|
||||
final correttoBuildTags = ["corda/corda-corretto-${project.version.toString().toLowerCase()}:${suffix}", "corda/corda-corretto-${project.version.toString().toLowerCase()}:latest"]
|
||||
final zuluBuildTags = ["corda/corda-zulu-java${JavaVersion.current()}-${project.version.toString().toLowerCase()}:${suffix}", "corda/corda-zulu-java${JavaVersion.current()}-${project.version.toString().toLowerCase()}:latest"]
|
||||
final correttoBuildTags = ["corda/corda-corretto-java${JavaVersion.current()}-${project.version.toString().toLowerCase()}:${suffix}", "corda/corda-corretto-java${JavaVersion.current()}-${project.version.toString().toLowerCase()}:latest"]
|
||||
|
||||
task buildOfficialZuluDockerImage(type: DockerBuildImage, dependsOn: [buildDockerFolder]) {
|
||||
inputDir = new File(project.buildDir, "docker-temp")
|
||||
@ -76,6 +77,12 @@ task buildOfficialZuluDockerImage(type: DockerBuildImage, dependsOn: [buildDocke
|
||||
dockerFile = new File(new File(project.buildDir, "docker-temp"), "Dockerfile")
|
||||
}
|
||||
|
||||
task buildOfficialZuluJDK11DockerImage(type: DockerBuildImage, dependsOn: [buildDockerFolder]) {
|
||||
inputDir = new File(project.buildDir, "docker-temp")
|
||||
tags = zuluBuildTags
|
||||
dockerFile = new File(new File(project.buildDir, "docker-temp"), "Dockerfile11")
|
||||
}
|
||||
|
||||
task buildOfficialCorrettoDockerImage(type: DockerBuildImage, dependsOn: [buildDockerFolder]) {
|
||||
inputDir = new File(project.buildDir, "docker-temp")
|
||||
tags = correttoBuildTags
|
||||
@ -90,6 +97,14 @@ task pushZuluLatestTag('type': DockerPushImage, dependsOn: [buildOfficialZuluDoc
|
||||
imageName = zuluBuildTags[1]
|
||||
}
|
||||
|
||||
task pushZulu11TimeStampedTag('type': DockerPushImage, dependsOn: [buildOfficialZuluJDK11DockerImage]){
|
||||
imageName = zuluBuildTags[0]
|
||||
}
|
||||
|
||||
task pushZulu11LatestTag('type': DockerPushImage, dependsOn: [buildOfficialZuluJDK11DockerImage]){
|
||||
imageName = zuluBuildTags[1]
|
||||
}
|
||||
|
||||
task pushCorrettoTimeStampedTag('type': DockerPushImage, dependsOn: [buildOfficialCorrettoDockerImage]){
|
||||
imageName = correttoBuildTags[0]
|
||||
}
|
||||
|
28
docker/src/docker/Dockerfile.zulu-sa-jdk-11-patch
Normal file
@ -0,0 +1,28 @@
|
||||
# Build and publish an Azul Zulu patched JDK 11 to the R3 Azure docker registry as follows:
|
||||
|
||||
# colljos@ci-agent-101l:~$ cd /home/colljos/azul/case17645
|
||||
# $docker build . -f Dockerfile.zulu-sa-jdk-11-patch --no-cache -t azul/zulu-sa-jdk:11.0.3_7_LTS
|
||||
# $docker tag azul/zulu-sa-jdk:11.0.3_7_LTS corda.azurecr.io/jdk/azul/zulu-sa-jdk:11.0.3_7_LTS
|
||||
# $docker login -u corda corda.azurecr.io
|
||||
# docker push corda.azurecr.io/jdk/azul/zulu-sa-jdk:11.0.3_7_LTS
|
||||
|
||||
# Remember to set the DOCKER env variables accordingly to access the R3 Azure docker registry:
|
||||
# export DOCKER_URL=https://corda.azurecr.io
|
||||
# export DOCKER_USERNAME=<username>
|
||||
# export DOCKER_PASSWORD=<password>
|
||||
|
||||
RUN addgroup corda && adduser --ingroup corda --disabled-password -gecos "" --shell /bin/bash corda
|
||||
|
||||
COPY zulu11.31.16-sa-jdk11.0.3-linux_x64.tar /opt
|
||||
|
||||
RUN tar xvf /opt/zulu11.31.16-sa-jdk11.0.3-linux_x64.tar -C /opt && ln -s /opt/zulu11.31.16-sa-jdk11.0.3-linux_x64 /opt/jdk
|
||||
|
||||
RUN rm /opt/zulu11.31.16-sa-jdk11.0.3-linux_x64.tar && \
|
||||
chown -R corda /opt/zulu11.31.16-sa-jdk11.0.3-linux_x64 && \
|
||||
chgrp -R corda /opt/zulu11.31.16-sa-jdk11.0.3-linux_x64
|
||||
|
||||
# Set environment
|
||||
ENV JAVA_HOME /opt/jdk
|
||||
ENV PATH ${PATH}:${JAVA_HOME}/bin
|
||||
|
||||
CMD ["java", "-version"]
|
78
docker/src/docker/Dockerfile11
Normal file
@ -0,0 +1,78 @@
|
||||
# Using Azul Zulu patched JDK 11 (local built and published docker image)
|
||||
|
||||
# colljos@ci-agent-101l:~$ jdk11azul
|
||||
# openjdk version "11.0.3" 2019-04-16 LTS
|
||||
# OpenJDK Runtime Environment Zulu11.31+16-SA (build 11.0.3+7-LTS)
|
||||
# OpenJDK 64-Bit Server VM Zulu11.31+16-SA (build 11.0.3+7-LTS, mixed mode)
|
||||
|
||||
# Remember to set the DOCKER env variables accordingly to access the R3 Azure docker registry:
|
||||
# export DOCKER_URL=https://corda.azurecr.io
|
||||
# export DOCKER_USERNAME=<username>
|
||||
# export DOCKER_PASSWORD=<password>
|
||||
|
||||
FROM corda.azurecr.io/jdk/azul/zulu-sa-jdk:11.0.3_7_LTS
|
||||
|
||||
## Add packages, clean cache, create dirs, create corda user and change ownership
|
||||
RUN apt-get update && \
|
||||
apt-get -y upgrade && \
|
||||
apt-get -y install bash curl unzip && \
|
||||
rm -rf /var/lib/apt/lists/* && \
|
||||
mkdir -p /opt/corda/cordapps && \
|
||||
mkdir -p /opt/corda/persistence && \
|
||||
mkdir -p /opt/corda/certificates && \
|
||||
mkdir -p /opt/corda/drivers && \
|
||||
mkdir -p /opt/corda/logs && \
|
||||
mkdir -p /opt/corda/bin && \
|
||||
mkdir -p /opt/corda/additional-node-infos && \
|
||||
mkdir -p /etc/corda && \
|
||||
chown -R corda /opt/corda && \
|
||||
chgrp -R corda /opt/corda && \
|
||||
chown -R corda /etc/corda && \
|
||||
chgrp -R corda /etc/corda && \
|
||||
chown -R corda /opt/corda && \
|
||||
chgrp -R corda /opt/corda && \
|
||||
chown -R corda /etc/corda && \
|
||||
chgrp -R corda /etc/corda
|
||||
|
||||
ENV CORDAPPS_FOLDER="/opt/corda/cordapps" \
|
||||
PERSISTENCE_FOLDER="/opt/corda/persistence" \
|
||||
CERTIFICATES_FOLDER="/opt/corda/certificates" \
|
||||
DRIVERS_FOLDER="/opt/corda/drivers" \
|
||||
CONFIG_FOLDER="/etc/corda" \
|
||||
MY_P2P_PORT=10200 \
|
||||
MY_RPC_PORT=10201 \
|
||||
MY_RPC_ADMIN_PORT=10202 \
|
||||
PATH=$PATH:/opt/corda/bin \
|
||||
JVM_ARGS="-XX:+UseG1GC -XX:+UnlockExperimentalVMOptions " \
|
||||
CORDA_ARGS=""
|
||||
|
||||
##CORDAPPS FOLDER
|
||||
VOLUME ["/opt/corda/cordapps"]
|
||||
##PERSISTENCE FOLDER
|
||||
VOLUME ["/opt/corda/persistence"]
|
||||
##CERTS FOLDER
|
||||
VOLUME ["/opt/corda/certificates"]
|
||||
##OPTIONAL JDBC DRIVERS FOLDER
|
||||
VOLUME ["/opt/corda/drivers"]
|
||||
##LOG FOLDER
|
||||
VOLUME ["/opt/corda/logs"]
|
||||
##ADDITIONAL NODE INFOS FOLDER
|
||||
VOLUME ["/opt/corda/additional-node-infos"]
|
||||
##CONFIG LOCATION
|
||||
VOLUME ["/etc/corda"]
|
||||
|
||||
##CORDA JAR
|
||||
COPY --chown=corda:corda corda.jar /opt/corda/bin/corda.jar
|
||||
##CONFIG MANIPULATOR JAR
|
||||
COPY --chown=corda:corda config-exporter.jar /opt/corda/config-exporter.jar
|
||||
##CONFIG GENERATOR SHELL SCRIPT
|
||||
COPY --chown=corda:corda generate-config.sh /opt/corda/bin/config-generator
|
||||
##CORDA RUN SCRIPT
|
||||
COPY --chown=corda:corda run-corda.sh /opt/corda/bin/run-corda
|
||||
##BASE CONFIG FOR GENERATOR
|
||||
COPY --chown=corda:corda starting-node.conf /opt/corda/starting-node.conf
|
||||
|
||||
USER "corda"
|
||||
EXPOSE ${MY_P2P_PORT} ${MY_RPC_PORT} ${MY_RPC_ADMIN_PORT}
|
||||
WORKDIR /opt/corda
|
||||
CMD ["run-corda"]
|
@ -6,6 +6,7 @@ release, see :doc:`app-upgrade-notes`.

Unreleased
----------
* Support for Java 11 (compatibility mode). Please read https://github.com/corda/corda/pull/5356.

* Updating FinalityFlow with functionality to indicate the appropriate StatesToRecord. This allows the initiating party to record states
  from transactions which they are proposing, but are not necessarily participants of.
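The FinalityFlow entry above is easier to follow with a short sketch. The snippet below is illustrative only: it assumes a FinalityFlow constructor that accepts a StatesToRecord argument as described in the entry, and the flow class name is invented for this example.

    import co.paralleluniverse.fibers.Suspendable
    import net.corda.core.flows.FinalityFlow
    import net.corda.core.flows.FlowLogic
    import net.corda.core.flows.FlowSession
    import net.corda.core.node.StatesToRecord
    import net.corda.core.transactions.SignedTransaction

    // Hypothetical initiator-side helper: finalise a transaction while asking the node to
    // record every visible state, not just the states it participates in.
    class RecordAllVisibleStatesFlow(
            private val stx: SignedTransaction,
            private val sessions: List<FlowSession>
    ) : FlowLogic<SignedTransaction>() {
        @Suspendable
        override fun call(): SignedTransaction =
                // Assumed overload per the changelog entry; check the exact signature for your Corda version.
                subFlow(FinalityFlow(stx, sessions, StatesToRecord.ALL_VISIBLE))
    }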
@ -47,8 +48,9 @@ Unreleased
* Added ``nodeDiagnosticInfo`` to the RPC API. The new RPC is also available as the ``run nodeDiagnosticInfo`` command executable from
  the Corda shell. It retrieves version information about the Corda platform and the CorDapps installed on the node.

* ``CordaRPCClient.start`` has a new ``gracefulReconnect`` parameter. When ``true`` (the default is ``false``) it will cause the RPC client
  to try to automatically reconnect to the node on disconnect. Further any ``Observable`` s previously created will continue to vend new
* ``CordaRPCClient.start`` has a new ``gracefulReconnect`` parameter. The class ``GracefulReconnect`` takes two lambdas - one for callbacks
  on disconnect, and one for callbacks on reconnection. When provided (i.e. the ``gracefulReconnect`` parameter is not null) the RPC client
  will try to automatically reconnect to the node on disconnect. Further any ``Observable`` s previously created will continue to vend new
  events on reconnect.

.. note:: This is only best-effort and there are no guarantees of reliability.
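A minimal client-side sketch of the ``nodeDiagnosticInfo`` RPC mentioned above; the address, credentials and printed fields are illustrative assumptions rather than a definitive API reference.

    import net.corda.client.rpc.CordaRPCClient
    import net.corda.core.utilities.NetworkHostAndPort

    fun main() {
        // Placeholder address and credentials - substitute your node's RPC settings.
        val client = CordaRPCClient(NetworkHostAndPort("localhost", 10006))
        client.start("rpcUser", "rpcPassword").use { connection ->
            val info = connection.proxy.nodeDiagnosticInfo()
            // Field names below are assumptions about the returned NodeDiagnosticInfo shape.
            println("Corda ${info.version} (revision ${info.revision}), platform version ${info.platformVersion}")
            info.cordapps.forEach { println("CorDapp: ${it.shortName} ${it.version}") }
        }
    }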
@ -373,10 +373,29 @@ More specifically, the behaviour in the second case is a bit more subtle:

You can enable this graceful form of reconnection by using the ``gracefulReconnect`` parameter in the following way:

.. sourcecode:: kotlin
.. container:: codeset

val cordaClient = CordaRPCClient(nodeRpcAddress)
val cordaRpcOps = cordaClient.start(rpcUserName, rpcUserPassword, gracefulReconnect = true).proxy
.. sourcecode:: kotlin

val gracefulReconnect = GracefulReconnect(onDisconnect = {/* insert disconnect handling */}, onReconnect = {/* insert reconnect handling */})
val cordaClient = CordaRPCClient(nodeRpcAddress)
val cordaRpcOps = cordaClient.start(rpcUserName, rpcUserPassword, gracefulReconnect = gracefulReconnect).proxy

.. sourcecode:: java

private void onDisconnect() {
    // Insert implementation
}

private void onReconnect() {
    // Insert implementation
}

void method() {
    GracefulReconnect gracefulReconnect = new GracefulReconnect(this::onDisconnect, this::onReconnect);
    CordaRPCClient cordaClient = new CordaRPCClient(nodeRpcAddress);
    CordaRPCOps cordaRpcOps = cordaClient.start(rpcUserName, rpcUserPassword, gracefulReconnect).getProxy();
}

Retrying flow invocations
~~~~~~~~~~~~~~~~~~~~~~~~~
@ -26,10 +26,21 @@ sourceSets {
|
||||
compileTestJava.dependsOn tasks.getByPath(':node:capsule:buildCordaJAR')
|
||||
|
||||
dependencies {
|
||||
// Cordformation needs a SLF4J implementation when executing the Network
|
||||
// Bootstrapper, but Log4J doesn't shutdown completely from within Gradle.
|
||||
// Use a much simpler SLF4J implementation here instead.
|
||||
cordaRuntime "org.slf4j:slf4j-simple:$slf4j_version"
|
||||
|
||||
compile project(':core')
|
||||
compile project(':client:jfx')
|
||||
compile project(':node-driver')
|
||||
compile project(':webserver')
|
||||
compile (project(':node-driver')) {
|
||||
// We already have a SLF4J implementation on our runtime classpath,
|
||||
// and we don't need another one.
|
||||
exclude group: 'org.apache.logging.log4j'
|
||||
}
|
||||
compile (project(':webserver')) {
|
||||
exclude group: "org.apache.logging.log4j"
|
||||
}
|
||||
testCompile project(':test-utils')
|
||||
|
||||
compile "org.graphstream:gs-core:1.3"
|
||||
|
@ -4,7 +4,7 @@ import net.corda.core.contracts.LinearState
|
||||
import net.corda.core.contracts.StateAndRef
|
||||
import net.corda.core.contracts.UniqueIdentifier
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.internal.packageName
|
||||
import net.corda.core.internal.packageName_
|
||||
import net.corda.core.node.ServiceHub
|
||||
import net.corda.core.node.services.queryBy
|
||||
import net.corda.core.node.services.vault.QueryCriteria
|
||||
@ -34,7 +34,7 @@ class WorkflowTransactionBuildTutorialTest {
|
||||
|
||||
@Before
|
||||
fun setup() {
|
||||
mockNet = MockNetwork(threadPerNode = true, cordappPackages = listOf(javaClass.packageName))
|
||||
mockNet = MockNetwork(threadPerNode = true, cordappPackages = listOf(javaClass.packageName_))
|
||||
aliceNode = mockNet.createPartyNode(ALICE_NAME)
|
||||
bobNode = mockNet.createPartyNode(BOB_NAME)
|
||||
alice = aliceNode.services.myInfo.identityFromX500Name(ALICE_NAME)
|
||||
|
@ -4,6 +4,7 @@ import net.corda.core.contracts.Amount
|
||||
import net.corda.core.contracts.ContractState
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.internal.packageName
|
||||
import net.corda.core.internal.packageName_
|
||||
import net.corda.core.node.services.queryBy
|
||||
import net.corda.core.node.services.vault.*
|
||||
import net.corda.core.utilities.OpaqueBytes
|
||||
@ -31,7 +32,7 @@ class CustomVaultQueryTest {
|
||||
|
||||
@Before
|
||||
fun setup() {
|
||||
mockNet = MockNetwork(threadPerNode = true, cordappPackages = listOf("net.corda.finance", IOUFlow::class.packageName, javaClass.packageName, "com.template"))
|
||||
mockNet = MockNetwork(threadPerNode = true, cordappPackages = listOf("net.corda.finance", IOUFlow::class.packageName, javaClass.packageName_, "com.template"))
|
||||
nodeA = mockNet.createPartyNode()
|
||||
nodeB = mockNet.createPartyNode()
|
||||
notary = mockNet.defaultNotaryIdentity
|
||||
|
@ -36,7 +36,7 @@ Welcome to Corda !
|
||||
.. _`繁體中文 (Traditional Chinese)`: _static/corda-introductory-whitepaper-zht.pdf
|
||||
.. _`日本語 (Japanese)`: _static/corda-introductory-whitepaper-jp.pdf
|
||||
.. _`download the PDF`: _static/corda-developer-site.pdf
|
||||
|
||||
|
||||
.. only:: latex
|
||||
|
||||
Welcome to Corda, a platform for building decentralized applications. This guidebook covers everything you need to know to create
|
||||
@ -147,3 +147,4 @@ Welcome to Corda !
|
||||
deterministic-modules.rst
|
||||
design/design-docs-index.rst
|
||||
changelog
|
||||
legal-info
|
||||
|
3813
docs/source/legal-info.rst
Normal file
File diff suppressed because it is too large
BIN
lib/quasar.jar
Binary file not shown.
@ -38,6 +38,9 @@ dependencies {
|
||||
compile "com.fasterxml.jackson.core:jackson-databind:$jackson_version"
|
||||
runtime 'com.mattbertolini:liquibase-slf4j:2.0.0'
|
||||
|
||||
// JDK11: required by Quasar at run-time
|
||||
runtime "com.esotericsoftware:kryo:4.0.2"
|
||||
|
||||
testImplementation "org.junit.jupiter:junit-jupiter-api:${junit_jupiter_version}"
|
||||
testImplementation "junit:junit:$junit_version"
|
||||
|
||||
|
@ -107,7 +107,7 @@ class AttachmentsClassLoaderStaticContractTests {
|
||||
@Test
|
||||
fun `verify that contract DummyContract is in classPath`() {
|
||||
val contractClass = Class.forName(ATTACHMENT_PROGRAM_ID)
|
||||
assertThat(contractClass.newInstance()).isInstanceOf(Contract::class.java)
|
||||
assertThat(contractClass.getDeclaredConstructor().newInstance()).isInstanceOf(Contract::class.java)
|
||||
}
|
||||
|
||||
private fun cordappLoaderForPackages(packages: Collection<String>): CordappLoader {
|
||||
|
@ -70,7 +70,9 @@ class TlsDiffProtocolsTest(private val serverAlgo: String, private val clientAlg
|
||||
CIPHER_SUITES_ALL(arrayOf(
|
||||
// 1.3 only
|
||||
"TLS_AES_128_GCM_SHA256",
|
||||
"TLS_CHACHA20_POLY1305_SHA256",
|
||||
// Unsupported CipherSuite: TLS_CHACHA20_POLY1305_SHA256 (java version "11.0.2" 2019-01-15 LTS)
|
||||
// Works with: openjdk version "12.0.1" 2019-04-16 (OpenJDK Runtime Environment (build 12.0.1+12))
|
||||
// "TLS_CHACHA20_POLY1305_SHA256",
|
||||
// 1.2 only
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
|
||||
|
@ -40,7 +40,6 @@ import org.bouncycastle.pqc.jcajce.provider.sphincs.BCSphincs256PrivateKey
|
||||
import org.junit.Rule
|
||||
import org.junit.Test
|
||||
import org.junit.rules.TemporaryFolder
|
||||
import sun.security.rsa.RSAPrivateCrtKeyImpl
|
||||
import java.io.DataInputStream
|
||||
import java.io.DataOutputStream
|
||||
import java.io.IOException
|
||||
@ -86,7 +85,6 @@ class X509UtilitiesTest {
|
||||
Triple(ECDSA_SECP256K1_SHA256,java.security.interfaces.ECPrivateKey::class.java, org.bouncycastle.jce.interfaces.ECPrivateKey::class.java),
|
||||
Triple(EDDSA_ED25519_SHA512, EdDSAPrivateKey::class.java, EdDSAPrivateKey::class.java),
|
||||
// By default, JKS returns SUN RSA key.
|
||||
Triple(RSA_SHA256, RSAPrivateCrtKeyImpl::class.java, BCRSAPrivateCrtKey::class.java),
|
||||
Triple(SPHINCS256_SHA256, BCSphincs256PrivateKey::class.java, BCSphincs256PrivateKey::class.java)
|
||||
)
|
||||
}
|
||||
|
@ -69,10 +69,10 @@ sourceSets {
|
||||
}
|
||||
|
||||
jib.container {
|
||||
mainClass = "net.corda.node.Corda"
|
||||
args = ['--log-to-console', '--no-local-shell', '--config-file=/config/node.conf']
|
||||
// The Groovy string needs to be converted to a `java.lang.String` below.
|
||||
jvmFlags = ['-Xmx1g', "-javaagent:/app/libs/quasar-core-${quasar_version}-jdk8.jar".toString()]
|
||||
mainClass = "net.corda.node.Corda"
|
||||
args = ['--log-to-console', '--no-local-shell', '--config-file=/config/node.conf']
|
||||
// The Groovy string needs to be converted to a `java.lang.String` below.
|
||||
jvmFlags = ['-Xmx1g', "-javaagent:/app/libs/quasar-core-${quasar_version}.jar".toString()]
|
||||
}
|
||||
|
||||
// Use manual resource copying of log4j2.xml rather than source sets.
|
||||
@ -194,10 +194,7 @@ dependencies {
|
||||
// Integration test helpers
|
||||
integrationTestCompile "junit:junit:$junit_version"
|
||||
integrationTestCompile "org.assertj:assertj-core:${assertj_version}"
|
||||
|
||||
// AgentLoader: dynamic loading of JVM agents
|
||||
compile group: 'com.ea.agentloader', name: 'ea-agent-loader', version: "${eaagentloader_version}"
|
||||
|
||||
|
||||
// BFT-Smart dependencies
|
||||
compile 'com.github.bft-smart:library:master-v1.1-beta-g6215ec8-87'
|
||||
|
||||
@ -221,7 +218,10 @@ dependencies {
|
||||
compile "org.jolokia:jolokia-jvm:${jolokia_version}:agent"
|
||||
// Optional New Relic JVM reporter, used to push metrics to the configured account associated with a newrelic.yml configuration. See https://mvnrepository.com/artifact/com.palominolabs.metrics/metrics-new-relic
|
||||
compile "com.palominolabs.metrics:metrics-new-relic:${metrics_new_relic_version}"
|
||||
|
||||
|
||||
// Required by JVMAgentUtil (x-compatible java 8 & 11 agent lookup mechanism)
|
||||
compile files("${System.properties['java.home']}/../lib/tools.jar")
|
||||
|
||||
testCompile(project(':test-cli'))
|
||||
testCompile(project(':test-utils'))
|
||||
|
||||
@ -238,6 +238,11 @@ tasks.withType(JavaCompile) {
|
||||
options.compilerArgs << '-proc:none'
|
||||
}
|
||||
|
||||
tasks.withType(Test) {
|
||||
if (JavaVersion.current() == JavaVersion.VERSION_11)
|
||||
jvmArgs '-Djdk.attach.allowAttachSelf=true'
|
||||
}
|
||||
|
||||
task integrationTest(type: Test) {
|
||||
testClassesDirs = sourceSets.integrationTest.output.classesDirs
|
||||
classpath = sourceSets.integrationTest.runtimeClasspath
|
||||
|
@ -52,10 +52,12 @@ task buildCordaJAR(type: FatCapsule, dependsOn: project(':node').tasks.jar) {
|
||||
applicationId = "net.corda.node.Corda"
|
||||
// See experimental/quasar-hook/README.md for how to generate.
|
||||
def quasarExcludeExpression = "x(antlr**;bftsmart**;co.paralleluniverse**;com.codahale**;com.esotericsoftware**;com.fasterxml**;com.google**;com.ibm**;com.intellij**;com.jcabi**;com.nhaarman**;com.opengamma**;com.typesafe**;com.zaxxer**;de.javakaffee**;groovy**;groovyjarjarantlr**;groovyjarjarasm**;io.atomix**;io.github**;io.netty**;jdk**;junit**;kotlin**;net.bytebuddy**;net.i2p**;org.apache**;org.assertj**;org.bouncycastle**;org.codehaus**;org.crsh**;org.dom4j**;org.fusesource**;org.h2**;org.hamcrest**;org.hibernate**;org.jboss**;org.jcp**;org.joda**;org.junit**;org.mockito**;org.objectweb**;org.objenesis**;org.slf4j**;org.w3c**;org.xml**;org.yaml**;reflectasm**;rx**;org.jolokia**;com.lmax**;picocli**;liquibase**;com.github.benmanes**;org.json**;org.postgresql**;nonapi.io.github.classgraph**)"
|
||||
javaAgents = ["quasar-core-${quasar_version}-jdk8.jar=${quasarExcludeExpression}"]
|
||||
javaAgents = quasar_classifier == null ? ["quasar-core-${quasar_version}.jar=${quasarExcludeExpression}"] : ["quasar-core-${quasar_version}-${quasar_classifier}.jar=${quasarExcludeExpression}"]
|
||||
systemProperties['visualvm.display.name'] = 'Corda'
|
||||
minJavaVersion = '1.8.0'
|
||||
minUpdateVersion['1.8'] = java8_minUpdateVersion
|
||||
if (JavaVersion.current() == JavaVersion.VERSION_1_8) {
|
||||
minJavaVersion = '1.8.0'
|
||||
minUpdateVersion['1.8'] = java8_minUpdateVersion
|
||||
}
|
||||
caplets = ['CordaCaplet']
|
||||
|
||||
// JVM configuration:
|
||||
@ -65,6 +67,8 @@ task buildCordaJAR(type: FatCapsule, dependsOn: project(':node').tasks.jar) {
|
||||
//
|
||||
// If you change these flags, please also update Driver.kt
|
||||
jvmArgs = ['-Xmx512m', '-XX:+UseG1GC']
|
||||
if (JavaVersion.current() == JavaVersion.VERSION_11)
|
||||
jvmArgs += ['-Djdk.attach.allowAttachSelf=true']
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -9,6 +9,7 @@ import java.io.File;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
import java.util.stream.Stream;

public class CordaCaplet extends Capsule {

@ -185,8 +186,8 @@ public class CordaCaplet extends Capsule {

    private static void checkJavaVersion() {
        String version = System.getProperty("java.version");
        if (version == null || !version.startsWith("1.8")) {
            System.err.printf("Error: Unsupported Java version %s; currently only version 1.8 is supported.\n", version);
        if (version == null || Stream.of("1.8", "11").noneMatch(version::startsWith)) {
            System.err.printf("Error: Unsupported Java version %s; currently only version 1.8 or 11 is supported.\n", version);
            System.exit(1);
        }
    }
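The caplet change above replaces the hard-coded "1.8" prefix check with one that accepts either Java 1.8 or 11. A tiny Kotlin illustration of the same predicate (the helper name is invented for this sketch):

    // Same check as the caplet change above, expressed in Kotlin for illustration.
    fun isSupportedJavaVersion(version: String?): Boolean =
            version != null && listOf("1.8", "11").any { version.startsWith(it) }

    fun main() {
        check(isSupportedJavaVersion("1.8.0_171"))
        check(isSupportedJavaVersion("11.0.4"))
        check(!isSupportedJavaVersion("9.0.1"))
    }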
@ -1,7 +1,7 @@
|
||||
package net.corda.node.services.rpc
|
||||
|
||||
import net.corda.client.rpc.CordaRPCClient
|
||||
import net.corda.client.rpc.CordaRPCClientConfiguration
|
||||
import net.corda.client.rpc.GracefulReconnect
|
||||
import net.corda.client.rpc.internal.ReconnectingCordaRPCOps
|
||||
import net.corda.client.rpc.notUsed
|
||||
import net.corda.core.contracts.Amount
|
||||
@ -12,7 +12,10 @@ import net.corda.core.node.services.Vault
|
||||
import net.corda.core.node.services.vault.PageSpecification
|
||||
import net.corda.core.node.services.vault.QueryCriteria
|
||||
import net.corda.core.node.services.vault.builder
|
||||
import net.corda.core.utilities.*
|
||||
import net.corda.core.utilities.NetworkHostAndPort
|
||||
import net.corda.core.utilities.OpaqueBytes
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.finance.contracts.asset.Cash
|
||||
import net.corda.finance.flows.CashIssueAndPaymentFlow
|
||||
import net.corda.finance.schemas.CashSchemaV1
|
||||
@ -36,8 +39,10 @@ import java.util.concurrent.TimeUnit
|
||||
import java.util.concurrent.atomic.AtomicInteger
|
||||
import kotlin.concurrent.thread
|
||||
import kotlin.math.absoluteValue
|
||||
import kotlin.math.max
|
||||
import kotlin.test.assertEquals
|
||||
import kotlin.test.assertTrue
|
||||
import kotlin.test.currentStackTrace
|
||||
|
||||
/**
|
||||
* This is a stress test for the rpc reconnection logic, which triggers failures in a probabilistic way.
|
||||
@ -114,10 +119,21 @@ class RpcReconnectTests {
|
||||
val baseAmount = Amount.parseCurrency("0 USD")
|
||||
val issuerRef = OpaqueBytes.of(0x01)
|
||||
|
||||
var numDisconnects = 0
|
||||
var numReconnects = 0
|
||||
val maxStackOccurrences = AtomicInteger()
|
||||
|
||||
val addressesForRpc = addresses.map { it.proxyAddress }
|
||||
// DOCSTART rpcReconnectingRPC
|
||||
val onReconnect = {
|
||||
numReconnects++
|
||||
// We only expect to see a single reconnectOnError in the stack trace. Otherwise we're in danger of stack overflow recursion
|
||||
maxStackOccurrences.set(max(maxStackOccurrences.get(), currentStackTrace().count { it.methodName == "reconnectOnError" }))
|
||||
Unit
|
||||
}
|
||||
val reconnect = GracefulReconnect(onDisconnect = { numDisconnects++ }, onReconnect = onReconnect)
|
||||
val client = CordaRPCClient(addressesForRpc)
|
||||
val bankAReconnectingRpc = client.start(demoUser.username, demoUser.password, gracefulReconnect = true).proxy as ReconnectingCordaRPCOps
|
||||
val bankAReconnectingRpc = client.start(demoUser.username, demoUser.password, gracefulReconnect = reconnect).proxy as ReconnectingCordaRPCOps
|
||||
// DOCEND rpcReconnectingRPC
|
||||
|
||||
// Observe the vault and collect the observations.
|
||||
@ -266,6 +282,11 @@ class RpcReconnectTests {
|
||||
val nrFailures = nrRestarts.get()
|
||||
log.info("Checking results after $nrFailures restarts.")
|
||||
|
||||
// We should get one disconnect and one reconnect for each failure
|
||||
assertThat(numDisconnects).isEqualTo(numReconnects)
|
||||
assertThat(numReconnects).isLessThanOrEqualTo(nrFailures)
|
||||
assertThat(maxStackOccurrences.get()).isLessThan(2)
|
||||
|
||||
// Query the vault and check that states were created for all flows.
|
||||
fun readCashStates() = bankAReconnectingRpc
|
||||
.vaultQueryByWithPagingSpec(Cash.State::class.java, QueryCriteria.VaultQueryCriteria(status = Vault.StateStatus.CONSUMED), PageSpecification(1, 10000))
|
||||
|
@ -88,13 +88,14 @@ import net.corda.nodeapi.internal.cryptoservice.bouncycastle.BCCryptoService
|
||||
import net.corda.nodeapi.internal.persistence.*
|
||||
import net.corda.tools.shell.InteractiveShell
|
||||
import org.apache.activemq.artemis.utils.ReusableLatch
|
||||
import org.jolokia.jvmagent.JolokiaServer
|
||||
import org.jolokia.jvmagent.JolokiaServerConfig
|
||||
import org.slf4j.Logger
|
||||
import rx.Observable
|
||||
import rx.Scheduler
|
||||
import java.io.IOException
|
||||
import java.lang.reflect.InvocationTargetException
|
||||
import java.nio.file.Path
|
||||
import java.nio.file.Paths
|
||||
import java.security.KeyPair
|
||||
import java.security.KeyStoreException
|
||||
import java.security.cert.X509Certificate
|
||||
@ -331,7 +332,7 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
|
||||
log.info("Node starting up ...")
|
||||
|
||||
val trustRoot = initKeyStores()
|
||||
initialiseJVMAgents()
|
||||
initialiseJolokia()
|
||||
|
||||
schemaService.mappedSchemasWarnings().forEach {
|
||||
val warning = it.toWarning()
|
||||
@ -992,19 +993,15 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
|
||||
return NodeVaultService(platformClock, keyManagementService, services, database, schemaService, cordappLoader.appClassLoader)
|
||||
}
|
||||
|
||||
/** Load configured JVM agents */
|
||||
private fun initialiseJVMAgents() {
|
||||
// JDK 11: switch to directly instantiating jolokia server (rather than indirectly via dynamically self attaching Java Agents,
|
||||
// which is no longer supported from JDK 9 onwards (https://bugs.java.com/bugdatabase/view_bug.do?bug_id=8180425).
|
||||
// No longer need to use https://github.com/electronicarts/ea-agent-loader either (which is also deprecated)
|
||||
private fun initialiseJolokia() {
|
||||
configuration.jmxMonitoringHttpPort?.let { port ->
|
||||
requireNotNull(NodeBuildProperties.JOLOKIA_AGENT_VERSION) {
|
||||
"'jolokiaAgentVersion' missing from build properties"
|
||||
}
|
||||
log.info("Starting Jolokia agent on HTTP port: $port")
|
||||
val libDir = Paths.get(configuration.baseDirectory.toString(), "drivers")
|
||||
val jarFilePath = JVMAgentRegistry.resolveAgentJar(
|
||||
"jolokia-jvm-${NodeBuildProperties.JOLOKIA_AGENT_VERSION}-agent.jar", libDir)
|
||||
?: throw Error("Unable to locate agent jar file")
|
||||
log.info("Agent jar file: $jarFilePath")
|
||||
JVMAgentRegistry.attach("jolokia", "port=$port", jarFilePath)
|
||||
val config = JolokiaServerConfig(mapOf("port" to port.toString()))
|
||||
val server = JolokiaServer(config, false)
|
||||
log.info("Starting Jolokia server on HTTP port: $port")
|
||||
server.start()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -43,7 +43,7 @@ object DataSourceFactory {
            dataSource
        } else {
            // Basic init for the one test that wants to go via this API but without starting a HikariPool:
            (Class.forName(hikariProperties.getProperty("dataSourceClassName")).newInstance() as DataSource).also {
            (Class.forName(hikariProperties.getProperty("dataSourceClassName")).getDeclaredConstructor().newInstance() as DataSource).also {
                PropertyElf.setTargetFromProperties(it, config.dataSourceProperties)
            }
        }
@ -159,10 +159,14 @@ open class Node(configuration: NodeConfiguration,
    }

    private fun hasMinimumJavaVersion(): Boolean {
        // when the ext.java8_minUpdateVersion gradle constant changes, so must this check
        // JDK 11: review naming convention and checking of 'minUpdateVersion' and 'distributionType' (OpenJDK, Oracle, Zulu, AdoptOpenJDK, Corretto)
        return try {
            val update = getJavaUpdateVersion(SystemUtils.JAVA_VERSION) // To filter out cases like 1.8.0_202-ea
            SystemUtils.IS_JAVA_1_8 && update >= 171
            if (SystemUtils.IS_JAVA_11)
                return true
            else {
                val update = getJavaUpdateVersion(SystemUtils.JAVA_VERSION) // To filter out cases like 1.8.0_202-ea
                (SystemUtils.IS_JAVA_1_8 && update >= 171)
            }
        } catch (e: NumberFormatException) { // custom JDKs may not have the update version (e.g. 1.8.0-adoptopenjdk)
            false
        }
@ -24,6 +24,7 @@ import net.corda.node.internal.subcommands.ValidateConfigurationCli.Companion.lo
|
||||
import net.corda.node.services.config.NodeConfiguration
|
||||
import net.corda.node.services.config.shouldStartLocalShell
|
||||
import net.corda.node.services.config.shouldStartSSHDaemon
|
||||
import net.corda.node.utilities.JVMAgentUtil.getJvmAgentProperties
|
||||
import net.corda.node.utilities.registration.NodeRegistrationException
|
||||
import net.corda.nodeapi.internal.addShutdownHook
|
||||
import net.corda.nodeapi.internal.persistence.CouldNotCreateDataSourceException
|
||||
@ -32,7 +33,6 @@ import net.corda.tools.shell.InteractiveShell
|
||||
import org.fusesource.jansi.Ansi
|
||||
import org.slf4j.bridge.SLF4JBridgeHandler
|
||||
import picocli.CommandLine.Mixin
|
||||
import sun.misc.VMSupport
|
||||
import java.io.IOException
|
||||
import java.io.RandomAccessFile
|
||||
import java.lang.management.ManagementFactory
|
||||
@ -245,12 +245,13 @@ open class NodeStartup : NodeStartupLogging {
|
||||
logger.info("PID: ${info.name.split("@").firstOrNull()}") // TODO Java 9 has better support for this
|
||||
logger.info("Main class: ${NodeConfiguration::class.java.location.toURI().path}")
|
||||
logger.info("CommandLine Args: ${info.inputArguments.joinToString(" ")}")
|
||||
logger.info("bootclasspath: ${info.bootClassPath}")
|
||||
// JDK 11 (bootclasspath no longer supported from JDK 9)
|
||||
if (info.isBootClassPathSupported) logger.info("bootclasspath: ${info.bootClassPath}")
|
||||
logger.info("classpath: ${info.classPath}")
|
||||
logger.info("VM ${info.vmName} ${info.vmVendor} ${info.vmVersion}")
|
||||
logger.info("Machine: ${lookupMachineNameAndMaybeWarn()}")
|
||||
logger.info("Working Directory: ${cmdLineOptions.baseDirectory}")
|
||||
val agentProperties = VMSupport.getAgentProperties()
|
||||
val agentProperties = getJvmAgentProperties(logger)
|
||||
if (agentProperties.containsKey("sun.jdwp.listenerAddress")) {
|
||||
logger.info("Debug port: ${agentProperties.getProperty("sun.jdwp.listenerAddress")}")
|
||||
}
|
||||
@ -467,6 +468,8 @@ interface NodeStartupLogging {
|
||||
error is Errors.NativeIoException && error.message?.contains("Address already in use") == true -> error.logAsExpected("One of the ports required by the Corda node is already in use.")
|
||||
error is Errors.NativeIoException && error.message?.contains("Can't assign requested address") == true -> error.logAsExpected("Exception during node startup. Check that addresses in node config resolve correctly.")
|
||||
error is UnresolvedAddressException -> error.logAsExpected("Exception during node startup. Check that addresses in node config resolve correctly.")
|
||||
error is java.nio.file.AccessDeniedException -> error.logAsExpected("Exception during node startup. Corda started with insufficient privileges to access ${error.file}")
|
||||
error is java.nio.file.NoSuchFileException -> error.logAsExpected("Exception during node startup. Corda cannot find file ${error.file}")
|
||||
error.isOpenJdkKnownIssue() -> error.logAsExpected("Exception during node startup - ${error.message}. This is a known OpenJDK issue on some Linux distributions, please use OpenJDK from zulu.org or Oracle JDK.")
|
||||
else -> error.logAsUnexpected("Exception during node startup")
|
||||
}
|
||||
|
@ -2,7 +2,6 @@ package net.corda.node.internal.artemis
|
||||
|
||||
import net.corda.core.internal.uncheckedCast
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.core.utilities.debug
|
||||
import net.corda.node.internal.security.Password
|
||||
import net.corda.node.internal.security.RPCSecurityManager
|
||||
import net.corda.node.services.rpc.LoginListener
|
||||
@ -23,7 +22,6 @@ import javax.security.auth.callback.UnsupportedCallbackException
|
||||
import javax.security.auth.login.FailedLoginException
|
||||
import javax.security.auth.login.LoginException
|
||||
import javax.security.auth.spi.LoginModule
|
||||
import javax.security.cert.X509Certificate
|
||||
|
||||
/**
|
||||
*
|
||||
@ -120,8 +118,9 @@ class BrokerJaasLoginModule : BaseBrokerJaasLoginModule() {
|
||||
|
||||
// The Main authentication logic, responsible for running all the configured checks for each user type
|
||||
// and return the actual User and principals
|
||||
private fun authenticateAndAuthorise(username: String, certificates: Array<X509Certificate>?, password: String): Pair<String, List<RolePrincipal>> {
|
||||
fun requireTls(certificates: Array<X509Certificate>?) = requireNotNull(certificates) { "No client certificates presented." }
|
||||
@Suppress("DEPRECATION") // should use java.security.cert.X509Certificate
|
||||
private fun authenticateAndAuthorise(username: String, certificates: Array<javax.security.cert.X509Certificate>?, password: String): Pair<String, List<RolePrincipal>> {
|
||||
fun requireTls(certificates: Array<javax.security.cert.X509Certificate>?) = requireNotNull(certificates) { "No client certificates presented." }
|
||||
|
||||
return when (username) {
|
||||
ArtemisMessagingComponent.NODE_P2P_USER -> {
|
||||
@ -174,7 +173,8 @@ abstract class BaseBrokerJaasLoginModule : LoginModule {
|
||||
protected lateinit var callbackHandler: CallbackHandler
|
||||
protected val principals = ArrayList<Principal>()
|
||||
|
||||
protected fun getUsernamePasswordAndCerts(): Triple<String, String, Array<X509Certificate>?> {
|
||||
@Suppress("DEPRECATION") // should use java.security.cert.X509Certificate
|
||||
protected fun getUsernamePasswordAndCerts(): Triple<String, String, Array<javax.security.cert.X509Certificate>?> {
|
||||
val nameCallback = NameCallback("Username: ")
|
||||
val passwordCallback = PasswordCallback("Password: ", false)
|
||||
val certificateCallback = CertificateCallback()
|
||||
|
@ -3,13 +3,13 @@ package net.corda.node.internal.artemis
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.nodeapi.internal.crypto.X509Utilities
|
||||
import java.security.KeyStore
|
||||
import javax.security.cert.CertificateException
|
||||
import javax.security.cert.X509Certificate
|
||||
import java.security.cert.CertificateException
|
||||
|
||||
sealed class CertificateChainCheckPolicy {
|
||||
@FunctionalInterface
|
||||
interface Check {
|
||||
fun checkCertificateChain(theirChain: Array<X509Certificate>)
|
||||
@Suppress("DEPRECATION") // should use java.security.cert.X509Certificate
|
||||
fun checkCertificateChain(theirChain: Array<javax.security.cert.X509Certificate>)
|
||||
}
|
||||
|
||||
abstract fun createCheck(keyStore: KeyStore, trustStore: KeyStore): Check
|
||||
@ -17,7 +17,8 @@ sealed class CertificateChainCheckPolicy {
|
||||
object Any : CertificateChainCheckPolicy() {
|
||||
override fun createCheck(keyStore: KeyStore, trustStore: KeyStore): Check {
|
||||
return object : Check {
|
||||
override fun checkCertificateChain(theirChain: Array<X509Certificate>) {
|
||||
@Suppress("DEPRECATION") // should use java.security.cert.X509Certificate
|
||||
override fun checkCertificateChain(theirChain: Array<javax.security.cert.X509Certificate>) {
|
||||
// nothing to do here
|
||||
}
|
||||
}
|
||||
@ -28,7 +29,8 @@ sealed class CertificateChainCheckPolicy {
|
||||
override fun createCheck(keyStore: KeyStore, trustStore: KeyStore): Check {
|
||||
val rootPublicKey = trustStore.getCertificate(X509Utilities.CORDA_ROOT_CA).publicKey
|
||||
return object : Check {
|
||||
override fun checkCertificateChain(theirChain: Array<X509Certificate>) {
|
||||
@Suppress("DEPRECATION") // should use java.security.cert.X509Certificate
|
||||
override fun checkCertificateChain(theirChain: Array<javax.security.cert.X509Certificate>) {
|
||||
val theirRoot = theirChain.last().publicKey
|
||||
if (rootPublicKey != theirRoot) {
|
||||
throw CertificateException("Root certificate mismatch, their root = $theirRoot")
|
||||
@ -42,7 +44,8 @@ sealed class CertificateChainCheckPolicy {
|
||||
override fun createCheck(keyStore: KeyStore, trustStore: KeyStore): Check {
|
||||
val ourPublicKey = keyStore.getCertificate(X509Utilities.CORDA_CLIENT_TLS).publicKey
|
||||
return object : Check {
|
||||
override fun checkCertificateChain(theirChain: Array<X509Certificate>) {
|
||||
@Suppress("DEPRECATION") // should use java.security.cert.X509Certificate
|
||||
override fun checkCertificateChain(theirChain: Array<javax.security.cert.X509Certificate>) {
|
||||
val theirLeaf = theirChain.first().publicKey
|
||||
if (ourPublicKey != theirLeaf) {
|
||||
throw CertificateException("Leaf certificate mismatch, their leaf = $theirLeaf")
|
||||
@ -56,7 +59,8 @@ sealed class CertificateChainCheckPolicy {
|
||||
override fun createCheck(keyStore: KeyStore, trustStore: KeyStore): Check {
|
||||
val trustedPublicKeys = trustedAliases.map { trustStore.getCertificate(it).publicKey }.toSet()
|
||||
return object : Check {
|
||||
override fun checkCertificateChain(theirChain: Array<X509Certificate>) {
|
||||
@Suppress("DEPRECATION") // should use java.security.cert.X509Certificate
|
||||
override fun checkCertificateChain(theirChain: Array<javax.security.cert.X509Certificate>) {
|
||||
if (!theirChain.any { it.publicKey in trustedPublicKeys }) {
|
||||
throw CertificateException("Their certificate chain contained none of the trusted ones")
|
||||
}
|
||||
@ -73,7 +77,8 @@ sealed class CertificateChainCheckPolicy {
|
||||
|
||||
class UsernameMustMatchCommonNameCheck : Check {
|
||||
lateinit var username: String
|
||||
override fun checkCertificateChain(theirChain: Array<X509Certificate>) {
|
||||
@Suppress("DEPRECATION") // should use java.security.cert.X509Certificate
|
||||
override fun checkCertificateChain(theirChain: Array<javax.security.cert.X509Certificate>) {
|
||||
if (!theirChain.any { certificate -> CordaX500Name.parse(certificate.subjectDN.name).commonName == username }) {
|
||||
throw CertificateException("Client certificate does not match login username.")
|
||||
}
|
||||
|
@ -26,7 +26,6 @@ import net.corda.nodeapi.internal.network.NETWORK_PARAMS_FILE_NAME
|
||||
import net.corda.nodeapi.internal.network.SignedNetworkParameters
|
||||
import net.corda.nodeapi.internal.persistence.CordaPersistence
|
||||
import net.corda.nodeapi.internal.persistence.SchemaMigration
|
||||
import sun.reflect.generics.reflectiveObjects.NotImplementedException
|
||||
import java.nio.file.Paths
|
||||
import java.time.Clock
|
||||
import java.time.Duration
|
||||
@ -49,11 +48,11 @@ class MigrationServicesForResolution(
|
||||
val cordappLoader = SchemaMigration.loader.get()
|
||||
|
||||
override fun getAppContext(): CordappContext {
|
||||
throw NotImplementedException()
|
||||
TODO("not implemented")
|
||||
}
|
||||
|
||||
override fun getContractAttachmentID(contractClassName: ContractClassName): AttachmentId? {
|
||||
throw NotImplementedException()
|
||||
TODO("not implemented")
|
||||
}
|
||||
}
|
||||
private val cordappLoader = SchemaMigration.loader.get()
|
||||
|
@ -42,13 +42,12 @@ import org.objenesis.instantiator.ObjectInstantiator
|
||||
import org.objenesis.strategy.InstantiatorStrategy
|
||||
import org.objenesis.strategy.StdInstantiatorStrategy
|
||||
import org.slf4j.Logger
|
||||
import sun.security.ec.ECPublicKeyImpl
|
||||
import sun.security.provider.certpath.X509CertPath
|
||||
import java.io.BufferedInputStream
|
||||
import java.io.ByteArrayOutputStream
|
||||
import java.io.FileInputStream
|
||||
import java.io.InputStream
|
||||
import java.lang.reflect.Modifier.isPublic
|
||||
import java.security.PrivateKey
|
||||
import java.security.PublicKey
|
||||
import java.security.cert.CertPath
|
||||
import java.security.cert.X509Certificate
|
||||
@ -97,7 +96,8 @@ object DefaultKryoCustomizer {
|
||||
register(BufferedInputStream::class.java, InputStreamSerializer)
|
||||
register(Class.forName("sun.net.www.protocol.jar.JarURLConnection\$JarURLInputStream"), InputStreamSerializer)
|
||||
noReferencesWithin<WireTransaction>()
|
||||
register(ECPublicKeyImpl::class.java, publicKeySerializer)
|
||||
register(PublicKey::class.java, publicKeySerializer)
|
||||
register(PrivateKey::class.java, PrivateKeySerializer)
|
||||
register(EdDSAPublicKey::class.java, publicKeySerializer)
|
||||
register(EdDSAPrivateKey::class.java, PrivateKeySerializer)
|
||||
register(CompositeKey::class.java, publicKeySerializer) // Using a custom serializer for compactness
|
||||
@ -109,7 +109,6 @@ object DefaultKryoCustomizer {
|
||||
register(Class::class.java, ClassSerializer)
|
||||
register(FileInputStream::class.java, InputStreamSerializer)
|
||||
register(CertPath::class.java, CertPathSerializer)
|
||||
register(X509CertPath::class.java, CertPathSerializer)
|
||||
register(BCECPrivateKey::class.java, PrivateKeySerializer)
|
||||
register(BCECPublicKey::class.java, publicKeySerializer)
|
||||
register(BCRSAPrivateCrtKey::class.java, PrivateKeySerializer)
|
||||
|
@ -41,10 +41,10 @@ import net.corda.core.utilities.contextLogger
|
||||
import net.corda.node.internal.NodeStartup
|
||||
import net.corda.node.services.api.CheckpointStorage
|
||||
import net.corda.node.services.statemachine.*
|
||||
import net.corda.node.utilities.JVMAgentUtil.getJvmAgentProperties
|
||||
import net.corda.nodeapi.internal.persistence.CordaPersistence
|
||||
import net.corda.serialization.internal.CheckpointSerializeAsTokenContextImpl
|
||||
import net.corda.serialization.internal.withTokenContext
|
||||
import sun.misc.VMSupport
|
||||
import java.nio.file.Path
|
||||
import java.time.Duration
|
||||
import java.time.Instant
|
||||
@ -150,9 +150,9 @@ class CheckpointDumper(private val checkpointStorage: CheckpointStorage, private
|
||||
}
|
||||
|
||||
private fun checkpointAgentRunning(): Boolean {
|
||||
val agentProperties = VMSupport.getAgentProperties()
|
||||
val agentProperties = getJvmAgentProperties(log)
|
||||
return agentProperties.values.any { value ->
|
||||
(value is String && value.contains("checkpoint-agent.jar"))
|
||||
value is String && value.contains("checkpoint-agent.jar")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,52 +0,0 @@
|
||||
package net.corda.node.utilities
|
||||
|
||||
import com.ea.agentloader.AgentLoader
|
||||
import net.corda.core.internal.exists
|
||||
import net.corda.core.internal.isRegularFile
|
||||
import net.corda.core.internal.toPath
|
||||
import java.net.URL
|
||||
import java.net.URLClassLoader
|
||||
import java.nio.file.Path
|
||||
import java.nio.file.Paths
|
||||
import java.util.concurrent.ConcurrentHashMap
|
||||
|
||||
/**
|
||||
* Helper class for loading JVM agents dynamically
|
||||
*/
|
||||
object JVMAgentRegistry {
|
||||
|
||||
/**
|
||||
* Names and options of loaded agents
|
||||
*/
|
||||
val loadedAgents = ConcurrentHashMap<String, String>()
|
||||
|
||||
/**
|
||||
* Load and attach agent located at given [jar], unless [loadedAgents]
|
||||
* indicate that one of its instance has been already loaded.
|
||||
*/
|
||||
fun attach(agentName: String, options: String, jar: Path) {
|
||||
loadedAgents.computeIfAbsent(agentName.toLowerCase()) {
|
||||
AgentLoader.loadAgent(jar.toString(), options)
|
||||
options
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempt finding location of jar for given agent by first searching into
|
||||
* "drivers" directory of [nodeBaseDirectory] and then falling back to
|
||||
* classpath. Returns null if no match is found.
|
||||
*/
|
||||
fun resolveAgentJar(jarFileName: String, driversDir: Path): Path? {
|
||||
require(jarFileName.endsWith(".jar")) { "jarFileName does not have .jar suffix" }
|
||||
|
||||
val path = Paths.get(driversDir.toString(), jarFileName)
|
||||
return if (path.exists() && path.isRegularFile()) {
|
||||
path
|
||||
} else {
|
||||
(this::class.java.classLoader as? URLClassLoader)
|
||||
?.urLs
|
||||
?.map(URL::toPath)
|
||||
?.firstOrNull { it.fileName.toString() == jarFileName }
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,25 @@
package net.corda.node.utilities

import com.sun.tools.attach.VirtualMachine
import org.slf4j.Logger
import java.lang.management.ManagementFactory
import java.util.*

object JVMAgentUtil {
    /**
     * Utility to attach to own VM at run-time and obtain agent details.
     * In Java 9 this requires setting the following run-time jvm flag: -Djdk.attach.allowAttachSelf=true
     * This mechanism supersedes the usage of VMSupport which is not available from Java 9 onwards.
     */
    fun getJvmAgentProperties(log: Logger): Properties {
        val jvmPid = ManagementFactory.getRuntimeMXBean().name.substringBefore('@')
        return try {
            val vm = VirtualMachine.attach(jvmPid)
            return vm.agentProperties
        } catch (e: Exception) {
            log.warn("Unable to determine whether checkpoint agent is running: ${e.message}.\n" +
                    "You may need to pass in -Djdk.attach.allowAttachSelf=true if running on a Java 9 or later VM")
            Properties()
        }
    }
}
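A brief usage sketch of the new helper, mirroring how the NodeStartup and CheckpointDumper changes above consume it; the wrapper function and logger name are illustrative assumptions, while the property key is taken from the NodeStartup change.

    import net.corda.node.utilities.JVMAgentUtil.getJvmAgentProperties
    import org.slf4j.LoggerFactory

    // Illustrative caller: report the JDWP debug listener, if one is attached to this JVM.
    fun logDebugPortIfAttached() {
        val log = LoggerFactory.getLogger("startup-diagnostics")  // placeholder logger name
        val agentProperties = getJvmAgentProperties(log)
        agentProperties.getProperty("sun.jdwp.listenerAddress")?.let { log.info("Debug port: $it") }
    }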
@ -23,7 +23,9 @@ import net.corda.testing.internal.configureDatabase
|
||||
import net.corda.testing.internal.createNodeInfoAndSigned
|
||||
import net.corda.testing.internal.rigorousMock
|
||||
import net.corda.testing.node.MockServices.Companion.makeTestDataSourceProperties
|
||||
import org.apache.commons.lang3.SystemUtils
|
||||
import org.assertj.core.api.Assertions.assertThat
|
||||
import org.junit.Ignore
|
||||
import org.junit.Rule
|
||||
import org.junit.Test
|
||||
import org.junit.rules.TemporaryFolder
|
||||
@ -32,6 +34,7 @@ import java.time.Duration
|
||||
import kotlin.test.assertEquals
|
||||
import kotlin.test.assertFailsWith
|
||||
import kotlin.test.assertNull
|
||||
import kotlin.test.assertTrue
|
||||
|
||||
class NodeTest {
|
||||
@Rule
|
||||
@ -143,6 +146,14 @@ class NodeTest {
|
||||
}
|
||||
}
|
||||
|
||||
// JDK 11 check
|
||||
@Test
|
||||
fun `test getJavaRuntimeVersion`() {
|
||||
assertTrue(SystemUtils.IS_JAVA_1_8 || SystemUtils.IS_JAVA_11)
|
||||
}
|
||||
|
||||
// JDK11: revisit (JDK 9+ uses different numbering scheme: see https://docs.oracle.com/javase/9/docs/api/java/lang/Runtime.Version.html)
|
||||
@Ignore
|
||||
@Test
|
||||
fun `test getJavaUpdateVersion`() {
|
||||
assertThat(getJavaUpdateVersion("1.8.0_202-ea")).isEqualTo(202)
|
||||
|
@ -2,13 +2,13 @@ package net.corda.node.internal.cordapp
|
||||
|
||||
import co.paralleluniverse.fibers.Suspendable
|
||||
import net.corda.core.flows.*
|
||||
import net.corda.core.internal.packageName
|
||||
import net.corda.node.VersionInfo
|
||||
import net.corda.nodeapi.internal.DEV_PUB_KEY_HASHES
|
||||
import net.corda.testing.node.internal.cordappWithPackages
|
||||
import org.assertj.core.api.Assertions.assertThat
|
||||
import org.junit.Test
|
||||
import java.nio.file.Paths
|
||||
import net.corda.core.internal.packageName_
|
||||
|
||||
@InitiatingFlow
|
||||
class DummyFlow : FlowLogic<Unit>() {
|
||||
@ -79,7 +79,7 @@ class JarScanningCordappLoaderTest {
|
||||
|
||||
@Test
|
||||
fun `flows are loaded by loader`() {
|
||||
val jarFile = cordappWithPackages(javaClass.packageName).jarFile
|
||||
val jarFile = cordappWithPackages(javaClass.packageName_).jarFile
|
||||
val loader = JarScanningCordappLoader.fromJarUrls(listOf(jarFile.toUri().toURL()))
|
||||
|
||||
// One cordapp from this source tree. In gradle it will also pick up the node jar.
|
||||
|
@ -24,6 +24,7 @@ import net.corda.testing.core.ALICE_NAME
|
||||
import net.corda.testing.core.TestIdentity
|
||||
import net.corda.testing.core.internal.CheckpointSerializationEnvironmentRule
|
||||
import net.corda.testing.internal.rigorousMock
|
||||
import org.apache.commons.lang3.SystemUtils
|
||||
import org.assertj.core.api.Assertions.*
|
||||
import org.junit.Assert.assertArrayEquals
|
||||
import org.junit.Assert.assertEquals
|
||||
@ -353,8 +354,11 @@ class KryoTests(private val compression: CordaSerializationEncoding?) {
|
||||
val obj = Holder(ByteArray(20000))
|
||||
val uncompressedSize = obj.checkpointSerialize(context.withEncoding(null)).size
|
||||
val compressedSize = obj.checkpointSerialize(context.withEncoding(CordaSerializationEncoding.SNAPPY)).size
|
||||
// If these need fixing, sounds like Kryo wire format changed and checkpoints might not surive an upgrade.
|
||||
assertEquals(20222, uncompressedSize)
|
||||
// If these need fixing, sounds like Kryo wire format changed and checkpoints might not survive an upgrade.
|
||||
if (SystemUtils.IS_JAVA_11)
|
||||
assertEquals(20172, uncompressedSize)
|
||||
else
|
||||
assertEquals(20222, uncompressedSize)
|
||||
assertEquals(1111, compressedSize)
|
||||
}
|
||||
}
|
||||
|
@@ -4,7 +4,6 @@ import com.nhaarman.mockito_kotlin.any
import com.nhaarman.mockito_kotlin.mock
import com.nhaarman.mockito_kotlin.times
import com.nhaarman.mockito_kotlin.verify
import com.sun.xml.internal.messaging.saaj.util.ByteOutputStream
import net.corda.core.crypto.SecureHash
import net.corda.core.internal.SignedDataWithCert
import net.corda.core.node.NetworkParameters
@@ -26,8 +25,7 @@ import org.junit.After
import org.junit.Before
import org.junit.Rule
import org.junit.Test
import java.io.PrintStream
import kotlin.streams.toList
import kotlin.test.assertNull

class DBNetworkParametersStorageTest {
    @Rule
@@ -98,24 +96,11 @@ class DBNetworkParametersStorageTest {
    @Test
    fun `try save parameters with incorrect signature`() {
        database.transaction {
            val consoleOutput = interceptConsoleOutput {
                networkParametersService.lookup(hash3)
            }
            assertThat(consoleOutput).anySatisfy {
                it.contains("Caused by: java.security.cert.CertPathValidatorException: subject/issuer name chaining check failed")
            }
            // logs a warning (java.security.cert.CertPathValidatorException: Cert path failed to validate)
            assertNull(networkParametersService.lookup(hash3))
        }
    }

    private fun interceptConsoleOutput(block: () -> Unit): List<String> {
        val oldOut = System.out
        val out = ByteOutputStream()
        System.setOut(PrintStream(out))
        block()
        System.setOut(oldOut)
        return out.bytes.inputStream().bufferedReader().lines().toList()
    }

    private fun createMockNetworkMapClient(): NetworkMapClient {
        return mock {
            on { getNetworkParameters(any()) }.then {
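Editor's aside: the removed helper above depended on com.sun.xml.internal.messaging.saaj.util.ByteOutputStream, which is no longer available on JDK 11 (the bundled SAAJ classes were dropped with the Java EE modules). If console capture were still wanted, a hedged sketch using only public java.io classes (the helper name is hypothetical, not from the commit) might be:

import java.io.ByteArrayOutputStream
import java.io.PrintStream

// Hypothetical replacement helper: captures System.out with the public java.io API,
// avoiding the JDK-internal ByteOutputStream class.
private fun captureConsoleOutput(block: () -> Unit): List<String> {
    val previous = System.out
    val buffer = ByteArrayOutputStream()
    System.setOut(PrintStream(buffer, true))
    try {
        block()
    } finally {
        System.setOut(previous)
    }
    return buffer.toString().lines()
}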
@@ -5,7 +5,7 @@ import com.esotericsoftware.kryo.KryoException
import net.corda.core.contracts.UniqueIdentifier
import net.corda.core.flows.FlowLogic
import net.corda.core.identity.CordaX500Name
import net.corda.core.internal.packageName
import net.corda.core.internal.packageName_
import net.corda.core.schemas.MappedSchema
import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.testing.common.internal.testNetworkParameters
@@ -45,7 +45,7 @@ class ExposeJpaToFlowsTests {
    fun setUp() {
        mockNet = MockNetwork(MockNetworkParameters(cordappsForAllNodes = listOf(enclosedCordapp())))
        val (db, mockServices) = MockServices.makeTestDatabaseAndMockServices(
                cordappPackages = listOf(javaClass.packageName),
                cordappPackages = listOf(javaClass.packageName_),
                identityService = makeTestIdentityService(myself.identity),
                initialIdentity = myself,
                networkParameters = testNetworkParameters(minimumPlatformVersion = 4)
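Editor's aside on the packageName to packageName_ rename seen here and in JarScanningCordappLoaderTest: java.lang.Class gained getPackageName() in JDK 9, and in Kotlin a member always wins over an extension property of the same name, so a Corda extension called packageName would presumably stop being picked once the code compiles against JDK 11. The following definition is an assumption for illustration only; Corda's actual packageName_ may be written differently:

// Assumed shape of the renamed extension, shown only to make the clash concrete.
val Class<*>.packageName_: String get() = requireNotNull(this.`package`?.name)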
@@ -12,14 +12,23 @@ dependencies {
    cordapp project(':finance:contracts')
    cordapp project(':finance:workflows')

    // Cordformation needs a SLF4J implementation when executing the Network
    // Bootstrapper, but Log4J doesn't shutdown completely from within Gradle.
    // Use a much simpler SLF4J implementation here instead.
    cordaRuntime "org.slf4j:slf4j-simple:$slf4j_version"

    // Corda integration dependencies
    cordaRuntime project(path: ":node:capsule", configuration: 'runtimeArtifacts')
    cordaRuntime project(path: ":webserver:webcapsule", configuration: 'runtimeArtifacts')
    cordaCompile project(':core')
    cordaCompile project(':client:jfx')
    cordaCompile project(':client:rpc')
    cordaCompile project(':webserver')
    cordaCompile project(':node-driver')
    cordaCompile (project(':webserver')) {
        exclude group: "org.apache.logging.log4j"
    }
    cordaCompile (project(':node-driver')) {
        exclude group: "org.apache.logging.log4j"
    }

    // Javax is required for webapis
    compile "org.glassfish.jersey.core:jersey-server:${jersey_version}"
@@ -1,6 +1,7 @@
package net.corda.bank.api

import net.corda.bank.api.BankOfCordaWebApi.IssueRequestParams
import net.corda.client.rpc.CordaRPCClientConfiguration
import net.corda.client.rpc.internal.ReconnectingCordaRPCOps
import net.corda.core.messaging.startFlow
import net.corda.core.transactions.SignedTransaction
@@ -43,7 +44,7 @@ object BankOfCordaClientApi {
     */
    fun requestRPCIssueHA(availableRpcServers: List<NetworkHostAndPort>, params: IssueRequestParams): SignedTransaction {
        // TODO: privileged security controls required
        ReconnectingCordaRPCOps(availableRpcServers, BOC_RPC_USER, BOC_RPC_PWD).use { rpc->
        ReconnectingCordaRPCOps(availableRpcServers, BOC_RPC_USER, BOC_RPC_PWD, CordaRPCClientConfiguration.DEFAULT).use { rpc->
            rpc.waitUntilNetworkReady().getOrThrow()

            // Resolve parties via RPC
@@ -4,7 +4,10 @@ apply plugin: 'net.corda.plugins.cordformation'

dependencies {
    runtimeOnly project(':node-api')
    runtimeOnly "org.apache.logging.log4j:log4j-slf4j-impl:$log4j_version"
    // Cordformation needs a SLF4J implementation when executing the Network
    // Bootstrapper, but Log4J doesn't shutdown completely from within Gradle.
    // Use a much simpler SLF4J implementation here instead.
    cordaRuntime "org.slf4j:slf4j-simple:$slf4j_version"

    // Corda integration dependencies
    runtime project(path: ":node:capsule", configuration: 'runtimeArtifacts')
@@ -34,11 +34,18 @@ dependencies {
    cordapp project(path: ':samples:simm-valuation-demo:contracts-states', configuration: 'shrinkArtifacts')
    cordapp project(':samples:simm-valuation-demo:flows')

    // Cordformation needs a SLF4J implementation when executing the Network
    // Bootstrapper, but Log4J doesn't shutdown completely from within Gradle.
    // Use a much simpler SLF4J implementation here instead.
    cordaRuntime "org.slf4j:slf4j-simple:$slf4j_version"

    // Corda integration dependencies
    cordaRuntime project(path: ":node:capsule", configuration: 'runtimeArtifacts')
    cordaRuntime project(path: ":webserver:webcapsule", configuration: 'runtimeArtifacts')
    cordaCompile project(':core')
    cordaCompile project(':webserver')
    cordaCompile (project(':webserver')) {
        exclude group: "org.apache.logging.log4j"
    }

    // Javax is required for webapis
    compile "org.glassfish.jersey.core:jersey-server:$jersey_version"
@@ -51,8 +51,12 @@ task shrink(type: ProGuardTask) {
    injars jar
    outjars shrinkJar

    libraryjars "$javaHome/lib/rt.jar"
    libraryjars "$javaHome/lib/jce.jar"
    if (JavaVersion.current().isJava9Compatible()) {
        libraryjars "$javaHome/jmods"
    } else {
        libraryjars "$javaHome/lib/rt.jar"
        libraryjars "$javaHome/lib/jce.jar"
    }
    configurations.runtimeClasspath.forEach {
        libraryjars it.path, filter: '!META-INF/versions/**'
    }
@@ -1,3 +1,5 @@
import static org.gradle.api.JavaVersion.VERSION_1_8

description 'Corda serialization (deterministic)'

apply from: '../deterministic.gradle'
@@ -8,6 +10,9 @@ apply plugin: 'idea'

evaluationDependsOn(":serialization")

// required by DJVM and Avian JVM (for running inside the SGX enclave) which only supports Java 8.
targetCompatibility = VERSION_1_8

def javaHome = System.getProperty('java.home')
def jarBaseName = "corda-${project.name}".toString()

@@ -68,9 +73,13 @@ task predeterminise(type: ProGuardTask, dependsOn: project(':core-deterministic'
    injars patchSerialization
    outjars file("$buildDir/proguard/pre-deterministic-${project.version}.jar")

    libraryjars file("$javaHome/lib/rt.jar")
    libraryjars file("$javaHome/lib/jce.jar")
    libraryjars file("$javaHome/lib/ext/sunec.jar")
    if (JavaVersion.current().isJava9Compatible()) {
        libraryjars "$javaHome/jmods"
    } else {
        libraryjars file("$javaHome/lib/rt.jar")
        libraryjars file("$javaHome/lib/jce.jar")
        libraryjars file("$javaHome/lib/ext/sunec.jar")
    }
    configurations.compileClasspath.forEach {
        if (originalJar != it) {
            libraryjars it, filter: '!META-INF/versions/**'
@@ -113,8 +122,12 @@ task determinise(type: ProGuardTask) {
    injars jarFilter
    outjars file("$buildDir/proguard/$jarBaseName-${project.version}.jar")

    libraryjars file("$javaHome/lib/rt.jar")
    libraryjars file("$javaHome/lib/jce.jar")
    if (JavaVersion.current().isJava9Compatible()) {
        libraryjars "$javaHome/jmods"
    } else {
        libraryjars file("$javaHome/lib/rt.jar")
        libraryjars file("$javaHome/lib/jce.jar")
    }
    configurations.deterministicLibraries.forEach {
        libraryjars it, filter: '!META-INF/versions/**'
    }
@@ -242,14 +242,14 @@ class SerializationOutputTests(private val compression: CordaSerializationEncodi
        }
        val des = DeserializationInput(freshDeserializationFactory)
        val desObj = des.deserialize(bytes, testSerializationContext.withEncodingWhitelist(encodingWhitelist))
        assertTrue(Objects.deepEquals(obj, desObj) == expectedEqual)
        assertTrue(deepEquals(obj, desObj) == expectedEqual)

        // Now repeat with a re-used factory
        val ser2 = SerializationOutput(factory)
        val des2 = DeserializationInput(factory)
        val desObj2 = des2.deserialize(ser2.serialize(obj, compression), testSerializationContext.withEncodingWhitelist(encodingWhitelist))
        assertTrue(Objects.deepEquals(obj, desObj2) == expectedEqual)
        assertTrue(Objects.deepEquals(desObj, desObj2) == expectDeserializedEqual)
        assertTrue(deepEquals(obj, desObj2) == expectedEqual)
        assertTrue(deepEquals(desObj, desObj2) == expectDeserializedEqual)

        // TODO: add some schema assertions to check correctly formed.
        return desObj
@@ -580,7 +580,7 @@ class SerializationOutputTests(private val compression: CordaSerializationEncodi
        assertTrue(desThrowable is CordaRuntimeException) // Since we don't handle the other case(s) yet
        if (desThrowable is CordaRuntimeException) {
            assertEquals("${t.javaClass.name}: ${t.message}", desThrowable.message)
            assertTrue(Objects.deepEquals(t.stackTrace, desThrowable.stackTrace))
            assertTrue(Objects.deepEquals(t.stackTrace.toStackTraceBasic, desThrowable.stackTrace.toStackTraceBasic))
            assertEquals(t.suppressed.size, desThrowable.suppressed.size)
            t.suppressed.zip(desThrowable.suppressed).forEach { (before, after) -> assertSerializedThrowableEquivalent(before, after) }
        }
@@ -1521,5 +1521,36 @@ class SerializationOutputTests(private val compression: CordaSerializationEncodi
        assertEquals(20059, uncompressedSize)
        assertEquals(1018, compressedSize)
        }
    }

    // JDK11: backwards compatibility function to deal with StacktraceElement comparison pre-JPMS
    private fun deepEquals(a: Any?, b: Any?): Boolean {
        return if (a === b)
            true
        else if (a == null || b == null)
            false
        else {
            if (a is Exception && b is Exception)
                (a.cause == b.cause && a.localizedMessage == b.localizedMessage && a.message == b.message) &&
                        Objects.deepEquals(a.stackTrace.toStackTraceBasic, b.stackTrace.toStackTraceBasic)
            else
                Objects.deepEquals(a, b)
        }
    }

    private val <T> Array<T>.toStackTraceBasic: Unit
        get() {
            this.map { StackTraceElementBasic(it as StackTraceElement) }
        }

    // JPMS adds additional fields that are not equal according to classloader/module hierarchy
    data class StackTraceElementBasic(val ste: StackTraceElement) {
        override fun equals(other: Any?): Boolean {
            return if (other is StackTraceElementBasic)
                (ste.className == other.ste.className) &&
                        (ste.methodName == other.ste.methodName) &&
                        (ste.fileName == other.ste.fileName) &&
                        (ste.lineNumber == other.ste.lineNumber)
            else false
        }
    }
}
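Editor's aside on the helpers above: as quoted, toStackTraceBasic is declared with type Unit and its getter discards the mapped list, so the Objects.deepEquals calls on it compare Unit with Unit. A variant that compares the reduced frames would return the list; this is a sketch only, not a change made by the commit, and it reuses the StackTraceElementBasic class shown above:

// Sketch only: returns the reduced frames so deepEquals compares real data.
private val Array<StackTraceElement>.toStackTraceBasic: List<StackTraceElementBasic>
    get() = map { StackTraceElementBasic(it) }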
@@ -1,9 +1,14 @@
import static org.gradle.api.JavaVersion.VERSION_1_8

apply plugin: 'kotlin'
apply plugin: 'net.corda.plugins.publish-utils'
apply plugin: 'com.jfrog.artifactory'

description 'Corda serialization'

// required by DJVM and Avian JVM (for running inside the SGX enclave) which only supports Java 8.
targetCompatibility = VERSION_1_8

dependencies {
    compile project(":core")
@ -2,8 +2,6 @@ package net.corda.serialization.internal
|
||||
|
||||
import net.corda.core.DeleteForDJVM
|
||||
import net.corda.core.serialization.ClassWhitelist
|
||||
import sun.misc.Unsafe
|
||||
import sun.security.util.Password
|
||||
import java.io.*
|
||||
import java.lang.invoke.*
|
||||
import java.lang.reflect.AccessibleObject
|
||||
@ -50,7 +48,6 @@ object AllButBlacklisted : ClassWhitelist {
|
||||
ClassLoader::class.java.name,
|
||||
Handler::class.java.name, // MemoryHandler, StreamHandler
|
||||
Runtime::class.java.name,
|
||||
Unsafe::class.java.name,
|
||||
ZipFile::class.java.name,
|
||||
Provider::class.java.name,
|
||||
SecurityManager::class.java.name,
|
||||
@ -62,7 +59,6 @@ object AllButBlacklisted : ClassWhitelist {
|
||||
|
||||
// java.security.
|
||||
KeyStore::class.java.name,
|
||||
Password::class.java.name,
|
||||
AccessController::class.java.name,
|
||||
Permission::class.java.name,
|
||||
|
||||
|
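Editor's aside: the entries removed here reference JDK-internal types that JDK 11's module system restricts or discourages (sun.security.util.Password is not exported from java.base; sun.misc.Unsafe is only reachable via jdk.unsupported). Purely as an illustration of an alternative, not what the commit does, such entries can be expressed as fully qualified name strings so no compile-time reference to the internal class is needed:

// Sketch only: string names avoid a compile-time dependency on JDK-internal classes.
val extraBlacklistedNames: Set<String> = setOf(
        "sun.misc.Unsafe",
        "sun.security.util.Password"
)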
@@ -5,7 +5,6 @@ import net.corda.core.utilities.NetworkHostAndPort
import org.apache.activemq.artemis.api.core.SimpleString
import rx.Notification
import rx.exceptions.OnErrorNotImplementedException
import sun.security.x509.X509CertImpl
import java.security.cert.CRLReason
import java.util.*

@@ -62,7 +61,6 @@ object DefaultWhitelist : SerializationWhitelist {
            StackTraceElement::class.java,

            // Implementation of X509Certificate.
            X509CertImpl::class.java,
            CRLReason::class.java
    )
}
@@ -88,9 +88,7 @@ open class ArraySerializer(override val type: Type, factory: LocalSerializerFact
            context: SerializationContext
    ): Any {
        if (obj is List<*>) {
            return obj.map {
                input.readObjectOrNull(redescribe(it, elementType), schemas, elementType, context)
            }.toArrayOfType(elementType)
            return obj.map { input.readObjectOrNull(it, schemas, elementType, context) }.toArrayOfType(elementType)
        } else throw AMQPNotSerializableException(type, "Expected a List but found $obj")
    }
@@ -26,10 +26,12 @@ private class ConstructorCaller(private val javaConstructor: Constructor<Any>):
        try {
            javaConstructor.newInstance(*parameters)
        } catch (e: InvocationTargetException) {
            @Suppress("DEPRECATION") // JDK11: isAccessible() should be replaced with canAccess() (since 9)
            throw NotSerializableException(
                    "Constructor for ${javaConstructor.declaringClass} (isAccessible=${javaConstructor.isAccessible}) " +
                            "failed when called with parameters ${parameters.toList()}: ${e.cause!!.message}")
        } catch (e: IllegalAccessException) {
            @Suppress("DEPRECATION") // JDK11: isAccessible() should be replaced with canAccess() (since 9)
            throw NotSerializableException(
                    "Constructor for ${javaConstructor.declaringClass} (isAccessible=${javaConstructor.isAccessible}) " +
                            "not accessible: ${e.message}")
@@ -44,10 +46,12 @@ private class SetterCaller(val setter: Method): (Any, Any?) -> Unit {
        try {
            setter.invoke(target, value)
        } catch (e: InvocationTargetException) {
            @Suppress("DEPRECATION") // JDK11: isAccessible() should be replaced with canAccess() (since 9)
            throw NotSerializableException(
                    "Setter ${setter.declaringClass}.${setter.name} (isAccessible=${setter.isAccessible} " +
                            "failed when called with parameter $value: ${e.cause!!.message}")
        } catch (e: IllegalAccessException) {
            @Suppress("DEPRECATION") // JDK11: isAccessible() should be replaced with canAccess() (since 9)
            throw NotSerializableException(
                    "Setter ${setter.declaringClass}.${setter.name} (isAccessible=${setter.isAccessible} " +
                            "not accessible: ${e.message}")
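Editor's aside: the @Suppress("DEPRECATION") annotations above flag that AccessibleObject.isAccessible() is deprecated from JDK 9 in favour of canAccess(). As a hedged sketch only (function names assumed, not from the commit), the replacement passes null for constructors and static members and the receiver for instance members:

import java.lang.reflect.Constructor
import java.lang.reflect.Method

// Sketch only: canAccess(null) is the constructor/static-member form;
// instance members pass the target object instead.
fun isCallable(ctor: Constructor<*>): Boolean = ctor.canAccess(null)
fun isCallable(setter: Method, target: Any): Boolean = setter.canAccess(target)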
@@ -346,7 +346,7 @@ class ClassCarpenterImpl @JvmOverloads constructor (override val whitelist: Clas
            visitCode()
            visitLdcInsn(Type.getType("L${schema.jvmName};"))
            visitVarInsn(ALOAD, 0)
            visitMethodInsn(INVOKESTATIC, jlEnum, "valueOf", "(L$jlClass;L$jlString;)L$jlEnum;", true)
            visitMethodInsn(INVOKESTATIC, jlEnum, "valueOf", "(L$jlClass;L$jlString;)L$jlEnum;", false)
            visitTypeInsn(CHECKCAST, schema.jvmName)
            visitInsn(ARETURN)
            visitMaxs(0, 0)
@@ -3,6 +3,7 @@ package net.corda.serialization.internal.amqp
import net.corda.core.serialization.CordaSerializable
import net.corda.serialization.internal.amqp.testutils.*
import org.junit.Test
import java.lang.Character.valueOf
import kotlin.test.assertEquals
import kotlin.test.fail

@@ -49,9 +50,9 @@ class DeserializeSimpleTypesTests {
    @Suppress("PLATFORM_CLASS_MAPPED_TO_KOTLIN")
    @Test
    fun testCharacter() {
        data class C(val c: Character)
        data class C(val c: Char)

        val c = C(Character('c'))
        val c = C(valueOf('c'))
        val serialisedC = SerializationOutput(sf1).serialize(c)
        val deserializedC = DeserializationInput(sf1).deserialize(serialisedC)

@@ -90,9 +91,9 @@ class DeserializeSimpleTypesTests {
    @Suppress("PLATFORM_CLASS_MAPPED_TO_KOTLIN")
    @Test
    fun testArrayOfInteger() {
        class IA(val ia: Array<Integer>)
        class IA(val ia: Array<Int>)

        val ia = IA(arrayOf(Integer(1), Integer(2), Integer(3)))
        val ia = IA(arrayOf(Integer.valueOf(1), Integer.valueOf(2), Integer.valueOf(3)))

        assertEquals("class [Ljava.lang.Integer;", ia.ia::class.java.toString())
        assertEquals(AMQPTypeIdentifiers.nameForType(ia.ia::class.java), "int[]")
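Editor's aside on the test changes above: the boxed-primitive constructors such as Integer(int) and Character(char) are deprecated from JDK 9, which is why these tests switch to the valueOf factories. A minimal hedged illustration, not part of the commit:

// Sketch only: valueOf factories replace the deprecated boxing constructors.
val boxedInt = Integer.valueOf(3)       // rather than Integer(3)
val boxedChar = Character.valueOf('c')  // rather than Character('c')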