diff --git a/.idea/compiler.xml b/.idea/compiler.xml index c3348988c8..9844574b57 100644 --- a/.idea/compiler.xml +++ b/.idea/compiler.xml @@ -98,8 +98,6 @@ - - @@ -235,6 +233,8 @@ + + diff --git a/build.gradle b/build.gradle index eb1d222558..373d206fe1 100644 --- a/build.gradle +++ b/build.gradle @@ -96,6 +96,7 @@ buildscript { ext.commons_cli_version = '1.4' ext.snappy_version = '0.4' ext.fast_classpath_scanner_version = '2.12.3' + ext.jcabi_manifests_version = '1.1' // Update 121 is required for ObjectInputFilter and at time of writing 131 was latest: ext.java8_minUpdateVersion = '131' diff --git a/client/jackson/build.gradle b/client/jackson/build.gradle index 8ab3793ba0..82543b434a 100644 --- a/client/jackson/build.gradle +++ b/client/jackson/build.gradle @@ -16,11 +16,8 @@ apply plugin: 'com.jfrog.artifactory' dependencies { compile project(':serialization') - testCompile project(':test-utils') compile "org.jetbrains.kotlin:kotlin-stdlib-jdk8:$kotlin_version" - testCompile "org.jetbrains.kotlin:kotlin-test:$kotlin_version" - // Jackson and its plugins: parsing to/from JSON and other textual formats. compile "com.fasterxml.jackson.module:jackson-module-kotlin:$jackson_version" // Yaml is useful for parsing strings to method calls. @@ -29,7 +26,9 @@ dependencies { compile "com.fasterxml.jackson.datatype:jackson-datatype-jsr310:$jackson_version" compile "com.google.guava:guava:$guava_version" + testCompile project(':test-utils') testCompile project(path: ':core', configuration: 'testArtifacts') + testCompile "org.jetbrains.kotlin:kotlin-test:$kotlin_version" testCompile "junit:junit:$junit_version" } diff --git a/client/jackson/src/main/kotlin/net/corda/client/jackson/JacksonSupport.kt b/client/jackson/src/main/kotlin/net/corda/client/jackson/JacksonSupport.kt index 492f9e84bb..f93b850392 100644 --- a/client/jackson/src/main/kotlin/net/corda/client/jackson/JacksonSupport.kt +++ b/client/jackson/src/main/kotlin/net/corda/client/jackson/JacksonSupport.kt @@ -31,10 +31,7 @@ import net.corda.core.contracts.Amount import net.corda.core.contracts.ContractState import net.corda.core.contracts.StateRef import net.corda.core.crypto.* -import net.corda.core.identity.AbstractParty -import net.corda.core.identity.AnonymousParty -import net.corda.core.identity.CordaX500Name -import net.corda.core.identity.Party +import net.corda.core.identity.* import net.corda.core.internal.CertRole import net.corda.core.internal.VisibleForTesting import net.corda.core.internal.uncheckedCast @@ -65,12 +62,13 @@ import javax.security.auth.x500.X500Principal * * Note that Jackson can also be used to serialise/deserialise other formats such as Yaml and XML. */ -@Suppress("DEPRECATION") +@Suppress("DEPRECATION", "MemberVisibilityCanBePrivate") object JacksonSupport { // If you change this API please update the docs in the docsite (json.rst) @DoNotImplement interface PartyObjectMapper { + val isFullParties: Boolean fun wellKnownPartyFromX500Name(name: CordaX500Name): Party? fun partyFromKey(owningKey: PublicKey): Party? 
fun partiesFromName(query: String): Set @@ -78,9 +76,11 @@ object JacksonSupport { } @Deprecated("This is an internal class, do not use", replaceWith = ReplaceWith("JacksonSupport.createDefaultMapper")) - class RpcObjectMapper(val rpc: CordaRPCOps, - factory: JsonFactory, - val fuzzyIdentityMatch: Boolean) : PartyObjectMapper, ObjectMapper(factory) { + class RpcObjectMapper + @JvmOverloads constructor(val rpc: CordaRPCOps, + factory: JsonFactory, + val fuzzyIdentityMatch: Boolean, + override val isFullParties: Boolean = false) : PartyObjectMapper, ObjectMapper(factory) { override fun wellKnownPartyFromX500Name(name: CordaX500Name): Party? = rpc.wellKnownPartyFromX500Name(name) override fun partyFromKey(owningKey: PublicKey): Party? = rpc.partyFromKey(owningKey) override fun partiesFromName(query: String) = rpc.partiesFromName(query, fuzzyIdentityMatch) @@ -88,9 +88,11 @@ object JacksonSupport { } @Deprecated("This is an internal class, do not use") - class IdentityObjectMapper(val identityService: IdentityService, - factory: JsonFactory, - val fuzzyIdentityMatch: Boolean) : PartyObjectMapper, ObjectMapper(factory) { + class IdentityObjectMapper + @JvmOverloads constructor(val identityService: IdentityService, + factory: JsonFactory, + val fuzzyIdentityMatch: Boolean, + override val isFullParties: Boolean = false) : PartyObjectMapper, ObjectMapper(factory) { override fun wellKnownPartyFromX500Name(name: CordaX500Name): Party? = identityService.wellKnownPartyFromX500Name(name) override fun partyFromKey(owningKey: PublicKey): Party? = identityService.partyFromKey(owningKey) override fun partiesFromName(query: String) = identityService.partiesFromName(query, fuzzyIdentityMatch) @@ -98,7 +100,9 @@ object JacksonSupport { } @Deprecated("This is an internal class, do not use", replaceWith = ReplaceWith("JacksonSupport.createNonRpcMapper")) - class NoPartyObjectMapper(factory: JsonFactory) : PartyObjectMapper, ObjectMapper(factory) { + class NoPartyObjectMapper + @JvmOverloads constructor(factory: JsonFactory, + override val isFullParties: Boolean = false) : PartyObjectMapper, ObjectMapper(factory) { override fun wellKnownPartyFromX500Name(name: CordaX500Name): Party? = null override fun partyFromKey(owningKey: PublicKey): Party? = null override fun partiesFromName(query: String): Set = emptySet() @@ -112,22 +116,33 @@ object JacksonSupport { /** * Creates a Jackson ObjectMapper that uses RPC to deserialise parties from string names. * - * If [fuzzyIdentityMatch] is false, fields mapped to [Party] objects must be in X.500 name form and precisely + * @param fuzzyIdentityMatch If false, fields mapped to [Party] objects must be in X.500 name form and precisely * match an identity known from the network map. If true, the name is matched more leniently but if the match * is ambiguous a [JsonParseException] is thrown. + * + * @param fullParties If true then [Party] objects will be serialised as JSON objects, with the owning key serialised + * in addition to the name. For [PartyAndCertificate] objects the cert path will be included. 
*/ @JvmStatic @JvmOverloads fun createDefaultMapper(rpc: CordaRPCOps, factory: JsonFactory = JsonFactory(), - fuzzyIdentityMatch: Boolean = false): ObjectMapper { - return configureMapper(RpcObjectMapper(rpc, factory, fuzzyIdentityMatch)) + fuzzyIdentityMatch: Boolean = false, + fullParties: Boolean = false): ObjectMapper { + return configureMapper(RpcObjectMapper(rpc, factory, fuzzyIdentityMatch, fullParties)) } - /** For testing or situations where deserialising parties is not required */ + /** + * For testing or situations where deserialising parties is not required + * + * @param fullParties If true then [Party] objects will be serialised as JSON objects, with the owning key serialised + * in addition to the name. For [PartyAndCertificate] objects the cert path will be included. + */ @JvmStatic @JvmOverloads - fun createNonRpcMapper(factory: JsonFactory = JsonFactory()): ObjectMapper = configureMapper(NoPartyObjectMapper(factory)) + fun createNonRpcMapper(factory: JsonFactory = JsonFactory(), fullParties: Boolean = false): ObjectMapper { + return configureMapper(NoPartyObjectMapper(factory, fullParties)) + } /** * Creates a Jackson ObjectMapper that uses an [IdentityService] directly inside the node to deserialise parties from string names. @@ -207,7 +222,14 @@ object JacksonSupport { .filter { Modifier.isStatic(it.modifiers) && it.type == KeyPurposeId::class.java } .associateBy({ (it.get(null) as KeyPurposeId).id }, { it.name }) - val knownExtensions = setOf("2.5.29.15", "2.5.29.37", "2.5.29.19", "2.5.29.17", "2.5.29.18", CordaOID.X509_EXTENSION_CORDA_ROLE) + val knownExtensions = setOf( + "2.5.29.15", + "2.5.29.17", + "2.5.29.18", + "2.5.29.19", + "2.5.29.37", + CordaOID.X509_EXTENSION_CORDA_ROLE + ) override fun serialize(value: X509Certificate, gen: JsonGenerator, serializers: SerializerProvider) { gen.jsonObject { @@ -218,17 +240,20 @@ object JacksonSupport { writeObjectField("issuer", value.issuerX500Principal) writeObjectField("notBefore", value.notBefore) writeObjectField("notAfter", value.notAfter) + writeObjectField("cordaCertRole", CertRole.extract(value)) writeObjectField("issuerUniqueID", value.issuerUniqueID) writeObjectField("subjectUniqueID", value.subjectUniqueID) writeObjectField("keyUsage", value.keyUsage?.asList()?.mapIndexedNotNull { i, flag -> if (flag) keyUsages[i] else null }) writeObjectField("extendedKeyUsage", value.extendedKeyUsage.map { keyPurposeIds.getOrDefault(it, it) }) jsonObject("basicConstraints") { - writeBooleanField("isCA", value.basicConstraints != -1) - writeObjectField("pathLength", value.basicConstraints.let { if (it != Int.MAX_VALUE) it else null }) + val isCa = value.basicConstraints != -1 + writeBooleanField("isCA", isCa) + if (isCa) { + writeObjectField("pathLength", value.basicConstraints.let { if (it != Int.MAX_VALUE) it else null }) + } } writeObjectField("subjectAlternativeNames", value.subjectAlternativeNames) writeObjectField("issuerAlternativeNames", value.issuerAlternativeNames) - writeObjectField("cordaCertRole", CertRole.extract(value)) writeObjectField("otherCriticalExtensions", value.criticalExtensionOIDs - knownExtensions) writeObjectField("otherNonCriticalExtensions", value.nonCriticalExtensionOIDs - knownExtensions) writeBinaryField("encoded", value.encoded) @@ -239,8 +264,12 @@ object JacksonSupport { private class X509CertificateDeserializer : JsonDeserializer() { private val certFactory = CertificateFactory.getInstance("X.509") override fun deserialize(parser: JsonParser, ctxt: DeserializationContext): X509Certificate { - 
val encoded = parser.readValueAsTree()["encoded"] - return certFactory.generateCertificate(encoded.binaryValue().inputStream()) as X509Certificate + val encoded = if (parser.currentToken == JsonToken.START_OBJECT) { + parser.readValueAsTree()["encoded"].binaryValue() + } else { + parser.binaryValue + } + return certFactory.generateCertificate(encoded.inputStream()) as X509Certificate } } @@ -284,9 +313,13 @@ object JacksonSupport { @Deprecated("This is an internal class, do not use") object PartySerializer : JsonSerializer() { - override fun serialize(value: Party, generator: JsonGenerator, provider: SerializerProvider) { - // TODO Add configurable option to output this as an object which includes the owningKey - generator.writeObject(value.name) + override fun serialize(value: Party, gen: JsonGenerator, provider: SerializerProvider) { + val mapper = gen.codec as PartyObjectMapper + if (mapper.isFullParties) { + gen.writeObject(PartyAnalogue(value.name, value.owningKey)) + } else { + gen.writeObject(value.name) + } } } @@ -294,28 +327,39 @@ object JacksonSupport { object PartyDeserializer : JsonDeserializer() { override fun deserialize(parser: JsonParser, context: DeserializationContext): Party { val mapper = parser.codec as PartyObjectMapper - // The comma character is invalid in Base58, and required as a separator for X.500 names. As Corda - // X.500 names all involve at least three attributes (organisation, locality, country), they must - // include a comma. As such we can use it as a distinguisher between the two types. - return if ("," in parser.text) { - val principal = CordaX500Name.parse(parser.text) - mapper.wellKnownPartyFromX500Name(principal) ?: throw JsonParseException(parser, "Could not find a Party with name $principal") + return if (parser.currentToken == JsonToken.START_OBJECT) { + val analogue = parser.readValueAs() + Party(analogue.name, analogue.owningKey) } else { - val nameMatches = mapper.partiesFromName(parser.text) - when { - nameMatches.isEmpty() -> { - val publicKey = parser.readValueAs() - mapper.partyFromKey(publicKey) - ?: throw JsonParseException(parser, "Could not find a Party with key ${publicKey.toStringShort()}") - } - nameMatches.size == 1 -> nameMatches.first() - else -> throw JsonParseException(parser, "Ambiguous name match '${parser.text}': could be any of " + - nameMatches.map { it.name }.joinToString(" ... or ... ")) + // The comma character is invalid in Base58, and required as a separator for X.500 names. As Corda + // X.500 names all involve at least three attributes (organisation, locality, country), they must + // include a comma. As such we can use it as a distinguisher between the two types. + if ("," in parser.text) { + val principal = CordaX500Name.parse(parser.text) + mapper.wellKnownPartyFromX500Name(principal) ?: throw JsonParseException(parser, "Could not find a Party with name $principal") + } else { + lookupByNameSegment(mapper, parser) } } } + + private fun lookupByNameSegment(mapper: PartyObjectMapper, parser: JsonParser): Party { + val nameMatches = mapper.partiesFromName(parser.text) + return when { + nameMatches.isEmpty() -> { + val publicKey = parser.readValueAs() + mapper.partyFromKey(publicKey) + ?: throw JsonParseException(parser, "Could not find a Party with key ${publicKey.toStringShort()}") + } + nameMatches.size == 1 -> nameMatches.first() + else -> throw JsonParseException(parser, "Ambiguous name match '${parser.text}': could be any of " + + nameMatches.map { it.name }.joinToString(" ... or ... 
")) + } + } } + private class PartyAnalogue(val name: CordaX500Name, val owningKey: PublicKey) + @Deprecated("This is an internal class, do not use") object CordaX500NameDeserializer : JsonDeserializer() { override fun deserialize(parser: JsonParser, context: DeserializationContext): CordaX500Name { diff --git a/client/jackson/src/main/kotlin/net/corda/client/jackson/internal/CordaModule.kt b/client/jackson/src/main/kotlin/net/corda/client/jackson/internal/CordaModule.kt index 640dbf3a4a..5dd3fe395a 100644 --- a/client/jackson/src/main/kotlin/net/corda/client/jackson/internal/CordaModule.kt +++ b/client/jackson/src/main/kotlin/net/corda/client/jackson/internal/CordaModule.kt @@ -29,9 +29,10 @@ import net.corda.core.utilities.NetworkHostAndPort import net.corda.serialization.internal.AllWhitelist import net.corda.serialization.internal.amqp.SerializerFactory import net.corda.serialization.internal.amqp.constructorForDeserialization -import net.corda.serialization.internal.amqp.createSerializerFactoryFactory +import net.corda.serialization.internal.amqp.hasCordaSerializable import net.corda.serialization.internal.amqp.propertiesForSerialization import java.security.PublicKey +import java.security.cert.CertPath class CordaModule : SimpleModule("corda-core") { override fun setupModule(context: SetupContext) { @@ -39,7 +40,7 @@ class CordaModule : SimpleModule("corda-core") { context.addBeanSerializerModifier(CordaSerializableBeanSerializerModifier()) - context.setMixInAnnotations(PartyAndCertificate::class.java, PartyAndCertificateSerializerMixin::class.java) + context.setMixInAnnotations(PartyAndCertificate::class.java, PartyAndCertificateMixin::class.java) context.setMixInAnnotations(NetworkHostAndPort::class.java, NetworkHostAndPortMixin::class.java) context.setMixInAnnotations(CordaX500Name::class.java, CordaX500NameMixin::class.java) context.setMixInAnnotations(Amount::class.java, AmountMixin::class.java) @@ -53,7 +54,7 @@ class CordaModule : SimpleModule("corda-core") { context.setMixInAnnotations(DigitalSignature.WithKey::class.java, ByteSequenceWithPropertiesMixin::class.java) context.setMixInAnnotations(DigitalSignatureWithCert::class.java, ByteSequenceWithPropertiesMixin::class.java) context.setMixInAnnotations(TransactionSignature::class.java, ByteSequenceWithPropertiesMixin::class.java) - context.setMixInAnnotations(SignedTransaction::class.java, SignedTransactionMixin2::class.java) + context.setMixInAnnotations(SignedTransaction::class.java, SignedTransactionMixin::class.java) context.setMixInAnnotations(WireTransaction::class.java, JacksonSupport.WireTransactionMixin::class.java) context.setMixInAnnotations(NodeInfo::class.java, NodeInfoMixin::class.java) } @@ -69,12 +70,15 @@ private class CordaSerializableBeanSerializerModifier : BeanSerializerModifier() override fun changeProperties(config: SerializationConfig, beanDesc: BeanDescription, beanProperties: MutableList): MutableList { - // TODO We're assuming here that Jackson gives us a superset of all the properties. 
Either confirm this or - // make sure the returned beanProperties are exactly the AMQP properties - if (beanDesc.beanClass.isAnnotationPresent(CordaSerializable::class.java)) { + if (hasCordaSerializable(beanDesc.beanClass)) { val ctor = constructorForDeserialization(beanDesc.beanClass) - val amqpProperties = propertiesForSerialization(ctor, beanDesc.beanClass, serializerFactory).serializationOrder - beanProperties.removeIf { bean -> amqpProperties.none { amqp -> amqp.serializer.name == bean.name } } + val amqpProperties = propertiesForSerialization(ctor, beanDesc.beanClass, serializerFactory) + .serializationOrder + .map { it.serializer.name } + beanProperties.removeIf { it.name !in amqpProperties } + (amqpProperties - beanProperties.map { it.name }).let { + check(it.isEmpty()) { "Jackson didn't provide serialisers for $it" } + } } return beanProperties } @@ -85,26 +89,31 @@ private class CordaSerializableBeanSerializerModifier : BeanSerializerModifier() private interface NetworkHostAndPortMixin private class NetworkHostAndPortDeserializer : JsonDeserializer() { - override fun deserialize(parser: JsonParser, ctxt: DeserializationContext) = NetworkHostAndPort.parse(parser.text) + override fun deserialize(parser: JsonParser, ctxt: DeserializationContext): NetworkHostAndPort { + return NetworkHostAndPort.parse(parser.text) + } } @JsonSerialize(using = PartyAndCertificateSerializer::class) // TODO Add deserialization which follows the same lookup logic as Party -private interface PartyAndCertificateSerializerMixin +private interface PartyAndCertificateMixin private class PartyAndCertificateSerializer : JsonSerializer() { override fun serialize(value: PartyAndCertificate, gen: JsonGenerator, serializers: SerializerProvider) { - gen.jsonObject { - writeObjectField("name", value.name) - writeObjectField("owningKey", value.owningKey) - // TODO Add configurable option to output the certPath + val mapper = gen.codec as JacksonSupport.PartyObjectMapper + if (mapper.isFullParties) { + gen.writeObject(PartyAndCertificateWrapper(value.name, value.certPath)) + } else { + gen.writeObject(value.party) } } } +private class PartyAndCertificateWrapper(val name: CordaX500Name, val certPath: CertPath) + @JsonSerialize(using = SignedTransactionSerializer::class) @JsonDeserialize(using = SignedTransactionDeserializer::class) -private interface SignedTransactionMixin2 +private interface SignedTransactionMixin private class SignedTransactionSerializer : JsonSerializer() { override fun serialize(value: SignedTransaction, gen: JsonGenerator, serializers: SerializerProvider) { diff --git a/client/jackson/src/test/kotlin/net/corda/client/jackson/JacksonSupportTest.kt b/client/jackson/src/test/kotlin/net/corda/client/jackson/JacksonSupportTest.kt index 56dbd190a6..b13d576e30 100644 --- a/client/jackson/src/test/kotlin/net/corda/client/jackson/JacksonSupportTest.kt +++ b/client/jackson/src/test/kotlin/net/corda/client/jackson/JacksonSupportTest.kt @@ -24,10 +24,7 @@ import net.corda.core.contracts.Amount import net.corda.core.cordapp.CordappProvider import net.corda.core.crypto.* import net.corda.core.crypto.CompositeKey -import net.corda.core.identity.AbstractParty -import net.corda.core.identity.AnonymousParty -import net.corda.core.identity.CordaX500Name -import net.corda.core.identity.Party +import net.corda.core.identity.* import net.corda.core.internal.DigitalSignatureWithCert import net.corda.core.node.NodeInfo import net.corda.core.node.ServiceHub @@ -257,10 +254,20 @@ class 
JacksonSupportTest(@Suppress("unused") private val name: String, factory: assertThat(json.textValue()).isEqualTo(MINI_CORP.name.toString()) } + @Test + fun `Party serialization with isFullParty = true`() { + partyObjectMapper.isFullParties = true + val json = mapper.valueToTree(MINI_CORP.party) + val (name, owningKey) = json.assertHasOnlyFields("name", "owningKey") + assertThat(name.valueAs(mapper)).isEqualTo(MINI_CORP.name) + assertThat(owningKey.valueAs(mapper)).isEqualTo(MINI_CORP.publicKey) + } + @Test fun `Party deserialization on full name`() { fun convertToParty() = mapper.convertValue(TextNode(MINI_CORP.name.toString())) + // Check that it fails if it can't find the party assertThatThrownBy { convertToParty() } partyObjectMapper.identities += MINI_CORP.party @@ -271,6 +278,7 @@ class JacksonSupportTest(@Suppress("unused") private val name: String, factory: fun `Party deserialization on part of name`() { fun convertToParty() = mapper.convertValue(TextNode(MINI_CORP.name.organisation)) + // Check that it fails if it can't find the party assertThatThrownBy { convertToParty() } partyObjectMapper.identities += MINI_CORP.party @@ -281,12 +289,24 @@ class JacksonSupportTest(@Suppress("unused") private val name: String, factory: fun `Party deserialization on public key`() { fun convertToParty() = mapper.convertValue(TextNode(MINI_CORP.publicKey.toBase58String())) + // Check that it fails if it can't find the party assertThatThrownBy { convertToParty() } partyObjectMapper.identities += MINI_CORP.party assertThat(convertToParty()).isEqualTo(MINI_CORP.party) } + @Test + fun `Party deserialization on name and key`() { + val party = mapper.convertValue(mapOf( + "name" to MINI_CORP.name, + "owningKey" to MINI_CORP.publicKey + )) + // Party.equals is only defined on the public key so we must check the name as well + assertThat(party.name).isEqualTo(MINI_CORP.name) + assertThat(party.owningKey).isEqualTo(MINI_CORP.publicKey) + } + @Test fun PublicKey() { val json = mapper.valueToTree(MINI_CORP.publicKey) @@ -326,15 +346,31 @@ class JacksonSupportTest(@Suppress("unused") private val name: String, factory: } @Test - fun `PartyAndCertificate serialisation`() { - val json = mapper.valueToTree(MINI_CORP.identity) - val (name, owningKey) = json.assertHasOnlyFields("name", "owningKey") - assertThat(name.valueAs(mapper)).isEqualTo(MINI_CORP.name) - assertThat(owningKey.valueAs(mapper)).isEqualTo(MINI_CORP.publicKey) + fun `PartyAndCertificate serialization`() { + val json = mapper.valueToTree(MINI_CORP.identity) + assertThat(json.textValue()).isEqualTo(MINI_CORP.name.toString()) } @Test - fun `NodeInfo serialisation`() { + fun `PartyAndCertificate serialization with isFullParty = true`() { + partyObjectMapper.isFullParties = true + val json = mapper.valueToTree(MINI_CORP.identity) + println(mapper.writeValueAsString(json)) + val (name, certPath) = json.assertHasOnlyFields("name", "certPath") + assertThat(name.valueAs(mapper)).isEqualTo(MINI_CORP.name) + assertThat(certPath.valueAs(mapper)).isEqualTo(MINI_CORP.identity.certPath) + } + + @Test + fun `PartyAndCertificate deserialization on cert path`() { + val certPathJson = mapper.valueToTree(MINI_CORP.identity.certPath) + val partyAndCert = mapper.convertValue(mapOf("certPath" to certPathJson)) + // PartyAndCertificate.equals is defined on the Party so we must check the certPath directly + assertThat(partyAndCert.certPath).isEqualTo(MINI_CORP.identity.certPath) + } + + @Test + fun `NodeInfo serialization`() { val (nodeInfo) = 
createNodeInfoAndSigned(ALICE_NAME) val json = mapper.valueToTree(nodeInfo) val (addresses, legalIdentitiesAndCerts, platformVersion, serial) = json.assertHasOnlyFields( @@ -349,14 +385,14 @@ class JacksonSupportTest(@Suppress("unused") private val name: String, factory: } legalIdentitiesAndCerts.run { assertThat(this).hasSize(1) - assertThat(this[0]["name"].valueAs(mapper)).isEqualTo(ALICE_NAME) + assertThat(this[0].valueAs(mapper)).isEqualTo(ALICE_NAME) } assertThat(platformVersion.intValue()).isEqualTo(nodeInfo.platformVersion) assertThat(serial.longValue()).isEqualTo(nodeInfo.serial) } @Test - fun `NodeInfo deserialisation on name`() { + fun `NodeInfo deserialization on name`() { val (nodeInfo) = createNodeInfoAndSigned(ALICE_NAME) fun convertToNodeInfo() = mapper.convertValue(TextNode(ALICE_NAME.toString())) @@ -369,7 +405,7 @@ class JacksonSupportTest(@Suppress("unused") private val name: String, factory: } @Test - fun `NodeInfo deserialisation on public key`() { + fun `NodeInfo deserialization on public key`() { val (nodeInfo) = createNodeInfoAndSigned(ALICE_NAME) fun convertToNodeInfo() = mapper.convertValue(TextNode(nodeInfo.legalIdentities[0].owningKey.toBase58String())) @@ -396,7 +432,7 @@ class JacksonSupportTest(@Suppress("unused") private val name: String, factory: } @Test - fun X509Certificate() { + fun `X509Certificate serialization`() { val cert: X509Certificate = MINI_CORP.identity.certificate val json = mapper.valueToTree(cert) println(mapper.writeValueAsString(json)) @@ -407,7 +443,13 @@ class JacksonSupportTest(@Suppress("unused") private val name: String, factory: assertThat(json["notAfter"].valueAs(mapper)).isEqualTo(cert.notAfter) assertThat(json["notBefore"].valueAs(mapper)).isEqualTo(cert.notBefore) assertThat(json["encoded"].binaryValue()).isEqualTo(cert.encoded) - assertThat(mapper.convertValue(json).encoded).isEqualTo(cert.encoded) + } + + @Test + fun `X509Certificate deserialization`() { + val cert: X509Certificate = MINI_CORP.identity.certificate + assertThat(mapper.convertValue(mapOf("encoded" to cert.encoded))).isEqualTo(cert) + assertThat(mapper.convertValue(BinaryNode(cert.encoded))).isEqualTo(cert) } @Test @@ -458,6 +500,7 @@ class JacksonSupportTest(@Suppress("unused") private val name: String, factory: } private class TestPartyObjectMapper : JacksonSupport.PartyObjectMapper { + override var isFullParties: Boolean = false val identities = ArrayList() val nodes = ArrayList() override fun wellKnownPartyFromX500Name(name: CordaX500Name): Party? 
{ diff --git a/client/rpc/src/smoke-test/kotlin/net/corda/kotlin/rpc/StandaloneCordaRPClientTest.kt b/client/rpc/src/smoke-test/kotlin/net/corda/kotlin/rpc/StandaloneCordaRPClientTest.kt index 1828ec0746..379257b20e 100644 --- a/client/rpc/src/smoke-test/kotlin/net/corda/kotlin/rpc/StandaloneCordaRPClientTest.kt +++ b/client/rpc/src/smoke-test/kotlin/net/corda/kotlin/rpc/StandaloneCordaRPClientTest.kt @@ -37,6 +37,7 @@ import net.corda.smoketesting.NodeProcess import org.apache.commons.io.output.NullOutputStream import org.junit.After import org.junit.Before +import org.junit.Ignore import org.junit.Test import java.io.FilterInputStream import java.io.InputStream @@ -104,8 +105,24 @@ class StandaloneCordaRPClientTest { financeJar.copyToDirectory(cordappsDir) } + @Test fun `test attachments`() { + val attachment = InputStreamAndHash.createInMemoryTestZip(attachmentSize, 1) + assertFalse(rpcProxy.attachmentExists(attachment.sha256)) + val id = attachment.inputStream.use { rpcProxy.uploadAttachment(it) } + assertEquals(attachment.sha256, id, "Attachment has incorrect SHA256 hash") + + val hash = HashingInputStream(Hashing.sha256(), rpcProxy.openAttachment(id)).use { it -> + it.copyTo(NullOutputStream()) + SecureHash.SHA256(it.hash().asBytes()) + } + assertEquals(attachment.sha256, hash) + } + + @Ignore("CORDA-1520 - After switching from Kryo to AMQP this test won't work") + @Test + fun `test wrapped attachments`() { val attachment = InputStreamAndHash.createInMemoryTestZip(attachmentSize, 1) assertFalse(rpcProxy.attachmentExists(attachment.sha256)) val id = WrapperStream(attachment.inputStream).use { rpcProxy.uploadAttachment(it) } diff --git a/core/src/main/kotlin/net/corda/core/flows/FinalityFlow.kt b/core/src/main/kotlin/net/corda/core/flows/FinalityFlow.kt index d6bf462055..edd93b164c 100644 --- a/core/src/main/kotlin/net/corda/core/flows/FinalityFlow.kt +++ b/core/src/main/kotlin/net/corda/core/flows/FinalityFlow.kt @@ -14,6 +14,7 @@ import co.paralleluniverse.fibers.Suspendable import net.corda.core.crypto.isFulfilledBy import net.corda.core.identity.Party import net.corda.core.identity.groupAbstractPartyByWellKnownParty +import net.corda.core.internal.pushToLoggingContext import net.corda.core.transactions.LedgerTransaction import net.corda.core.transactions.SignedTransaction import net.corda.core.utilities.ProgressTracker @@ -61,17 +62,24 @@ class FinalityFlow(val transaction: SignedTransaction, // // Lookup the resolved transactions and use them to map each signed transaction to the list of participants. // Then send to the notary if needed, record locally and distribute. + + transaction.pushToLoggingContext() + val commandDataTypes = transaction.tx.commands.map { it.value }.mapNotNull { it::class.qualifiedName }.distinct() + logger.info("Started finalization, commands are ${commandDataTypes.joinToString(", ", "[", "]")}.") val parties = getPartiesToSend(verifyTx()) val notarised = notariseAndRecord() // Each transaction has its own set of recipients, but extra recipients get them all. 
progressTracker.currentStep = BROADCASTING - for (party in parties) { - if (!serviceHub.myInfo.isLegalIdentity(party)) { - val session = initiateFlow(party) - subFlow(SendTransactionFlow(session, notarised)) - } + val recipients = parties.filterNot(serviceHub.myInfo::isLegalIdentity) + logger.info("Broadcasting transaction to parties ${recipients.map { it.name }.joinToString(", ", "[", "]")}.") + for (party in recipients) { + logger.info("Sending transaction to party ${party.name}.") + val session = initiateFlow(party) + subFlow(SendTransactionFlow(session, notarised)) + logger.info("Party ${party.name} received the transaction.") } + logger.info("All parties received the transaction successfully.") return notarised } @@ -83,9 +91,12 @@ class FinalityFlow(val transaction: SignedTransaction, val notarySignatures = subFlow(NotaryFlow.Client(transaction)) transaction + notarySignatures } else { + logger.info("No need to notarise this transaction.") transaction } + logger.info("Recording transaction locally.") serviceHub.recordTransactions(notarised) + logger.info("Recorded transaction locally successfully.") return notarised } diff --git a/core/src/main/kotlin/net/corda/core/flows/NotaryFlow.kt b/core/src/main/kotlin/net/corda/core/flows/NotaryFlow.kt index e8078c8877..be86932ec9 100644 --- a/core/src/main/kotlin/net/corda/core/flows/NotaryFlow.kt +++ b/core/src/main/kotlin/net/corda/core/flows/NotaryFlow.kt @@ -19,6 +19,7 @@ import net.corda.core.identity.Party import net.corda.core.internal.FetchDataFlow import net.corda.core.internal.notary.generateSignature import net.corda.core.internal.notary.validateSignatures +import net.corda.core.internal.pushToLoggingContext import net.corda.core.transactions.ContractUpgradeWireTransaction import net.corda.core.transactions.SignedTransaction import net.corda.core.transactions.WireTransaction @@ -54,9 +55,12 @@ class NotaryFlow { @Suspendable @Throws(NotaryException::class) override fun call(): List { + stx.pushToLoggingContext() val notaryParty = checkTransaction() progressTracker.currentStep = REQUESTING + logger.info("Sending transaction to notary: ${notaryParty.name}.") val response = notarise(notaryParty) + logger.info("Notary responded.") progressTracker.currentStep = VALIDATING return validateResponse(response, notaryParty) } diff --git a/core/src/main/kotlin/net/corda/core/flows/ReceiveTransactionFlow.kt b/core/src/main/kotlin/net/corda/core/flows/ReceiveTransactionFlow.kt index 501ea40260..9b2f591211 100644 --- a/core/src/main/kotlin/net/corda/core/flows/ReceiveTransactionFlow.kt +++ b/core/src/main/kotlin/net/corda/core/flows/ReceiveTransactionFlow.kt @@ -13,6 +13,7 @@ package net.corda.core.flows import co.paralleluniverse.fibers.Suspendable import net.corda.core.contracts.* import net.corda.core.internal.ResolveTransactionsFlow +import net.corda.core.internal.pushToLoggingContext import net.corda.core.node.StatesToRecord import net.corda.core.transactions.SignedTransaction import net.corda.core.utilities.unwrap @@ -46,18 +47,25 @@ class ReceiveTransactionFlow @JvmOverloads constructor(private val otherSideSess } else { logger.trace("Receiving a transaction (but without checking the signatures) from ${otherSideSession.counterparty}") } - val stx = otherSideSession.receive().unwrap { + it.pushToLoggingContext() + logger.info("Received transaction acknowledgement request from party ${otherSideSession.counterparty.name}.") subFlow(ResolveTransactionsFlow(it, otherSideSession)) - it.verify(serviceHub, checkSufficientSignatures) - it + 
logger.info("Transaction dependencies resolution completed.") + try { + it.verify(serviceHub, checkSufficientSignatures) + it + } catch (e: Exception) { + logger.warn("Transaction verification failed.") + throw e + } } - if (checkSufficientSignatures) { // We should only send a transaction to the vault for processing if we did in fact fully verify it, and // there are no missing signatures. We don't want partly signed stuff in the vault. - logger.trace("Successfully received fully signed tx ${stx.id}, sending to the vault for processing") + logger.info("Successfully received fully signed tx. Sending it to the vault for processing.") serviceHub.recordTransactions(statesToRecord, setOf(stx)) + logger.info("Successfully recorded received transaction locally.") } return stx } diff --git a/core/src/main/kotlin/net/corda/core/internal/FlowStateMachine.kt b/core/src/main/kotlin/net/corda/core/internal/FlowStateMachine.kt index 37b9be94e0..1892b7ef71 100644 --- a/core/src/main/kotlin/net/corda/core/internal/FlowStateMachine.kt +++ b/core/src/main/kotlin/net/corda/core/internal/FlowStateMachine.kt @@ -51,4 +51,5 @@ interface FlowStateMachine { val resultFuture: CordaFuture val context: InvocationContext val ourIdentity: Party + val ourSenderUUID: String? } diff --git a/core/src/main/kotlin/net/corda/core/internal/InternalUtils.kt b/core/src/main/kotlin/net/corda/core/internal/InternalUtils.kt index 1998a4cfb2..eaeae516d0 100644 --- a/core/src/main/kotlin/net/corda/core/internal/InternalUtils.kt +++ b/core/src/main/kotlin/net/corda/core/internal/InternalUtils.kt @@ -17,13 +17,19 @@ import com.google.common.hash.HashingInputStream import net.corda.core.cordapp.Cordapp import net.corda.core.cordapp.CordappConfig import net.corda.core.cordapp.CordappContext -import net.corda.core.crypto.* +import net.corda.core.crypto.Crypto +import net.corda.core.crypto.DigitalSignature +import net.corda.core.crypto.SecureHash +import net.corda.core.crypto.SignedData +import net.corda.core.crypto.sha256 +import net.corda.core.crypto.sign import net.corda.core.identity.CordaX500Name import net.corda.core.node.ServicesForResolution import net.corda.core.serialization.SerializationContext import net.corda.core.serialization.SerializedBytes import net.corda.core.serialization.deserialize import net.corda.core.serialization.serialize +import net.corda.core.transactions.SignedTransaction import net.corda.core.transactions.TransactionBuilder import net.corda.core.transactions.WireTransaction import net.corda.core.utilities.OpaqueBytes @@ -31,11 +37,15 @@ import org.bouncycastle.asn1.x500.X500Name import org.bouncycastle.asn1.x500.X500NameBuilder import org.bouncycastle.asn1.x500.style.BCStyle import org.slf4j.Logger +import org.slf4j.MDC import rx.Observable import rx.Observer import rx.subjects.PublishSubject import rx.subjects.UnicastSubject -import java.io.* +import java.io.ByteArrayOutputStream +import java.io.IOException +import java.io.InputStream +import java.io.OutputStream import java.lang.reflect.Field import java.lang.reflect.Modifier import java.math.BigDecimal @@ -51,11 +61,23 @@ import java.nio.file.Paths import java.security.KeyPair import java.security.PrivateKey import java.security.PublicKey -import java.security.cert.* +import java.security.cert.CertPath +import java.security.cert.CertPathValidator +import java.security.cert.CertPathValidatorException +import java.security.cert.PKIXCertPathValidatorResult +import java.security.cert.PKIXParameters +import java.security.cert.TrustAnchor +import 
java.security.cert.X509Certificate import java.time.Duration import java.time.temporal.Temporal import java.util.* -import java.util.Spliterator.* +import java.util.Spliterator.DISTINCT +import java.util.Spliterator.IMMUTABLE +import java.util.Spliterator.NONNULL +import java.util.Spliterator.ORDERED +import java.util.Spliterator.SIZED +import java.util.Spliterator.SORTED +import java.util.Spliterator.SUBSIZED import java.util.concurrent.ExecutorService import java.util.concurrent.TimeUnit import java.util.stream.IntStream @@ -68,6 +90,17 @@ import kotlin.reflect.KClass import kotlin.reflect.full.createInstance val Throwable.rootCause: Throwable get() = cause?.rootCause ?: this +val Throwable.rootMessage: String? get() { + var message = this.message + var throwable = cause + while (throwable != null) { + if (throwable.message != null) { + message = throwable.message + } + throwable = throwable.cause + } + return message +} infix fun Temporal.until(endExclusive: Temporal): Duration = Duration.between(this, endExclusive) @@ -469,3 +502,10 @@ val PublicKey.hash: SecureHash get() = encoded.sha256() * Extension method for providing a sumBy method that processes and returns a Long */ fun Iterable.sumByLong(selector: (T) -> Long): Long = this.map { selector(it) }.sum() + +/** + * Ensures each log entry from the current thread will contain id of the transaction in the MDC. + */ +internal fun SignedTransaction.pushToLoggingContext() { + MDC.put("tx_id", id.toString()) +} \ No newline at end of file diff --git a/core/src/main/kotlin/net/corda/core/serialization/SerializationAPI.kt b/core/src/main/kotlin/net/corda/core/serialization/SerializationAPI.kt index 46b826471b..362f0d009c 100644 --- a/core/src/main/kotlin/net/corda/core/serialization/SerializationAPI.kt +++ b/core/src/main/kotlin/net/corda/core/serialization/SerializationAPI.kt @@ -219,7 +219,8 @@ object SerializationDefaults { /** * Convenience extension method for deserializing a ByteSequence, utilising the defaults. */ -inline fun ByteSequence.deserialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory, context: SerializationContext = serializationFactory.defaultContext): T { +inline fun ByteSequence.deserialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory, + context: SerializationContext = serializationFactory.defaultContext): T { return serializationFactory.deserialize(this, T::class.java, context) } @@ -228,31 +229,40 @@ inline fun ByteSequence.deserialize(serializationFactory: Seri * It might be helpful to know [SerializationContext] to use the same encoding in the reply. */ inline fun ByteSequence.deserializeWithCompatibleContext(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory, - context: SerializationContext = serializationFactory.defaultContext): ObjectWithCompatibleContext { + context: SerializationContext = serializationFactory.defaultContext): ObjectWithCompatibleContext { return serializationFactory.deserializeWithCompatibleContext(this, T::class.java, context) } /** * Convenience extension method for deserializing SerializedBytes with type matching, utilising the defaults. 
*/ -inline fun SerializedBytes.deserialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory, context: SerializationContext = serializationFactory.defaultContext): T { +inline fun SerializedBytes.deserialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory, + context: SerializationContext = serializationFactory.defaultContext): T { return serializationFactory.deserialize(this, T::class.java, context) } /** * Convenience extension method for deserializing a ByteArray, utilising the defaults. */ -inline fun ByteArray.deserialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory, context: SerializationContext = serializationFactory.defaultContext): T = this.sequence().deserialize(serializationFactory, context) +inline fun ByteArray.deserialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory, + context: SerializationContext = serializationFactory.defaultContext): T { + require(isNotEmpty()) { "Empty bytes" } + return this.sequence().deserialize(serializationFactory, context) +} /** * Convenience extension method for deserializing a JDBC Blob, utilising the defaults. */ -inline fun Blob.deserialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory, context: SerializationContext = serializationFactory.defaultContext): T = this.getBytes(1, this.length().toInt()).deserialize(serializationFactory, context) +inline fun Blob.deserialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory, + context: SerializationContext = serializationFactory.defaultContext): T { + return this.getBytes(1, this.length().toInt()).deserialize(serializationFactory, context) +} /** * Convenience extension method for serializing an object of type T, utilising the defaults. */ -fun T.serialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory, context: SerializationContext = serializationFactory.defaultContext): SerializedBytes { +fun T.serialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory, + context: SerializationContext = serializationFactory.defaultContext): SerializedBytes { return serializationFactory.serialize(this, context) } diff --git a/core/src/main/kotlin/net/corda/core/transactions/WireTransaction.kt b/core/src/main/kotlin/net/corda/core/transactions/WireTransaction.kt index 91a02145d4..138522b07b 100644 --- a/core/src/main/kotlin/net/corda/core/transactions/WireTransaction.kt +++ b/core/src/main/kotlin/net/corda/core/transactions/WireTransaction.kt @@ -79,7 +79,7 @@ class WireTransaction(componentGroups: List, val privacySalt: Pr val requiredSigningKeys: Set get() { val commandKeys = commands.flatMap { it.signers }.toSet() - // TODO: prevent notary field from being set if there are no inputs and no timestamp. + // TODO: prevent notary field from being set if there are no inputs and no time-window. return if (notary != null && (inputs.isNotEmpty() || timeWindow != null)) { commandKeys + notary.owningKey } else { diff --git a/docs/source/api-flows.rst b/docs/source/api-flows.rst index 6fb0960b09..06ee98b831 100644 --- a/docs/source/api-flows.rst +++ b/docs/source/api-flows.rst @@ -30,7 +30,7 @@ In our flow, the Initiator flow class will be doing the majority of the work: 2. Create a transaction builder 3. Extract any input states from the vault and add them to the builder 4. Create any output states and add them to the builder -5. 
Add any commands, attachments and timestamps to the builder +5. Add any commands, attachments and a time-window to the builder *Part 2 - Sign the transaction* diff --git a/docs/source/api-scanner.rst b/docs/source/api-scanner.rst new file mode 100644 index 0000000000..45e0d096c1 --- /dev/null +++ b/docs/source/api-scanner.rst @@ -0,0 +1,58 @@ +API stability check +=================== + +We have committed not to alter Corda's API so that developers will not have to keep rewriting their CorDapps with each +new Corda release. The stable Corda modules are listed :ref:`here `. Our CI process runs an "API Stability" +check for each GitHub pull request in order to check that we don't accidentally introduce an API-breaking change. + +Build Process +------------- + +As part of the build process the following commands are run for each PR: + +.. code-block:: shell + + $ gradlew generateApi + $ .ci/check-api-changes.sh + +This ``bash`` script has been tested on both macOS and various Linux distributions; it can also be run on Windows with the +use of a suitable bash emulator such as Git Bash. The script's return value is the number of API-breaking changes that it +has detected, and this should be zero for the check to pass. The maximum return value is 255, although the script will still +correctly report higher numbers of breaking changes. + +There are three kinds of breaking change: + +* Removal or modification of existing API, i.e. an existing class, method or field has been either deleted or renamed, or + its signature somehow altered. +* Addition of a new method to an interface or abstract class. Types that have been annotated as ``@DoNotImplement`` are + excluded from this check. (This annotation is also inherited across subclasses and subinterfaces.) +* Exposure of an internal type via a public API. Internal types are considered to be anything in a ``*.internal.`` package + or anything in a module that isn't in the stable modules list :ref:`here `. + +Developers can execute these commands themselves before submitting their PR, to ensure that they haven't inadvertently +broken Corda's API. + + +How it works +------------ + +The ``generateApi`` Gradle task writes a summary of Corda's public API into the file ``build/api/api-corda-.txt``. +The ``.ci/check-api-changes.sh`` script then compares this file with the contents of ``.ci/api-current.txt``, which is a +managed file within the Corda repository. + +The Gradle task itself is implemented by the API Scanner plugin. More information on the plugin is available `here `_. + + +Updating the API +---------------- + +As a rule, ``api-current.txt`` should only be updated by the release manager for each Corda release. + +We do not expect modifications to ``api-current.txt`` as part of normal development. However, we may sometimes need to adjust +the public API in ways that would not break developers' CorDapps but which would be blocked by the API Stability check. +For example, migrating a method from an interface into a superinterface. Any changes to the API summary file should be +included in the PR, which would then need explicit approval from either `Mike Hearn `_, `Rick Parker `_ or `Matthew Nesbit `_. + +.. note:: If you need to modify ``api-current.txt``, do not re-generate the file on the master branch. This will include new API that + hasn't been released or committed to, and may be subject to change. Manually change the specific line or lines of the + existing committed API that have changed.
\ No newline at end of file diff --git a/docs/source/blob-inspector.rst b/docs/source/blob-inspector.rst new file mode 100644 index 0000000000..ba301eef0a --- /dev/null +++ b/docs/source/blob-inspector.rst @@ -0,0 +1,63 @@ +Blob Inspector +============== + +There are many benefits to having a custom binary serialisation format (see :doc:`serialization` for details) but one +disadvantage is the inability to view the contents in a human-friendly manner. The blob inspector tool alleviates this issue +by allowing the contents of a binary blob file (or URL end-point) to be output in either YAML or JSON. It uses +``JacksonSupport`` to do this (see :doc:`json`). + +The latest version of the tool can be downloaded from `here `_. + +To run, simply pass in the file or URL as the first parameter: + +``java -jar blob-inspector.jar `` + +Use the ``--help`` flag for a full list of command line options. + +``SerializedBytes`` +~~~~~~~~~~~~~~~~~~ + +One thing to note is that the binary blob may contain embedded ``SerializedBytes`` objects. Rather than printing these +out as a Base64 string, the blob inspector will first materialise them into Java objects and then output those. You will +see this when dealing with classes such as ``SignedData`` or other structures that attach a signature, such as the +``nodeInfo-*`` files or the ``network-parameters`` file in the node's directory. For example, the output of a node-info +file may look like: + +.. container:: codeset + + .. sourcecode:: yaml + + net.corda.nodeapi.internal.SignedNodeInfo + --- + raw: + class: "net.corda.core.node.NodeInfo" + deserialized: + addresses: + - "localhost:10011" + legalIdentitiesAndCerts: + - "O=BankOfCorda, L=New York, C=US" + platformVersion: 4 + serial: 1527074180971 + signatures: + - !!binary | + dmoAnnzcv0MzRN+3ZSCDcCJIAbXnoYy5mFWB3Nijndzu/dzIoYdIawINXbNSY/5z2XloDK01vZRV + TreFZCbZAg== + + .. sourcecode:: json + + net.corda.nodeapi.internal.SignedNodeInfo + { + "raw" : { + "class" : "net.corda.core.node.NodeInfo", + "deserialized" : { + "addresses" : [ "localhost:10011" ], + "legalIdentitiesAndCerts" : [ "O=BankOfCorda, L=New York, C=US" ], + "platformVersion" : 4, + "serial" : 1527074180971 + } + }, + "signatures" : [ "dmoAnnzcv0MzRN+3ZSCDcCJIAbXnoYy5mFWB3Nijndzu/dzIoYdIawINXbNSY/5z2XloDK01vZRVTreFZCbZAg==" ] + } + +Notice the file is actually a serialised ``SignedNodeInfo`` object, which has a ``raw`` property of type ``SerializedBytes``. +This property is materialised into a ``NodeInfo`` and is output under the ``deserialized`` field. diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst index 3a4564caae..f4cddf75ba 100644 --- a/docs/source/changelog.rst +++ b/docs/source/changelog.rst @@ -8,6 +8,12 @@ Unreleased ========== * Introduced a hierarchy of ``DatabaseMigrationException``s, allowing ``NodeStartup`` to gracefully inform users of problems related to database migrations before exiting with a non-zero code. +* Doorman and NetworkMap URLs can now be configured individually rather than being assumed to be on + the same server. Current ``compatibilityZoneURL`` configurations remain valid. See both :doc:`corda-configuration-file` + and :doc:`permissioning` for details. + +* Improved audit trail for ``FinalityFlow`` and related sub-flows. + * ``NodeStartup`` will now only print node's configuration if ``devMode`` is ``true``, avoiding the risk of printing passwords in a production setup. * SLF4J's MDC will now only be printed to the console if not empty. No more log lines ending with "{}".
@@ -18,7 +24,7 @@ Unreleased * RPC server will now mask internal errors to RPC clients if not in devMode. ``Throwable``s implementing ``ClientRelevantError`` will continue to be propagated to clients. * RPC Framework moved from Kryo to the Corda AMQP implementation [Corda-847]. This completes the removal - of ``Kryo`` from general use within Corda, remaining only for use in flow checkpointing. + of ``Kryo`` from general use within Corda, remaining only for use in flow checkpointing. * Set co.paralleluniverse.fibers.verifyInstrumentation=true in devMode. @@ -35,12 +41,19 @@ Unreleased * ``Party`` objects can be deserialised by looking up their public key, in addition to their name * ``NodeInfo`` objects are serialised as an object and can be looked up using the same mechanism as ``Party`` * ``NetworkHostAndPort`` serialised according to its ``toString()`` - * ``PartyAndCertificate`` is serialised as an object containing the name and owning key - * ``SerializedBytes`` is serialised by converting the bytes into the object it represents, which is then serialised into - a JSON/YAML object - * ``CertPath`` and ``X509Certificate`` are serialised as objects and can be deserialised back + * ``PartyAndCertificate`` is serialised as the name + * ``SerializedBytes`` is serialised by materialising the bytes into the object it represents, and then serialising that + object into YAML/JSON + * ``X509Certificate`` is serialised as an object with key fields such as ``issuer``, ``publicKey``, ``serialNumber``, etc. + The encoded bytes are also serialised into the ``encoded`` field. This can be used to deserialise an ``X509Certificate`` + back. + * ``CertPath`` objects are serialised as a list of ``X509Certificate`` objects. * ``SignedTransaction`` is serialised into its ``txBits`` and ``signatures`` and can be deserialised back +* ``fullParties`` boolean parameter added to ``JacksonSupport.createDefaultMapper`` and ``createNonRpcMapper``. If ``true`` + then ``Party`` objects are serialised as JSON objects with the ``name`` and ``owningKey`` fields. For ``PartyAndCertificate`` + the ``certPath`` is serialised. + * Several members of ``JacksonSupport`` have been deprecated to highlight that they are internal and not to be used. * The Vault Criteria API has been extended to take a more precise specification of which class contains a field. This diff --git a/docs/source/cipher-suites.rst b/docs/source/cipher-suites.rst index 8991adcd4e..6ffa5f8d99 100644 --- a/docs/source/cipher-suites.rst +++ b/docs/source/cipher-suites.rst @@ -24,12 +24,15 @@ Certificate hierarchy A Corda network has 8 types of keys and a regular node requires 4 of them: +**Network Keys** + * The **root network CA** key * The **doorman CA** key * The **network map** key -* The **service identity** key(s) (per service, such as a notary cluster; it can be a Composite Key) +* The **service identity** key(s) (per service, such as a notary cluster; it can be a Composite key) + +**Node Keys** --- **Node Keys** -- * The **node CA** key(s) (one per node) * The **legal identity** key(s) (one per node) * The **tls** key(s) (per node) diff --git a/docs/source/contributing.rst b/docs/source/contributing.rst index 3bfff458e4..0baf3c6efc 100644 --- a/docs/source/contributing.rst +++ b/docs/source/contributing.rst @@ -102,6 +102,12 @@ Building against the master branch You can test your changes against CorDapps defined in other repos by following the instructions :doc:`here `. 
+Running the API scanner +^^^^^^^^^^^^^^^^^^^^^^^ +Your changes must also not break compatibility with the existing public API. We have an API scanning tool which runs as part of the build +process to flag up any accidental changes; it is detailed :doc:`here `. + + Updating the docs ----------------- diff --git a/docs/source/corda-api.rst b/docs/source/corda-api.rst index f0f803a227..bcb150c3c4 100644 --- a/docs/source/corda-api.rst +++ b/docs/source/corda-api.rst @@ -21,6 +21,8 @@ The following are the core APIs that are used in the development of CorDapps: Before reading this page, you should be familiar with the :doc:`key concepts of Corda `. +.. _internal-apis-and-stability-guarantees: + Internal APIs and stability guarantees -------------------------------------- diff --git a/docs/source/corda-configuration-file.rst b/docs/source/corda-configuration-file.rst index eab81cdb1d..d79fa784cc 100644 --- a/docs/source/corda-configuration-file.rst +++ b/docs/source/corda-configuration-file.rst @@ -184,7 +184,16 @@ absolute path to the node's base directory. interfaces, and then by sending an IP discovery request to the network map service. Set to ``false`` to disable. :compatibilityZoneURL: The root address of Corda compatibility zone network management services, it is used by the Corda node to register with the network and - obtain Corda node certificate, (See :doc:`permissioning` for more information.) and also used by the node to obtain network map information. + obtain Corda node certificate, (See :doc:`permissioning` for more information.) and also used by the node to obtain network map information. Cannot be + set at the same time as the ``networkServices`` option. + +:networkServices: If the Corda compatibility zone services, both network map and registration (doorman), are not running on the same endpoint + and thus have different URLs, then this option should be used in place of the ``compatibilityZoneURL`` setting. + + :doormanURL: Root address of the network registration service. + :networkMapURL: Root address of the network map service. + +.. note:: Only one of ``compatibilityZoneURL`` or ``networkServices`` should be used. :jvmArgs: An optional list of JVM args, as strings, which replace those inherited from the command line when launching via ``corda.jar`` only. e.g. ``jvmArgs = [ "-Xmx220m", "-Xms220m", "-XX:+UseG1GC" ]`` @@ -269,7 +278,7 @@ Simple notary configuration file: notary : { validating : false } - devMode : true + devMode : false compatibilityZoneURL : "https://cz.corda.net" An example ``web-server.conf`` file is as follow: @@ -288,6 +297,10 @@ An example ``web-server.conf`` file is as follow: webAddress : "localhost:12347", rpcUsers : [{ username=user1, password=letmein, permissions=[ StartFlow.net.corda.protocols.CashProtocol ] }] +Configuring a node where the Corda Compatibility Zone's registration and Network Map services exist on different URLs: + +.. literalinclude:: example-code/src/main/resources/example-node-with-networkservices.conf + Fields ------ @@ -344,4 +357,4 @@ Example adding/overriding keyStore password when starting Corda node: ..
sourcecode:: shell - java -Dcorda.rpcSettings.ssl.keyStorePassword=mypassword -jar node.jar \ No newline at end of file + java -Dcorda.rpcSettings.ssl.keyStorePassword=mypassword -jar node.jar diff --git a/docs/source/example-code/src/main/java/net/corda/docs/java/tutorial/contract/CommercialPaper.java b/docs/source/example-code/src/main/java/net/corda/docs/java/tutorial/contract/CommercialPaper.java index 3052ac8dd4..6d406ed5ae 100644 --- a/docs/source/example-code/src/main/java/net/corda/docs/java/tutorial/contract/CommercialPaper.java +++ b/docs/source/example-code/src/main/java/net/corda/docs/java/tutorial/contract/CommercialPaper.java @@ -66,7 +66,7 @@ public class CommercialPaper implements Contract { }); } else if (cmd.getValue() instanceof Commands.Issue) { State output = outputs.get(0); - if (timeWindow == null) throw new IllegalArgumentException("Issuances must be timestamped"); + if (timeWindow == null) throw new IllegalArgumentException("Issuances must have a time-window"); Instant time = timeWindow.getUntilTime(); requireThat(require -> { // Don't allow people to issue commercial paper under other entities identities. diff --git a/docs/source/example-code/src/main/resources/example-node-with-networkservices.conf b/docs/source/example-code/src/main/resources/example-node-with-networkservices.conf new file mode 100644 index 0000000000..61ddf736d1 --- /dev/null +++ b/docs/source/example-code/src/main/resources/example-node-with-networkservices.conf @@ -0,0 +1,25 @@ +myLegalName : "O=Bank A,L=London,C=GB" +keyStorePassword : "cordacadevpass" +trustStorePassword : "trustpass" +crlCheckSoftFail: true +dataSourceProperties : { + dataSourceClassName : org.h2.jdbcx.JdbcDataSource + dataSource.url : "jdbc:h2:file:"${baseDirectory}"/persistence" + dataSource.user : sa + dataSource.password : "" +} +p2pAddress : "my-corda-node:10002" +rpcSettings = { + useSsl = false + standAloneBroker = false + address : "my-corda-node:10003" + adminAddress : "my-corda-node:10004" +} +rpcUsers : [ + { username=user1, password=letmein, permissions=[ StartFlow.net.corda.protocols.CashProtocol ] } +] +devMode : false +networkServices : { + doormanURL = "https://registration.corda.net" + networkMapURL = "https://cz.corda.net" +} diff --git a/docs/source/flow-state-machines.rst b/docs/source/flow-state-machines.rst index 5905fefef4..b811b21b0f 100644 --- a/docs/source/flow-state-machines.rst +++ b/docs/source/flow-state-machines.rst @@ -228,7 +228,7 @@ Next, we call another subflow called ``SignTransactionFlow``. ``SignTransactionF * Sending the transaction back to the buyer. The transaction then needs to be finalized. This is the process of sending the transaction to a notary to assert -(with another signature) that the timestamp in the transaction (if any) is valid and there are no double spends. +(with another signature) that the time-window in the transaction (if any) is valid and there are no double spends. In this flow, finalization is handled by the buyer, so we just wait for the signed transaction to appear in our transaction storage. It will have the same ID as the one we started with but more signatures.
diff --git a/docs/source/key-concepts-contracts.rst b/docs/source/key-concepts-contracts.rst index f4e768fad7..c430ea1123 100644 --- a/docs/source/key-concepts-contracts.rst +++ b/docs/source/key-concepts-contracts.rst @@ -36,7 +36,7 @@ We can picture this situation as follows: The contract code can be written in any JVM language, and has access to the full capabilities of the language, including: -* Checking the number of inputs, outputs, commands, timestamps, and/or attachments +* Checking the number of inputs, outputs, commands, time-window, and/or attachments * Checking the contents of any of these components * Looping constructs, variable assignment, function calls, helper methods, etc. * Grouping similar states to validate them as a group (e.g. imposing a rule on the combined value of all the cash diff --git a/docs/source/key-concepts-oracles.rst b/docs/source/key-concepts-oracles.rst index 9dca75a82f..b5091800c9 100644 --- a/docs/source/key-concepts-oracles.rst +++ b/docs/source/key-concepts-oracles.rst @@ -49,14 +49,14 @@ Transaction Merkle trees ^^^^^^^^^^^^^^^^^^^^^^^^ A Merkle tree is constructed from a transaction by splitting the transaction into leaves, where each leaf contains either an input, an output, a command, or an attachment. The Merkle tree also contains the other fields of the -``WireTransaction``, such as the timestamp, the notary, the type and the signers. +``WireTransaction``, such as the time-window, the notary, the type and the signers. Next, the Merkle tree is built in the normal way by hashing the concatenation of nodes’ hashes below the current one together. It’s visible on the example image below, where ``H`` denotes sha256 function, "+" - concatenation. .. image:: resources/merkleTree.png -The transaction has two input states, one output state, one attachment, one command and a timestamp. For brevity +The transaction has two input states, one output state, one attachment, one command and a time-window. For brevity we didn't include all leaves on the diagram (type, notary and signers are presented as one leaf labelled Rest - in reality they are separate leaves). Notice that if a tree is not a full binary tree, leaves are padded to the nearest power of 2 with zero hash (since finding a pre-image of sha256(x) == 0 is hard computational task) - marked light @@ -73,7 +73,7 @@ obtained belongs to that particular transaction. .. image:: resources/partialMerkle.png In the example above, the node ``H(f)`` is the one holding command data for signing by Oracle service. Blue leaf -``H(g)`` is also included since it's holding timestamp information. Nodes labelled ``Provided`` form the Partial -Merkle Tree, black ones are omitted. Having timestamp with the command that should be in a violet node place and +``H(g)`` is also included since it's holding time-window information. Nodes labelled ``Provided`` form the Partial +Merkle Tree, black ones are omitted. Having time-window with the command that should be in a violet node place and branch we are able to calculate root of this tree and compare it with original transaction identifier - we have a -proof that this command and timestamp belong to this transaction. \ No newline at end of file +proof that this command and time-window belong to this transaction. 
\ No newline at end of file diff --git a/docs/source/key-concepts-transactions.rst b/docs/source/key-concepts-transactions.rst index 32642572a7..ff38a8b12b 100644 --- a/docs/source/key-concepts-transactions.rst +++ b/docs/source/key-concepts-transactions.rst @@ -111,10 +111,10 @@ As well as input states and output states, transactions contain: * Commands * Attachments -* Timestamps +* Time-Window For example, a transaction where Alice pays off £5 of an IOU with Bob using a £5 cash payment, supported by two -attachments and a timestamp, may look as follows: +attachments and a time-window, may look as follows: .. image:: resources/full-tx.png :scale: 25% @@ -172,8 +172,8 @@ For this use case, we have *attachments*. Each transaction can refer to zero or attachments are ZIP/JAR files containing arbitrary content. The information in these files can then be used when checking the transaction's validity. -Time-windows -^^^^^^^^^^^^ +Time-window +^^^^^^^^^^^ In some cases, we want a transaction proposed to only be approved during a certain time-window. For example: * An option can only be exercised after a certain date diff --git a/docs/source/permissioning.rst b/docs/source/permissioning.rst index 3e0e8f4965..a77a141f34 100644 --- a/docs/source/permissioning.rst +++ b/docs/source/permissioning.rst @@ -196,21 +196,25 @@ This can be overridden with the additional ``--network-root-truststore`` flag. The certificate signing request will be created based on node information obtained from the node configuration. The following information from the node configuration file is needed to generate the request. -:myLegalName: Your company's legal name as an X.500 string. X.500 allows differentiation between entities with the same - name as the legal name needs to be unique on the network. If another node has already been permissioned with this - name then the permissioning server will automatically reject the request. The request will also be rejected if it - violates legal name rules, see :ref:`node_naming` for more information. +* **myLegalName** Your company's legal name as an X.500 string. X.500 allows differentiation between entities with the same + name as the legal name needs to be unique on the network. If another node has already been permissioned with this + name then the permissioning server will automatically reject the request. The request will also be rejected if it + violates legal name rules, see :ref:`node_naming` for more information. -:emailAddress: e.g. "admin@company.com" +* **emailAddress** e.g. "admin@company.com" -:devMode: must be set to false +* **devMode** must be set to false -:compatibilityZoneURL: Corda compatibility zone network management service root URL. +* **networkServices or compatibilityZoneURL** The Corda compatibility zone services must be configured. This must be either: - A new pair of private and public keys generated by the Corda node will be used to create the request. + * **compatibilityZoneURL** The Corda compatibility zone network management service root URL. + * **networkServices** Replaces the ``compatibilityZoneURL`` when the Doorman and Network Map services + are configured to operate on different URL endpoints. The ``doorman`` entry is used for registration. - The utility will submit the request to the doorman server and poll for a result periodically to retrieve the certificates. 
- Once the request has been approved and the certificates downloaded from the server, the node will create the keystore and trust store using the certificates and the generated private key. +A new pair of private and public keys generated by the Corda node will be used to create the request. + +The utility will submit the request to the doorman server and poll for a result periodically to retrieve the certificates. +Once the request has been approved and the certificates downloaded from the server, the node will create the keystore and trust store using the certificates and the generated private key. .. note:: You can exit the utility at any time if the approval process is taking longer than expected. The request process will resume on restart. diff --git a/docs/source/release-process-index.rst b/docs/source/release-process-index.rst index b269691a86..9ef0c82347 100644 --- a/docs/source/release-process-index.rst +++ b/docs/source/release-process-index.rst @@ -8,4 +8,5 @@ Release process changelog contributing codestyle - testing \ No newline at end of file + testing + api-scanner \ No newline at end of file diff --git a/docs/source/resources/full-tx.png b/docs/source/resources/full-tx.png index 6391496135..354fc1c5e4 100644 Binary files a/docs/source/resources/full-tx.png and b/docs/source/resources/full-tx.png differ diff --git a/docs/source/tools-index.rst b/docs/source/tools-index.rst index 55b398dd35..c9bcafc44b 100644 --- a/docs/source/tools-index.rst +++ b/docs/source/tools-index.rst @@ -4,6 +4,7 @@ Tools .. toctree:: :maxdepth: 1 + blob-inspector network-simulator demobench node-explorer diff --git a/docs/source/tutorial-building-transactions.rst b/docs/source/tutorial-building-transactions.rst index fdf28e5dce..324cb95ae2 100644 --- a/docs/source/tutorial-building-transactions.rst +++ b/docs/source/tutorial-building-transactions.rst @@ -44,7 +44,7 @@ Transactions in Corda contain a number of elements: transactions to migrate the states across to a consistent notary node before being allowed to mutate any states) -7. Optionally a timestamp that can used by the notary to bound the +7. Optionally a time-window that can be used by the notary to bound the period during which the proposed transaction can be committed to the ledger diff --git a/docs/source/tutorial-contract.rst b/docs/source/tutorial-contract.rst index 23c1430286..5137127b70 100644 --- a/docs/source/tutorial-contract.rst +++ b/docs/source/tutorial-contract.rst @@ -299,13 +299,13 @@ logic. This loop is the core logic of the contract. -The first line simply gets the timestamp out of the transaction. Timestamping of transactions is optional, so a time +The first line simply gets the time-window out of the transaction. Setting a time-window in transactions is optional, so a time may be missing here. We check for it being null later. .. warning:: In the Kotlin version as long as we write a comparison with the transaction time first the compiler will verify we didn't forget to check if it's missing. Unfortunately due to the need for smooth Java interop, this check won't happen if we write e.g. ``someDate > time``, it has to be ``time < someDate``. So it's good practice to - always write the transaction timestamp first. + always write the transaction time-window first. Next, we take one of three paths, depending on what the type of the command object is. @@ -597,7 +597,7 @@ The time-lock contract mentioned above can be implemented very simply: class TestTimeLock : Contract { ...
override fun verify(tx: LedgerTransaction) { - val time = tx.timestamp.before ?: throw IllegalStateException(...) + val time = tx.timeWindow?.untilTime ?: throw IllegalStateException(...) ... requireThat { "the time specified in the time-lock has passed" by diff --git a/experimental/blobinspector/build.gradle b/experimental/blobinspector/build.gradle deleted file mode 100644 index 2862ff6fae..0000000000 --- a/experimental/blobinspector/build.gradle +++ /dev/null @@ -1,52 +0,0 @@ -apply plugin: 'java' -apply plugin: 'kotlin' -apply plugin: 'application' - -mainClassName = 'net.corda.blobinspector.MainKt' - -dependencies { - compile project(':core') - compile project(':node-api') - - compile "commons-cli:commons-cli:$commons_cli_version" - - testCompile project(':test-utils') - - testCompile "junit:junit:$junit_version" -} - -/** - * To run from within gradle use - * - * ./gradlew -PrunArgs=" " :experimental:blobinspector:run - * - * For example, to parse a file from the command line and print out the deserialized properties - * - * ./gradlew -PrunArgs="-f -d" :experimental:blobinspector:run - * - * at the command line. - */ -run { - if (project.hasProperty('runArgs')) { - args = [ project.findProperty('runArgs').toString().split(" ") ].flatten() - } - - if (System.properties.getProperty('consoleLogLevel') != null) { - logging.captureStandardOutput(LogLevel.valueOf(System.properties.getProperty('consoleLogLevel'))) - logging.captureStandardError(LogLevel.valueOf(System.properties.getProperty('consoleLogLevel'))) - systemProperty "consoleLogLevel", System.properties.getProperty('consoleLogLevel') - } -} - -/** - * Build a executable jar - */ -jar { - baseName 'blobinspector' - manifest { - attributes( - 'Automatic-Module-Name': 'net.corda.experimental.blobinspector', - 'Main-Class': 'net.corda.blobinspector.MainKt' - ) - } -} diff --git a/experimental/blobinspector/src/main/kotlin/net/corda/blobinspector/BlobInspector.kt b/experimental/blobinspector/src/main/kotlin/net/corda/blobinspector/BlobInspector.kt deleted file mode 100644 index 84de454358..0000000000 --- a/experimental/blobinspector/src/main/kotlin/net/corda/blobinspector/BlobInspector.kt +++ /dev/null @@ -1,405 +0,0 @@ -package net.corda.blobinspector - -import net.corda.core.crypto.SecureHash -import net.corda.core.serialization.EncodingWhitelist -import net.corda.core.serialization.SerializationEncoding -import net.corda.core.utilities.ByteSequence -import net.corda.serialization.internal.SerializationFactoryImpl -import net.corda.serialization.internal.amqp.CompositeType -import net.corda.serialization.internal.amqp.DeserializationInput -import net.corda.serialization.internal.amqp.RestrictedType -import net.corda.serialization.internal.amqp.TypeNotation -import net.corda.serialization.internal.amqp.amqpMagic -import org.apache.qpid.proton.amqp.Binary -import org.apache.qpid.proton.amqp.DescribedType -import org.apache.qpid.proton.amqp.Symbol - -/** - * Print a string to the console only if the verbose config option is set. 
- */ -fun String.debug(config: Config) { - if (config.verbose) { - println(this) - } -} - -/** - * - */ -interface Stringify { - fun stringify(sb: IndentingStringBuilder) -} - -/** - * Makes classnames easier to read by stripping off the package names from the class and separating nested - * classes - * - * For example: - * - * net.corda.blobinspector.Class1 - * Class1 - * - * net.corda.blobinspector.Class1 - * Class1 - * - * net.corda.blobinspector.Class1> - * Class1 > - * - * net.corda.blobinspector.Class1> - * Class1 :: C > - */ -fun String.simplifyClass(): String { - - return if (this.endsWith('>')) { - val templateStart = this.indexOf('<') - val clazz = (this.substring(0, templateStart)) - val params = this.substring(templateStart+1, this.length-1).split(',').joinToString { it.simplifyClass() } - - "${clazz.simplifyClass()} <$params>" - } - else { - substring(this.lastIndexOf('.') + 1).replace("$", " :: ") - } -} - -/** - * Represents the deserialized form of the property of an Object - * - * @param name - * @param type - */ -abstract class Property( - val name: String, - val type: String) : Stringify - -/** - * Derived class of [Property], represents properties of an object that are non compelex, such - * as any POD type or String - */ -class PrimProperty( - name: String, - type: String, - private val value: String) : Property(name, type) { - override fun toString(): String = "$name : $type : $value" - - override fun stringify(sb: IndentingStringBuilder) { - sb.appendln("$name : $type : $value") - } -} - -/** - * Derived class of [Property] that represents a binary blob. Specifically useful because printing - * a stream of bytes onto the screen isn't very use friendly - */ -class BinaryProperty( - name: String, - type: String, - val value: ByteArray) : Property(name, type) { - override fun toString(): String = "$name : $type : <<>>" - - override fun stringify(sb: IndentingStringBuilder) { - sb.appendln("$name : $type : <<>>") - } -} - -/** - * Derived class of [Property] that represent a list property. List could be either PoD types or - * composite types. - */ -class ListProperty( - name: String, - type: String, - private val values: MutableList = mutableListOf()) : Property(name, type) { - override fun stringify(sb: IndentingStringBuilder) { - sb.apply { - when { - values.isEmpty() -> appendln("$name : $type : [ << EMPTY LIST >> ]") - values.first() is Stringify -> { - appendln("$name : $type : [") - values.forEach { - (it as Stringify).stringify(this) - } - appendln("]") - } - else -> { - appendln("$name : $type : [") - values.forEach { - appendln(it.toString()) - } - appendln("]") - } - } - } - } -} - -class MapProperty( - name: String, - type: String, - private val map: MutableMap<*, *> -) : Property(name, type) { - override fun stringify(sb: IndentingStringBuilder) { - if (map.isEmpty()) { - sb.appendln("$name : $type : { << EMPTY MAP >> }") - return - } - - // TODO this will not produce pretty output - sb.apply { - appendln("$name : $type : {") - map.forEach { - try { - (it.key as Stringify).stringify(this) - } catch (e: ClassCastException) { - append (it.key.toString() + " : ") - } - try { - (it.value as Stringify).stringify(this) - } catch (e: ClassCastException) { - appendln("\"${it.value.toString()}\"") - } - } - appendln("}") - } - } -} - -/** - * Derived class of [Property] that represents class properties that are themselves instances of - * some complex type. 
- */ -class InstanceProperty( - name: String, - type: String, - val value: Instance) : Property(name, type) { - override fun stringify(sb: IndentingStringBuilder) { - sb.append("$name : ") - value.stringify(sb) - } -} - -/** - * Represents an instance of a composite type. - */ -class Instance( - val name: String, - val type: String, - val fields: MutableList = mutableListOf()) : Stringify { - override fun stringify(sb: IndentingStringBuilder) { - sb.apply { - appendln("${name.simplifyClass()} : {") - fields.forEach { - it.stringify(this) - } - appendln("}") - } - } -} - -/** - * - */ -fun inspectComposite( - config: Config, - typeMap: Map, - obj: DescribedType): Instance { - if (obj.described !is List<*>) throw MalformedBlob("") - - val name = (typeMap[obj.descriptor] as CompositeType).name - "composite: $name".debug(config) - - val inst = Instance( - typeMap[obj.descriptor]?.name ?: "", - typeMap[obj.descriptor]?.label ?: "") - - (typeMap[obj.descriptor] as CompositeType).fields.zip(obj.described as List<*>).forEach { - " field: ${it.first.name}".debug(config) - inst.fields.add( - if (it.second is DescribedType) { - " - is described".debug(config) - val d = inspectDescribed(config, typeMap, it.second as DescribedType) - - when (d) { - is Instance -> - InstanceProperty( - it.first.name, - it.first.type, - d) - is List<*> -> { - " - List".debug(config) - ListProperty( - it.first.name, - it.first.type, - d as MutableList) - } - is Map<*, *> -> { - MapProperty( - it.first.name, - it.first.type, - d as MutableMap<*, *>) - } - else -> { - " skip it".debug(config) - return@forEach - } - } - - } else { - " - is prim".debug(config) - when (it.first.type) { - // Note, as in the case of SHA256 we can treat particular binary types - // as different properties with a little coercion - "binary" -> { - if (name == "net.corda.core.crypto.SecureHash\$SHA256") { - PrimProperty( - it.first.name, - it.first.type, - SecureHash.SHA256((it.second as Binary).array).toString()) - } else { - BinaryProperty(it.first.name, it.first.type, (it.second as Binary).array) - } - } - else -> PrimProperty(it.first.name, it.first.type, it.second.toString()) - } - }) - } - - return inst -} - -fun inspectRestricted( - config: Config, - typeMap: Map, - obj: DescribedType): Any { - return when ((typeMap[obj.descriptor] as RestrictedType).source) { - "list" -> inspectRestrictedList(config, typeMap, obj) - "map" -> inspectRestrictedMap(config, typeMap, obj) - else -> throw NotImplementedError() - } -} - - -fun inspectRestrictedList( - config: Config, - typeMap: Map, - obj: DescribedType -) : List { - if (obj.described !is List<*>) throw MalformedBlob("") - - return mutableListOf().apply { - (obj.described as List<*>).forEach { - when (it) { - is DescribedType -> add(inspectDescribed(config, typeMap, it)) - is RestrictedType -> add(inspectRestricted(config, typeMap, it)) - else -> add (it.toString()) - } - } - } -} - -fun inspectRestrictedMap( - config: Config, - typeMap: Map, - obj: DescribedType -) : Map { - if (obj.described !is Map<*,*>) throw MalformedBlob("") - - return mutableMapOf().apply { - (obj.described as Map<*, *>).forEach { - val key = when (it.key) { - is DescribedType -> inspectDescribed(config, typeMap, it.key as DescribedType) - is RestrictedType -> inspectRestricted(config, typeMap, it.key as RestrictedType) - else -> it.key.toString() - } - - val value = when (it.value) { - is DescribedType -> inspectDescribed(config, typeMap, it.value as DescribedType) - is RestrictedType -> inspectRestricted(config, typeMap, 
it.value as RestrictedType) - else -> it.value.toString() - } - - this[key] = value - } - } -} - - -/** - * Every element of the blob stream will be a ProtonJ [DescribedType]. When inspecting the blob stream - * the two custom Corda types we're interested in are [CompositeType]'s, representing the instance of - * some object (class), and [RestrictedType]'s, representing containers and enumerations. - * - * @param config The configuration object that controls the behaviour of the BlobInspector - * @param typeMap - * @param obj - */ -fun inspectDescribed( - config: Config, - typeMap: Map, - obj: DescribedType): Any { - "${obj.descriptor} in typeMap? = ${obj.descriptor in typeMap}".debug(config) - - return when (typeMap[obj.descriptor]) { - is CompositeType -> { - "* It's composite".debug(config) - inspectComposite(config, typeMap, obj) - } - is RestrictedType -> { - "* It's restricted".debug(config) - inspectRestricted(config, typeMap, obj) - } - else -> { - "${typeMap[obj.descriptor]?.name} is neither Composite or Restricted".debug(config) - } - } - -} - -internal object NullEncodingWhitelist : EncodingWhitelist { - override fun acceptEncoding(encoding: SerializationEncoding) = false -} - -// TODO : Refactor to generically poerate on arbitrary blobs, not a single workflow -fun inspectBlob(config: Config, blob: ByteArray) { - val bytes = ByteSequence.of(blob) - - val headerSize = SerializationFactoryImpl.magicSize - - // TODO written to only understand one version, when we support multiple this will need to change - val headers = listOf(ByteSequence.of(amqpMagic.bytes)) - - val blobHeader = bytes.take(headerSize) - - if (blobHeader !in headers) { - throw MalformedBlob("Blob is not a Corda AMQP serialised object graph") - } - - - val e = DeserializationInput.getEnvelope(bytes, NullEncodingWhitelist) - - if (config.schema) { - println(e.schema) - } - - if (config.transforms) { - println(e.transformsSchema) - } - - val typeMap = e.schema.types.associateBy({ it.descriptor.name }, { it }) - - if (config.data) { - val inspected = inspectDescribed(config, typeMap, e.obj as DescribedType) - - println("\n${IndentingStringBuilder().apply { (inspected as Instance).stringify(this) }}") - - (inspected as Instance).fields.find { - it.type.startsWith("net.corda.core.serialization.SerializedBytes<") - }?.let { - "Found field of SerializedBytes".debug(config) - (it as InstanceProperty).value.fields.find { it.name == "bytes" }?.let { raw -> - inspectBlob(config, (raw as BinaryProperty).value) - } - } - } -} - diff --git a/experimental/blobinspector/src/main/kotlin/net/corda/blobinspector/BlobLoader.kt b/experimental/blobinspector/src/main/kotlin/net/corda/blobinspector/BlobLoader.kt deleted file mode 100644 index c831665036..0000000000 --- a/experimental/blobinspector/src/main/kotlin/net/corda/blobinspector/BlobLoader.kt +++ /dev/null @@ -1,40 +0,0 @@ -package net.corda.blobinspector - -import java.io.File -import java.net.URL - -/** - * - */ -class FileBlobHandler(config_: Config) : BlobHandler(config_) { - private val path = File(URL((config_ as FileConfig).file).toURI()) - - override fun getBytes(): ByteArray { - return path.readBytes() - } -} - -/** - * - */ -class InMemoryBlobHandler(config_: Config) : BlobHandler(config_) { - private val localBytes = (config_ as InMemoryConfig).blob?.bytes ?: kotlin.ByteArray(0) - override fun getBytes(): ByteArray = localBytes -} - -/** - * - */ -abstract class BlobHandler(val config: Config) { - companion object { - fun make(config: Config): BlobHandler { - return 
when (config.mode) { - Mode.file -> FileBlobHandler(config) - Mode.inMem -> InMemoryBlobHandler(config) - } - } - } - - abstract fun getBytes(): ByteArray -} - diff --git a/experimental/blobinspector/src/main/kotlin/net/corda/blobinspector/Config.kt b/experimental/blobinspector/src/main/kotlin/net/corda/blobinspector/Config.kt deleted file mode 100644 index 376331ec2b..0000000000 --- a/experimental/blobinspector/src/main/kotlin/net/corda/blobinspector/Config.kt +++ /dev/null @@ -1,137 +0,0 @@ -package net.corda.blobinspector - -import org.apache.commons.cli.CommandLine -import net.corda.core.serialization.SerializedBytes -import org.apache.commons.cli.Option -import org.apache.commons.cli.Options - -/** - * Enumeration of the modes in which the blob inspector can be run. - * - * @property make lambda function that takes no parameters and returns a specific instance of the configuration - * object for that mode. - * - * @property options A lambda function that takes no parameters and returns an [Options] instance that define - * the command line flags related to this mode. For example ``file`` mode would have an option to pass in - * the name of the file to read. - * - */ -enum class Mode( - val make : () -> Config, - val options : (Options) -> Unit -) { - file( - { - FileConfig(Mode.file) - }, - { o -> - o.apply{ - addOption( - Option ("f", "file", true, "path to file").apply { - isRequired = true - } - ) - } - } - ), - inMem( - { - InMemoryConfig(Mode.inMem) - }, - { - // The in memory only mode has no specific option assocaited with it as it's intended for - // testing purposes only within the unit test framework and not use on the command line - } - ) -} - -/** - * Configuration data class for the Blob Inspector. - * - * @property mode - */ -abstract class Config (val mode: Mode) { - var schema: Boolean = false - var transforms: Boolean = false - var data: Boolean = false - var verbose: Boolean = false - - abstract fun populateSpecific(cmdLine: CommandLine) - abstract fun withVerbose() : Config - - fun populate(cmdLine: CommandLine) { - schema = cmdLine.hasOption('s') - transforms = cmdLine.hasOption('t') - data = cmdLine.hasOption('d') - verbose = cmdLine.hasOption('v') - - populateSpecific(cmdLine) - } - - fun options() = Options().apply { - // install generic options - addOption(Option("s", "schema", false, "print the blob's schema").apply { - isRequired = false - }) - - addOption(Option("t", "transforms", false, "print the blob's transforms schema").apply { - isRequired = false - }) - - addOption(Option("d", "data", false, "Display the serialised data").apply { - isRequired = false - }) - - addOption(Option("v", "verbose", false, "Enable debug output").apply { - isRequired = false - }) - - // install the mode specific options - mode.options(this) - } -} - - -/** - * Configuration object when running in "File" mode, i.e. the object has been specified at - * the command line - */ -class FileConfig ( - mode: Mode -) : Config(mode) { - - var file: String = "unset" - - override fun populateSpecific(cmdLine : CommandLine) { - file = cmdLine.getParsedOptionValue("f") as String - } - - override fun withVerbose() : FileConfig { - return FileConfig(mode).apply { - this.schema = schema - this.transforms = transforms - this.data = data - this.verbose = true - } - } -} - - -/** - * Placeholder config objet used when running unit tests and the inspected blob is being fed in - * via some mechanism directly. 
Normally this will be the direct serialisation of an object in a unit - * test and then dumping that blob into the inspector for visual comparison of the output - */ -class InMemoryConfig ( - mode: Mode -) : Config(mode) { - var blob: SerializedBytes<*>? = null - - override fun populateSpecific(cmdLine: CommandLine) { - throw UnsupportedOperationException("In memory config is for testing only and cannot set specific flags") - } - - override fun withVerbose(): Config { - throw UnsupportedOperationException("In memory config is for testing headlessly, cannot be verbose") - } -} diff --git a/experimental/blobinspector/src/main/kotlin/net/corda/blobinspector/Errors.kt b/experimental/blobinspector/src/main/kotlin/net/corda/blobinspector/Errors.kt deleted file mode 100644 index 888ef1e302..0000000000 --- a/experimental/blobinspector/src/main/kotlin/net/corda/blobinspector/Errors.kt +++ /dev/null @@ -1,3 +0,0 @@ -package net.corda.blobinspector - -class MalformedBlob(msg: String) : Exception(msg) diff --git a/experimental/blobinspector/src/main/kotlin/net/corda/blobinspector/IndentingStringBuilder.kt b/experimental/blobinspector/src/main/kotlin/net/corda/blobinspector/IndentingStringBuilder.kt deleted file mode 100644 index 1ec7fe6557..0000000000 --- a/experimental/blobinspector/src/main/kotlin/net/corda/blobinspector/IndentingStringBuilder.kt +++ /dev/null @@ -1,44 +0,0 @@ -package net.corda.blobinspector - -/** - * Wrapper around a [StringBuilder] that automates the indenting of lines as they're appended to facilitate - * pretty printing of deserialized blobs. - * - * @property sb The wrapped [StringBuilder] - * @property indenting Boolean flag that indicates weather we need to pad the start of whatever text - * currently being added to the string. - * @property indent How deeply the next line should be offset from the first column - */ -class IndentingStringBuilder(s: String = "", private val offset: Int = 4) { - private val sb = StringBuilder(s) - private var indenting = true - private var indent = 0 - - private fun wrap(ln: String, appender: (String) -> Unit) { - if ((ln.endsWith("}") || ln.endsWith("]")) && indent > 0 && ln.length == 1) { - indent -= offset - } - - appender(ln) - - if (ln.endsWith("{") || ln.endsWith("[")) { - indent += offset - } - } - - fun appendln(ln: String) { - wrap(ln) { s -> sb.appendln("${"".padStart(if (indenting) indent else 0, ' ')}$s") } - - indenting = true - } - - fun append(ln: String) { - indenting = false - - wrap(ln) { s -> sb.append("${"".padStart(indent, ' ')}$s") } - } - - override fun toString(): String { - return sb.toString() - } -} \ No newline at end of file diff --git a/experimental/blobinspector/src/main/kotlin/net/corda/blobinspector/Main.kt b/experimental/blobinspector/src/main/kotlin/net/corda/blobinspector/Main.kt deleted file mode 100644 index 9abe288afa..0000000000 --- a/experimental/blobinspector/src/main/kotlin/net/corda/blobinspector/Main.kt +++ /dev/null @@ -1,81 +0,0 @@ -package net.corda.blobinspector - -import org.apache.commons.cli.* -import java.lang.IllegalArgumentException - -/** - * Mode isn't a required property as we default it to [Mode.file] - */ -private fun modeOption() = Option("m", "mode", true, "mode, file is the default").apply { - isRequired = false -} - -/** - * - * Parse the command line arguments looking for the main mode into which the application is - * being put. Note, this defaults to [Mode.file] if not set meaning we will look for a file path - * being passed as a parameter and parse that file. 
- * - * @param args reflects the command line arguments - * - * @return An instantiated but unpopulated [Config] object instance suitable for the mode into - * which we've been placed. This Config object should be populated via [loadModeSpecificOptions] - */ -fun getMode(args: Array): Config { - // For now we only care what mode we're being put in, we can build the rest of the args and parse them - // later - val options = Options().apply { - addOption(modeOption()) - } - - val cmd = try { - DefaultParser().parse(options, args, true) - } catch (e: org.apache.commons.cli.ParseException) { - println(e) - HelpFormatter().printHelp("blobinspector", options) - throw IllegalArgumentException("OH NO!!!") - } - - return try { - Mode.valueOf(cmd.getParsedOptionValue("m") as? String ?: "file") - } catch (e: IllegalArgumentException) { - Mode.file - }.make() -} - -/** - * - * @param config an instance of a [Config] specialisation suitable for the mode into which - * the application has been put. - * @param args The command line arguments - */ -fun loadModeSpecificOptions(config: Config, args: Array) { - config.apply { - // load that modes specific command line switches, needs to include the mode option - val modeSpecificOptions = config.options().apply { - addOption(modeOption()) - } - - populate(try { - DefaultParser().parse(modeSpecificOptions, args, false) - } catch (e: org.apache.commons.cli.ParseException) { - println("Error: ${e.message}") - HelpFormatter().printHelp("blobinspector", modeSpecificOptions) - System.exit(1) - return - }) - } -} - -/** - * Executable entry point - */ -fun main(args: Array) { - println("<<< WARNING: this tool is experimental and under active development >>>") - getMode(args).let { mode -> - loadModeSpecificOptions(mode, args) - BlobHandler.make(mode) - }.apply { - inspectBlob(config, getBytes()) - } -} diff --git a/experimental/blobinspector/src/test/kotlin/net/corda/blobinspector/FileParseTests.kt b/experimental/blobinspector/src/test/kotlin/net/corda/blobinspector/FileParseTests.kt deleted file mode 100644 index c0a87a667e..0000000000 --- a/experimental/blobinspector/src/test/kotlin/net/corda/blobinspector/FileParseTests.kt +++ /dev/null @@ -1,87 +0,0 @@ -package net.corda.blobinspector - -import java.net.URI - -import org.junit.Test -import net.corda.testing.common.internal.ProjectStructure.projectRootDir - -class FileParseTests { - @Suppress("UNUSED") - var localPath: URI = projectRootDir.toUri().resolve( - "tools/blobinspector/src/test/resources/net/corda/blobinspector") - - fun setupArgsWithFile(path: String) = Array(5) { - when (it) { - 0 -> "-m" - 1 -> "file" - 2 -> "-f" - 3 -> path - 4 -> "-d" - else -> "error" - } - } - - private val filesToTest = listOf( - "FileParseTests.1Int", - "FileParseTests.2Int", - "FileParseTests.3Int", - "FileParseTests.1String", - "FileParseTests.1Composite", - "FileParseTests.2Composite", - "FileParseTests.IntList", - "FileParseTests.StringList", - "FileParseTests.MapIntString", - "FileParseTests.MapIntClass" - ) - - fun testFile(file: String) { - val path = FileParseTests::class.java.getResource(file) - val args = setupArgsWithFile(path.toString()) - - val handler = getMode(args).let { mode -> - loadModeSpecificOptions(mode, args) - BlobHandler.make(mode) - } - - inspectBlob(handler.config, handler.getBytes()) - } - - @Test - fun simpleFiles() { - filesToTest.forEach { testFile(it) } - } - - @Test - fun specificTest() { - testFile(filesToTest[4]) - testFile(filesToTest[5]) - testFile(filesToTest[6]) - } - - @Test - fun 
networkParams() { - val file = "networkParams" - val path = FileParseTests::class.java.getResource(file) - val verbose = false - - val args = verbose.let { - if (it) - Array(4) { - when (it) { 0 -> "-f"; 1 -> path.toString(); 2 -> "-d"; 3 -> "-vs"; else -> "error" - } - } - else - Array(3) { - when (it) { 0 -> "-f"; 1 -> path.toString(); 2 -> "-d"; else -> "error" - } - } - } - - val handler = getMode(args).let { mode -> - loadModeSpecificOptions(mode, args) - BlobHandler.make(mode) - } - - inspectBlob(handler.config, handler.getBytes()) - } -} diff --git a/experimental/blobinspector/src/test/kotlin/net/corda/blobinspector/InMemoryTests.kt b/experimental/blobinspector/src/test/kotlin/net/corda/blobinspector/InMemoryTests.kt deleted file mode 100644 index 4b94bf2eea..0000000000 --- a/experimental/blobinspector/src/test/kotlin/net/corda/blobinspector/InMemoryTests.kt +++ /dev/null @@ -1,91 +0,0 @@ -package net.corda.blobinspector - -import net.corda.core.serialization.SerializedBytes -import net.corda.serialization.internal.AllWhitelist -import net.corda.serialization.internal.amqp.SerializationOutput -import net.corda.serialization.internal.amqp.SerializerFactory -import net.corda.serialization.internal.AMQP_P2P_CONTEXT -import org.junit.Test - - -class InMemoryTests { - private val factory = SerializerFactory(AllWhitelist, ClassLoader.getSystemClassLoader()) - - private fun inspect (b: SerializedBytes<*>) { - BlobHandler.make( - InMemoryConfig(Mode.inMem).apply { blob = b; data = true} - ).apply { - inspectBlob(config, getBytes()) - } - } - - @Test - fun test1() { - data class C (val a: Int, val b: Long, val c: String) - inspect (SerializationOutput(factory).serialize(C(100, 567L, "this is a test"), AMQP_P2P_CONTEXT)) - } - - @Test - fun test2() { - data class C (val i: Int, val c: C?) 
- inspect (SerializationOutput(factory).serialize(C(1, C(2, C(3, C(4, null)))), AMQP_P2P_CONTEXT)) - } - - @Test - fun test3() { - data class C (val a: IntArray, val b: Array) - - val a = IntArray(10) { i -> i } - val c = C(a, arrayOf("aaa", "bbb", "ccc")) - - inspect (SerializationOutput(factory).serialize(c, AMQP_P2P_CONTEXT)) - } - - @Test - fun test4() { - data class Elem(val e1: Long, val e2: String) - data class Wrapper (val name: String, val elementes: List) - - inspect (SerializationOutput(factory).serialize( - Wrapper("Outer Class", - listOf( - Elem(1L, "First element"), - Elem(2L, "Second element"), - Elem(3L, "Third element") - )), AMQP_P2P_CONTEXT)) - } - - @Test - fun test4b() { - data class Elem(val e1: Long, val e2: String) - data class Wrapper (val name: String, val elementes: List>) - - inspect (SerializationOutput(factory).serialize( - Wrapper("Outer Class", - listOf ( - listOf( - Elem(1L, "First element"), - Elem(2L, "Second element"), - Elem(3L, "Third element") - ), - listOf( - Elem(4L, "Fourth element"), - Elem(5L, "Fifth element"), - Elem(6L, "Sixth element") - ) - )), AMQP_P2P_CONTEXT)) - } - - @Test - fun test5() { - data class C (val a: Map) - - inspect (SerializationOutput(factory).serialize( - C(mapOf( - "a" to "a a a", - "b" to "b b b", - "c" to "c c c")), - AMQP_P2P_CONTEXT - )) - } -} \ No newline at end of file diff --git a/experimental/blobinspector/src/test/kotlin/net/corda/blobinspector/ModeParse.kt b/experimental/blobinspector/src/test/kotlin/net/corda/blobinspector/ModeParse.kt deleted file mode 100644 index 80560576a4..0000000000 --- a/experimental/blobinspector/src/test/kotlin/net/corda/blobinspector/ModeParse.kt +++ /dev/null @@ -1,83 +0,0 @@ -package net.corda.blobinspector - -import org.junit.Test -import org.junit.Assert.assertEquals -import org.junit.Assert.assertTrue -import kotlin.test.assertFalse - -class ModeParse { - @Test - fun fileIsSetToFile() { - val opts1 = Array(2) { - when (it) { - 0 -> "-m" - 1 -> "file" - else -> "error" - } - } - - assertEquals(Mode.file, getMode(opts1).mode) - } - - @Test - fun nothingIsSetToFile() { - val opts1 = Array(0) { "" } - - assertEquals(Mode.file, getMode(opts1).mode) - } - - @Test - fun filePathIsSet() { - val opts1 = Array(4) { - when (it) { - 0 -> "-m" - 1 -> "file" - 2 -> "-f" - 3 -> "path/to/file" - else -> "error" - } - } - - val config = getMode(opts1) - assertTrue(config is FileConfig) - assertEquals(Mode.file, config.mode) - assertEquals("unset", (config as FileConfig).file) - - loadModeSpecificOptions(config, opts1) - - assertEquals("path/to/file", config.file) - } - - @Test - fun schemaIsSet() { - Array(2) { - when (it) { 0 -> "-f"; 1 -> "path/to/file"; else -> "error" - } - }.let { options -> - getMode(options).apply { - loadModeSpecificOptions(this, options) - assertFalse(schema) - } - } - - Array(3) { - when (it) { 0 -> "--schema"; 1 -> "-f"; 2 -> "path/to/file"; else -> "error" - } - }.let { - getMode(it).apply { - loadModeSpecificOptions(this, it) - assertTrue(schema) - } - } - - Array(3) { - when (it) { 0 -> "-f"; 1 -> "path/to/file"; 2 -> "-s"; else -> "error" - } - }.let { - getMode(it).apply { - loadModeSpecificOptions(this, it) - assertTrue(schema) - } - } - } -} \ No newline at end of file diff --git a/experimental/blobinspector/src/test/kotlin/net/corda/blobinspector/SimplifyClassTests.kt b/experimental/blobinspector/src/test/kotlin/net/corda/blobinspector/SimplifyClassTests.kt deleted file mode 100644 index 3dcafbc88d..0000000000 --- 
a/experimental/blobinspector/src/test/kotlin/net/corda/blobinspector/SimplifyClassTests.kt +++ /dev/null @@ -1,28 +0,0 @@ -package net.corda.blobinspector - -import org.junit.Test - -class SimplifyClassTests { - - @Test - fun test1() { - data class A(val a: Int) - - println(A::class.java.name) - println(A::class.java.name.simplifyClass()) - } - - @Test - fun test2() { - val p = this.javaClass.`package`.name - - println("$p.Class1<$p.Class2>") - println("$p.Class1<$p.Class2>".simplifyClass()) - println("$p.Class1<$p.Class2, $p.Class3>") - println("$p.Class1<$p.Class2, $p.Class3>".simplifyClass()) - println("$p.Class1<$p.Class2<$p.Class3>>") - println("$p.Class1<$p.Class2<$p.Class3>>".simplifyClass()) - println("$p.Class1<$p.Class2<$p.Class3>>") - println("$p.Class1\$C<$p.Class2<$p.Class3>>".simplifyClass()) - } -} \ No newline at end of file diff --git a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.1Composite b/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.1Composite deleted file mode 100644 index 450e6970da..0000000000 Binary files a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.1Composite and /dev/null differ diff --git a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.1Int b/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.1Int deleted file mode 100644 index 25dcb48d65..0000000000 Binary files a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.1Int and /dev/null differ diff --git a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.1String b/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.1String deleted file mode 100644 index 9676f0375f..0000000000 Binary files a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.1String and /dev/null differ diff --git a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.2Composite b/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.2Composite deleted file mode 100644 index 0bf3a5c475..0000000000 Binary files a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.2Composite and /dev/null differ diff --git a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.2Int b/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.2Int deleted file mode 100644 index 118a23f37b..0000000000 Binary files a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.2Int and /dev/null differ diff --git a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.3Int b/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.3Int deleted file mode 100644 index 9f00d59068..0000000000 Binary files a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.3Int and /dev/null differ diff --git a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.IntList b/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.IntList deleted file mode 100644 index d762a9e821..0000000000 Binary files a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.IntList and /dev/null differ diff --git 
a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.MapIntClass b/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.MapIntClass deleted file mode 100644 index 175949d9aa..0000000000 Binary files a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.MapIntClass and /dev/null differ diff --git a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.MapIntString b/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.MapIntString deleted file mode 100644 index 67ba352ec4..0000000000 Binary files a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.MapIntString and /dev/null differ diff --git a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.StringList b/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.StringList deleted file mode 100644 index 5758d9fa62..0000000000 Binary files a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/FileParseTests.StringList and /dev/null differ diff --git a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/networkParams b/experimental/blobinspector/src/test/resources/net/corda/blobinspector/networkParams deleted file mode 100644 index dcdbaa7b5f..0000000000 Binary files a/experimental/blobinspector/src/test/resources/net/corda/blobinspector/networkParams and /dev/null differ diff --git a/node-api/src/main/kotlin/net/corda/nodeapi/internal/config/ConfigUtilities.kt b/node-api/src/main/kotlin/net/corda/nodeapi/internal/config/ConfigUtilities.kt index 9f3a0b07d5..e82516ab2a 100644 --- a/node-api/src/main/kotlin/net/corda/nodeapi/internal/config/ConfigUtilities.kt +++ b/node-api/src/main/kotlin/net/corda/nodeapi/internal/config/ConfigUtilities.kt @@ -12,7 +12,12 @@ package net.corda.nodeapi.internal.config -import com.typesafe.config.* +import com.typesafe.config.Config +import com.typesafe.config.ConfigException +import com.typesafe.config.ConfigFactory +import com.typesafe.config.ConfigUtil +import com.typesafe.config.ConfigValueFactory +import com.typesafe.config.ConfigValueType import net.corda.core.identity.CordaX500Name import net.corda.core.internal.noneOrSingle import net.corda.core.internal.uncheckedCast @@ -49,7 +54,7 @@ operator fun Config.getValue(receiver: Any, metadata: KProperty<*>): T return getValueInternal(metadata.name, metadata.returnType, UnknownConfigKeysPolicy.IGNORE::handle) } -fun Config.parseAs(clazz: KClass, onUnknownKeys: ((Set, logger: Logger) -> Unit) = UnknownConfigKeysPolicy.FAIL::handle): T { +fun Config.parseAs(clazz: KClass, onUnknownKeys: ((Set, logger: Logger) -> Unit) = UnknownConfigKeysPolicy.FAIL::handle, nestedPath: String? = null): T { require(clazz.isData) { "Only Kotlin data classes can be parsed. Offending: ${clazz.qualifiedName}" } val constructor = clazz.primaryConstructor!! 
val parameters = constructor.parameters @@ -72,7 +77,7 @@ fun Config.parseAs(clazz: KClass, onUnknownKeys: ((Set, log // Get the matching property for this parameter val property = clazz.memberProperties.first { it.name == param.name } val path = defaultToOldPath(property) - getValueInternal(path, param.type, onUnknownKeys) + getValueInternal(path, param.type, onUnknownKeys, nestedPath) } try { return constructor.callBy(args) @@ -101,68 +106,83 @@ fun Config.toProperties(): Properties { { it.value.unwrapped().toString() }) } -private fun Config.getValueInternal(path: String, type: KType, onUnknownKeys: ((Set, logger: Logger) -> Unit)): T { - return uncheckedCast(if (type.arguments.isEmpty()) getSingleValue(path, type, onUnknownKeys) else getCollectionValue(path, type, onUnknownKeys)) +private fun Config.getValueInternal(path: String, type: KType, onUnknownKeys: ((Set, logger: Logger) -> Unit), nestedPath: String? = null): T { + return uncheckedCast(if (type.arguments.isEmpty()) getSingleValue(path, type, onUnknownKeys, nestedPath) else getCollectionValue(path, type, onUnknownKeys, nestedPath)) } -private fun Config.getSingleValue(path: String, type: KType, onUnknownKeys: (Set, logger: Logger) -> Unit): Any? { +private fun Config.getSingleValue(path: String, type: KType, onUnknownKeys: (Set, logger: Logger) -> Unit, nestedPath: String? = null): Any? { if (type.isMarkedNullable && !hasPath(path)) return null val typeClass = type.jvmErasure - return when (typeClass) { - String::class -> getString(path) - Int::class -> getInt(path) - Long::class -> getLong(path) - Double::class -> getDouble(path) - Boolean::class -> getBoolean(path) - LocalDate::class -> LocalDate.parse(getString(path)) - Duration::class -> getDuration(path) - Instant::class -> Instant.parse(getString(path)) - NetworkHostAndPort::class -> NetworkHostAndPort.parse(getString(path)) - Path::class -> Paths.get(getString(path)) - URL::class -> URL(getString(path)) - UUID::class -> UUID.fromString(getString(path)) - CordaX500Name::class -> { - when (getValue(path).valueType()) { - ConfigValueType.OBJECT -> getConfig(path).parseAs(onUnknownKeys) - else -> CordaX500Name.parse(getString(path)) + return try { + when (typeClass) { + String::class -> getString(path) + Int::class -> getInt(path) + Long::class -> getLong(path) + Double::class -> getDouble(path) + Boolean::class -> getBoolean(path) + LocalDate::class -> LocalDate.parse(getString(path)) + Duration::class -> getDuration(path) + Instant::class -> Instant.parse(getString(path)) + NetworkHostAndPort::class -> NetworkHostAndPort.parse(getString(path)) + Path::class -> Paths.get(getString(path)) + URL::class -> URL(getString(path)) + UUID::class -> UUID.fromString(getString(path)) + CordaX500Name::class -> { + when (getValue(path).valueType()) { + ConfigValueType.OBJECT -> getConfig(path).parseAs(onUnknownKeys) + else -> CordaX500Name.parse(getString(path)) + } + } + Properties::class -> getConfig(path).toProperties() + Config::class -> getConfig(path) + else -> if (typeClass.java.isEnum) { + parseEnum(typeClass.java, getString(path)) + } else { + getConfig(path).parseAs(typeClass, onUnknownKeys, nestedPath?.let { "$it.$path" } ?: path) } } - Properties::class -> getConfig(path).toProperties() - Config::class -> getConfig(path) - else -> if (typeClass.java.isEnum) { - parseEnum(typeClass.java, getString(path)) - } else { - getConfig(path).parseAs(typeClass, onUnknownKeys) - } + } catch (e: ConfigException.Missing) { + throw e.relative(path, nestedPath) } } -private fun 
Config.getCollectionValue(path: String, type: KType, onUnknownKeys: (Set, logger: Logger) -> Unit): Collection { +private fun ConfigException.Missing.relative(path: String, nestedPath: String?): ConfigException.Missing { + return when { + nestedPath != null -> throw ConfigException.Missing("$nestedPath.$path") + else -> this + } +} + +private fun Config.getCollectionValue(path: String, type: KType, onUnknownKeys: (Set, logger: Logger) -> Unit, nestedPath: String? = null): Collection { val typeClass = type.jvmErasure require(typeClass == List::class || typeClass == Set::class) { "$typeClass is not supported" } val elementClass = type.arguments[0].type?.jvmErasure ?: throw IllegalArgumentException("Cannot work with star projection: $type") if (!hasPath(path)) { return if (typeClass == List::class) emptyList() else emptySet() } - val values: List = when (elementClass) { - String::class -> getStringList(path) - Int::class -> getIntList(path) - Long::class -> getLongList(path) - Double::class -> getDoubleList(path) - Boolean::class -> getBooleanList(path) - LocalDate::class -> getStringList(path).map(LocalDate::parse) - Instant::class -> getStringList(path).map(Instant::parse) - NetworkHostAndPort::class -> getStringList(path).map(NetworkHostAndPort.Companion::parse) - Path::class -> getStringList(path).map { Paths.get(it) } - URL::class -> getStringList(path).map(::URL) - UUID::class -> getStringList(path).map { UUID.fromString(it) } - CordaX500Name::class -> getStringList(path).map(CordaX500Name.Companion::parse) - Properties::class -> getConfigList(path).map(Config::toProperties) - else -> if (elementClass.java.isEnum) { - getStringList(path).map { parseEnum(elementClass.java, it) } - } else { - getConfigList(path).map { it.parseAs(elementClass, onUnknownKeys) } + val values: List = try { + when (elementClass) { + String::class -> getStringList(path) + Int::class -> getIntList(path) + Long::class -> getLongList(path) + Double::class -> getDoubleList(path) + Boolean::class -> getBooleanList(path) + LocalDate::class -> getStringList(path).map(LocalDate::parse) + Instant::class -> getStringList(path).map(Instant::parse) + NetworkHostAndPort::class -> getStringList(path).map(NetworkHostAndPort.Companion::parse) + Path::class -> getStringList(path).map { Paths.get(it) } + URL::class -> getStringList(path).map(::URL) + UUID::class -> getStringList(path).map { UUID.fromString(it) } + CordaX500Name::class -> getStringList(path).map(CordaX500Name.Companion::parse) + Properties::class -> getConfigList(path).map(Config::toProperties) + else -> if (elementClass.java.isEnum) { + getStringList(path).map { parseEnum(elementClass.java, it) } + } else { + getConfigList(path).map { it.parseAs(elementClass, onUnknownKeys) } + } } + } catch (e: ConfigException.Missing) { + throw e.relative(path, nestedPath) } return if (typeClass == Set::class) values.toSet() else values } diff --git a/node-api/src/main/kotlin/net/corda/nodeapi/internal/persistence/CordaPersistence.kt b/node-api/src/main/kotlin/net/corda/nodeapi/internal/persistence/CordaPersistence.kt index a9e0c7e0ab..5a6cc539f9 100644 --- a/node-api/src/main/kotlin/net/corda/nodeapi/internal/persistence/CordaPersistence.kt +++ b/node-api/src/main/kotlin/net/corda/nodeapi/internal/persistence/CordaPersistence.kt @@ -15,7 +15,6 @@ import net.corda.core.schemas.MappedSchema import net.corda.core.utilities.contextLogger import rx.Observable import rx.Subscriber -import rx.subjects.PublishSubject import rx.subjects.UnicastSubject import java.io.Closeable import 
java.sql.Connection @@ -81,9 +80,7 @@ class CordaPersistence( } val entityManagerFactory get() = hibernateConfig.sessionFactoryForRegisteredSchemas - data class Boundary(val txId: UUID) - - internal val transactionBoundaries = PublishSubject.create().toSerialized() + data class Boundary(val txId: UUID, val success: Boolean) init { // Found a unit test that was forgetting to close the database transactions. When you close() on the top level @@ -204,15 +201,19 @@ class CordaPersistence( * * For examples, see the call hierarchy of this function. */ -fun rx.Observer.bufferUntilDatabaseCommit(): rx.Observer { - val currentTxId = contextTransaction.id - val databaseTxBoundary: Observable = contextDatabase.transactionBoundaries.first { it.txId == currentTxId } +fun rx.Observer.bufferUntilDatabaseCommit(propagateRollbackAsError: Boolean = false): rx.Observer { + val currentTx = contextTransaction val subject = UnicastSubject.create() + val databaseTxBoundary: Observable = currentTx.boundary.filter { it.success } + if (propagateRollbackAsError) { + currentTx.boundary.filter { !it.success }.subscribe { this.onError(DatabaseTransactionRolledBackException(it.txId)) } + } subject.delaySubscription(databaseTxBoundary).subscribe(this) - databaseTxBoundary.doOnCompleted { subject.onCompleted() } return subject } +class DatabaseTransactionRolledBackException(txId: UUID) : Exception("Database transaction $txId was rolled back") + // A subscriber that delegates to multiple others, wrapping a database transaction around the combination. private class DatabaseTransactionWrappingSubscriber(private val db: CordaPersistence?) : Subscriber() { // Some unsubscribes happen inside onNext() so need something that supports concurrent modification. diff --git a/node-api/src/main/kotlin/net/corda/nodeapi/internal/persistence/DatabaseTransaction.kt b/node-api/src/main/kotlin/net/corda/nodeapi/internal/persistence/DatabaseTransaction.kt index 63b93b91f1..7fba9292b2 100644 --- a/node-api/src/main/kotlin/net/corda/nodeapi/internal/persistence/DatabaseTransaction.kt +++ b/node-api/src/main/kotlin/net/corda/nodeapi/internal/persistence/DatabaseTransaction.kt @@ -13,6 +13,7 @@ package net.corda.nodeapi.internal.persistence import co.paralleluniverse.strands.Strand import org.hibernate.Session import org.hibernate.Transaction +import rx.subjects.PublishSubject import java.sql.Connection import java.util.* @@ -51,6 +52,10 @@ class DatabaseTransaction( val session: Session by sessionDelegate private lateinit var hibernateTransaction: Transaction + + internal val boundary = PublishSubject.create() + private var committed = false + fun commit() { if (sessionDelegate.isInitialized()) { hibernateTransaction.commit() @@ -58,6 +63,7 @@ class DatabaseTransaction( if (_connectionCreated) { connection.commit() } + committed = true } fun rollback() { @@ -78,7 +84,15 @@ class DatabaseTransaction( } contextTransactionOrNull = outerTransaction if (outerTransaction == null) { - database.transactionBoundaries.onNext(CordaPersistence.Boundary(id)) + boundary.onNext(CordaPersistence.Boundary(id, committed)) } } + + fun onCommit(callback: () -> Unit) { + boundary.filter { it.success }.subscribe { callback() } + } + + fun onRollback(callback: () -> Unit) { + boundary.filter { !it.success }.subscribe { callback() } + } } diff --git a/node-api/src/main/kotlin/net/corda/nodeapi/internal/protonwrapper/netty/AMQPChannelHandler.kt b/node-api/src/main/kotlin/net/corda/nodeapi/internal/protonwrapper/netty/AMQPChannelHandler.kt index e39e263df6..bf7e5daae0 
100644 --- a/node-api/src/main/kotlin/net/corda/nodeapi/internal/protonwrapper/netty/AMQPChannelHandler.kt +++ b/node-api/src/main/kotlin/net/corda/nodeapi/internal/protonwrapper/netty/AMQPChannelHandler.kt @@ -54,6 +54,7 @@ internal class AMQPChannelHandler(private val serverMode: Boolean, private var remoteCert: X509Certificate? = null private var eventProcessor: EventProcessor? = null private var suppressClose: Boolean = false + private var badCert: Boolean = false override fun channelActive(ctx: ChannelHandlerContext) { val ch = ctx.channel() @@ -86,7 +87,7 @@ internal class AMQPChannelHandler(private val serverMode: Boolean, val ch = ctx.channel() log.info("Closed client connection ${ch.id()} from $remoteAddress to ${ch.localAddress()}") if (!suppressClose) { - onClose(Pair(ch as SocketChannel, ConnectionChange(remoteAddress, remoteCert, false))) + onClose(Pair(ch as SocketChannel, ConnectionChange(remoteAddress, remoteCert, false, badCert))) } eventProcessor?.close() ctx.fireChannelInactive() @@ -104,19 +105,22 @@ internal class AMQPChannelHandler(private val serverMode: Boolean, val remoteX500Name = try { CordaX500Name.build(remoteCert!!.subjectX500Principal) } catch (ex: IllegalArgumentException) { + badCert = true log.error("Certificate subject not a valid CordaX500Name", ex) ctx.close() return } if (allowedRemoteLegalNames != null && remoteX500Name !in allowedRemoteLegalNames) { + badCert = true log.error("Provided certificate subject $remoteX500Name not in expected set $allowedRemoteLegalNames") ctx.close() return } log.info("Handshake completed with subject: $remoteX500Name") createAMQPEngine(ctx) - onOpen(Pair(ctx.channel() as SocketChannel, ConnectionChange(remoteAddress, remoteCert, true))) + onOpen(Pair(ctx.channel() as SocketChannel, ConnectionChange(remoteAddress, remoteCert, true, false))) } else { + badCert = true log.error("Handshake failure ${evt.cause().message}") if (log.isTraceEnabled) { log.trace("Handshake failure", evt.cause()) diff --git a/node-api/src/main/kotlin/net/corda/nodeapi/internal/protonwrapper/netty/AMQPClient.kt b/node-api/src/main/kotlin/net/corda/nodeapi/internal/protonwrapper/netty/AMQPClient.kt index acd3e7378a..e5ea9e97b7 100644 --- a/node-api/src/main/kotlin/net/corda/nodeapi/internal/protonwrapper/netty/AMQPClient.kt +++ b/node-api/src/main/kotlin/net/corda/nodeapi/internal/protonwrapper/netty/AMQPClient.kt @@ -78,7 +78,7 @@ class AMQPClient(val targets: List, val log = contextLogger() const val MIN_RETRY_INTERVAL = 1000L - const val MAX_RETRY_INTERVAL = 60000L + const val MAX_RETRY_INTERVAL = 300000L const val BACKOFF_MULTIPLIER = 2L const val NUM_CLIENT_THREADS = 2 } @@ -93,9 +93,22 @@ class AMQPClient(val targets: List, private var targetIndex = 0 private var currentTarget: NetworkHostAndPort = targets.first() private var retryInterval = MIN_RETRY_INTERVAL + private val badCertTargets = mutableSetOf() private fun nextTarget() { - targetIndex = (targetIndex + 1).rem(targets.size) + val origIndex = targetIndex + targetIndex = -1 + for (offset in 1..targets.size) { + val newTargetIndex = (origIndex + offset).rem(targets.size) + if (targets[newTargetIndex] !in badCertTargets) { + targetIndex = newTargetIndex + break + } + } + if (targetIndex == -1) { + log.error("No targets have presented acceptable certificates for $allowedRemoteLegalNames. 
Halting retries") + return + } log.info("Retry connect to ${targets[targetIndex]}") retryInterval = min(MAX_RETRY_INTERVAL, retryInterval * BACKOFF_MULTIPLIER) } @@ -162,7 +175,8 @@ class AMQPClient(val targets: List, } } - val handler = createClientSslHelper(parent.currentTarget, keyManagerFactory, trustManagerFactory) + val target = parent.currentTarget + val handler = createClientSslHelper(target, keyManagerFactory, trustManagerFactory) pipeline.addLast("sslHandler", handler) if (parent.trace) pipeline.addLast("logger", LoggingHandler(LogLevel.INFO)) pipeline.addLast(AMQPChannelHandler(false, @@ -174,7 +188,13 @@ class AMQPClient(val targets: List, parent.retryInterval = MIN_RETRY_INTERVAL // reset to fast reconnect if we connect properly parent._onConnection.onNext(it.second) }, - { parent._onConnection.onNext(it.second) }, + { + parent._onConnection.onNext(it.second) + if (it.second.badCert) { + log.error("Blocking future connection attempts to $target due to bad certificate on endpoint") + parent.badCertTargets += target + } + }, { rcv -> parent._onReceive.onNext(rcv) })) } } @@ -188,6 +208,9 @@ class AMQPClient(val targets: List, } private fun restart() { + if (targetIndex == -1) { + return + } val bootstrap = Bootstrap() // TODO Needs more configuration control when we profile. e.g. to use EPOLL on Linux bootstrap.group(workerGroup).channel(NioSocketChannel::class.java).handler(ClientChannelInitializer(this)) diff --git a/node-api/src/main/kotlin/net/corda/nodeapi/internal/protonwrapper/netty/ConnectionChange.kt b/node-api/src/main/kotlin/net/corda/nodeapi/internal/protonwrapper/netty/ConnectionChange.kt index 1de5b29548..d1f6a0e536 100644 --- a/node-api/src/main/kotlin/net/corda/nodeapi/internal/protonwrapper/netty/ConnectionChange.kt +++ b/node-api/src/main/kotlin/net/corda/nodeapi/internal/protonwrapper/netty/ConnectionChange.kt @@ -13,4 +13,4 @@ package net.corda.nodeapi.internal.protonwrapper.netty import java.net.InetSocketAddress import java.security.cert.X509Certificate -data class ConnectionChange(val remoteAddress: InetSocketAddress, val remoteCert: X509Certificate?, val connected: Boolean) \ No newline at end of file +data class ConnectionChange(val remoteAddress: InetSocketAddress, val remoteCert: X509Certificate?, val connected: Boolean, val badCert: Boolean) \ No newline at end of file diff --git a/node/build.gradle b/node/build.gradle index 02e9ad88c6..d3aea6fe0f 100644 --- a/node/build.gradle +++ b/node/build.gradle @@ -111,7 +111,7 @@ dependencies { compile "org.fusesource.jansi:jansi:$jansi_version" // Manifests: for reading stuff from the manifest file - compile "com.jcabi:jcabi-manifests:1.1" + compile "com.jcabi:jcabi-manifests:$jcabi_manifests_version" compile("com.intellij:forms_rt:7.0.3") { exclude group: "asm" diff --git a/node/src/integration-test/kotlin/net/corda/node/flows/FlowRetryTest.kt b/node/src/integration-test/kotlin/net/corda/node/flows/FlowRetryTest.kt new file mode 100644 index 0000000000..40bdb29444 --- /dev/null +++ b/node/src/integration-test/kotlin/net/corda/node/flows/FlowRetryTest.kt @@ -0,0 +1,158 @@ +package net.corda.node.flows + +import co.paralleluniverse.fibers.Suspendable +import net.corda.client.rpc.CordaRPCClient +import net.corda.core.flows.* +import net.corda.core.identity.Party +import net.corda.core.messaging.startFlow +import net.corda.core.serialization.CordaSerializable +import net.corda.core.utilities.ProgressTracker +import net.corda.core.utilities.getOrThrow +import net.corda.core.utilities.unwrap +import 
net.corda.node.services.Permissions +import net.corda.testing.core.singleIdentity +import net.corda.testing.driver.DriverParameters +import net.corda.testing.driver.driver +import net.corda.testing.driver.internal.RandomFree +import net.corda.testing.node.User +import org.junit.Before +import org.junit.Test +import java.lang.management.ManagementFactory +import java.sql.SQLException +import java.util.* +import kotlin.test.assertEquals +import kotlin.test.assertNotNull + + +class FlowRetryTest { + @Before + fun resetCounters() { + InitiatorFlow.seen.clear() + InitiatedFlow.seen.clear() + } + + @Test + fun `flows continue despite errors`() { + val numSessions = 2 + val numIterations = 10 + val user = User("mark", "dadada", setOf(Permissions.startFlow())) + val result: Any? = driver(DriverParameters(isDebug = true, startNodesInProcess = isQuasarAgentSpecified(), + portAllocation = RandomFree)) { + + val nodeAHandle = startNode(rpcUsers = listOf(user)).getOrThrow() + val nodeBHandle = startNode(rpcUsers = listOf(user)).getOrThrow() + + val result = CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use { + it.proxy.startFlow(::InitiatorFlow, numSessions, numIterations, nodeBHandle.nodeInfo.singleIdentity()).returnValue.getOrThrow() + } + result + } + assertNotNull(result) + assertEquals("$numSessions:$numIterations", result) + } +} + +fun isQuasarAgentSpecified(): Boolean { + val jvmArgs = ManagementFactory.getRuntimeMXBean().inputArguments + return jvmArgs.any { it.startsWith("-javaagent:") && it.contains("quasar") } +} + +class ExceptionToCauseRetry : SQLException("deadlock") + +@StartableByRPC +@InitiatingFlow +class InitiatorFlow(private val sessionsCount: Int, private val iterationsCount: Int, private val other: Party) : FlowLogic() { + companion object { + object FIRST_STEP : ProgressTracker.Step("Step one") + + fun tracker() = ProgressTracker(FIRST_STEP) + + val seen = Collections.synchronizedSet(HashSet()) + + fun visit(sessionNum: Int, iterationNum: Int, step: Step) { + val visited = Visited(sessionNum, iterationNum, step) + if (visited !in seen) { + seen += visited + throw ExceptionToCauseRetry() + } + } + } + + override val progressTracker = tracker() + + @Suspendable + override fun call(): Any { + progressTracker.currentStep = FIRST_STEP + var received: Any? = null + visit(-1, -1, Step.First) + for (sessionNum in 1..sessionsCount) { + visit(sessionNum, -1, Step.BeforeInitiate) + val session = initiateFlow(other) + visit(sessionNum, -1, Step.AfterInitiate) + session.send(SessionInfo(sessionNum, iterationsCount)) + visit(sessionNum, -1, Step.AfterInitiateSendReceive) + for (iteration in 1..iterationsCount) { + visit(sessionNum, iteration, Step.BeforeSend) + logger.info("A Sending $sessionNum:$iteration") + session.send("$sessionNum:$iteration") + visit(sessionNum, iteration, Step.AfterSend) + received = session.receive().unwrap { it } + visit(sessionNum, iteration, Step.AfterReceive) + logger.info("A Got $sessionNum:$iteration") + } + doSleep() + } + return received!! + } + + // This non-flow-friendly sleep triggered a bug with session end messages and non-retryable checkpoints. 
+ private fun doSleep() { + Thread.sleep(2000) + } +} + +@InitiatedBy(InitiatorFlow::class) +class InitiatedFlow(val session: FlowSession) : FlowLogic() { + companion object { + object FIRST_STEP : ProgressTracker.Step("Step one") + + fun tracker() = ProgressTracker(FIRST_STEP) + + val seen = Collections.synchronizedSet(HashSet()) + + fun visit(sessionNum: Int, iterationNum: Int, step: Step) { + val visited = Visited(sessionNum, iterationNum, step) + if (visited !in seen) { + seen += visited + throw ExceptionToCauseRetry() + } + } + } + + override val progressTracker = tracker() + + @Suspendable + override fun call() { + progressTracker.currentStep = FIRST_STEP + visit(-1, -1, Step.AfterInitiate) + val sessionInfo = session.receive().unwrap { it } + visit(sessionInfo.sessionNum, -1, Step.AfterInitiateSendReceive) + for (iteration in 1..sessionInfo.iterationsCount) { + visit(sessionInfo.sessionNum, iteration, Step.BeforeReceive) + val got = session.receive().unwrap { it } + visit(sessionInfo.sessionNum, iteration, Step.AfterReceive) + logger.info("B Got $got") + logger.info("B Sending $got") + visit(sessionInfo.sessionNum, iteration, Step.BeforeSend) + session.send(got) + visit(sessionInfo.sessionNum, iteration, Step.AfterSend) + } + } +} + +@CordaSerializable +data class SessionInfo(val sessionNum: Int, val iterationsCount: Int) + +enum class Step { First, BeforeInitiate, AfterInitiate, AfterInitiateSendReceive, BeforeSend, AfterSend, BeforeReceive, AfterReceive } + +data class Visited(val sessionNum: Int, val iterationNum: Int, val step: Step) \ No newline at end of file diff --git a/node/src/integration-test/kotlin/net/corda/node/services/events/ScheduledFlowIntegrationTests.kt b/node/src/integration-test/kotlin/net/corda/node/services/events/ScheduledFlowIntegrationTests.kt index e2f954cec3..ddf20d700c 100644 --- a/node/src/integration-test/kotlin/net/corda/node/services/events/ScheduledFlowIntegrationTests.kt +++ b/node/src/integration-test/kotlin/net/corda/node/services/events/ScheduledFlowIntegrationTests.kt @@ -25,6 +25,7 @@ import net.corda.core.node.services.vault.QueryCriteria import net.corda.core.transactions.TransactionBuilder import net.corda.core.utilities.NonEmptySet import net.corda.core.utilities.getOrThrow +import net.corda.core.utilities.seconds import net.corda.testMessage.ScheduledState import net.corda.testMessage.SpentState import net.corda.testing.contracts.DummyContract @@ -110,7 +111,7 @@ class ScheduledFlowIntegrationTests : IntegrationTest() { val aliceClient = CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password) val bobClient = CordaRPCClient(bob.rpcAddress).start(rpcUser.username, rpcUser.password) - val scheduledFor = Instant.now().plusSeconds(20) + val scheduledFor = Instant.now().plusSeconds(10) val initialiseFutures = mutableListOf>() for (i in 0 until N) { initialiseFutures.add(aliceClient.proxy.startFlow(::InsertInitialStateFlow, bob.nodeInfo.legalIdentities.first(), defaultNotaryIdentity, i, scheduledFor).returnValue) @@ -125,6 +126,9 @@ class ScheduledFlowIntegrationTests : IntegrationTest() { } spendAttemptFutures.getOrThrowAll() + // TODO: the queries below are not atomic so we need to allow enough time for the scheduler to finish. Would be better to query scheduler. 
+ Thread.sleep(20.seconds.toMillis()) + val aliceStates = aliceClient.proxy.vaultQuery(ScheduledState::class.java).states.filter { it.state.data.processed } val aliceSpentStates = aliceClient.proxy.vaultQuery(SpentState::class.java).states diff --git a/node/src/integration-test/kotlin/net/corda/node/services/network/NetworkMapTest.kt b/node/src/integration-test/kotlin/net/corda/node/services/network/NetworkMapTest.kt index aff9765263..42cd453b6d 100644 --- a/node/src/integration-test/kotlin/net/corda/node/services/network/NetworkMapTest.kt +++ b/node/src/integration-test/kotlin/net/corda/node/services/network/NetworkMapTest.kt @@ -30,27 +30,23 @@ import net.corda.testing.driver.internal.RandomFree import net.corda.testing.internal.IntegrationTest import net.corda.testing.internal.IntegrationTestSchemas import net.corda.testing.internal.toDatabaseSchemaName -import net.corda.testing.node.internal.CompatibilityZoneParams -import net.corda.testing.node.internal.internalDriver +import net.corda.testing.node.internal.* import net.corda.testing.node.internal.network.NetworkMapServer import net.corda.testing.node.internal.startNode import org.assertj.core.api.Assertions.assertThat import org.assertj.core.api.Assertions.assertThatThrownBy import org.junit.* import org.junit.Assert.assertEquals +import org.junit.Before +import org.junit.Rule +import org.junit.Test +import org.junit.runner.RunWith +import org.junit.runners.Parameterized import java.net.URL import java.time.Instant -class NetworkMapTest : IntegrationTest() { - companion object { - @ClassRule - @JvmField - val databaseSchemas = IntegrationTestSchemas( - ALICE_NAME.toDatabaseSchemaName(), - BOB_NAME.toDatabaseSchemaName(), - DUMMY_NOTARY_NAME.toDatabaseSchemaName()) - } - +@RunWith(Parameterized::class) +class NetworkMapTest(var initFunc: (URL, NetworkMapServer) -> CompatibilityZoneParams) : IntegrationTest() { @Rule @JvmField val testSerialization = SerializationEnvironmentRule(true) @@ -61,13 +57,44 @@ class NetworkMapTest : IntegrationTest() { private lateinit var networkMapServer: NetworkMapServer private lateinit var compatibilityZone: CompatibilityZoneParams + companion object { + @ClassRule + @JvmField + val databaseSchemas = IntegrationTestSchemas( + ALICE_NAME.toDatabaseSchemaName(), + BOB_NAME.toDatabaseSchemaName(), + DUMMY_NOTARY_NAME.toDatabaseSchemaName()) + + @JvmStatic + @Parameterized.Parameters(name = "{0}") + fun runParams() = listOf( + { addr: URL, nms: NetworkMapServer -> + SharedCompatibilityZoneParams( + addr, + publishNotaries = { + nms.networkParameters = testNetworkParameters(it, modifiedTime = Instant.ofEpochMilli(random63BitValue()), epoch = 2) + } + ) + }, + { addr: URL, nms: NetworkMapServer -> + SplitCompatibilityZoneParams( + doormanURL = URL("http://I/Don't/Exist"), + networkMapURL = addr, + publishNotaries = { + nms.networkParameters = testNetworkParameters(it, modifiedTime = Instant.ofEpochMilli(random63BitValue()), epoch = 2) + } + ) + } + + ) + } + + @Before fun start() { networkMapServer = NetworkMapServer(cacheTimeout, portAllocation.nextHostAndPort()) val address = networkMapServer.start() - compatibilityZone = CompatibilityZoneParams(URL("http://$address"), publishNotaries = { - networkMapServer.networkParameters = testNetworkParameters(it, modifiedTime = Instant.ofEpochMilli(random63BitValue()), epoch = 2) - }) + compatibilityZone = initFunc(URL("http://$address"), networkMapServer) } @After diff --git 
a/node/src/integration-test/kotlin/net/corda/node/utilities/registration/NodeRegistrationTest.kt b/node/src/integration-test/kotlin/net/corda/node/utilities/registration/NodeRegistrationTest.kt index 6ff39284a5..7235b6d0b3 100644 --- a/node/src/integration-test/kotlin/net/corda/node/utilities/registration/NodeRegistrationTest.kt +++ b/node/src/integration-test/kotlin/net/corda/node/utilities/registration/NodeRegistrationTest.kt @@ -33,6 +33,7 @@ import net.corda.testing.internal.IntegrationTest import net.corda.testing.internal.IntegrationTestSchemas import net.corda.testing.node.NotarySpec import net.corda.testing.node.internal.CompatibilityZoneParams +import net.corda.testing.node.internal.SharedCompatibilityZoneParams import net.corda.testing.node.internal.internalDriver import net.corda.testing.node.internal.network.NetworkMapServer import org.assertj.core.api.Assertions.assertThat @@ -92,7 +93,7 @@ class NodeRegistrationTest : IntegrationTest() { @Test fun `node registration correct root cert`() { - val compatibilityZone = CompatibilityZoneParams( + val compatibilityZone = SharedCompatibilityZoneParams( URL("http://$serverHostAndPort"), publishNotaries = { server.networkParameters = testNetworkParameters(it) }, rootCert = DEV_ROOT_CA.certificate) diff --git a/node/src/main/kotlin/net/corda/node/NodeArgsParser.kt b/node/src/main/kotlin/net/corda/node/NodeArgsParser.kt index 1d5338ed6e..0ed374b0bc 100644 --- a/node/src/main/kotlin/net/corda/node/NodeArgsParser.kt +++ b/node/src/main/kotlin/net/corda/node/NodeArgsParser.kt @@ -135,11 +135,11 @@ data class CmdLineOptions(val baseDirectory: Path, if (devMode) mapOf("devMode" to this.devMode) else emptyMap()) ) return rawConfig to Try.on { - rawConfig.parseAsNodeConfiguration(unknownConfigKeysPolicy::handle).also { + rawConfig.parseAsNodeConfiguration(unknownConfigKeysPolicy::handle).also { config -> if (nodeRegistrationOption != null) { - require(!it.devMode) { "registration cannot occur in devMode" } - requireNotNull(it.compatibilityZoneURL) { - "compatibilityZoneURL must be present in node configuration file in registration mode." + require(!config.devMode) { "registration cannot occur in devMode" } + require(config.compatibilityZoneURL != null || config.networkServices != null) { + "compatibilityZoneURL or networkServices must be present in the node configuration file in registration mode." 
} } } diff --git a/node/src/main/kotlin/net/corda/node/internal/AbstractNode.kt b/node/src/main/kotlin/net/corda/node/internal/AbstractNode.kt index 8ac113ef0d..0727697392 100644 --- a/node/src/main/kotlin/net/corda/node/internal/AbstractNode.kt +++ b/node/src/main/kotlin/net/corda/node/internal/AbstractNode.kt @@ -303,7 +303,7 @@ abstract class AbstractNode(val configuration: NodeConfiguration, val (identity, identityKeyPair) = obtainIdentity(notaryConfig = null) val identityService = makeIdentityService(identity.certificate) - networkMapClient = configuration.compatibilityZoneURL?.let { NetworkMapClient(it, identityService.trustRoot) } + networkMapClient = configuration.networkServices?.let { NetworkMapClient(it.networkMapURL, identityService.trustRoot) } val networkParameters = NetworkParametersReader(identityService.trustRoot, networkMapClient, configuration.baseDirectory).networkParameters check(networkParameters.minimumPlatformVersion <= versionInfo.platformVersion) { @@ -1023,8 +1023,37 @@ internal fun logVendorString(database: CordaPersistence, log: Logger) { } internal class FlowStarterImpl(private val smm: StateMachineManager, private val flowLogicRefFactory: FlowLogicRefFactory) : FlowStarter { - override fun startFlow(logic: FlowLogic, context: InvocationContext, deduplicationHandler: DeduplicationHandler?): CordaFuture> { - return smm.startFlow(logic, context, ourIdentity = null, deduplicationHandler = deduplicationHandler) + override fun startFlow(event: ExternalEvent.ExternalStartFlowEvent): CordaFuture> { + smm.deliverExternalEvent(event) + return event.future + } + + override fun startFlow(logic: FlowLogic, context: InvocationContext): CordaFuture> { + val startFlowEvent = object : ExternalEvent.ExternalStartFlowEvent, DeduplicationHandler { + override fun insideDatabaseTransaction() {} + + override fun afterDatabaseTransaction() {} + + override val externalCause: ExternalEvent + get() = this + override val deduplicationHandler: DeduplicationHandler + get() = this + + override val flowLogic: FlowLogic + get() = logic + override val context: InvocationContext + get() = context + + override fun wireUpFuture(flowFuture: CordaFuture>) { + _future.captureLater(flowFuture) + } + + private val _future = openFuture>() + override val future: CordaFuture> + get() = _future + + } + return startFlow(startFlowEvent) } override fun invokeFlowAsync( diff --git a/node/src/main/kotlin/net/corda/node/internal/NodeStartup.kt b/node/src/main/kotlin/net/corda/node/internal/NodeStartup.kt index 68c99969cd..99b909ef6f 100644 --- a/node/src/main/kotlin/net/corda/node/internal/NodeStartup.kt +++ b/node/src/main/kotlin/net/corda/node/internal/NodeStartup.kt @@ -224,7 +224,9 @@ open class NodeStartup(val args: Array) { } protected open fun registerWithNetwork(conf: NodeConfiguration, nodeRegistrationConfig: NodeRegistrationOption) { - val compatibilityZoneURL = conf.compatibilityZoneURL!! 
+ val compatibilityZoneURL = conf.networkServices?.doormanURL ?: throw RuntimeException( + "compatibilityZoneURL or networkServices must be configured!") + println() println("******************************************************************") println("* *") diff --git a/node/src/main/kotlin/net/corda/node/serialization/kryo/CordaClassResolver.kt b/node/src/main/kotlin/net/corda/node/serialization/kryo/CordaClassResolver.kt index 17692a6a47..d342057e51 100644 --- a/node/src/main/kotlin/net/corda/node/serialization/kryo/CordaClassResolver.kt +++ b/node/src/main/kotlin/net/corda/node/serialization/kryo/CordaClassResolver.kt @@ -18,13 +18,12 @@ import com.esotericsoftware.kryo.util.DefaultClassResolver import com.esotericsoftware.kryo.util.Util import net.corda.core.internal.writer import net.corda.core.serialization.ClassWhitelist -import net.corda.core.serialization.CordaSerializable import net.corda.core.serialization.SerializationContext import net.corda.core.utilities.contextLogger import net.corda.serialization.internal.AttachmentsClassLoader import net.corda.serialization.internal.MutableClassWhitelist import net.corda.serialization.internal.TransientClassWhiteList -import net.corda.serialization.internal.amqp.hasAnnotationInHierarchy +import net.corda.serialization.internal.amqp.hasCordaSerializable import java.io.PrintWriter import java.lang.reflect.Modifier import java.lang.reflect.Modifier.isAbstract @@ -137,7 +136,7 @@ class CordaClassResolver(serializationContext: SerializationContext) : DefaultCl return (type.classLoader !is AttachmentsClassLoader) && !KryoSerializable::class.java.isAssignableFrom(type) && !type.isAnnotationPresent(DefaultSerializer::class.java) - && (type.isAnnotationPresent(CordaSerializable::class.java) || whitelist.hasAnnotationInHierarchy(type)) + && hasCordaSerializable(type) } // Need to clear out class names from attachments. diff --git a/node/src/main/kotlin/net/corda/node/services/api/CheckpointStorage.kt b/node/src/main/kotlin/net/corda/node/services/api/CheckpointStorage.kt index 7b25b78122..bddbbc7e54 100644 --- a/node/src/main/kotlin/net/corda/node/services/api/CheckpointStorage.kt +++ b/node/src/main/kotlin/net/corda/node/services/api/CheckpointStorage.kt @@ -30,6 +30,12 @@ interface CheckpointStorage { */ fun removeCheckpoint(id: StateMachineRunId): Boolean + /** + * Load an existing checkpoint from the store. + * @return the checkpoint, still in serialized form, or null if not found. + */ + fun getCheckpoint(id: StateMachineRunId): SerializedBytes? + /** * Stream all checkpoints from the store. If this is backed by a database the stream will be valid until the * underlying database connection is closed, so any processing should happen before it is closed. 
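A minimal sketch of how the new getCheckpoint accessor above might be used by the retry machinery introduced later in this change (RetryFlowFromSafePoint). The helper name and the choice of the default checkpoint serialization context are assumptions for illustration only, not part of the patch:

    import net.corda.core.flows.StateMachineRunId
    import net.corda.core.serialization.SerializationDefaults
    import net.corda.core.serialization.deserialize
    import net.corda.node.services.api.CheckpointStorage
    import net.corda.node.services.statemachine.Checkpoint

    // Hypothetical helper: fetch a single flow's serialized checkpoint and deserialize it.
    // Returns null when the flow has no persisted checkpoint, in which case the retry path
    // falls back to restarting the flow from its original invocation details.
    fun loadCheckpoint(storage: CheckpointStorage, id: StateMachineRunId): Checkpoint? {
        val serialized = storage.getCheckpoint(id) ?: return null
        // Assumes the default checkpoint context; the node wires in its own serialization context.
        return serialized.deserialize(context = SerializationDefaults.CHECKPOINT_CONTEXT)
    }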
diff --git a/node/src/main/kotlin/net/corda/node/services/api/ServiceHubInternal.kt b/node/src/main/kotlin/net/corda/node/services/api/ServiceHubInternal.kt index 62381d8272..8239c4b36d 100644 --- a/node/src/main/kotlin/net/corda/node/services/api/ServiceHubInternal.kt +++ b/node/src/main/kotlin/net/corda/node/services/api/ServiceHubInternal.kt @@ -29,9 +29,9 @@ import net.corda.core.utilities.contextLogger import net.corda.node.internal.InitiatedFlowFactory import net.corda.node.internal.cordapp.CordappProviderInternal import net.corda.node.services.config.NodeConfiguration -import net.corda.node.services.messaging.DeduplicationHandler import net.corda.node.services.messaging.MessagingService import net.corda.node.services.network.NetworkMapUpdater +import net.corda.node.services.statemachine.ExternalEvent import net.corda.node.services.statemachine.FlowStateMachineImpl import net.corda.nodeapi.internal.persistence.CordaPersistence @@ -144,11 +144,17 @@ interface ServiceHubInternal : ServiceHub { interface FlowStarter { /** - * Starts an already constructed flow. Note that you must be on the server thread to call this method. + * Starts an already constructed flow. Note that you must be on the server thread to call this method. This method + * just synthesizes an [ExternalEvent.ExternalStartFlowEvent] and calls the method below. * @param context indicates who started the flow, see: [InvocationContext]. - * @param deduplicationHandler allows exactly-once start of the flow, see [DeduplicationHandler] */ - fun startFlow(logic: FlowLogic, context: InvocationContext, deduplicationHandler: DeduplicationHandler? = null): CordaFuture> + fun startFlow(logic: FlowLogic, context: InvocationContext): CordaFuture> + + /** + * Starts a flow as described by an [ExternalEvent.ExternalStartFlowEvent]. If a transient error + * occurs during invocation, it will re-attempt to start the flow. + */ + fun startFlow(event: ExternalEvent.ExternalStartFlowEvent): CordaFuture> /** * Will check [logicType] and [args] against a whitelist and if acceptable then construct and initiate the flow. diff --git a/node/src/main/kotlin/net/corda/node/services/config/NodeConfiguration.kt b/node/src/main/kotlin/net/corda/node/services/config/NodeConfiguration.kt index 502aa4cae8..87b350994f 100644 --- a/node/src/main/kotlin/net/corda/node/services/config/NodeConfiguration.kt +++ b/node/src/main/kotlin/net/corda/node/services/config/NodeConfiguration.kt @@ -11,6 +11,7 @@ package net.corda.node.services.config import com.typesafe.config.Config +import com.typesafe.config.ConfigException import net.corda.core.context.AuthServiceId import net.corda.core.identity.CordaX500Name import net.corda.core.internal.div @@ -46,6 +47,7 @@ interface NodeConfiguration : NodeSSLConfiguration { val devMode: Boolean val devModeOptions: DevModeOptions? val compatibilityZoneURL: URL? + val networkServices: NetworkServicesConfig? val certificateChainCheckPolicies: List val verifierType: VerifierType val p2pMessagingRetry: P2PMessagingRetryConfiguration @@ -58,6 +60,7 @@ interface NodeConfiguration : NodeSSLConfiguration { val enterpriseConfiguration: EnterpriseConfiguration // TODO Move into DevModeOptions val useTestClock: Boolean get() = false + val lazyBridgeStart: Boolean val detectPublicIp: Boolean get() = true val sshd: SSHDConfiguration? 
val database: DatabaseConfig @@ -168,6 +171,25 @@ data class BFTSMaRtConfiguration( } } +/** + * Used as an alternative to the older compatibilityZoneURL to allow the doorman and network map + * services for a node to be configured as different URLs. Cannot be set at the same time as the + * compatibilityZoneURL, and will be defaulted (if not set) to both point at the configured + * compatibilityZoneURL. + * + * @property doormanURL The URL of the TLS certificate signing service. + * @property networkMapURL The URL of the Network Map service. + * @property inferred Non-user setting that indicates whether the Network Services configuration was + * set explicitly ([inferred] == false) or whether it has been inferred via the compatibilityZoneURL parameter + * ([inferred] == true) where both the network map and doorman are running on the same endpoint. Only one, + * compatibilityZoneURL or networkServices, can be set at any one time. + */ +data class NetworkServicesConfig( + val doormanURL: URL, + val networkMapURL: URL, + val inferred : Boolean = false +) + /** * Currently only used for notarisation requests. * @@ -193,6 +215,7 @@ data class NodeConfigurationImpl( override val crlCheckSoftFail: Boolean, override val dataSourceProperties: Properties, override val compatibilityZoneURL: URL? = null, + override var networkServices: NetworkServicesConfig? = null, override val tlsCertCrlDistPoint: URL? = null, override val tlsCertCrlIssuer: String? = null, override val rpcUsers: List, @@ -215,6 +238,7 @@ data class NodeConfigurationImpl( override val noLocalShell: Boolean = false, override val devModeOptions: DevModeOptions? = null, override val useTestClock: Boolean = false, + override val lazyBridgeStart: Boolean = true, override val detectPublicIp: Boolean = true, // TODO See TODO above. Rename this to nodeInfoPollingFrequency and make it of type Duration override val additionalNodeInfoPollingFrequencyMsec: Long = 5.seconds.toMillis(), @@ -241,9 +265,13 @@ data class NodeConfigurationImpl( explicitAddress != null -> { require(settings.address == null) { "Can't provide top-level rpcAddress and rpcSettings.address (they control the same property)." } logger.warn("Top-level declaration of property 'rpcAddress' is deprecated. Please use 'rpcSettings.address' instead.") + settings.copy(address = explicitAddress) } - else -> settings + else -> { + settings.address ?: throw ConfigException.Missing("rpcSettings.address") + settings + } }.asOptions(fallbackSslOptions) } @@ -270,6 +298,7 @@ data class NodeConfigurationImpl( errors += validateDevModeOptions() errors += validateRpcOptions(rpcOptions) errors += validateTlsCertCrlConfig() + errors += validateNetworkServices() return errors } @@ -284,12 +313,28 @@ data class NodeConfigurationImpl( private fun validateDevModeOptions(): List { - val errors = mutableListOf() if (devMode) { compatibilityZoneURL?.let { - errors += "'compatibilityZoneURL': present. Property cannot be set when 'devMode' is true." + return listOf("'compatibilityZoneURL': present. Property cannot be set when 'devMode' is true.") + } + + // if compatibilityZoneURL is set then it will be copied into the networkServices field and thus skipping + // this check by returning above is fine. + networkServices?.let { + return listOf("'networkServices': present.
Property cannot be set when 'devMode' is true.") } } + + return emptyList() + } + + private fun validateNetworkServices(): List { + val errors = mutableListOf() + + if (compatibilityZoneURL != null && networkServices != null && !(networkServices!!.inferred)) { + errors += "Cannot configure both compatibilityZoneUrl and networkServices simultaneously" + } + return errors } @@ -336,20 +381,24 @@ data class NodeConfigurationImpl( |Please contact the R3 team on the public slack to discuss your use case. """.trimMargin()) } + + if (compatibilityZoneURL != null && networkServices == null) { + networkServices = NetworkServicesConfig(compatibilityZoneURL, compatibilityZoneURL, true) + } } } data class NodeRpcSettings( - val address: NetworkHostAndPort, - val adminAddress: NetworkHostAndPort, + val address: NetworkHostAndPort?, + val adminAddress: NetworkHostAndPort?, val standAloneBroker: Boolean = false, val useSsl: Boolean = false, val ssl: BrokerRpcSslOptions? ) { fun asOptions(fallbackSslOptions: BrokerRpcSslOptions): NodeRpcOptions { return object : NodeRpcOptions { - override val address = this@NodeRpcSettings.address - override val adminAddress = this@NodeRpcSettings.adminAddress + override val address = this@NodeRpcSettings.address!! + override val adminAddress = this@NodeRpcSettings.adminAddress!! override val standAloneBroker = this@NodeRpcSettings.standAloneBroker override val useSsl = this@NodeRpcSettings.useSsl override val sslConfig = this@NodeRpcSettings.ssl ?: fallbackSslOptions diff --git a/node/src/main/kotlin/net/corda/node/services/events/NodeSchedulerService.kt b/node/src/main/kotlin/net/corda/node/services/events/NodeSchedulerService.kt index 1277346ce0..b0d5095ad4 100644 --- a/node/src/main/kotlin/net/corda/node/services/events/NodeSchedulerService.kt +++ b/node/src/main/kotlin/net/corda/node/services/events/NodeSchedulerService.kt @@ -12,6 +12,7 @@ package net.corda.node.services.events import co.paralleluniverse.fibers.Suspendable import com.google.common.util.concurrent.ListenableFuture +import net.corda.core.concurrent.CordaFuture import net.corda.core.context.InvocationContext import net.corda.core.context.InvocationOrigin import net.corda.core.contracts.SchedulableState @@ -20,11 +21,9 @@ import net.corda.core.contracts.ScheduledStateRef import net.corda.core.contracts.StateRef import net.corda.core.flows.FlowLogic import net.corda.core.flows.FlowLogicRefFactory -import net.corda.core.internal.ThreadBox -import net.corda.core.internal.VisibleForTesting +import net.corda.core.internal.* import net.corda.core.internal.concurrent.flatMap -import net.corda.core.internal.join -import net.corda.core.internal.until +import net.corda.core.internal.concurrent.openFuture import net.corda.core.node.ServicesForResolution import net.corda.core.schemas.PersistentStateRef import net.corda.core.serialization.SingletonSerializeAsToken @@ -36,8 +35,10 @@ import net.corda.node.services.api.FlowStarter import net.corda.node.services.api.NodePropertiesStore import net.corda.node.services.api.SchedulerService import net.corda.node.services.messaging.DeduplicationHandler +import net.corda.node.services.statemachine.ExternalEvent import net.corda.nodeapi.internal.persistence.CordaPersistence import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX +import net.corda.nodeapi.internal.persistence.contextTransaction import org.apache.activemq.artemis.utils.ReusableLatch import org.apache.mina.util.ConcurrentHashSet import org.slf4j.Logger @@ -166,29 +167,31 @@ class 
NodeSchedulerService(private val clock: CordaClock, override fun scheduleStateActivity(action: ScheduledStateRef) { log.trace { "Schedule $action" } - if (!schedulerRepo.merge(action)) { - // Only increase the number of unfinished schedules if the state didn't already exist on the queue - unfinishedSchedules.countUp() - } - mutex.locked { - if (action.scheduledAt < nextScheduledAction?.scheduledAt ?: Instant.MAX) { - // We are earliest - rescheduleWakeUp() - } else if (action.ref == nextScheduledAction?.ref && action.scheduledAt != nextScheduledAction?.scheduledAt) { - // We were earliest but might not be any more - rescheduleWakeUp() + // Only increase the number of unfinished schedules if the state didn't already exist on the queue + val countUp = !schedulerRepo.merge(action) + contextTransaction.onCommit { + if (countUp) unfinishedSchedules.countUp() + mutex.locked { + if (action.scheduledAt < nextScheduledAction?.scheduledAt ?: Instant.MAX) { + // We are earliest + rescheduleWakeUp() + } else if (action.ref == nextScheduledAction?.ref && action.scheduledAt != nextScheduledAction?.scheduledAt) { + // We were earliest but might not be any more + rescheduleWakeUp() + } } } } override fun unscheduleStateActivity(ref: StateRef) { log.trace { "Unschedule $ref" } - if (startingStateRefs.all { it.ref != ref } && schedulerRepo.delete(ref)) { - unfinishedSchedules.countDown() - } - mutex.locked { - if (nextScheduledAction?.ref == ref) { - rescheduleWakeUp() + val countDown = startingStateRefs.all { it.ref != ref } && schedulerRepo.delete(ref) + contextTransaction.onCommit { + if (countDown) unfinishedSchedules.countDown() + mutex.locked { + if (nextScheduledAction?.ref == ref) { + rescheduleWakeUp() + } } } } @@ -237,7 +240,12 @@ class NodeSchedulerService(private val clock: CordaClock, schedulerTimerExecutor.join() } - private inner class FlowStartDeduplicationHandler(val scheduledState: ScheduledStateRef) : DeduplicationHandler { + private inner class FlowStartDeduplicationHandler(val scheduledState: ScheduledStateRef, override val flowLogic: FlowLogic, override val context: InvocationContext) : DeduplicationHandler, ExternalEvent.ExternalStartFlowEvent { + override val externalCause: ExternalEvent + get() = this + override val deduplicationHandler: FlowStartDeduplicationHandler + get() = this + override fun insideDatabaseTransaction() { schedulerRepo.delete(scheduledState.ref) } @@ -249,6 +257,18 @@ class NodeSchedulerService(private val clock: CordaClock, override fun toString(): String { return "${javaClass.simpleName}($scheduledState)" } + + override fun wireUpFuture(flowFuture: CordaFuture>) { + _future.captureLater(flowFuture) + val future = _future.flatMap { it.resultFuture } + future.then { + unfinishedSchedules.countDown() + } + } + + private val _future = openFuture>() + override val future: CordaFuture> + get() = _future } private fun onTimeReached(scheduledState: ScheduledStateRef) { @@ -260,11 +280,8 @@ class NodeSchedulerService(private val clock: CordaClock, flowName = scheduledFlow.javaClass.name // TODO refactor the scheduler to store and propagate the original invocation context val context = InvocationContext.newInstance(InvocationOrigin.Scheduled(scheduledState)) - val deduplicationHandler = FlowStartDeduplicationHandler(scheduledState) - val future = flowStarter.startFlow(scheduledFlow, context, deduplicationHandler).flatMap { it.resultFuture } - future.then { - unfinishedSchedules.countDown() - } + val startFlowEvent = FlowStartDeduplicationHandler(scheduledState, 
scheduledFlow, context) + flowStarter.startFlow(startFlowEvent) } } } catch (e: Exception) { diff --git a/node/src/main/kotlin/net/corda/node/services/messaging/Messaging.kt b/node/src/main/kotlin/net/corda/node/services/messaging/Messaging.kt index 7fbc77a9bc..a0311c0912 100644 --- a/node/src/main/kotlin/net/corda/node/services/messaging/Messaging.kt +++ b/node/src/main/kotlin/net/corda/node/services/messaging/Messaging.kt @@ -20,6 +20,8 @@ import net.corda.core.serialization.CordaSerializable import net.corda.core.serialization.serialize import net.corda.core.utilities.ByteSequence import net.corda.node.services.statemachine.DeduplicationId +import net.corda.node.services.statemachine.ExternalEvent +import net.corda.node.services.statemachine.SenderDeduplicationId import java.time.Instant import javax.annotation.concurrent.ThreadSafe @@ -35,6 +37,12 @@ import javax.annotation.concurrent.ThreadSafe */ @ThreadSafe interface MessagingService { + /** + * A unique identifier for this sender that changes whenever a node restarts. This is used in conjunction with a sequence + * number for message de-duplication at the recipient. + */ + val ourSenderUUID: String + /** * The provided function will be invoked for each received message whose topic and session matches. The callback * will run on the main server thread provided when the messaging service is constructed, and a database @@ -103,11 +111,12 @@ interface MessagingService { /** * Returns an initialised [Message] with the current time, etc, already filled in. * - * @param topicSession identifier for the topic and session the message is sent to. - * @param additionalProperties optional additional message headers. * @param topic identifier for the topic the message is sent to. + * @param data the payload for the message. + * @param deduplicationId optional message deduplication ID including sender identifier. + * @param additionalHeaders optional additional message headers. */ - fun createMessage(topic: String, data: ByteArray, deduplicationId: DeduplicationId = DeduplicationId.createRandom(newSecureRandom()), additionalHeaders: Map = emptyMap()): Message + fun createMessage(topic: String, data: ByteArray, deduplicationId: SenderDeduplicationId = SenderDeduplicationId(DeduplicationId.createRandom(newSecureRandom()), ourSenderUUID), additionalHeaders: Map = emptyMap()): Message /** Given information about either a specific node or a service returns its corresponding address */ fun getAddressOfParty(partyInfo: PartyInfo): MessageRecipients @@ -116,7 +125,7 @@ interface MessagingService { val myAddress: SingleMessageRecipient } -fun MessagingService.send(topicSession: String, payload: Any, to: MessageRecipients, deduplicationId: DeduplicationId = DeduplicationId.createRandom(newSecureRandom()), retryId: Long? = null, additionalHeaders: Map = emptyMap()) = send(createMessage(topicSession, payload.serialize().bytes, deduplicationId, additionalHeaders), to, retryId) +fun MessagingService.send(topicSession: String, payload: Any, to: MessageRecipients, deduplicationId: SenderDeduplicationId = SenderDeduplicationId(DeduplicationId.createRandom(newSecureRandom()), ourSenderUUID), retryId: Long? = null, additionalHeaders: Map = emptyMap()) = send(createMessage(topicSession, payload.serialize().bytes, deduplicationId, additionalHeaders), to, retryId) interface MessageHandlerRegistration @@ -162,15 +171,17 @@ object TopicStringValidator { } /** - * This handler is used to implement exactly-once delivery of an event on top of a possibly duplicated one. 
This is done + * This handler is used to implement exactly-once delivery of an external event on top of an at-least-once delivery. This is done * using two hooks that are called from the event processor, one called from the database transaction committing the - * side-effect caused by the event, and another one called after the transaction has committed successfully. + * side-effect caused by the external event, and another one called after the transaction has committed successfully. * * For example for messaging we can use [insideDatabaseTransaction] to store the message's unique ID for later * deduplication, and [afterDatabaseTransaction] to acknowledge the message and stop retries. * * We also use this for exactly-once start of a scheduled flow, [insideDatabaseTransaction] is used to remove the * to-be-scheduled state of the flow, [afterDatabaseTransaction] is used for cleanup of in-memory bookkeeping. + * + * It holds a reference back to the causing external event. */ interface DeduplicationHandler { /** @@ -184,6 +195,11 @@ interface DeduplicationHandler { * cleanup/acknowledgement/stopping of retries. */ fun afterDatabaseTransaction() + + /** + * The external event for which we are trying to reduce from at-least-once delivery to exactly-once. + */ + val externalCause: ExternalEvent } typealias MessageHandler = (ReceivedMessage, MessageHandlerRegistration, DeduplicationHandler) -> Unit diff --git a/node/src/main/kotlin/net/corda/node/services/messaging/P2PMessageDeduplicator.kt b/node/src/main/kotlin/net/corda/node/services/messaging/P2PMessageDeduplicator.kt index 9896e37337..bd71ad6167 100644 --- a/node/src/main/kotlin/net/corda/node/services/messaging/P2PMessageDeduplicator.kt +++ b/node/src/main/kotlin/net/corda/node/services/messaging/P2PMessageDeduplicator.kt @@ -19,7 +19,6 @@ import net.corda.nodeapi.internal.persistence.CordaPersistence import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX import java.io.Serializable import java.time.Instant -import java.util.* import java.util.concurrent.ConcurrentHashMap import java.util.concurrent.TimeUnit import javax.persistence.Column @@ -32,8 +31,6 @@ typealias SenderHashToSeqNo = Pair * Encapsulate the de-duplication logic. */ class P2PMessageDeduplicator(private val database: CordaPersistence) { - val ourSenderUUID = UUID.randomUUID().toString() - // A temporary in-memory set of deduplication IDs and associated high water mark details. // When we receive a message we don't persist the ID immediately, // so we store the ID here in the meantime (until the persisting db tx has committed). 
This is because Artemis may diff --git a/node/src/main/kotlin/net/corda/node/services/messaging/P2PMessagingClient.kt b/node/src/main/kotlin/net/corda/node/services/messaging/P2PMessagingClient.kt index d20eda2263..122ada151b 100644 --- a/node/src/main/kotlin/net/corda/node/services/messaging/P2PMessagingClient.kt +++ b/node/src/main/kotlin/net/corda/node/services/messaging/P2PMessagingClient.kt @@ -26,11 +26,7 @@ import net.corda.core.serialization.SingletonSerializeAsToken import net.corda.core.serialization.deserialize import net.corda.core.serialization.internal.nodeSerializationEnv import net.corda.core.serialization.serialize -import net.corda.core.utilities.ByteSequence -import net.corda.core.utilities.NetworkHostAndPort -import net.corda.core.utilities.OpaqueBytes -import net.corda.core.utilities.contextLogger -import net.corda.core.utilities.trace +import net.corda.core.utilities.* import net.corda.node.VersionInfo import net.corda.node.internal.LifecycleSupport import net.corda.node.internal.artemis.ReactiveArtemisConsumer @@ -38,19 +34,18 @@ import net.corda.node.internal.artemis.ReactiveArtemisConsumer.Companion.multipl import net.corda.node.services.api.NetworkMapCacheInternal import net.corda.node.services.config.NodeConfiguration import net.corda.node.services.statemachine.DeduplicationId +import net.corda.node.services.statemachine.ExternalEvent +import net.corda.node.services.statemachine.SenderDeduplicationId import net.corda.node.utilities.AffinityExecutor import net.corda.node.utilities.PersistentMap import net.corda.nodeapi.ArtemisTcpTransport.Companion.p2pConnectorTcpTransport import net.corda.nodeapi.internal.ArtemisMessagingComponent -import net.corda.nodeapi.internal.ArtemisMessagingComponent.ArtemisAddress +import net.corda.nodeapi.internal.ArtemisMessagingComponent.* import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.BRIDGE_CONTROL import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.BRIDGE_NOTIFY import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.JOURNAL_HEADER_SIZE import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.P2PMessagingHeaders import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.PEERS_PREFIX -import net.corda.nodeapi.internal.ArtemisMessagingComponent.NodeAddress -import net.corda.nodeapi.internal.ArtemisMessagingComponent.RemoteInboxAddress -import net.corda.nodeapi.internal.ArtemisMessagingComponent.ServiceAddress import net.corda.nodeapi.internal.bridging.BridgeControl import net.corda.nodeapi.internal.bridging.BridgeEntry import net.corda.nodeapi.internal.persistence.CordaPersistence @@ -61,12 +56,7 @@ import org.apache.activemq.artemis.api.core.Message.HDR_DUPLICATE_DETECTION_ID import org.apache.activemq.artemis.api.core.Message.HDR_VALIDATED_USER import org.apache.activemq.artemis.api.core.RoutingType import org.apache.activemq.artemis.api.core.SimpleString -import org.apache.activemq.artemis.api.core.client.ActiveMQClient -import org.apache.activemq.artemis.api.core.client.ClientConsumer -import org.apache.activemq.artemis.api.core.client.ClientMessage -import org.apache.activemq.artemis.api.core.client.ClientProducer -import org.apache.activemq.artemis.api.core.client.ClientSession -import org.apache.activemq.artemis.api.core.client.ServerLocator +import org.apache.activemq.artemis.api.core.client.* import org.apache.commons.lang.ArrayUtils.EMPTY_BYTE_ARRAY import rx.Observable import rx.Subscription @@ -149,7 +139,7 @@ class P2PMessagingClient(val 
config: NodeConfiguration, ) } - private class NodeClientMessage(override val topic: String, override val data: ByteSequence, override val uniqueMessageId: DeduplicationId, override val senderUUID: String?, override val additionalHeaders: Map) : Message { + class NodeClientMessage(override val topic: String, override val data: ByteSequence, override val uniqueMessageId: DeduplicationId, override val senderUUID: String?, override val additionalHeaders: Map) : Message { override val debugTimestamp: Instant = Instant.now() override fun toString() = "$topic#${String(data.bytes)}" } @@ -183,9 +173,12 @@ class P2PMessagingClient(val config: NodeConfiguration, data class HandlerRegistration(val topic: String, val callback: Any) : MessageHandlerRegistration override val myAddress: SingleMessageRecipient = NodeAddress(myIdentity, advertisedAddress) + override val ourSenderUUID = UUID.randomUUID().toString() + private val messageRedeliveryDelaySeconds = config.p2pMessagingRetry.messageRedeliveryDelay.seconds private val state = ThreadBox(InnerState()) private val knownQueues = Collections.newSetFromMap(ConcurrentHashMap()) + private val delayStartQueues = Collections.newSetFromMap(ConcurrentHashMap()) private val externalBridge: Boolean = config.enterpriseConfiguration.externalBridge ?: false private val handlers = ConcurrentHashMap() @@ -255,7 +248,7 @@ class P2PMessagingClient(val config: NodeConfiguration, this@P2PMessagingClient, metricRegistry, queueBound = config.enterpriseConfiguration.tuning.maximumMessagingBatchSize, - ourSenderUUID = deduplicator.ourSenderUUID, + ourSenderUUID = ourSenderUUID, myLegalName = legalName ) this@P2PMessagingClient.messagingExecutor = messagingExecutor @@ -352,7 +345,12 @@ class P2PMessagingClient(val config: NodeConfiguration, val queues = session.addressQuery(SimpleString("$PEERS_PREFIX#")).queueNames for (queue in queues) { - createBridgeEntry(queue) + val queueQuery = session.queueQuery(queue) + if (!config.lazyBridgeStart || queueQuery.messageCount > 0) { + createBridgeEntry(queue) + } else { + delayStartQueues += queue.toString() + } } val startupMessage = BridgeControl.NodeToBridgeSnapshot(myIdentity.toStringShort(), inboxes, requiredBridges) sendBridgeControl(startupMessage) @@ -466,18 +464,23 @@ class P2PMessagingClient(val config: NodeConfiguration, } } - inner class MessageDeduplicationHandler(val artemisMessage: ClientMessage, val cordaMessage: ReceivedMessage) : DeduplicationHandler { + private inner class MessageDeduplicationHandler(val artemisMessage: ClientMessage, override val receivedMessage: ReceivedMessage) : DeduplicationHandler, ExternalEvent.ExternalMessageEvent { + override val externalCause: ExternalEvent + get() = this + override val deduplicationHandler: MessageDeduplicationHandler + get() = this + override fun insideDatabaseTransaction() { - deduplicator.persistDeduplicationId(cordaMessage.uniqueMessageId) + deduplicator.persistDeduplicationId(receivedMessage.uniqueMessageId) } override fun afterDatabaseTransaction() { - deduplicator.signalMessageProcessFinish(cordaMessage.uniqueMessageId) + deduplicator.signalMessageProcessFinish(receivedMessage.uniqueMessageId) messagingExecutor!!.acknowledge(artemisMessage) } override fun toString(): String { - return "${javaClass.simpleName}(${cordaMessage.uniqueMessageId})" + return "${javaClass.simpleName}(${receivedMessage.uniqueMessageId})" } } @@ -600,19 +603,26 @@ class P2PMessagingClient(val config: NodeConfiguration, /** Attempts to create a durable queue on the broker which is bound to an 
address of the same name. */ private fun createQueueIfAbsent(queueName: String, session: ClientSession) { + fun sendBridgeCreateMessage() { + val keyHash = queueName.substring(PEERS_PREFIX.length) + val peers = networkMap.getNodesByOwningKeyIndex(keyHash) + for (node in peers) { + val bridge = BridgeEntry(queueName, node.addresses, node.legalIdentities.map { it.name }) + val createBridgeMessage = BridgeControl.Create(myIdentity.toStringShort(), bridge) + sendBridgeControl(createBridgeMessage) + } + } if (!knownQueues.contains(queueName)) { - val queueQuery = session.queueQuery(SimpleString(queueName)) - if (!queueQuery.isExists) { - log.info("Create fresh queue $queueName bound on same address") - session.createQueue(queueName, RoutingType.ANYCAST, queueName, true) - if (queueName.startsWith(PEERS_PREFIX)) { - val keyHash = queueName.substring(PEERS_PREFIX.length) - val peers = networkMap.getNodesByOwningKeyIndex(keyHash) - for (node in peers) { - val bridge = BridgeEntry(queueName, node.addresses, node.legalIdentities.map { it.name }) - val createBridgeMessage = BridgeControl.Create(myIdentity.toStringShort(), bridge) - sendBridgeControl(createBridgeMessage) - } + if (delayStartQueues.contains(queueName)) { + log.info("Start bridge for previously empty queue $queueName") + sendBridgeCreateMessage() + delayStartQueues -= queueName + } else { + val queueQuery = session.queueQuery(SimpleString(queueName)) + if (!queueQuery.isExists) { + log.info("Create fresh queue $queueName bound on same address") + session.createQueue(queueName, RoutingType.ANYCAST, queueName, true) + sendBridgeCreateMessage() } } knownQueues += queueName @@ -635,8 +645,8 @@ class P2PMessagingClient(val config: NodeConfiguration, handlers.remove(registration.topic) } - override fun createMessage(topic: String, data: ByteArray, deduplicationId: DeduplicationId, additionalHeaders: Map): Message { - return NodeClientMessage(topic, OpaqueBytes(data), deduplicationId, deduplicator.ourSenderUUID, additionalHeaders) + override fun createMessage(topic: String, data: ByteArray, deduplicationId: SenderDeduplicationId, additionalHeaders: Map): Message { + return NodeClientMessage(topic, OpaqueBytes(data), deduplicationId.deduplicationId, deduplicationId.senderUUID, additionalHeaders) } override fun getAddressOfParty(partyInfo: PartyInfo): MessageRecipients { diff --git a/node/src/main/kotlin/net/corda/node/services/persistence/DBCheckpointStorage.kt b/node/src/main/kotlin/net/corda/node/services/persistence/DBCheckpointStorage.kt index 66a1538b23..af277848ad 100644 --- a/node/src/main/kotlin/net/corda/node/services/persistence/DBCheckpointStorage.kt +++ b/node/src/main/kotlin/net/corda/node/services/persistence/DBCheckpointStorage.kt @@ -20,9 +20,9 @@ import net.corda.nodeapi.internal.persistence.currentDBSession import org.apache.commons.lang.ArrayUtils.EMPTY_BYTE_ARRAY import org.slf4j.Logger import org.slf4j.LoggerFactory +import java.io.Serializable import java.util.* import java.util.stream.Stream -import java.io.Serializable import javax.persistence.Column import javax.persistence.Entity import javax.persistence.Id @@ -63,6 +63,11 @@ class DBCheckpointStorage : CheckpointStorage { return session.createQuery(delete).executeUpdate() > 0 } + override fun getCheckpoint(id: StateMachineRunId): SerializedBytes? 
{ + val bytes = currentDBSession().get(DBCheckpoint::class.java, id.uuid.toString())?.checkpoint ?: return null + return SerializedBytes(bytes) + } + override fun getAllCheckpoints(): Stream>> { val session = currentDBSession() val criteriaQuery = session.criteriaBuilder.createQuery(DBCheckpoint::class.java) diff --git a/node/src/main/kotlin/net/corda/node/services/persistence/DBTransactionStorage.kt b/node/src/main/kotlin/net/corda/node/services/persistence/DBTransactionStorage.kt index 5ace12aa4e..2b64788804 100644 --- a/node/src/main/kotlin/net/corda/node/services/persistence/DBTransactionStorage.kt +++ b/node/src/main/kotlin/net/corda/node/services/persistence/DBTransactionStorage.kt @@ -33,7 +33,6 @@ import org.apache.commons.lang.ArrayUtils.EMPTY_BYTE_ARRAY import rx.Observable import rx.subjects.PublishSubject import java.io.Serializable -import java.util.* import javax.persistence.* // cache value type to just store the immutable bits of a signed transaction plus conversion helpers @@ -83,11 +82,11 @@ class DBTransactionStorage(cacheSizeBytes: Long) : WritableTransactionStorage, S // to the memory pressure at all here. private const val transactionSignatureOverheadEstimate = 1024 - private fun weighTx(tx: Optional): Int { - if (!tx.isPresent) { + private fun weighTx(tx: AppendOnlyPersistentMapBase.Transactional): Int { + val actTx = tx.valueWithoutIsolation + if (actTx == null) { return 0 } - val actTx = tx.get() return actTx.second.sumBy { it.size + transactionSignatureOverheadEstimate } + actTx.first.size } } diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/Action.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/Action.kt index 7d8b60232a..9c11c4bc34 100644 --- a/node/src/main/kotlin/net/corda/node/services/statemachine/Action.kt +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/Action.kt @@ -34,7 +34,7 @@ sealed class Action { data class SendInitial( val party: Party, val initialise: InitialSessionMessage, - val deduplicationId: DeduplicationId + val deduplicationId: SenderDeduplicationId ) : Action() /** @@ -43,7 +43,7 @@ sealed class Action { data class SendExisting( val peerParty: Party, val message: ExistingSessionMessage, - val deduplicationId: DeduplicationId + val deduplicationId: SenderDeduplicationId ) : Action() /** @@ -72,7 +72,8 @@ sealed class Action { */ data class PropagateErrors( val errorMessages: List, - val sessions: List + val sessions: List, + val senderUUID: String? ) : Action() /** @@ -139,6 +140,11 @@ sealed class Action { * Release soft locks associated with given ID (currently the flow ID). */ data class ReleaseSoftLocks(val uuid: UUID?) : Action() + + /** + * Retry a flow from the last checkpoint, or if there is no checkpoint, restart the flow with the same invocation details. 
+ */ + data class RetryFlowFromSafePoint(val currentState: StateMachineState) : Action() } /** diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/ActionExecutorImpl.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/ActionExecutorImpl.kt index 03c488e291..6f5f57ad5a 100644 --- a/node/src/main/kotlin/net/corda/node/services/statemachine/ActionExecutorImpl.kt +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/ActionExecutorImpl.kt @@ -83,6 +83,7 @@ class ActionExecutorImpl( is Action.CommitTransaction -> executeCommitTransaction() is Action.ExecuteAsyncOperation -> executeAsyncOperation(fiber, action) is Action.ReleaseSoftLocks -> executeReleaseSoftLocks(action) + is Action.RetryFlowFromSafePoint -> executeRetryFlowFromSafePoint(action) } } @@ -135,7 +136,7 @@ class ActionExecutorImpl( @Suspendable private fun executePropagateErrors(action: Action.PropagateErrors) { action.errorMessages.forEach { (exception) -> - log.debug("Propagating error", exception) + log.warn("Propagating error", exception) } for (sessionState in action.sessions) { // We cannot propagate if the session isn't live. @@ -147,7 +148,7 @@ class ActionExecutorImpl( val sinkSessionId = sessionState.initiatedState.peerSinkSessionId val existingMessage = ExistingSessionMessage(sinkSessionId, errorMessage) val deduplicationId = DeduplicationId.createForError(errorMessage.errorId, sinkSessionId) - flowMessaging.sendSessionMessage(sessionState.peerParty, existingMessage, deduplicationId) + flowMessaging.sendSessionMessage(sessionState.peerParty, existingMessage, SenderDeduplicationId(deduplicationId, action.senderUUID)) } } } @@ -236,6 +237,10 @@ class ActionExecutorImpl( ) } + private fun executeRetryFlowFromSafePoint(action: Action.RetryFlowFromSafePoint) { + stateMachineManager.retryFlowFromSafePoint(action.currentState) + } + private fun serializeCheckpoint(checkpoint: Checkpoint): SerializedBytes { return checkpoint.serialize(context = checkpointSerializationContext) } diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/DeduplicationId.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/DeduplicationId.kt index 81e6560f86..a642321056 100644 --- a/node/src/main/kotlin/net/corda/node/services/statemachine/DeduplicationId.kt +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/DeduplicationId.kt @@ -55,3 +55,9 @@ data class DeduplicationId(val toString: String) { } } } + +/** + * Represents the deduplication ID of a flow message, and the sender identifier for the flow doing the sending. The identifier might be + * null if the flow is trying to replay messages and doesn't want an optimisation to ignore the deduplication ID. + */ +data class SenderDeduplicationId(val deduplicationId: DeduplicationId, val senderUUID: String?) \ No newline at end of file diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/Event.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/Event.kt index 42978d51de..e35fdbbb50 100644 --- a/node/src/main/kotlin/net/corda/node/services/statemachine/Event.kt +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/Event.kt @@ -41,9 +41,9 @@ sealed class Event { */ data class DeliverSessionMessage( val sessionMessage: ExistingSessionMessage, - val deduplicationHandler: DeduplicationHandler, + override val deduplicationHandler: DeduplicationHandler, val sender: Party - ) : Event() + ) : Event(), GeneratedByExternalEvent /** * Signal that an error has happened. 
This may be due to an uncaught exception in the flow or some external error. @@ -143,4 +143,19 @@ sealed class Event { * @param returnValue the result of the operation. */ data class AsyncOperationCompletion(val returnValue: Any?) : Event() + + /** + * Retry a flow from the last checkpoint, or if there is no checkpoint, restart the flow with the same invocation details. + */ + object RetryFlowFromSafePoint : Event() { + override fun toString() = "RetryFlowFromSafePoint" + } + + /** + * Indicates that an event was generated by an external event and that external event needs to be replayed if we retry the flow, + * even if it has not yet been processed and placed on the pending de-duplication handlers list. + */ + interface GeneratedByExternalEvent { + val deduplicationHandler: DeduplicationHandler + } } diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/FlowHospital.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/FlowHospital.kt index 1f0d6bb6ed..994127c3b1 100644 --- a/node/src/main/kotlin/net/corda/node/services/statemachine/FlowHospital.kt +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/FlowHospital.kt @@ -19,10 +19,15 @@ interface FlowHospital { /** * The flow running in [flowFiber] has errored. */ - fun flowErrored(flowFiber: FlowFiber) + fun flowErrored(flowFiber: FlowFiber, currentState: StateMachineState, errors: List) /** * The flow running in [flowFiber] has cleaned, possibly as a result of a flow hospital resume. */ fun flowCleaned(flowFiber: FlowFiber) + + /** + * The flow has been removed from the state machine. + */ + fun flowRemoved(flowFiber: FlowFiber) } diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/FlowMessaging.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/FlowMessaging.kt index 0d517c6689..cd1c541742 100644 --- a/node/src/main/kotlin/net/corda/node/services/statemachine/FlowMessaging.kt +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/FlowMessaging.kt @@ -34,7 +34,7 @@ interface FlowMessaging { * listen on the send acknowledgement. */ @Suspendable - fun sendSessionMessage(party: Party, message: SessionMessage, deduplicationId: DeduplicationId) + fun sendSessionMessage(party: Party, message: SessionMessage, deduplicationId: SenderDeduplicationId) /** * Start the messaging using the [onMessage] message handler. 
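Aside on the new SenderDeduplicationId: it pairs the existing deduplication ID with the sending node's sender UUID, and the UUID is deliberately null for flows restored from a checkpoint so the receiver cannot apply its high-water-mark deduplication optimisation to messages that might be replays. A minimal sketch of how a sender might build one; the types are cut-down stand-ins for the classes in this diff, and restoredFromCheckpoint is a hypothetical flag:

// Cut-down stand-ins for the classes introduced/changed in this diff.
data class DeduplicationId(val toString: String)
data class SenderDeduplicationId(val deduplicationId: DeduplicationId, val senderUUID: String?)

class MessageSender(private val ourSenderUUID: String) {
    // restoredFromCheckpoint is a hypothetical flag: a flow resumed from a persisted checkpoint
    // must not advertise a sender UUID, because its sends may be replays and the receiver's
    // high-water-mark optimisation would otherwise drop legitimate messages.
    fun dedupIdFor(rawId: String, restoredFromCheckpoint: Boolean): SenderDeduplicationId {
        val senderUUID = if (restoredFromCheckpoint) null else ourSenderUUID
        return SenderDeduplicationId(DeduplicationId(rawId), senderUUID)
    }
}

fun main() {
    val sender = MessageSender(ourSenderUUID = java.util.UUID.randomUUID().toString())
    println(sender.dedupIdFor("D-session1-0", restoredFromCheckpoint = false)) // carries the sender UUID
    println(sender.dedupIdFor("D-session1-0", restoredFromCheckpoint = true))  // senderUUID = null
}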
@@ -59,7 +59,7 @@ class FlowMessagingImpl(val serviceHub: ServiceHubInternal): FlowMessaging { } @Suspendable - override fun sendSessionMessage(party: Party, message: SessionMessage, deduplicationId: DeduplicationId) { + override fun sendSessionMessage(party: Party, message: SessionMessage, deduplicationId: SenderDeduplicationId) { log.trace { "Sending message $deduplicationId $message to party $party" } val networkMessage = serviceHub.networkService.createMessage(sessionTopic, serializeSessionMessage(message).bytes, deduplicationId, message.additionalHeaders(party)) val partyInfo = serviceHub.networkMapCache.getPartyInfo(party) ?: throw IllegalArgumentException("Don't know about $party") diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/FlowStateMachineImpl.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/FlowStateMachineImpl.kt index a3ddbe6c2b..abb917b6c8 100644 --- a/node/src/main/kotlin/net/corda/node/services/statemachine/FlowStateMachineImpl.kt +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/FlowStateMachineImpl.kt @@ -57,6 +57,8 @@ class FlowStateMachineImpl(override val id: StateMachineRunId, fun currentStateMachine(): FlowStateMachineImpl<*>? = Strand.currentStrand() as? FlowStateMachineImpl<*> private val log: Logger = LoggerFactory.getLogger("net.corda.flow") + + private val SERIALIZER_BLOCKER = Fiber::class.java.getDeclaredField("SERIALIZER_BLOCKER").apply { isAccessible = true }.get(null) } override val serviceHub get() = getTransientField(TransientValues::serviceHub) @@ -75,6 +77,14 @@ class FlowStateMachineImpl(override val id: StateMachineRunId, internal var transientValues: TransientReference? = null internal var transientState: TransientReference? = null + /** + * What sender identifier to put on messages sent by this flow. This will either be the identifier for the current + * state machine manager / messaging client, or null to indicate this flow is restored from a checkpoint and + * the de-duplication of messages it sends should not be optimised since this could be unreliable. + */ + override val ourSenderUUID: String? + get() = transientState?.value?.senderUUID + private fun getTransientField(field: KProperty1): A { val suppliedValues = transientValues ?: throw IllegalStateException("${field.name} wasn't supplied!") return field.get(suppliedValues.value) @@ -178,6 +188,7 @@ class FlowStateMachineImpl(override val id: StateMachineRunId, fun setLoggingContext() { context.pushToLoggingContext() MDC.put("flow-id", id.uuid.toString()) + MDC.put("fiber-id", this.getId().toString()) } @Suspendable @@ -195,7 +206,7 @@ class FlowStateMachineImpl(override val id: StateMachineRunId, suspend(FlowIORequest.WaitForSessionConfirmations, maySkipCheckpoint = true) Try.Success(result) } catch (throwable: Throwable) { - logger.warn("Flow threw exception", throwable) + logger.info("Flow threw exception... 
sending to flow hospital", throwable) Try.Failure(throwable) } val softLocksId = if (hasSoftLockedStates) logic.runId.uuid else null @@ -335,7 +346,7 @@ class FlowStateMachineImpl(override val id: StateMachineRunId, isDbTransactionOpenOnExit = false ) require(continuation == FlowContinuation.ProcessEvents) - Fiber.unparkDeserialized(this, scheduler) + unpark(SERIALIZER_BLOCKER) } setLoggingContext() return uncheckedCast(processEventsUntilFlowIsResumed( diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/PropagatingFlowHospital.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/PropagatingFlowHospital.kt index 0f3ad147f8..120f671e83 100644 --- a/node/src/main/kotlin/net/corda/node/services/statemachine/PropagatingFlowHospital.kt +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/PropagatingFlowHospital.kt @@ -19,12 +19,17 @@ import net.corda.core.utilities.loggerFor object PropagatingFlowHospital : FlowHospital { private val log = loggerFor() - override fun flowErrored(flowFiber: FlowFiber) { - log.debug { "Flow ${flowFiber.id} dirtied ${flowFiber.snapshot().checkpoint.errorState}" } + override fun flowErrored(flowFiber: FlowFiber, currentState: StateMachineState, errors: List) { + log.debug { "Flow ${flowFiber.id} in state $currentState encountered error" } flowFiber.scheduleEvent(Event.StartErrorPropagation) + for ((index, error) in errors.withIndex()) { + log.warn("Flow ${flowFiber.id} is propagating error [$index] ", error) + } } override fun flowCleaned(flowFiber: FlowFiber) { throw IllegalStateException("Flow ${flowFiber.id} cleaned after error propagation triggered") } + + override fun flowRemoved(flowFiber: FlowFiber) {} } diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/SingleThreadedStateMachineManager.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/SingleThreadedStateMachineManager.kt index 45dc2a4925..18ecaa59f4 100644 --- a/node/src/main/kotlin/net/corda/node/services/statemachine/SingleThreadedStateMachineManager.kt +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/SingleThreadedStateMachineManager.kt @@ -56,13 +56,15 @@ import java.security.SecureRandom import java.util.* import java.util.concurrent.ConcurrentHashMap import java.util.concurrent.ExecutorService +import java.util.concurrent.locks.ReentrantLock import javax.annotation.concurrent.ThreadSafe import kotlin.collections.ArrayList +import kotlin.concurrent.withLock import kotlin.streams.toList /** * The StateMachineManagerImpl will always invoke the flow fibers on the given [AffinityExecutor], regardless of which - * thread actually starts them via [startFlow]. + * thread actually starts them via [deliverExternalEvent]. */ @ThreadSafe class SingleThreadedStateMachineManager( @@ -100,6 +102,7 @@ class SingleThreadedStateMachineManager( private val flowMessaging: FlowMessaging = FlowMessagingImpl(serviceHub) private val fiberDeserializationChecker = if (serviceHub.configuration.shouldCheckCheckpoints()) FiberDeserializationChecker() else null private val transitionExecutor = makeTransitionExecutor() + private val ourSenderUUID = serviceHub.networkService.ourSenderUUID private var checkpointSerializationContext: SerializationContext? = null private var tokenizableServices: List? 
= null @@ -138,7 +141,7 @@ class SingleThreadedStateMachineManager( resumeRestoredFlows(fibers) flowMessaging.start { receivedMessage, deduplicationHandler -> executor.execute { - onSessionMessage(receivedMessage, deduplicationHandler) + deliverExternalEvent(deduplicationHandler.externalCause) } } } @@ -186,7 +189,7 @@ class SingleThreadedStateMachineManager( } } - override fun startFlow( + private fun startFlow( flowLogic: FlowLogic, context: InvocationContext, ourIdentity: Party?, @@ -320,7 +323,73 @@ class SingleThreadedStateMachineManager( } } - private fun onSessionMessage(message: ReceivedMessage, deduplicationHandler: DeduplicationHandler) { + override fun retryFlowFromSafePoint(currentState: StateMachineState) { + // Get set of external events + val flowId = currentState.flowLogic.runId + val oldFlowLeftOver = mutex.locked { flows[flowId] }?.fiber?.transientValues?.value?.eventQueue + if (oldFlowLeftOver == null) { + logger.error("Unable to find flow for flow $flowId. Something is very wrong. The flow will not retry.") + return + } + val flow = if (currentState.isAnyCheckpointPersisted) { + val serializedCheckpoint = checkpointStorage.getCheckpoint(flowId) + if (serializedCheckpoint == null) { + logger.error("Unable to find database checkpoint for flow $flowId. Something is very wrong. The flow will not retry.") + return + } + val checkpoint = deserializeCheckpoint(serializedCheckpoint) + if (checkpoint == null) { + logger.error("Unable to deserialize database checkpoint for flow $flowId. Something is very wrong. The flow will not retry.") + return + } + // Resurrect flow + createFlowFromCheckpoint( + id = flowId, + checkpoint = checkpoint, + initialDeduplicationHandler = null, + isAnyCheckpointPersisted = true, + isStartIdempotent = false, + senderUUID = null + ) + } else { + // Just flow initiation message + null + } + externalEventMutex.withLock { + if (flow != null) addAndStartFlow(flowId, flow) + // Deliver all the external events from the old flow instance. 
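The retry path above has to re-inject every external event the old flow instance had accepted but not fully processed: the deduplication handlers already pending on its state, plus anything still sitting in the old fiber's event queue that was generated by an external cause. A simplified, self-contained model of that bookkeeping, using stand-in types rather than the real node classes:

import java.util.ArrayDeque

// Stand-ins for the node's event and external-event types.
interface ExternalEvent
interface DeduplicationHandler { val externalCause: ExternalEvent }

sealed class Event {
    object DoRemainingWork : Event() // not externally generated, so never replayed
    data class DeliverSessionMessage(override val deduplicationHandler: DeduplicationHandler) :
            Event(), GeneratedByExternalEvent
    interface GeneratedByExternalEvent { val deduplicationHandler: DeduplicationHandler }
}

/**
 * Collect every external event that must be redelivered to the new flow instance: the handlers
 * already recorded as pending on the checkpointed state, plus events still queued on the old
 * fiber that were caused by something external (a P2P message or a flow-start request).
 */
fun eventsToReplay(pendingHandlers: List<DeduplicationHandler>,
                   oldQueue: ArrayDeque<Event>): List<ExternalEvent> {
    val unprocessed = mutableListOf<ExternalEvent>()
    while (true) {
        val event = oldQueue.poll() ?: break
        if (event is Event.GeneratedByExternalEvent) {
            unprocessed += event.deduplicationHandler.externalCause
        }
    }
    return pendingHandlers.map { it.externalCause } + unprocessed
}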
+ val unprocessedExternalEvents = mutableListOf() + do { + val event = oldFlowLeftOver.tryReceive() + if (event is Event.GeneratedByExternalEvent) { + unprocessedExternalEvents += event.deduplicationHandler.externalCause + } + } while (event != null) + val externalEvents = currentState.pendingDeduplicationHandlers.map { it.externalCause } + unprocessedExternalEvents + for (externalEvent in externalEvents) { + deliverExternalEvent(externalEvent) + } + } + } + + private val externalEventMutex = ReentrantLock() + override fun deliverExternalEvent(event: ExternalEvent) { + externalEventMutex.withLock { + when (event) { + is ExternalEvent.ExternalMessageEvent -> onSessionMessage(event) + is ExternalEvent.ExternalStartFlowEvent<*> -> onExternalStartFlow(event) + } + } + } + + private fun onExternalStartFlow(event: ExternalEvent.ExternalStartFlowEvent) { + val future = startFlow(event.flowLogic, event.context, ourIdentity = null, deduplicationHandler = event.deduplicationHandler) + event.wireUpFuture(future) + } + + private fun onSessionMessage(event: ExternalEvent.ExternalMessageEvent) { + val message: ReceivedMessage = event.receivedMessage + val deduplicationHandler: DeduplicationHandler = event.deduplicationHandler val peer = message.peer val sessionMessage = try { message.data.deserialize() @@ -394,7 +463,7 @@ class SingleThreadedStateMachineManager( } if (replyError != null) { - flowMessaging.sendSessionMessage(sender, replyError, DeduplicationId.createRandom(secureRandom)) + flowMessaging.sendSessionMessage(sender, replyError, SenderDeduplicationId(DeduplicationId.createRandom(secureRandom), ourSenderUUID)) deduplicationHandler.afterDatabaseTransaction() } } @@ -468,7 +537,8 @@ class SingleThreadedStateMachineManager( isAnyCheckpointPersisted = false, isStartIdempotent = isStartIdempotent, isRemoved = false, - flowLogic = flowLogic + flowLogic = flowLogic, + senderUUID = ourSenderUUID ) flowStateMachineImpl.transientState = TransientReference(initialState) mutex.locked { @@ -503,7 +573,7 @@ class SingleThreadedStateMachineManager( private fun createTransientValues(id: StateMachineRunId, resultFuture: CordaFuture): FlowStateMachineImpl.TransientValues { return FlowStateMachineImpl.TransientValues( - eventQueue = Channels.newChannel(stateMachineConfiguration.eventQueueSize, Channels.OverflowPolicy.BLOCK), + eventQueue = Channels.newChannel(-1, Channels.OverflowPolicy.BLOCK), resultFuture = resultFuture, database = database, transitionExecutor = transitionExecutor, @@ -519,7 +589,8 @@ class SingleThreadedStateMachineManager( checkpoint: Checkpoint, isAnyCheckpointPersisted: Boolean, isStartIdempotent: Boolean, - initialDeduplicationHandler: DeduplicationHandler? + initialDeduplicationHandler: DeduplicationHandler?, + senderUUID: String? 
= ourSenderUUID ): Flow { val flowState = checkpoint.flowState val resultFuture = openFuture() @@ -534,7 +605,8 @@ class SingleThreadedStateMachineManager( isAnyCheckpointPersisted = isAnyCheckpointPersisted, isStartIdempotent = isStartIdempotent, isRemoved = false, - flowLogic = logic + flowLogic = logic, + senderUUID = senderUUID ) val fiber = FlowStateMachineImpl(id, logic, scheduler) fiber.transientValues = TransientReference(createTransientValues(id, resultFuture)) @@ -552,7 +624,8 @@ class SingleThreadedStateMachineManager( isAnyCheckpointPersisted = isAnyCheckpointPersisted, isStartIdempotent = isStartIdempotent, isRemoved = false, - flowLogic = fiber.logic + flowLogic = fiber.logic, + senderUUID = senderUUID ) fiber.transientValues = TransientReference(createTransientValues(id, resultFuture)) fiber.transientState = TransientReference(state) @@ -576,9 +649,13 @@ class SingleThreadedStateMachineManager( startedFutures[id]?.setException(IllegalStateException("Will not start flow as SMM is stopping")) logger.trace("Not resuming as SMM is stopping.") } else { - incrementLiveFibers() - unfinishedFibers.countUp() - flows[id] = flow + val oldFlow = flows.put(id, flow) + if (oldFlow == null) { + incrementLiveFibers() + unfinishedFibers.countUp() + } else { + oldFlow.resultFuture.captureLater(flow.resultFuture) + } flow.fiber.scheduleEvent(Event.DoRemainingWork) when (checkpoint.flowState) { is FlowState.Unstarted -> { @@ -614,7 +691,7 @@ class SingleThreadedStateMachineManager( private fun makeTransitionExecutor(): TransitionExecutor { val interceptors = ArrayList() - interceptors.add { HospitalisingInterceptor(PropagatingFlowHospital, it) } + interceptors.add { HospitalisingInterceptor(StaffedFlowHospital, it) } if (serviceHub.configuration.devMode) { interceptors.add { DumpHistoryOnErrorInterceptor(it) } } diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/StaffedFlowHospital.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/StaffedFlowHospital.kt new file mode 100644 index 0000000000..b0fb7943f0 --- /dev/null +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/StaffedFlowHospital.kt @@ -0,0 +1,127 @@ +package net.corda.node.services.statemachine + +import net.corda.core.flows.StateMachineRunId +import net.corda.core.utilities.loggerFor +import java.sql.SQLException +import java.time.Instant +import java.util.concurrent.ConcurrentHashMap + +/** + * This hospital consults "staff" to see if they can automatically diagnose and treat flows. 
+ */ +object StaffedFlowHospital : FlowHospital { + private val log = loggerFor() + + private val staff = listOf(DeadlockNurse, DuplicateInsertSpecialist) + + private val patients = ConcurrentHashMap() + + val numberOfPatients = patients.size + + class MedicalHistory { + val records: MutableList = mutableListOf() + + sealed class Record(val suspendCount: Int) { + class Admitted(val at: Instant, suspendCount: Int) : Record(suspendCount) { + override fun toString() = "Admitted(at=$at, suspendCount=$suspendCount)" + } + + class Discharged(val at: Instant, suspendCount: Int, val by: Staff, val error: Throwable) : Record(suspendCount) { + override fun toString() = "Discharged(at=$at, suspendCount=$suspendCount, by=$by)" + } + } + + fun notDischargedForTheSameThingMoreThan(max: Int, by: Staff): Boolean { + val lastAdmittanceSuspendCount = (records.last() as MedicalHistory.Record.Admitted).suspendCount + return records.filterIsInstance(MedicalHistory.Record.Discharged::class.java).filter { it.by == by && it.suspendCount == lastAdmittanceSuspendCount }.count() <= max + } + + override fun toString(): String = "${this.javaClass.simpleName}(records = $records)" + } + + override fun flowErrored(flowFiber: FlowFiber, currentState: StateMachineState, errors: List) { + log.info("Flow ${flowFiber.id} admitted to hospital in state $currentState") + val medicalHistory = patients.computeIfAbsent(flowFiber.id) { MedicalHistory() } + medicalHistory.records += MedicalHistory.Record.Admitted(Instant.now(), currentState.checkpoint.numberOfSuspends) + for ((index, error) in errors.withIndex()) { + log.info("Flow ${flowFiber.id} has error [$index]", error) + if (!errorIsDischarged(flowFiber, currentState, error, medicalHistory)) { + // If any error isn't discharged, then we propagate. + log.warn("Flow ${flowFiber.id} error was not discharged, propagating.") + flowFiber.scheduleEvent(Event.StartErrorPropagation) + return + } + } + // If all are discharged, retry. + flowFiber.scheduleEvent(Event.RetryFlowFromSafePoint) + } + + private fun errorIsDischarged(flowFiber: FlowFiber, currentState: StateMachineState, error: Throwable, medicalHistory: MedicalHistory): Boolean { + for (staffMember in staff) { + val diagnosis = staffMember.consult(flowFiber, currentState, error, medicalHistory) + if (diagnosis == Diagnosis.DISCHARGE) { + medicalHistory.records += MedicalHistory.Record.Discharged(Instant.now(), currentState.checkpoint.numberOfSuspends, staffMember, error) + log.info("Flow ${flowFiber.id} error discharged from hospital by $staffMember") + return true + } + } + return false + } + + // It's okay for flows to be cleaned... we fix them now! + override fun flowCleaned(flowFiber: FlowFiber) {} + + override fun flowRemoved(flowFiber: FlowFiber) { + patients.remove(flowFiber.id) + } + + enum class Diagnosis { + /** + * Retry from last safe point. + */ + DISCHARGE, + /** + * Please try another member of staff. + */ + NOT_MY_SPECIALTY + } + + interface Staff { + fun consult(flowFiber: FlowFiber, currentState: StateMachineState, newError: Throwable, history: MedicalHistory): Diagnosis + } + + /** + * SQL Deadlock detection. 
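The hospital's decision rule above boils down to: consult each member of staff about each error, and retry from the safe point only if every error is discharged by someone; otherwise propagate. A compact, self-contained sketch of that loop plus the kind of cause-chain inspection the nurses perform (simplified types, not the real node classes; the real DeadlockNurse and DuplicateInsertSpecialist follow below):

import java.sql.SQLException

// Simplified stand-ins for the hospital types in this diff.
enum class Diagnosis { DISCHARGE, NOT_MY_SPECIALTY }

interface Staff {
    fun consult(error: Throwable): Diagnosis
}

// Walks the cause chain looking for an SQL deadlock, mirroring what DeadlockNurse does.
object ToyDeadlockNurse : Staff {
    override fun consult(error: Throwable): Diagnosis {
        val mentionsDeadlock = generateSequence(error) { it.cause }.any {
            it is SQLException && it.message?.contains("deadlock", ignoreCase = true) == true
        }
        return if (mentionsDeadlock) Diagnosis.DISCHARGE else Diagnosis.NOT_MY_SPECIALTY
    }
}

// The hospital retries only if *every* error is discharged by someone; otherwise it propagates.
fun canRetry(staff: List<Staff>, errors: List<Throwable>): Boolean =
        errors.all { error -> staff.any { it.consult(error) == Diagnosis.DISCHARGE } }

fun main() {
    val staff = listOf(ToyDeadlockNurse)
    val deadlock = RuntimeException("commit failed", SQLException("Deadlock detected"))
    println(canRetry(staff, listOf(deadlock)))                // true  -> Event.RetryFlowFromSafePoint
    println(canRetry(staff, listOf(IllegalStateException()))) // false -> Event.StartErrorPropagation
}

The real MedicalHistory additionally caps how many times the same staff member may discharge a flow at the same suspend count, so a flow that keeps failing on the same thing (e.g. the same constraint violation) is eventually allowed to propagate its error.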
+ */ + object DeadlockNurse : Staff { + override fun consult(flowFiber: FlowFiber, currentState: StateMachineState, newError: Throwable, history: MedicalHistory): Diagnosis { + return if (mentionsDeadlock(newError)) { + Diagnosis.DISCHARGE + } else { + Diagnosis.NOT_MY_SPECIALTY + } + } + + private fun mentionsDeadlock(exception: Throwable?): Boolean { + return exception != null && (exception is SQLException && ((exception.message?.toLowerCase()?.contains("deadlock") + ?: false)) || mentionsDeadlock(exception.cause)) + } + } + + /** + * Primary key violation detection for duplicate inserts. Will detect other constraint violations too. + */ + object DuplicateInsertSpecialist : Staff { + override fun consult(flowFiber: FlowFiber, currentState: StateMachineState, newError: Throwable, history: MedicalHistory): Diagnosis { + return if (mentionsConstraintViolation(newError) && history.notDischargedForTheSameThingMoreThan(3, this)) { + Diagnosis.DISCHARGE + } else { + Diagnosis.NOT_MY_SPECIALTY + } + } + + private fun mentionsConstraintViolation(exception: Throwable?): Boolean { + return exception != null && (exception is org.hibernate.exception.ConstraintViolationException || mentionsConstraintViolation(exception.cause)) + } + } +} \ No newline at end of file diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/StateMachineManager.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/StateMachineManager.kt index 289c22590c..72027b962a 100644 --- a/node/src/main/kotlin/net/corda/node/services/statemachine/StateMachineManager.kt +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/StateMachineManager.kt @@ -14,11 +14,11 @@ import net.corda.core.concurrent.CordaFuture import net.corda.core.context.InvocationContext import net.corda.core.flows.FlowLogic import net.corda.core.flows.StateMachineRunId -import net.corda.core.identity.Party import net.corda.core.internal.FlowStateMachine import net.corda.core.messaging.DataFeed import net.corda.core.utilities.Try import net.corda.node.services.messaging.DeduplicationHandler +import net.corda.node.services.messaging.ReceivedMessage import rx.Observable /** @@ -50,21 +50,6 @@ interface StateMachineManager { */ fun stop(allowedUnsuspendedFiberCount: Int) - /** - * Starts a new flow. - * - * @param flowLogic The flow's code. - * @param context The context of the flow. - * @param ourIdentity The identity to use for the flow. - * @param deduplicationHandler Allows exactly-once start of the flow, see [DeduplicationHandler]. - */ - fun startFlow( - flowLogic: FlowLogic, - context: InvocationContext, - ourIdentity: Party?, - deduplicationHandler: DeduplicationHandler? - ): CordaFuture> - /** * Represents an addition/removal of a state machine. */ @@ -101,6 +86,12 @@ interface StateMachineManager { * @return whether the flow existed and was killed. */ fun killFlow(id: StateMachineRunId): Boolean + + /** + * Deliver an external event to the state machine. Such an event might be a new P2P message, or a request to start a flow. + * The event may be replayed if a flow fails and attempts to retry. + */ + fun deliverExternalEvent(event: ExternalEvent) } // These must be idempotent! 
A later failure in the state transition may error the flow state, and a replay may call @@ -110,4 +101,38 @@ interface StateMachineManagerInternal { fun addSessionBinding(flowId: StateMachineRunId, sessionId: SessionId) fun removeSessionBindings(sessionIds: Set) fun removeFlow(flowId: StateMachineRunId, removalReason: FlowRemovalReason, lastState: StateMachineState) + fun retryFlowFromSafePoint(currentState: StateMachineState) +} + +/** + * Represents an external event that can be injected into the state machine and that might need to be replayed if + * a flow retries. They always have de-duplication handlers to assist with the at-most once logic where required. + */ +interface ExternalEvent { + val deduplicationHandler: DeduplicationHandler + + /** + * An external P2P message event. + */ + interface ExternalMessageEvent : ExternalEvent { + val receivedMessage: ReceivedMessage + } + + /** + * An external request to start a flow, from the scheduler for example. + */ + interface ExternalStartFlowEvent : ExternalEvent { + val flowLogic: FlowLogic + val context: InvocationContext + + /** + * A callback for the state machine to pass back the [Future] associated with the flow start to the submitter. + */ + fun wireUpFuture(flowFuture: CordaFuture>) + + /** + * The future representing the flow start, passed back from the state machine to the submitter of this event. + */ + val future: CordaFuture> + } } diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/StateMachineState.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/StateMachineState.kt index 0dedc0062b..da2bff6559 100644 --- a/node/src/main/kotlin/net/corda/node/services/statemachine/StateMachineState.kt +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/StateMachineState.kt @@ -36,6 +36,7 @@ import net.corda.node.services.messaging.DeduplicationHandler * possible. * @param isRemoved true if the flow has been removed from the state machine manager. This is used to avoid any further * work. + * @param senderUUID the identifier of the sending state machine or null if this flow is resumed from a checkpoint so that it does not participate in de-duplication high-water-marking. */ // TODO perhaps add a read-only environment to the state machine for things that don't change over time? // TODO evaluate persistent datastructure libraries to replace the inefficient copying we currently do. @@ -47,7 +48,8 @@ data class StateMachineState( val isTransactionTracked: Boolean, val isAnyCheckpointPersisted: Boolean, val isStartIdempotent: Boolean, - val isRemoved: Boolean + val isRemoved: Boolean, + val senderUUID: String? ) /** diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/interceptors/DumpHistoryOnErrorInterceptor.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/interceptors/DumpHistoryOnErrorInterceptor.kt index 27cecdbb85..b543757df8 100644 --- a/node/src/main/kotlin/net/corda/node/services/statemachine/interceptors/DumpHistoryOnErrorInterceptor.kt +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/interceptors/DumpHistoryOnErrorInterceptor.kt @@ -44,7 +44,8 @@ class DumpHistoryOnErrorInterceptor(val delegate: TransitionExecutor) : Transiti (record ?: ArrayList()).apply { add(transitionRecord) } } - if (nextState.checkpoint.errorState is ErrorState.Errored) { + // Just if we decide to propagate, and not if just on the way to the hospital. 
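That is, the history dump now fires only once the error is actually propagating, not merely because the flow has been handed to the hospital (which may still retry it). A minimal sketch of the check, with a simplified ErrorState since the real sealed class is defined elsewhere in the node:

// Simplified stand-in for the node's ErrorState.
sealed class ErrorState {
    object Clean : ErrorState()
    data class Errored(val errors: List<Throwable>, val propagating: Boolean) : ErrorState()
}

// Dump the recorded transitions only once we have decided to propagate the error,
// not merely because the flow is on its way to the hospital.
fun shouldDumpHistory(errorState: ErrorState): Boolean =
        errorState is ErrorState.Errored && errorState.propagating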
+ if (nextState.checkpoint.errorState is ErrorState.Errored && nextState.checkpoint.errorState.propagating) { log.warn("Flow ${fiber.id} errored, dumping all transitions:\n${record!!.joinToString("\n")}") for (error in nextState.checkpoint.errorState.errors) { log.warn("Flow ${fiber.id} error", error.exception) diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/interceptors/HospitalisingInterceptor.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/interceptors/HospitalisingInterceptor.kt index af10c25db0..7d480f5f9f 100644 --- a/node/src/main/kotlin/net/corda/node/services/statemachine/interceptors/HospitalisingInterceptor.kt +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/interceptors/HospitalisingInterceptor.kt @@ -36,20 +36,23 @@ class HospitalisingInterceptor( actionExecutor: ActionExecutor ): Pair { val (continuation, nextState) = delegate.executeTransition(fiber, previousState, event, transition, actionExecutor) - when (nextState.checkpoint.errorState) { - ErrorState.Clean -> { - if (hospitalisedFlows.remove(fiber.id) != null) { - flowHospital.flowCleaned(fiber) + + when (nextState.checkpoint.errorState) { + is ErrorState.Clean -> { + if (hospitalisedFlows.remove(fiber.id) != null) { + flowHospital.flowCleaned(fiber) + } + } + is ErrorState.Errored -> { + val exceptionsToHandle = nextState.checkpoint.errorState.errors.map { it.exception } + if (hospitalisedFlows.putIfAbsent(fiber.id, fiber) == null) { + flowHospital.flowErrored(fiber, previousState, exceptionsToHandle) + } } } - is ErrorState.Errored -> { - if (hospitalisedFlows.putIfAbsent(fiber.id, fiber) == null) { - flowHospital.flowErrored(fiber) - } - } - } if (nextState.isRemoved) { hospitalisedFlows.remove(fiber.id) + flowHospital.flowRemoved(fiber) } return Pair(continuation, nextState) } diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/DeliverSessionMessageTransition.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/DeliverSessionMessageTransition.kt index abb95f67af..74e4f62c94 100644 --- a/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/DeliverSessionMessageTransition.kt +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/DeliverSessionMessageTransition.kt @@ -56,9 +56,6 @@ class DeliverSessionMessageTransition( is EndSessionMessage -> endMessageTransition() } } - if (!isErrored()) { - persistCheckpointIfNeeded() - } // Schedule a DoRemainingWork to check whether the flow needs to be woken up. actions.add(Action.ScheduleEvent(Event.DoRemainingWork)) FlowContinuation.ProcessEvents @@ -83,7 +80,7 @@ class DeliverSessionMessageTransition( // Send messages that were buffered pending confirmation of session. val sendActions = sessionState.bufferedMessages.map { (deduplicationId, bufferedMessage) -> val existingMessage = ExistingSessionMessage(message.initiatedSessionId, bufferedMessage) - Action.SendExisting(initiatedSession.peerParty, existingMessage, deduplicationId) + Action.SendExisting(initiatedSession.peerParty, existingMessage, SenderDeduplicationId(deduplicationId, startingState.senderUUID)) } actions.addAll(sendActions) currentState = currentState.copy(checkpoint = newCheckpoint) @@ -156,24 +153,6 @@ class DeliverSessionMessageTransition( } } - private fun TransitionBuilder.persistCheckpointIfNeeded() { - // We persist the message as soon as it arrives. 
- if (context.configuration.sessionDeliverPersistenceStrategy == SessionDeliverPersistenceStrategy.OnDeliver && - event.sessionMessage.payload !is EndSessionMessage) { - actions.addAll(arrayOf( - Action.CreateTransaction, - Action.PersistCheckpoint(context.id, currentState.checkpoint), - Action.PersistDeduplicationFacts(currentState.pendingDeduplicationHandlers), - Action.CommitTransaction, - Action.AcknowledgeMessages(currentState.pendingDeduplicationHandlers) - )) - currentState = currentState.copy( - pendingDeduplicationHandlers = emptyList(), - isAnyCheckpointPersisted = true - ) - } - } - private fun TransitionBuilder.endMessageTransition() { val sessionId = event.sessionMessage.recipientSessionId val sessions = currentState.checkpoint.sessions diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/ErrorFlowTransition.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/ErrorFlowTransition.kt index c614879ee2..8c6ecdeeca 100644 --- a/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/ErrorFlowTransition.kt +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/ErrorFlowTransition.kt @@ -56,7 +56,7 @@ class ErrorFlowTransition( sessions = newSessions ) currentState = currentState.copy(checkpoint = newCheckpoint) - actions.add(Action.PropagateErrors(errorMessages, initiatedSessions)) + actions.add(Action.PropagateErrors(errorMessages, initiatedSessions, startingState.senderUUID)) } // If we're errored but not propagating keep processing events. diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/StartedFlowTransition.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/StartedFlowTransition.kt index d848175d5a..3a9ee6f861 100644 --- a/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/StartedFlowTransition.kt +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/StartedFlowTransition.kt @@ -226,7 +226,7 @@ class StartedFlowTransition( } val deduplicationId = DeduplicationId.createForNormal(checkpoint, index++) val initialMessage = createInitialSessionMessage(sessionState.initiatingSubFlow, sourceSessionId, null) - actions.add(Action.SendInitial(sessionState.party, initialMessage, deduplicationId)) + actions.add(Action.SendInitial(sessionState.party, initialMessage, SenderDeduplicationId(deduplicationId, startingState.senderUUID))) newSessions[sourceSessionId] = SessionState.Initiating( bufferedMessages = emptyList(), rejectionError = null @@ -263,7 +263,7 @@ class StartedFlowTransition( when (existingSessionState) { is SessionState.Uninitiated -> { val initialMessage = createInitialSessionMessage(existingSessionState.initiatingSubFlow, sourceSessionId, message) - actions.add(Action.SendInitial(existingSessionState.party, initialMessage, deduplicationId)) + actions.add(Action.SendInitial(existingSessionState.party, initialMessage, SenderDeduplicationId(deduplicationId, startingState.senderUUID))) newSessions[sourceSessionId] = SessionState.Initiating( bufferedMessages = emptyList(), rejectionError = null @@ -280,7 +280,7 @@ class StartedFlowTransition( is InitiatedSessionState.Live -> { val sinkSessionId = existingSessionState.initiatedState.peerSinkSessionId val existingMessage = ExistingSessionMessage(sinkSessionId, sessionMessage) - actions.add(Action.SendExisting(existingSessionState.peerParty, existingMessage, deduplicationId)) + actions.add(Action.SendExisting(existingSessionState.peerParty, 
existingMessage, SenderDeduplicationId(deduplicationId, startingState.senderUUID))) Unit } InitiatedSessionState.Ended -> { diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/TopLevelTransition.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/TopLevelTransition.kt index 6e3b57ca53..3da28f65c7 100644 --- a/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/TopLevelTransition.kt +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/TopLevelTransition.kt @@ -28,18 +28,19 @@ class TopLevelTransition( ) : Transition { override fun transition(): TransitionResult { return when (event) { - is Event.DoRemainingWork -> DoRemainingWorkTransition(context, startingState).transition() - is Event.DeliverSessionMessage -> DeliverSessionMessageTransition(context, startingState, event).transition() - is Event.Error -> errorTransition(event) - is Event.TransactionCommitted -> transactionCommittedTransition(event) - is Event.SoftShutdown -> softShutdownTransition() - is Event.StartErrorPropagation -> startErrorPropagationTransition() - is Event.EnterSubFlow -> enterSubFlowTransition(event) - is Event.LeaveSubFlow -> leaveSubFlowTransition() - is Event.Suspend -> suspendTransition(event) - is Event.FlowFinish -> flowFinishTransition(event) - is Event.InitiateFlow -> initiateFlowTransition(event) - is Event.AsyncOperationCompletion -> asyncOperationCompletionTransition(event) + is Event.DoRemainingWork -> DoRemainingWorkTransition(context, startingState).transition() + is Event.DeliverSessionMessage -> DeliverSessionMessageTransition(context, startingState, event).transition() + is Event.Error -> errorTransition(event) + is Event.TransactionCommitted -> transactionCommittedTransition(event) + is Event.SoftShutdown -> softShutdownTransition() + is Event.StartErrorPropagation -> startErrorPropagationTransition() + is Event.EnterSubFlow -> enterSubFlowTransition(event) + is Event.LeaveSubFlow -> leaveSubFlowTransition() + is Event.Suspend -> suspendTransition(event) + is Event.FlowFinish -> flowFinishTransition(event) + is Event.InitiateFlow -> initiateFlowTransition(event) + is Event.AsyncOperationCompletion -> asyncOperationCompletionTransition(event) + is Event.RetryFlowFromSafePoint -> retryFlowFromSafePointTransition(startingState) } } @@ -212,7 +213,7 @@ class TopLevelTransition( if (state is SessionState.Initiated && state.initiatedState is InitiatedSessionState.Live) { val message = ExistingSessionMessage(state.initiatedState.peerSinkSessionId, EndSessionMessage) val deduplicationId = DeduplicationId.createForNormal(currentState.checkpoint, index) - Action.SendExisting(state.peerParty, message, deduplicationId) + Action.SendExisting(state.peerParty, message, SenderDeduplicationId(deduplicationId, currentState.senderUUID)) } else { null } @@ -251,4 +252,14 @@ class TopLevelTransition( resumeFlowLogic(event.returnValue) } } + + private fun retryFlowFromSafePointTransition(startingState: StateMachineState): TransitionResult { + return builder { + // Need to create a flow from the prior checkpoint or flow initiation. 
+ actions.add(Action.CreateTransaction) + actions.add(Action.RetryFlowFromSafePoint(startingState)) + actions.add(Action.CommitTransaction) + FlowContinuation.Abort + } + } } diff --git a/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/UnstartedFlowTransition.kt b/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/UnstartedFlowTransition.kt index cbb62728da..9ceef31895 100644 --- a/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/UnstartedFlowTransition.kt +++ b/node/src/main/kotlin/net/corda/node/services/statemachine/transitions/UnstartedFlowTransition.kt @@ -68,7 +68,7 @@ class UnstartedFlowTransition( Action.SendExisting( flowStart.peerSession.counterparty, sessionMessage, - DeduplicationId.createForNormal(currentState.checkpoint, 0) + SenderDeduplicationId(DeduplicationId.createForNormal(currentState.checkpoint, 0), currentState.senderUUID) ) ) } diff --git a/node/src/main/kotlin/net/corda/node/utilities/AppendOnlyPersistentMap.kt b/node/src/main/kotlin/net/corda/node/utilities/AppendOnlyPersistentMap.kt index 295d5c1461..f262a7103b 100644 --- a/node/src/main/kotlin/net/corda/node/utilities/AppendOnlyPersistentMap.kt +++ b/node/src/main/kotlin/net/corda/node/utilities/AppendOnlyPersistentMap.kt @@ -13,14 +13,21 @@ package net.corda.node.utilities import com.github.benmanes.caffeine.cache.LoadingCache import com.github.benmanes.caffeine.cache.Weigher import net.corda.core.utilities.contextLogger +import net.corda.nodeapi.internal.persistence.DatabaseTransaction +import net.corda.nodeapi.internal.persistence.contextTransaction import net.corda.nodeapi.internal.persistence.currentDBSession +import java.lang.ref.WeakReference import java.util.* +import java.util.concurrent.ConcurrentHashMap +import java.util.concurrent.atomic.AtomicBoolean +import java.util.concurrent.atomic.AtomicReference /** - * Implements a caching layer on top of an *append-only* table accessed via Hibernate mapping. Note that if the same key is [set] twice the - * behaviour is unpredictable! There is a best-effort check for double inserts, but this should *not* be relied on, so - * ONLY USE THIS IF YOUR TABLE IS APPEND-ONLY + * Implements a caching layer on top of an *append-only* table accessed via Hibernate mapping. Note that if the same key is [set] twice, + * typically this will result in a duplicate insert if this is racing with another transaction. The flow framework will then retry. + * + * This class relies heavily on the fact that compute operations in the cache are atomic for a particular key. */ abstract class AppendOnlyPersistentMapBase( val toPersistentEntityKey: (K) -> EK, @@ -33,7 +40,8 @@ abstract class AppendOnlyPersistentMapBase( private val log = contextLogger() } - protected abstract val cache: LoadingCache> + protected abstract val cache: LoadingCache> + protected val pendingKeys = ConcurrentHashMap>() /** * Returns the value associated with the key, first loading that value from the storage if necessary. @@ -57,32 +65,31 @@ abstract class AppendOnlyPersistentMapBase( return result.map { x -> fromPersistentEntity(x) }.asSequence() } - private tailrec fun set(key: K, value: V, logWarning: Boolean, store: (K, V) -> V?): Boolean { - var insertionAttempt = false - var isUnique = true - val existingInCache = cache.get(key) { - // Thread safe, if multiple threads may wait until the first one has loaded. - insertionAttempt = true - // Key wasn't in the cache and might be in the underlying storage. 
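The core of the map rewrite below is that cache entries are no longer Optional values but a small state machine: Missing (known absent), Committed (globally visible) or InFlight (being written by one or more open database transactions, visible to the writer but not to other readers), driven atomically by a compute on the key. A stripped-down, purely in-memory model of the idea; this is not the real class and it ignores eviction, pending-key tracking and lazy loading:

import java.util.concurrent.ConcurrentHashMap

// A toy model of the three cache-entry states used by the new map.
sealed class Entry {
    object Missing : Entry()                              // known not to exist
    data class Committed(val value: String) : Entry()     // globally visible
    data class InFlight(val value: String) : Entry()      // written by an open transaction; only the writer sees it
}

class ToyAppendOnlyMap {
    private val cache = ConcurrentHashMap<String, Entry>()

    /** Returns true if this call was the first writer of the key. storeToDb stands in for the Hibernate insert. */
    fun set(key: String, value: String, storeToDb: (String, String) -> Boolean): Boolean {
        var unique = false
        cache.compute(key) { _, old ->
            when (old) {
                is Entry.Committed -> old          // already visible everywhere: keep it, do not re-insert
                else -> {                          // Missing, InFlight or not cached: always attempt the insert
                    unique = storeToDb(key, value)
                    Entry.InFlight(value)
                }
            }
        }
        return unique
    }

    /** Promote an in-flight entry once the writing database transaction commits. */
    fun commit(key: String) {
        cache.computeIfPresent(key) { _, old -> if (old is Entry.InFlight) Entry.Committed(old.value) else old }
    }

    /** What a non-writing reader sees: only committed values. */
    fun getCommitted(key: String): String? = (cache[key] as? Entry.Committed)?.value
}

The real implementation additionally records which open transactions are writing each key (pendingKeys) and promotes InFlight entries to Committed from a transaction onCommit hook, holding only a weak reference to the value so the cache remains free to evict it in the meantime.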
- // Depending on 'store' method, this may insert without checking key duplication or it may avoid inserting a duplicated key. - val existingInDb = store(key, value) - if (existingInDb != null) { // Always reuse an existing value from the storage of a duplicated key. - isUnique = false - Optional.of(existingInDb) - } else { - Optional.of(value) - } - }!! - if (!insertionAttempt) { - if (existingInCache.isPresent) { - // Key already exists in cache, do nothing. - isUnique = false - } else { - // This happens when the key was queried before with no value associated. We invalidate the cached null - // value and recursively call set again. This is to avoid race conditions where another thread queries after - // the invalidate but before the set. - cache.invalidate(key!!) - return set(key, value, logWarning, store) + private fun set(key: K, value: V, logWarning: Boolean, store: (K, V) -> V?): Boolean { + // Will be set to true if store says it isn't in the database. + var isUnique = false + cache.asMap().compute(key) { _, oldValue -> + // Always write to the database, unless we can see it's already committed. + when (oldValue) { + is Transactional.InFlight<*, V> -> { + // Someone else is writing, so store away! + // TODO: we can do collision detection here and prevent it happening in the database. But we also have to do deadlock detection, so a bit of work. + isUnique = (store(key, value) == null) + oldValue.apply { alsoWrite(value) } + } + is Transactional.Committed -> oldValue // The value is already globally visible and cached. So do nothing since the values are always the same. + else -> { + // Null or Missing. Store away! + isUnique = (store(key, value) == null) + if (!isUnique && !weAreWriting(key)) { + // If we found a value already in the database, and we were not already writing, then it's already committed but got evicted. + Transactional.Committed(value) + } else { + // Some database transactions, including us, writing, with readers seeing whatever is in the database and writers seeing the (in memory) value. + Transactional.InFlight(this, key, { loadValue(key) }).apply { alsoWrite(value) } + } + } + } } if (logWarning && !isUnique) { @@ -103,7 +110,8 @@ abstract class AppendOnlyPersistentMapBase( /** * Associates the specified value with the specified key in this map and persists it. - * If the map previously contained a mapping for the key, the old value is not replaced. + * If the map previously contained a committed mapping for the key, the old value is not replaced. It may throw an error from the + * underlying storage if this races with another database transaction to store a value for the same key. * @return true if added key was unique, otherwise false */ fun addWithDuplicatesAllowed(key: K, value: V, logWarning: Boolean = true): Boolean = @@ -126,7 +134,7 @@ abstract class AppendOnlyPersistentMapBase( protected fun loadValue(key: K): V? { val result = currentDBSession().find(persistentEntityClass, toPersistentEntityKey(key)) - return result?.let(fromPersistentEntity)?.second + return result?.apply { currentDBSession().detach(result) }?.let(fromPersistentEntity)?.second } operator fun contains(key: K) = get(key) != null @@ -142,9 +150,161 @@ abstract class AppendOnlyPersistentMapBase( session.createQuery(deleteQuery).executeUpdate() cache.invalidateAll() } + + // Helpers to know if transaction(s) are currently writing the given key. 
+ protected fun weAreWriting(key: K): Boolean = pendingKeys.get(key)?.contains(contextTransaction) ?: false + protected fun anyoneWriting(key: K): Boolean = pendingKeys.get(key)?.isNotEmpty() ?: false + + // Indicate this database transaction is a writer of this key. + private fun addPendingKey(key: K, databaseTransaction: DatabaseTransaction): Boolean { + var added = true + pendingKeys.compute(key) { k, oldSet -> + if (oldSet == null) { + val newSet = HashSet(0) + newSet += databaseTransaction + newSet + } else { + added = oldSet.add(databaseTransaction) + oldSet + } + } + return added + } + + // Remove this database transaction as a writer of this key, because the transaction committed or rolled back. + private fun removePendingKey(key: K, databaseTransaction: DatabaseTransaction) { + pendingKeys.compute(key) { k, oldSet -> + if (oldSet == null) { + oldSet + } else { + oldSet -= databaseTransaction + if (oldSet.size == 0) null else oldSet + } + } + } + + /** + * Represents a value in the cache, with transaction isolation semantics. + * + * There are 3 states. Globally missing, globally visible, and being written in a transaction somewhere now or in + * the past (and it rolled back). + */ + sealed class Transactional { + abstract val value: T + abstract val isPresent: Boolean + abstract val valueWithoutIsolation: T? + + fun orElse(alt: T?) = if (isPresent) value else alt + + // Everyone can see it, and database transaction committed. + class Committed(override val value: T) : Transactional() { + override val isPresent: Boolean + get() = true + override val valueWithoutIsolation: T? + get() = value + } + + // No one can see it. + class Missing() : Transactional() { + override val value: T + get() = throw NoSuchElementException("Not present") + override val isPresent: Boolean + get() = false + override val valueWithoutIsolation: T? + get() = null + } + + // Written in a transaction (uncommitted) somewhere, but there's a small window when this might be seen after commit, + // hence the committed flag. + class InFlight(private val map: AppendOnlyPersistentMapBase, + private val key: K, + private val _readerValueLoader: () -> T?, + private val _writerValueLoader: () -> T = { throw IllegalAccessException("No value loader provided") }) : Transactional() { + + // A flag to indicate this has now been committed, but hasn't yet been replaced with Committed. This also + // de-duplicates writes of the Committed value to the cache. + private val committed = AtomicBoolean(false) + + // What to do if a non-writer needs to see the value and it hasn't yet been committed to the database. + // Can be updated into a no-op once evaluated. + private val readerValueLoader = AtomicReference<() -> T?>(_readerValueLoader) + // What to do if a writer needs to see the value and it hasn't yet been committed to the database. + // Can be updated into a no-op once evaluated. + private val writerValueLoader = AtomicReference<() -> T>(_writerValueLoader) + + fun alsoWrite(_value: T) { + // Make the lazy loader the writers see actually just return the value that has been set. + writerValueLoader.set({ _value }) + // We make all these vals so that the lambdas do not need a reference to this, and so the onCommit only has a weak ref to the value. + // We want this so that the cache could evict the value (due to memory constraints etc) without the onCommit callback + // retaining what could be a large memory footprint object. 
+ val tx = contextTransaction + val strongKey = key + val weakValue = WeakReference(_value) + val strongComitted = committed + val strongMap = map + if (map.addPendingKey(key, tx)) { + // If the transaction commits, update cache to make globally visible if we're first for this key, + // and then stop saying the transaction is writing the key. + tx.onCommit { + if (strongComitted.compareAndSet(false, true)) { + val dereferencedKey = strongKey + val dereferencedValue = weakValue.get() + if (dereferencedValue != null) { + strongMap.cache.put(dereferencedKey, Committed(dereferencedValue)) + } + } + strongMap.removePendingKey(strongKey, tx) + } + // If the transaction rolls back, stop saying this transaction is writing the key. + tx.onRollback { + strongMap.removePendingKey(strongKey, tx) + } + } + } + + // Lazy load the value a "writer" would see. If the original loader hasn't been replaced, replace it + // with one that just returns the value once evaluated. + private fun loadAsWriter(): T { + val _value = writerValueLoader.get()() + if (writerValueLoader.get() == _writerValueLoader) { + writerValueLoader.set({ _value }) + } + return _value + } + + // Lazy load the value a "reader" would see. If the original loader hasn't been replaced, replace it + // with one that just returns the value once evaluated. + private fun loadAsReader(): T? { + val _value = readerValueLoader.get()() + if (readerValueLoader.get() == _readerValueLoader) { + readerValueLoader.set({ _value }) + } + return _value + } + + // Whether someone reading (only) can see the entry. + private val isPresentAsReader: Boolean get() = (loadAsReader() != null) + // Whether the entry is already written and committed, or we are writing it (and thus it can be seen). + private val isPresentAsWriter: Boolean get() = committed.get() || map.weAreWriting(key) + + override val isPresent: Boolean + get() = isPresentAsWriter || isPresentAsReader + + // If it is committed or we are writing, reveal the value, potentially lazy loading from the database. + // If none of the above, see what was already in the database, potentially lazily. + override val value: T + get() = if (isPresentAsWriter) loadAsWriter() else if (isPresentAsReader) loadAsReader()!! else throw NoSuchElementException("Not present") + + // The value from the perspective of the eviction algorithm of the cache. i.e. we want to reveal memory footprint to it etc. + override val valueWithoutIsolation: T? + get() = if (writerValueLoader.get() != _writerValueLoader) writerValueLoader.get()() else if (readerValueLoader.get() != _writerValueLoader) readerValueLoader.get()() else null + } + } } -class AppendOnlyPersistentMap( +// Open for tests to override +open class AppendOnlyPersistentMap( toPersistentEntityKey: (K) -> EK, fromPersistentEntity: (E) -> Pair, toPersistentEntity: (key: K, value: V) -> E, @@ -156,26 +316,71 @@ class AppendOnlyPersistentMap( toPersistentEntity, persistentEntityClass) { //TODO determine cacheBound based on entity class later or with node config allowing tuning, or using some heuristic based on heap size - override val cache = NonInvalidatingCache>( + override val cache = NonInvalidatingCache>( bound = cacheBound, - loadFunction = { key -> Optional.ofNullable(loadValue(key)) }) + loadFunction = { key: K -> + // This gets called if a value is read and the cache has no Transactional for this key yet. + val value: V? 
= loadValue(key) + if (value == null) { + // No visible value + if (anyoneWriting(key)) { + // If someone is writing (but not us) + // For those not writing, the value cannot be seen. + // For those writing, they need to re-load the value from the database (which their database transaction CAN see). + Transactional.InFlight(this, key, { null }, { loadValue(key)!! }) + } else { + // If no one is writing, then the value does not exist. + Transactional.Missing() + } + } else { + // A value was found + if (weAreWriting(key)) { + // If we are writing, it might not be globally visible, and was evicted from the cache. + // For those not writing, they need to check the database again. + // For those writing, they can see the value found. + Transactional.InFlight(this, key, { loadValue(key) }, { value }) + } else { + // If no one is writing, then make it globally visible. + Transactional.Committed(value) + } + } + }) } +// Same as above, but with weighted values (e.g. memory footprint sensitive). class WeightBasedAppendOnlyPersistentMap( toPersistentEntityKey: (K) -> EK, fromPersistentEntity: (E) -> Pair, toPersistentEntity: (key: K, value: V) -> E, persistentEntityClass: Class, maxWeight: Long, - weighingFunc: (K, Optional) -> Int + weighingFunc: (K, Transactional) -> Int ) : AppendOnlyPersistentMapBase( toPersistentEntityKey, fromPersistentEntity, toPersistentEntity, persistentEntityClass) { - override val cache = NonInvalidatingWeightBasedCache( + override val cache = NonInvalidatingWeightBasedCache>( maxWeight = maxWeight, - weigher = Weigher> { key, value -> weighingFunc(key, value) }, - loadFunction = { key -> Optional.ofNullable(loadValue(key)) } - ) -} \ No newline at end of file + weigher = object : Weigher> { + override fun weigh(key: K, value: Transactional): Int { + return weighingFunc(key, value) + } + }, + loadFunction = { key: K -> + val value: V? = loadValue(key) + if (value == null) { + if (anyoneWriting(key)) { + Transactional.InFlight(this, key, { null }, { loadValue(key)!! }) + } else { + Transactional.Missing() + } + } else { + if (weAreWriting(key)) { + Transactional.InFlight(this, key, { loadValue(key) }, { value }) + } else { + Transactional.Committed(value) + } + } + }) +} diff --git a/node/src/main/resources/reference.conf b/node/src/main/resources/reference.conf index 669662469a..9477b1796b 100644 --- a/node/src/main/resources/reference.conf +++ b/node/src/main/resources/reference.conf @@ -7,11 +7,11 @@ // // Distribution of this file or any portion thereof via any medium without the express permission of R3 is strictly prohibited. 
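Returning to the weight-based variant above: because cache values are now Transactional wrappers rather than Optionals, the weigher sizes entries via valueWithoutIsolation, so an entry whose value is not currently loaded or visible simply weighs (almost) nothing. A sketch of the same pattern written directly against Caffeine, with a simplified entry type standing in for the Transactional wrapper and illustrative byte counts:

import com.github.benmanes.caffeine.cache.Caffeine
import com.github.benmanes.caffeine.cache.Weigher

// Simplified stand-in for the Transactional wrapper: the payload may not be visible or loaded yet.
class CacheEntry(val valueWithoutIsolation: ByteArray?)

val txCache = Caffeine.newBuilder()
        .maximumWeight(8L * 1024 * 1024)
        .weigher(Weigher<String, CacheEntry> { _, entry ->
            // Unloaded or not-yet-visible entries weigh nothing; otherwise charge the payload size
            // plus a fixed per-entry overhead, mirroring the weighTx change earlier in this diff.
            entry.valueWithoutIsolation?.let { it.size + 1024 } ?: 0
        })
        .build<String, CacheEntry>()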
-myLegalName = "Vast Global MegaCorp, Ltd" emailAddress = "admin@company.com" keyStorePassword = "cordacadevpass" trustStorePassword = "trustpass" crlCheckSoftFail = true +lazyBridgeStart = true dataSourceProperties = { dataSourceClassName = org.h2.jdbcx.JdbcDataSource dataSource.url = "jdbc:h2:file:"${baseDirectory}"/persistence;DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=0;AUTO_SERVER_PORT="${h2port} diff --git a/node/src/test/kotlin/net/corda/node/services/config/NodeConfigurationImplTest.kt b/node/src/test/kotlin/net/corda/node/services/config/NodeConfigurationImplTest.kt index 97270cc0ee..117ca66129 100644 --- a/node/src/test/kotlin/net/corda/node/services/config/NodeConfigurationImplTest.kt +++ b/node/src/test/kotlin/net/corda/node/services/config/NodeConfigurationImplTest.kt @@ -11,8 +11,11 @@ package net.corda.node.services.config import com.typesafe.config.Config +import com.typesafe.config.ConfigException import com.typesafe.config.ConfigFactory import com.zaxxer.hikari.HikariConfig +import com.typesafe.config.ConfigParseOptions +import com.typesafe.config.ConfigValueFactory import net.corda.core.internal.toPath import net.corda.core.utilities.NetworkHostAndPort import net.corda.nodeapi.internal.persistence.CordaPersistence.DataSourceConfigTag @@ -22,7 +25,10 @@ import net.corda.testing.core.ALICE_NAME import net.corda.testing.node.MockServices.Companion.makeTestDataSourceProperties import net.corda.tools.shell.SSHDConfiguration import org.assertj.core.api.Assertions.assertThat +import org.assertj.core.api.Assertions.assertThatCode import org.assertj.core.api.Assertions.assertThatThrownBy +import org.junit.Assert.assertNotNull +import org.junit.Assert.assertEquals import org.junit.Test import java.net.InetAddress import java.net.URL @@ -164,7 +170,9 @@ class NodeConfigurationImplTest { @Test fun `validation has error when compatibilityZoneURL is present and devMode is true`() { - val configuration = testConfiguration.copy(devMode = true, compatibilityZoneURL = URI.create("https://r3.com").toURL()) + val configuration = testConfiguration.copy( + devMode = true, + compatibilityZoneURL = URL("https://r3.com")) val errors = configuration.validate() @@ -177,6 +185,66 @@ class NodeConfigurationImplTest { assertEquals(InetAddress.getLocalHost().hostName, config.enterpriseConfiguration.mutualExclusionConfiguration.machineName) } + @Test + fun `errors for nested config keys contain path`() { + var rawConfig = ConfigFactory.parseResources("working-config.conf", ConfigParseOptions.defaults().setAllowMissing(false)) + val missingPropertyPath = "rpcSettings.address" + rawConfig = rawConfig.withoutPath(missingPropertyPath) + + assertThatThrownBy { rawConfig.parseAsNodeConfiguration() }.isInstanceOfSatisfying(ConfigException.Missing::class.java) { exception -> + assertThat(exception.message).isNotNull() + assertThat(exception.message).contains(missingPropertyPath) + } + } + + @Test + fun `validation has error when compatibilityZone is present and devMode is true`() { + val configuration = testConfiguration.copy(devMode = true, networkServices = NetworkServicesConfig( + URL("https://r3.com.doorman"), + URL("https://r3.com/nm"))) + + val errors = configuration.validate() + + assertThat(errors).hasOnlyOneElementSatisfying { error -> error.contains("networkServices") && error.contains("devMode") } + } + + @Test + fun `validation has error when both compatibilityZoneURL and networkServices are configured`() { + val configuration = testConfiguration.copy( + devMode = false, + 
compatibilityZoneURL = URL("https://r3.com"), + networkServices = NetworkServicesConfig( + URL("https://r3.com.doorman"), + URL("https://r3.com/nm"))) + + val errors = configuration.validate() + + assertThat(errors).hasOnlyOneElementSatisfying { + error -> error.contains("Cannot configure both compatibilityZoneUrl and networkServices simultaneously") + } + } + + @Test + fun `rpcAddress and rpcSettings_address are equivalent`() { + var rawConfig = ConfigFactory.parseResources("working-config.conf", ConfigParseOptions.defaults().setAllowMissing(false)) + rawConfig = rawConfig.withoutPath("rpcSettings.address") + rawConfig = rawConfig.withValue("rpcAddress", ConfigValueFactory.fromAnyRef("localhost:4444")) + + assertThatCode { rawConfig.parseAsNodeConfiguration() }.doesNotThrowAnyException() + } + + @Test + fun `compatiilityZoneURL populates NetworkServices`() { + val compatibilityZoneURL = URI.create("https://r3.com").toURL() + val configuration = testConfiguration.copy( + devMode = false, + compatibilityZoneURL = compatibilityZoneURL) + + assertNotNull(configuration.networkServices) + assertEquals(compatibilityZoneURL, configuration.networkServices!!.doormanURL) + assertEquals(compatibilityZoneURL, configuration.networkServices!!.networkMapURL) + } + private fun configDebugOptions(devMode: Boolean, devModeOptions: DevModeOptions?): NodeConfiguration { return testConfiguration.copy(devMode = devMode, devModeOptions = devModeOptions) } diff --git a/node/src/test/kotlin/net/corda/node/services/events/NodeSchedulerServiceTest.kt b/node/src/test/kotlin/net/corda/node/services/events/NodeSchedulerServiceTest.kt index a9796ff107..3867c20b4a 100644 --- a/node/src/test/kotlin/net/corda/node/services/events/NodeSchedulerServiceTest.kt +++ b/node/src/test/kotlin/net/corda/node/services/events/NodeSchedulerServiceTest.kt @@ -24,14 +24,15 @@ import net.corda.node.internal.configureDatabase import net.corda.node.services.api.FlowStarter import net.corda.node.services.api.NodePropertiesStore import net.corda.node.services.messaging.DeduplicationHandler +import net.corda.node.services.statemachine.ExternalEvent import net.corda.nodeapi.internal.persistence.CordaPersistence import net.corda.nodeapi.internal.persistence.DatabaseConfig -import net.corda.nodeapi.internal.persistence.DatabaseTransaction import net.corda.testing.internal.doLookup import net.corda.testing.internal.rigorousMock import net.corda.testing.internal.spectator import net.corda.testing.node.MockServices import net.corda.testing.node.TestClock +import org.junit.After import org.junit.Ignore import org.junit.Rule import org.junit.Test @@ -60,7 +61,7 @@ open class NodeSchedulerServiceTestBase { dedupe.insideDatabaseTransaction() dedupe.afterDatabaseTransaction() openFuture>() - }.whenever(it).startFlow(any>(), any(), any()) + }.whenever(it).startFlow(any>()) } private val flowsDraingMode = rigorousMock().also { doReturn(false).whenever(it).isEnabled() @@ -90,7 +91,7 @@ open class NodeSchedulerServiceTestBase { protected fun assertStarted(flowLogic: FlowLogic<*>) { // Like in assertWaitingFor, use timeout to make verify wait as we often race the call to startFlow: - verify(flowStarter, timeout(5000)).startFlow(same(flowLogic), any(), any()) + verify(flowStarter, timeout(5000)).startFlow(argForWhich> { this.flowLogic == flowLogic }) } protected fun assertStarted(event: Event) = assertStarted(event.flowLogic) @@ -122,11 +123,11 @@ class MockScheduledFlowRepository : ScheduledFlowRepository { } class NodeSchedulerServiceTest : 
NodeSchedulerServiceTestBase() { - private val database = rigorousMock().also { - doAnswer { - val block: DatabaseTransaction.() -> Any? = it.getArgument(0) - rigorousMock().block() - }.whenever(it).transaction(any()) + private val database = configureDatabase(MockServices.makeTestDataSourceProperties(), DatabaseConfig(), rigorousMock()) + + @After + fun closeDatabase() { + database.close() } private val scheduler = NodeSchedulerService( @@ -158,7 +159,9 @@ class NodeSchedulerServiceTest : NodeSchedulerServiceTestBase() { }).whenever(it).data } flows[logicRef] = flowLogic - scheduler.scheduleStateActivity(ssr) + database.transaction { + scheduler.scheduleStateActivity(ssr) + } } @Test @@ -217,7 +220,9 @@ class NodeSchedulerServiceTest : NodeSchedulerServiceTestBase() { fun `test activity due in the future and schedule another for same time then unschedule second`() { val eventA = schedule(mark + 1.days) val eventB = schedule(mark + 1.days) - scheduler.unscheduleStateActivity(eventB.stateRef) + database.transaction { + scheduler.unscheduleStateActivity(eventB.stateRef) + } assertWaitingFor(eventA) testClock.advanceBy(1.days) assertStarted(eventA) @@ -227,7 +232,9 @@ class NodeSchedulerServiceTest : NodeSchedulerServiceTestBase() { fun `test activity due in the future and schedule another for same time then unschedule original`() { val eventA = schedule(mark + 1.days) val eventB = schedule(mark + 1.days) - scheduler.unscheduleStateActivity(eventA.stateRef) + database.transaction { + scheduler.unscheduleStateActivity(eventA.stateRef) + } assertWaitingFor(eventB) testClock.advanceBy(1.days) assertStarted(eventB) @@ -235,7 +242,9 @@ class NodeSchedulerServiceTest : NodeSchedulerServiceTestBase() { @Test fun `test activity due in the future then unschedule`() { - scheduler.unscheduleStateActivity(schedule(mark + 1.days).stateRef) + database.transaction { + scheduler.unscheduleStateActivity(schedule(mark + 1.days).stateRef) + } testClock.advanceBy(1.days) } } diff --git a/node/src/test/kotlin/net/corda/node/services/persistence/AppendOnlyPersistentMapTest.kt b/node/src/test/kotlin/net/corda/node/services/persistence/AppendOnlyPersistentMapTest.kt new file mode 100644 index 0000000000..b10042dcc6 --- /dev/null +++ b/node/src/test/kotlin/net/corda/node/services/persistence/AppendOnlyPersistentMapTest.kt @@ -0,0 +1,290 @@ +package net.corda.node.services.persistence + +import net.corda.core.schemas.MappedSchema +import net.corda.core.utilities.loggerFor +import net.corda.node.internal.configureDatabase +import net.corda.node.services.schema.NodeSchemaService +import net.corda.node.utilities.AppendOnlyPersistentMap +import net.corda.nodeapi.internal.persistence.DatabaseConfig +import net.corda.testing.internal.rigorousMock +import net.corda.testing.node.MockServices.Companion.makeTestDataSourceProperties +import org.junit.After +import org.junit.Assert.* +import org.junit.Test +import org.junit.runner.RunWith +import org.junit.runners.Parameterized +import java.io.Serializable +import java.util.concurrent.CountDownLatch +import javax.persistence.Column +import javax.persistence.Entity +import javax.persistence.Id +import javax.persistence.PersistenceException + +@RunWith(Parameterized::class) +class AppendOnlyPersistentMapTest(var scenario: Scenario) { + companion object { + + private val scenarios = arrayOf( + Scenario(false, ReadOrWrite.Read, ReadOrWrite.Read, Outcome.Fail, Outcome.Fail), + Scenario(false, ReadOrWrite.Write, ReadOrWrite.Read, Outcome.Success, Outcome.Fail, Outcome.Success), + 
Scenario(false, ReadOrWrite.Read, ReadOrWrite.Write, Outcome.Fail, Outcome.Success), + Scenario(false, ReadOrWrite.Write, ReadOrWrite.Write, Outcome.Success, Outcome.SuccessButErrorOnCommit), + Scenario(false, ReadOrWrite.WriteDuplicateAllowed, ReadOrWrite.Read, Outcome.Success, Outcome.Fail, Outcome.Success), + Scenario(false, ReadOrWrite.Read, ReadOrWrite.WriteDuplicateAllowed, Outcome.Fail, Outcome.Success), + Scenario(false, ReadOrWrite.WriteDuplicateAllowed, ReadOrWrite.WriteDuplicateAllowed, Outcome.Success, Outcome.SuccessButErrorOnCommit, Outcome.Fail), + Scenario(true, ReadOrWrite.Read, ReadOrWrite.Read, Outcome.Success, Outcome.Success), + Scenario(true, ReadOrWrite.Write, ReadOrWrite.Read, Outcome.SuccessButErrorOnCommit, Outcome.Success), + Scenario(true, ReadOrWrite.Read, ReadOrWrite.Write, Outcome.Success, Outcome.Fail), + Scenario(true, ReadOrWrite.Write, ReadOrWrite.Write, Outcome.SuccessButErrorOnCommit, Outcome.SuccessButErrorOnCommit), + Scenario(true, ReadOrWrite.WriteDuplicateAllowed, ReadOrWrite.Read, Outcome.Fail, Outcome.Success), + Scenario(true, ReadOrWrite.Read, ReadOrWrite.WriteDuplicateAllowed, Outcome.Success, Outcome.Fail), + Scenario(true, ReadOrWrite.WriteDuplicateAllowed, ReadOrWrite.WriteDuplicateAllowed, Outcome.Fail, Outcome.Fail) + ) + + @Parameterized.Parameters(name = "{0}") + @JvmStatic + fun data(): Array> = scenarios.map { arrayOf(it) }.toTypedArray() + } + + enum class ReadOrWrite { Read, Write, WriteDuplicateAllowed } + enum class Outcome { Success, Fail, SuccessButErrorOnCommit } + + data class Scenario(val prePopulated: Boolean, + val a: ReadOrWrite, + val b: ReadOrWrite, + val aExpected: Outcome, + val bExpected: Outcome, + val bExpectedIfSingleThreaded: Outcome = bExpected) + + private val database = configureDatabase(makeTestDataSourceProperties(), + DatabaseConfig(), + rigorousMock(), + NodeSchemaService(setOf(MappedSchema(AppendOnlyPersistentMapTest::class.java, 1, listOf(PersistentMapEntry::class.java))))) + + @After + fun closeDatabase() { + database.close() + } + + @Test + fun `concurrent test no purge between A and B`() { + prepopulateIfRequired() + val map = createMap() + val a = TestThread("A", map).apply { start() } + val b = TestThread("B", map).apply { start() } + + // Begin A + a.phase1.countDown() + a.await(a::phase2) + + // Begin B + b.phase1.countDown() + b.await(b::phase2) + + // Commit A + a.phase3.countDown() + a.await(a::phase4) + + // Commit B + b.phase3.countDown() + b.await(b::phase4) + + // End + a.join() + b.join() + assertTrue(map.pendingKeysIsEmpty()) + } + + @Test + fun `test no purge with only a single transaction`() { + prepopulateIfRequired() + val map = createMap() + val a = TestThread("A", map, true).apply { + phase1.countDown() + phase3.countDown() + } + val b = TestThread("B", map, true).apply { + phase1.countDown() + phase3.countDown() + } + try { + database.transaction { + a.run() + b.run() + } + } catch (t: PersistenceException) { + // This only helps if thrown on commit, otherwise other latches not counted down. 
+ assertEquals(t.message, Outcome.SuccessButErrorOnCommit, a.outcome) + } + a.await(a::phase4) + b.await(b::phase4) + assertTrue(map.pendingKeysIsEmpty()) + } + + + @Test + fun `concurrent test purge between A and B`() { + // Writes intentionally do not check the database first, so purging between read and write changes behaviour + val remapped = mapOf(Scenario(true, ReadOrWrite.Read, ReadOrWrite.Write, Outcome.Success, Outcome.Fail) to Scenario(true, ReadOrWrite.Read, ReadOrWrite.Write, Outcome.Success, Outcome.SuccessButErrorOnCommit)) + scenario = remapped[scenario] ?: scenario + prepopulateIfRequired() + val map = createMap() + val a = TestThread("A", map).apply { start() } + val b = TestThread("B", map).apply { start() } + + // Begin A + a.phase1.countDown() + a.await(a::phase2) + + map.invalidate() + + // Begin B + b.phase1.countDown() + b.await(b::phase2) + + // Commit A + a.phase3.countDown() + a.await(a::phase4) + + // Commit B + b.phase3.countDown() + b.await(b::phase4) + + // End + a.join() + b.join() + assertTrue(map.pendingKeysIsEmpty()) + } + + @Test + fun `test purge mid-way in a single transaction`() { + // Writes intentionally do not check the database first, so purging between read and write changes behaviour + val remapped = mapOf(Scenario(true, ReadOrWrite.Read, ReadOrWrite.Write, Outcome.Success, Outcome.Fail) to Scenario(true, ReadOrWrite.Read, ReadOrWrite.Write, Outcome.SuccessButErrorOnCommit, Outcome.SuccessButErrorOnCommit)) + scenario = remapped[scenario] ?: scenario + prepopulateIfRequired() + val map = createMap() + val a = TestThread("A", map, true).apply { + phase1.countDown() + phase3.countDown() + } + val b = TestThread("B", map, true).apply { + phase1.countDown() + phase3.countDown() + } + try { + database.transaction { + a.run() + map.invalidate() + b.run() + } + } catch (t: PersistenceException) { + // This only helps if thrown on commit, otherwise other latches not counted down. + assertEquals(t.message, Outcome.SuccessButErrorOnCommit, a.outcome) + } + a.await(a::phase4) + b.await(b::phase4) + assertTrue(map.pendingKeysIsEmpty()) + } + + inner class TestThread(name: String, val map: AppendOnlyPersistentMap, singleThreaded: Boolean = false) : Thread(name) { + private val log = loggerFor() + + val readOrWrite = if (name == "A") scenario.a else scenario.b + val outcome = if (name == "A") scenario.aExpected else if (singleThreaded) scenario.bExpectedIfSingleThreaded else scenario.bExpected + + val phase1 = latch() + val phase2 = latch() + val phase3 = latch() + val phase4 = latch() + + override fun run() { + try { + database.transaction { + await(::phase1) + doActivity() + phase2.countDown() + await(::phase3) + } + } catch (t: PersistenceException) { + // This only helps if thrown on commit, otherwise other latches not counted down. 
+ assertEquals(t.message, Outcome.SuccessButErrorOnCommit, outcome) + } + phase4.countDown() + } + + private fun doActivity() { + if (readOrWrite == ReadOrWrite.Read) { + log.info("Reading") + val value = map.get(1) + log.info("Read $value") + if (outcome == Outcome.Success || outcome == Outcome.SuccessButErrorOnCommit) { + assertEquals("X", value) + } else { + assertNull(value) + } + } else if (readOrWrite == ReadOrWrite.Write) { + log.info("Writing") + val wasSet = map.set(1, "X") + log.info("Write $wasSet") + if (outcome == Outcome.Success || outcome == Outcome.SuccessButErrorOnCommit) { + assertEquals(true, wasSet) + } else { + assertEquals(false, wasSet) + } + } else if (readOrWrite == ReadOrWrite.WriteDuplicateAllowed) { + log.info("Writing with duplicates allowed") + val wasSet = map.addWithDuplicatesAllowed(1, "X") + log.info("Write with duplicates allowed $wasSet") + if (outcome == Outcome.Success || outcome == Outcome.SuccessButErrorOnCommit) { + assertEquals(true, wasSet) + } else { + assertEquals(false, wasSet) + } + } + } + + private fun latch() = CountDownLatch(1) + fun await(latch: () -> CountDownLatch) { + log.info("Awaiting $latch") + latch().await() + } + } + + private fun prepopulateIfRequired() { + if (scenario.prePopulated) { + database.transaction { + val map = createMap() + map.set(1, "X") + } + } + } + + @Entity + @javax.persistence.Table(name = "persist_map_test") + class PersistentMapEntry( + @Id + @Column(name = "key") + var key: Long = -1, + + @Column(name = "value", length = 16) + var value: String = "" + ) : Serializable + + class TestMap : AppendOnlyPersistentMap( + toPersistentEntityKey = { it }, + fromPersistentEntity = { Pair(it.key, it.value) }, + toPersistentEntity = { key: Long, value: String -> + PersistentMapEntry().apply { + this.key = key + this.value = value + } + }, + persistentEntityClass = PersistentMapEntry::class.java + ) { + fun pendingKeysIsEmpty() = pendingKeys.isEmpty() + + fun invalidate() = cache.invalidateAll() + } + + fun createMap() = TestMap() +} \ No newline at end of file diff --git a/node/src/test/kotlin/net/corda/node/services/persistence/TransactionCallbackTest.kt b/node/src/test/kotlin/net/corda/node/services/persistence/TransactionCallbackTest.kt new file mode 100644 index 0000000000..cd46899392 --- /dev/null +++ b/node/src/test/kotlin/net/corda/node/services/persistence/TransactionCallbackTest.kt @@ -0,0 +1,49 @@ +package net.corda.node.services.persistence + +import net.corda.node.internal.configureDatabase +import net.corda.nodeapi.internal.persistence.DatabaseConfig +import net.corda.testing.internal.rigorousMock +import net.corda.testing.node.MockServices.Companion.makeTestDataSourceProperties +import org.junit.After +import org.junit.Test +import kotlin.test.assertEquals + + +class TransactionCallbackTest { + private val database = configureDatabase(makeTestDataSourceProperties(), DatabaseConfig(), rigorousMock()) + + @After + fun closeDatabase() { + database.close() + } + + @Test + fun `onCommit called and onRollback not called on commit`() { + var onCommitCount = 0 + var onRollbackCount = 0 + database.transaction { + onCommit { onCommitCount++ } + onRollback { onRollbackCount++ } + } + assertEquals(1, onCommitCount) + assertEquals(0, onRollbackCount) + } + + @Test + fun `onCommit not called and onRollback called on rollback`() { + class TestException : Exception() + + var onCommitCount = 0 + var onRollbackCount = 0 + try { + database.transaction { + onCommit { onCommitCount++ } + onRollback { onRollbackCount++ } + 
throw TestException() + } + } catch (e: TestException) { + } + assertEquals(0, onCommitCount) + assertEquals(1, onRollbackCount) + } +} \ No newline at end of file diff --git a/node/src/test/kotlin/net/corda/node/services/statemachine/RetryFlowMockTest.kt b/node/src/test/kotlin/net/corda/node/services/statemachine/RetryFlowMockTest.kt new file mode 100644 index 0000000000..12b8d8af23 --- /dev/null +++ b/node/src/test/kotlin/net/corda/node/services/statemachine/RetryFlowMockTest.kt @@ -0,0 +1,166 @@ +package net.corda.node.services.statemachine + +import co.paralleluniverse.fibers.Suspendable +import net.corda.core.concurrent.CordaFuture +import net.corda.core.flows.FlowLogic +import net.corda.core.flows.FlowSession +import net.corda.core.flows.InitiatedBy +import net.corda.core.flows.InitiatingFlow +import net.corda.core.identity.Party +import net.corda.core.messaging.MessageRecipients +import net.corda.core.utilities.getOrThrow +import net.corda.core.utilities.unwrap +import net.corda.node.internal.StartedNode +import net.corda.node.services.messaging.Message +import net.corda.node.services.persistence.DBTransactionStorage +import net.corda.nodeapi.internal.persistence.contextTransaction +import net.corda.testing.node.internal.InternalMockNetwork +import net.corda.testing.node.internal.MessagingServiceSpy +import net.corda.testing.node.internal.newContext +import net.corda.testing.node.internal.setMessagingServiceSpy +import org.assertj.core.api.Assertions +import org.junit.After +import org.junit.Before +import org.junit.Test +import java.sql.SQLException +import java.time.Duration +import kotlin.test.assertEquals +import kotlin.test.assertNotNull +import kotlin.test.assertNull + +class RetryFlowMockTest { + private lateinit var mockNet: InternalMockNetwork + private lateinit var internalNodeA: StartedNode + private lateinit var internalNodeB: StartedNode + + @Before + fun start() { + mockNet = InternalMockNetwork(threadPerNode = true, cordappPackages = listOf(this.javaClass.`package`.name)) + internalNodeA = mockNet.createNode() + internalNodeB = mockNet.createNode() + mockNet.startNodes() + RetryFlow.count = 0 + SendAndRetryFlow.count = 0 + RetryInsertFlow.count = 0 + } + + private fun StartedNode.startFlow(logic: FlowLogic): CordaFuture = this.services.startFlow(logic, this.services.newContext()).getOrThrow().resultFuture + + @After + fun cleanUp() { + mockNet.stopNodes() + } + + @Test + fun `Single retry`() { + assertEquals(Unit, internalNodeA.startFlow(RetryFlow(1)).get()) + assertEquals(2, RetryFlow.count) + } + + @Test + fun `Retry forever`() { + Assertions.assertThatThrownBy { + internalNodeA.startFlow(RetryFlow(Int.MAX_VALUE)).getOrThrow() + }.isInstanceOf(LimitedRetryCausingError::class.java) + assertEquals(5, RetryFlow.count) + } + + @Test + fun `Retry does not set senderUUID`() { + val messagesSent = mutableListOf() + val partyB = internalNodeB.info.legalIdentities.first() + internalNodeA.setMessagingServiceSpy(object : MessagingServiceSpy(internalNodeA.network) { + override fun send(message: Message, target: MessageRecipients, retryId: Long?, sequenceKey: Any) { + messagesSent.add(message) + messagingService.send(message, target, retryId) + } + }) + internalNodeA.startFlow(SendAndRetryFlow(1, partyB)).get() + assertNotNull(messagesSent.first().senderUUID) + assertNull(messagesSent.last().senderUUID) + assertEquals(2, SendAndRetryFlow.count) + } + + @Test + fun `Retry duplicate insert`() { + assertEquals(Unit, internalNodeA.startFlow(RetryInsertFlow(1)).get()) + 
assertEquals(2, RetryInsertFlow.count) + } + + @Test + fun `Patient records do not leak in hospital`() { + assertEquals(Unit, internalNodeA.startFlow(RetryFlow(1)).get()) + assertEquals(0, StaffedFlowHospital.numberOfPatients) + assertEquals(2, RetryFlow.count) + } +} + +class LimitedRetryCausingError : org.hibernate.exception.ConstraintViolationException("Test message", SQLException(), "Test constraint") + +class RetryCausingError : SQLException("deadlock") + +class RetryFlow(val i: Int) : FlowLogic() { + companion object { + var count = 0 + } + + @Suspendable + override fun call() { + logger.info("Hello $count") + if (count++ < i) { + if (i == Int.MAX_VALUE) { + throw LimitedRetryCausingError() + } else { + throw RetryCausingError() + } + } + } +} + +@InitiatingFlow +class SendAndRetryFlow(val i: Int, val other: Party) : FlowLogic() { + companion object { + var count = 0 + } + + @Suspendable + override fun call() { + logger.info("Sending...") + val session = initiateFlow(other) + session.send("Boo") + if (count++ < i) { + throw RetryCausingError() + } + } +} + +@InitiatedBy(SendAndRetryFlow::class) +class ReceiveFlow2(val other: FlowSession) : FlowLogic() { + @Suspendable + override fun call() { + val received = other.receive().unwrap { it } + logger.info("Received... $received") + } +} + +class RetryInsertFlow(val i: Int) : FlowLogic() { + companion object { + var count = 0 + } + + @Suspendable + override fun call() { + logger.info("Hello") + doInsert() + // Checkpoint so we roll back to here + FlowLogic.sleep(Duration.ofSeconds(0)) + if (count++ < i) { + doInsert() + } + } + + private fun doInsert() { + val tx = DBTransactionStorage.DBTransaction("Foo") + contextTransaction.session.save(tx) + } +} \ No newline at end of file diff --git a/node/src/test/kotlin/net/corda/node/services/vault/VaultQueryTests.kt b/node/src/test/kotlin/net/corda/node/services/vault/VaultQueryTests.kt index d053cb457f..8054711fd1 100644 --- a/node/src/test/kotlin/net/corda/node/services/vault/VaultQueryTests.kt +++ b/node/src/test/kotlin/net/corda/node/services/vault/VaultQueryTests.kt @@ -38,7 +38,10 @@ import net.corda.nodeapi.internal.persistence.DatabaseTransaction import net.corda.testing.core.* import net.corda.testing.internal.TEST_TX_TIME import net.corda.testing.internal.rigorousMock -import net.corda.testing.internal.vault.* +import net.corda.testing.internal.vault.DUMMY_LINEAR_CONTRACT_PROGRAM_ID +import net.corda.testing.internal.vault.DummyLinearContract +import net.corda.testing.internal.vault.DummyLinearStateSchemaV1 +import net.corda.testing.internal.vault.VaultFiller import net.corda.testing.node.MockServices import net.corda.testing.node.MockServices.Companion.makeTestDatabaseAndMockServices import net.corda.testing.node.makeTestIdentityService @@ -181,10 +184,6 @@ abstract class VaultQueryTestsBase : VaultQueryParties { @JvmField val expectedEx: ExpectedException = ExpectedException.none() - @Suppress("LeakingThis") - @Rule - @JvmField - val transactionRule = VaultQueryRollbackRule(this) companion object { @ClassRule @JvmField val testSerialization = SerializationEnvironmentRule() @@ -202,7 +201,7 @@ abstract class VaultQueryTestsBase : VaultQueryParties { database.close() } - private fun consumeCash(amount: Amount) = vaultFiller.consumeCash(amount, CHARLIE) + protected fun consumeCash(amount: Amount) = vaultFiller.consumeCash(amount, CHARLIE) private fun setUpDb(_database: CordaPersistence, delay: Long = 0) { _database.transaction { // create new states @@ -1996,239 +1995,6 @@ abstract 
class VaultQueryTestsBase : VaultQueryParties { } } - /** - * Dynamic trackBy() tests - */ - - @Test - fun trackCashStates_unconsumed() { - val updates = database.transaction { - val updates = - // DOCSTART VaultQueryExample15 - vaultService.trackBy().updates // UNCONSUMED default - // DOCEND VaultQueryExample15 - - vaultFiller.fillWithSomeTestCash(100.DOLLARS, notaryServices, 5, DUMMY_CASH_ISSUER) - val linearStates = vaultFiller.fillWithSomeTestLinearStates(10).states - val dealStates = vaultFiller.fillWithSomeTestDeals(listOf("123", "456", "789")).states - // add more cash - vaultFiller.fillWithSomeTestCash(100.POUNDS, notaryServices, 1, DUMMY_CASH_ISSUER) - // add another deal - vaultFiller.fillWithSomeTestDeals(listOf("SAMPLE DEAL")) - this.session.flush() - - // consume stuff - consumeCash(100.DOLLARS) - vaultFiller.consumeDeals(dealStates.toList()) - vaultFiller.consumeLinearStates(linearStates.toList()) - - close() // transaction needs to be closed to trigger events - updates - } - - updates.expectEvents { - sequence( - expect { (consumed, produced, flowId) -> - require(flowId == null) {} - require(consumed.isEmpty()) {} - require(produced.size == 5) {} - }, - expect { (consumed, produced, flowId) -> - require(flowId == null) {} - require(consumed.isEmpty()) {} - require(produced.size == 1) {} - } - ) - } - } - - @Test - fun trackCashStates_consumed() { - - val updates = database.transaction { - val criteria = VaultQueryCriteria(status = Vault.StateStatus.CONSUMED) - val updates = vaultService.trackBy(criteria).updates - - vaultFiller.fillWithSomeTestCash(100.DOLLARS, notaryServices, 5, DUMMY_CASH_ISSUER) - val linearStates = vaultFiller.fillWithSomeTestLinearStates(10).states - val dealStates = vaultFiller.fillWithSomeTestDeals(listOf("123", "456", "789")).states - // add more cash - vaultFiller.fillWithSomeTestCash(100.POUNDS, notaryServices, 1, DUMMY_CASH_ISSUER) - // add another deal - vaultFiller.fillWithSomeTestDeals(listOf("SAMPLE DEAL")) - this.session.flush() - - consumeCash(100.POUNDS) - - // consume more stuff - consumeCash(100.DOLLARS) - vaultFiller.consumeDeals(dealStates.toList()) - vaultFiller.consumeLinearStates(linearStates.toList()) - - close() // transaction needs to be closed to trigger events - updates - } - - updates.expectEvents { - sequence( - expect { (consumed, produced, flowId) -> - require(flowId == null) {} - require(consumed.size == 1) {} - require(produced.isEmpty()) {} - }, - expect { (consumed, produced, flowId) -> - require(flowId == null) {} - require(consumed.size == 5) {} - require(produced.isEmpty()) {} - } - ) - } - } - - @Test - fun trackCashStates_all() { - val updates = database.transaction { - val updates = - database.transaction { - val criteria = VaultQueryCriteria(status = Vault.StateStatus.ALL) - vaultService.trackBy(criteria).updates - } - vaultFiller.fillWithSomeTestCash(100.DOLLARS, notaryServices, 5, DUMMY_CASH_ISSUER) - val linearStates = vaultFiller.fillWithSomeTestLinearStates(10).states - val dealStates = vaultFiller.fillWithSomeTestDeals(listOf("123", "456", "789")).states - // add more cash - vaultFiller.fillWithSomeTestCash(100.POUNDS, notaryServices, 1, DUMMY_CASH_ISSUER) - // add another deal - vaultFiller.fillWithSomeTestDeals(listOf("SAMPLE DEAL")) - this.session.flush() - -// consume stuff - consumeCash(99.POUNDS) - - consumeCash(100.DOLLARS) - vaultFiller.consumeDeals(dealStates.toList()) - vaultFiller.consumeLinearStates(linearStates.toList()) - - close() // transaction needs to be closed to trigger events - updates - 
} - - updates.expectEvents { - sequence( - expect { (consumed, produced, flowId) -> - require(flowId == null) {} - require(consumed.isEmpty()) {} - require(produced.size == 5) {} - }, - expect { (consumed, produced, flowId) -> - require(flowId == null) {} - require(consumed.isEmpty()) {} - require(produced.size == 1) {} - }, - expect { (consumed, produced, flowId) -> - require(flowId == null) {} - require(consumed.size == 1) {} - require(produced.size == 1) {} - }, - expect { (consumed, produced, flowId) -> - require(flowId == null) {} - require(consumed.size == 5) {} - require(produced.isEmpty()) {} - } - ) - } - } - - @Test - fun trackLinearStates() { - - val updates = database.transaction { - // DOCSTART VaultQueryExample16 - val (snapshot, updates) = vaultService.trackBy() - // DOCEND VaultQueryExample16 - assertThat(snapshot.states).hasSize(0) - - vaultFiller.fillWithSomeTestCash(100.DOLLARS, notaryServices, 3, DUMMY_CASH_ISSUER) - val linearStates = vaultFiller.fillWithSomeTestLinearStates(10).states - val dealStates = vaultFiller.fillWithSomeTestDeals(listOf("123", "456", "789")).states - // add more cash - vaultFiller.fillWithSomeTestCash(100.POUNDS, notaryServices, 1, DUMMY_CASH_ISSUER) - // add another deal - vaultFiller.fillWithSomeTestDeals(listOf("SAMPLE DEAL")) - this.session.flush() - - // consume stuff - consumeCash(100.DOLLARS) - vaultFiller.consumeDeals(dealStates.toList()) - vaultFiller.consumeLinearStates(linearStates.toList()) - - close() // transaction needs to be closed to trigger events - updates - } - - updates.expectEvents { - sequence( - expect { (consumed, produced, flowId) -> - require(flowId == null) {} - require(consumed.isEmpty()) {} - require(produced.size == 10) {} - }, - expect { (consumed, produced, flowId) -> - require(flowId == null) {} - require(consumed.isEmpty()) {} - require(produced.size == 3) {} - }, - expect { (consumed, produced, flowId) -> - require(flowId == null) {} - require(consumed.isEmpty()) {} - require(produced.size == 1) {} - } - ) - } - } - - @Test - fun trackDealStates() { - val updates = database.transaction { - // DOCSTART VaultQueryExample17 - val (snapshot, updates) = vaultService.trackBy() - // DOCEND VaultQueryExample17 - assertThat(snapshot.states).hasSize(0) - - vaultFiller.fillWithSomeTestCash(100.DOLLARS, notaryServices, 3, DUMMY_CASH_ISSUER) - val linearStates = vaultFiller.fillWithSomeTestLinearStates(10).states - val dealStates = vaultFiller.fillWithSomeTestDeals(listOf("123", "456", "789")).states - // add more cash - vaultFiller.fillWithSomeTestCash(100.POUNDS, notaryServices, 1, DUMMY_CASH_ISSUER) - // add another deal - vaultFiller.fillWithSomeTestDeals(listOf("SAMPLE DEAL")) - this.session.flush() - - // consume stuff - consumeCash(100.DOLLARS) - vaultFiller.consumeDeals(dealStates.toList()) - vaultFiller.consumeLinearStates(linearStates.toList()) - - close() - updates - } - - updates.expectEvents { - sequence( - expect { (consumed, produced, flowId) -> - require(flowId == null) {} - require(consumed.isEmpty()) {} - require(produced.size == 3) {} - }, - expect { (consumed, produced, flowId) -> - require(flowId == null) {} - require(consumed.isEmpty()) {} - require(produced.size == 1) {} - } - ) - } - } - @Test fun unconsumedCashStatesForSpending_single_issuer_reference() { database.transaction { @@ -2289,10 +2055,241 @@ abstract class VaultQueryTestsBase : VaultQueryParties { */ } -class VaultQueryTests : VaultQueryTestsBase(), VaultQueryParties by vaultQueryTestRule { +class VaultQueryTests : 
VaultQueryTestsBase(), VaultQueryParties by delegate { companion object { - @ClassRule @JvmField - val vaultQueryTestRule = VaultQueryTestRule() + val delegate = VaultQueryTestRule() + } + + @Rule + @JvmField + val vaultQueryTestRule = delegate + + /** + * Dynamic trackBy() tests are H2 only, since rollback stops events being emitted. + */ + + @Test + fun trackCashStates_unconsumed() { + val updates = database.transaction { + val updates = + // DOCSTART VaultQueryExample15 + vaultService.trackBy().updates // UNCONSUMED default + // DOCEND VaultQueryExample15 + + vaultFiller.fillWithSomeTestCash(100.DOLLARS, notaryServices, 5, DUMMY_CASH_ISSUER) + val linearStates = vaultFiller.fillWithSomeTestLinearStates(10).states + val dealStates = vaultFiller.fillWithSomeTestDeals(listOf("123", "456", "789")).states + // add more cash + vaultFiller.fillWithSomeTestCash(100.POUNDS, notaryServices, 1, DUMMY_CASH_ISSUER) + // add another deal + vaultFiller.fillWithSomeTestDeals(listOf("SAMPLE DEAL")) + this.session.flush() + + // consume stuff + consumeCash(100.DOLLARS) + vaultFiller.consumeDeals(dealStates.toList()) + vaultFiller.consumeLinearStates(linearStates.toList()) + + updates + } + + updates.expectEvents { + sequence( + expect { (consumed, produced, flowId) -> + require(flowId == null) {} + require(consumed.isEmpty()) {} + require(produced.size == 5) {} + }, + expect { (consumed, produced, flowId) -> + require(flowId == null) {} + require(consumed.isEmpty()) {} + require(produced.size == 1) {} + } + ) + } + } + + @Test + fun trackCashStates_consumed() { + + val updates = database.transaction { + val criteria = VaultQueryCriteria(status = Vault.StateStatus.CONSUMED) + val updates = vaultService.trackBy(criteria).updates + + vaultFiller.fillWithSomeTestCash(100.DOLLARS, notaryServices, 5, DUMMY_CASH_ISSUER) + val linearStates = vaultFiller.fillWithSomeTestLinearStates(10).states + val dealStates = vaultFiller.fillWithSomeTestDeals(listOf("123", "456", "789")).states + // add more cash + vaultFiller.fillWithSomeTestCash(100.POUNDS, notaryServices, 1, DUMMY_CASH_ISSUER) + // add another deal + vaultFiller.fillWithSomeTestDeals(listOf("SAMPLE DEAL")) + this.session.flush() + + consumeCash(100.POUNDS) + + // consume more stuff + consumeCash(100.DOLLARS) + vaultFiller.consumeDeals(dealStates.toList()) + vaultFiller.consumeLinearStates(linearStates.toList()) + + updates + } + + updates.expectEvents { + sequence( + expect { (consumed, produced, flowId) -> + require(flowId == null) {} + require(consumed.size == 1) {} + require(produced.isEmpty()) {} + }, + expect { (consumed, produced, flowId) -> + require(flowId == null) {} + require(consumed.size == 5) {} + require(produced.isEmpty()) {} + } + ) + } + } + + @Test + fun trackCashStates_all() { + val updates = database.transaction { + val updates = + database.transaction { + val criteria = VaultQueryCriteria(status = Vault.StateStatus.ALL) + vaultService.trackBy(criteria).updates + } + vaultFiller.fillWithSomeTestCash(100.DOLLARS, notaryServices, 5, DUMMY_CASH_ISSUER) + val linearStates = vaultFiller.fillWithSomeTestLinearStates(10).states + val dealStates = vaultFiller.fillWithSomeTestDeals(listOf("123", "456", "789")).states + // add more cash + vaultFiller.fillWithSomeTestCash(100.POUNDS, notaryServices, 1, DUMMY_CASH_ISSUER) + // add another deal + vaultFiller.fillWithSomeTestDeals(listOf("SAMPLE DEAL")) + this.session.flush() + + // consume stuff + consumeCash(99.POUNDS) + + consumeCash(100.DOLLARS) + vaultFiller.consumeDeals(dealStates.toList()) + 
vaultFiller.consumeLinearStates(linearStates.toList()) + + updates + } + + updates.expectEvents { + sequence( + expect { (consumed, produced, flowId) -> + require(flowId == null) {} + require(consumed.isEmpty()) {} + require(produced.size == 5) {} + }, + expect { (consumed, produced, flowId) -> + require(flowId == null) {} + require(consumed.isEmpty()) {} + require(produced.size == 1) {} + }, + expect { (consumed, produced, flowId) -> + require(flowId == null) {} + require(consumed.size == 1) {} + require(produced.size == 1) {} + }, + expect { (consumed, produced, flowId) -> + require(flowId == null) {} + require(consumed.size == 5) {} + require(produced.isEmpty()) {} + } + ) + } + } + + @Test + fun trackLinearStates() { + + val updates = database.transaction { + // DOCSTART VaultQueryExample16 + val (snapshot, updates) = vaultService.trackBy() + // DOCEND VaultQueryExample16 + assertThat(snapshot.states).hasSize(0) + + vaultFiller.fillWithSomeTestCash(100.DOLLARS, notaryServices, 3, DUMMY_CASH_ISSUER) + val linearStates = vaultFiller.fillWithSomeTestLinearStates(10).states + val dealStates = vaultFiller.fillWithSomeTestDeals(listOf("123", "456", "789")).states + // add more cash + vaultFiller.fillWithSomeTestCash(100.POUNDS, notaryServices, 1, DUMMY_CASH_ISSUER) + // add another deal + vaultFiller.fillWithSomeTestDeals(listOf("SAMPLE DEAL")) + this.session.flush() + + // consume stuff + consumeCash(100.DOLLARS) + vaultFiller.consumeDeals(dealStates.toList()) + vaultFiller.consumeLinearStates(linearStates.toList()) + + updates + } + + updates.expectEvents { + sequence( + expect { (consumed, produced, flowId) -> + require(flowId == null) {} + require(consumed.isEmpty()) {} + require(produced.size == 10) {} + }, + expect { (consumed, produced, flowId) -> + require(flowId == null) {} + require(consumed.isEmpty()) {} + require(produced.size == 3) {} + }, + expect { (consumed, produced, flowId) -> + require(flowId == null) {} + require(consumed.isEmpty()) {} + require(produced.size == 1) {} + } + ) + } + } + + @Test + fun trackDealStates() { + val updates = database.transaction { + // DOCSTART VaultQueryExample17 + val (snapshot, updates) = vaultService.trackBy() + // DOCEND VaultQueryExample17 + assertThat(snapshot.states).hasSize(0) + + vaultFiller.fillWithSomeTestCash(100.DOLLARS, notaryServices, 3, DUMMY_CASH_ISSUER) + val linearStates = vaultFiller.fillWithSomeTestLinearStates(10).states + val dealStates = vaultFiller.fillWithSomeTestDeals(listOf("123", "456", "789")).states + // add more cash + vaultFiller.fillWithSomeTestCash(100.POUNDS, notaryServices, 1, DUMMY_CASH_ISSUER) + // add another deal + vaultFiller.fillWithSomeTestDeals(listOf("SAMPLE DEAL")) + this.session.flush() + + // consume stuff + consumeCash(100.DOLLARS) + vaultFiller.consumeDeals(dealStates.toList()) + vaultFiller.consumeLinearStates(linearStates.toList()) + + updates + } + + updates.expectEvents { + sequence( + expect { (consumed, produced, flowId) -> + require(flowId == null) {} + require(consumed.isEmpty()) {} + require(produced.size == 3) {} + }, + expect { (consumed, produced, flowId) -> + require(flowId == null) {} + require(consumed.isEmpty()) {} + require(produced.size == 1) {} + } + ) + } } } diff --git a/node/src/test/kotlin/net/corda/node/utilities/ObservablesTests.kt b/node/src/test/kotlin/net/corda/node/utilities/ObservablesTests.kt index 26d79fe7ed..acaf457c9b 100644 --- a/node/src/test/kotlin/net/corda/node/utilities/ObservablesTests.kt +++ 
b/node/src/test/kotlin/net/corda/node/utilities/ObservablesTests.kt @@ -15,8 +15,8 @@ import net.corda.core.internal.bufferUntilSubscribed import net.corda.core.internal.tee import net.corda.node.internal.configureDatabase import net.corda.nodeapi.internal.persistence.* -import net.corda.testing.node.MockServices.Companion.makeTestDataSourceProperties import net.corda.testing.internal.rigorousMock +import net.corda.testing.node.MockServices.Companion.makeTestDataSourceProperties import org.assertj.core.api.Assertions.assertThat import org.junit.After import org.junit.Test @@ -24,6 +24,7 @@ import rx.Observable import rx.subjects.PublishSubject import java.io.Closeable import java.util.* +import kotlin.test.fail class ObservablesTests { private fun isInDatabaseTransaction() = contextTransactionOrNull != null @@ -68,6 +69,72 @@ class ObservablesTests { assertThat(secondEvent.get()).isEqualTo(0 to false) } + class TestException : Exception("Synthetic exception for tests") {} + + @Test + fun `bufferUntilDatabaseCommit swallows if transaction rolled back`() { + val database = createDatabase() + + val source = PublishSubject.create() + val observable: Observable = source + + val firstEvent = SettableFuture.create>() + val secondEvent = SettableFuture.create>() + + observable.first().subscribe { firstEvent.set(it to isInDatabaseTransaction()) } + observable.skip(1).first().subscribe { secondEvent.set(it to isInDatabaseTransaction()) } + + try { + database.transaction { + val delayedSubject = source.bufferUntilDatabaseCommit() + assertThat(source).isNotEqualTo(delayedSubject) + delayedSubject.onNext(0) + source.onNext(1) + assertThat(firstEvent.isDone).isTrue() + assertThat(secondEvent.isDone).isFalse() + throw TestException() + } + fail("Should not have successfully completed transaction") + } catch (e: TestException) { + } + assertThat(secondEvent.isDone).isFalse() + + assertThat(firstEvent.get()).isEqualTo(1 to true) + } + + @Test + fun `bufferUntilDatabaseCommit propagates error if transaction rolled back`() { + val database = createDatabase() + + val source = PublishSubject.create() + val observable: Observable = source + + val firstEvent = SettableFuture.create>() + val secondEvent = SettableFuture.create>() + + observable.first().subscribe({ firstEvent.set(it to isInDatabaseTransaction()) }, {}) + observable.skip(1).subscribe({ secondEvent.set(it to isInDatabaseTransaction()) }, {}) + observable.skip(1).subscribe({}, { secondEvent.set(2 to isInDatabaseTransaction()) }) + + try { + database.transaction { + val delayedSubject = source.bufferUntilDatabaseCommit(propagateRollbackAsError = true) + assertThat(source).isNotEqualTo(delayedSubject) + delayedSubject.onNext(0) + source.onNext(1) + assertThat(firstEvent.isDone).isTrue() + assertThat(secondEvent.isDone).isFalse() + throw TestException() + } + fail("Should not have successfully completed transaction") + } catch (e: TestException) { + } + assertThat(secondEvent.isDone).isTrue() + + assertThat(firstEvent.get()).isEqualTo(1 to true) + assertThat(secondEvent.get()).isEqualTo(2 to false) + } + @Test fun `bufferUntilDatabaseCommit delays until transaction closed repeatable`() { val database = createDatabase() diff --git a/node/src/test/resources/working-config.conf b/node/src/test/resources/working-config.conf new file mode 100644 index 0000000000..45ca6ef647 --- /dev/null +++ b/node/src/test/resources/working-config.conf @@ -0,0 +1,31 @@ +myLegalName = "O=Alice Corp, L=Madrid, C=ES" +emailAddress = "admin@company.com" +keyStorePassword = 
"cordacadevpass" +trustStorePassword = "trustpass" +crlCheckSoftFail = true +baseDirectory = "/opt/corda" +dataSourceProperties = { + dataSourceClassName = org.h2.jdbcx.JdbcDataSource + dataSource.url = "jdbc:h2:file:blah" + dataSource.user = "sa" + dataSource.password = "" +} +database = { + transactionIsolationLevel = "REPEATABLE_READ" + exportHibernateJMXStatistics = "false" +} +p2pAddress = "localhost:2233" +h2port = 0 +useTestClock = false +verifierType = InMemory +rpcSettings = { + address = "locahost:3418" + adminAddress = "localhost:3419" + useSsl = false + standAloneBroker = false +} +p2pMessagingRetry { + messageRedeliveryDelay = 30 seconds + maxRetryCount = 3 + backoffBase = 2.0 +} \ No newline at end of file diff --git a/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/DeserializationInput.kt b/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/DeserializationInput.kt index c60ffd2932..e2be528590 100644 --- a/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/DeserializationInput.kt +++ b/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/DeserializationInput.kt @@ -15,6 +15,7 @@ import net.corda.core.serialization.EncodingWhitelist import net.corda.core.serialization.SerializationContext import net.corda.core.serialization.SerializedBytes import net.corda.core.utilities.ByteSequence +import net.corda.core.utilities.loggerFor import net.corda.serialization.internal.* import org.apache.qpid.proton.amqp.Binary import org.apache.qpid.proton.amqp.DescribedType @@ -39,6 +40,7 @@ data class ObjectAndEnvelope(val obj: T, val envelope: Envelope) class DeserializationInput @JvmOverloads constructor(private val serializerFactory: SerializerFactory, private val encodingWhitelist: EncodingWhitelist = NullEncodingWhitelist) { private val objectHistory: MutableList = mutableListOf() + private val logger = loggerFor() companion object { @VisibleForTesting @@ -83,7 +85,6 @@ class DeserializationInput @JvmOverloads constructor(private val serializerFacto inline fun deserialize(bytes: SerializedBytes, context: SerializationContext): T = deserialize(bytes, T::class.java, context) - @Throws(NotSerializableException::class) private fun des(generator: () -> R): R { try { @@ -106,6 +107,9 @@ class DeserializationInput @JvmOverloads constructor(private val serializerFacto fun deserialize(bytes: ByteSequence, clazz: Class, context: SerializationContext): T = des { val envelope = getEnvelope(bytes, encodingWhitelist) + + logger.trace("deserialize blob scheme=\"${envelope.schema.toString()}\"") + clazz.cast(readObjectOrNull(envelope.obj, SerializationSchemas(envelope.schema, envelope.transformsSchema), clazz, context)) } diff --git a/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/EvolutionSerializer.kt b/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/EvolutionSerializer.kt index a4bd586894..d37b42d1a7 100644 --- a/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/EvolutionSerializer.kt +++ b/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/EvolutionSerializer.kt @@ -13,6 +13,9 @@ package net.corda.serialization.internal.amqp import net.corda.core.internal.isConcreteClass import net.corda.core.serialization.DeprecatedConstructorForDeserialization import net.corda.core.serialization.SerializationContext +import net.corda.core.utilities.contextLogger +import net.corda.core.utilities.debug +import net.corda.core.utilities.loggerFor import 
net.corda.serialization.internal.carpenter.getTypeAsClass import org.apache.qpid.proton.codec.Data import java.io.NotSerializableException @@ -58,9 +61,15 @@ abstract class EvolutionSerializer( new[resultsIndex] = this } } + + override fun toString(): String { + return "resultsIndex = $resultsIndex property = ${property.name}" + } } companion object { + val logger = contextLogger() + /** * Unlike the generic deserialization case where we need to locate the primary constructor * for the object (or our best guess) in the case of an object whose structure has changed @@ -76,22 +85,37 @@ abstract class EvolutionSerializer( if (!clazz.isConcreteClass) return null - val oldArgumentSet = oldArgs.map { Pair(it.key as String?, it.value.property.resolvedType) } - + val oldArgumentSet = oldArgs.map { Pair(it.key as String?, it.value.property.resolvedType.asClass()) } var maxConstructorVersion = Integer.MIN_VALUE var constructor: KFunction? = null + clazz.kotlin.constructors.forEach { val version = it.findAnnotation()?.version ?: Integer.MIN_VALUE - if (oldArgumentSet.containsAll(it.parameters.map { v -> Pair(v.name, v.type.javaType) }) && - version > maxConstructorVersion) { + + if (version > maxConstructorVersion && + oldArgumentSet.containsAll(it.parameters.map { v -> Pair(v.name, v.type.javaType.asClass()) }) + ) { constructor = it maxConstructorVersion = version + + with(logger) { + info("Select annotated constructor version=$version nparams=${it.parameters.size}") + debug{" params=${it.parameters}"} + } + } else if (version != Integer.MIN_VALUE){ + with(logger) { + info("Ignore annotated constructor version=$version nparams=${it.parameters.size}") + debug{" params=${it.parameters}"} + } } } // if we didn't get an exact match revert to existing behaviour, if the new parameters // are not mandatory (i.e. 
nullable) things are fine - return constructor ?: constructorForDeserialization(type) + return constructor ?: run { + logger.info("Failed to find annotated historic constructor") + constructorForDeserialization(type) + } } private fun makeWithConstructor( @@ -261,9 +285,13 @@ class EvolutionSerializerGetter : EvolutionSerializerGetterBase() { // both the new and old fingerprint if (newSerializer is CollectionSerializer || newSerializer is MapSerializer) { newSerializer - } else { + } else if (newSerializer is EnumSerializer){ EnumEvolutionSerializer.make(typeNotation, newSerializer, factory, schemas) } + else { + loggerFor().error("typeNotation=${typeNotation.name} Need to evolve unsupported type") + throw NotSerializableException ("${typeNotation.name} cannot be evolved") + } } } } diff --git a/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/Schema.kt b/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/Schema.kt index c822b06d01..27950b8a5a 100644 --- a/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/Schema.kt +++ b/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/Schema.kt @@ -163,7 +163,13 @@ sealed class TypeNotation : DescribedType { abstract val descriptor: Descriptor } -data class CompositeType(override val name: String, override val label: String?, override val provides: List, override val descriptor: Descriptor, val fields: List) : TypeNotation() { +data class CompositeType( + override val name: String, + override val label: String?, + override val provides: List, + override val descriptor: Descriptor, + val fields: List +) : TypeNotation() { companion object : DescribedTypeConstructor { val DESCRIPTOR = AMQPDescriptorRegistry.COMPOSITE_TYPE.amqpDescriptor diff --git a/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/SerializationHelper.kt b/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/SerializationHelper.kt index af5495753e..1c1f45fb07 100644 --- a/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/SerializationHelper.kt +++ b/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/SerializationHelper.kt @@ -535,14 +535,17 @@ fun ClassWhitelist.requireWhitelisted(type: Type) { } } -fun ClassWhitelist.isWhitelisted(clazz: Class<*>) = (hasListed(clazz) || hasAnnotationInHierarchy(clazz)) -fun ClassWhitelist.isNotWhitelisted(clazz: Class<*>) = !(this.isWhitelisted(clazz)) +fun ClassWhitelist.isWhitelisted(clazz: Class<*>) = hasListed(clazz) || hasCordaSerializable(clazz) +fun ClassWhitelist.isNotWhitelisted(clazz: Class<*>) = !this.isWhitelisted(clazz) -// Recursively check the class, interfaces and superclasses for our annotation. -fun ClassWhitelist.hasAnnotationInHierarchy(type: Class<*>): Boolean { +/** + * Check the given [Class] has the [CordaSerializable] annotation, either directly or inherited from any of its super + * classes or interfaces. 
+ */ +fun hasCordaSerializable(type: Class<*>): Boolean { return type.isAnnotationPresent(CordaSerializable::class.java) - || type.interfaces.any { hasAnnotationInHierarchy(it) } - || (type.superclass != null && hasAnnotationInHierarchy(type.superclass)) + || type.interfaces.any(::hasCordaSerializable) + || (type.superclass != null && hasCordaSerializable(type.superclass)) } /** @@ -565,27 +568,28 @@ fun ClassWhitelist.hasAnnotationInHierarchy(type: Class<*>): Boolean { * * As such, if objectInstance fails access, revert to Java reflection and try that */ -fun Class<*>.objectInstance() = - try { - this.kotlin.objectInstance - } catch (e: IllegalAccessException) { - // Check it really is an object (i.e. it has no constructor) - if (constructors.isNotEmpty()) null - else { - try { - this.getDeclaredField("INSTANCE")?.let { field -> - // and must be marked as both static and final (>0 means they're set) - if (modifiers and Modifier.STATIC == 0 || modifiers and Modifier.FINAL == 0) null - else { - val accessibility = field.isAccessible - field.isAccessible = true - val obj = field.get(null) - field.isAccessible = accessibility - obj - } +fun Class<*>.objectInstance(): Any? { + return try { + this.kotlin.objectInstance + } catch (e: IllegalAccessException) { + // Check it really is an object (i.e. it has no constructor) + if (constructors.isNotEmpty()) null + else { + try { + this.getDeclaredField("INSTANCE")?.let { field -> + // and must be marked as both static and final (>0 means they're set) + if (modifiers and Modifier.STATIC == 0 || modifiers and Modifier.FINAL == 0) null + else { + val accessibility = field.isAccessible + field.isAccessible = true + val obj = field.get(null) + field.isAccessible = accessibility + obj } - } catch (e: NoSuchFieldException) { - null } + } catch (e: NoSuchFieldException) { + null } } + } +} diff --git a/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/SerializerFactory.kt b/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/SerializerFactory.kt index 5263c2c77f..418a2428c4 100644 --- a/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/SerializerFactory.kt +++ b/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/SerializerFactory.kt @@ -15,6 +15,7 @@ import com.google.common.reflect.TypeResolver import net.corda.core.internal.uncheckedCast import net.corda.core.serialization.ClassWhitelist import net.corda.core.utilities.loggerFor +import net.corda.core.utilities.trace import net.corda.serialization.internal.carpenter.* import org.apache.qpid.proton.amqp.* import java.io.NotSerializableException @@ -64,6 +65,7 @@ open class SerializerFactory( serializersByDescriptor = ConcurrentHashMap(), customSerializers = CopyOnWriteArrayList(), transformsCache = ConcurrentHashMap()) + constructor(whitelist: ClassWhitelist, classLoader: ClassLoader, evolutionSerializerGetter: EvolutionSerializerGetterBase = EvolutionSerializerGetter(), @@ -84,6 +86,8 @@ open class SerializerFactory( private fun getEvolutionSerializer(typeNotation: TypeNotation, newSerializer: AMQPSerializer, schemas: SerializationSchemas) = evolutionSerializerGetter.getEvolutionSerializer(this, typeNotation, newSerializer, schemas) + private val logger = loggerFor() + /** * Look up, and manufacture if necessary, a serializer for the given type. 
* @@ -92,6 +96,9 @@ open class SerializerFactory( */ @Throws(NotSerializableException::class) fun get(actualClass: Class<*>?, declaredType: Type): AMQPSerializer { + // can be useful to enable but will be *extremely* chatty if you do + logger.trace { "Get Serializer for $actualClass ${declaredType.typeName}" } + val declaredClass = declaredType.asClass() ?: throw NotSerializableException( "Declared types of $declaredType are not supported.") @@ -117,10 +124,15 @@ open class SerializerFactory( makeMapSerializer(declaredTypeAmended) } } - Enum::class.java.isAssignableFrom(actualClass - ?: declaredClass) -> serializersByType.computeIfAbsent(actualClass ?: declaredClass) { - whitelist.requireWhitelisted(actualType) - EnumSerializer(actualType, actualClass ?: declaredClass, this) + Enum::class.java.isAssignableFrom(actualClass ?: declaredClass) -> { + logger.debug("class=[${actualClass?.simpleName} | $declaredClass] is an enumeration " + + "declaredType=${declaredType.typeName} " + + "isEnum=${declaredType::class.java.isEnum}") + + serializersByType.computeIfAbsent(actualClass ?: declaredClass) { + whitelist.requireWhitelisted(actualType) + EnumSerializer(actualType, actualClass ?: declaredClass, this) + } } else -> { makeClassSerializer(actualClass ?: declaredClass, actualType, declaredType) @@ -208,6 +220,7 @@ open class SerializerFactory( @Throws(NotSerializableException::class) fun get(typeDescriptor: Any, schema: SerializationSchemas): AMQPSerializer { return serializersByDescriptor[typeDescriptor] ?: { + logger.trace("get Serializer descriptor=${typeDescriptor}") processSchema(FactorySchemaAndDescriptor(schema, typeDescriptor)) serializersByDescriptor[typeDescriptor] ?: throw NotSerializableException( "Could not find type matching descriptor $typeDescriptor.") @@ -242,16 +255,24 @@ open class SerializerFactory( private fun processSchema(schemaAndDescriptor: FactorySchemaAndDescriptor, sentinel: Boolean = false) { val metaSchema = CarpenterMetaSchema.newInstance() for (typeNotation in schemaAndDescriptor.schemas.schema.types) { + logger.trace("descriptor=${schemaAndDescriptor.typeDescriptor}, typeNotation=${typeNotation.name}") try { val serialiser = processSchemaEntry(typeNotation) // if we just successfully built a serializer for the type but the type fingerprint // doesn't match that of the serialised object then we are dealing with different // instance of the class, as such we need to build an EvolutionSerializer if (serialiser.typeDescriptor != typeNotation.descriptor.name) { + logger.info("typeNotation=${typeNotation.name} action=\"requires Evolution\"") getEvolutionSerializer(typeNotation, serialiser, schemaAndDescriptor.schemas) } } catch (e: ClassNotFoundException) { - if (sentinel) throw e + if (sentinel) { + logger.error("typeNotation=${typeNotation.name} error=\"after Carpentry attempt failed to load\"") + throw e + } + else { + logger.info("typeNotation=\"${typeNotation.name}\" action=\"carpentry required\"") + } metaSchema.buildFor(typeNotation, classloader) } } @@ -280,8 +301,16 @@ open class SerializerFactory( } private fun processSchemaEntry(typeNotation: TypeNotation) = when (typeNotation) { - is CompositeType -> processCompositeType(typeNotation) // java.lang.Class (whether a class or interface) - is RestrictedType -> processRestrictedType(typeNotation) // Collection / Map, possibly with generics + // java.lang.Class (whether a class or interface) + is CompositeType -> { + logger.trace("typeNotation=${typeNotation.name} amqpType=CompositeType") + 
processCompositeType(typeNotation) + } + // Collection / Map, possibly with generics + is RestrictedType -> { + logger.trace("typeNotation=${typeNotation.name} amqpType=RestrictedType") + processRestrictedType(typeNotation) + } } // TODO: class loader logic, and compare the schema. @@ -295,6 +324,7 @@ open class SerializerFactory( } private fun makeClassSerializer(clazz: Class<*>, type: Type, declaredType: Type): AMQPSerializer = serializersByType.computeIfAbsent(type) { + logger.debug("class=${clazz.simpleName}, type=$type is a composite type") if (clazz.isSynthetic) { // Explicitly ban synthetic classes, we have no way of recreating them when deserializing. This also // captures Lambda expressions and other anonymous functions diff --git a/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/custom/InputStreamSerializer.kt b/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/custom/InputStreamSerializer.kt index 5dd66d38d5..f38c68903b 100644 --- a/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/custom/InputStreamSerializer.kt +++ b/serialization/src/main/kotlin/net/corda/serialization/internal/amqp/custom/InputStreamSerializer.kt @@ -24,7 +24,15 @@ import java.lang.reflect.Type object InputStreamSerializer : CustomSerializer.Implements(InputStream::class.java) { override val revealSubclassesInSchema: Boolean = true - override val schemaForDocumentation = Schema(listOf(RestrictedType(type.toString(), "", listOf(type.toString()), SerializerFactory.primitiveTypeName(ByteArray::class.java)!!, descriptor, emptyList()))) + override val schemaForDocumentation = Schema( + listOf( + RestrictedType( + type.toString(), + "", + listOf(type.toString()), + SerializerFactory.primitiveTypeName(ByteArray::class.java)!!, + descriptor, + emptyList()))) override fun writeDescribedObject(obj: InputStream, data: Data, type: Type, output: SerializationOutput, context: SerializationContext diff --git a/serialization/src/test/kotlin/net/corda/serialization/internal/amqp/EvolvabilityTests.kt b/serialization/src/test/kotlin/net/corda/serialization/internal/amqp/EvolvabilityTests.kt index c636c15a39..76dab87758 100644 --- a/serialization/src/test/kotlin/net/corda/serialization/internal/amqp/EvolvabilityTests.kt +++ b/serialization/src/test/kotlin/net/corda/serialization/internal/amqp/EvolvabilityTests.kt @@ -18,10 +18,7 @@ import net.corda.core.node.NotaryInfo import net.corda.core.serialization.ConstructorForDeserialization import net.corda.core.serialization.DeprecatedConstructorForDeserialization import net.corda.core.serialization.SerializedBytes -import net.corda.serialization.internal.amqp.testutils.TestSerializationOutput -import net.corda.serialization.internal.amqp.testutils.deserialize -import net.corda.serialization.internal.amqp.testutils.serialize -import net.corda.serialization.internal.amqp.testutils.testDefaultFactory +import net.corda.serialization.internal.amqp.testutils.* import net.corda.testing.common.internal.ProjectStructure.projectRootDir import net.corda.testing.core.DUMMY_NOTARY_NAME import net.corda.testing.core.TestIdentity @@ -32,6 +29,7 @@ import java.io.NotSerializableException import java.net.URI import java.time.Instant import kotlin.test.assertEquals +import net.corda.serialization.internal.amqp.custom.InstantSerializer // To regenerate any of the binary test files do the following // @@ -214,6 +212,86 @@ class EvolvabilityTests { assertEquals("hello", deserializedCC.b) } + @Test + fun 
addMandatoryFieldWithAltConstructorForceReorder() { + val sf = testDefaultFactory() + val z = 30 + val y = 20 + val resource = "EvolvabilityTests.addMandatoryFieldWithAltConstructorForceReorder" + + // Original version of the class as it was serialised + // data class CC(val z: Int, val y: Int) + // File(URI("$localPath/$resource")).writeBytes(SerializationOutput(sf).serialize(CC(z, y)).bytes) + + @Suppress("UNUSED") + data class CC(val z: Int, val y: Int, val a: String) { + @DeprecatedConstructorForDeserialization(1) + constructor (z: Int, y: Int) : this(z, y, "10") + } + + val url = EvolvabilityTests::class.java.getResource(resource) + val deserializedCC = DeserializationInput(sf).deserialize(SerializedBytes(url.readBytes())) + + assertEquals("10", deserializedCC.a) + assertEquals(y, deserializedCC.y) + assertEquals(z, deserializedCC.z) + } + + @Test + fun moreComplexNonNullWithReorder() { + val resource = "${javaClass.simpleName}.${testName()}" + + data class NetworkParametersExample( + val minimumPlatformVersion: Int, + val notaries: List, + val maxMessageSize: Int, + val maxTransactionSize: Int, + val modifiedTime: Instant, + val epoch: Int, + val whitelistedContractImplementations: Map>, + /* to regenerate test class, comment out this element */ + val eventHorizon: Int + ) { + // when regenerating test class this won't be required + @DeprecatedConstructorForDeserialization(1) + @Suppress("UNUSED") + constructor ( + minimumPlatformVersion: Int, + notaries: List, + maxMessageSize: Int, + maxTransactionSize: Int, + modifiedTime: Instant, + epoch: Int, + whitelistedContractImplementations: Map> + ) : this(minimumPlatformVersion, + notaries, + maxMessageSize, + maxTransactionSize, + modifiedTime, + epoch, + whitelistedContractImplementations, + Int.MAX_VALUE) + } + + val factory = testDefaultFactory().apply { + register(InstantSerializer(this)) + } + + // Uncomment to regenerate test case + // File(URI("$localPath/$resource")).writeBytes(SerializationOutput(factory).serialize( + // NetworkParametersExample( + // 10, + // listOf("Notary1", "Notary2"), + // 100, + // 10, + // Instant.now(), + // 9, + // mapOf("A" to listOf(1, 2, 3), "B" to listOf (4, 5, 6)))).bytes) + + val url = EvolvabilityTests::class.java.getResource(resource) + DeserializationInput(factory).deserialize(SerializedBytes(url.readBytes())) + } + @Test(expected = NotSerializableException::class) @Suppress("UNUSED") fun addMandatoryFieldWithAltConstructorUnAnnotated() { @@ -490,7 +568,7 @@ class EvolvabilityTests { // @Test @Ignore("Test fails after moving NetworkParameters and NotaryInfo into core from node-api") - fun readBrokenNetworkParameters(){ + fun readBrokenNetworkParameters() { val sf = testDefaultFactory() sf.register(net.corda.serialization.internal.amqp.custom.InstantSerializer(sf)) sf.register(net.corda.serialization.internal.amqp.custom.PublicKeySerializer) @@ -526,7 +604,7 @@ class EvolvabilityTests { val resource = "networkParams.." 
val DUMMY_NOTARY = TestIdentity(DUMMY_NOTARY_NAME, 20).party val networkParameters = NetworkParameters( - 3, listOf(NotaryInfo(DUMMY_NOTARY, false)),1000, 1000, Instant.EPOCH, 1, emptyMap()) + 3, listOf(NotaryInfo(DUMMY_NOTARY, false)), 1000, 1000, Instant.EPOCH, 1, emptyMap()) val sf = testDefaultFactory() sf.register(net.corda.serialization.internal.amqp.custom.InstantSerializer(sf)) diff --git a/serialization/src/test/kotlin/net/corda/serialization/internal/amqp/StreamTests.kt b/serialization/src/test/kotlin/net/corda/serialization/internal/amqp/StreamTests.kt new file mode 100644 index 0000000000..4ac37e433b --- /dev/null +++ b/serialization/src/test/kotlin/net/corda/serialization/internal/amqp/StreamTests.kt @@ -0,0 +1,53 @@ +package net.corda.serialization.internal.amqp + +import net.corda.core.internal.InputStreamAndHash +import net.corda.serialization.internal.amqp.custom.InputStreamSerializer +import net.corda.serialization.internal.amqp.testutils.TestSerializationOutput +import net.corda.serialization.internal.amqp.testutils.deserialize +import net.corda.serialization.internal.amqp.testutils.testDefaultFactory +import org.junit.Test +import java.io.FilterInputStream +import java.io.InputStream + +class StreamTests { + + private class WrapperStream(input: InputStream) : FilterInputStream(input) + + @Test + fun inputStream() { + val attachment = InputStreamAndHash.createInMemoryTestZip(2116, 1) + val id : InputStream = WrapperStream(attachment.inputStream) + + val serializerFactory = testDefaultFactory().apply { + register(InputStreamSerializer) + } + + val bytes = TestSerializationOutput(true, serializerFactory).serialize(id) + + val deserializerFactory = testDefaultFactory().apply { + register(InputStreamSerializer) + } + + DeserializationInput(serializerFactory).deserialize(bytes) + DeserializationInput(deserializerFactory).deserialize(bytes) + } + + @Test + fun listInputStream() { + val attachment = InputStreamAndHash.createInMemoryTestZip(2116, 1) + val id /* : List */= listOf(WrapperStream(attachment.inputStream)) + + val serializerFactory = testDefaultFactory().apply { + register(InputStreamSerializer) + } + + val bytes = TestSerializationOutput(true, serializerFactory).serialize(id) + + val deserializerFactory = testDefaultFactory().apply { + register(InputStreamSerializer) + } + + DeserializationInput(serializerFactory).deserialize(bytes) + DeserializationInput(deserializerFactory).deserialize(bytes) + } +} \ No newline at end of file diff --git a/serialization/src/test/kotlin/net/corda/serialization/internal/amqp/testutils/AMQPTestUtils.kt b/serialization/src/test/kotlin/net/corda/serialization/internal/amqp/testutils/AMQPTestUtils.kt index 68e4227172..8d78ed61c6 100644 --- a/serialization/src/test/kotlin/net/corda/serialization/internal/amqp/testutils/AMQPTestUtils.kt +++ b/serialization/src/test/kotlin/net/corda/serialization/internal/amqp/testutils/AMQPTestUtils.kt @@ -30,6 +30,15 @@ class TestSerializationOutput( } super.writeTransformSchema(transformsSchema, data) } + + @Throws(NotSerializableException::class) + fun serialize(obj: T): SerializedBytes { + try { + return _serialize(obj, testSerializationContext) + } finally { + andFinally() + } + } } fun testName(): String = Thread.currentThread().stackTrace[2].methodName diff --git a/serialization/src/test/resources/net/corda/serialization/internal/amqp/EvolvabilityTests.addMandatoryFieldWithAltConstructorForceReorder 
b/serialization/src/test/resources/net/corda/serialization/internal/amqp/EvolvabilityTests.addMandatoryFieldWithAltConstructorForceReorder new file mode 100644 index 0000000000..8aa1e1f014 Binary files /dev/null and b/serialization/src/test/resources/net/corda/serialization/internal/amqp/EvolvabilityTests.addMandatoryFieldWithAltConstructorForceReorder differ diff --git a/serialization/src/test/resources/net/corda/serialization/internal/amqp/EvolvabilityTests.moreComplexNonNullWithReorder b/serialization/src/test/resources/net/corda/serialization/internal/amqp/EvolvabilityTests.moreComplexNonNullWithReorder new file mode 100644 index 0000000000..3a01cc6b08 Binary files /dev/null and b/serialization/src/test/resources/net/corda/serialization/internal/amqp/EvolvabilityTests.moreComplexNonNullWithReorder differ diff --git a/settings.gradle b/settings.gradle index 706ce1bc10..bedcf5c4cb 100644 --- a/settings.gradle +++ b/settings.gradle @@ -55,6 +55,7 @@ include 'tools:loadtest' include 'tools:notarytest' include 'tools:graphs' include 'tools:bootstrapper' +include 'tools:blobinspector' include 'tools:dbmigration' include 'tools:shell' include 'example-code' diff --git a/testing/node-driver/src/main/kotlin/net/corda/testing/node/InMemoryMessagingNetwork.kt b/testing/node-driver/src/main/kotlin/net/corda/testing/node/InMemoryMessagingNetwork.kt index d99836853f..c77f72a821 100644 --- a/testing/node-driver/src/main/kotlin/net/corda/testing/node/InMemoryMessagingNetwork.kt +++ b/testing/node-driver/src/main/kotlin/net/corda/testing/node/InMemoryMessagingNetwork.kt @@ -30,6 +30,8 @@ import net.corda.core.utilities.getOrThrow import net.corda.core.utilities.trace import net.corda.node.services.messaging.* import net.corda.node.services.statemachine.DeduplicationId +import net.corda.node.services.statemachine.ExternalEvent +import net.corda.node.services.statemachine.SenderDeduplicationId import net.corda.node.utilities.AffinityExecutor import net.corda.nodeapi.internal.persistence.CordaPersistence import net.corda.testing.node.internal.InMemoryMessage @@ -118,7 +120,7 @@ class InMemoryMessagingNetwork private constructor( get() = _receivedMessages internal val endpoints: List @Synchronized get() = handleEndpointMap.values.toList() /** Get a [List] of all the [MockMessagingService] endpoints **/ - val endpointsExternal: List @Synchronized get() = handleEndpointMap.values.map{ MockMessagingService.createMockMessagingService(it) }.toList() + val endpointsExternal: List @Synchronized get() = handleEndpointMap.values.map { MockMessagingService.createMockMessagingService(it) }.toList() /** * Creates a node at the given address: useful if you want to recreate a node to simulate a restart. @@ -145,7 +147,10 @@ class InMemoryMessagingNetwork private constructor( ?: emptyList() //TODO only notary can be distributed? 
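        // If a node with this peer handle already exists (i.e. this is a simulated restart), the replacement
        // endpoint inherits any messages that were delivered to the old endpoint but never acknowledged, so they
        // can be re-delivered in order, mirroring what Artemis would do for a real node restart.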
synchronized(this) { val node = InMemoryMessaging(manuallyPumped, peerHandle, executor, database) - handleEndpointMap[peerHandle] = node + val oldNode = handleEndpointMap.put(peerHandle, node) + if (oldNode != null) { + node.inheritPendingRedelivery(oldNode) + } serviceHandles.forEach { serviceToPeersMapping.getOrPut(it) { LinkedHashSet() }.add(peerHandle) } @@ -171,7 +176,10 @@ class InMemoryMessagingNetwork private constructor( @Synchronized private fun netNodeHasShutdown(peerHandle: PeerHandle) { - handleEndpointMap.remove(peerHandle) + val endpoint = handleEndpointMap[peerHandle] + if (!(endpoint?.hasPendingDeliveries() ?: false)) { + handleEndpointMap.remove(peerHandle) + } } @Synchronized @@ -276,6 +284,30 @@ class InMemoryMessagingNetwork private constructor( return transfer } + /** + * When a new message handler is added, this implies we have started a new node. The add handler logic uses this to + * push back any un-acknowledged messages for this peer onto the head of the queue (rather than the tail) to maintain message + * delivery order. We push them back because their consumption was not complete and a restarted node would + * see them re-delivered if this was Artemis. + */ + @Synchronized + private fun unPopMessages(transfers: Collection, us: PeerHandle) { + messageReceiveQueues.compute(us) { _, existing -> + if (existing == null) { + LinkedBlockingQueue().apply { + addAll(transfers) + } + } else { + existing.apply { + val drained = mutableListOf() + existing.drainTo(drained) + existing.addAll(transfers) + existing.addAll(drained) + } + } + } + } + private fun pumpSendInternal(transfer: MessageTransfer) { when (transfer.recipients) { is PeerHandle -> getQueueForPeerHandle(transfer.recipients).add(transfer) @@ -348,6 +380,7 @@ class InMemoryMessagingNetwork private constructor( private val processedMessages: MutableSet = Collections.synchronizedSet(HashSet()) override val myAddress: PeerHandle get() = peerHandle + override val ourSenderUUID: String = UUID.randomUUID().toString() private val backgroundThread = if (manuallyPumped) null else thread(isDaemon = true, name = "In-memory message dispatcher") { @@ -380,10 +413,16 @@ class InMemoryMessagingNetwork private constructor( Pair(handler, pending) } - transfers.forEach { pumpSendInternal(it) } + unPopMessages(transfers, peerHandle) return handler } + fun inheritPendingRedelivery(other: InMemoryMessaging) { + state.locked { + pendingRedelivery.addAll(other.state.locked { pendingRedelivery }) + } + } + override fun removeMessageHandler(registration: MessageHandlerRegistration) { check(running) state.locked { check(handlers.remove(registration as Handler)) } @@ -415,8 +454,8 @@ class InMemoryMessagingNetwork private constructor( override fun cancelRedelivery(retryId: Long) {} /** Returns the given (topic & session, data) pair as a newly created message object. 
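     * The [SenderDeduplicationId] supplies both the deduplication id and the sender's session UUID, which are copied onto the created message.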
*/ - override fun createMessage(topic: String, data: ByteArray, deduplicationId: DeduplicationId, additionalHeaders: Map): Message { - return InMemoryMessage(topic, OpaqueBytes(data), deduplicationId) + override fun createMessage(topic: String, data: ByteArray, deduplicationId: SenderDeduplicationId, additionalHeaders: Map): Message { + return InMemoryMessage(topic, OpaqueBytes(data), deduplicationId.deduplicationId, senderUUID = deduplicationId.senderUUID) } /** @@ -480,13 +519,14 @@ class InMemoryMessagingNetwork private constructor( database.transaction { for (handler in deliverTo) { try { - handler.callback(transfer.toReceivedMessage(), handler, DummyDeduplicationHandler()) + val receivedMessage = transfer.toReceivedMessage() + state.locked { pendingRedelivery.add(transfer) } + handler.callback(receivedMessage, handler, InMemoryDeduplicationHandler(receivedMessage, transfer)) } catch (e: Exception) { log.error("Caught exception in handler for $this/${handler.topicSession}", e) } } _receivedMessages.onNext(transfer) - processedMessages += transfer.message.uniqueMessageId messagesInFlight.countDown() } } @@ -503,13 +543,23 @@ class InMemoryMessagingNetwork private constructor( message.uniqueMessageId, message.debugTimestamp, sender.name) - } - private class DummyDeduplicationHandler : DeduplicationHandler { - override fun afterDatabaseTransaction() { - } - override fun insideDatabaseTransaction() { + private inner class InMemoryDeduplicationHandler(override val receivedMessage: ReceivedMessage, val transfer: MessageTransfer) : DeduplicationHandler, ExternalEvent.ExternalMessageEvent { + override val externalCause: ExternalEvent + get() = this + override val deduplicationHandler: DeduplicationHandler + get() = this + + override fun afterDatabaseTransaction() { + this@InMemoryMessaging.state.locked { pendingRedelivery.remove(transfer) } + } + + override fun insideDatabaseTransaction() { + processedMessages += transfer.message.uniqueMessageId + } } + + fun hasPendingDeliveries(): Boolean = state.locked { pendingRedelivery.isNotEmpty() } } } diff --git a/testing/node-driver/src/main/kotlin/net/corda/testing/node/internal/DriverDSLImpl.kt b/testing/node-driver/src/main/kotlin/net/corda/testing/node/internal/DriverDSLImpl.kt index 8d24c56a5f..5041c04e6f 100644 --- a/testing/node-driver/src/main/kotlin/net/corda/testing/node/internal/DriverDSLImpl.kt +++ b/testing/node-driver/src/main/kotlin/net/corda/testing/node/internal/DriverDSLImpl.kt @@ -33,6 +33,7 @@ import net.corda.core.utilities.contextLogger import net.corda.core.utilities.getOrThrow import net.corda.core.utilities.millis import net.corda.node.NodeRegistrationOption +import net.corda.node.internal.ConfigurationException import net.corda.node.internal.Node import net.corda.node.internal.StartedNode import net.corda.node.services.Permissions @@ -195,7 +196,7 @@ class DriverDSLImpl( val registrationFuture = if (compatibilityZone?.rootCert != null) { // We don't need the network map to be available to be able to register the node - startNodeRegistration(name, compatibilityZone.rootCert, compatibilityZone.url) + startNodeRegistration(name, compatibilityZone.rootCert, compatibilityZone.doormanURL()) } else { doneFuture(Unit) } @@ -220,7 +221,15 @@ class DriverDSLImpl( val rpcAdminAddress = portAllocation.nextHostAndPort() val webAddress = portAllocation.nextHostAndPort() val users = rpcUsers.map { it.copy(permissions = it.permissions + DRIVER_REQUIRED_PERMISSIONS) } - val czUrlConfig = if (compatibilityZone != null) 
mapOf("compatibilityZoneURL" to compatibilityZone.url.toString()) else emptyMap() + val czUrlConfig = when (compatibilityZone) { + null -> emptyMap() + is SharedCompatibilityZoneParams -> + mapOf("compatibilityZoneURL" to compatibilityZone.doormanURL().toString()) + is SplitCompatibilityZoneParams -> + mapOf("networkServices.doormanURL" to compatibilityZone.doormanURL().toString(), + "networkServices.networkMapURL" to compatibilityZone.networkMapURL().toString()) + } + val overrides = configOf( "myLegalName" to name.toString(), "p2pAddress" to p2pAddress.toString(), @@ -423,7 +432,7 @@ class DriverDSLImpl( startNotaryIdentityGeneration() } else { // With a root cert specified we delegate generation of the notary identities to the CZ. - startAllNotaryRegistrations(compatibilityZone.rootCert, compatibilityZone.url) + startAllNotaryRegistrations(compatibilityZone.rootCert, compatibilityZone.doormanURL()) } notaryInfosFuture.map { notaryInfos -> compatibilityZone.publishNotaries(notaryInfos) @@ -513,7 +522,7 @@ class DriverDSLImpl( private fun startNotaries(localNetworkMap: LocalNetworkMap?, customOverrides: Map): List>> { return notarySpecs.map { when (it.cluster) { - null -> startSingleNotary(it, localNetworkMap, customOverrides ) + null -> startSingleNotary(it, localNetworkMap, customOverrides) is ClusterSpec.Raft, // DummyCluster is used for testing the notary communication path, and it does not matter // which underlying consensus algorithm is used, so we just stick to Raft @@ -877,7 +886,8 @@ class DriverDSLImpl( val index = stackTrace.indexOfLast { it.className == "net.corda.testing.driver.Driver" } // In this case we're dealing with the the RPCDriver or one of it's cousins which are internal and we don't care about them if (index == -1) return emptyList() - val callerPackage = Class.forName(stackTrace[index + 1].className).`package` ?: throw IllegalStateException("Function instantiating driver must be defined in a package.") + val callerPackage = Class.forName(stackTrace[index + 1].className).`package` + ?: throw IllegalStateException("Function instantiating driver must be defined in a package.") return listOf(callerPackage.name) } @@ -1064,15 +1074,49 @@ fun genericDriver( /** * Internal API to enable testing of the network map service and node registration process using the internal driver. - * @property url The base CZ URL for registration and network map updates + * * @property publishNotaries Hook for a network map server to capture the generated [NotaryInfo] objects needed for * creating the network parameters. This is needed as the network map server is expected to distribute it. The callback * will occur on a different thread to the driver-calling thread. * @property rootCert If specified then the nodes will register themselves with the doorman service using [url] and expect * the registration response to be rooted at this cert. If not specified then no registration is performed and the dev * root cert is used as normal. + * + * @see SharedCompatibilityZoneParams + * @see SplitCompatibilityZoneParams */ -data class CompatibilityZoneParams(val url: URL, val publishNotaries: (List) -> Unit, val rootCert: X509Certificate? = null) +sealed class CompatibilityZoneParams( + val publishNotaries: (List) -> Unit, + val rootCert: X509Certificate? 
+) {
+    abstract fun networkMapURL(): URL
+    abstract fun doormanURL(): URL
+}
+
+/**
+ * Represents the network management services (network map and doorman) running on the same URL.
+ */
+class SharedCompatibilityZoneParams(
+        private val url: URL,
+        publishNotaries: (List<NotaryInfo>) -> Unit,
+        rootCert: X509Certificate? = null
+) : CompatibilityZoneParams(publishNotaries, rootCert) {
+    override fun doormanURL() = url
+    override fun networkMapURL() = url
+}
+
+/**
+ * Represents the network management services (network map and doorman) running on different URLs.
+ */
+class SplitCompatibilityZoneParams(
+        private val doormanURL: URL,
+        private val networkMapURL: URL,
+        publishNotaries: (List<NotaryInfo>) -> Unit,
+        rootCert: X509Certificate? = null
+) : CompatibilityZoneParams(publishNotaries, rootCert) {
+    override fun doormanURL() = doormanURL
+    override fun networkMapURL() = networkMapURL
+}
 
 fun internalDriver(
         isDebug: Boolean = DriverParameters().isDebug,
diff --git a/testing/node-driver/src/main/kotlin/net/corda/testing/node/internal/InternalMockNetwork.kt b/testing/node-driver/src/main/kotlin/net/corda/testing/node/internal/InternalMockNetwork.kt
index 47918879a1..94acec8715 100644
--- a/testing/node-driver/src/main/kotlin/net/corda/testing/node/internal/InternalMockNetwork.kt
+++ b/testing/node-driver/src/main/kotlin/net/corda/testing/node/internal/InternalMockNetwork.kt
@@ -477,6 +477,7 @@ private fun mockNodeConfiguration(): NodeConfiguration {
         doReturn(null).whenever(it).jmxMonitoringHttpPort
         doReturn(true).whenever(it).devMode
         doReturn(null).whenever(it).compatibilityZoneURL
+        doReturn(null).whenever(it).networkServices
         doReturn(VerifierType.InMemory).whenever(it).verifierType
         doReturn(P2PMessagingRetryConfiguration(5.seconds, 3, backoffBase = 1.0)).whenever(it).p2pMessagingRetry
         doReturn(5.seconds.toMillis()).whenever(it).additionalNodeInfoPollingFrequencyMsec
diff --git a/tools/blobinspector/build.gradle b/tools/blobinspector/build.gradle
new file mode 100644
index 0000000000..5d49461034
--- /dev/null
+++ b/tools/blobinspector/build.gradle
@@ -0,0 +1,27 @@
+apply plugin: 'java'
+apply plugin: 'kotlin'
+
+dependencies {
+    compile project(':client:jackson')
+    compile 'info.picocli:picocli:3.0.0'
+    compile "org.slf4j:slf4j-nop:$slf4j_version"
+    compile "com.jcabi:jcabi-manifests:$jcabi_manifests_version"
+
+    testCompile project(':test-utils')
+    testCompile "junit:junit:$junit_version"
+}
+
+jar {
+    from(configurations.compile.collect { it.isDirectory() ?
it : zipTree(it) }) { + exclude "META-INF/*.SF" + exclude "META-INF/*.DSA" + exclude "META-INF/*.RSA" + } + baseName 'blobinspector' + manifest { + attributes( + 'Automatic-Module-Name': 'net.corda.blobinspector', + 'Main-Class': 'net.corda.blobinspector.MainKt' + ) + } +} diff --git a/tools/blobinspector/src/main/kotlin/net/corda/blobinspector/Main.kt b/tools/blobinspector/src/main/kotlin/net/corda/blobinspector/Main.kt new file mode 100644 index 0000000000..e2bde0639e --- /dev/null +++ b/tools/blobinspector/src/main/kotlin/net/corda/blobinspector/Main.kt @@ -0,0 +1,125 @@ +package net.corda.blobinspector + +import com.fasterxml.jackson.core.JsonFactory +import com.fasterxml.jackson.dataformat.yaml.YAMLFactory +import com.jcabi.manifests.Manifests +import net.corda.client.jackson.JacksonSupport +import net.corda.core.internal.rootMessage +import net.corda.core.serialization.SerializationContext +import net.corda.core.serialization.deserialize +import net.corda.core.serialization.internal.SerializationEnvironmentImpl +import net.corda.core.serialization.internal._contextSerializationEnv +import net.corda.core.utilities.sequence +import net.corda.serialization.internal.AMQP_P2P_CONTEXT +import net.corda.serialization.internal.CordaSerializationMagic +import net.corda.serialization.internal.SerializationFactoryImpl +import net.corda.serialization.internal.amqp.AbstractAMQPSerializationScheme +import net.corda.serialization.internal.amqp.DeserializationInput +import net.corda.serialization.internal.amqp.amqpMagic +import picocli.CommandLine +import picocli.CommandLine.* +import java.net.MalformedURLException +import java.net.URL +import java.nio.file.Paths +import kotlin.system.exitProcess + +fun main(args: Array) { + val main = Main() + try { + CommandLine.run(main, *args) + } catch (e: ExecutionException) { + val throwable = e.cause ?: e + if (main.verbose) { + throwable.printStackTrace() + } else { + System.err.println("*ERROR*: ${throwable.rootMessage ?: "Use --verbose for more details"}") + } + exitProcess(1) + } +} + +@Command( + name = "Blob Inspector", + versionProvider = VersionProvider::class, + mixinStandardHelpOptions = true, // add --help and --version options, + showDefaultValues = true, + description = ["Inspect AMQP serialised binary blobs"] +) +class Main : Runnable { + @Parameters(index = "0", paramLabel = "SOURCE", description = ["URL or file path to the blob"], converter = [SourceConverter::class]) + private var source: URL? = null + + @Option(names = ["--format"], paramLabel = "type", description = ["Output format. 
Possible values: [YAML, JSON]"]) + private var formatType: FormatType = FormatType.YAML + + @Option(names = ["--full-parties"], + description = ["Display the owningKey and certPath properties of Party and PartyAndReference objects respectively"]) + private var fullParties: Boolean = false + + @Option(names = ["--schema"], description = ["Print the blob's schema first"]) + private var schema: Boolean = false + + @Option(names = ["--verbose"], description = ["Enable verbose output"]) + var verbose: Boolean = false + + override fun run() { + val bytes = source!!.readBytes().run { + require(size > amqpMagic.size) { "Insufficient bytes for AMQP blob" } + sequence() + } + + require(bytes.take(amqpMagic.size) == amqpMagic) { "Not an AMQP blob" } + + if (schema) { + val envelope = DeserializationInput.getEnvelope(bytes) + println(envelope.schema) + println() + } + + initialiseSerialization() + + val factory = when (formatType) { + FormatType.YAML -> YAMLFactory() + FormatType.JSON -> JsonFactory() + } + val mapper = JacksonSupport.createNonRpcMapper(factory, fullParties) + + val deserialized = bytes.deserialize() + println(deserialized.javaClass.name) + mapper.writeValue(System.out, deserialized) + } + + private fun initialiseSerialization() { + _contextSerializationEnv.set(SerializationEnvironmentImpl( + SerializationFactoryImpl().apply { + registerScheme(AMQPInspectorSerializationScheme) + }, + AMQP_P2P_CONTEXT + )) + } +} + +private object AMQPInspectorSerializationScheme : AbstractAMQPSerializationScheme(emptyList()) { + override fun canDeserializeVersion(magic: CordaSerializationMagic, target: SerializationContext.UseCase): Boolean { + return magic == amqpMagic && target == SerializationContext.UseCase.P2P + } + override fun rpcClientSerializerFactory(context: SerializationContext) = throw UnsupportedOperationException() + override fun rpcServerSerializerFactory(context: SerializationContext) = throw UnsupportedOperationException() +} + +private class SourceConverter : ITypeConverter { + override fun convert(value: String): URL { + return try { + URL(value) + } catch (e: MalformedURLException) { + Paths.get(value).toUri().toURL() + } + } +} + +private class VersionProvider : IVersionProvider { + override fun getVersion(): Array = arrayOf(Manifests.read("Corda-Release-Version")) +} + +private enum class FormatType { YAML, JSON } +
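
Two usage notes on the new pieces above. The blob inspector builds as a fat jar (the jar block in tools/blobinspector/build.gradle sets baseName 'blobinspector' and the Main-Class manifest entry), so a typical invocation would be along the lines of "java -jar blobinspector.jar --schema --format=JSON --full-parties <file or URL>"; the jar file name here is illustrative. For the compatibility-zone parameters added to DriverDSLImpl, the following is a minimal construction sketch: the URLs and the publishNotaries body are placeholder assumptions, and wiring the object into the internal driver is unchanged apart from choosing one of the two subclasses.

import java.net.URL
import net.corda.core.node.NotaryInfo
import net.corda.testing.node.internal.SharedCompatibilityZoneParams
import net.corda.testing.node.internal.SplitCompatibilityZoneParams

// Minimal sketch of constructing the two CompatibilityZoneParams variants. URLs are placeholders.
fun compatibilityZoneParamsSketch() {
    val publishNotaries: (List<NotaryInfo>) -> Unit = { notaryInfos ->
        // A test network map server would capture notaryInfos here to build the network parameters.
    }

    // Doorman and network map served from the same base URL (the previous, single-URL behaviour):
    val shared = SharedCompatibilityZoneParams(URL("http://localhost:10000"), publishNotaries)

    // Doorman and network map served from different URLs (the case this change enables); the node
    // config then receives networkServices.doormanURL / networkServices.networkMapURL overrides:
    val split = SplitCompatibilityZoneParams(
            doormanURL = URL("http://localhost:10000"),
            networkMapURL = URL("http://localhost:10001"),
            publishNotaries = publishNotaries
    )

    check(shared.doormanURL() == shared.networkMapURL())
    check(split.doormanURL() != split.networkMapURL())
}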