Mirror of https://github.com/corda/corda.git (synced 2025-03-14 08:16:32 +00:00)

Merge pull request #881 from corda/parkri-os-merge-20180525-1: OS -> ENT merge

Commit 687b6080af

.idea/compiler.xml (generated, 4 lines changed):
@@ -98,8 +98,6 @@
<module name="experimental-behave_main" target="1.8" />
<module name="experimental-behave_smokeTest" target="1.8" />
<module name="experimental-behave_test" target="1.8" />
<module name="experimental-blobinspector_main" target="1.8" />
<module name="experimental-blobinspector_test" target="1.8" />
<module name="experimental-kryo-hook_main" target="1.8" />
<module name="experimental-kryo-hook_test" target="1.8" />
<module name="experimental_main" target="1.8" />
@@ -235,6 +233,8 @@
<module name="testing-test-utils_test" target="1.8" />
<module name="testing_main" target="1.8" />
<module name="testing_test" target="1.8" />
<module name="tools-blobinspector_main" target="1.8" />
<module name="tools-blobinspector_test" target="1.8" />
<module name="tools_main" target="1.8" />
<module name="tools_test" target="1.8" />
<module name="trader-demo_integrationTest" target="1.8" />

@@ -96,6 +96,7 @@ buildscript {
ext.commons_cli_version = '1.4'
ext.snappy_version = '0.4'
ext.fast_classpath_scanner_version = '2.12.3'
ext.jcabi_manifests_version = '1.1'

// Update 121 is required for ObjectInputFilter and at time of writing 131 was latest:
ext.java8_minUpdateVersion = '131'

@@ -16,11 +16,8 @@ apply plugin: 'com.jfrog.artifactory'

dependencies {
compile project(':serialization')
testCompile project(':test-utils')

compile "org.jetbrains.kotlin:kotlin-stdlib-jdk8:$kotlin_version"
testCompile "org.jetbrains.kotlin:kotlin-test:$kotlin_version"

// Jackson and its plugins: parsing to/from JSON and other textual formats.
compile "com.fasterxml.jackson.module:jackson-module-kotlin:$jackson_version"
// Yaml is useful for parsing strings to method calls.
@@ -29,7 +26,9 @@ dependencies {
compile "com.fasterxml.jackson.datatype:jackson-datatype-jsr310:$jackson_version"
compile "com.google.guava:guava:$guava_version"

testCompile project(':test-utils')
testCompile project(path: ':core', configuration: 'testArtifacts')
testCompile "org.jetbrains.kotlin:kotlin-test:$kotlin_version"
testCompile "junit:junit:$junit_version"
}

@@ -31,10 +31,7 @@ import net.corda.core.contracts.Amount
import net.corda.core.contracts.ContractState
import net.corda.core.contracts.StateRef
import net.corda.core.crypto.*
import net.corda.core.identity.AbstractParty
import net.corda.core.identity.AnonymousParty
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
import net.corda.core.identity.*
import net.corda.core.internal.CertRole
import net.corda.core.internal.VisibleForTesting
import net.corda.core.internal.uncheckedCast
@@ -65,12 +62,13 @@ import javax.security.auth.x500.X500Principal
*
* Note that Jackson can also be used to serialise/deserialise other formats such as Yaml and XML.
*/
@Suppress("DEPRECATION")
@Suppress("DEPRECATION", "MemberVisibilityCanBePrivate")
object JacksonSupport {
// If you change this API please update the docs in the docsite (json.rst)

@DoNotImplement
interface PartyObjectMapper {
val isFullParties: Boolean
fun wellKnownPartyFromX500Name(name: CordaX500Name): Party?
fun partyFromKey(owningKey: PublicKey): Party?
fun partiesFromName(query: String): Set<Party>
@@ -78,9 +76,11 @@ object JacksonSupport {
}

@Deprecated("This is an internal class, do not use", replaceWith = ReplaceWith("JacksonSupport.createDefaultMapper"))
class RpcObjectMapper(val rpc: CordaRPCOps,
factory: JsonFactory,
val fuzzyIdentityMatch: Boolean) : PartyObjectMapper, ObjectMapper(factory) {
class RpcObjectMapper
@JvmOverloads constructor(val rpc: CordaRPCOps,
factory: JsonFactory,
val fuzzyIdentityMatch: Boolean,
override val isFullParties: Boolean = false) : PartyObjectMapper, ObjectMapper(factory) {
override fun wellKnownPartyFromX500Name(name: CordaX500Name): Party? = rpc.wellKnownPartyFromX500Name(name)
override fun partyFromKey(owningKey: PublicKey): Party? = rpc.partyFromKey(owningKey)
override fun partiesFromName(query: String) = rpc.partiesFromName(query, fuzzyIdentityMatch)
@@ -88,9 +88,11 @@ object JacksonSupport {
}

@Deprecated("This is an internal class, do not use")
class IdentityObjectMapper(val identityService: IdentityService,
factory: JsonFactory,
val fuzzyIdentityMatch: Boolean) : PartyObjectMapper, ObjectMapper(factory) {
class IdentityObjectMapper
@JvmOverloads constructor(val identityService: IdentityService,
factory: JsonFactory,
val fuzzyIdentityMatch: Boolean,
override val isFullParties: Boolean = false) : PartyObjectMapper, ObjectMapper(factory) {
override fun wellKnownPartyFromX500Name(name: CordaX500Name): Party? = identityService.wellKnownPartyFromX500Name(name)
override fun partyFromKey(owningKey: PublicKey): Party? = identityService.partyFromKey(owningKey)
override fun partiesFromName(query: String) = identityService.partiesFromName(query, fuzzyIdentityMatch)
@@ -98,7 +100,9 @@ object JacksonSupport {
}

@Deprecated("This is an internal class, do not use", replaceWith = ReplaceWith("JacksonSupport.createNonRpcMapper"))
class NoPartyObjectMapper(factory: JsonFactory) : PartyObjectMapper, ObjectMapper(factory) {
class NoPartyObjectMapper
@JvmOverloads constructor(factory: JsonFactory,
override val isFullParties: Boolean = false) : PartyObjectMapper, ObjectMapper(factory) {
override fun wellKnownPartyFromX500Name(name: CordaX500Name): Party? = null
override fun partyFromKey(owningKey: PublicKey): Party? = null
override fun partiesFromName(query: String): Set<Party> = emptySet()
@@ -112,22 +116,33 @@ object JacksonSupport {
/**
* Creates a Jackson ObjectMapper that uses RPC to deserialise parties from string names.
*
* If [fuzzyIdentityMatch] is false, fields mapped to [Party] objects must be in X.500 name form and precisely
* @param fuzzyIdentityMatch If false, fields mapped to [Party] objects must be in X.500 name form and precisely
* match an identity known from the network map. If true, the name is matched more leniently but if the match
* is ambiguous a [JsonParseException] is thrown.
*
* @param fullParties If true then [Party] objects will be serialised as JSON objects, with the owning key serialised
* in addition to the name. For [PartyAndCertificate] objects the cert path will be included.
*/
@JvmStatic
@JvmOverloads
fun createDefaultMapper(rpc: CordaRPCOps,
factory: JsonFactory = JsonFactory(),
fuzzyIdentityMatch: Boolean = false): ObjectMapper {
return configureMapper(RpcObjectMapper(rpc, factory, fuzzyIdentityMatch))
fuzzyIdentityMatch: Boolean = false,
fullParties: Boolean = false): ObjectMapper {
return configureMapper(RpcObjectMapper(rpc, factory, fuzzyIdentityMatch, fullParties))
}

/** For testing or situations where deserialising parties is not required */
/**
* For testing or situations where deserialising parties is not required
*
* @param fullParties If true then [Party] objects will be serialised as JSON objects, with the owning key serialised
* in addition to the name. For [PartyAndCertificate] objects the cert path will be included.
*/
@JvmStatic
@JvmOverloads
fun createNonRpcMapper(factory: JsonFactory = JsonFactory()): ObjectMapper = configureMapper(NoPartyObjectMapper(factory))
fun createNonRpcMapper(factory: JsonFactory = JsonFactory(), fullParties: Boolean = false): ObjectMapper {
return configureMapper(NoPartyObjectMapper(factory, fullParties))
}
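
An illustrative sketch, not part of this diff, of how a caller might use the new ``fullParties`` flag; the ``rpc`` handle and ``party`` value are assumed to be available from the surrounding code:

    import net.corda.client.jackson.JacksonSupport
    import net.corda.core.identity.Party
    import net.corda.core.messaging.CordaRPCOps

    fun printParty(rpc: CordaRPCOps, party: Party) {
        // Default behaviour: a Party is serialised as its X.500 name string.
        val nameOnlyMapper = JacksonSupport.createDefaultMapper(rpc)
        println(nameOnlyMapper.writeValueAsString(party))
        // With fullParties = true a Party becomes a JSON object carrying both name and owningKey.
        val fullPartyMapper = JacksonSupport.createDefaultMapper(rpc, fullParties = true)
        println(fullPartyMapper.writeValueAsString(party))
    }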

/**
* Creates a Jackson ObjectMapper that uses an [IdentityService] directly inside the node to deserialise parties from string names.
@@ -207,7 +222,14 @@ object JacksonSupport {
.filter { Modifier.isStatic(it.modifiers) && it.type == KeyPurposeId::class.java }
.associateBy({ (it.get(null) as KeyPurposeId).id }, { it.name })

val knownExtensions = setOf("2.5.29.15", "2.5.29.37", "2.5.29.19", "2.5.29.17", "2.5.29.18", CordaOID.X509_EXTENSION_CORDA_ROLE)
val knownExtensions = setOf(
"2.5.29.15",
"2.5.29.17",
"2.5.29.18",
"2.5.29.19",
"2.5.29.37",
CordaOID.X509_EXTENSION_CORDA_ROLE
)

override fun serialize(value: X509Certificate, gen: JsonGenerator, serializers: SerializerProvider) {
gen.jsonObject {
@@ -218,17 +240,20 @@
writeObjectField("issuer", value.issuerX500Principal)
writeObjectField("notBefore", value.notBefore)
writeObjectField("notAfter", value.notAfter)
writeObjectField("cordaCertRole", CertRole.extract(value))
writeObjectField("issuerUniqueID", value.issuerUniqueID)
writeObjectField("subjectUniqueID", value.subjectUniqueID)
writeObjectField("keyUsage", value.keyUsage?.asList()?.mapIndexedNotNull { i, flag -> if (flag) keyUsages[i] else null })
writeObjectField("extendedKeyUsage", value.extendedKeyUsage.map { keyPurposeIds.getOrDefault(it, it) })
jsonObject("basicConstraints") {
writeBooleanField("isCA", value.basicConstraints != -1)
writeObjectField("pathLength", value.basicConstraints.let { if (it != Int.MAX_VALUE) it else null })
val isCa = value.basicConstraints != -1
writeBooleanField("isCA", isCa)
if (isCa) {
writeObjectField("pathLength", value.basicConstraints.let { if (it != Int.MAX_VALUE) it else null })
}
}
writeObjectField("subjectAlternativeNames", value.subjectAlternativeNames)
writeObjectField("issuerAlternativeNames", value.issuerAlternativeNames)
writeObjectField("cordaCertRole", CertRole.extract(value))
writeObjectField("otherCriticalExtensions", value.criticalExtensionOIDs - knownExtensions)
writeObjectField("otherNonCriticalExtensions", value.nonCriticalExtensionOIDs - knownExtensions)
writeBinaryField("encoded", value.encoded)
@@ -239,8 +264,12 @@
private class X509CertificateDeserializer : JsonDeserializer<X509Certificate>() {
private val certFactory = CertificateFactory.getInstance("X.509")
override fun deserialize(parser: JsonParser, ctxt: DeserializationContext): X509Certificate {
val encoded = parser.readValueAsTree<ObjectNode>()["encoded"]
return certFactory.generateCertificate(encoded.binaryValue().inputStream()) as X509Certificate
val encoded = if (parser.currentToken == JsonToken.START_OBJECT) {
parser.readValueAsTree<ObjectNode>()["encoded"].binaryValue()
} else {
parser.binaryValue
}
return certFactory.generateCertificate(encoded.inputStream()) as X509Certificate
}
}
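
Building on the serializer and deserializer above, and on the tests further down this diff, a certificate round-trip could look like the following sketch; ``mapper`` is assumed to be an ObjectMapper configured by ``JacksonSupport`` and ``cert`` an ``X509Certificate``:

    val json = mapper.valueToTree<ObjectNode>(cert)
    val fromObjectForm = mapper.convertValue<X509Certificate>(json)                      // recovered via the "encoded" field
    val fromBinaryForm = mapper.convertValue<X509Certificate>(BinaryNode(cert.encoded))  // raw binary node now also accepted
    check(fromObjectForm == cert && fromBinaryForm == cert)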

@@ -284,9 +313,13 @@ object JacksonSupport {

@Deprecated("This is an internal class, do not use")
object PartySerializer : JsonSerializer<Party>() {
override fun serialize(value: Party, generator: JsonGenerator, provider: SerializerProvider) {
// TODO Add configurable option to output this as an object which includes the owningKey
generator.writeObject(value.name)
override fun serialize(value: Party, gen: JsonGenerator, provider: SerializerProvider) {
val mapper = gen.codec as PartyObjectMapper
if (mapper.isFullParties) {
gen.writeObject(PartyAnalogue(value.name, value.owningKey))
} else {
gen.writeObject(value.name)
}
}
}

@@ -294,28 +327,39 @@
object PartyDeserializer : JsonDeserializer<Party>() {
override fun deserialize(parser: JsonParser, context: DeserializationContext): Party {
val mapper = parser.codec as PartyObjectMapper
// The comma character is invalid in Base58, and required as a separator for X.500 names. As Corda
// X.500 names all involve at least three attributes (organisation, locality, country), they must
// include a comma. As such we can use it as a distinguisher between the two types.
return if ("," in parser.text) {
val principal = CordaX500Name.parse(parser.text)
mapper.wellKnownPartyFromX500Name(principal) ?: throw JsonParseException(parser, "Could not find a Party with name $principal")
return if (parser.currentToken == JsonToken.START_OBJECT) {
val analogue = parser.readValueAs<PartyAnalogue>()
Party(analogue.name, analogue.owningKey)
} else {
val nameMatches = mapper.partiesFromName(parser.text)
when {
nameMatches.isEmpty() -> {
val publicKey = parser.readValueAs<PublicKey>()
mapper.partyFromKey(publicKey)
?: throw JsonParseException(parser, "Could not find a Party with key ${publicKey.toStringShort()}")
}
nameMatches.size == 1 -> nameMatches.first()
else -> throw JsonParseException(parser, "Ambiguous name match '${parser.text}': could be any of " +
nameMatches.map { it.name }.joinToString(" ... or ... "))
// The comma character is invalid in Base58, and required as a separator for X.500 names. As Corda
// X.500 names all involve at least three attributes (organisation, locality, country), they must
// include a comma. As such we can use it as a distinguisher between the two types.
if ("," in parser.text) {
val principal = CordaX500Name.parse(parser.text)
mapper.wellKnownPartyFromX500Name(principal) ?: throw JsonParseException(parser, "Could not find a Party with name $principal")
} else {
lookupByNameSegment(mapper, parser)
}
}
}

private fun lookupByNameSegment(mapper: PartyObjectMapper, parser: JsonParser): Party {
val nameMatches = mapper.partiesFromName(parser.text)
return when {
nameMatches.isEmpty() -> {
val publicKey = parser.readValueAs<PublicKey>()
mapper.partyFromKey(publicKey)
?: throw JsonParseException(parser, "Could not find a Party with key ${publicKey.toStringShort()}")
}
nameMatches.size == 1 -> nameMatches.first()
else -> throw JsonParseException(parser, "Ambiguous name match '${parser.text}': could be any of " +
nameMatches.map { it.name }.joinToString(" ... or ... "))
}
}
}

private class PartyAnalogue(val name: CordaX500Name, val owningKey: PublicKey)
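
Summarising the dispatch logic above, and mirroring the tests later in this diff, the deserializer now accepts four input shapes; this sketch assumes ``mapper`` is a ``JacksonSupport``-configured ObjectMapper and ``MINI_CORP`` a test identity:

    val byFullName = mapper.convertValue<Party>(TextNode(MINI_CORP.name.toString()))        // contains a comma: X.500 name lookup
    val byNamePart = mapper.convertValue<Party>(TextNode(MINI_CORP.name.organisation))      // no comma: fuzzy name match
    val byKey = mapper.convertValue<Party>(TextNode(MINI_CORP.publicKey.toBase58String()))  // no name match: owning key lookup
    val byObject = mapper.convertValue<Party>(mapOf("name" to MINI_CORP.name, "owningKey" to MINI_CORP.publicKey))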

@Deprecated("This is an internal class, do not use")
object CordaX500NameDeserializer : JsonDeserializer<CordaX500Name>() {
override fun deserialize(parser: JsonParser, context: DeserializationContext): CordaX500Name {

@@ -29,9 +29,10 @@ import net.corda.core.utilities.NetworkHostAndPort
import net.corda.serialization.internal.AllWhitelist
import net.corda.serialization.internal.amqp.SerializerFactory
import net.corda.serialization.internal.amqp.constructorForDeserialization
import net.corda.serialization.internal.amqp.createSerializerFactoryFactory
import net.corda.serialization.internal.amqp.hasCordaSerializable
import net.corda.serialization.internal.amqp.propertiesForSerialization
import java.security.PublicKey
import java.security.cert.CertPath

class CordaModule : SimpleModule("corda-core") {
override fun setupModule(context: SetupContext) {
@@ -39,7 +40,7 @@ class CordaModule : SimpleModule("corda-core") {

context.addBeanSerializerModifier(CordaSerializableBeanSerializerModifier())

context.setMixInAnnotations(PartyAndCertificate::class.java, PartyAndCertificateSerializerMixin::class.java)
context.setMixInAnnotations(PartyAndCertificate::class.java, PartyAndCertificateMixin::class.java)
context.setMixInAnnotations(NetworkHostAndPort::class.java, NetworkHostAndPortMixin::class.java)
context.setMixInAnnotations(CordaX500Name::class.java, CordaX500NameMixin::class.java)
context.setMixInAnnotations(Amount::class.java, AmountMixin::class.java)
@@ -53,7 +54,7 @@ class CordaModule : SimpleModule("corda-core") {
context.setMixInAnnotations(DigitalSignature.WithKey::class.java, ByteSequenceWithPropertiesMixin::class.java)
context.setMixInAnnotations(DigitalSignatureWithCert::class.java, ByteSequenceWithPropertiesMixin::class.java)
context.setMixInAnnotations(TransactionSignature::class.java, ByteSequenceWithPropertiesMixin::class.java)
context.setMixInAnnotations(SignedTransaction::class.java, SignedTransactionMixin2::class.java)
context.setMixInAnnotations(SignedTransaction::class.java, SignedTransactionMixin::class.java)
context.setMixInAnnotations(WireTransaction::class.java, JacksonSupport.WireTransactionMixin::class.java)
context.setMixInAnnotations(NodeInfo::class.java, NodeInfoMixin::class.java)
}
@@ -69,12 +70,15 @@ private class CordaSerializableBeanSerializerModifier : BeanSerializerModifier()
override fun changeProperties(config: SerializationConfig,
beanDesc: BeanDescription,
beanProperties: MutableList<BeanPropertyWriter>): MutableList<BeanPropertyWriter> {
// TODO We're assuming here that Jackson gives us a superset of all the properties. Either confirm this or
// make sure the returned beanProperties are exactly the AMQP properties
if (beanDesc.beanClass.isAnnotationPresent(CordaSerializable::class.java)) {
if (hasCordaSerializable(beanDesc.beanClass)) {
val ctor = constructorForDeserialization(beanDesc.beanClass)
val amqpProperties = propertiesForSerialization(ctor, beanDesc.beanClass, serializerFactory).serializationOrder
beanProperties.removeIf { bean -> amqpProperties.none { amqp -> amqp.serializer.name == bean.name } }
val amqpProperties = propertiesForSerialization(ctor, beanDesc.beanClass, serializerFactory)
.serializationOrder
.map { it.serializer.name }
beanProperties.removeIf { it.name !in amqpProperties }
(amqpProperties - beanProperties.map { it.name }).let {
check(it.isEmpty()) { "Jackson didn't provide serialisers for $it" }
}
}
return beanProperties
}
@@ -85,26 +89,31 @@ private class CordaSerializableBeanSerializerModifier : BeanSerializerModifier()
private interface NetworkHostAndPortMixin

private class NetworkHostAndPortDeserializer : JsonDeserializer<NetworkHostAndPort>() {
override fun deserialize(parser: JsonParser, ctxt: DeserializationContext) = NetworkHostAndPort.parse(parser.text)
override fun deserialize(parser: JsonParser, ctxt: DeserializationContext): NetworkHostAndPort {
return NetworkHostAndPort.parse(parser.text)
}
}

@JsonSerialize(using = PartyAndCertificateSerializer::class)
// TODO Add deserialization which follows the same lookup logic as Party
private interface PartyAndCertificateSerializerMixin
private interface PartyAndCertificateMixin

private class PartyAndCertificateSerializer : JsonSerializer<PartyAndCertificate>() {
override fun serialize(value: PartyAndCertificate, gen: JsonGenerator, serializers: SerializerProvider) {
gen.jsonObject {
writeObjectField("name", value.name)
writeObjectField("owningKey", value.owningKey)
// TODO Add configurable option to output the certPath
val mapper = gen.codec as JacksonSupport.PartyObjectMapper
if (mapper.isFullParties) {
gen.writeObject(PartyAndCertificateWrapper(value.name, value.certPath))
} else {
gen.writeObject(value.party)
}
}
}

private class PartyAndCertificateWrapper(val name: CordaX500Name, val certPath: CertPath)

@JsonSerialize(using = SignedTransactionSerializer::class)
@JsonDeserialize(using = SignedTransactionDeserializer::class)
private interface SignedTransactionMixin2
private interface SignedTransactionMixin

private class SignedTransactionSerializer : JsonSerializer<SignedTransaction>() {
override fun serialize(value: SignedTransaction, gen: JsonGenerator, serializers: SerializerProvider) {

@@ -24,10 +24,7 @@ import net.corda.core.contracts.Amount
import net.corda.core.cordapp.CordappProvider
import net.corda.core.crypto.*
import net.corda.core.crypto.CompositeKey
import net.corda.core.identity.AbstractParty
import net.corda.core.identity.AnonymousParty
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
import net.corda.core.identity.*
import net.corda.core.internal.DigitalSignatureWithCert
import net.corda.core.node.NodeInfo
import net.corda.core.node.ServiceHub
@@ -257,10 +254,20 @@ class JacksonSupportTest(@Suppress("unused") private val name: String, factory:
assertThat(json.textValue()).isEqualTo(MINI_CORP.name.toString())
}

@Test
fun `Party serialization with isFullParty = true`() {
partyObjectMapper.isFullParties = true
val json = mapper.valueToTree<ObjectNode>(MINI_CORP.party)
val (name, owningKey) = json.assertHasOnlyFields("name", "owningKey")
assertThat(name.valueAs<CordaX500Name>(mapper)).isEqualTo(MINI_CORP.name)
assertThat(owningKey.valueAs<PublicKey>(mapper)).isEqualTo(MINI_CORP.publicKey)
}

@Test
fun `Party deserialization on full name`() {
fun convertToParty() = mapper.convertValue<Party>(TextNode(MINI_CORP.name.toString()))

// Check that it fails if it can't find the party
assertThatThrownBy { convertToParty() }

partyObjectMapper.identities += MINI_CORP.party
@@ -271,6 +278,7 @@ class JacksonSupportTest(@Suppress("unused") private val name: String, factory:
fun `Party deserialization on part of name`() {
fun convertToParty() = mapper.convertValue<Party>(TextNode(MINI_CORP.name.organisation))

// Check that it fails if it can't find the party
assertThatThrownBy { convertToParty() }

partyObjectMapper.identities += MINI_CORP.party
@@ -281,12 +289,24 @@ class JacksonSupportTest(@Suppress("unused") private val name: String, factory:
fun `Party deserialization on public key`() {
fun convertToParty() = mapper.convertValue<Party>(TextNode(MINI_CORP.publicKey.toBase58String()))

// Check that it fails if it can't find the party
assertThatThrownBy { convertToParty() }

partyObjectMapper.identities += MINI_CORP.party
assertThat(convertToParty()).isEqualTo(MINI_CORP.party)
}

@Test
fun `Party deserialization on name and key`() {
val party = mapper.convertValue<Party>(mapOf(
"name" to MINI_CORP.name,
"owningKey" to MINI_CORP.publicKey
))
// Party.equals is only defined on the public key so we must check the name as well
assertThat(party.name).isEqualTo(MINI_CORP.name)
assertThat(party.owningKey).isEqualTo(MINI_CORP.publicKey)
}

@Test
fun PublicKey() {
val json = mapper.valueToTree<TextNode>(MINI_CORP.publicKey)
@@ -326,15 +346,31 @@ class JacksonSupportTest(@Suppress("unused") private val name: String, factory:
}

@Test
fun `PartyAndCertificate serialisation`() {
val json = mapper.valueToTree<ObjectNode>(MINI_CORP.identity)
val (name, owningKey) = json.assertHasOnlyFields("name", "owningKey")
assertThat(name.valueAs<CordaX500Name>(mapper)).isEqualTo(MINI_CORP.name)
assertThat(owningKey.valueAs<PublicKey>(mapper)).isEqualTo(MINI_CORP.publicKey)
fun `PartyAndCertificate serialization`() {
val json = mapper.valueToTree<TextNode>(MINI_CORP.identity)
assertThat(json.textValue()).isEqualTo(MINI_CORP.name.toString())
}

@Test
fun `NodeInfo serialisation`() {
fun `PartyAndCertificate serialization with isFullParty = true`() {
partyObjectMapper.isFullParties = true
val json = mapper.valueToTree<ObjectNode>(MINI_CORP.identity)
println(mapper.writeValueAsString(json))
val (name, certPath) = json.assertHasOnlyFields("name", "certPath")
assertThat(name.valueAs<CordaX500Name>(mapper)).isEqualTo(MINI_CORP.name)
assertThat(certPath.valueAs<CertPath>(mapper)).isEqualTo(MINI_CORP.identity.certPath)
}

@Test
fun `PartyAndCertificate deserialization on cert path`() {
val certPathJson = mapper.valueToTree<JsonNode>(MINI_CORP.identity.certPath)
val partyAndCert = mapper.convertValue<PartyAndCertificate>(mapOf("certPath" to certPathJson))
// PartyAndCertificate.equals is defined on the Party so we must check the certPath directly
assertThat(partyAndCert.certPath).isEqualTo(MINI_CORP.identity.certPath)
}

@Test
fun `NodeInfo serialization`() {
val (nodeInfo) = createNodeInfoAndSigned(ALICE_NAME)
val json = mapper.valueToTree<ObjectNode>(nodeInfo)
val (addresses, legalIdentitiesAndCerts, platformVersion, serial) = json.assertHasOnlyFields(
@@ -349,14 +385,14 @@ class JacksonSupportTest(@Suppress("unused") private val name: String, factory:
}
legalIdentitiesAndCerts.run {
assertThat(this).hasSize(1)
assertThat(this[0]["name"].valueAs<CordaX500Name>(mapper)).isEqualTo(ALICE_NAME)
assertThat(this[0].valueAs<CordaX500Name>(mapper)).isEqualTo(ALICE_NAME)
}
assertThat(platformVersion.intValue()).isEqualTo(nodeInfo.platformVersion)
assertThat(serial.longValue()).isEqualTo(nodeInfo.serial)
}

@Test
fun `NodeInfo deserialisation on name`() {
fun `NodeInfo deserialization on name`() {
val (nodeInfo) = createNodeInfoAndSigned(ALICE_NAME)

fun convertToNodeInfo() = mapper.convertValue<NodeInfo>(TextNode(ALICE_NAME.toString()))
@@ -369,7 +405,7 @@ class JacksonSupportTest(@Suppress("unused") private val name: String, factory:
}

@Test
fun `NodeInfo deserialisation on public key`() {
fun `NodeInfo deserialization on public key`() {
val (nodeInfo) = createNodeInfoAndSigned(ALICE_NAME)

fun convertToNodeInfo() = mapper.convertValue<NodeInfo>(TextNode(nodeInfo.legalIdentities[0].owningKey.toBase58String()))
@@ -396,7 +432,7 @@ class JacksonSupportTest(@Suppress("unused") private val name: String, factory:
}

@Test
fun X509Certificate() {
fun `X509Certificate serialization`() {
val cert: X509Certificate = MINI_CORP.identity.certificate
val json = mapper.valueToTree<ObjectNode>(cert)
println(mapper.writeValueAsString(json))
@@ -407,7 +443,13 @@ class JacksonSupportTest(@Suppress("unused") private val name: String, factory:
assertThat(json["notAfter"].valueAs<Date>(mapper)).isEqualTo(cert.notAfter)
assertThat(json["notBefore"].valueAs<Date>(mapper)).isEqualTo(cert.notBefore)
assertThat(json["encoded"].binaryValue()).isEqualTo(cert.encoded)
assertThat(mapper.convertValue<X509Certificate>(json).encoded).isEqualTo(cert.encoded)
}

@Test
fun `X509Certificate deserialization`() {
val cert: X509Certificate = MINI_CORP.identity.certificate
assertThat(mapper.convertValue<X509Certificate>(mapOf("encoded" to cert.encoded))).isEqualTo(cert)
assertThat(mapper.convertValue<X509Certificate>(BinaryNode(cert.encoded))).isEqualTo(cert)
}

@Test
@@ -458,6 +500,7 @@ class JacksonSupportTest(@Suppress("unused") private val name: String, factory:
}

private class TestPartyObjectMapper : JacksonSupport.PartyObjectMapper {
override var isFullParties: Boolean = false
val identities = ArrayList<Party>()
val nodes = ArrayList<NodeInfo>()
override fun wellKnownPartyFromX500Name(name: CordaX500Name): Party? {

@@ -37,6 +37,7 @@ import net.corda.smoketesting.NodeProcess
import org.apache.commons.io.output.NullOutputStream
import org.junit.After
import org.junit.Before
import org.junit.Ignore
import org.junit.Test
import java.io.FilterInputStream
import java.io.InputStream
@@ -104,8 +105,24 @@ class StandaloneCordaRPClientTest {
financeJar.copyToDirectory(cordappsDir)
}

@Test
fun `test attachments`() {
val attachment = InputStreamAndHash.createInMemoryTestZip(attachmentSize, 1)
assertFalse(rpcProxy.attachmentExists(attachment.sha256))
val id = attachment.inputStream.use { rpcProxy.uploadAttachment(it) }
assertEquals(attachment.sha256, id, "Attachment has incorrect SHA256 hash")

val hash = HashingInputStream(Hashing.sha256(), rpcProxy.openAttachment(id)).use { it ->
it.copyTo(NullOutputStream())
SecureHash.SHA256(it.hash().asBytes())
}
assertEquals(attachment.sha256, hash)
}

@Ignore("CORDA-1520 - After switching from Kryo to AMQP this test won't work")
@Test
fun `test wrapped attachments`() {
val attachment = InputStreamAndHash.createInMemoryTestZip(attachmentSize, 1)
assertFalse(rpcProxy.attachmentExists(attachment.sha256))
val id = WrapperStream(attachment.inputStream).use { rpcProxy.uploadAttachment(it) }

@@ -14,6 +14,7 @@ import co.paralleluniverse.fibers.Suspendable
import net.corda.core.crypto.isFulfilledBy
import net.corda.core.identity.Party
import net.corda.core.identity.groupAbstractPartyByWellKnownParty
import net.corda.core.internal.pushToLoggingContext
import net.corda.core.transactions.LedgerTransaction
import net.corda.core.transactions.SignedTransaction
import net.corda.core.utilities.ProgressTracker
@@ -61,17 +62,24 @@ class FinalityFlow(val transaction: SignedTransaction,
//
// Lookup the resolved transactions and use them to map each signed transaction to the list of participants.
// Then send to the notary if needed, record locally and distribute.

transaction.pushToLoggingContext()
val commandDataTypes = transaction.tx.commands.map { it.value }.mapNotNull { it::class.qualifiedName }.distinct()
logger.info("Started finalization, commands are ${commandDataTypes.joinToString(", ", "[", "]")}.")
val parties = getPartiesToSend(verifyTx())
val notarised = notariseAndRecord()

// Each transaction has its own set of recipients, but extra recipients get them all.
progressTracker.currentStep = BROADCASTING
for (party in parties) {
if (!serviceHub.myInfo.isLegalIdentity(party)) {
val session = initiateFlow(party)
subFlow(SendTransactionFlow(session, notarised))
}
val recipients = parties.filterNot(serviceHub.myInfo::isLegalIdentity)
logger.info("Broadcasting transaction to parties ${recipients.map { it.name }.joinToString(", ", "[", "]")}.")
for (party in recipients) {
logger.info("Sending transaction to party ${party.name}.")
val session = initiateFlow(party)
subFlow(SendTransactionFlow(session, notarised))
logger.info("Party ${party.name} received the transaction.")
}
logger.info("All parties received the transaction successfully.")

return notarised
}
@@ -83,9 +91,12 @@ class FinalityFlow(val transaction: SignedTransaction,
val notarySignatures = subFlow(NotaryFlow.Client(transaction))
transaction + notarySignatures
} else {
logger.info("No need to notarise this transaction.")
transaction
}
logger.info("Recording transaction locally.")
serviceHub.recordTransactions(notarised)
logger.info("Recorded transaction locally successfully.")
return notarised
}

@@ -19,6 +19,7 @@ import net.corda.core.identity.Party
import net.corda.core.internal.FetchDataFlow
import net.corda.core.internal.notary.generateSignature
import net.corda.core.internal.notary.validateSignatures
import net.corda.core.internal.pushToLoggingContext
import net.corda.core.transactions.ContractUpgradeWireTransaction
import net.corda.core.transactions.SignedTransaction
import net.corda.core.transactions.WireTransaction
@@ -54,9 +55,12 @@ class NotaryFlow {
@Suspendable
@Throws(NotaryException::class)
override fun call(): List<TransactionSignature> {
stx.pushToLoggingContext()
val notaryParty = checkTransaction()
progressTracker.currentStep = REQUESTING
logger.info("Sending transaction to notary: ${notaryParty.name}.")
val response = notarise(notaryParty)
logger.info("Notary responded.")
progressTracker.currentStep = VALIDATING
return validateResponse(response, notaryParty)
}

@@ -13,6 +13,7 @@ package net.corda.core.flows
import co.paralleluniverse.fibers.Suspendable
import net.corda.core.contracts.*
import net.corda.core.internal.ResolveTransactionsFlow
import net.corda.core.internal.pushToLoggingContext
import net.corda.core.node.StatesToRecord
import net.corda.core.transactions.SignedTransaction
import net.corda.core.utilities.unwrap
@@ -46,18 +47,25 @@ class ReceiveTransactionFlow @JvmOverloads constructor(private val otherSideSess
} else {
logger.trace("Receiving a transaction (but without checking the signatures) from ${otherSideSession.counterparty}")
}

val stx = otherSideSession.receive<SignedTransaction>().unwrap {
it.pushToLoggingContext()
logger.info("Received transaction acknowledgement request from party ${otherSideSession.counterparty.name}.")
subFlow(ResolveTransactionsFlow(it, otherSideSession))
it.verify(serviceHub, checkSufficientSignatures)
it
logger.info("Transaction dependencies resolution completed.")
try {
it.verify(serviceHub, checkSufficientSignatures)
it
} catch (e: Exception) {
logger.warn("Transaction verification failed.")
throw e
}
}

if (checkSufficientSignatures) {
// We should only send a transaction to the vault for processing if we did in fact fully verify it, and
// there are no missing signatures. We don't want partly signed stuff in the vault.
logger.trace("Successfully received fully signed tx ${stx.id}, sending to the vault for processing")
logger.info("Successfully received fully signed tx. Sending it to the vault for processing.")
serviceHub.recordTransactions(statesToRecord, setOf(stx))
logger.info("Successfully recorded received transaction locally.")
}
return stx
}

@@ -51,4 +51,5 @@ interface FlowStateMachine<FLOWRETURN> {
val resultFuture: CordaFuture<FLOWRETURN>
val context: InvocationContext
val ourIdentity: Party
val ourSenderUUID: String?
}

@@ -17,13 +17,19 @@ import com.google.common.hash.HashingInputStream
import net.corda.core.cordapp.Cordapp
import net.corda.core.cordapp.CordappConfig
import net.corda.core.cordapp.CordappContext
import net.corda.core.crypto.*
import net.corda.core.crypto.Crypto
import net.corda.core.crypto.DigitalSignature
import net.corda.core.crypto.SecureHash
import net.corda.core.crypto.SignedData
import net.corda.core.crypto.sha256
import net.corda.core.crypto.sign
import net.corda.core.identity.CordaX500Name
import net.corda.core.node.ServicesForResolution
import net.corda.core.serialization.SerializationContext
import net.corda.core.serialization.SerializedBytes
import net.corda.core.serialization.deserialize
import net.corda.core.serialization.serialize
import net.corda.core.transactions.SignedTransaction
import net.corda.core.transactions.TransactionBuilder
import net.corda.core.transactions.WireTransaction
import net.corda.core.utilities.OpaqueBytes
@@ -31,11 +37,15 @@ import org.bouncycastle.asn1.x500.X500Name
import org.bouncycastle.asn1.x500.X500NameBuilder
import org.bouncycastle.asn1.x500.style.BCStyle
import org.slf4j.Logger
import org.slf4j.MDC
import rx.Observable
import rx.Observer
import rx.subjects.PublishSubject
import rx.subjects.UnicastSubject
import java.io.*
import java.io.ByteArrayOutputStream
import java.io.IOException
import java.io.InputStream
import java.io.OutputStream
import java.lang.reflect.Field
import java.lang.reflect.Modifier
import java.math.BigDecimal
@@ -51,11 +61,23 @@ import java.nio.file.Paths
import java.security.KeyPair
import java.security.PrivateKey
import java.security.PublicKey
import java.security.cert.*
import java.security.cert.CertPath
import java.security.cert.CertPathValidator
import java.security.cert.CertPathValidatorException
import java.security.cert.PKIXCertPathValidatorResult
import java.security.cert.PKIXParameters
import java.security.cert.TrustAnchor
import java.security.cert.X509Certificate
import java.time.Duration
import java.time.temporal.Temporal
import java.util.*
import java.util.Spliterator.*
import java.util.Spliterator.DISTINCT
import java.util.Spliterator.IMMUTABLE
import java.util.Spliterator.NONNULL
import java.util.Spliterator.ORDERED
import java.util.Spliterator.SIZED
import java.util.Spliterator.SORTED
import java.util.Spliterator.SUBSIZED
import java.util.concurrent.ExecutorService
import java.util.concurrent.TimeUnit
import java.util.stream.IntStream
@@ -68,6 +90,17 @@ import kotlin.reflect.KClass
import kotlin.reflect.full.createInstance

val Throwable.rootCause: Throwable get() = cause?.rootCause ?: this
val Throwable.rootMessage: String? get() {
var message = this.message
var throwable = cause
while (throwable != null) {
if (throwable.message != null) {
message = throwable.message
}
throwable = throwable.cause
}
return message
}
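
A quick illustrative use of the two helpers above (not part of the diff):

    val e = IllegalStateException("outer failure", IllegalArgumentException("inner failure"))
    println(e.rootCause)    // the innermost IllegalArgumentException
    println(e.rootMessage)  // "inner failure": the deepest non-null message in the cause chain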

infix fun Temporal.until(endExclusive: Temporal): Duration = Duration.between(this, endExclusive)

@@ -469,3 +502,10 @@ val PublicKey.hash: SecureHash get() = encoded.sha256()
* Extension method for providing a sumBy method that processes and returns a Long
*/
fun <T> Iterable<T>.sumByLong(selector: (T) -> Long): Long = this.map { selector(it) }.sum()

/**
* Ensures each log entry from the current thread will contain id of the transaction in the MDC.
*/
internal fun SignedTransaction.pushToLoggingContext() {
MDC.put("tx_id", id.toString())
}

@@ -219,7 +219,8 @@ object SerializationDefaults {
/**
* Convenience extension method for deserializing a ByteSequence, utilising the defaults.
*/
inline fun <reified T : Any> ByteSequence.deserialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory, context: SerializationContext = serializationFactory.defaultContext): T {
inline fun <reified T : Any> ByteSequence.deserialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory,
context: SerializationContext = serializationFactory.defaultContext): T {
return serializationFactory.deserialize(this, T::class.java, context)
}

@@ -228,31 +229,40 @@ inline fun <reified T : Any> ByteSequence.deserialize(serializationFactory: Seri
* It might be helpful to know [SerializationContext] to use the same encoding in the reply.
*/
inline fun <reified T : Any> ByteSequence.deserializeWithCompatibleContext(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory,
context: SerializationContext = serializationFactory.defaultContext): ObjectWithCompatibleContext<T> {
context: SerializationContext = serializationFactory.defaultContext): ObjectWithCompatibleContext<T> {
return serializationFactory.deserializeWithCompatibleContext(this, T::class.java, context)
}

/**
* Convenience extension method for deserializing SerializedBytes with type matching, utilising the defaults.
*/
inline fun <reified T : Any> SerializedBytes<T>.deserialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory, context: SerializationContext = serializationFactory.defaultContext): T {
inline fun <reified T : Any> SerializedBytes<T>.deserialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory,
context: SerializationContext = serializationFactory.defaultContext): T {
return serializationFactory.deserialize(this, T::class.java, context)
}

/**
* Convenience extension method for deserializing a ByteArray, utilising the defaults.
*/
inline fun <reified T : Any> ByteArray.deserialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory, context: SerializationContext = serializationFactory.defaultContext): T = this.sequence().deserialize(serializationFactory, context)
inline fun <reified T : Any> ByteArray.deserialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory,
context: SerializationContext = serializationFactory.defaultContext): T {
require(isNotEmpty()) { "Empty bytes" }
return this.sequence().deserialize(serializationFactory, context)
}

/**
* Convenience extension method for deserializing a JDBC Blob, utilising the defaults.
*/
inline fun <reified T : Any> Blob.deserialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory, context: SerializationContext = serializationFactory.defaultContext): T = this.getBytes(1, this.length().toInt()).deserialize(serializationFactory, context)
inline fun <reified T : Any> Blob.deserialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory,
context: SerializationContext = serializationFactory.defaultContext): T {
return this.getBytes(1, this.length().toInt()).deserialize(serializationFactory, context)
}

/**
* Convenience extension method for serializing an object of type T, utilising the defaults.
*/
fun <T : Any> T.serialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory, context: SerializationContext = serializationFactory.defaultContext): SerializedBytes<T> {
fun <T : Any> T.serialize(serializationFactory: SerializationFactory = SerializationFactory.defaultFactory,
context: SerializationContext = serializationFactory.defaultContext): SerializedBytes<T> {
return serializationFactory.serialize(this, context)
}
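
An illustrative sketch of these helpers in use, assuming a serialisation environment is already initialised (as it is inside a node or a serialisation test) and that ``SomeState`` is a ``@CordaSerializable`` class in scope:

    val bytes: SerializedBytes<SomeState> = someState.serialize()
    val copy: SomeState = bytes.deserialize()           // SerializedBytes overload
    val fromRaw: SomeState = bytes.bytes.deserialize()  // ByteArray overload, which now rejects empty input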

@@ -79,7 +79,7 @@ class WireTransaction(componentGroups: List<ComponentGroup>, val privacySalt: Pr
val requiredSigningKeys: Set<PublicKey>
get() {
val commandKeys = commands.flatMap { it.signers }.toSet()
// TODO: prevent notary field from being set if there are no inputs and no timestamp.
// TODO: prevent notary field from being set if there are no inputs and no time-window.
return if (notary != null && (inputs.isNotEmpty() || timeWindow != null)) {
commandKeys + notary.owningKey
} else {

@@ -30,7 +30,7 @@ In our flow, the Initiator flow class will be doing the majority of the work:
2. Create a transaction builder
3. Extract any input states from the vault and add them to the builder
4. Create any output states and add them to the builder
5. Add any commands, attachments and timestamps to the builder
5. Add any commands, attachments and time-window to the builder

*Part 2 - Sign the transaction*

docs/source/api-scanner.rst (new file, 58 lines)
@@ -0,0 +1,58 @@
API stability check
===================

We have committed not to alter Corda's API so that developers will not have to keep rewriting their CorDapps with each
new Corda release. The stable Corda modules are listed :ref:`here <internal-apis-and-stability-guarantees>`. Our CI process runs an "API Stability"
check for each GitHub pull request in order to check that we don't accidentally introduce an API-breaking change.

Build Process
-------------

As part of the build process the following commands are run for each PR:

.. code-block:: shell

    $ gradlew generateApi
    $ .ci/check-api-changes.sh

This ``bash`` script has been tested on both MacOS and various Linux distributions; it can also be run on Windows with the
use of a suitable bash emulator such as git bash. The script's return value is the number of API-breaking changes that it
has detected, and this should be zero for the check to pass. The maximum return value is 255, although the script will still
correctly report higher numbers of breaking changes.

There are three kinds of breaking change:

* Removal or modification of existing API, i.e. an existing class, method or field has been either deleted or renamed, or
  its signature somehow altered.
* Addition of a new method to an interface or abstract class. Types that have been annotated as ``@DoNotImplement`` are
  excluded from this check. (This annotation is also inherited across subclasses and subinterfaces.)
* Exposure of an internal type via a public API. Internal types are considered to be anything in a ``*.internal.`` package
  or anything in a module that isn't in the stable modules list :ref:`here <internal-apis-and-stability-guarantees>`.

Developers can execute these commands themselves before submitting their PR, to ensure that they haven't inadvertently
broken Corda's API.


How it works
------------

The ``generateApi`` Gradle task writes a summary of Corda's public API into the file ``build/api/api-corda-<version>.txt``.
The ``.ci/check-api-changes.sh`` script then compares this file with the contents of ``.ci/api-current.txt``, which is a
managed file within the Corda repository.

The Gradle task itself is implemented by the API Scanner plugin. More information on the API Scanner plugin is available `here <https://github.com/corda/corda-gradle-plugins/tree/master/api-scanner>`_.


Updating the API
----------------

As a rule, ``api-current.txt`` should only be updated by the release manager for each Corda release.

We do not expect modifications to ``api-current.txt`` as part of normal development. However, we may sometimes need to adjust
the public API in ways that would not break developers' CorDapps but which would be blocked by the API Stability check.
For example, migrating a method from an interface into a superinterface. Any changes to the API summary file should be
included in the PR, which would then need explicit approval from either `Mike Hearn <https://github.com/mikehearn>`_, `Rick Parker <https://github.com/rick-r3>`_ or `Matthew Nesbit <https://github.com/mnesbit>`_.

.. note:: If you need to modify ``api-current.txt``, do not re-generate the file on the master branch. This will include new API that
   hasn't been released or committed to, and may be subject to change. Manually change the specific line or lines of the
   existing committed API that has changed.

docs/source/blob-inspector.rst (new file, 63 lines)
@@ -0,0 +1,63 @@
Blob Inspector
==============

There are many benefits to having a custom binary serialisation format (see :doc:`serialization` for details) but one
disadvantage is the inability to view the contents in a human-friendly manner. The blob inspector tool alleviates this issue
by allowing the contents of a binary blob file (or URL end-point) to be output in either YAML or JSON. It uses
``JacksonSupport`` to do this (see :doc:`json`).

The latest version of the tool can be downloaded from `here <https://www.corda.net/downloads/>`_.

To run, simply pass in the file or URL as the first parameter:

``java -jar blob-inspector.jar <file or URL>``

Use the ``--help`` flag for a full list of command line options.

``SerializedBytes``
~~~~~~~~~~~~~~~~~~~

One thing to note is that the binary blob may contain embedded ``SerializedBytes`` objects. Rather than printing these
out as a Base64 string, the blob inspector will first materialise them into Java objects and then output those. You will
see this when dealing with classes such as ``SignedData`` or other structures that attach a signature, such as the
``nodeInfo-*`` files or the ``network-parameters`` file in the node's directory. For example, the output of a node-info
file may look like:

.. container:: codeset

   .. sourcecode:: yaml

      net.corda.nodeapi.internal.SignedNodeInfo
      ---
      raw:
        class: "net.corda.core.node.NodeInfo"
        deserialized:
          addresses:
          - "localhost:10011"
          legalIdentitiesAndCerts:
          - "O=BankOfCorda, L=New York, C=US"
          platformVersion: 4
          serial: 1527074180971
      signatures:
      - !!binary |
        dmoAnnzcv0MzRN+3ZSCDcCJIAbXnoYy5mFWB3Nijndzu/dzIoYdIawINXbNSY/5z2XloDK01vZRV
        TreFZCbZAg==

   .. sourcecode:: json

      net.corda.nodeapi.internal.SignedNodeInfo
      {
        "raw" : {
          "class" : "net.corda.core.node.NodeInfo",
          "deserialized" : {
            "addresses" : [ "localhost:10011" ],
            "legalIdentitiesAndCerts" : [ "O=BankOfCorda, L=New York, C=US" ],
            "platformVersion" : 4,
            "serial" : 1527074180971
          }
        },
        "signatures" : [ "dmoAnnzcv0MzRN+3ZSCDcCJIAbXnoYy5mFWB3Nijndzu/dzIoYdIawINXbNSY/5z2XloDK01vZRVTreFZCbZAg==" ]
      }

Notice the file is actually a serialised ``SignedNodeInfo`` object, which has a ``raw`` property of type ``SerializedBytes<NodeInfo>``.
This property is materialised into a ``NodeInfo`` and is output under the ``deserialized`` field.

@@ -8,6 +8,12 @@ Unreleased
==========
* Introduced a hierarchy of ``DatabaseMigrationException``s, allowing ``NodeStartup`` to gracefully inform users of problems related to database migrations before exiting with a non-zero code.

* Doorman and NetworkMap URLs can now be configured individually rather than being assumed to be
the same server. Current ``compatibilityZoneURL`` configurations remain valid. See both :doc:`corda-configuration-file`
and :doc:`permissioning` for details.

* Improved audit trail for ``FinalityFlow`` and related sub-flows.

* ``NodeStartup`` will now only print the node's configuration if ``devMode`` is ``true``, avoiding the risk of printing passwords in a production setup.

* SLF4J's MDC will now only be printed to the console if not empty. No more log lines ending with "{}".
@@ -18,7 +24,7 @@ Unreleased
* RPC server will now mask internal errors to RPC clients if not in devMode. ``Throwable``s implementing ``ClientRelevantError`` will continue to be propagated to clients.

* RPC Framework moved from Kryo to the Corda AMQP implementation [Corda-847]. This completes the removal
of ``Kryo`` from general use within Corda, remaining only for use in flow checkpointing.
of ``Kryo`` from general use within Corda, remaining only for use in flow checkpointing.

* Set co.paralleluniverse.fibers.verifyInstrumentation=true in devMode.

@@ -35,12 +41,19 @@ Unreleased
* ``Party`` objects can be deserialised by looking up their public key, in addition to their name
* ``NodeInfo`` objects are serialised as an object and can be looked up using the same mechanism as ``Party``
* ``NetworkHostAndPort`` serialised according to its ``toString()``
* ``PartyAndCertificate`` is serialised as an object containing the name and owning key
* ``SerializedBytes`` is serialised by converting the bytes into the object it represents, which is then serialised into
a JSON/YAML object
* ``CertPath`` and ``X509Certificate`` are serialised as objects and can be deserialised back
* ``PartyAndCertificate`` is serialised as the name
* ``SerializedBytes`` is serialised by materialising the bytes into the object it represents, and then serialising that
object into YAML/JSON
* ``X509Certificate`` is serialised as an object with key fields such as ``issuer``, ``publicKey``, ``serialNumber``, etc.
The encoded bytes are also serialised into the ``encoded`` field. This can be used to deserialise an ``X509Certificate``
back.
* ``CertPath`` objects are serialised as a list of ``X509Certificate`` objects.
* ``SignedTransaction`` is serialised into its ``txBits`` and ``signatures`` and can be deserialised back

* ``fullParties`` boolean parameter added to ``JacksonSupport.createDefaultMapper`` and ``createNonRpcMapper``. If ``true``
then ``Party`` objects are serialised as JSON objects with the ``name`` and ``owningKey`` fields. For ``PartyAndCertificate``
the ``certPath`` is serialised.

* Several members of ``JacksonSupport`` have been deprecated to highlight that they are internal and not to be used.

* The Vault Criteria API has been extended to take a more precise specification of which class contains a field. This

@@ -24,12 +24,15 @@ Certificate hierarchy

A Corda network has 8 types of keys and a regular node requires 4 of them:

**Network Keys**

* The **root network CA** key
* The **doorman CA** key
* The **network map** key
* The **service identity** key(s) (per service, such as a notary cluster; it can be a Composite Key)
* The **service identity** key(s) (per service, such as a notary cluster; it can be a Composite key)

**Node Keys**

-- **Node Keys** --
* The **node CA** key(s) (one per node)
* The **legal identity** key(s) (one per node)
* The **tls** key(s) (per node)

@@ -102,6 +102,12 @@ Building against the master branch
You can test your changes against CorDapps defined in other repos by following the instructions
:doc:`here </building-against-master>`.

Running the API scanner
^^^^^^^^^^^^^^^^^^^^^^^
Your changes must also not break compatibility with the existing public API. We have an API scanning tool which runs as part of the build
process and can be used to flag up any accidental changes; it is detailed :doc:`here </api-scanner>`.


Updating the docs
-----------------
@ -21,6 +21,8 @@ The following are the core APIs that are used in the development of CorDapps:
|
||||
|
||||
Before reading this page, you should be familiar with the :doc:`key concepts of Corda <key-concepts>`.
|
||||
|
||||
.. _internal-apis-and-stability-guarantees:
|
||||
|
||||
Internal APIs and stability guarantees
|
||||
--------------------------------------
|
||||
|
||||
|
@ -184,7 +184,16 @@ absolute path to the node's base directory.
|
||||
interfaces, and then by sending an IP discovery request to the network map service. Set to ``false`` to disable.
|
||||
|
||||
:compatibilityZoneURL: The root address of the Corda compatibility zone network management services. It is used by the Corda node to register with the network and
|
||||
obtain Corda node certificate, (See :doc:`permissioning` for more information.) and also used by the node to obtain network map information.
|
||||
obtain a Corda node certificate (see :doc:`permissioning` for more information), and also used by the node to obtain network map information. It cannot be
set at the same time as the ``networkServices`` option.
|
||||
|
||||
:networkServices: If the Corda compatibility zone services, both network map and registration (doorman), are not running on the same endpoint
and thus have different URLs, then this option should be used in place of the ``compatibilityZoneURL`` setting.
|
||||
|
||||
:doormanURL: Root address of the network registration service.
|
||||
:networkMapURL: Root address of the network map service.
|
||||
|
||||
.. note:: Only one of ``compatibilityZoneURL`` or ``networkServices`` should be used.
|
||||
|
||||
:jvmArgs: An optional list of JVM args, as strings, which replace those inherited from the command line when launching via ``corda.jar``
|
||||
only. e.g. ``jvmArgs = [ "-Xmx220m", "-Xms220m", "-XX:+UseG1GC" ]``
|
||||
@ -269,7 +278,7 @@ Simple notary configuration file:
|
||||
notary : {
|
||||
validating : false
|
||||
}
|
||||
devMode : true
|
||||
devMode : false
|
||||
compatibilityZoneURL : "https://cz.corda.net"
|
||||
|
||||
An example ``web-server.conf`` file is as follows:
|
||||
@ -288,6 +297,10 @@ An example ``web-server.conf`` file is as follow:
|
||||
webAddress : "localhost:12347",
|
||||
rpcUsers : [{ username=user1, password=letmein, permissions=[ StartFlow.net.corda.protocols.CashProtocol ] }]
|
||||
|
||||
Configuring a node where the Corda Compatibility Zone's registration and Network Map services exist on different URLs:
|
||||
|
||||
.. literalinclude:: example-code/src/main/resources/example-node-with-networkservices.conf
|
||||
|
||||
Fields
|
||||
------
|
||||
|
||||
@ -344,4 +357,4 @@ Example adding/overriding keyStore password when starting Corda node:
|
||||
|
||||
.. sourcecode:: shell
|
||||
|
||||
java -Dcorda.rpcSettings.ssl.keyStorePassword=mypassword -jar node.jar
|
||||
java -Dcorda.rpcSettings.ssl.keyStorePassword=mypassword -jar node.jar
|
||||
|
@ -66,7 +66,7 @@ public class CommercialPaper implements Contract {
|
||||
});
|
||||
} else if (cmd.getValue() instanceof Commands.Issue) {
|
||||
State output = outputs.get(0);
|
||||
if (timeWindow == null) throw new IllegalArgumentException("Issuances must be timestamped");
|
||||
if (timeWindow == null) throw new IllegalArgumentException("Issuances must have a time-window");
|
||||
Instant time = timeWindow.getUntilTime();
|
||||
requireThat(require -> {
|
||||
// Don't allow people to issue commercial paper under other entities identities.
|
||||
|
@ -0,0 +1,25 @@
|
||||
myLegalName : "O=Bank A,L=London,C=GB"
|
||||
keyStorePassword : "cordacadevpass"
|
||||
trustStorePassword : "trustpass"
|
||||
crlCheckSoftFail: true
|
||||
dataSourceProperties : {
|
||||
dataSourceClassName : org.h2.jdbcx.JdbcDataSource
|
||||
dataSource.url : "jdbc:h2:file:"${baseDirectory}"/persistence"
|
||||
dataSource.user : sa
|
||||
dataSource.password : ""
|
||||
}
|
||||
p2pAddress : "my-corda-node:10002"
|
||||
rpcSettings = {
|
||||
useSsl = false
|
||||
standAloneBroker = false
|
||||
address : "my-corda-node:10003"
|
||||
adminAddress : "my-corda-node:10004"
|
||||
}
|
||||
rpcUsers : [
|
||||
{ username=user1, password=letmein, permissions=[ StartFlow.net.corda.protocols.CashProtocol ] }
|
||||
]
|
||||
devMode : false
|
||||
networkServices : {
|
||||
doormanURL = "https://registration.corda.net"
|
||||
networkMapURL = "https://cz.corda.net"
|
||||
}
|
@ -228,7 +228,7 @@ Next, we call another subflow called ``SignTransactionFlow``. ``SignTransactionF
|
||||
* Sending the transaction back to the buyer.
|
||||
|
||||
The transaction then needs to be finalized. This is the process of sending the transaction to a notary to assert
|
||||
(with another signature) that the timestamp in the transaction (if any) is valid and there are no double spends.
|
||||
(with another signature) that the time-window in the transaction (if any) is valid and there are no double spends.
|
||||
In this flow, finalization is handled by the buyer, so we just wait for the signed transaction to appear in our
|
||||
transaction storage. It will have the same ID as the one we started with but more signatures.
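As a rough sketch of that final wait (not part of the original tutorial; the flow name is hypothetical and it
assumes the core ``FlowLogic.waitForLedgerCommit`` API):

.. sourcecode:: kotlin

   import co.paralleluniverse.fibers.Suspendable
   import net.corda.core.crypto.SecureHash
   import net.corda.core.flows.FlowLogic
   import net.corda.core.transactions.SignedTransaction

   // Suspend until the transaction with the given id has been notarised and recorded locally,
   // then return the fully signed version.
   class AwaitFinalisedTransaction(private val txId: SecureHash) : FlowLogic<SignedTransaction>() {
       @Suspendable
       override fun call(): SignedTransaction = waitForLedgerCommit(txId)
   }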
|
||||
|
||||
|
@ -36,7 +36,7 @@ We can picture this situation as follows:
|
||||
The contract code can be written in any JVM language, and has access to the full capabilities of the language,
|
||||
including:
|
||||
|
||||
* Checking the number of inputs, outputs, commands, timestamps, and/or attachments
|
||||
* Checking the number of inputs, outputs, commands, time-window, and/or attachments
|
||||
* Checking the contents of any of these components
|
||||
* Looping constructs, variable assignment, function calls, helper methods, etc.
|
||||
* Grouping similar states to validate them as a group (e.g. imposing a rule on the combined value of all the cash
|
||||
|
@ -49,14 +49,14 @@ Transaction Merkle trees
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
A Merkle tree is constructed from a transaction by splitting the transaction into leaves, where each leaf contains
|
||||
either an input, an output, a command, or an attachment. The Merkle tree also contains the other fields of the
|
||||
``WireTransaction``, such as the timestamp, the notary, the type and the signers.
|
||||
``WireTransaction``, such as the time-window, the notary, the type and the signers.
|
||||
|
||||
Next, the Merkle tree is built in the normal way by hashing the concatenation of nodes’ hashes below the current one
|
||||
together. It’s visible on the example image below, where ``H`` denotes sha256 function, "+" - concatenation.
|
||||
|
||||
.. image:: resources/merkleTree.png
|
||||
|
||||
The transaction has two input states, one output state, one attachment, one command and a timestamp. For brevity
|
||||
The transaction has two input states, one output state, one attachment, one command and a time-window. For brevity
|
||||
we didn't include all leaves on the diagram (type, notary and signers are presented as one leaf labelled Rest - in
|
||||
reality they are separate leaves). Notice that if a tree is not a full binary tree, leaves are padded to the nearest
|
||||
power of 2 with the zero hash (since finding a pre-image of sha256(x) == 0 is a computationally hard task) - marked light
|
||||
@ -73,7 +73,7 @@ obtained belongs to that particular transaction.
|
||||
.. image:: resources/partialMerkle.png
|
||||
|
||||
In the example above, the node ``H(f)`` is the one holding command data for signing by the Oracle service. The blue leaf
|
||||
``H(g)`` is also included since it's holding timestamp information. Nodes labelled ``Provided`` form the Partial
|
||||
Merkle Tree, black ones are omitted. Having timestamp with the command that should be in a violet node place and
|
||||
``H(g)`` is also included since it holds the time-window information. Nodes labelled ``Provided`` form the Partial
Merkle Tree, black ones are omitted. Having the time-window and the command that should be in the violet node's place, and the
branch, we are able to calculate the root of this tree and compare it with the original transaction identifier - we have a
|
||||
proof that this command and timestamp belong to this transaction.
|
||||
proof that this command and time-window belong to this transaction.
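The root calculation itself is just repeated pairwise hashing. A simplified, illustrative sketch (the
production implementation lives in ``MerkleTree`` in corda-core; ``SecureHash.zeroHash`` and ``hashConcat``
are assumed to behave as in the core crypto API):

.. sourcecode:: kotlin

   import net.corda.core.crypto.SecureHash

   // Compute a Merkle root from leaf hashes, padding to the nearest power of two with the zero hash.
   fun merkleRoot(leaves: List<SecureHash>): SecureHash {
       require(leaves.isNotEmpty()) { "At least one leaf hash is required" }
       var level = leaves.toMutableList()
       while ((level.size and (level.size - 1)) != 0) level.add(SecureHash.zeroHash)
       while (level.size > 1) {
           // Hash the concatenation of each pair of sibling hashes to form the next level up.
           level = level.chunked(2).map { (left, right) -> left.hashConcat(right) }.toMutableList()
       }
       return level.single()
   }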
|
@ -111,10 +111,10 @@ As well as input states and output states, transactions contain:
|
||||
|
||||
* Commands
|
||||
* Attachments
|
||||
* Timestamps
|
||||
* Time-Window
|
||||
|
||||
For example, a transaction where Alice pays off £5 of an IOU with Bob using a £5 cash payment, supported by two
|
||||
attachments and a timestamp, may look as follows:
|
||||
attachments and a time-window, may look as follows:
|
||||
|
||||
.. image:: resources/full-tx.png
|
||||
:scale: 25%
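A minimal sketch of how such a proposal might be assembled in code (illustrative only; it assumes the
standard ``TransactionBuilder`` and ``TimeWindow`` APIs, and the inputs, outputs, commands and attachments
would be added with the corresponding ``add*`` methods):

.. sourcecode:: kotlin

   import java.time.Duration
   import java.time.Instant
   import net.corda.core.contracts.TimeWindow
   import net.corda.core.identity.Party
   import net.corda.core.transactions.TransactionBuilder

   // A transaction proposal restricted to a +/- 30 second window around the current time.
   fun proposalSkeleton(notary: Party): TransactionBuilder =
           TransactionBuilder(notary)
                   .setTimeWindow(TimeWindow.withTolerance(Instant.now(), Duration.ofSeconds(30)))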
|
||||
@ -172,8 +172,8 @@ For this use case, we have *attachments*. Each transaction can refer to zero or
|
||||
attachments are ZIP/JAR files containing arbitrary content. The information in these files can then be
|
||||
used when checking the transaction's validity.
|
||||
|
||||
Time-windows
|
||||
^^^^^^^^^^^^
|
||||
Time-window
|
||||
^^^^^^^^^^^
|
||||
In some cases, we want a transaction proposed to only be approved during a certain time-window. For example:
|
||||
|
||||
* An option can only be exercised after a certain date
|
||||
|
@ -196,21 +196,25 @@ This can be overridden with the additional ``--network-root-truststore`` flag.
|
||||
The certificate signing request will be created based on node information obtained from the node configuration.
|
||||
The following information from the node configuration file is needed to generate the request.
|
||||
|
||||
:myLegalName: Your company's legal name as an X.500 string. X.500 allows differentiation between entities with the same
|
||||
name as the legal name needs to be unique on the network. If another node has already been permissioned with this
|
||||
name then the permissioning server will automatically reject the request. The request will also be rejected if it
|
||||
violates legal name rules, see :ref:`node_naming` for more information.
|
||||
* **myLegalName** Your company's legal name as an X.500 string. X.500 allows differentiation between entities with the same
|
||||
name as the legal name needs to be unique on the network. If another node has already been permissioned with this
|
||||
name then the permissioning server will automatically reject the request. The request will also be rejected if it
|
||||
violates legal name rules, see :ref:`node_naming` for more information.
|
||||
|
||||
:emailAddress: e.g. "admin@company.com"
|
||||
* **emailAddress** e.g. "admin@company.com"
|
||||
|
||||
:devMode: must be set to false
|
||||
* **devMode** must be set to false
|
||||
|
||||
:compatibilityZoneURL: Corda compatibility zone network management service root URL.
|
||||
* **networkServices or compatibilityZoneURL** The Corda compatibility zone services must be configured. This must be either:
|
||||
|
||||
A new pair of private and public keys generated by the Corda node will be used to create the request.
|
||||
* **compatibilityZoneURL** The Corda compatibility zone network management service root URL.
|
||||
* **networkServices** Replaces the ``compatibilityZoneURL`` when the Doorman and Network Map services
|
||||
are configured to operate on different URL endpoints. The ``doorman`` entry is used for registration.
|
||||
|
||||
The utility will submit the request to the doorman server and poll for a result periodically to retrieve the certificates.
|
||||
Once the request has been approved and the certificates downloaded from the server, the node will create the keystore and trust store using the certificates and the generated private key.
|
||||
A new pair of private and public keys generated by the Corda node will be used to create the request.
|
||||
|
||||
The utility will submit the request to the doorman server and poll for a result periodically to retrieve the certificates.
|
||||
Once the request has been approved and the certificates downloaded from the server, the node will create the keystore and trust store using the certificates and the generated private key.
|
||||
|
||||
.. note:: You can exit the utility at any time if the approval process is taking longer than expected. The request process will resume on restart.
|
||||
|
||||
|
@ -8,4 +8,5 @@ Release process
|
||||
changelog
|
||||
contributing
|
||||
codestyle
|
||||
testing
|
||||
testing
|
||||
api-scanner
|
Binary file not shown.
Image changed: 294 KiB before, 200 KiB after.
@ -4,6 +4,7 @@ Tools
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
blob-inspector
|
||||
network-simulator
|
||||
demobench
|
||||
node-explorer
|
||||
|
@ -44,7 +44,7 @@ Transactions in Corda contain a number of elements:
|
||||
transactions to migrate the states across to a consistent notary node
|
||||
before being allowed to mutate any states)
|
||||
|
||||
7. Optionally a timestamp that can used by the notary to bound the
|
||||
7. Optionally a time-window that can be used by the notary to bound the
|
||||
period during which the proposed transaction can be committed to the
|
||||
ledger
|
||||
|
||||
|
@ -299,13 +299,13 @@ logic.
|
||||
|
||||
This loop is the core logic of the contract.
|
||||
|
||||
The first line simply gets the timestamp out of the transaction. Timestamping of transactions is optional, so a time
|
||||
The first line simply gets the time-window out of the transaction. Setting a time-window in transactions is optional, so a time
|
||||
may be missing here. We check for it being null later.
|
||||
|
||||
.. warning:: In the Kotlin version as long as we write a comparison with the transaction time first the compiler will
|
||||
verify we didn't forget to check if it's missing. Unfortunately due to the need for smooth Java interop, this
|
||||
check won't happen if we write e.g. ``someDate > time``, it has to be ``time < someDate``. So it's good practice to
|
||||
always write the transaction timestamp first.
|
||||
always write the transaction time-window first.
|
||||
|
||||
Next, we take one of three paths, depending on what the type of the command object is.
|
||||
|
||||
@ -597,7 +597,7 @@ The time-lock contract mentioned above can be implemented very simply:
|
||||
class TestTimeLock : Contract {
|
||||
...
|
||||
override fun verify(tx: LedgerTransaction) {
|
||||
val time = tx.timestamp.before ?: throw IllegalStateException(...)
|
||||
val time = tx.timeWindow?.untilTime ?: throw IllegalStateException(...)
|
||||
...
|
||||
requireThat {
|
||||
"the time specified in the time-lock has passed" by
|
||||
|
@ -1,52 +0,0 @@
|
||||
apply plugin: 'java'
|
||||
apply plugin: 'kotlin'
|
||||
apply plugin: 'application'
|
||||
|
||||
mainClassName = 'net.corda.blobinspector.MainKt'
|
||||
|
||||
dependencies {
|
||||
compile project(':core')
|
||||
compile project(':node-api')
|
||||
|
||||
compile "commons-cli:commons-cli:$commons_cli_version"
|
||||
|
||||
testCompile project(':test-utils')
|
||||
|
||||
testCompile "junit:junit:$junit_version"
|
||||
}
|
||||
|
||||
/**
|
||||
* To run from within gradle use
|
||||
*
|
||||
* ./gradlew -PrunArgs="<cmd> <line> <args>" :experimental:blobinspector:run
|
||||
*
|
||||
* For example, to parse a file from the command line and print out the deserialized properties
|
||||
*
|
||||
* ./gradlew -PrunArgs="-f <path/to/file> -d" :experimental:blobinspector:run
|
||||
*
|
||||
* at the command line.
|
||||
*/
|
||||
run {
|
||||
if (project.hasProperty('runArgs')) {
|
||||
args = [ project.findProperty('runArgs').toString().split(" ") ].flatten()
|
||||
}
|
||||
|
||||
if (System.properties.getProperty('consoleLogLevel') != null) {
|
||||
logging.captureStandardOutput(LogLevel.valueOf(System.properties.getProperty('consoleLogLevel')))
|
||||
logging.captureStandardError(LogLevel.valueOf(System.properties.getProperty('consoleLogLevel')))
|
||||
systemProperty "consoleLogLevel", System.properties.getProperty('consoleLogLevel')
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a executable jar
|
||||
*/
|
||||
jar {
|
||||
baseName 'blobinspector'
|
||||
manifest {
|
||||
attributes(
|
||||
'Automatic-Module-Name': 'net.corda.experimental.blobinspector',
|
||||
'Main-Class': 'net.corda.blobinspector.MainKt'
|
||||
)
|
||||
}
|
||||
}
|
@ -1,405 +0,0 @@
|
||||
package net.corda.blobinspector
|
||||
|
||||
import net.corda.core.crypto.SecureHash
|
||||
import net.corda.core.serialization.EncodingWhitelist
|
||||
import net.corda.core.serialization.SerializationEncoding
|
||||
import net.corda.core.utilities.ByteSequence
|
||||
import net.corda.serialization.internal.SerializationFactoryImpl
|
||||
import net.corda.serialization.internal.amqp.CompositeType
|
||||
import net.corda.serialization.internal.amqp.DeserializationInput
|
||||
import net.corda.serialization.internal.amqp.RestrictedType
|
||||
import net.corda.serialization.internal.amqp.TypeNotation
|
||||
import net.corda.serialization.internal.amqp.amqpMagic
|
||||
import org.apache.qpid.proton.amqp.Binary
|
||||
import org.apache.qpid.proton.amqp.DescribedType
|
||||
import org.apache.qpid.proton.amqp.Symbol
|
||||
|
||||
/**
|
||||
* Print a string to the console only if the verbose config option is set.
|
||||
*/
|
||||
fun String.debug(config: Config) {
|
||||
if (config.verbose) {
|
||||
println(this)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
interface Stringify {
|
||||
fun stringify(sb: IndentingStringBuilder)
|
||||
}
|
||||
|
||||
/**
|
||||
* Makes classnames easier to read by stripping off the package names from the class and separating nested
|
||||
* classes
|
||||
*
|
||||
* For example:
|
||||
*
|
||||
* net.corda.blobinspector.Class1<net.corda.blobinspector.Class2>
|
||||
* Class1 <Class2>
|
||||
*
|
||||
* net.corda.blobinspector.Class1<net.corda.blobinspector.Class2, net.corda.blobinspector.Class3>
|
||||
* Class1 <Class2, Class3>
|
||||
*
|
||||
* net.corda.blobinspector.Class1<net.corda.blobinspector.Class2<net.corda.blobinspector.Class3>>
|
||||
* Class1 <Class2 <Class3>>
|
||||
*
|
||||
* net.corda.blobinspector.Class1<net.corda.blobinspector.Class2<net.corda.blobinspector.Class3>>
|
||||
* Class1 :: C <Class2 <Class3>>
|
||||
*/
|
||||
fun String.simplifyClass(): String {
|
||||
|
||||
return if (this.endsWith('>')) {
|
||||
val templateStart = this.indexOf('<')
|
||||
val clazz = (this.substring(0, templateStart))
|
||||
val params = this.substring(templateStart+1, this.length-1).split(',').joinToString { it.simplifyClass() }
|
||||
|
||||
"${clazz.simplifyClass()} <$params>"
|
||||
}
|
||||
else {
|
||||
substring(this.lastIndexOf('.') + 1).replace("$", " :: ")
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents the deserialized form of the property of an Object
|
||||
*
|
||||
* @param name
|
||||
* @param type
|
||||
*/
|
||||
abstract class Property(
|
||||
val name: String,
|
||||
val type: String) : Stringify
|
||||
|
||||
/**
|
||||
* Derived class of [Property], represents properties of an object that are non compelex, such
|
||||
* as any POD type or String
|
||||
*/
|
||||
class PrimProperty(
|
||||
name: String,
|
||||
type: String,
|
||||
private val value: String) : Property(name, type) {
|
||||
override fun toString(): String = "$name : $type : $value"
|
||||
|
||||
override fun stringify(sb: IndentingStringBuilder) {
|
||||
sb.appendln("$name : $type : $value")
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Derived class of [Property] that represents a binary blob. Specifically useful because printing
|
||||
* a stream of bytes onto the screen isn't very use friendly
|
||||
*/
|
||||
class BinaryProperty(
|
||||
name: String,
|
||||
type: String,
|
||||
val value: ByteArray) : Property(name, type) {
|
||||
override fun toString(): String = "$name : $type : <<<BINARY BLOB>>>"
|
||||
|
||||
override fun stringify(sb: IndentingStringBuilder) {
|
||||
sb.appendln("$name : $type : <<<BINARY BLOB>>>")
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Derived class of [Property] that represent a list property. List could be either PoD types or
|
||||
* composite types.
|
||||
*/
|
||||
class ListProperty(
|
||||
name: String,
|
||||
type: String,
|
||||
private val values: MutableList<Any> = mutableListOf()) : Property(name, type) {
|
||||
override fun stringify(sb: IndentingStringBuilder) {
|
||||
sb.apply {
|
||||
when {
|
||||
values.isEmpty() -> appendln("$name : $type : [ << EMPTY LIST >> ]")
|
||||
values.first() is Stringify -> {
|
||||
appendln("$name : $type : [")
|
||||
values.forEach {
|
||||
(it as Stringify).stringify(this)
|
||||
}
|
||||
appendln("]")
|
||||
}
|
||||
else -> {
|
||||
appendln("$name : $type : [")
|
||||
values.forEach {
|
||||
appendln(it.toString())
|
||||
}
|
||||
appendln("]")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class MapProperty(
|
||||
name: String,
|
||||
type: String,
|
||||
private val map: MutableMap<*, *>
|
||||
) : Property(name, type) {
|
||||
override fun stringify(sb: IndentingStringBuilder) {
|
||||
if (map.isEmpty()) {
|
||||
sb.appendln("$name : $type : { << EMPTY MAP >> }")
|
||||
return
|
||||
}
|
||||
|
||||
// TODO this will not produce pretty output
|
||||
sb.apply {
|
||||
appendln("$name : $type : {")
|
||||
map.forEach {
|
||||
try {
|
||||
(it.key as Stringify).stringify(this)
|
||||
} catch (e: ClassCastException) {
|
||||
append (it.key.toString() + " : ")
|
||||
}
|
||||
try {
|
||||
(it.value as Stringify).stringify(this)
|
||||
} catch (e: ClassCastException) {
|
||||
appendln("\"${it.value.toString()}\"")
|
||||
}
|
||||
}
|
||||
appendln("}")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Derived class of [Property] that represents class properties that are themselves instances of
|
||||
* some complex type.
|
||||
*/
|
||||
class InstanceProperty(
|
||||
name: String,
|
||||
type: String,
|
||||
val value: Instance) : Property(name, type) {
|
||||
override fun stringify(sb: IndentingStringBuilder) {
|
||||
sb.append("$name : ")
|
||||
value.stringify(sb)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents an instance of a composite type.
|
||||
*/
|
||||
class Instance(
|
||||
val name: String,
|
||||
val type: String,
|
||||
val fields: MutableList<Property> = mutableListOf()) : Stringify {
|
||||
override fun stringify(sb: IndentingStringBuilder) {
|
||||
sb.apply {
|
||||
appendln("${name.simplifyClass()} : {")
|
||||
fields.forEach {
|
||||
it.stringify(this)
|
||||
}
|
||||
appendln("}")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
fun inspectComposite(
|
||||
config: Config,
|
||||
typeMap: Map<Symbol?, TypeNotation>,
|
||||
obj: DescribedType): Instance {
|
||||
if (obj.described !is List<*>) throw MalformedBlob("")
|
||||
|
||||
val name = (typeMap[obj.descriptor] as CompositeType).name
|
||||
"composite: $name".debug(config)
|
||||
|
||||
val inst = Instance(
|
||||
typeMap[obj.descriptor]?.name ?: "",
|
||||
typeMap[obj.descriptor]?.label ?: "")
|
||||
|
||||
(typeMap[obj.descriptor] as CompositeType).fields.zip(obj.described as List<*>).forEach {
|
||||
" field: ${it.first.name}".debug(config)
|
||||
inst.fields.add(
|
||||
if (it.second is DescribedType) {
|
||||
" - is described".debug(config)
|
||||
val d = inspectDescribed(config, typeMap, it.second as DescribedType)
|
||||
|
||||
when (d) {
|
||||
is Instance ->
|
||||
InstanceProperty(
|
||||
it.first.name,
|
||||
it.first.type,
|
||||
d)
|
||||
is List<*> -> {
|
||||
" - List".debug(config)
|
||||
ListProperty(
|
||||
it.first.name,
|
||||
it.first.type,
|
||||
d as MutableList<Any>)
|
||||
}
|
||||
is Map<*, *> -> {
|
||||
MapProperty(
|
||||
it.first.name,
|
||||
it.first.type,
|
||||
d as MutableMap<*, *>)
|
||||
}
|
||||
else -> {
|
||||
" skip it".debug(config)
|
||||
return@forEach
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
" - is prim".debug(config)
|
||||
when (it.first.type) {
|
||||
// Note, as in the case of SHA256 we can treat particular binary types
|
||||
// as different properties with a little coercion
|
||||
"binary" -> {
|
||||
if (name == "net.corda.core.crypto.SecureHash\$SHA256") {
|
||||
PrimProperty(
|
||||
it.first.name,
|
||||
it.first.type,
|
||||
SecureHash.SHA256((it.second as Binary).array).toString())
|
||||
} else {
|
||||
BinaryProperty(it.first.name, it.first.type, (it.second as Binary).array)
|
||||
}
|
||||
}
|
||||
else -> PrimProperty(it.first.name, it.first.type, it.second.toString())
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
return inst
|
||||
}
|
||||
|
||||
fun inspectRestricted(
|
||||
config: Config,
|
||||
typeMap: Map<Symbol?, TypeNotation>,
|
||||
obj: DescribedType): Any {
|
||||
return when ((typeMap[obj.descriptor] as RestrictedType).source) {
|
||||
"list" -> inspectRestrictedList(config, typeMap, obj)
|
||||
"map" -> inspectRestrictedMap(config, typeMap, obj)
|
||||
else -> throw NotImplementedError()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
fun inspectRestrictedList(
|
||||
config: Config,
|
||||
typeMap: Map<Symbol?, TypeNotation>,
|
||||
obj: DescribedType
|
||||
) : List<Any> {
|
||||
if (obj.described !is List<*>) throw MalformedBlob("")
|
||||
|
||||
return mutableListOf<Any>().apply {
|
||||
(obj.described as List<*>).forEach {
|
||||
when (it) {
|
||||
is DescribedType -> add(inspectDescribed(config, typeMap, it))
|
||||
is RestrictedType -> add(inspectRestricted(config, typeMap, it))
|
||||
else -> add (it.toString())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fun inspectRestrictedMap(
|
||||
config: Config,
|
||||
typeMap: Map<Symbol?, TypeNotation>,
|
||||
obj: DescribedType
|
||||
) : Map<Any, Any> {
|
||||
if (obj.described !is Map<*,*>) throw MalformedBlob("")
|
||||
|
||||
return mutableMapOf<Any, Any>().apply {
|
||||
(obj.described as Map<*, *>).forEach {
|
||||
val key = when (it.key) {
|
||||
is DescribedType -> inspectDescribed(config, typeMap, it.key as DescribedType)
|
||||
is RestrictedType -> inspectRestricted(config, typeMap, it.key as RestrictedType)
|
||||
else -> it.key.toString()
|
||||
}
|
||||
|
||||
val value = when (it.value) {
|
||||
is DescribedType -> inspectDescribed(config, typeMap, it.value as DescribedType)
|
||||
is RestrictedType -> inspectRestricted(config, typeMap, it.value as RestrictedType)
|
||||
else -> it.value.toString()
|
||||
}
|
||||
|
||||
this[key] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Every element of the blob stream will be a ProtonJ [DescribedType]. When inspecting the blob stream
|
||||
* the two custom Corda types we're interested in are [CompositeType]'s, representing the instance of
|
||||
* some object (class), and [RestrictedType]'s, representing containers and enumerations.
|
||||
*
|
||||
* @param config The configuration object that controls the behaviour of the BlobInspector
|
||||
* @param typeMap
|
||||
* @param obj
|
||||
*/
|
||||
fun inspectDescribed(
|
||||
config: Config,
|
||||
typeMap: Map<Symbol?, TypeNotation>,
|
||||
obj: DescribedType): Any {
|
||||
"${obj.descriptor} in typeMap? = ${obj.descriptor in typeMap}".debug(config)
|
||||
|
||||
return when (typeMap[obj.descriptor]) {
|
||||
is CompositeType -> {
|
||||
"* It's composite".debug(config)
|
||||
inspectComposite(config, typeMap, obj)
|
||||
}
|
||||
is RestrictedType -> {
|
||||
"* It's restricted".debug(config)
|
||||
inspectRestricted(config, typeMap, obj)
|
||||
}
|
||||
else -> {
|
||||
"${typeMap[obj.descriptor]?.name} is neither Composite or Restricted".debug(config)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
internal object NullEncodingWhitelist : EncodingWhitelist {
|
||||
override fun acceptEncoding(encoding: SerializationEncoding) = false
|
||||
}
|
||||
|
||||
// TODO : Refactor to generically poerate on arbitrary blobs, not a single workflow
|
||||
fun inspectBlob(config: Config, blob: ByteArray) {
|
||||
val bytes = ByteSequence.of(blob)
|
||||
|
||||
val headerSize = SerializationFactoryImpl.magicSize
|
||||
|
||||
// TODO written to only understand one version, when we support multiple this will need to change
|
||||
val headers = listOf(ByteSequence.of(amqpMagic.bytes))
|
||||
|
||||
val blobHeader = bytes.take(headerSize)
|
||||
|
||||
if (blobHeader !in headers) {
|
||||
throw MalformedBlob("Blob is not a Corda AMQP serialised object graph")
|
||||
}
|
||||
|
||||
|
||||
val e = DeserializationInput.getEnvelope(bytes, NullEncodingWhitelist)
|
||||
|
||||
if (config.schema) {
|
||||
println(e.schema)
|
||||
}
|
||||
|
||||
if (config.transforms) {
|
||||
println(e.transformsSchema)
|
||||
}
|
||||
|
||||
val typeMap = e.schema.types.associateBy({ it.descriptor.name }, { it })
|
||||
|
||||
if (config.data) {
|
||||
val inspected = inspectDescribed(config, typeMap, e.obj as DescribedType)
|
||||
|
||||
println("\n${IndentingStringBuilder().apply { (inspected as Instance).stringify(this) }}")
|
||||
|
||||
(inspected as Instance).fields.find {
|
||||
it.type.startsWith("net.corda.core.serialization.SerializedBytes<")
|
||||
}?.let {
|
||||
"Found field of SerializedBytes".debug(config)
|
||||
(it as InstanceProperty).value.fields.find { it.name == "bytes" }?.let { raw ->
|
||||
inspectBlob(config, (raw as BinaryProperty).value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,40 +0,0 @@
|
||||
package net.corda.blobinspector
|
||||
|
||||
import java.io.File
|
||||
import java.net.URL
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
class FileBlobHandler(config_: Config) : BlobHandler(config_) {
|
||||
private val path = File(URL((config_ as FileConfig).file).toURI())
|
||||
|
||||
override fun getBytes(): ByteArray {
|
||||
return path.readBytes()
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
class InMemoryBlobHandler(config_: Config) : BlobHandler(config_) {
|
||||
private val localBytes = (config_ as InMemoryConfig).blob?.bytes ?: kotlin.ByteArray(0)
|
||||
override fun getBytes(): ByteArray = localBytes
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
abstract class BlobHandler(val config: Config) {
|
||||
companion object {
|
||||
fun make(config: Config): BlobHandler {
|
||||
return when (config.mode) {
|
||||
Mode.file -> FileBlobHandler(config)
|
||||
Mode.inMem -> InMemoryBlobHandler(config)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
abstract fun getBytes(): ByteArray
|
||||
}
|
||||
|
@ -1,137 +0,0 @@
|
||||
package net.corda.blobinspector
|
||||
|
||||
import org.apache.commons.cli.CommandLine
|
||||
import net.corda.core.serialization.SerializedBytes
|
||||
import org.apache.commons.cli.Option
|
||||
import org.apache.commons.cli.Options
|
||||
|
||||
/**
|
||||
* Enumeration of the modes in which the blob inspector can be run.
|
||||
*
|
||||
* @property make lambda function that takes no parameters and returns a specific instance of the configuration
|
||||
* object for that mode.
|
||||
*
|
||||
* @property options A lambda function that takes no parameters and returns an [Options] instance that define
|
||||
* the command line flags related to this mode. For example ``file`` mode would have an option to pass in
|
||||
* the name of the file to read.
|
||||
*
|
||||
*/
|
||||
enum class Mode(
|
||||
val make : () -> Config,
|
||||
val options : (Options) -> Unit
|
||||
) {
|
||||
file(
|
||||
{
|
||||
FileConfig(Mode.file)
|
||||
},
|
||||
{ o ->
|
||||
o.apply{
|
||||
addOption(
|
||||
Option ("f", "file", true, "path to file").apply {
|
||||
isRequired = true
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
),
|
||||
inMem(
|
||||
{
|
||||
InMemoryConfig(Mode.inMem)
|
||||
},
|
||||
{
|
||||
// The in memory only mode has no specific option assocaited with it as it's intended for
|
||||
// testing purposes only within the unit test framework and not use on the command line
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Configuration data class for the Blob Inspector.
|
||||
*
|
||||
* @property mode
|
||||
*/
|
||||
abstract class Config (val mode: Mode) {
|
||||
var schema: Boolean = false
|
||||
var transforms: Boolean = false
|
||||
var data: Boolean = false
|
||||
var verbose: Boolean = false
|
||||
|
||||
abstract fun populateSpecific(cmdLine: CommandLine)
|
||||
abstract fun withVerbose() : Config
|
||||
|
||||
fun populate(cmdLine: CommandLine) {
|
||||
schema = cmdLine.hasOption('s')
|
||||
transforms = cmdLine.hasOption('t')
|
||||
data = cmdLine.hasOption('d')
|
||||
verbose = cmdLine.hasOption('v')
|
||||
|
||||
populateSpecific(cmdLine)
|
||||
}
|
||||
|
||||
fun options() = Options().apply {
|
||||
// install generic options
|
||||
addOption(Option("s", "schema", false, "print the blob's schema").apply {
|
||||
isRequired = false
|
||||
})
|
||||
|
||||
addOption(Option("t", "transforms", false, "print the blob's transforms schema").apply {
|
||||
isRequired = false
|
||||
})
|
||||
|
||||
addOption(Option("d", "data", false, "Display the serialised data").apply {
|
||||
isRequired = false
|
||||
})
|
||||
|
||||
addOption(Option("v", "verbose", false, "Enable debug output").apply {
|
||||
isRequired = false
|
||||
})
|
||||
|
||||
// install the mode specific options
|
||||
mode.options(this)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Configuration object when running in "File" mode, i.e. the object has been specified at
|
||||
* the command line
|
||||
*/
|
||||
class FileConfig (
|
||||
mode: Mode
|
||||
) : Config(mode) {
|
||||
|
||||
var file: String = "unset"
|
||||
|
||||
override fun populateSpecific(cmdLine : CommandLine) {
|
||||
file = cmdLine.getParsedOptionValue("f") as String
|
||||
}
|
||||
|
||||
override fun withVerbose() : FileConfig {
|
||||
return FileConfig(mode).apply {
|
||||
this.schema = schema
|
||||
this.transforms = transforms
|
||||
this.data = data
|
||||
this.verbose = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Placeholder config objet used when running unit tests and the inspected blob is being fed in
|
||||
* via some mechanism directly. Normally this will be the direct serialisation of an object in a unit
|
||||
* test and then dumping that blob into the inspector for visual comparison of the output
|
||||
*/
|
||||
class InMemoryConfig (
|
||||
mode: Mode
|
||||
) : Config(mode) {
|
||||
var blob: SerializedBytes<*>? = null
|
||||
|
||||
override fun populateSpecific(cmdLine: CommandLine) {
|
||||
throw UnsupportedOperationException("In memory config is for testing only and cannot set specific flags")
|
||||
}
|
||||
|
||||
override fun withVerbose(): Config {
|
||||
throw UnsupportedOperationException("In memory config is for testing headlessly, cannot be verbose")
|
||||
}
|
||||
}
|
@ -1,3 +0,0 @@
|
||||
package net.corda.blobinspector
|
||||
|
||||
class MalformedBlob(msg: String) : Exception(msg)
|
@ -1,44 +0,0 @@
|
||||
package net.corda.blobinspector
|
||||
|
||||
/**
|
||||
* Wrapper around a [StringBuilder] that automates the indenting of lines as they're appended to facilitate
|
||||
* pretty printing of deserialized blobs.
|
||||
*
|
||||
* @property sb The wrapped [StringBuilder]
|
||||
* @property indenting Boolean flag that indicates weather we need to pad the start of whatever text
|
||||
* currently being added to the string.
|
||||
* @property indent How deeply the next line should be offset from the first column
|
||||
*/
|
||||
class IndentingStringBuilder(s: String = "", private val offset: Int = 4) {
|
||||
private val sb = StringBuilder(s)
|
||||
private var indenting = true
|
||||
private var indent = 0
|
||||
|
||||
private fun wrap(ln: String, appender: (String) -> Unit) {
|
||||
if ((ln.endsWith("}") || ln.endsWith("]")) && indent > 0 && ln.length == 1) {
|
||||
indent -= offset
|
||||
}
|
||||
|
||||
appender(ln)
|
||||
|
||||
if (ln.endsWith("{") || ln.endsWith("[")) {
|
||||
indent += offset
|
||||
}
|
||||
}
|
||||
|
||||
fun appendln(ln: String) {
|
||||
wrap(ln) { s -> sb.appendln("${"".padStart(if (indenting) indent else 0, ' ')}$s") }
|
||||
|
||||
indenting = true
|
||||
}
|
||||
|
||||
fun append(ln: String) {
|
||||
indenting = false
|
||||
|
||||
wrap(ln) { s -> sb.append("${"".padStart(indent, ' ')}$s") }
|
||||
}
|
||||
|
||||
override fun toString(): String {
|
||||
return sb.toString()
|
||||
}
|
||||
}
|
@ -1,81 +0,0 @@
|
||||
package net.corda.blobinspector
|
||||
|
||||
import org.apache.commons.cli.*
|
||||
import java.lang.IllegalArgumentException
|
||||
|
||||
/**
|
||||
* Mode isn't a required property as we default it to [Mode.file]
|
||||
*/
|
||||
private fun modeOption() = Option("m", "mode", true, "mode, file is the default").apply {
|
||||
isRequired = false
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* Parse the command line arguments looking for the main mode into which the application is
|
||||
* being put. Note, this defaults to [Mode.file] if not set meaning we will look for a file path
|
||||
* being passed as a parameter and parse that file.
|
||||
*
|
||||
* @param args reflects the command line arguments
|
||||
*
|
||||
* @return An instantiated but unpopulated [Config] object instance suitable for the mode into
|
||||
* which we've been placed. This Config object should be populated via [loadModeSpecificOptions]
|
||||
*/
|
||||
fun getMode(args: Array<String>): Config {
|
||||
// For now we only care what mode we're being put in, we can build the rest of the args and parse them
|
||||
// later
|
||||
val options = Options().apply {
|
||||
addOption(modeOption())
|
||||
}
|
||||
|
||||
val cmd = try {
|
||||
DefaultParser().parse(options, args, true)
|
||||
} catch (e: org.apache.commons.cli.ParseException) {
|
||||
println(e)
|
||||
HelpFormatter().printHelp("blobinspector", options)
|
||||
throw IllegalArgumentException("OH NO!!!")
|
||||
}
|
||||
|
||||
return try {
|
||||
Mode.valueOf(cmd.getParsedOptionValue("m") as? String ?: "file")
|
||||
} catch (e: IllegalArgumentException) {
|
||||
Mode.file
|
||||
}.make()
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @param config an instance of a [Config] specialisation suitable for the mode into which
|
||||
* the application has been put.
|
||||
* @param args The command line arguments
|
||||
*/
|
||||
fun loadModeSpecificOptions(config: Config, args: Array<String>) {
|
||||
config.apply {
|
||||
// load that modes specific command line switches, needs to include the mode option
|
||||
val modeSpecificOptions = config.options().apply {
|
||||
addOption(modeOption())
|
||||
}
|
||||
|
||||
populate(try {
|
||||
DefaultParser().parse(modeSpecificOptions, args, false)
|
||||
} catch (e: org.apache.commons.cli.ParseException) {
|
||||
println("Error: ${e.message}")
|
||||
HelpFormatter().printHelp("blobinspector", modeSpecificOptions)
|
||||
System.exit(1)
|
||||
return
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Executable entry point
|
||||
*/
|
||||
fun main(args: Array<String>) {
|
||||
println("<<< WARNING: this tool is experimental and under active development >>>")
|
||||
getMode(args).let { mode ->
|
||||
loadModeSpecificOptions(mode, args)
|
||||
BlobHandler.make(mode)
|
||||
}.apply {
|
||||
inspectBlob(config, getBytes())
|
||||
}
|
||||
}
|
@ -1,87 +0,0 @@
|
||||
package net.corda.blobinspector
|
||||
|
||||
import java.net.URI
|
||||
|
||||
import org.junit.Test
|
||||
import net.corda.testing.common.internal.ProjectStructure.projectRootDir
|
||||
|
||||
class FileParseTests {
|
||||
@Suppress("UNUSED")
|
||||
var localPath: URI = projectRootDir.toUri().resolve(
|
||||
"tools/blobinspector/src/test/resources/net/corda/blobinspector")
|
||||
|
||||
fun setupArgsWithFile(path: String) = Array(5) {
|
||||
when (it) {
|
||||
0 -> "-m"
|
||||
1 -> "file"
|
||||
2 -> "-f"
|
||||
3 -> path
|
||||
4 -> "-d"
|
||||
else -> "error"
|
||||
}
|
||||
}
|
||||
|
||||
private val filesToTest = listOf(
|
||||
"FileParseTests.1Int",
|
||||
"FileParseTests.2Int",
|
||||
"FileParseTests.3Int",
|
||||
"FileParseTests.1String",
|
||||
"FileParseTests.1Composite",
|
||||
"FileParseTests.2Composite",
|
||||
"FileParseTests.IntList",
|
||||
"FileParseTests.StringList",
|
||||
"FileParseTests.MapIntString",
|
||||
"FileParseTests.MapIntClass"
|
||||
)
|
||||
|
||||
fun testFile(file: String) {
|
||||
val path = FileParseTests::class.java.getResource(file)
|
||||
val args = setupArgsWithFile(path.toString())
|
||||
|
||||
val handler = getMode(args).let { mode ->
|
||||
loadModeSpecificOptions(mode, args)
|
||||
BlobHandler.make(mode)
|
||||
}
|
||||
|
||||
inspectBlob(handler.config, handler.getBytes())
|
||||
}
|
||||
|
||||
@Test
|
||||
fun simpleFiles() {
|
||||
filesToTest.forEach { testFile(it) }
|
||||
}
|
||||
|
||||
@Test
|
||||
fun specificTest() {
|
||||
testFile(filesToTest[4])
|
||||
testFile(filesToTest[5])
|
||||
testFile(filesToTest[6])
|
||||
}
|
||||
|
||||
@Test
|
||||
fun networkParams() {
|
||||
val file = "networkParams"
|
||||
val path = FileParseTests::class.java.getResource(file)
|
||||
val verbose = false
|
||||
|
||||
val args = verbose.let {
|
||||
if (it)
|
||||
Array(4) {
|
||||
when (it) { 0 -> "-f"; 1 -> path.toString(); 2 -> "-d"; 3 -> "-vs"; else -> "error"
|
||||
}
|
||||
}
|
||||
else
|
||||
Array(3) {
|
||||
when (it) { 0 -> "-f"; 1 -> path.toString(); 2 -> "-d"; else -> "error"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
val handler = getMode(args).let { mode ->
|
||||
loadModeSpecificOptions(mode, args)
|
||||
BlobHandler.make(mode)
|
||||
}
|
||||
|
||||
inspectBlob(handler.config, handler.getBytes())
|
||||
}
|
||||
}
|
@ -1,91 +0,0 @@
|
||||
package net.corda.blobinspector
|
||||
|
||||
import net.corda.core.serialization.SerializedBytes
|
||||
import net.corda.serialization.internal.AllWhitelist
|
||||
import net.corda.serialization.internal.amqp.SerializationOutput
|
||||
import net.corda.serialization.internal.amqp.SerializerFactory
|
||||
import net.corda.serialization.internal.AMQP_P2P_CONTEXT
|
||||
import org.junit.Test
|
||||
|
||||
|
||||
class InMemoryTests {
|
||||
private val factory = SerializerFactory(AllWhitelist, ClassLoader.getSystemClassLoader())
|
||||
|
||||
private fun inspect (b: SerializedBytes<*>) {
|
||||
BlobHandler.make(
|
||||
InMemoryConfig(Mode.inMem).apply { blob = b; data = true}
|
||||
).apply {
|
||||
inspectBlob(config, getBytes())
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
fun test1() {
|
||||
data class C (val a: Int, val b: Long, val c: String)
|
||||
inspect (SerializationOutput(factory).serialize(C(100, 567L, "this is a test"), AMQP_P2P_CONTEXT))
|
||||
}
|
||||
|
||||
@Test
|
||||
fun test2() {
|
||||
data class C (val i: Int, val c: C?)
|
||||
inspect (SerializationOutput(factory).serialize(C(1, C(2, C(3, C(4, null)))), AMQP_P2P_CONTEXT))
|
||||
}
|
||||
|
||||
@Test
|
||||
fun test3() {
|
||||
data class C (val a: IntArray, val b: Array<String>)
|
||||
|
||||
val a = IntArray(10) { i -> i }
|
||||
val c = C(a, arrayOf("aaa", "bbb", "ccc"))
|
||||
|
||||
inspect (SerializationOutput(factory).serialize(c, AMQP_P2P_CONTEXT))
|
||||
}
|
||||
|
||||
@Test
|
||||
fun test4() {
|
||||
data class Elem(val e1: Long, val e2: String)
|
||||
data class Wrapper (val name: String, val elementes: List<Elem>)
|
||||
|
||||
inspect (SerializationOutput(factory).serialize(
|
||||
Wrapper("Outer Class",
|
||||
listOf(
|
||||
Elem(1L, "First element"),
|
||||
Elem(2L, "Second element"),
|
||||
Elem(3L, "Third element")
|
||||
)), AMQP_P2P_CONTEXT))
|
||||
}
|
||||
|
||||
@Test
|
||||
fun test4b() {
|
||||
data class Elem(val e1: Long, val e2: String)
|
||||
data class Wrapper (val name: String, val elementes: List<List<Elem>>)
|
||||
|
||||
inspect (SerializationOutput(factory).serialize(
|
||||
Wrapper("Outer Class",
|
||||
listOf (
|
||||
listOf(
|
||||
Elem(1L, "First element"),
|
||||
Elem(2L, "Second element"),
|
||||
Elem(3L, "Third element")
|
||||
),
|
||||
listOf(
|
||||
Elem(4L, "Fourth element"),
|
||||
Elem(5L, "Fifth element"),
|
||||
Elem(6L, "Sixth element")
|
||||
)
|
||||
)), AMQP_P2P_CONTEXT))
|
||||
}
|
||||
|
||||
@Test
|
||||
fun test5() {
|
||||
data class C (val a: Map<String, String>)
|
||||
|
||||
inspect (SerializationOutput(factory).serialize(
|
||||
C(mapOf(
|
||||
"a" to "a a a",
|
||||
"b" to "b b b",
|
||||
"c" to "c c c")),
|
||||
AMQP_P2P_CONTEXT
|
||||
))
|
||||
}
|
||||
}
|
@ -1,83 +0,0 @@
|
||||
package net.corda.blobinspector
|
||||
|
||||
import org.junit.Test
|
||||
import org.junit.Assert.assertEquals
|
||||
import org.junit.Assert.assertTrue
|
||||
import kotlin.test.assertFalse
|
||||
|
||||
class ModeParse {
|
||||
@Test
|
||||
fun fileIsSetToFile() {
|
||||
val opts1 = Array(2) {
|
||||
when (it) {
|
||||
0 -> "-m"
|
||||
1 -> "file"
|
||||
else -> "error"
|
||||
}
|
||||
}
|
||||
|
||||
assertEquals(Mode.file, getMode(opts1).mode)
|
||||
}
|
||||
|
||||
@Test
|
||||
fun nothingIsSetToFile() {
|
||||
val opts1 = Array(0) { "" }
|
||||
|
||||
assertEquals(Mode.file, getMode(opts1).mode)
|
||||
}
|
||||
|
||||
@Test
|
||||
fun filePathIsSet() {
|
||||
val opts1 = Array(4) {
|
||||
when (it) {
|
||||
0 -> "-m"
|
||||
1 -> "file"
|
||||
2 -> "-f"
|
||||
3 -> "path/to/file"
|
||||
else -> "error"
|
||||
}
|
||||
}
|
||||
|
||||
val config = getMode(opts1)
|
||||
assertTrue(config is FileConfig)
|
||||
assertEquals(Mode.file, config.mode)
|
||||
assertEquals("unset", (config as FileConfig).file)
|
||||
|
||||
loadModeSpecificOptions(config, opts1)
|
||||
|
||||
assertEquals("path/to/file", config.file)
|
||||
}
|
||||
|
||||
@Test
|
||||
fun schemaIsSet() {
|
||||
Array(2) {
|
||||
when (it) { 0 -> "-f"; 1 -> "path/to/file"; else -> "error"
|
||||
}
|
||||
}.let { options ->
|
||||
getMode(options).apply {
|
||||
loadModeSpecificOptions(this, options)
|
||||
assertFalse(schema)
|
||||
}
|
||||
}
|
||||
|
||||
Array(3) {
|
||||
when (it) { 0 -> "--schema"; 1 -> "-f"; 2 -> "path/to/file"; else -> "error"
|
||||
}
|
||||
}.let {
|
||||
getMode(it).apply {
|
||||
loadModeSpecificOptions(this, it)
|
||||
assertTrue(schema)
|
||||
}
|
||||
}
|
||||
|
||||
Array(3) {
|
||||
when (it) { 0 -> "-f"; 1 -> "path/to/file"; 2 -> "-s"; else -> "error"
|
||||
}
|
||||
}.let {
|
||||
getMode(it).apply {
|
||||
loadModeSpecificOptions(this, it)
|
||||
assertTrue(schema)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,28 +0,0 @@
|
||||
package net.corda.blobinspector
|
||||
|
||||
import org.junit.Test
|
||||
|
||||
class SimplifyClassTests {
|
||||
|
||||
@Test
|
||||
fun test1() {
|
||||
data class A(val a: Int)
|
||||
|
||||
println(A::class.java.name)
|
||||
println(A::class.java.name.simplifyClass())
|
||||
}
|
||||
|
||||
@Test
|
||||
fun test2() {
|
||||
val p = this.javaClass.`package`.name
|
||||
|
||||
println("$p.Class1<$p.Class2>")
|
||||
println("$p.Class1<$p.Class2>".simplifyClass())
|
||||
println("$p.Class1<$p.Class2, $p.Class3>")
|
||||
println("$p.Class1<$p.Class2, $p.Class3>".simplifyClass())
|
||||
println("$p.Class1<$p.Class2<$p.Class3>>")
|
||||
println("$p.Class1<$p.Class2<$p.Class3>>".simplifyClass())
|
||||
println("$p.Class1<$p.Class2<$p.Class3>>")
|
||||
println("$p.Class1\$C<$p.Class2<$p.Class3>>".simplifyClass())
|
||||
}
|
||||
}
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -12,7 +12,12 @@
|
||||
|
||||
package net.corda.nodeapi.internal.config
|
||||
|
||||
import com.typesafe.config.*
|
||||
import com.typesafe.config.Config
|
||||
import com.typesafe.config.ConfigException
|
||||
import com.typesafe.config.ConfigFactory
|
||||
import com.typesafe.config.ConfigUtil
|
||||
import com.typesafe.config.ConfigValueFactory
|
||||
import com.typesafe.config.ConfigValueType
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.core.internal.noneOrSingle
|
||||
import net.corda.core.internal.uncheckedCast
|
||||
@ -49,7 +54,7 @@ operator fun <T : Any> Config.getValue(receiver: Any, metadata: KProperty<*>): T
|
||||
return getValueInternal(metadata.name, metadata.returnType, UnknownConfigKeysPolicy.IGNORE::handle)
|
||||
}
|
||||
|
||||
fun <T : Any> Config.parseAs(clazz: KClass<T>, onUnknownKeys: ((Set<String>, logger: Logger) -> Unit) = UnknownConfigKeysPolicy.FAIL::handle): T {
|
||||
fun <T : Any> Config.parseAs(clazz: KClass<T>, onUnknownKeys: ((Set<String>, logger: Logger) -> Unit) = UnknownConfigKeysPolicy.FAIL::handle, nestedPath: String? = null): T {
|
||||
require(clazz.isData) { "Only Kotlin data classes can be parsed. Offending: ${clazz.qualifiedName}" }
|
||||
val constructor = clazz.primaryConstructor!!
|
||||
val parameters = constructor.parameters
|
||||
@ -72,7 +77,7 @@ fun <T : Any> Config.parseAs(clazz: KClass<T>, onUnknownKeys: ((Set<String>, log
|
||||
// Get the matching property for this parameter
|
||||
val property = clazz.memberProperties.first { it.name == param.name }
|
||||
val path = defaultToOldPath(property)
|
||||
getValueInternal<Any>(path, param.type, onUnknownKeys)
|
||||
getValueInternal<Any>(path, param.type, onUnknownKeys, nestedPath)
|
||||
}
|
||||
try {
|
||||
return constructor.callBy(args)
|
||||
@ -101,68 +106,83 @@ fun Config.toProperties(): Properties {
|
||||
{ it.value.unwrapped().toString() })
|
||||
}
|
||||
|
||||
private fun <T : Any> Config.getValueInternal(path: String, type: KType, onUnknownKeys: ((Set<String>, logger: Logger) -> Unit)): T {
|
||||
return uncheckedCast(if (type.arguments.isEmpty()) getSingleValue(path, type, onUnknownKeys) else getCollectionValue(path, type, onUnknownKeys))
|
||||
private fun <T : Any> Config.getValueInternal(path: String, type: KType, onUnknownKeys: ((Set<String>, logger: Logger) -> Unit), nestedPath: String? = null): T {
|
||||
return uncheckedCast(if (type.arguments.isEmpty()) getSingleValue(path, type, onUnknownKeys, nestedPath) else getCollectionValue(path, type, onUnknownKeys, nestedPath))
|
||||
}
|
||||
|
||||
private fun Config.getSingleValue(path: String, type: KType, onUnknownKeys: (Set<String>, logger: Logger) -> Unit): Any? {
|
||||
private fun Config.getSingleValue(path: String, type: KType, onUnknownKeys: (Set<String>, logger: Logger) -> Unit, nestedPath: String? = null): Any? {
|
||||
if (type.isMarkedNullable && !hasPath(path)) return null
|
||||
val typeClass = type.jvmErasure
|
||||
return when (typeClass) {
|
||||
String::class -> getString(path)
|
||||
Int::class -> getInt(path)
|
||||
Long::class -> getLong(path)
|
||||
Double::class -> getDouble(path)
|
||||
Boolean::class -> getBoolean(path)
|
||||
LocalDate::class -> LocalDate.parse(getString(path))
|
||||
Duration::class -> getDuration(path)
|
||||
Instant::class -> Instant.parse(getString(path))
|
||||
NetworkHostAndPort::class -> NetworkHostAndPort.parse(getString(path))
|
||||
Path::class -> Paths.get(getString(path))
|
||||
URL::class -> URL(getString(path))
|
||||
UUID::class -> UUID.fromString(getString(path))
|
||||
CordaX500Name::class -> {
|
||||
when (getValue(path).valueType()) {
|
||||
ConfigValueType.OBJECT -> getConfig(path).parseAs(onUnknownKeys)
|
||||
else -> CordaX500Name.parse(getString(path))
|
||||
return try {
|
||||
when (typeClass) {
|
||||
String::class -> getString(path)
|
||||
Int::class -> getInt(path)
|
||||
Long::class -> getLong(path)
|
||||
Double::class -> getDouble(path)
|
||||
Boolean::class -> getBoolean(path)
|
||||
LocalDate::class -> LocalDate.parse(getString(path))
|
||||
Duration::class -> getDuration(path)
|
||||
Instant::class -> Instant.parse(getString(path))
|
||||
NetworkHostAndPort::class -> NetworkHostAndPort.parse(getString(path))
|
||||
Path::class -> Paths.get(getString(path))
|
||||
URL::class -> URL(getString(path))
|
||||
UUID::class -> UUID.fromString(getString(path))
|
||||
CordaX500Name::class -> {
|
||||
when (getValue(path).valueType()) {
|
||||
ConfigValueType.OBJECT -> getConfig(path).parseAs(onUnknownKeys)
|
||||
else -> CordaX500Name.parse(getString(path))
|
||||
}
|
||||
}
|
||||
Properties::class -> getConfig(path).toProperties()
|
||||
Config::class -> getConfig(path)
|
||||
else -> if (typeClass.java.isEnum) {
|
||||
parseEnum(typeClass.java, getString(path))
|
||||
} else {
|
||||
getConfig(path).parseAs(typeClass, onUnknownKeys, nestedPath?.let { "$it.$path" } ?: path)
|
||||
}
|
||||
}
|
||||
Properties::class -> getConfig(path).toProperties()
|
||||
Config::class -> getConfig(path)
|
||||
else -> if (typeClass.java.isEnum) {
|
||||
parseEnum(typeClass.java, getString(path))
|
||||
} else {
|
||||
getConfig(path).parseAs(typeClass, onUnknownKeys)
|
||||
}
|
||||
} catch (e: ConfigException.Missing) {
|
||||
throw e.relative(path, nestedPath)
|
||||
}
|
||||
}
|
||||
|
||||
private fun Config.getCollectionValue(path: String, type: KType, onUnknownKeys: (Set<String>, logger: Logger) -> Unit): Collection<Any> {
|
||||
private fun ConfigException.Missing.relative(path: String, nestedPath: String?): ConfigException.Missing {
|
||||
return when {
|
||||
nestedPath != null -> throw ConfigException.Missing("$nestedPath.$path")
|
||||
else -> this
|
||||
}
|
||||
}
|
||||
|
||||
private fun Config.getCollectionValue(path: String, type: KType, onUnknownKeys: (Set<String>, logger: Logger) -> Unit, nestedPath: String? = null): Collection<Any> {
|
||||
val typeClass = type.jvmErasure
|
||||
require(typeClass == List::class || typeClass == Set::class) { "$typeClass is not supported" }
|
||||
val elementClass = type.arguments[0].type?.jvmErasure ?: throw IllegalArgumentException("Cannot work with star projection: $type")
|
||||
if (!hasPath(path)) {
|
||||
return if (typeClass == List::class) emptyList() else emptySet()
|
||||
}
|
||||
val values: List<Any> = when (elementClass) {
|
||||
String::class -> getStringList(path)
|
||||
Int::class -> getIntList(path)
|
||||
Long::class -> getLongList(path)
|
||||
Double::class -> getDoubleList(path)
|
||||
Boolean::class -> getBooleanList(path)
|
||||
LocalDate::class -> getStringList(path).map(LocalDate::parse)
|
||||
Instant::class -> getStringList(path).map(Instant::parse)
|
||||
NetworkHostAndPort::class -> getStringList(path).map(NetworkHostAndPort.Companion::parse)
|
||||
Path::class -> getStringList(path).map { Paths.get(it) }
|
||||
URL::class -> getStringList(path).map(::URL)
|
||||
UUID::class -> getStringList(path).map { UUID.fromString(it) }
|
||||
CordaX500Name::class -> getStringList(path).map(CordaX500Name.Companion::parse)
|
||||
Properties::class -> getConfigList(path).map(Config::toProperties)
|
||||
else -> if (elementClass.java.isEnum) {
|
||||
getStringList(path).map { parseEnum(elementClass.java, it) }
|
||||
} else {
|
||||
getConfigList(path).map { it.parseAs(elementClass, onUnknownKeys) }
|
||||
val values: List<Any> = try {
|
||||
when (elementClass) {
|
||||
String::class -> getStringList(path)
|
||||
Int::class -> getIntList(path)
|
||||
Long::class -> getLongList(path)
|
||||
Double::class -> getDoubleList(path)
|
||||
Boolean::class -> getBooleanList(path)
|
||||
LocalDate::class -> getStringList(path).map(LocalDate::parse)
|
||||
Instant::class -> getStringList(path).map(Instant::parse)
|
||||
NetworkHostAndPort::class -> getStringList(path).map(NetworkHostAndPort.Companion::parse)
|
||||
Path::class -> getStringList(path).map { Paths.get(it) }
|
||||
URL::class -> getStringList(path).map(::URL)
|
||||
UUID::class -> getStringList(path).map { UUID.fromString(it) }
|
||||
CordaX500Name::class -> getStringList(path).map(CordaX500Name.Companion::parse)
|
||||
Properties::class -> getConfigList(path).map(Config::toProperties)
|
||||
else -> if (elementClass.java.isEnum) {
|
||||
getStringList(path).map { parseEnum(elementClass.java, it) }
|
||||
} else {
|
||||
getConfigList(path).map { it.parseAs(elementClass, onUnknownKeys) }
|
||||
}
|
||||
}
|
||||
} catch (e: ConfigException.Missing) {
|
||||
throw e.relative(path, nestedPath)
|
||||
}
|
||||
return if (typeClass == Set::class) values.toSet() else values
|
||||
}
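A hypothetical usage sketch (not part of this change; the data classes below are invented and it is assumed to
sit where the ``parseAs`` extension above is visible). It shows why threading ``nestedPath`` through matters: a
key missing from a nested section is now reported with its full path, e.g. ``rpcSettings.address`` rather than
just ``address``.

import com.typesafe.config.ConfigFactory
import java.io.File
import net.corda.core.identity.CordaX500Name
import net.corda.core.utilities.NetworkHostAndPort

// Invented data classes mirroring a fragment of node.conf.
data class SampleRpcSettings(val address: NetworkHostAndPort, val adminAddress: NetworkHostAndPort)
data class SampleNodeConfig(val myLegalName: CordaX500Name, val rpcSettings: SampleRpcSettings)

// A missing rpcSettings.address now surfaces as ConfigException.Missing("rpcSettings.address") rather than "address".
fun loadSampleConfig(file: File): SampleNodeConfig =
        ConfigFactory.parseFile(file).resolve().parseAs(SampleNodeConfig::class)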
|
||||
|
@ -15,7 +15,6 @@ import net.corda.core.schemas.MappedSchema
import net.corda.core.utilities.contextLogger
import rx.Observable
import rx.Subscriber
import rx.subjects.PublishSubject
import rx.subjects.UnicastSubject
import java.io.Closeable
import java.sql.Connection
@ -81,9 +80,7 @@ class CordaPersistence(
}
val entityManagerFactory get() = hibernateConfig.sessionFactoryForRegisteredSchemas

data class Boundary(val txId: UUID)

internal val transactionBoundaries = PublishSubject.create<Boundary>().toSerialized()
data class Boundary(val txId: UUID, val success: Boolean)

init {
// Found a unit test that was forgetting to close the database transactions. When you close() on the top level
@ -204,15 +201,19 @@ class CordaPersistence(
*
* For examples, see the call hierarchy of this function.
*/
fun <T : Any> rx.Observer<T>.bufferUntilDatabaseCommit(): rx.Observer<T> {
val currentTxId = contextTransaction.id
val databaseTxBoundary: Observable<CordaPersistence.Boundary> = contextDatabase.transactionBoundaries.first { it.txId == currentTxId }
fun <T : Any> rx.Observer<T>.bufferUntilDatabaseCommit(propagateRollbackAsError: Boolean = false): rx.Observer<T> {
val currentTx = contextTransaction
val subject = UnicastSubject.create<T>()
val databaseTxBoundary: Observable<CordaPersistence.Boundary> = currentTx.boundary.filter { it.success }
if (propagateRollbackAsError) {
currentTx.boundary.filter { !it.success }.subscribe { this.onError(DatabaseTransactionRolledBackException(it.txId)) }
}
subject.delaySubscription(databaseTxBoundary).subscribe(this)
databaseTxBoundary.doOnCompleted { subject.onCompleted() }
return subject
}

class DatabaseTransactionRolledBackException(txId: UUID) : Exception("Database transaction $txId was rolled back")

// A subscriber that delegates to multiple others, wrapping a database transaction around the combination.
private class DatabaseTransactionWrappingSubscriber<U>(private val db: CordaPersistence?) : Subscriber<U>() {
// Some unsubscribes happen inside onNext() so need something that supports concurrent modification.
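The rewritten bufferUntilDatabaseCommit above keys its buffering off the current transaction's own boundary observable, and can optionally surface a rollback as onError. A minimal RxJava 1.x sketch of just the buffering mechanism, with a stand-in Boundary type instead of the Corda transaction plumbing:

// Minimal sketch, assuming RxJava 1.x (rx.*). Boundary here is an illustrative stand-in,
// not the CordaPersistence type; only the delay-subscription buffering trick is shown.
import rx.Observable
import rx.Observer
import rx.observers.TestSubscriber
import rx.subjects.PublishSubject
import rx.subjects.UnicastSubject

data class Boundary(val success: Boolean)

// Buffer events aimed at `downstream` until the transaction boundary reports success.
fun <T : Any> bufferUntilCommit(downstream: Observer<T>, boundary: Observable<Boundary>): Observer<T> {
    val subject = UnicastSubject.create<T>()                  // queues events until someone subscribes
    val commit = boundary.filter { it.success }
    subject.delaySubscription(commit).subscribe(downstream)   // replay to downstream only after commit
    return subject
}

fun main() {
    val boundary = PublishSubject.create<Boundary>()
    val downstream = TestSubscriber<String>()
    val buffered = bufferUntilCommit(downstream, boundary)

    buffered.onNext("vault update")            // held back: transaction still open
    println(downstream.onNextEvents)           // []
    boundary.onNext(Boundary(success = true))  // "commit" releases the buffered event
    println(downstream.onNextEvents)           // [vault update]
}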
@ -13,6 +13,7 @@ package net.corda.nodeapi.internal.persistence
import co.paralleluniverse.strands.Strand
import org.hibernate.Session
import org.hibernate.Transaction
import rx.subjects.PublishSubject
import java.sql.Connection
import java.util.*

@ -51,6 +52,10 @@ class DatabaseTransaction(

val session: Session by sessionDelegate
private lateinit var hibernateTransaction: Transaction

internal val boundary = PublishSubject.create<CordaPersistence.Boundary>()
private var committed = false

fun commit() {
if (sessionDelegate.isInitialized()) {
hibernateTransaction.commit()
@ -58,6 +63,7 @@ class DatabaseTransaction(
if (_connectionCreated) {
connection.commit()
}
committed = true
}

fun rollback() {
@ -78,7 +84,15 @@ class DatabaseTransaction(
}
contextTransactionOrNull = outerTransaction
if (outerTransaction == null) {
database.transactionBoundaries.onNext(CordaPersistence.Boundary(id))
boundary.onNext(CordaPersistence.Boundary(id, committed))
}
}

fun onCommit(callback: () -> Unit) {
boundary.filter { it.success }.subscribe { callback() }
}

fun onRollback(callback: () -> Unit) {
boundary.filter { !it.success }.subscribe { callback() }
}
}
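Because the boundary event now records whether the transaction committed, callers can hang commit-only or rollback-only callbacks off it. A self-contained sketch of that pattern; FakeTransaction below is illustrative, not the real DatabaseTransaction:

// Sketch only: a stand-alone model of the boundary-with-success-flag idea, assuming RxJava 1.x.
import rx.subjects.PublishSubject
import java.util.UUID

class FakeTransaction(val id: UUID = UUID.randomUUID()) {
    data class Boundary(val txId: UUID, val success: Boolean)

    private val boundary = PublishSubject.create<Boundary>()
    private var committed = false

    fun onCommit(callback: () -> Unit) = boundary.filter { it.success }.subscribe { callback() }
    fun onRollback(callback: () -> Unit) = boundary.filter { !it.success }.subscribe { callback() }

    fun commit() { committed = true; close() }
    fun rollback() { committed = false; close() }

    // Emitting the boundary only on close mirrors "fire callbacks once the outcome is known".
    private fun close() = boundary.onNext(Boundary(id, committed))
}

fun main() {
    val tx = FakeTransaction()
    tx.onCommit { println("schedule the wake-up") }        // runs only if the tx commits
    tx.onRollback { println("drop the buffered events") }  // runs only if it rolls back
    tx.commit()  // prints: schedule the wake-up
}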
@ -54,6 +54,7 @@ internal class AMQPChannelHandler(private val serverMode: Boolean,
|
||||
private var remoteCert: X509Certificate? = null
|
||||
private var eventProcessor: EventProcessor? = null
|
||||
private var suppressClose: Boolean = false
|
||||
private var badCert: Boolean = false
|
||||
|
||||
override fun channelActive(ctx: ChannelHandlerContext) {
|
||||
val ch = ctx.channel()
|
||||
@ -86,7 +87,7 @@ internal class AMQPChannelHandler(private val serverMode: Boolean,
|
||||
val ch = ctx.channel()
|
||||
log.info("Closed client connection ${ch.id()} from $remoteAddress to ${ch.localAddress()}")
|
||||
if (!suppressClose) {
|
||||
onClose(Pair(ch as SocketChannel, ConnectionChange(remoteAddress, remoteCert, false)))
|
||||
onClose(Pair(ch as SocketChannel, ConnectionChange(remoteAddress, remoteCert, false, badCert)))
|
||||
}
|
||||
eventProcessor?.close()
|
||||
ctx.fireChannelInactive()
|
||||
@ -104,19 +105,22 @@ internal class AMQPChannelHandler(private val serverMode: Boolean,
|
||||
val remoteX500Name = try {
|
||||
CordaX500Name.build(remoteCert!!.subjectX500Principal)
|
||||
} catch (ex: IllegalArgumentException) {
|
||||
badCert = true
|
||||
log.error("Certificate subject not a valid CordaX500Name", ex)
|
||||
ctx.close()
|
||||
return
|
||||
}
|
||||
if (allowedRemoteLegalNames != null && remoteX500Name !in allowedRemoteLegalNames) {
|
||||
badCert = true
|
||||
log.error("Provided certificate subject $remoteX500Name not in expected set $allowedRemoteLegalNames")
|
||||
ctx.close()
|
||||
return
|
||||
}
|
||||
log.info("Handshake completed with subject: $remoteX500Name")
|
||||
createAMQPEngine(ctx)
|
||||
onOpen(Pair(ctx.channel() as SocketChannel, ConnectionChange(remoteAddress, remoteCert, true)))
|
||||
onOpen(Pair(ctx.channel() as SocketChannel, ConnectionChange(remoteAddress, remoteCert, true, false)))
|
||||
} else {
|
||||
badCert = true
|
||||
log.error("Handshake failure ${evt.cause().message}")
|
||||
if (log.isTraceEnabled) {
|
||||
log.trace("Handshake failure", evt.cause())
|
||||
|
@ -78,7 +78,7 @@ class AMQPClient(val targets: List<NetworkHostAndPort>,

val log = contextLogger()
const val MIN_RETRY_INTERVAL = 1000L
const val MAX_RETRY_INTERVAL = 60000L
const val MAX_RETRY_INTERVAL = 300000L
const val BACKOFF_MULTIPLIER = 2L
const val NUM_CLIENT_THREADS = 2
}
@ -93,9 +93,22 @@ class AMQPClient(val targets: List<NetworkHostAndPort>,
private var targetIndex = 0
private var currentTarget: NetworkHostAndPort = targets.first()
private var retryInterval = MIN_RETRY_INTERVAL
private val badCertTargets = mutableSetOf<NetworkHostAndPort>()

private fun nextTarget() {
targetIndex = (targetIndex + 1).rem(targets.size)
val origIndex = targetIndex
targetIndex = -1
for (offset in 1..targets.size) {
val newTargetIndex = (origIndex + offset).rem(targets.size)
if (targets[newTargetIndex] !in badCertTargets) {
targetIndex = newTargetIndex
break
}
}
if (targetIndex == -1) {
log.error("No targets have presented acceptable certificates for $allowedRemoteLegalNames. Halting retries")
return
}
log.info("Retry connect to ${targets[targetIndex]}")
retryInterval = min(MAX_RETRY_INTERVAL, retryInterval * BACKOFF_MULTIPLIER)
}
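The retry policy above combines a capped exponential backoff with a blacklist of endpoints whose certificates were rejected. A self-contained sketch of that selection logic; the class and field names are illustrative, not the AMQPClient internals:

// Illustrative sketch of the retry/backoff policy, not the actual AMQPClient code.
import kotlin.math.min

class TargetSelector(private val targets: List<String>,
                     private val minRetryMs: Long = 1_000L,
                     private val maxRetryMs: Long = 300_000L,
                     private val backoffMultiplier: Long = 2L) {
    private val badTargets = mutableSetOf<String>()
    private var index = 0
    var retryIntervalMs = minRetryMs
        private set

    fun markBad(target: String) { badTargets += target }

    // Returns the next usable target in round-robin order, or null if every target is blacklisted.
    fun next(): String? {
        for (offset in 1..targets.size) {
            val candidate = targets[(index + offset) % targets.size]
            if (candidate !in badTargets) {
                index = (index + offset) % targets.size
                retryIntervalMs = min(maxRetryMs, retryIntervalMs * backoffMultiplier)
                return candidate
            }
        }
        return null  // halt retries: no endpoint has an acceptable certificate
    }
}

fun main() {
    val selector = TargetSelector(listOf("a:10002", "b:10002"))
    selector.markBad("b:10002")
    println(selector.next())  // a:10002
    println(selector.next())  // a:10002 again, with a doubled (capped) retry interval
}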
@ -162,7 +175,8 @@ class AMQPClient(val targets: List<NetworkHostAndPort>,
|
||||
}
|
||||
}
|
||||
|
||||
val handler = createClientSslHelper(parent.currentTarget, keyManagerFactory, trustManagerFactory)
|
||||
val target = parent.currentTarget
|
||||
val handler = createClientSslHelper(target, keyManagerFactory, trustManagerFactory)
|
||||
pipeline.addLast("sslHandler", handler)
|
||||
if (parent.trace) pipeline.addLast("logger", LoggingHandler(LogLevel.INFO))
|
||||
pipeline.addLast(AMQPChannelHandler(false,
|
||||
@ -174,7 +188,13 @@ class AMQPClient(val targets: List<NetworkHostAndPort>,
|
||||
parent.retryInterval = MIN_RETRY_INTERVAL // reset to fast reconnect if we connect properly
|
||||
parent._onConnection.onNext(it.second)
|
||||
},
|
||||
{ parent._onConnection.onNext(it.second) },
|
||||
{
|
||||
parent._onConnection.onNext(it.second)
|
||||
if (it.second.badCert) {
|
||||
log.error("Blocking future connection attempts to $target due to bad certificate on endpoint")
|
||||
parent.badCertTargets += target
|
||||
}
|
||||
},
|
||||
{ rcv -> parent._onReceive.onNext(rcv) }))
|
||||
}
|
||||
}
|
||||
@ -188,6 +208,9 @@ class AMQPClient(val targets: List<NetworkHostAndPort>,
|
||||
}
|
||||
|
||||
private fun restart() {
|
||||
if (targetIndex == -1) {
|
||||
return
|
||||
}
|
||||
val bootstrap = Bootstrap()
|
||||
// TODO Needs more configuration control when we profile. e.g. to use EPOLL on Linux
|
||||
bootstrap.group(workerGroup).channel(NioSocketChannel::class.java).handler(ClientChannelInitializer(this))
@ -13,4 +13,4 @@ package net.corda.nodeapi.internal.protonwrapper.netty
import java.net.InetSocketAddress
import java.security.cert.X509Certificate

data class ConnectionChange(val remoteAddress: InetSocketAddress, val remoteCert: X509Certificate?, val connected: Boolean)
data class ConnectionChange(val remoteAddress: InetSocketAddress, val remoteCert: X509Certificate?, val connected: Boolean, val badCert: Boolean)
@ -111,7 +111,7 @@ dependencies {
compile "org.fusesource.jansi:jansi:$jansi_version"

// Manifests: for reading stuff from the manifest file
compile "com.jcabi:jcabi-manifests:1.1"
compile "com.jcabi:jcabi-manifests:$jcabi_manifests_version"

compile("com.intellij:forms_rt:7.0.3") {
exclude group: "asm"
@ -0,0 +1,158 @@
|
||||
package net.corda.node.flows
|
||||
|
||||
import co.paralleluniverse.fibers.Suspendable
|
||||
import net.corda.client.rpc.CordaRPCClient
|
||||
import net.corda.core.flows.*
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.messaging.startFlow
|
||||
import net.corda.core.serialization.CordaSerializable
|
||||
import net.corda.core.utilities.ProgressTracker
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.core.utilities.unwrap
|
||||
import net.corda.node.services.Permissions
|
||||
import net.corda.testing.core.singleIdentity
|
||||
import net.corda.testing.driver.DriverParameters
|
||||
import net.corda.testing.driver.driver
|
||||
import net.corda.testing.driver.internal.RandomFree
|
||||
import net.corda.testing.node.User
|
||||
import org.junit.Before
|
||||
import org.junit.Test
|
||||
import java.lang.management.ManagementFactory
|
||||
import java.sql.SQLException
|
||||
import java.util.*
|
||||
import kotlin.test.assertEquals
|
||||
import kotlin.test.assertNotNull
|
||||
|
||||
|
||||
class FlowRetryTest {
|
||||
@Before
|
||||
fun resetCounters() {
|
||||
InitiatorFlow.seen.clear()
|
||||
InitiatedFlow.seen.clear()
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `flows continue despite errors`() {
|
||||
val numSessions = 2
|
||||
val numIterations = 10
|
||||
val user = User("mark", "dadada", setOf(Permissions.startFlow<InitiatorFlow>()))
|
||||
val result: Any? = driver(DriverParameters(isDebug = true, startNodesInProcess = isQuasarAgentSpecified(),
|
||||
portAllocation = RandomFree)) {
|
||||
|
||||
val nodeAHandle = startNode(rpcUsers = listOf(user)).getOrThrow()
|
||||
val nodeBHandle = startNode(rpcUsers = listOf(user)).getOrThrow()
|
||||
|
||||
val result = CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
|
||||
it.proxy.startFlow(::InitiatorFlow, numSessions, numIterations, nodeBHandle.nodeInfo.singleIdentity()).returnValue.getOrThrow()
|
||||
}
|
||||
result
|
||||
}
|
||||
assertNotNull(result)
|
||||
assertEquals("$numSessions:$numIterations", result)
|
||||
}
|
||||
}
|
||||
|
||||
fun isQuasarAgentSpecified(): Boolean {
|
||||
val jvmArgs = ManagementFactory.getRuntimeMXBean().inputArguments
|
||||
return jvmArgs.any { it.startsWith("-javaagent:") && it.contains("quasar") }
|
||||
}
|
||||
|
||||
class ExceptionToCauseRetry : SQLException("deadlock")
|
||||
|
||||
@StartableByRPC
|
||||
@InitiatingFlow
|
||||
class InitiatorFlow(private val sessionsCount: Int, private val iterationsCount: Int, private val other: Party) : FlowLogic<Any>() {
|
||||
companion object {
|
||||
object FIRST_STEP : ProgressTracker.Step("Step one")
|
||||
|
||||
fun tracker() = ProgressTracker(FIRST_STEP)
|
||||
|
||||
val seen = Collections.synchronizedSet(HashSet<Visited>())
|
||||
|
||||
fun visit(sessionNum: Int, iterationNum: Int, step: Step) {
|
||||
val visited = Visited(sessionNum, iterationNum, step)
|
||||
if (visited !in seen) {
|
||||
seen += visited
|
||||
throw ExceptionToCauseRetry()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
override val progressTracker = tracker()
|
||||
|
||||
@Suspendable
|
||||
override fun call(): Any {
|
||||
progressTracker.currentStep = FIRST_STEP
|
||||
var received: Any? = null
|
||||
visit(-1, -1, Step.First)
|
||||
for (sessionNum in 1..sessionsCount) {
|
||||
visit(sessionNum, -1, Step.BeforeInitiate)
|
||||
val session = initiateFlow(other)
|
||||
visit(sessionNum, -1, Step.AfterInitiate)
|
||||
session.send(SessionInfo(sessionNum, iterationsCount))
|
||||
visit(sessionNum, -1, Step.AfterInitiateSendReceive)
|
||||
for (iteration in 1..iterationsCount) {
|
||||
visit(sessionNum, iteration, Step.BeforeSend)
|
||||
logger.info("A Sending $sessionNum:$iteration")
|
||||
session.send("$sessionNum:$iteration")
|
||||
visit(sessionNum, iteration, Step.AfterSend)
|
||||
received = session.receive<Any>().unwrap { it }
|
||||
visit(sessionNum, iteration, Step.AfterReceive)
|
||||
logger.info("A Got $sessionNum:$iteration")
|
||||
}
|
||||
doSleep()
|
||||
}
|
||||
return received!!
|
||||
}
|
||||
|
||||
// This non-flow-friendly sleep triggered a bug with session end messages and non-retryable checkpoints.
|
||||
private fun doSleep() {
|
||||
Thread.sleep(2000)
|
||||
}
|
||||
}
|
||||
|
||||
@InitiatedBy(InitiatorFlow::class)
|
||||
class InitiatedFlow(val session: FlowSession) : FlowLogic<Any>() {
|
||||
companion object {
|
||||
object FIRST_STEP : ProgressTracker.Step("Step one")
|
||||
|
||||
fun tracker() = ProgressTracker(FIRST_STEP)
|
||||
|
||||
val seen = Collections.synchronizedSet(HashSet<Visited>())
|
||||
|
||||
fun visit(sessionNum: Int, iterationNum: Int, step: Step) {
|
||||
val visited = Visited(sessionNum, iterationNum, step)
|
||||
if (visited !in seen) {
|
||||
seen += visited
|
||||
throw ExceptionToCauseRetry()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
override val progressTracker = tracker()
|
||||
|
||||
@Suspendable
|
||||
override fun call() {
|
||||
progressTracker.currentStep = FIRST_STEP
|
||||
visit(-1, -1, Step.AfterInitiate)
|
||||
val sessionInfo = session.receive<SessionInfo>().unwrap { it }
|
||||
visit(sessionInfo.sessionNum, -1, Step.AfterInitiateSendReceive)
|
||||
for (iteration in 1..sessionInfo.iterationsCount) {
|
||||
visit(sessionInfo.sessionNum, iteration, Step.BeforeReceive)
|
||||
val got = session.receive<Any>().unwrap { it }
|
||||
visit(sessionInfo.sessionNum, iteration, Step.AfterReceive)
|
||||
logger.info("B Got $got")
|
||||
logger.info("B Sending $got")
|
||||
visit(sessionInfo.sessionNum, iteration, Step.BeforeSend)
|
||||
session.send(got)
|
||||
visit(sessionInfo.sessionNum, iteration, Step.AfterSend)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@CordaSerializable
|
||||
data class SessionInfo(val sessionNum: Int, val iterationsCount: Int)
|
||||
|
||||
enum class Step { First, BeforeInitiate, AfterInitiate, AfterInitiateSendReceive, BeforeSend, AfterSend, BeforeReceive, AfterReceive }
|
||||
|
||||
data class Visited(val sessionNum: Int, val iterationNum: Int, val step: Step)
|
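The new FlowRetryTest above drives retries by making every (session, iteration, step) combination fail exactly once and succeed on the retry. A stripped-down sketch of that idiom outside any Corda machinery; the names are illustrative:

// Illustrative sketch of the fail-once-per-step idiom used by FlowRetryTest; not Corda code.
import java.util.Collections

data class Visit(val session: Int, val iteration: Int, val step: String)

object FailOnce {
    private val seen = Collections.synchronizedSet(HashSet<Visit>())

    // Throws the first time a given point is reached; succeeds on every retry after that.
    fun visit(session: Int, iteration: Int, step: String) {
        val visit = Visit(session, iteration, step)
        if (seen.add(visit)) throw IllegalStateException("simulated transient failure at $visit")
    }
}

fun main() {
    // A caller that retries the whole unit of work sees one failure, then success.
    repeat(2) { attempt ->
        try {
            FailOnce.visit(1, 1, "BeforeSend")
            println("attempt $attempt succeeded")
        } catch (e: IllegalStateException) {
            println("attempt $attempt failed: ${e.message}")
        }
    }
}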
@ -25,6 +25,7 @@ import net.corda.core.node.services.vault.QueryCriteria
|
||||
import net.corda.core.transactions.TransactionBuilder
|
||||
import net.corda.core.utilities.NonEmptySet
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.core.utilities.seconds
|
||||
import net.corda.testMessage.ScheduledState
|
||||
import net.corda.testMessage.SpentState
|
||||
import net.corda.testing.contracts.DummyContract
|
||||
@ -110,7 +111,7 @@ class ScheduledFlowIntegrationTests : IntegrationTest() {
|
||||
val aliceClient = CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password)
|
||||
val bobClient = CordaRPCClient(bob.rpcAddress).start(rpcUser.username, rpcUser.password)
|
||||
|
||||
val scheduledFor = Instant.now().plusSeconds(20)
|
||||
val scheduledFor = Instant.now().plusSeconds(10)
|
||||
val initialiseFutures = mutableListOf<CordaFuture<*>>()
|
||||
for (i in 0 until N) {
|
||||
initialiseFutures.add(aliceClient.proxy.startFlow(::InsertInitialStateFlow, bob.nodeInfo.legalIdentities.first(), defaultNotaryIdentity, i, scheduledFor).returnValue)
|
||||
@ -125,6 +126,9 @@ class ScheduledFlowIntegrationTests : IntegrationTest() {
|
||||
}
|
||||
spendAttemptFutures.getOrThrowAll()
|
||||
|
||||
// TODO: the queries below are not atomic so we need to allow enough time for the scheduler to finish. Would be better to query scheduler.
|
||||
Thread.sleep(20.seconds.toMillis())
|
||||
|
||||
val aliceStates = aliceClient.proxy.vaultQuery(ScheduledState::class.java).states.filter { it.state.data.processed }
|
||||
val aliceSpentStates = aliceClient.proxy.vaultQuery(SpentState::class.java).states
|
||||
|
||||
|
@ -30,27 +30,23 @@ import net.corda.testing.driver.internal.RandomFree
|
||||
import net.corda.testing.internal.IntegrationTest
|
||||
import net.corda.testing.internal.IntegrationTestSchemas
|
||||
import net.corda.testing.internal.toDatabaseSchemaName
|
||||
import net.corda.testing.node.internal.CompatibilityZoneParams
|
||||
import net.corda.testing.node.internal.internalDriver
|
||||
import net.corda.testing.node.internal.*
|
||||
import net.corda.testing.node.internal.network.NetworkMapServer
|
||||
import net.corda.testing.node.internal.startNode
|
||||
import org.assertj.core.api.Assertions.assertThat
|
||||
import org.assertj.core.api.Assertions.assertThatThrownBy
|
||||
import org.junit.*
|
||||
import org.junit.Assert.assertEquals
|
||||
import org.junit.Before
|
||||
import org.junit.Rule
|
||||
import org.junit.Test
|
||||
import org.junit.runner.RunWith
|
||||
import org.junit.runners.Parameterized
|
||||
import java.net.URL
|
||||
import java.time.Instant
|
||||
|
||||
class NetworkMapTest : IntegrationTest() {
|
||||
companion object {
|
||||
@ClassRule
|
||||
@JvmField
|
||||
val databaseSchemas = IntegrationTestSchemas(
|
||||
ALICE_NAME.toDatabaseSchemaName(),
|
||||
BOB_NAME.toDatabaseSchemaName(),
|
||||
DUMMY_NOTARY_NAME.toDatabaseSchemaName())
|
||||
}
|
||||
|
||||
@RunWith(Parameterized::class)
|
||||
class NetworkMapTest(var initFunc: (URL, NetworkMapServer) -> CompatibilityZoneParams) : IntegrationTest() {
|
||||
@Rule
|
||||
@JvmField
|
||||
val testSerialization = SerializationEnvironmentRule(true)
|
||||
@ -61,13 +57,44 @@ class NetworkMapTest : IntegrationTest() {
|
||||
private lateinit var networkMapServer: NetworkMapServer
|
||||
private lateinit var compatibilityZone: CompatibilityZoneParams
|
||||
|
||||
companion object {
|
||||
@ClassRule
|
||||
@JvmField
|
||||
val databaseSchemas = IntegrationTestSchemas(
|
||||
ALICE_NAME.toDatabaseSchemaName(),
|
||||
BOB_NAME.toDatabaseSchemaName(),
|
||||
DUMMY_NOTARY_NAME.toDatabaseSchemaName())
|
||||
|
||||
@JvmStatic
|
||||
@Parameterized.Parameters(name = "{0}")
|
||||
fun runParams() = listOf(
|
||||
{ addr: URL, nms: NetworkMapServer ->
|
||||
SharedCompatibilityZoneParams(
|
||||
addr,
|
||||
publishNotaries = {
|
||||
nms.networkParameters = testNetworkParameters(it, modifiedTime = Instant.ofEpochMilli(random63BitValue()), epoch = 2)
|
||||
}
|
||||
)
|
||||
},
|
||||
{ addr: URL, nms: NetworkMapServer ->
|
||||
SplitCompatibilityZoneParams(
|
||||
doormanURL = URL("http://I/Don't/Exist"),
|
||||
networkMapURL = addr,
|
||||
publishNotaries = {
|
||||
nms.networkParameters = testNetworkParameters(it, modifiedTime = Instant.ofEpochMilli(random63BitValue()), epoch = 2)
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@Before
|
||||
fun start() {
|
||||
networkMapServer = NetworkMapServer(cacheTimeout, portAllocation.nextHostAndPort())
|
||||
val address = networkMapServer.start()
|
||||
compatibilityZone = CompatibilityZoneParams(URL("http://$address"), publishNotaries = {
|
||||
networkMapServer.networkParameters = testNetworkParameters(it, modifiedTime = Instant.ofEpochMilli(random63BitValue()), epoch = 2)
|
||||
})
|
||||
compatibilityZone = initFunc(URL("http://$address"), networkMapServer)
|
||||
}
|
||||
|
||||
@After
|
||||
|
@ -13,8 +13,10 @@ package net.corda.node.services.vault
|
||||
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.testing.core.TestIdentity
|
||||
import net.corda.testing.internal.*
|
||||
import org.junit.*
|
||||
import net.corda.testing.internal.GlobalDatabaseRule
|
||||
import net.corda.testing.internal.toDatabaseSchemaName
|
||||
import org.junit.ClassRule
|
||||
import org.junit.Rule
|
||||
import org.junit.rules.RuleChain
|
||||
|
||||
class VaultQueryIntegrationTests : VaultQueryTestsBase(), VaultQueryParties by vaultQueryTestRule {
|
||||
@ -29,4 +31,9 @@ class VaultQueryIntegrationTests : VaultQueryTestsBase(), VaultQueryParties by v
|
||||
@ClassRule @JvmField
|
||||
val ruleChain = RuleChain.outerRule(globalDatabaseRule).around(vaultQueryTestRule)
|
||||
}
|
||||
|
||||
@Suppress("LeakingThis")
|
||||
@Rule
|
||||
@JvmField
|
||||
val transactionRule = VaultQueryRollbackRule(this)
|
||||
}
|
||||
|
@ -33,6 +33,7 @@ import net.corda.testing.internal.IntegrationTest
|
||||
import net.corda.testing.internal.IntegrationTestSchemas
|
||||
import net.corda.testing.node.NotarySpec
|
||||
import net.corda.testing.node.internal.CompatibilityZoneParams
|
||||
import net.corda.testing.node.internal.SharedCompatibilityZoneParams
|
||||
import net.corda.testing.node.internal.internalDriver
|
||||
import net.corda.testing.node.internal.network.NetworkMapServer
|
||||
import org.assertj.core.api.Assertions.assertThat
|
||||
@ -92,7 +93,7 @@ class NodeRegistrationTest : IntegrationTest() {
|
||||
|
||||
@Test
|
||||
fun `node registration correct root cert`() {
|
||||
val compatibilityZone = CompatibilityZoneParams(
|
||||
val compatibilityZone = SharedCompatibilityZoneParams(
|
||||
URL("http://$serverHostAndPort"),
|
||||
publishNotaries = { server.networkParameters = testNetworkParameters(it) },
|
||||
rootCert = DEV_ROOT_CA.certificate)
|
||||
|
@ -135,11 +135,11 @@ data class CmdLineOptions(val baseDirectory: Path,
|
||||
if (devMode) mapOf("devMode" to this.devMode) else emptyMap<String, Any>())
|
||||
)
|
||||
return rawConfig to Try.on {
|
||||
rawConfig.parseAsNodeConfiguration(unknownConfigKeysPolicy::handle).also {
|
||||
rawConfig.parseAsNodeConfiguration(unknownConfigKeysPolicy::handle).also { config ->
|
||||
if (nodeRegistrationOption != null) {
|
||||
require(!it.devMode) { "registration cannot occur in devMode" }
|
||||
requireNotNull(it.compatibilityZoneURL) {
|
||||
"compatibilityZoneURL must be present in node configuration file in registration mode."
|
||||
require(!config.devMode) { "registration cannot occur in devMode" }
|
||||
require(config.compatibilityZoneURL != null || config.networkServices != null) {
|
||||
"compatibilityZoneURL or networkServices must be present in the node configuration file in registration mode."
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -122,6 +122,7 @@ import net.corda.node.services.persistence.NodePropertiesPersistentStore
|
||||
import net.corda.node.services.persistence.RunOnceService
|
||||
import net.corda.node.services.schema.HibernateObserver
|
||||
import net.corda.node.services.schema.NodeSchemaService
|
||||
import net.corda.node.services.statemachine.ExternalEvent
|
||||
import net.corda.node.services.statemachine.FlowLogicRefFactoryImpl
|
||||
import net.corda.node.services.statemachine.SingleThreadedStateMachineManager
|
||||
import net.corda.node.services.statemachine.StateMachineManager
|
||||
@ -303,7 +304,7 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
|
||||
val (identity, identityKeyPair) = obtainIdentity(notaryConfig = null)
|
||||
val identityService = makeIdentityService(identity.certificate)
|
||||
|
||||
networkMapClient = configuration.compatibilityZoneURL?.let { NetworkMapClient(it, identityService.trustRoot) }
|
||||
networkMapClient = configuration.networkServices?.let { NetworkMapClient(it.networkMapURL, identityService.trustRoot) }
|
||||
|
||||
val networkParameters = NetworkParametersReader(identityService.trustRoot, networkMapClient, configuration.baseDirectory).networkParameters
|
||||
check(networkParameters.minimumPlatformVersion <= versionInfo.platformVersion) {
|
||||
@ -1023,8 +1024,37 @@ internal fun logVendorString(database: CordaPersistence, log: Logger) {
|
||||
}
|
||||
|
||||
internal class FlowStarterImpl(private val smm: StateMachineManager, private val flowLogicRefFactory: FlowLogicRefFactory) : FlowStarter {
|
||||
override fun <T> startFlow(logic: FlowLogic<T>, context: InvocationContext, deduplicationHandler: DeduplicationHandler?): CordaFuture<FlowStateMachine<T>> {
|
||||
return smm.startFlow(logic, context, ourIdentity = null, deduplicationHandler = deduplicationHandler)
|
||||
override fun <T> startFlow(event: ExternalEvent.ExternalStartFlowEvent<T>): CordaFuture<FlowStateMachine<T>> {
|
||||
smm.deliverExternalEvent(event)
|
||||
return event.future
|
||||
}
|
||||
|
||||
override fun <T> startFlow(logic: FlowLogic<T>, context: InvocationContext): CordaFuture<FlowStateMachine<T>> {
|
||||
val startFlowEvent = object : ExternalEvent.ExternalStartFlowEvent<T>, DeduplicationHandler {
|
||||
override fun insideDatabaseTransaction() {}
|
||||
|
||||
override fun afterDatabaseTransaction() {}
|
||||
|
||||
override val externalCause: ExternalEvent
|
||||
get() = this
|
||||
override val deduplicationHandler: DeduplicationHandler
|
||||
get() = this
|
||||
|
||||
override val flowLogic: FlowLogic<T>
|
||||
get() = logic
|
||||
override val context: InvocationContext
|
||||
get() = context
|
||||
|
||||
override fun wireUpFuture(flowFuture: CordaFuture<FlowStateMachine<T>>) {
|
||||
_future.captureLater(flowFuture)
|
||||
}
|
||||
|
||||
private val _future = openFuture<FlowStateMachine<T>>()
|
||||
override val future: CordaFuture<FlowStateMachine<T>>
|
||||
get() = _future
|
||||
|
||||
}
|
||||
return startFlow(startFlowEvent)
|
||||
}
|
||||
|
||||
override fun <T> invokeFlowAsync(
|
||||
|
@ -224,7 +224,9 @@ open class NodeStartup(val args: Array<String>) {
|
||||
}
|
||||
|
||||
protected open fun registerWithNetwork(conf: NodeConfiguration, nodeRegistrationConfig: NodeRegistrationOption) {
|
||||
val compatibilityZoneURL = conf.compatibilityZoneURL!!
|
||||
val compatibilityZoneURL = conf.networkServices?.doormanURL ?: throw RuntimeException(
|
||||
"compatibilityZoneURL or networkServices must be configured!")
|
||||
|
||||
println()
|
||||
println("******************************************************************")
|
||||
println("* *")
|
||||
|
@ -18,13 +18,12 @@ import com.esotericsoftware.kryo.util.DefaultClassResolver
|
||||
import com.esotericsoftware.kryo.util.Util
|
||||
import net.corda.core.internal.writer
|
||||
import net.corda.core.serialization.ClassWhitelist
|
||||
import net.corda.core.serialization.CordaSerializable
|
||||
import net.corda.core.serialization.SerializationContext
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.serialization.internal.AttachmentsClassLoader
|
||||
import net.corda.serialization.internal.MutableClassWhitelist
|
||||
import net.corda.serialization.internal.TransientClassWhiteList
|
||||
import net.corda.serialization.internal.amqp.hasAnnotationInHierarchy
|
||||
import net.corda.serialization.internal.amqp.hasCordaSerializable
|
||||
import java.io.PrintWriter
|
||||
import java.lang.reflect.Modifier
|
||||
import java.lang.reflect.Modifier.isAbstract
|
||||
@ -137,7 +136,7 @@ class CordaClassResolver(serializationContext: SerializationContext) : DefaultCl
|
||||
return (type.classLoader !is AttachmentsClassLoader)
|
||||
&& !KryoSerializable::class.java.isAssignableFrom(type)
|
||||
&& !type.isAnnotationPresent(DefaultSerializer::class.java)
|
||||
&& (type.isAnnotationPresent(CordaSerializable::class.java) || whitelist.hasAnnotationInHierarchy(type))
|
||||
&& hasCordaSerializable(type)
|
||||
}
|
||||
|
||||
// Need to clear out class names from attachments.
|
||||
|
@ -30,6 +30,12 @@ interface CheckpointStorage {
|
||||
*/
|
||||
fun removeCheckpoint(id: StateMachineRunId): Boolean
|
||||
|
||||
/**
|
||||
* Load an existing checkpoint from the store.
|
||||
* @return the checkpoint, still in serialized form, or null if not found.
|
||||
*/
|
||||
fun getCheckpoint(id: StateMachineRunId): SerializedBytes<Checkpoint>?
|
||||
|
||||
/**
|
||||
* Stream all checkpoints from the store. If this is backed by a database the stream will be valid until the
|
||||
* underlying database connection is closed, so any processing should happen before it is closed.
|
||||
|
@ -29,9 +29,9 @@ import net.corda.core.utilities.contextLogger
|
||||
import net.corda.node.internal.InitiatedFlowFactory
|
||||
import net.corda.node.internal.cordapp.CordappProviderInternal
|
||||
import net.corda.node.services.config.NodeConfiguration
|
||||
import net.corda.node.services.messaging.DeduplicationHandler
|
||||
import net.corda.node.services.messaging.MessagingService
|
||||
import net.corda.node.services.network.NetworkMapUpdater
|
||||
import net.corda.node.services.statemachine.ExternalEvent
|
||||
import net.corda.node.services.statemachine.FlowStateMachineImpl
|
||||
import net.corda.nodeapi.internal.persistence.CordaPersistence
|
||||
|
||||
@ -144,11 +144,17 @@ interface ServiceHubInternal : ServiceHub {
interface FlowStarter {

/**
* Starts an already constructed flow. Note that you must be on the server thread to call this method.
* Starts an already constructed flow. Note that you must be on the server thread to call this method. This method
* just synthesizes an [ExternalEvent.ExternalStartFlowEvent] and calls the method below.
* @param context indicates who started the flow, see: [InvocationContext].
* @param deduplicationHandler allows exactly-once start of the flow, see [DeduplicationHandler]
*/
fun <T> startFlow(logic: FlowLogic<T>, context: InvocationContext, deduplicationHandler: DeduplicationHandler? = null): CordaFuture<FlowStateMachine<T>>
fun <T> startFlow(logic: FlowLogic<T>, context: InvocationContext): CordaFuture<FlowStateMachine<T>>

/**
* Starts a flow as described by an [ExternalEvent.ExternalStartFlowEvent]. If a transient error
* occurs during invocation, it will re-attempt to start the flow.
*/
fun <T> startFlow(event: ExternalEvent.ExternalStartFlowEvent<T>): CordaFuture<FlowStateMachine<T>>

/**
* Will check [logicType] and [args] against a whitelist and if acceptable then construct and initiate the flow.
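The reworked FlowStarter above funnels every start through an external event that carries its own deduplication handler and a future for the eventual state machine. A rough, self-contained sketch of that shape using plain Java futures and hypothetical interfaces in place of the Corda ones:

// Rough sketch of the "start flow via an external event" shape described above.
// ExternalStartEvent and DedupHandler are hypothetical stand-ins, not the Corda interfaces.
import java.util.concurrent.CompletableFuture

interface DedupHandler {
    fun insideDatabaseTransaction()   // e.g. persist a deduplication ID
    fun afterDatabaseTransaction()    // e.g. acknowledge the trigger / stop retries
}

interface ExternalStartEvent<T> : DedupHandler {
    val future: CompletableFuture<T>
    fun wireUpFuture(result: CompletableFuture<T>)
}

// A synthesized event for starts that need no real deduplication (the RPC-style case).
class SimpleStartEvent<T> : ExternalStartEvent<T> {
    override val future = CompletableFuture<T>()
    override fun insideDatabaseTransaction() {}
    override fun afterDatabaseTransaction() {}
    override fun wireUpFuture(result: CompletableFuture<T>) { result.thenAccept { future.complete(it) } }
}

// The "state machine manager" side: deliver the event, then wire the real result into its future.
fun <T> deliver(event: ExternalStartEvent<T>, run: () -> T): CompletableFuture<T> {
    event.insideDatabaseTransaction()
    event.wireUpFuture(CompletableFuture.completedFuture(run()))
    event.afterDatabaseTransaction()
    return event.future
}

fun main() {
    val started = deliver(SimpleStartEvent<String>()) { "flow result" }
    println(started.get())  // flow result
}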
@ -11,6 +11,7 @@
package net.corda.node.services.config

import com.typesafe.config.Config
import com.typesafe.config.ConfigException
import net.corda.core.context.AuthServiceId
import net.corda.core.identity.CordaX500Name
import net.corda.core.internal.div
@ -46,6 +47,7 @@ interface NodeConfiguration : NodeSSLConfiguration {
val devMode: Boolean
val devModeOptions: DevModeOptions?
val compatibilityZoneURL: URL?
val networkServices: NetworkServicesConfig?
val certificateChainCheckPolicies: List<CertChainPolicyConfig>
val verifierType: VerifierType
val p2pMessagingRetry: P2PMessagingRetryConfiguration
@ -58,6 +60,7 @@ interface NodeConfiguration : NodeSSLConfiguration {
val enterpriseConfiguration: EnterpriseConfiguration
// TODO Move into DevModeOptions
val useTestClock: Boolean get() = false
val lazyBridgeStart: Boolean
val detectPublicIp: Boolean get() = true
val sshd: SSHDConfiguration?
val database: DatabaseConfig
@ -168,6 +171,25 @@ data class BFTSMaRtConfiguration(
}
}

/**
* Used as an alternative to the older compatibilityZoneURL to allow the doorman and network map
* services for a node to be configured as different URLs. Cannot be set at the same time as the
* compatibilityZoneURL, and will be defaulted (if not set) to both point at the configured
* compatibilityZoneURL.
*
* @property doormanURL The URL of the TLS certificate signing service.
* @property networkMapURL The URL of the Network Map service.
* @property inferred Non-user setting that indicates whether the Network Services configuration was
* set explicitly ([inferred] == false) or whether it has been inferred via the compatibilityZoneURL parameter
* ([inferred] == true), in which case both the network map and doorman run on the same endpoint. Only one of
* compatibilityZoneURL or networkServices can be set at any one time.
*/
data class NetworkServicesConfig(
val doormanURL: URL,
val networkMapURL: URL,
val inferred : Boolean = false
)
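For illustration, the two mutually exclusive ways of pointing a node at its network services could be written as the HOCON below; the host names are placeholders, and the snippet is parsed with plain Typesafe Config rather than the node's own configuration loader:

// Sketch only: shows the shape of the two alternatives described above.
import com.typesafe.config.ConfigFactory

fun main() {
    // Option 1: single compatibility zone URL; doorman and network map are inferred to be the same endpoint.
    val inferred = ConfigFactory.parseString("""
        compatibilityZoneURL = "https://cz.example.com"
    """.trimIndent())

    // Option 2: split services via the newer networkServices block.
    val split = ConfigFactory.parseString("""
        networkServices {
            doormanURL = "https://doorman.example.com"
            networkMapURL = "https://netmap.example.com"
        }
    """.trimIndent())

    println(inferred.getString("compatibilityZoneURL"))
    println(split.getString("networkServices.doormanURL"))
    println(split.getString("networkServices.networkMapURL"))
}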
/**
|
||||
* Currently only used for notarisation requests.
|
||||
*
|
||||
@ -193,6 +215,7 @@ data class NodeConfigurationImpl(
|
||||
override val crlCheckSoftFail: Boolean,
|
||||
override val dataSourceProperties: Properties,
|
||||
override val compatibilityZoneURL: URL? = null,
|
||||
override var networkServices: NetworkServicesConfig? = null,
|
||||
override val tlsCertCrlDistPoint: URL? = null,
|
||||
override val tlsCertCrlIssuer: String? = null,
|
||||
override val rpcUsers: List<User>,
|
||||
@ -215,6 +238,7 @@ data class NodeConfigurationImpl(
|
||||
override val noLocalShell: Boolean = false,
|
||||
override val devModeOptions: DevModeOptions? = null,
|
||||
override val useTestClock: Boolean = false,
|
||||
override val lazyBridgeStart: Boolean = true,
|
||||
override val detectPublicIp: Boolean = true,
|
||||
// TODO See TODO above. Rename this to nodeInfoPollingFrequency and make it of type Duration
|
||||
override val additionalNodeInfoPollingFrequencyMsec: Long = 5.seconds.toMillis(),
|
||||
@ -241,9 +265,13 @@ data class NodeConfigurationImpl(
|
||||
explicitAddress != null -> {
|
||||
require(settings.address == null) { "Can't provide top-level rpcAddress and rpcSettings.address (they control the same property)." }
|
||||
logger.warn("Top-level declaration of property 'rpcAddress' is deprecated. Please use 'rpcSettings.address' instead.")
|
||||
|
||||
settings.copy(address = explicitAddress)
|
||||
}
|
||||
else -> settings
|
||||
else -> {
|
||||
settings.address ?: throw ConfigException.Missing("rpcSettings.address")
|
||||
settings
|
||||
}
|
||||
}.asOptions(fallbackSslOptions)
|
||||
}
|
||||
|
||||
@ -270,6 +298,7 @@ data class NodeConfigurationImpl(
|
||||
errors += validateDevModeOptions()
|
||||
errors += validateRpcOptions(rpcOptions)
|
||||
errors += validateTlsCertCrlConfig()
|
||||
errors += validateNetworkServices()
|
||||
return errors
|
||||
}
|
||||
|
||||
@ -284,12 +313,28 @@
}

private fun validateDevModeOptions(): List<String> {
val errors = mutableListOf<String>()
if (devMode) {
compatibilityZoneURL?.let {
errors += "'compatibilityZoneURL': present. Property cannot be set when 'devMode' is true."
return listOf("'compatibilityZoneURL': present. Property cannot be set when 'devMode' is true.")
}

// if compatibilityZoneURL is set then it will be copied into the networkServices field, so skipping
// this check by returning above is fine.
networkServices?.let {
return listOf("'networkServices': present. Property cannot be set when 'devMode' is true.")
}
}

return emptyList()
}

private fun validateNetworkServices(): List<String> {
val errors = mutableListOf<String>()

if (compatibilityZoneURL != null && networkServices != null && !(networkServices!!.inferred)) {
errors += "Cannot configure both compatibilityZoneUrl and networkServices simultaneously"
}

return errors
}
@ -336,20 +381,24 @@ data class NodeConfigurationImpl(
|
||||
|Please contact the R3 team on the public slack to discuss your use case.
|
||||
""".trimMargin())
|
||||
}
|
||||
|
||||
if (compatibilityZoneURL != null && networkServices == null) {
|
||||
networkServices = NetworkServicesConfig(compatibilityZoneURL, compatibilityZoneURL, true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
data class NodeRpcSettings(
|
||||
val address: NetworkHostAndPort,
|
||||
val adminAddress: NetworkHostAndPort,
|
||||
val address: NetworkHostAndPort?,
|
||||
val adminAddress: NetworkHostAndPort?,
|
||||
val standAloneBroker: Boolean = false,
|
||||
val useSsl: Boolean = false,
|
||||
val ssl: BrokerRpcSslOptions?
|
||||
) {
|
||||
fun asOptions(fallbackSslOptions: BrokerRpcSslOptions): NodeRpcOptions {
|
||||
return object : NodeRpcOptions {
|
||||
override val address = this@NodeRpcSettings.address
|
||||
override val adminAddress = this@NodeRpcSettings.adminAddress
|
||||
override val address = this@NodeRpcSettings.address!!
|
||||
override val adminAddress = this@NodeRpcSettings.adminAddress!!
|
||||
override val standAloneBroker = this@NodeRpcSettings.standAloneBroker
|
||||
override val useSsl = this@NodeRpcSettings.useSsl
|
||||
override val sslConfig = this@NodeRpcSettings.ssl ?: fallbackSslOptions
|
||||
|
@ -12,6 +12,7 @@ package net.corda.node.services.events
|
||||
|
||||
import co.paralleluniverse.fibers.Suspendable
|
||||
import com.google.common.util.concurrent.ListenableFuture
|
||||
import net.corda.core.concurrent.CordaFuture
|
||||
import net.corda.core.context.InvocationContext
|
||||
import net.corda.core.context.InvocationOrigin
|
||||
import net.corda.core.contracts.SchedulableState
|
||||
@ -20,11 +21,9 @@ import net.corda.core.contracts.ScheduledStateRef
|
||||
import net.corda.core.contracts.StateRef
|
||||
import net.corda.core.flows.FlowLogic
|
||||
import net.corda.core.flows.FlowLogicRefFactory
|
||||
import net.corda.core.internal.ThreadBox
|
||||
import net.corda.core.internal.VisibleForTesting
|
||||
import net.corda.core.internal.*
|
||||
import net.corda.core.internal.concurrent.flatMap
|
||||
import net.corda.core.internal.join
|
||||
import net.corda.core.internal.until
|
||||
import net.corda.core.internal.concurrent.openFuture
|
||||
import net.corda.core.node.ServicesForResolution
|
||||
import net.corda.core.schemas.PersistentStateRef
|
||||
import net.corda.core.serialization.SingletonSerializeAsToken
|
||||
@ -36,8 +35,10 @@ import net.corda.node.services.api.FlowStarter
|
||||
import net.corda.node.services.api.NodePropertiesStore
|
||||
import net.corda.node.services.api.SchedulerService
|
||||
import net.corda.node.services.messaging.DeduplicationHandler
|
||||
import net.corda.node.services.statemachine.ExternalEvent
|
||||
import net.corda.nodeapi.internal.persistence.CordaPersistence
|
||||
import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX
|
||||
import net.corda.nodeapi.internal.persistence.contextTransaction
|
||||
import org.apache.activemq.artemis.utils.ReusableLatch
|
||||
import org.apache.mina.util.ConcurrentHashSet
|
||||
import org.slf4j.Logger
|
||||
@ -166,29 +167,31 @@ class NodeSchedulerService(private val clock: CordaClock,
|
||||
|
||||
override fun scheduleStateActivity(action: ScheduledStateRef) {
|
||||
log.trace { "Schedule $action" }
|
||||
if (!schedulerRepo.merge(action)) {
|
||||
// Only increase the number of unfinished schedules if the state didn't already exist on the queue
|
||||
unfinishedSchedules.countUp()
|
||||
}
|
||||
mutex.locked {
|
||||
if (action.scheduledAt < nextScheduledAction?.scheduledAt ?: Instant.MAX) {
|
||||
// We are earliest
|
||||
rescheduleWakeUp()
|
||||
} else if (action.ref == nextScheduledAction?.ref && action.scheduledAt != nextScheduledAction?.scheduledAt) {
|
||||
// We were earliest but might not be any more
|
||||
rescheduleWakeUp()
|
||||
// Only increase the number of unfinished schedules if the state didn't already exist on the queue
|
||||
val countUp = !schedulerRepo.merge(action)
|
||||
contextTransaction.onCommit {
|
||||
if (countUp) unfinishedSchedules.countUp()
|
||||
mutex.locked {
|
||||
if (action.scheduledAt < nextScheduledAction?.scheduledAt ?: Instant.MAX) {
|
||||
// We are earliest
|
||||
rescheduleWakeUp()
|
||||
} else if (action.ref == nextScheduledAction?.ref && action.scheduledAt != nextScheduledAction?.scheduledAt) {
|
||||
// We were earliest but might not be any more
|
||||
rescheduleWakeUp()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
override fun unscheduleStateActivity(ref: StateRef) {
|
||||
log.trace { "Unschedule $ref" }
|
||||
if (startingStateRefs.all { it.ref != ref } && schedulerRepo.delete(ref)) {
|
||||
unfinishedSchedules.countDown()
|
||||
}
|
||||
mutex.locked {
|
||||
if (nextScheduledAction?.ref == ref) {
|
||||
rescheduleWakeUp()
|
||||
val countDown = startingStateRefs.all { it.ref != ref } && schedulerRepo.delete(ref)
|
||||
contextTransaction.onCommit {
|
||||
if (countDown) unfinishedSchedules.countDown()
|
||||
mutex.locked {
|
||||
if (nextScheduledAction?.ref == ref) {
|
||||
rescheduleWakeUp()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
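The scheduler hunk above moves the unfinishedSchedules counting and wake-up rescheduling inside a contextTransaction.onCommit block, so none of it happens if the enclosing database transaction rolls back. A tiny illustration of why deferring such side effects to an on-commit hook matters; FakeTx is a stand-in, not CordaPersistence:

// Illustration only: side effects queued on a fake transaction fire on commit and are dropped on rollback.
import java.util.concurrent.atomic.AtomicInteger

class FakeTx {
    private val commitHooks = mutableListOf<() -> Unit>()
    fun onCommit(action: () -> Unit) { commitHooks += action }
    fun commit() = commitHooks.forEach { it() }
    fun rollback() = commitHooks.clear()
}

fun main() {
    val unfinishedSchedules = AtomicInteger(0)

    val tx1 = FakeTx()
    tx1.onCommit { unfinishedSchedules.incrementAndGet() }  // deferred, like countUp()
    tx1.commit()

    val tx2 = FakeTx()
    tx2.onCommit { unfinishedSchedules.incrementAndGet() }
    tx2.rollback()  // the schedule row never hit the database, so the counter must not move

    println(unfinishedSchedules.get())  // 1
}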
@ -237,7 +240,12 @@ class NodeSchedulerService(private val clock: CordaClock,
|
||||
schedulerTimerExecutor.join()
|
||||
}
|
||||
|
||||
private inner class FlowStartDeduplicationHandler(val scheduledState: ScheduledStateRef) : DeduplicationHandler {
|
||||
private inner class FlowStartDeduplicationHandler(val scheduledState: ScheduledStateRef, override val flowLogic: FlowLogic<Any?>, override val context: InvocationContext) : DeduplicationHandler, ExternalEvent.ExternalStartFlowEvent<Any?> {
|
||||
override val externalCause: ExternalEvent
|
||||
get() = this
|
||||
override val deduplicationHandler: FlowStartDeduplicationHandler
|
||||
get() = this
|
||||
|
||||
override fun insideDatabaseTransaction() {
|
||||
schedulerRepo.delete(scheduledState.ref)
|
||||
}
|
||||
@ -249,6 +257,18 @@ class NodeSchedulerService(private val clock: CordaClock,
|
||||
override fun toString(): String {
|
||||
return "${javaClass.simpleName}($scheduledState)"
|
||||
}
|
||||
|
||||
override fun wireUpFuture(flowFuture: CordaFuture<FlowStateMachine<Any?>>) {
|
||||
_future.captureLater(flowFuture)
|
||||
val future = _future.flatMap { it.resultFuture }
|
||||
future.then {
|
||||
unfinishedSchedules.countDown()
|
||||
}
|
||||
}
|
||||
|
||||
private val _future = openFuture<FlowStateMachine<Any?>>()
|
||||
override val future: CordaFuture<FlowStateMachine<Any?>>
|
||||
get() = _future
|
||||
}
|
||||
|
||||
private fun onTimeReached(scheduledState: ScheduledStateRef) {
|
||||
@ -260,11 +280,8 @@ class NodeSchedulerService(private val clock: CordaClock,
|
||||
flowName = scheduledFlow.javaClass.name
|
||||
// TODO refactor the scheduler to store and propagate the original invocation context
|
||||
val context = InvocationContext.newInstance(InvocationOrigin.Scheduled(scheduledState))
|
||||
val deduplicationHandler = FlowStartDeduplicationHandler(scheduledState)
|
||||
val future = flowStarter.startFlow(scheduledFlow, context, deduplicationHandler).flatMap { it.resultFuture }
|
||||
future.then {
|
||||
unfinishedSchedules.countDown()
|
||||
}
|
||||
val startFlowEvent = FlowStartDeduplicationHandler(scheduledState, scheduledFlow, context)
|
||||
flowStarter.startFlow(startFlowEvent)
|
||||
}
|
||||
}
|
||||
} catch (e: Exception) {
|
||||
|
@ -20,6 +20,8 @@ import net.corda.core.serialization.CordaSerializable
|
||||
import net.corda.core.serialization.serialize
|
||||
import net.corda.core.utilities.ByteSequence
|
||||
import net.corda.node.services.statemachine.DeduplicationId
|
||||
import net.corda.node.services.statemachine.ExternalEvent
|
||||
import net.corda.node.services.statemachine.SenderDeduplicationId
|
||||
import java.time.Instant
|
||||
import javax.annotation.concurrent.ThreadSafe
|
||||
|
||||
@ -35,6 +37,12 @@ import javax.annotation.concurrent.ThreadSafe
|
||||
*/
|
||||
@ThreadSafe
|
||||
interface MessagingService {
|
||||
/**
|
||||
* A unique identifier for this sender that changes whenever a node restarts. This is used in conjunction with a sequence
|
||||
* number for message de-duplication at the recipient.
|
||||
*/
|
||||
val ourSenderUUID: String
|
||||
|
||||
/**
|
||||
* The provided function will be invoked for each received message whose topic and session matches. The callback
|
||||
* will run on the main server thread provided when the messaging service is constructed, and a database
|
||||
@ -103,11 +111,12 @@ interface MessagingService {
|
||||
/**
|
||||
* Returns an initialised [Message] with the current time, etc, already filled in.
|
||||
*
|
||||
* @param topicSession identifier for the topic and session the message is sent to.
|
||||
* @param additionalProperties optional additional message headers.
|
||||
* @param topic identifier for the topic the message is sent to.
|
||||
* @param data the payload for the message.
|
||||
* @param deduplicationId optional message deduplication ID including sender identifier.
|
||||
* @param additionalHeaders optional additional message headers.
|
||||
*/
|
||||
fun createMessage(topic: String, data: ByteArray, deduplicationId: DeduplicationId = DeduplicationId.createRandom(newSecureRandom()), additionalHeaders: Map<String, String> = emptyMap()): Message
|
||||
fun createMessage(topic: String, data: ByteArray, deduplicationId: SenderDeduplicationId = SenderDeduplicationId(DeduplicationId.createRandom(newSecureRandom()), ourSenderUUID), additionalHeaders: Map<String, String> = emptyMap()): Message
|
||||
|
||||
/** Given information about either a specific node or a service returns its corresponding address */
|
||||
fun getAddressOfParty(partyInfo: PartyInfo): MessageRecipients
|
||||
@ -116,7 +125,7 @@ interface MessagingService {
|
||||
val myAddress: SingleMessageRecipient
|
||||
}
|
||||
|
||||
fun MessagingService.send(topicSession: String, payload: Any, to: MessageRecipients, deduplicationId: DeduplicationId = DeduplicationId.createRandom(newSecureRandom()), retryId: Long? = null, additionalHeaders: Map<String, String> = emptyMap()) = send(createMessage(topicSession, payload.serialize().bytes, deduplicationId, additionalHeaders), to, retryId)
|
||||
fun MessagingService.send(topicSession: String, payload: Any, to: MessageRecipients, deduplicationId: SenderDeduplicationId = SenderDeduplicationId(DeduplicationId.createRandom(newSecureRandom()), ourSenderUUID), retryId: Long? = null, additionalHeaders: Map<String, String> = emptyMap()) = send(createMessage(topicSession, payload.serialize().bytes, deduplicationId, additionalHeaders), to, retryId)
|
||||
|
||||
interface MessageHandlerRegistration
|
||||
|
||||
@ -162,15 +171,17 @@ object TopicStringValidator {
|
||||
}
|
||||
|
||||
/**
* This handler is used to implement exactly-once delivery of an event on top of a possibly duplicated one. This is done
* This handler is used to implement exactly-once delivery of an external event on top of an at-least-once delivery. This is done
* using two hooks that are called from the event processor, one called from the database transaction committing the
* side-effect caused by the event, and another one called after the transaction has committed successfully.
* side-effect caused by the external event, and another one called after the transaction has committed successfully.
*
* For example, for messaging we can use [insideDatabaseTransaction] to store the message's unique ID for later
* deduplication, and [afterDatabaseTransaction] to acknowledge the message and stop retries.
*
* We also use this for exactly-once start of a scheduled flow: [insideDatabaseTransaction] is used to remove the
* to-be-scheduled state of the flow, and [afterDatabaseTransaction] is used for cleanup of in-memory bookkeeping.
*
* It holds a reference back to the causing external event.
*/
interface DeduplicationHandler {
/**
@ -184,6 +195,11 @@ interface DeduplicationHandler {
* cleanup/acknowledgement/stopping of retries.
*/
fun afterDatabaseTransaction()

/**
* The external event for which we are trying to reduce from at-least-once delivery to exactly-once.
*/
val externalCause: ExternalEvent
}

typealias MessageHandler = (ReceivedMessage, MessageHandlerRegistration, DeduplicationHandler) -> Unit
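A hedged sketch of what the two hooks typically do for message delivery: persist the deduplication ID inside the same transaction as the message's side effects, and acknowledge the broker only after that transaction has committed. The store and broker types below are hypothetical stand-ins:

// Sketch under stated assumptions: DedupStore and Broker are hypothetical collaborators,
// standing in for the node's persistence and Artemis acknowledgement respectively.
interface DedupStore { fun persist(id: String) }          // runs inside the DB transaction
interface Broker { fun acknowledge(messageId: String) }   // safe only after commit

class MessageDeduplication(private val store: DedupStore,
                           private val broker: Broker,
                           private val messageId: String) {
    // Called while the transaction that applies the message's side effects is still open,
    // so the ID and the side effects commit (or roll back) atomically.
    fun insideDatabaseTransaction() = store.persist(messageId)

    // Called strictly after commit: acknowledging earlier could lose the message if the
    // transaction later rolled back; acknowledging is what stops at-least-once redelivery.
    fun afterDatabaseTransaction() = broker.acknowledge(messageId)
}

fun main() {
    val handler = MessageDeduplication(
            store = object : DedupStore { override fun persist(id: String) = println("persist $id") },
            broker = object : Broker { override fun acknowledge(messageId: String) = println("ack $messageId") },
            messageId = "msg-42")
    handler.insideDatabaseTransaction()  // within the tx
    handler.afterDatabaseTransaction()   // after commit
}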
@ -19,7 +19,6 @@ import net.corda.nodeapi.internal.persistence.CordaPersistence
|
||||
import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX
|
||||
import java.io.Serializable
|
||||
import java.time.Instant
|
||||
import java.util.*
|
||||
import java.util.concurrent.ConcurrentHashMap
|
||||
import java.util.concurrent.TimeUnit
|
||||
import javax.persistence.Column
|
||||
@ -32,8 +31,6 @@ typealias SenderHashToSeqNo = Pair<String, Long?>
|
||||
* Encapsulate the de-duplication logic.
|
||||
*/
|
||||
class P2PMessageDeduplicator(private val database: CordaPersistence) {
|
||||
val ourSenderUUID = UUID.randomUUID().toString()
|
||||
|
||||
// A temporary in-memory set of deduplication IDs and associated high water mark details.
|
||||
// When we receive a message we don't persist the ID immediately,
|
||||
// so we store the ID here in the meantime (until the persisting db tx has committed). This is because Artemis may
|
||||
|
@ -26,11 +26,7 @@ import net.corda.core.serialization.SingletonSerializeAsToken
|
||||
import net.corda.core.serialization.deserialize
|
||||
import net.corda.core.serialization.internal.nodeSerializationEnv
|
||||
import net.corda.core.serialization.serialize
|
||||
import net.corda.core.utilities.ByteSequence
|
||||
import net.corda.core.utilities.NetworkHostAndPort
|
||||
import net.corda.core.utilities.OpaqueBytes
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.core.utilities.trace
|
||||
import net.corda.core.utilities.*
|
||||
import net.corda.node.VersionInfo
|
||||
import net.corda.node.internal.LifecycleSupport
|
||||
import net.corda.node.internal.artemis.ReactiveArtemisConsumer
|
||||
@ -38,19 +34,18 @@ import net.corda.node.internal.artemis.ReactiveArtemisConsumer.Companion.multipl
|
||||
import net.corda.node.services.api.NetworkMapCacheInternal
|
||||
import net.corda.node.services.config.NodeConfiguration
|
||||
import net.corda.node.services.statemachine.DeduplicationId
|
||||
import net.corda.node.services.statemachine.ExternalEvent
|
||||
import net.corda.node.services.statemachine.SenderDeduplicationId
|
||||
import net.corda.node.utilities.AffinityExecutor
|
||||
import net.corda.node.utilities.PersistentMap
|
||||
import net.corda.nodeapi.ArtemisTcpTransport.Companion.p2pConnectorTcpTransport
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.ArtemisAddress
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.*
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.BRIDGE_CONTROL
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.BRIDGE_NOTIFY
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.JOURNAL_HEADER_SIZE
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.P2PMessagingHeaders
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.PEERS_PREFIX
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.NodeAddress
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.RemoteInboxAddress
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.ServiceAddress
|
||||
import net.corda.nodeapi.internal.bridging.BridgeControl
|
||||
import net.corda.nodeapi.internal.bridging.BridgeEntry
|
||||
import net.corda.nodeapi.internal.persistence.CordaPersistence
|
||||
@ -61,12 +56,7 @@ import org.apache.activemq.artemis.api.core.Message.HDR_DUPLICATE_DETECTION_ID
|
||||
import org.apache.activemq.artemis.api.core.Message.HDR_VALIDATED_USER
|
||||
import org.apache.activemq.artemis.api.core.RoutingType
|
||||
import org.apache.activemq.artemis.api.core.SimpleString
|
||||
import org.apache.activemq.artemis.api.core.client.ActiveMQClient
|
||||
import org.apache.activemq.artemis.api.core.client.ClientConsumer
|
||||
import org.apache.activemq.artemis.api.core.client.ClientMessage
|
||||
import org.apache.activemq.artemis.api.core.client.ClientProducer
|
||||
import org.apache.activemq.artemis.api.core.client.ClientSession
|
||||
import org.apache.activemq.artemis.api.core.client.ServerLocator
|
||||
import org.apache.activemq.artemis.api.core.client.*
|
||||
import org.apache.commons.lang.ArrayUtils.EMPTY_BYTE_ARRAY
|
||||
import rx.Observable
|
||||
import rx.Subscription
|
||||
@ -149,7 +139,7 @@ class P2PMessagingClient(val config: NodeConfiguration,
|
||||
)
|
||||
}
|
||||
|
||||
private class NodeClientMessage(override val topic: String, override val data: ByteSequence, override val uniqueMessageId: DeduplicationId, override val senderUUID: String?, override val additionalHeaders: Map<String, String>) : Message {
|
||||
class NodeClientMessage(override val topic: String, override val data: ByteSequence, override val uniqueMessageId: DeduplicationId, override val senderUUID: String?, override val additionalHeaders: Map<String, String>) : Message {
|
||||
override val debugTimestamp: Instant = Instant.now()
|
||||
override fun toString() = "$topic#${String(data.bytes)}"
|
||||
}
|
||||
@ -183,9 +173,12 @@ class P2PMessagingClient(val config: NodeConfiguration,
|
||||
data class HandlerRegistration(val topic: String, val callback: Any) : MessageHandlerRegistration
|
||||
|
||||
override val myAddress: SingleMessageRecipient = NodeAddress(myIdentity, advertisedAddress)
|
||||
override val ourSenderUUID = UUID.randomUUID().toString()
|
||||
|
||||
private val messageRedeliveryDelaySeconds = config.p2pMessagingRetry.messageRedeliveryDelay.seconds
|
||||
private val state = ThreadBox(InnerState())
|
||||
private val knownQueues = Collections.newSetFromMap(ConcurrentHashMap<String, Boolean>())
|
||||
private val delayStartQueues = Collections.newSetFromMap(ConcurrentHashMap<String, Boolean>())
|
||||
private val externalBridge: Boolean = config.enterpriseConfiguration.externalBridge ?: false
|
||||
|
||||
private val handlers = ConcurrentHashMap<String, MessageHandler>()
|
||||
@ -255,7 +248,7 @@ class P2PMessagingClient(val config: NodeConfiguration,
|
||||
this@P2PMessagingClient,
|
||||
metricRegistry,
|
||||
queueBound = config.enterpriseConfiguration.tuning.maximumMessagingBatchSize,
|
||||
ourSenderUUID = deduplicator.ourSenderUUID,
|
||||
ourSenderUUID = ourSenderUUID,
|
||||
myLegalName = legalName
|
||||
)
|
||||
this@P2PMessagingClient.messagingExecutor = messagingExecutor
|
||||
@ -352,7 +345,12 @@ class P2PMessagingClient(val config: NodeConfiguration,
|
||||
|
||||
val queues = session.addressQuery(SimpleString("$PEERS_PREFIX#")).queueNames
|
||||
for (queue in queues) {
|
||||
createBridgeEntry(queue)
|
||||
val queueQuery = session.queueQuery(queue)
|
||||
if (!config.lazyBridgeStart || queueQuery.messageCount > 0) {
|
||||
createBridgeEntry(queue)
|
||||
} else {
|
||||
delayStartQueues += queue.toString()
|
||||
}
|
||||
}
|
||||
val startupMessage = BridgeControl.NodeToBridgeSnapshot(myIdentity.toStringShort(), inboxes, requiredBridges)
|
||||
sendBridgeControl(startupMessage)
|
||||
@ -466,18 +464,23 @@ class P2PMessagingClient(val config: NodeConfiguration,
|
||||
}
|
||||
}
|
||||
|
||||
inner class MessageDeduplicationHandler(val artemisMessage: ClientMessage, val cordaMessage: ReceivedMessage) : DeduplicationHandler {
|
||||
private inner class MessageDeduplicationHandler(val artemisMessage: ClientMessage, override val receivedMessage: ReceivedMessage) : DeduplicationHandler, ExternalEvent.ExternalMessageEvent {
|
||||
override val externalCause: ExternalEvent
|
||||
get() = this
|
||||
override val deduplicationHandler: MessageDeduplicationHandler
|
||||
get() = this
|
||||
|
||||
override fun insideDatabaseTransaction() {
|
||||
deduplicator.persistDeduplicationId(cordaMessage.uniqueMessageId)
|
||||
deduplicator.persistDeduplicationId(receivedMessage.uniqueMessageId)
|
||||
}
|
||||
|
||||
override fun afterDatabaseTransaction() {
|
||||
deduplicator.signalMessageProcessFinish(cordaMessage.uniqueMessageId)
|
||||
deduplicator.signalMessageProcessFinish(receivedMessage.uniqueMessageId)
|
||||
messagingExecutor!!.acknowledge(artemisMessage)
|
||||
}
|
||||
|
||||
override fun toString(): String {
|
||||
return "${javaClass.simpleName}(${cordaMessage.uniqueMessageId})"
|
||||
return "${javaClass.simpleName}(${receivedMessage.uniqueMessageId})"
|
||||
}
|
||||
}
|
||||
|
||||
@ -600,19 +603,26 @@ class P2PMessagingClient(val config: NodeConfiguration,

/** Attempts to create a durable queue on the broker which is bound to an address of the same name. */
private fun createQueueIfAbsent(queueName: String, session: ClientSession) {
fun sendBridgeCreateMessage() {
val keyHash = queueName.substring(PEERS_PREFIX.length)
val peers = networkMap.getNodesByOwningKeyIndex(keyHash)
for (node in peers) {
val bridge = BridgeEntry(queueName, node.addresses, node.legalIdentities.map { it.name })
val createBridgeMessage = BridgeControl.Create(myIdentity.toStringShort(), bridge)
sendBridgeControl(createBridgeMessage)
}
}
if (!knownQueues.contains(queueName)) {
val queueQuery = session.queueQuery(SimpleString(queueName))
if (!queueQuery.isExists) {
log.info("Create fresh queue $queueName bound on same address")
session.createQueue(queueName, RoutingType.ANYCAST, queueName, true)
if (queueName.startsWith(PEERS_PREFIX)) {
val keyHash = queueName.substring(PEERS_PREFIX.length)
val peers = networkMap.getNodesByOwningKeyIndex(keyHash)
for (node in peers) {
val bridge = BridgeEntry(queueName, node.addresses, node.legalIdentities.map { it.name })
val createBridgeMessage = BridgeControl.Create(myIdentity.toStringShort(), bridge)
sendBridgeControl(createBridgeMessage)
}
if (delayStartQueues.contains(queueName)) {
log.info("Start bridge for previously empty queue $queueName")
sendBridgeCreateMessage()
delayStartQueues -= queueName
} else {
val queueQuery = session.queueQuery(SimpleString(queueName))
if (!queueQuery.isExists) {
log.info("Create fresh queue $queueName bound on same address")
session.createQueue(queueName, RoutingType.ANYCAST, queueName, true)
sendBridgeCreateMessage()
}
}
knownQueues += queueName
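With lazyBridgeStart enabled, the startup code shown earlier only bridges peer queues that already hold messages; empty queues are parked in delayStartQueues and are bridged by createQueueIfAbsent on the first outbound send. A hedged, self-contained sketch of that decision, using plain collections and a hypothetical PeerQueue type instead of Artemis sessions:

// Illustrative stand-ins; not the Artemis or Corda API.
data class PeerQueue(val name: String, val messageCount: Long)

class LazyBridgeDecider(private val lazyBridgeStart: Boolean) {
    val delayStartQueues = mutableSetOf<String>()
    val bridgedQueues = mutableSetOf<String>()

    // Called at startup for every peer queue found on the broker.
    fun onStartup(queue: PeerQueue) {
        if (!lazyBridgeStart || queue.messageCount > 0) {
            bridgedQueues += queue.name          // bridge immediately
        } else {
            delayStartQueues += queue.name       // defer until first send
        }
    }

    // Called when the node first sends to a queue; promotes a deferred queue to a real bridge.
    fun onFirstSend(queueName: String) {
        if (delayStartQueues.remove(queueName)) {
            bridgedQueues += queueName
        }
    }
}

fun main() {
    val decider = LazyBridgeDecider(lazyBridgeStart = true)
    decider.onStartup(PeerQueue("peers.AAAA", messageCount = 3))   // bridged now
    decider.onStartup(PeerQueue("peers.BBBB", messageCount = 0))   // deferred
    decider.onFirstSend("peers.BBBB")                              // bridged on demand
    println(decider.bridgedQueues)                                 // [peers.AAAA, peers.BBBB]
}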
@ -635,8 +645,8 @@ class P2PMessagingClient(val config: NodeConfiguration,
handlers.remove(registration.topic)
}

override fun createMessage(topic: String, data: ByteArray, deduplicationId: DeduplicationId, additionalHeaders: Map<String, String>): Message {
return NodeClientMessage(topic, OpaqueBytes(data), deduplicationId, deduplicator.ourSenderUUID, additionalHeaders)
override fun createMessage(topic: String, data: ByteArray, deduplicationId: SenderDeduplicationId, additionalHeaders: Map<String, String>): Message {
return NodeClientMessage(topic, OpaqueBytes(data), deduplicationId.deduplicationId, deduplicationId.senderUUID, additionalHeaders)
}

override fun getAddressOfParty(partyInfo: PartyInfo): MessageRecipients {

@ -20,9 +20,9 @@ import net.corda.nodeapi.internal.persistence.currentDBSession
import org.apache.commons.lang.ArrayUtils.EMPTY_BYTE_ARRAY
import org.slf4j.Logger
import org.slf4j.LoggerFactory
import java.io.Serializable
import java.util.*
import java.util.stream.Stream
import java.io.Serializable
import javax.persistence.Column
import javax.persistence.Entity
import javax.persistence.Id
@ -63,6 +63,11 @@ class DBCheckpointStorage : CheckpointStorage {
return session.createQuery(delete).executeUpdate() > 0
}

override fun getCheckpoint(id: StateMachineRunId): SerializedBytes<Checkpoint>? {
val bytes = currentDBSession().get(DBCheckpoint::class.java, id.uuid.toString())?.checkpoint ?: return null
return SerializedBytes<Checkpoint>(bytes)
}

override fun getAllCheckpoints(): Stream<Pair<StateMachineRunId, SerializedBytes<Checkpoint>>> {
val session = currentDBSession()
val criteriaQuery = session.criteriaBuilder.createQuery(DBCheckpoint::class.java)

@ -33,7 +33,6 @@ import org.apache.commons.lang.ArrayUtils.EMPTY_BYTE_ARRAY
import rx.Observable
import rx.subjects.PublishSubject
import java.io.Serializable
import java.util.*
import javax.persistence.*

// cache value type to just store the immutable bits of a signed transaction plus conversion helpers
@ -83,11 +82,11 @@ class DBTransactionStorage(cacheSizeBytes: Long) : WritableTransactionStorage, S
// to the memory pressure at all here.
private const val transactionSignatureOverheadEstimate = 1024

private fun weighTx(tx: Optional<TxCacheValue>): Int {
if (!tx.isPresent) {
private fun weighTx(tx: AppendOnlyPersistentMapBase.Transactional<TxCacheValue>): Int {
val actTx = tx.valueWithoutIsolation
if (actTx == null) {
return 0
}
val actTx = tx.get()
return actTx.second.sumBy { it.size + transactionSignatureOverheadEstimate } + actTx.first.size
}
}
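The new weigher above reads the cached value without going through the map's transactional isolation and treats a not-yet-visible value as weightless. A simplified sketch of the same size estimate (serialized transaction bytes plus a fixed per-signature overhead); CachedTx and the constant name below are stand-ins, not the node's actual types:

// Stand-in types for illustration only.
private const val SIGNATURE_OVERHEAD_ESTIMATE = 1024

data class CachedTx(val txBits: ByteArray, val signatures: List<ByteArray>)

// Returns 0 for entries whose value is not visible yet (e.g. written in an uncommitted
// transaction), mirroring how the real weigher treats valueWithoutIsolation == null.
fun weigh(value: CachedTx?): Int {
    if (value == null) return 0
    return value.signatures.sumBy { it.size + SIGNATURE_OVERHEAD_ESTIMATE } + value.txBits.size
}

fun main() {
    val tx = CachedTx(ByteArray(2_000), listOf(ByteArray(72), ByteArray(72)))
    println(weigh(tx))    // 2000 + 2 * (72 + 1024) = 4192
    println(weigh(null))  // 0
}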
@ -34,7 +34,7 @@ sealed class Action {
data class SendInitial(
val party: Party,
val initialise: InitialSessionMessage,
val deduplicationId: DeduplicationId
val deduplicationId: SenderDeduplicationId
) : Action()

/**
@ -43,7 +43,7 @@ sealed class Action {
data class SendExisting(
val peerParty: Party,
val message: ExistingSessionMessage,
val deduplicationId: DeduplicationId
val deduplicationId: SenderDeduplicationId
) : Action()

/**
@ -72,7 +72,8 @@ sealed class Action {
*/
data class PropagateErrors(
val errorMessages: List<ErrorSessionMessage>,
val sessions: List<SessionState.Initiated>
val sessions: List<SessionState.Initiated>,
val senderUUID: String?
) : Action()

/**
@ -139,6 +140,11 @@ sealed class Action {
* Release soft locks associated with given ID (currently the flow ID).
*/
data class ReleaseSoftLocks(val uuid: UUID?) : Action()

/**
* Retry a flow from the last checkpoint, or if there is no checkpoint, restart the flow with the same invocation details.
*/
data class RetryFlowFromSafePoint(val currentState: StateMachineState) : Action()
}

/**

@ -83,6 +83,7 @@ class ActionExecutorImpl(
is Action.CommitTransaction -> executeCommitTransaction()
is Action.ExecuteAsyncOperation -> executeAsyncOperation(fiber, action)
is Action.ReleaseSoftLocks -> executeReleaseSoftLocks(action)
is Action.RetryFlowFromSafePoint -> executeRetryFlowFromSafePoint(action)
}
}

@ -135,7 +136,7 @@ class ActionExecutorImpl(
@Suspendable
private fun executePropagateErrors(action: Action.PropagateErrors) {
action.errorMessages.forEach { (exception) ->
log.debug("Propagating error", exception)
log.warn("Propagating error", exception)
}
for (sessionState in action.sessions) {
// We cannot propagate if the session isn't live.
@ -147,7 +148,7 @@ class ActionExecutorImpl(
val sinkSessionId = sessionState.initiatedState.peerSinkSessionId
val existingMessage = ExistingSessionMessage(sinkSessionId, errorMessage)
val deduplicationId = DeduplicationId.createForError(errorMessage.errorId, sinkSessionId)
flowMessaging.sendSessionMessage(sessionState.peerParty, existingMessage, deduplicationId)
flowMessaging.sendSessionMessage(sessionState.peerParty, existingMessage, SenderDeduplicationId(deduplicationId, action.senderUUID))
}
}
}
@ -236,6 +237,10 @@ class ActionExecutorImpl(
)
}

private fun executeRetryFlowFromSafePoint(action: Action.RetryFlowFromSafePoint) {
stateMachineManager.retryFlowFromSafePoint(action.currentState)
}

private fun serializeCheckpoint(checkpoint: Checkpoint): SerializedBytes<Checkpoint> {
return checkpoint.serialize(context = checkpointSerializationContext)
}

@ -55,3 +55,9 @@ data class DeduplicationId(val toString: String) {
}
}
}

/**
* Represents the deduplication ID of a flow message, and the sender identifier for the flow doing the sending. The identifier might be
* null if the flow is trying to replay messages and doesn't want an optimisation to ignore the deduplication ID.
*/
data class SenderDeduplicationId(val deduplicationId: DeduplicationId, val senderUUID: String?)
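The nullable senderUUID is what lets the receiving node use a cheap per-sender high-water mark instead of remembering every deduplication ID; a flow restored from a checkpoint cannot guarantee monotonic sequence numbers, so it sends null and the receiver falls back to exact-ID deduplication. A simplified sketch of that trade-off, under assumed stand-in types and an explicit sequence number (not the node's real deduplicator):

data class SketchSenderDedupId(val deduplicationId: String, val senderUUID: String?, val senderSeqNo: Long)

class Receiver {
    private val highWaterMarks = mutableMapOf<String, Long>()   // senderUUID -> last seen seqNo
    private val seenIds = mutableSetOf<String>()                // fallback: exact-ID dedup

    fun isDuplicate(msg: SketchSenderDedupId): Boolean {
        val uuid = msg.senderUUID
        return if (uuid != null) {
            val last = highWaterMarks[uuid] ?: -1L
            if (msg.senderSeqNo <= last) true else { highWaterMarks[uuid] = msg.senderSeqNo; false }
        } else {
            !seenIds.add(msg.deduplicationId)
        }
    }
}

fun main() {
    val receiver = Receiver()
    println(receiver.isDuplicate(SketchSenderDedupId("a", "sender-1", 0)))  // false
    println(receiver.isDuplicate(SketchSenderDedupId("a", "sender-1", 0)))  // true (below high-water mark)
    println(receiver.isDuplicate(SketchSenderDedupId("b", null, 0)))        // false
    println(receiver.isDuplicate(SketchSenderDedupId("b", null, 0)))        // true (exact ID replay)
}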
@ -41,9 +41,9 @@ sealed class Event {
*/
data class DeliverSessionMessage(
val sessionMessage: ExistingSessionMessage,
val deduplicationHandler: DeduplicationHandler,
override val deduplicationHandler: DeduplicationHandler,
val sender: Party
) : Event()
) : Event(), GeneratedByExternalEvent

/**
* Signal that an error has happened. This may be due to an uncaught exception in the flow or some external error.
@ -143,4 +143,19 @@ sealed class Event {
* @param returnValue the result of the operation.
*/
data class AsyncOperationCompletion(val returnValue: Any?) : Event()

/**
* Retry a flow from the last checkpoint, or if there is no checkpoint, restart the flow with the same invocation details.
*/
object RetryFlowFromSafePoint : Event() {
override fun toString() = "RetryFlowFromSafePoint"
}

/**
* Indicates that an event was generated by an external event and that external event needs to be replayed if we retry the flow,
* even if it has not yet been processed and placed on the pending de-duplication handlers list.
*/
interface GeneratedByExternalEvent {
val deduplicationHandler: DeduplicationHandler
}
}
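The GeneratedByExternalEvent marker above exists so that, when a flow retries, events still sitting in the dead fiber's queue can be traced back to their external causes and re-delivered rather than silently dropped. A simplified sketch of that replay filter, using a plain queue and strings as stand-ins for the fiber's event channel and ExternalEvent:

import java.util.ArrayDeque

sealed class SketchEvent {
    object DoRemainingWork : SketchEvent()
    // Marker analogue: this event came from something external that must be re-delivered on retry.
    data class FromExternal(val externalCause: String) : SketchEvent()
}

// Drains the dead fiber's queue and keeps only the external causes worth replaying;
// purely internal events (like DoRemainingWork) are safe to drop.
fun collectCausesToReplay(oldQueue: ArrayDeque<SketchEvent>): List<String> {
    val causes = mutableListOf<String>()
    while (true) {
        val event = oldQueue.poll() ?: break
        if (event is SketchEvent.FromExternal) causes += event.externalCause
    }
    return causes
}

fun main() {
    val queue = ArrayDeque<SketchEvent>()
    queue.add(SketchEvent.DoRemainingWork)
    queue.add(SketchEvent.FromExternal("session message from PartyB"))
    queue.add(SketchEvent.FromExternal("scheduled start of ObligationFlow"))
    println(collectCausesToReplay(queue))
    // [session message from PartyB, scheduled start of ObligationFlow]
}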
@ -19,10 +19,15 @@ interface FlowHospital {
/**
* The flow running in [flowFiber] has errored.
*/
fun flowErrored(flowFiber: FlowFiber)
fun flowErrored(flowFiber: FlowFiber, currentState: StateMachineState, errors: List<Throwable>)

/**
* The flow running in [flowFiber] has cleaned, possibly as a result of a flow hospital resume.
*/
fun flowCleaned(flowFiber: FlowFiber)

/**
* The flow has been removed from the state machine.
*/
fun flowRemoved(flowFiber: FlowFiber)
}

@ -34,7 +34,7 @@ interface FlowMessaging {
* listen on the send acknowledgement.
*/
@Suspendable
fun sendSessionMessage(party: Party, message: SessionMessage, deduplicationId: DeduplicationId)
fun sendSessionMessage(party: Party, message: SessionMessage, deduplicationId: SenderDeduplicationId)

/**
* Start the messaging using the [onMessage] message handler.
@ -59,7 +59,7 @@ class FlowMessagingImpl(val serviceHub: ServiceHubInternal): FlowMessaging {
}

@Suspendable
override fun sendSessionMessage(party: Party, message: SessionMessage, deduplicationId: DeduplicationId) {
override fun sendSessionMessage(party: Party, message: SessionMessage, deduplicationId: SenderDeduplicationId) {
log.trace { "Sending message $deduplicationId $message to party $party" }
val networkMessage = serviceHub.networkService.createMessage(sessionTopic, serializeSessionMessage(message).bytes, deduplicationId, message.additionalHeaders(party))
val partyInfo = serviceHub.networkMapCache.getPartyInfo(party) ?: throw IllegalArgumentException("Don't know about $party")

@ -57,6 +57,8 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
fun currentStateMachine(): FlowStateMachineImpl<*>? = Strand.currentStrand() as? FlowStateMachineImpl<*>

private val log: Logger = LoggerFactory.getLogger("net.corda.flow")

private val SERIALIZER_BLOCKER = Fiber::class.java.getDeclaredField("SERIALIZER_BLOCKER").apply { isAccessible = true }.get(null)
}

override val serviceHub get() = getTransientField(TransientValues::serviceHub)
@ -75,6 +77,14 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
internal var transientValues: TransientReference<TransientValues>? = null
internal var transientState: TransientReference<StateMachineState>? = null

/**
* What sender identifier to put on messages sent by this flow. This will either be the identifier for the current
* state machine manager / messaging client, or null to indicate this flow is restored from a checkpoint and
* the de-duplication of messages it sends should not be optimised since this could be unreliable.
*/
override val ourSenderUUID: String?
get() = transientState?.value?.senderUUID

private fun <A> getTransientField(field: KProperty1<TransientValues, A>): A {
val suppliedValues = transientValues ?: throw IllegalStateException("${field.name} wasn't supplied!")
return field.get(suppliedValues.value)
@ -178,6 +188,7 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
fun setLoggingContext() {
context.pushToLoggingContext()
MDC.put("flow-id", id.uuid.toString())
MDC.put("fiber-id", this.getId().toString())
}

@Suspendable
@ -195,7 +206,7 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
suspend(FlowIORequest.WaitForSessionConfirmations, maySkipCheckpoint = true)
Try.Success(result)
} catch (throwable: Throwable) {
logger.warn("Flow threw exception", throwable)
logger.info("Flow threw exception... sending to flow hospital", throwable)
Try.Failure<R>(throwable)
}
val softLocksId = if (hasSoftLockedStates) logic.runId.uuid else null
@ -335,7 +346,7 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
isDbTransactionOpenOnExit = false
)
require(continuation == FlowContinuation.ProcessEvents)
Fiber.unparkDeserialized(this, scheduler)
unpark(SERIALIZER_BLOCKER)
}
setLoggingContext()
return uncheckedCast(processEventsUntilFlowIsResumed(
@ -23,12 +23,20 @@ import net.corda.core.flows.FlowInfo
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.StateMachineRunId
import net.corda.core.identity.Party
import net.corda.core.internal.*
import net.corda.core.internal.ConcurrentBox
import net.corda.core.internal.FlowStateMachine
import net.corda.core.internal.LifeCycle
import net.corda.core.internal.bufferUntilSubscribed
import net.corda.core.internal.castIfPossible
import net.corda.core.internal.concurrent.OpenFuture
import net.corda.core.internal.concurrent.map
import net.corda.core.internal.concurrent.openFuture
import net.corda.core.messaging.DataFeed
import net.corda.core.serialization.*
import net.corda.core.serialization.SerializationContext
import net.corda.core.serialization.SerializationDefaults
import net.corda.core.serialization.SerializedBytes
import net.corda.core.serialization.deserialize
import net.corda.core.serialization.serialize
import net.corda.core.utilities.ProgressTracker
import net.corda.core.utilities.Try
import net.corda.core.utilities.contextLogger
@ -39,7 +47,12 @@ import net.corda.node.services.api.ServiceHubInternal
import net.corda.node.services.config.shouldCheckCheckpoints
import net.corda.node.services.messaging.DeduplicationHandler
import net.corda.node.services.messaging.ReceivedMessage
import net.corda.node.services.statemachine.interceptors.*
import net.corda.node.services.statemachine.interceptors.DumpHistoryOnErrorInterceptor
import net.corda.node.services.statemachine.interceptors.FiberDeserializationChecker
import net.corda.node.services.statemachine.interceptors.FiberDeserializationCheckingInterceptor
import net.corda.node.services.statemachine.interceptors.HospitalisingInterceptor
import net.corda.node.services.statemachine.interceptors.MetricInterceptor
import net.corda.node.services.statemachine.interceptors.PrintingInterceptor
import net.corda.node.services.statemachine.transitions.StateMachine
import net.corda.node.utilities.AffinityExecutor
import net.corda.nodeapi.internal.persistence.CordaPersistence
@ -53,13 +66,15 @@ import java.util.*
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.ExecutorService
import java.util.concurrent.ThreadPoolExecutor
import java.util.concurrent.locks.ReentrantLock
import javax.annotation.concurrent.ThreadSafe
import kotlin.collections.ArrayList
import kotlin.concurrent.withLock
import kotlin.streams.toList

/**
* The StateMachineManagerImpl will always invoke the flow fibers on the given [AffinityExecutor], regardless of which
* thread actually starts them via [startFlow].
* thread actually starts them via [deliverExternalEvent].
*/
@ThreadSafe
class MultiThreadedStateMachineManager(
@ -101,6 +116,7 @@ class MultiThreadedStateMachineManager(
private val flowMessaging: FlowMessaging = FlowMessagingImpl(serviceHub)
private val fiberDeserializationChecker = if (serviceHub.configuration.shouldCheckCheckpoints()) FiberDeserializationChecker() else null
private val transitionExecutor = makeTransitionExecutor()
private val ourSenderUUID = serviceHub.networkService.ourSenderUUID

private var checkpointSerializationContext: SerializationContext? = null
private var tokenizableServices: List<Any>? = null
@ -142,7 +158,7 @@ class MultiThreadedStateMachineManager(
resumeRestoredFlows(fibers)
flowMessaging.start { receivedMessage, deduplicationHandler ->
lifeCycle.requireState(State.STARTED) {
onSessionMessage(receivedMessage, deduplicationHandler)
deliverExternalEvent(deduplicationHandler.externalCause)
}
}
}
@ -187,7 +203,7 @@ class MultiThreadedStateMachineManager(
}
}

override fun <A> startFlow(
private fun <A> startFlow(
flowLogic: FlowLogic<A>,
context: InvocationContext,
ourIdentity: Party?,
@ -322,7 +338,73 @@ class MultiThreadedStateMachineManager(
}
}

private fun onSessionMessage(message: ReceivedMessage, deduplicationHandler: DeduplicationHandler) {
override fun retryFlowFromSafePoint(currentState: StateMachineState) {
// Get set of external events
val flowId = currentState.flowLogic.runId
val oldFlowLeftOver = concurrentBox.concurrent { flows[flowId] }?.fiber?.transientValues?.value?.eventQueue
if (oldFlowLeftOver == null) {
logger.error("Unable to find flow for flow $flowId. Something is very wrong. The flow will not retry.")
return
}
val flow = if (currentState.isAnyCheckpointPersisted) {
val serializedCheckpoint = checkpointStorage.getCheckpoint(flowId)
if (serializedCheckpoint == null) {
logger.error("Unable to find database checkpoint for flow $flowId. Something is very wrong. The flow will not retry.")
return
}
val checkpoint = deserializeCheckpoint(serializedCheckpoint)
if (checkpoint == null) {
logger.error("Unable to deserialize database checkpoint for flow $flowId. Something is very wrong. The flow will not retry.")
return
}
// Resurrect flow
createFlowFromCheckpoint(
id = flowId,
checkpoint = checkpoint,
initialDeduplicationHandler = null,
isAnyCheckpointPersisted = true,
isStartIdempotent = false,
senderUUID = null
)
} else {
// Just flow initiation message
null
}
externalEventMutex.withLock {
if (flow != null) addAndStartFlow(flowId, flow)
// Deliver all the external events from the old flow instance.
val unprocessedExternalEvents = mutableListOf<ExternalEvent>()
do {
val event = oldFlowLeftOver.tryReceive()
if (event is Event.GeneratedByExternalEvent) {
unprocessedExternalEvents += event.deduplicationHandler.externalCause
}
} while (event != null)
val externalEvents = currentState.pendingDeduplicationHandlers.map { it.externalCause } + unprocessedExternalEvents
for (externalEvent in externalEvents) {
deliverExternalEvent(externalEvent)
}
}
}

private val externalEventMutex = ReentrantLock()
override fun deliverExternalEvent(event: ExternalEvent) {
externalEventMutex.withLock {
when (event) {
is ExternalEvent.ExternalMessageEvent -> onSessionMessage(event)
is ExternalEvent.ExternalStartFlowEvent<*> -> onExternalStartFlow(event)
}
}
}

private fun <T> onExternalStartFlow(event: ExternalEvent.ExternalStartFlowEvent<T>) {
val future = startFlow(event.flowLogic, event.context, ourIdentity = null, deduplicationHandler = event.deduplicationHandler)
event.wireUpFuture(future)
}

private fun onSessionMessage(event: ExternalEvent.ExternalMessageEvent) {
val message: ReceivedMessage = event.receivedMessage
val deduplicationHandler: DeduplicationHandler = event.deduplicationHandler
val peer = message.peer
val sessionMessage = try {
message.data.deserialize<SessionMessage>()
@ -396,7 +478,7 @@ class MultiThreadedStateMachineManager(
}

if (replyError != null) {
flowMessaging.sendSessionMessage(sender, replyError, DeduplicationId.createRandom(secureRandom))
flowMessaging.sendSessionMessage(sender, replyError, SenderDeduplicationId(DeduplicationId.createRandom(secureRandom), ourSenderUUID))
deduplicationHandler.afterDatabaseTransaction()
}
}
@ -470,7 +552,8 @@ class MultiThreadedStateMachineManager(
isAnyCheckpointPersisted = false,
isStartIdempotent = isStartIdempotent,
isRemoved = false,
flowLogic = flowLogic
flowLogic = flowLogic,
senderUUID = ourSenderUUID
)
flowStateMachineImpl.transientState = TransientReference(initialState)
concurrentBox.concurrent {
@ -505,7 +588,7 @@ class MultiThreadedStateMachineManager(

private fun createTransientValues(id: StateMachineRunId, resultFuture: CordaFuture<Any?>): FlowStateMachineImpl.TransientValues {
return FlowStateMachineImpl.TransientValues(
eventQueue = Channels.newChannel(stateMachineConfiguration.eventQueueSize, Channels.OverflowPolicy.BLOCK),
eventQueue = Channels.newChannel(-1, Channels.OverflowPolicy.BLOCK),
resultFuture = resultFuture,
database = database,
transitionExecutor = transitionExecutor,
@ -521,7 +604,8 @@ class MultiThreadedStateMachineManager(
checkpoint: Checkpoint,
isAnyCheckpointPersisted: Boolean,
isStartIdempotent: Boolean,
initialDeduplicationHandler: DeduplicationHandler?
initialDeduplicationHandler: DeduplicationHandler?,
senderUUID: String? = ourSenderUUID
): Flow {
val flowState = checkpoint.flowState
val resultFuture = openFuture<Any?>()
@ -536,7 +620,8 @@ class MultiThreadedStateMachineManager(
isAnyCheckpointPersisted = isAnyCheckpointPersisted,
isStartIdempotent = isStartIdempotent,
isRemoved = false,
flowLogic = logic
flowLogic = logic,
senderUUID = senderUUID
)
val fiber = FlowStateMachineImpl(id, logic, scheduler)
fiber.transientValues = TransientReference(createTransientValues(id, resultFuture))
@ -554,7 +639,8 @@ class MultiThreadedStateMachineManager(
isAnyCheckpointPersisted = isAnyCheckpointPersisted,
isStartIdempotent = isStartIdempotent,
isRemoved = false,
flowLogic = fiber.logic
flowLogic = fiber.logic,
senderUUID = senderUUID
)
fiber.transientValues = TransientReference(createTransientValues(id, resultFuture))
fiber.transientState = TransientReference(state)
@ -574,9 +660,13 @@ class MultiThreadedStateMachineManager(
sessionToFlow.put(sessionId, id)
}
concurrentBox.concurrent {
incrementLiveFibers()
unfinishedFibers.countUp()
flows.put(id, flow)
val oldFlow = flows.put(id, flow)
if (oldFlow == null) {
incrementLiveFibers()
unfinishedFibers.countUp()
} else {
oldFlow.resultFuture.captureLater(flow.resultFuture)
}
flow.fiber.scheduleEvent(Event.DoRemainingWork)
when (checkpoint.flowState) {
is FlowState.Unstarted -> {
@ -611,7 +701,7 @@ class MultiThreadedStateMachineManager(

private fun makeTransitionExecutor(): TransitionExecutor {
val interceptors = ArrayList<TransitionInterceptor>()
interceptors.add { HospitalisingInterceptor(PropagatingFlowHospital, it) }
interceptors.add { HospitalisingInterceptor(StaffedFlowHospital, it) }
if (serviceHub.configuration.devMode) {
interceptors.add { DumpHistoryOnErrorInterceptor(it) }
interceptors.add { MetricInterceptor(metrics, it) }
@ -19,12 +19,17 @@ import net.corda.core.utilities.loggerFor
object PropagatingFlowHospital : FlowHospital {
private val log = loggerFor<PropagatingFlowHospital>()

override fun flowErrored(flowFiber: FlowFiber) {
log.debug { "Flow ${flowFiber.id} dirtied ${flowFiber.snapshot().checkpoint.errorState}" }
override fun flowErrored(flowFiber: FlowFiber, currentState: StateMachineState, errors: List<Throwable>) {
log.debug { "Flow ${flowFiber.id} in state $currentState encountered error" }
flowFiber.scheduleEvent(Event.StartErrorPropagation)
for ((index, error) in errors.withIndex()) {
log.warn("Flow ${flowFiber.id} is propagating error [$index] ", error)
}
}

override fun flowCleaned(flowFiber: FlowFiber) {
throw IllegalStateException("Flow ${flowFiber.id} cleaned after error propagation triggered")
}

override fun flowRemoved(flowFiber: FlowFiber) {}
}
@ -56,13 +56,15 @@ import java.security.SecureRandom
import java.util.*
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.ExecutorService
import java.util.concurrent.locks.ReentrantLock
import javax.annotation.concurrent.ThreadSafe
import kotlin.collections.ArrayList
import kotlin.concurrent.withLock
import kotlin.streams.toList

/**
* The StateMachineManagerImpl will always invoke the flow fibers on the given [AffinityExecutor], regardless of which
* thread actually starts them via [startFlow].
* thread actually starts them via [deliverExternalEvent].
*/
@ThreadSafe
class SingleThreadedStateMachineManager(
@ -100,6 +102,7 @@ class SingleThreadedStateMachineManager(
private val flowMessaging: FlowMessaging = FlowMessagingImpl(serviceHub)
private val fiberDeserializationChecker = if (serviceHub.configuration.shouldCheckCheckpoints()) FiberDeserializationChecker() else null
private val transitionExecutor = makeTransitionExecutor()
private val ourSenderUUID = serviceHub.networkService.ourSenderUUID

private var checkpointSerializationContext: SerializationContext? = null
private var tokenizableServices: List<Any>? = null
@ -138,7 +141,7 @@ class SingleThreadedStateMachineManager(
resumeRestoredFlows(fibers)
flowMessaging.start { receivedMessage, deduplicationHandler ->
executor.execute {
onSessionMessage(receivedMessage, deduplicationHandler)
deliverExternalEvent(deduplicationHandler.externalCause)
}
}
}
@ -186,7 +189,7 @@ class SingleThreadedStateMachineManager(
}
}

override fun <A> startFlow(
private fun <A> startFlow(
flowLogic: FlowLogic<A>,
context: InvocationContext,
ourIdentity: Party?,
@ -320,7 +323,73 @@ class SingleThreadedStateMachineManager(
}
}

private fun onSessionMessage(message: ReceivedMessage, deduplicationHandler: DeduplicationHandler) {
override fun retryFlowFromSafePoint(currentState: StateMachineState) {
// Get set of external events
val flowId = currentState.flowLogic.runId
val oldFlowLeftOver = mutex.locked { flows[flowId] }?.fiber?.transientValues?.value?.eventQueue
if (oldFlowLeftOver == null) {
logger.error("Unable to find flow for flow $flowId. Something is very wrong. The flow will not retry.")
return
}
val flow = if (currentState.isAnyCheckpointPersisted) {
val serializedCheckpoint = checkpointStorage.getCheckpoint(flowId)
if (serializedCheckpoint == null) {
logger.error("Unable to find database checkpoint for flow $flowId. Something is very wrong. The flow will not retry.")
return
}
val checkpoint = deserializeCheckpoint(serializedCheckpoint)
if (checkpoint == null) {
logger.error("Unable to deserialize database checkpoint for flow $flowId. Something is very wrong. The flow will not retry.")
return
}
// Resurrect flow
createFlowFromCheckpoint(
id = flowId,
checkpoint = checkpoint,
initialDeduplicationHandler = null,
isAnyCheckpointPersisted = true,
isStartIdempotent = false,
senderUUID = null
)
} else {
// Just flow initiation message
null
}
externalEventMutex.withLock {
if (flow != null) addAndStartFlow(flowId, flow)
// Deliver all the external events from the old flow instance.
val unprocessedExternalEvents = mutableListOf<ExternalEvent>()
do {
val event = oldFlowLeftOver.tryReceive()
if (event is Event.GeneratedByExternalEvent) {
unprocessedExternalEvents += event.deduplicationHandler.externalCause
}
} while (event != null)
val externalEvents = currentState.pendingDeduplicationHandlers.map { it.externalCause } + unprocessedExternalEvents
for (externalEvent in externalEvents) {
deliverExternalEvent(externalEvent)
}
}
}

private val externalEventMutex = ReentrantLock()
override fun deliverExternalEvent(event: ExternalEvent) {
externalEventMutex.withLock {
when (event) {
is ExternalEvent.ExternalMessageEvent -> onSessionMessage(event)
is ExternalEvent.ExternalStartFlowEvent<*> -> onExternalStartFlow(event)
}
}
}

private fun <T> onExternalStartFlow(event: ExternalEvent.ExternalStartFlowEvent<T>) {
val future = startFlow(event.flowLogic, event.context, ourIdentity = null, deduplicationHandler = event.deduplicationHandler)
event.wireUpFuture(future)
}

private fun onSessionMessage(event: ExternalEvent.ExternalMessageEvent) {
val message: ReceivedMessage = event.receivedMessage
val deduplicationHandler: DeduplicationHandler = event.deduplicationHandler
val peer = message.peer
val sessionMessage = try {
message.data.deserialize<SessionMessage>()
@ -394,7 +463,7 @@ class SingleThreadedStateMachineManager(
}

if (replyError != null) {
flowMessaging.sendSessionMessage(sender, replyError, DeduplicationId.createRandom(secureRandom))
flowMessaging.sendSessionMessage(sender, replyError, SenderDeduplicationId(DeduplicationId.createRandom(secureRandom), ourSenderUUID))
deduplicationHandler.afterDatabaseTransaction()
}
}
@ -468,7 +537,8 @@ class SingleThreadedStateMachineManager(
isAnyCheckpointPersisted = false,
isStartIdempotent = isStartIdempotent,
isRemoved = false,
flowLogic = flowLogic
flowLogic = flowLogic,
senderUUID = ourSenderUUID
)
flowStateMachineImpl.transientState = TransientReference(initialState)
mutex.locked {
@ -503,7 +573,7 @@ class SingleThreadedStateMachineManager(

private fun createTransientValues(id: StateMachineRunId, resultFuture: CordaFuture<Any?>): FlowStateMachineImpl.TransientValues {
return FlowStateMachineImpl.TransientValues(
eventQueue = Channels.newChannel(stateMachineConfiguration.eventQueueSize, Channels.OverflowPolicy.BLOCK),
eventQueue = Channels.newChannel(-1, Channels.OverflowPolicy.BLOCK),
resultFuture = resultFuture,
database = database,
transitionExecutor = transitionExecutor,
@ -519,7 +589,8 @@ class SingleThreadedStateMachineManager(
checkpoint: Checkpoint,
isAnyCheckpointPersisted: Boolean,
isStartIdempotent: Boolean,
initialDeduplicationHandler: DeduplicationHandler?
initialDeduplicationHandler: DeduplicationHandler?,
senderUUID: String? = ourSenderUUID
): Flow {
val flowState = checkpoint.flowState
val resultFuture = openFuture<Any?>()
@ -534,7 +605,8 @@ class SingleThreadedStateMachineManager(
isAnyCheckpointPersisted = isAnyCheckpointPersisted,
isStartIdempotent = isStartIdempotent,
isRemoved = false,
flowLogic = logic
flowLogic = logic,
senderUUID = senderUUID
)
val fiber = FlowStateMachineImpl(id, logic, scheduler)
fiber.transientValues = TransientReference(createTransientValues(id, resultFuture))
@ -552,7 +624,8 @@ class SingleThreadedStateMachineManager(
isAnyCheckpointPersisted = isAnyCheckpointPersisted,
isStartIdempotent = isStartIdempotent,
isRemoved = false,
flowLogic = fiber.logic
flowLogic = fiber.logic,
senderUUID = senderUUID
)
fiber.transientValues = TransientReference(createTransientValues(id, resultFuture))
fiber.transientState = TransientReference(state)
@ -576,9 +649,13 @@ class SingleThreadedStateMachineManager(
startedFutures[id]?.setException(IllegalStateException("Will not start flow as SMM is stopping"))
logger.trace("Not resuming as SMM is stopping.")
} else {
incrementLiveFibers()
unfinishedFibers.countUp()
flows[id] = flow
val oldFlow = flows.put(id, flow)
if (oldFlow == null) {
incrementLiveFibers()
unfinishedFibers.countUp()
} else {
oldFlow.resultFuture.captureLater(flow.resultFuture)
}
flow.fiber.scheduleEvent(Event.DoRemainingWork)
when (checkpoint.flowState) {
is FlowState.Unstarted -> {
@ -614,7 +691,7 @@ class SingleThreadedStateMachineManager(

private fun makeTransitionExecutor(): TransitionExecutor {
val interceptors = ArrayList<TransitionInterceptor>()
interceptors.add { HospitalisingInterceptor(PropagatingFlowHospital, it) }
interceptors.add { HospitalisingInterceptor(StaffedFlowHospital, it) }
if (serviceHub.configuration.devMode) {
interceptors.add { DumpHistoryOnErrorInterceptor(it) }
}
@ -0,0 +1,127 @@
package net.corda.node.services.statemachine

import net.corda.core.flows.StateMachineRunId
import net.corda.core.utilities.loggerFor
import java.sql.SQLException
import java.time.Instant
import java.util.concurrent.ConcurrentHashMap

/**
* This hospital consults "staff" to see if they can automatically diagnose and treat flows.
*/
object StaffedFlowHospital : FlowHospital {
private val log = loggerFor<StaffedFlowHospital>()

private val staff = listOf(DeadlockNurse, DuplicateInsertSpecialist)

private val patients = ConcurrentHashMap<StateMachineRunId, MedicalHistory>()

val numberOfPatients = patients.size

class MedicalHistory {
val records: MutableList<Record> = mutableListOf()

sealed class Record(val suspendCount: Int) {
class Admitted(val at: Instant, suspendCount: Int) : Record(suspendCount) {
override fun toString() = "Admitted(at=$at, suspendCount=$suspendCount)"
}

class Discharged(val at: Instant, suspendCount: Int, val by: Staff, val error: Throwable) : Record(suspendCount) {
override fun toString() = "Discharged(at=$at, suspendCount=$suspendCount, by=$by)"
}
}

fun notDischargedForTheSameThingMoreThan(max: Int, by: Staff): Boolean {
val lastAdmittanceSuspendCount = (records.last() as MedicalHistory.Record.Admitted).suspendCount
return records.filterIsInstance(MedicalHistory.Record.Discharged::class.java).filter { it.by == by && it.suspendCount == lastAdmittanceSuspendCount }.count() <= max
}

override fun toString(): String = "${this.javaClass.simpleName}(records = $records)"
}

override fun flowErrored(flowFiber: FlowFiber, currentState: StateMachineState, errors: List<Throwable>) {
log.info("Flow ${flowFiber.id} admitted to hospital in state $currentState")
val medicalHistory = patients.computeIfAbsent(flowFiber.id) { MedicalHistory() }
medicalHistory.records += MedicalHistory.Record.Admitted(Instant.now(), currentState.checkpoint.numberOfSuspends)
for ((index, error) in errors.withIndex()) {
log.info("Flow ${flowFiber.id} has error [$index]", error)
if (!errorIsDischarged(flowFiber, currentState, error, medicalHistory)) {
// If any error isn't discharged, then we propagate.
log.warn("Flow ${flowFiber.id} error was not discharged, propagating.")
flowFiber.scheduleEvent(Event.StartErrorPropagation)
return
}
}
// If all are discharged, retry.
flowFiber.scheduleEvent(Event.RetryFlowFromSafePoint)
}

private fun errorIsDischarged(flowFiber: FlowFiber, currentState: StateMachineState, error: Throwable, medicalHistory: MedicalHistory): Boolean {
for (staffMember in staff) {
val diagnosis = staffMember.consult(flowFiber, currentState, error, medicalHistory)
if (diagnosis == Diagnosis.DISCHARGE) {
medicalHistory.records += MedicalHistory.Record.Discharged(Instant.now(), currentState.checkpoint.numberOfSuspends, staffMember, error)
log.info("Flow ${flowFiber.id} error discharged from hospital by $staffMember")
return true
}
}
return false
}

// It's okay for flows to be cleaned... we fix them now!
override fun flowCleaned(flowFiber: FlowFiber) {}

override fun flowRemoved(flowFiber: FlowFiber) {
patients.remove(flowFiber.id)
}

enum class Diagnosis {
/**
* Retry from last safe point.
*/
DISCHARGE,
/**
* Please try another member of staff.
*/
NOT_MY_SPECIALTY
}

interface Staff {
fun consult(flowFiber: FlowFiber, currentState: StateMachineState, newError: Throwable, history: MedicalHistory): Diagnosis
}

/**
* SQL Deadlock detection.
*/
object DeadlockNurse : Staff {
override fun consult(flowFiber: FlowFiber, currentState: StateMachineState, newError: Throwable, history: MedicalHistory): Diagnosis {
return if (mentionsDeadlock(newError)) {
Diagnosis.DISCHARGE
} else {
Diagnosis.NOT_MY_SPECIALTY
}
}

private fun mentionsDeadlock(exception: Throwable?): Boolean {
return exception != null && (exception is SQLException && ((exception.message?.toLowerCase()?.contains("deadlock")
?: false)) || mentionsDeadlock(exception.cause))
}
}

/**
* Primary key violation detection for duplicate inserts. Will detect other constraint violations too.
*/
object DuplicateInsertSpecialist : Staff {
override fun consult(flowFiber: FlowFiber, currentState: StateMachineState, newError: Throwable, history: MedicalHistory): Diagnosis {
return if (mentionsConstraintViolation(newError) && history.notDischargedForTheSameThingMoreThan(3, this)) {
Diagnosis.DISCHARGE
} else {
Diagnosis.NOT_MY_SPECIALTY
}
}

private fun mentionsConstraintViolation(exception: Throwable?): Boolean {
return exception != null && (exception is org.hibernate.exception.ConstraintViolationException || mentionsConstraintViolation(exception.cause))
}
}
}
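The staff pattern above is deliberately pluggable: each member inspects the error and answers with a Diagnosis, where DISCHARGE retries the flow from its last safe point and NOT_MY_SPECIALTY defers to the next member. A hypothetical extra staff member is sketched below against simplified stand-in types; it is not part of this commit and does not use the real FlowFiber, StateMachineState or MedicalHistory classes.

import java.io.IOException

enum class SketchDiagnosis { DISCHARGE, NOT_MY_SPECIALTY }

interface SketchStaff {
    fun consult(error: Throwable, previousDischargesForThis: Int): SketchDiagnosis
}

// Discharges (i.e. retries from the last safe point) transient I/O failures, but only a
// bounded number of times so a persistent fault eventually propagates to the flow.
object TransientIoNurse : SketchStaff {
    private const val MAX_DISCHARGES = 3
    override fun consult(error: Throwable, previousDischargesForThis: Int): SketchDiagnosis {
        val transient = generateSequence(error) { it.cause }.any { it is IOException }
        return if (transient && previousDischargesForThis < MAX_DISCHARGES) {
            SketchDiagnosis.DISCHARGE
        } else {
            SketchDiagnosis.NOT_MY_SPECIALTY
        }
    }
}

fun main() {
    println(TransientIoNurse.consult(IOException("connection reset"), previousDischargesForThis = 0)) // DISCHARGE
    println(TransientIoNurse.consult(IllegalStateException("bug"), previousDischargesForThis = 0))    // NOT_MY_SPECIALTY
}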
@ -14,11 +14,11 @@ import net.corda.core.concurrent.CordaFuture
import net.corda.core.context.InvocationContext
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.StateMachineRunId
import net.corda.core.identity.Party
import net.corda.core.internal.FlowStateMachine
import net.corda.core.messaging.DataFeed
import net.corda.core.utilities.Try
import net.corda.node.services.messaging.DeduplicationHandler
import net.corda.node.services.messaging.ReceivedMessage
import rx.Observable

/**
@ -50,21 +50,6 @@ interface StateMachineManager {
*/
fun stop(allowedUnsuspendedFiberCount: Int)

/**
* Starts a new flow.
*
* @param flowLogic The flow's code.
* @param context The context of the flow.
* @param ourIdentity The identity to use for the flow.
* @param deduplicationHandler Allows exactly-once start of the flow, see [DeduplicationHandler].
*/
fun <A> startFlow(
flowLogic: FlowLogic<A>,
context: InvocationContext,
ourIdentity: Party?,
deduplicationHandler: DeduplicationHandler?
): CordaFuture<FlowStateMachine<A>>

/**
* Represents an addition/removal of a state machine.
*/
@ -101,6 +86,12 @@ interface StateMachineManager {
* @return whether the flow existed and was killed.
*/
fun killFlow(id: StateMachineRunId): Boolean

/**
* Deliver an external event to the state machine. Such an event might be a new P2P message, or a request to start a flow.
* The event may be replayed if a flow fails and attempts to retry.
*/
fun deliverExternalEvent(event: ExternalEvent)
}

// These must be idempotent! A later failure in the state transition may error the flow state, and a replay may call
@ -110,4 +101,38 @@ interface StateMachineManagerInternal {
fun addSessionBinding(flowId: StateMachineRunId, sessionId: SessionId)
fun removeSessionBindings(sessionIds: Set<SessionId>)
fun removeFlow(flowId: StateMachineRunId, removalReason: FlowRemovalReason, lastState: StateMachineState)
fun retryFlowFromSafePoint(currentState: StateMachineState)
}

/**
* Represents an external event that can be injected into the state machine and that might need to be replayed if
* a flow retries. They always have de-duplication handlers to assist with the at-most once logic where required.
*/
interface ExternalEvent {
val deduplicationHandler: DeduplicationHandler

/**
* An external P2P message event.
*/
interface ExternalMessageEvent : ExternalEvent {
val receivedMessage: ReceivedMessage
}

/**
* An external request to start a flow, from the scheduler for example.
*/
interface ExternalStartFlowEvent<T> : ExternalEvent {
val flowLogic: FlowLogic<T>
val context: InvocationContext

/**
* A callback for the state machine to pass back the [Future] associated with the flow start to the submitter.
*/
fun wireUpFuture(flowFuture: CordaFuture<FlowStateMachine<T>>)

/**
* The future representing the flow start, passed back from the state machine to the submitter of this event.
*/
val future: CordaFuture<FlowStateMachine<T>>
}
}
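The wireUpFuture callback above is the hand-off between the submitter and the state machine: the submitter keeps the event's future, and the state machine completes it once the flow has actually been started (or restarted after a retry). A small sketch of that hand-off, assuming a plain CompletableFuture and a String flow handle as stand-ins for CordaFuture<FlowStateMachine<T>>:

import java.util.concurrent.CompletableFuture

interface SketchStartFlowEvent {
    fun wireUpFuture(flowFuture: CompletableFuture<String>)
    val future: CompletableFuture<String>
}

class StartFlowRequest : SketchStartFlowEvent {
    override val future = CompletableFuture<String>()
    override fun wireUpFuture(flowFuture: CompletableFuture<String>) {
        // Forward whatever the state machine's future produces to the submitter's view.
        flowFuture.whenComplete { value, error ->
            if (error != null) future.completeExceptionally(error) else future.complete(value)
        }
    }
}

fun main() {
    val event = StartFlowRequest()
    val submitterView = event.future          // handed to whoever asked for the flow to start
    val started = CompletableFuture<String>() // owned by the state machine
    event.wireUpFuture(started)
    started.complete("flow-handle-42")
    println(submitterView.get())              // flow-handle-42
}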
@ -36,6 +36,7 @@ import net.corda.node.services.messaging.DeduplicationHandler
* possible.
* @param isRemoved true if the flow has been removed from the state machine manager. This is used to avoid any further
* work.
* @param senderUUID the identifier of the sending state machine or null if this flow is resumed from a checkpoint so that it does not participate in de-duplication high-water-marking.
*/
// TODO perhaps add a read-only environment to the state machine for things that don't change over time?
// TODO evaluate persistent datastructure libraries to replace the inefficient copying we currently do.
@ -47,7 +48,8 @@ data class StateMachineState(
val isTransactionTracked: Boolean,
val isAnyCheckpointPersisted: Boolean,
val isStartIdempotent: Boolean,
val isRemoved: Boolean
val isRemoved: Boolean,
val senderUUID: String?
)

/**
@ -44,7 +44,8 @@ class DumpHistoryOnErrorInterceptor(val delegate: TransitionExecutor) : Transiti
(record ?: ArrayList()).apply { add(transitionRecord) }
}

if (nextState.checkpoint.errorState is ErrorState.Errored) {
// Just if we decide to propagate, and not if just on the way to the hospital.
if (nextState.checkpoint.errorState is ErrorState.Errored && nextState.checkpoint.errorState.propagating) {
log.warn("Flow ${fiber.id} errored, dumping all transitions:\n${record!!.joinToString("\n")}")
for (error in nextState.checkpoint.errorState.errors) {
log.warn("Flow ${fiber.id} error", error.exception)

@ -36,20 +36,23 @@ class HospitalisingInterceptor(
actionExecutor: ActionExecutor
): Pair<FlowContinuation, StateMachineState> {
val (continuation, nextState) = delegate.executeTransition(fiber, previousState, event, transition, actionExecutor)
when (nextState.checkpoint.errorState) {
ErrorState.Clean -> {
if (hospitalisedFlows.remove(fiber.id) != null) {
flowHospital.flowCleaned(fiber)

when (nextState.checkpoint.errorState) {
is ErrorState.Clean -> {
if (hospitalisedFlows.remove(fiber.id) != null) {
flowHospital.flowCleaned(fiber)
}
}
is ErrorState.Errored -> {
val exceptionsToHandle = nextState.checkpoint.errorState.errors.map { it.exception }
if (hospitalisedFlows.putIfAbsent(fiber.id, fiber) == null) {
flowHospital.flowErrored(fiber, previousState, exceptionsToHandle)
}
}
}
is ErrorState.Errored -> {
if (hospitalisedFlows.putIfAbsent(fiber.id, fiber) == null) {
flowHospital.flowErrored(fiber)
}
}
}
if (nextState.isRemoved) {
hospitalisedFlows.remove(fiber.id)
flowHospital.flowRemoved(fiber)
}
return Pair(continuation, nextState)
}
@ -56,9 +56,6 @@ class DeliverSessionMessageTransition(
is EndSessionMessage -> endMessageTransition()
}
}
if (!isErrored()) {
persistCheckpointIfNeeded()
}
// Schedule a DoRemainingWork to check whether the flow needs to be woken up.
actions.add(Action.ScheduleEvent(Event.DoRemainingWork))
FlowContinuation.ProcessEvents
@ -83,7 +80,7 @@ class DeliverSessionMessageTransition(
// Send messages that were buffered pending confirmation of session.
val sendActions = sessionState.bufferedMessages.map { (deduplicationId, bufferedMessage) ->
val existingMessage = ExistingSessionMessage(message.initiatedSessionId, bufferedMessage)
Action.SendExisting(initiatedSession.peerParty, existingMessage, deduplicationId)
Action.SendExisting(initiatedSession.peerParty, existingMessage, SenderDeduplicationId(deduplicationId, startingState.senderUUID))
}
actions.addAll(sendActions)
currentState = currentState.copy(checkpoint = newCheckpoint)
@ -156,24 +153,6 @@ class DeliverSessionMessageTransition(
}
}

private fun TransitionBuilder.persistCheckpointIfNeeded() {
// We persist the message as soon as it arrives.
if (context.configuration.sessionDeliverPersistenceStrategy == SessionDeliverPersistenceStrategy.OnDeliver &&
event.sessionMessage.payload !is EndSessionMessage) {
actions.addAll(arrayOf(
Action.CreateTransaction,
Action.PersistCheckpoint(context.id, currentState.checkpoint),
Action.PersistDeduplicationFacts(currentState.pendingDeduplicationHandlers),
Action.CommitTransaction,
Action.AcknowledgeMessages(currentState.pendingDeduplicationHandlers)
))
currentState = currentState.copy(
pendingDeduplicationHandlers = emptyList(),
isAnyCheckpointPersisted = true
)
}
}

private fun TransitionBuilder.endMessageTransition() {
val sessionId = event.sessionMessage.recipientSessionId
val sessions = currentState.checkpoint.sessions
@ -56,7 +56,7 @@ class ErrorFlowTransition(
sessions = newSessions
)
currentState = currentState.copy(checkpoint = newCheckpoint)
actions.add(Action.PropagateErrors(errorMessages, initiatedSessions))
actions.add(Action.PropagateErrors(errorMessages, initiatedSessions, startingState.senderUUID))
}

// If we're errored but not propagating keep processing events.

@ -226,7 +226,7 @@ class StartedFlowTransition(
}
val deduplicationId = DeduplicationId.createForNormal(checkpoint, index++)
val initialMessage = createInitialSessionMessage(sessionState.initiatingSubFlow, sourceSessionId, null)
actions.add(Action.SendInitial(sessionState.party, initialMessage, deduplicationId))
actions.add(Action.SendInitial(sessionState.party, initialMessage, SenderDeduplicationId(deduplicationId, startingState.senderUUID)))
newSessions[sourceSessionId] = SessionState.Initiating(
bufferedMessages = emptyList(),
rejectionError = null
@ -263,7 +263,7 @@ class StartedFlowTransition(
when (existingSessionState) {
is SessionState.Uninitiated -> {
val initialMessage = createInitialSessionMessage(existingSessionState.initiatingSubFlow, sourceSessionId, message)
actions.add(Action.SendInitial(existingSessionState.party, initialMessage, deduplicationId))
actions.add(Action.SendInitial(existingSessionState.party, initialMessage, SenderDeduplicationId(deduplicationId, startingState.senderUUID)))
newSessions[sourceSessionId] = SessionState.Initiating(
bufferedMessages = emptyList(),
rejectionError = null
@ -280,7 +280,7 @@ class StartedFlowTransition(
is InitiatedSessionState.Live -> {
val sinkSessionId = existingSessionState.initiatedState.peerSinkSessionId
val existingMessage = ExistingSessionMessage(sinkSessionId, sessionMessage)
actions.add(Action.SendExisting(existingSessionState.peerParty, existingMessage, deduplicationId))
actions.add(Action.SendExisting(existingSessionState.peerParty, existingMessage, SenderDeduplicationId(deduplicationId, startingState.senderUUID)))
Unit
}
InitiatedSessionState.Ended -> {
@ -28,18 +28,19 @@ class TopLevelTransition(
) : Transition {
override fun transition(): TransitionResult {
return when (event) {
is Event.DoRemainingWork -> DoRemainingWorkTransition(context, startingState).transition()
is Event.DeliverSessionMessage -> DeliverSessionMessageTransition(context, startingState, event).transition()
is Event.Error -> errorTransition(event)
is Event.TransactionCommitted -> transactionCommittedTransition(event)
is Event.SoftShutdown -> softShutdownTransition()
is Event.StartErrorPropagation -> startErrorPropagationTransition()
is Event.EnterSubFlow -> enterSubFlowTransition(event)
is Event.LeaveSubFlow -> leaveSubFlowTransition()
is Event.Suspend -> suspendTransition(event)
is Event.FlowFinish -> flowFinishTransition(event)
is Event.InitiateFlow -> initiateFlowTransition(event)
is Event.AsyncOperationCompletion -> asyncOperationCompletionTransition(event)
is Event.DoRemainingWork -> DoRemainingWorkTransition(context, startingState).transition()
is Event.DeliverSessionMessage -> DeliverSessionMessageTransition(context, startingState, event).transition()
is Event.Error -> errorTransition(event)
is Event.TransactionCommitted -> transactionCommittedTransition(event)
is Event.SoftShutdown -> softShutdownTransition()
is Event.StartErrorPropagation -> startErrorPropagationTransition()
is Event.EnterSubFlow -> enterSubFlowTransition(event)
is Event.LeaveSubFlow -> leaveSubFlowTransition()
is Event.Suspend -> suspendTransition(event)
is Event.FlowFinish -> flowFinishTransition(event)
is Event.InitiateFlow -> initiateFlowTransition(event)
is Event.AsyncOperationCompletion -> asyncOperationCompletionTransition(event)
is Event.RetryFlowFromSafePoint -> retryFlowFromSafePointTransition(startingState)
}
}

@ -212,7 +213,7 @@ class TopLevelTransition(
if (state is SessionState.Initiated && state.initiatedState is InitiatedSessionState.Live) {
val message = ExistingSessionMessage(state.initiatedState.peerSinkSessionId, EndSessionMessage)
val deduplicationId = DeduplicationId.createForNormal(currentState.checkpoint, index)
Action.SendExisting(state.peerParty, message, deduplicationId)
Action.SendExisting(state.peerParty, message, SenderDeduplicationId(deduplicationId, currentState.senderUUID))
} else {
null
}
@ -251,4 +252,14 @@ class TopLevelTransition(
resumeFlowLogic(event.returnValue)
}
}

private fun retryFlowFromSafePointTransition(startingState: StateMachineState): TransitionResult {
return builder {
// Need to create a flow from the prior checkpoint or flow initiation.
actions.add(Action.CreateTransaction)
actions.add(Action.RetryFlowFromSafePoint(startingState))
actions.add(Action.CommitTransaction)
FlowContinuation.Abort
}
}
}
Some files were not shown because too many files have changed in this diff.