Merge commit '3f2e653f0' into mike-merge-413f476a3
@ -1,7 +1,7 @@
|
||||
# !! DO NOT MODIFY THE API FILE IN THIS DIRECTORY !!
|
||||
|
||||
The `api-current.txt` file contains a summary of Corda's current public APIs,
|
||||
as generated by the `api-scanner` Gradle plugin. (See [here](../gradle-plugins/api-scanner/README.md) for a detailed description of this plugin.) It will be regenerated and the copy in this repository updated by the Release Manager with
|
||||
as generated by the `api-scanner` Gradle plugin. (See [here](https://github.com/corda/corda-gradle-plugins/blob/master/api-scanner/README.md) for a detailed description of this plugin.) It will be regenerated and the copy in this repository updated by the Release Manager with
|
||||
each new Corda release. It will not be modified otherwise except under special circumstances that will require extra approval.
|
||||
|
||||
Deleting or changing the existing Corda APIs listed in `api-current.txt` may
|
||||
|
2
.gitignore
vendored
@ -39,7 +39,6 @@ lib/quasar.jar
|
||||
.idea/runConfigurations
|
||||
.idea/dictionaries
|
||||
.idea/codeStyles/
|
||||
/gradle-plugins/.idea/
|
||||
|
||||
# Include the -parameters compiler option by default in IntelliJ required for serialization.
|
||||
!.idea/compiler.xml
|
||||
@ -62,7 +61,6 @@ lib/quasar.jar
|
||||
# Gradle:
|
||||
# .idea/gradle.xml
|
||||
# .idea/libraries
|
||||
/gradle-plugins/gradle*
|
||||
|
||||
# Mongo Explorer plugin:
|
||||
# .idea/mongoSettings.xml
|
||||
|
102
.idea/compiler.xml
generated
@ -10,40 +10,18 @@
|
||||
<module name="bank-of-corda-demo_integrationTest" target="1.8" />
|
||||
<module name="bank-of-corda-demo_main" target="1.8" />
|
||||
<module name="bank-of-corda-demo_test" target="1.8" />
|
||||
<module name="behave-tools_main" target="1.8" />
|
||||
<module name="behave-tools_test" target="1.8" />
|
||||
<module name="behave_api" target="1.8" />
|
||||
<module name="behave_behave" target="1.8" />
|
||||
<module name="behave_main" target="1.8" />
|
||||
<module name="behave_scenario" target="1.8" />
|
||||
<module name="behave_smokeTest" target="1.8" />
|
||||
<module name="behave_test" target="1.8" />
|
||||
<module name="blobinspector_main" target="1.8" />
|
||||
<module name="blobinspector_test" target="1.8" />
|
||||
<module name="bootstrapper_main" target="1.8" />
|
||||
<module name="bootstrapper_test" target="1.8" />
|
||||
<module name="bridge_integrationTest" target="1.8" />
|
||||
<module name="bridge_main" target="1.8" />
|
||||
<module name="bridge_test" target="1.8" />
|
||||
<module name="bridgecapsule_main" target="1.6" />
|
||||
<module name="bridgecapsule_smokeTest" target="1.6" />
|
||||
<module name="bridgecapsule_test" target="1.6" />
|
||||
<module name="bridges_integrationTest" target="1.8" />
|
||||
<module name="bridges_main" target="1.8" />
|
||||
<module name="bridges_test" target="1.8" />
|
||||
<module name="buildSrc_main" target="1.8" />
|
||||
<module name="buildSrc_test" target="1.8" />
|
||||
<module name="business-network-demo_integrationTest" target="1.8" />
|
||||
<module name="business-network-demo_main" target="1.8" />
|
||||
<module name="business-network-demo_test" target="1.8" />
|
||||
<module name="canonicalizer_main" target="1.8" />
|
||||
<module name="canonicalizer_test" target="1.8" />
|
||||
<module name="capsule-crr-submission_main" target="1.8" />
|
||||
<module name="capsule-crr-submission_test" target="1.8" />
|
||||
<module name="capsule-hsm-cert-generator_main" target="1.8" />
|
||||
<module name="capsule-hsm-cert-generator_test" target="1.8" />
|
||||
<module name="capsule-hsm_main" target="1.8" />
|
||||
<module name="capsule-hsm_test" target="1.8" />
|
||||
<module name="client_main" target="1.8" />
|
||||
<module name="client_test" target="1.8" />
|
||||
<module name="confidential-identities_main" target="1.8" />
|
||||
@ -54,36 +32,34 @@
|
||||
<module name="corda-core_integrationTest" target="1.8" />
|
||||
<module name="corda-core_smokeTest" target="1.8" />
|
||||
<module name="corda-finance_integrationTest" target="1.8" />
|
||||
<module name="corda-project-tools_main" target="1.8" />
|
||||
<module name="corda-project-tools_test" target="1.8" />
|
||||
<module name="corda-project_main" target="1.8" />
|
||||
<module name="corda-project_test" target="1.8" />
|
||||
<module name="corda-webserver_integrationTest" target="1.8" />
|
||||
<module name="corda-webserver_main" target="1.8" />
|
||||
<module name="corda-webserver_test" target="1.8" />
|
||||
<module name="cordapp-configuration_main" target="1.8" />
|
||||
<module name="cordapp-configuration_test" target="1.8" />
|
||||
<module name="cordapp_integrationTest" target="1.8" />
|
||||
<module name="cordapp_main" target="1.8" />
|
||||
<module name="cordapp_test" target="1.8" />
|
||||
<module name="cordform-common_main" target="1.8" />
|
||||
<module name="cordform-common_test" target="1.8" />
|
||||
<module name="cordformation_main" target="1.8" />
|
||||
<module name="cordformation_runnodes" target="1.8" />
|
||||
<module name="cordformation_test" target="1.8" />
|
||||
<module name="core_extraResource" target="1.8" />
|
||||
<module name="core_integrationTest" target="1.8" />
|
||||
<module name="core_main" target="1.8" />
|
||||
<module name="core_smokeTest" target="1.8" />
|
||||
<module name="core_smokeTestPlugins" target="1.8" />
|
||||
<module name="core_test" target="1.8" />
|
||||
<module name="dbmigration_main" target="1.8" />
|
||||
<module name="dbmigration_test" target="1.8" />
|
||||
<module name="demobench_main" target="1.8" />
|
||||
<module name="demobench_test" target="1.8" />
|
||||
<module name="docs_main" target="1.8" />
|
||||
<module name="docs_source_example-code_integrationTest" target="1.8" />
|
||||
<module name="docs_source_example-code_main" target="1.8" />
|
||||
<module name="docs_source_example-code_test" target="1.8" />
|
||||
<module name="docs_test" target="1.8" />
|
||||
<module name="example-code_integrationTest" target="1.8" />
|
||||
<module name="example-code_main" target="1.8" />
|
||||
<module name="example-code_test" target="1.8" />
|
||||
<module name="experimental-behave_behave" target="1.8" />
|
||||
<module name="experimental-behave_main" target="1.8" />
|
||||
<module name="experimental-behave_scenario" target="1.8" />
|
||||
<module name="experimental-behave_smokeTest" target="1.8" />
|
||||
<module name="experimental-behave_test" target="1.8" />
|
||||
<module name="experimental-kryo-hook_main" target="1.8" />
|
||||
<module name="experimental-kryo-hook_test" target="1.8" />
|
||||
<module name="experimental_main" target="1.8" />
|
||||
@ -95,8 +71,6 @@
|
||||
<module name="finance_integrationTest" target="1.8" />
|
||||
<module name="finance_main" target="1.8" />
|
||||
<module name="finance_test" target="1.8" />
|
||||
<module name="flow-hook_main" target="1.8" />
|
||||
<module name="flow-hook_test" target="1.8" />
|
||||
<module name="flows_integrationTest" target="1.8" />
|
||||
<module name="flows_main" target="1.8" />
|
||||
<module name="flows_test" target="1.8" />
|
||||
@ -104,8 +78,13 @@
|
||||
<module name="gradle-plugins-cordapp_test" target="1.8" />
|
||||
<module name="graphs_main" target="1.8" />
|
||||
<module name="graphs_test" target="1.8" />
|
||||
<module name="intellij-plugin_main" target="1.8" />
|
||||
<module name="intellij-plugin_test" target="1.8" />
|
||||
<module name="irs-demo-cordapp_integrationTest" target="1.8" />
|
||||
<module name="irs-demo-cordapp_main" target="1.8" />
|
||||
<module name="irs-demo-cordapp_main~1" target="1.8" />
|
||||
<module name="irs-demo-cordapp_test" target="1.8" />
|
||||
<module name="irs-demo-cordapp_test~1" target="1.8" />
|
||||
<module name="irs-demo-web_main" target="1.8" />
|
||||
<module name="irs-demo-web_test" target="1.8" />
|
||||
<module name="irs-demo_integrationTest" target="1.8" />
|
||||
<module name="irs-demo_main" target="1.8" />
|
||||
<module name="irs-demo_systemTest" target="1.8" />
|
||||
@ -117,19 +96,12 @@
|
||||
<module name="jfx_integrationTest" target="1.8" />
|
||||
<module name="jfx_main" target="1.8" />
|
||||
<module name="jfx_test" target="1.8" />
|
||||
<module name="jmeter_main" target="1.8" />
|
||||
<module name="jmeter_test" target="1.8" />
|
||||
<module name="kryo-hook_main" target="1.8" />
|
||||
<module name="kryo-hook_test" target="1.8" />
|
||||
<module name="loadtest_main" target="1.8" />
|
||||
<module name="loadtest_test" target="1.8" />
|
||||
<module name="mock_main" target="1.8" />
|
||||
<module name="mock_test" target="1.8" />
|
||||
<module name="network-management-capsule_main" target="1.8" />
|
||||
<module name="network-management-capsule_test" target="1.8" />
|
||||
<module name="network-management_integrationTest" target="1.8" />
|
||||
<module name="network-management_main" target="1.8" />
|
||||
<module name="network-management_test" target="1.8" />
|
||||
<module name="network-visualiser_main" target="1.8" />
|
||||
<module name="network-visualiser_test" target="1.8" />
|
||||
<module name="node-api_main" target="1.8" />
|
||||
@ -145,62 +117,45 @@
|
||||
<module name="node_test" target="1.8" />
|
||||
<module name="notary-demo_main" target="1.8" />
|
||||
<module name="notary-demo_test" target="1.8" />
|
||||
<module name="notaryhealthcheck_main" target="1.8" />
|
||||
<module name="notaryhealthcheck_test" target="1.8" />
|
||||
<module name="perftestcordapp_integrationTest" target="1.8" />
|
||||
<module name="perftestcordapp_main" target="1.8" />
|
||||
<module name="perftestcordapp_test" target="1.8" />
|
||||
<module name="qa-behave_main" target="1.8" />
|
||||
<module name="qa-behave_test" target="1.8" />
|
||||
<module name="qa_main" target="1.8" />
|
||||
<module name="qa_test" target="1.8" />
|
||||
<module name="publish-utils_main" target="1.8" />
|
||||
<module name="publish-utils_test" target="1.8" />
|
||||
<module name="quasar-hook_main" target="1.8" />
|
||||
<module name="quasar-hook_test" target="1.8" />
|
||||
<module name="quasar-utils_main" target="1.8" />
|
||||
<module name="quasar-utils_test" target="1.8" />
|
||||
<module name="registration-tool_integrationTest" target="1.8" />
|
||||
<module name="registration-tool_main" target="1.8" />
|
||||
<module name="registration-tool_test" target="1.8" />
|
||||
<module name="rpc-proxy_main" target="1.8" />
|
||||
<module name="rpc-proxy_rpcProxy" target="1.8" />
|
||||
<module name="rpc-proxy_smokeTest" target="1.8" />
|
||||
<module name="rpc-proxy_test" target="1.8" />
|
||||
<module name="rpc_integrationTest" target="1.8" />
|
||||
<module name="rpc_main" target="1.8" />
|
||||
<module name="rpc_smokeTest" target="1.8" />
|
||||
<module name="rpc_test" target="1.8" />
|
||||
<module name="samples-business-network-demo_main" target="1.8" />
|
||||
<module name="samples-business-network-demo_test" target="1.8" />
|
||||
<module name="samples_main" target="1.8" />
|
||||
<module name="samples_test" target="1.8" />
|
||||
<module name="sandbox_main" target="1.8" />
|
||||
<module name="sandbox_test" target="1.8" />
|
||||
<module name="sgx-hsm-tool_main" target="1.8" />
|
||||
<module name="sgx-hsm-tool_test" target="1.8" />
|
||||
<module name="sgx-jvm_hsm-tool_main" target="1.8" />
|
||||
<module name="sgx-jvm_hsm-tool_test" target="1.8" />
|
||||
<module name="shell_integrationTest" target="1.8" />
|
||||
<module name="shell_main" target="1.8" />
|
||||
<module name="shell_test" target="1.8" />
|
||||
<module name="simm-valuation-demo_integrationTest" target="1.8" />
|
||||
<module name="simm-valuation-demo_main" target="1.8" />
|
||||
<module name="simm-valuation-demo_scenario" target="1.8" />
|
||||
<module name="simm-valuation-demo_scenarioTest" target="1.8" />
|
||||
<module name="simm-valuation-demo_test" target="1.8" />
|
||||
<module name="smoke-test-utils_main" target="1.8" />
|
||||
<module name="smoke-test-utils_test" target="1.8" />
|
||||
<module name="source-example-code_integrationTest" target="1.8" />
|
||||
<module name="source-example-code_main" target="1.8" />
|
||||
<module name="source-example-code_test" target="1.8" />
|
||||
<module name="test-common_main" target="1.8" />
|
||||
<module name="test-common_test" target="1.8" />
|
||||
<module name="test-utils_integrationTest" target="1.8" />
|
||||
<module name="test-utils_main" target="1.8" />
|
||||
<module name="test-utils_test" target="1.8" />
|
||||
<module name="testing-node-driver_integrationTest" target="1.8" />
|
||||
<module name="testing-node-driver_main" target="1.8" />
|
||||
<module name="testing-node-driver_test" target="1.8" />
|
||||
<module name="testing-smoke-test-utils_main" target="1.8" />
|
||||
<module name="testing-smoke-test-utils_test" target="1.8" />
|
||||
<module name="testing-test-common_main" target="1.8" />
|
||||
<module name="testing-test-common_test" target="1.8" />
|
||||
<module name="testing-test-utils_main" target="1.8" />
|
||||
<module name="testing-test-utils_test" target="1.8" />
|
||||
<module name="testing_main" target="1.8" />
|
||||
<module name="testing_test" target="1.8" />
|
||||
<module name="tools_main" target="1.8" />
|
||||
<module name="tools_test" target="1.8" />
|
||||
<module name="trader-demo_integrationTest" target="1.8" />
|
||||
@ -209,13 +164,12 @@
|
||||
<module name="verifier_integrationTest" target="1.8" />
|
||||
<module name="verifier_main" target="1.8" />
|
||||
<module name="verifier_test" target="1.8" />
|
||||
<module name="verify-enclave_integrationTest" target="1.8" />
|
||||
<module name="verify-enclave_main" target="1.8" />
|
||||
<module name="verify-enclave_test" target="1.8" />
|
||||
<module name="web_main" target="1.8" />
|
||||
<module name="web_test" target="1.8" />
|
||||
<module name="webcapsule_main" target="1.6" />
|
||||
<module name="webcapsule_test" target="1.6" />
|
||||
<module name="webserver-webcapsule_main" target="1.8" />
|
||||
<module name="webserver-webcapsule_test" target="1.8" />
|
||||
<module name="webserver_integrationTest" target="1.8" />
|
||||
<module name="webserver_main" target="1.8" />
|
||||
<module name="webserver_test" target="1.8" />
|
||||
|
@ -10,22 +10,21 @@
|
||||
|
||||
package net.corda.client.jackson
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonIgnore
|
||||
import com.fasterxml.jackson.annotation.JsonProperty
|
||||
import com.fasterxml.jackson.annotation.*
|
||||
import com.fasterxml.jackson.core.*
|
||||
import com.fasterxml.jackson.databind.*
|
||||
import com.fasterxml.jackson.databind.annotation.JsonDeserialize
|
||||
import com.fasterxml.jackson.databind.annotation.JsonSerialize
|
||||
import com.fasterxml.jackson.databind.deser.std.NumberDeserializers
|
||||
import com.fasterxml.jackson.databind.deser.std.StdDeserializer
|
||||
import com.fasterxml.jackson.databind.module.SimpleModule
|
||||
import com.fasterxml.jackson.databind.node.ObjectNode
|
||||
import com.fasterxml.jackson.databind.ser.std.StdSerializer
|
||||
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule
|
||||
import com.fasterxml.jackson.module.kotlin.KotlinModule
|
||||
import com.fasterxml.jackson.module.kotlin.convertValue
|
||||
import net.corda.client.jackson.internal.addSerAndDeser
|
||||
import net.corda.client.jackson.internal.jsonObject
|
||||
import net.corda.client.jackson.internal.readValueAs
|
||||
import net.corda.core.CordaInternal
|
||||
import net.corda.core.CordaOID
|
||||
import net.corda.core.DoNotImplement
|
||||
import net.corda.core.contracts.Amount
|
||||
import net.corda.core.contracts.ContractState
|
||||
@ -33,24 +32,30 @@ import net.corda.core.contracts.StateRef
|
||||
import net.corda.core.crypto.*
|
||||
import net.corda.core.crypto.TransactionSignature
|
||||
import net.corda.core.identity.*
|
||||
import net.corda.core.internal.CertRole
|
||||
import net.corda.core.internal.DigitalSignatureWithCert
|
||||
import net.corda.core.internal.VisibleForTesting
|
||||
import net.corda.core.internal.uncheckedCast
|
||||
import net.corda.core.messaging.CordaRPCOps
|
||||
import net.corda.core.node.NodeInfo
|
||||
import net.corda.core.node.services.IdentityService
|
||||
import net.corda.core.serialization.SerializedBytes
|
||||
import net.corda.core.serialization.deserialize
|
||||
import net.corda.core.serialization.serialize
|
||||
import net.corda.core.transactions.CoreTransaction
|
||||
import net.corda.core.transactions.NotaryChangeWireTransaction
|
||||
import net.corda.core.transactions.SignedTransaction
|
||||
import net.corda.core.transactions.WireTransaction
|
||||
import net.corda.core.utilities.NetworkHostAndPort
|
||||
import net.corda.core.utilities.OpaqueBytes
|
||||
import net.corda.core.utilities.parsePublicKeyBase58
|
||||
import net.corda.core.utilities.toBase58String
|
||||
import net.corda.core.utilities.*
|
||||
import org.bouncycastle.asn1.x509.KeyPurposeId
|
||||
import java.lang.reflect.Modifier
|
||||
import java.math.BigDecimal
|
||||
import java.security.PublicKey
|
||||
import java.security.cert.CertPath
|
||||
import java.security.cert.CertificateFactory
|
||||
import java.security.cert.X509Certificate
|
||||
import java.util.*
|
||||
import javax.security.auth.x500.X500Principal
|
||||
|
||||
/**
|
||||
* Utilities and serialisers for working with JSON representations of basic types. This adds Jackson support for
|
||||
@ -100,25 +105,26 @@ object JacksonSupport {
|
||||
|
||||
val cordaModule: Module by lazy {
|
||||
SimpleModule("core").apply {
|
||||
addSerAndDeser(AnonymousPartySerializer, AnonymousPartyDeserializer)
|
||||
addSerAndDeser(PartySerializer, PartyDeserializer)
|
||||
addDeserializer(AbstractParty::class.java, PartyDeserializer)
|
||||
addSerAndDeser<BigDecimal>(toStringSerializer, NumberDeserializers.BigDecimalDeserializer())
|
||||
addSerAndDeser<SecureHash.SHA256>(toStringSerializer, SecureHashDeserializer())
|
||||
addSerAndDeser(toStringSerializer, AmountDeserializer)
|
||||
addSerAndDeser(OpaqueBytesSerializer, OpaqueBytesDeserializer)
|
||||
addSerAndDeser(toStringSerializer, CordaX500NameDeserializer)
|
||||
addSerAndDeser(PublicKeySerializer, PublicKeyDeserializer)
|
||||
addDeserializer(CompositeKey::class.java, CompositeKeyDeseriaizer)
|
||||
addSerAndDeser(toStringSerializer, NetworkHostAndPortDeserializer)
|
||||
// TODO Add deserialization which follows the same lookup logic as Party
|
||||
addSerializer(PartyAndCertificate::class.java, PartyAndCertificateSerializer)
|
||||
addDeserializer(NodeInfo::class.java, NodeInfoDeserializer)
|
||||
|
||||
listOf(TransactionSignatureSerde, SignedTransactionSerde).forEach { serde -> serde.applyTo(this) }
|
||||
|
||||
// Using mixins to fine-tune the default serialised output
|
||||
setMixInAnnotation(BigDecimal::class.java, BigDecimalMixin::class.java)
|
||||
setMixInAnnotation(X500Principal::class.java, X500PrincipalMixin::class.java)
|
||||
setMixInAnnotation(X509Certificate::class.java, X509CertificateMixin::class.java)
|
||||
setMixInAnnotation(PartyAndCertificate::class.java, PartyAndCertificateSerializerMixin::class.java)
|
||||
setMixInAnnotation(NetworkHostAndPort::class.java, NetworkHostAndPortMixin::class.java)
|
||||
setMixInAnnotation(CordaX500Name::class.java, CordaX500NameMixin::class.java)
|
||||
setMixInAnnotation(Amount::class.java, AmountMixin::class.java)
|
||||
setMixInAnnotation(AbstractParty::class.java, AbstractPartyMixin::class.java)
|
||||
setMixInAnnotation(AnonymousParty::class.java, AnonymousPartyMixin::class.java)
|
||||
setMixInAnnotation(Party::class.java, PartyMixin::class.java)
|
||||
setMixInAnnotation(PublicKey::class.java, PublicKeyMixin::class.java)
|
||||
setMixInAnnotation(ByteSequence::class.java, ByteSequenceMixin::class.java)
|
||||
setMixInAnnotation(SecureHash.SHA256::class.java, SecureHashSHA256Mixin::class.java)
|
||||
setMixInAnnotation(SerializedBytes::class.java, SerializedBytesMixin::class.java)
|
||||
setMixInAnnotation(DigitalSignature.WithKey::class.java, ByteSequenceWithPropertiesMixin::class.java)
|
||||
setMixInAnnotation(DigitalSignatureWithCert::class.java, ByteSequenceWithPropertiesMixin::class.java)
|
||||
setMixInAnnotation(TransactionSignature::class.java, ByteSequenceWithPropertiesMixin::class.java)
|
||||
setMixInAnnotation(SignedTransaction::class.java, SignedTransactionMixin2::class.java)
|
||||
setMixInAnnotation(WireTransaction::class.java, WireTransactionMixin::class.java)
|
||||
setMixInAnnotation(CertPath::class.java, CertPathMixin::class.java)
|
||||
setMixInAnnotation(NodeInfo::class.java, NodeInfoMixin::class.java)
|
||||
}
|
||||
}
|
||||
@ -181,7 +187,13 @@ object JacksonSupport {
|
||||
}
|
||||
}
|
||||
|
||||
private val toStringSerializer = com.fasterxml.jackson.databind.ser.std.ToStringSerializer.instance
|
||||
@JacksonAnnotationsInside
|
||||
@JsonSerialize(using = com.fasterxml.jackson.databind.ser.std.ToStringSerializer::class)
|
||||
private annotation class ToStringSerialize
|
||||
|
||||
@ToStringSerialize
|
||||
@JsonDeserialize(using = NumberDeserializers.BigDecimalDeserializer::class)
|
||||
private interface BigDecimalMixin
|
||||
|
||||
private object DateSerializer : JsonSerializer<Date>() {
|
||||
override fun serialize(value: Date, gen: JsonGenerator, serializers: SerializerProvider) {
|
||||
@ -189,20 +201,21 @@ object JacksonSupport {
|
||||
}
|
||||
}
|
||||
|
||||
private object NetworkHostAndPortDeserializer : JsonDeserializer<NetworkHostAndPort>() {
|
||||
@ToStringSerialize
|
||||
@JsonDeserialize(using = NetworkHostAndPortDeserializer::class)
|
||||
private interface NetworkHostAndPortMixin
|
||||
|
||||
private class NetworkHostAndPortDeserializer : JsonDeserializer<NetworkHostAndPort>() {
|
||||
override fun deserialize(parser: JsonParser, ctxt: DeserializationContext): NetworkHostAndPort {
|
||||
return NetworkHostAndPort.parse(parser.text)
|
||||
}
|
||||
}
|
||||
|
||||
private object CompositeKeyDeseriaizer : JsonDeserializer<CompositeKey>() {
|
||||
override fun deserialize(parser: JsonParser, ctxt: DeserializationContext): CompositeKey {
|
||||
val publicKey = parser.readValueAs<PublicKey>()
|
||||
return publicKey as? CompositeKey ?: throw JsonParseException(parser, "Not a CompositeKey: $publicKey")
|
||||
}
|
||||
}
|
||||
@JsonSerialize(using = PartyAndCertificateSerializer::class)
|
||||
// TODO Add deserialization which follows the same lookup logic as Party
|
||||
private interface PartyAndCertificateSerializerMixin
|
||||
|
||||
private object PartyAndCertificateSerializer : JsonSerializer<PartyAndCertificate>() {
|
||||
private class PartyAndCertificateSerializer : JsonSerializer<PartyAndCertificate>() {
|
||||
override fun serialize(value: PartyAndCertificate, gen: JsonGenerator, serializers: SerializerProvider) {
|
||||
gen.jsonObject {
|
||||
writeObjectField("name", value.name)
|
||||
@ -212,100 +225,146 @@ object JacksonSupport {
|
||||
}
|
||||
}
|
||||
|
||||
@Suppress("unused")
|
||||
private interface NodeInfoMixin {
|
||||
@get:JsonIgnore val legalIdentities: Any // This is already covered by legalIdentitiesAndCerts
|
||||
@JsonSerialize(using = SignedTransactionSerializer::class)
|
||||
@JsonDeserialize(using = SignedTransactionDeserializer::class)
|
||||
private interface SignedTransactionMixin2
|
||||
|
||||
private class SignedTransactionSerializer : JsonSerializer<SignedTransaction>() {
|
||||
override fun serialize(value: SignedTransaction, gen: JsonGenerator, serializers: SerializerProvider) {
|
||||
gen.writeObject(SignedTransactionWrapper(value.txBits.bytes, value.sigs))
|
||||
}
|
||||
}
|
||||
|
||||
private interface JsonSerde<TYPE> {
|
||||
val type: Class<TYPE>
|
||||
val serializer: JsonSerializer<TYPE>
|
||||
val deserializer: JsonDeserializer<TYPE>
|
||||
private class SignedTransactionDeserializer : JsonDeserializer<SignedTransaction>() {
|
||||
override fun deserialize(parser: JsonParser, ctxt: DeserializationContext): SignedTransaction {
|
||||
val wrapper = parser.readValueAs<SignedTransactionWrapper>()
|
||||
return SignedTransaction(SerializedBytes(wrapper.txBits), wrapper.signatures)
|
||||
}
|
||||
}
|
||||
|
||||
fun applyTo(module: SimpleModule) {
|
||||
with(module) {
|
||||
addSerializer(type, serializer)
|
||||
addDeserializer(type, deserializer)
|
||||
private class SignedTransactionWrapper(val txBits: ByteArray, val signatures: List<TransactionSignature>)
|
||||
|
||||
@JsonSerialize(using = SerializedBytesSerializer::class)
|
||||
@JsonDeserialize(using = SerializedBytesDeserializer::class)
|
||||
private class SerializedBytesMixin
|
||||
|
||||
private class SerializedBytesSerializer : JsonSerializer<SerializedBytes<*>>() {
|
||||
override fun serialize(value: SerializedBytes<*>, gen: JsonGenerator, serializers: SerializerProvider) {
|
||||
val deserialized = value.deserialize<Any>()
|
||||
gen.jsonObject {
|
||||
writeStringField("class", deserialized.javaClass.name)
|
||||
writeObjectField("deserialized", deserialized)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private inline fun <reified RESULT> JsonNode.get(fieldName: String, condition: (JsonNode) -> Boolean, mapper: ObjectMapper, parser: JsonParser): RESULT {
|
||||
if (get(fieldName)?.let(condition) != true) {
|
||||
JsonParseException(parser, "Missing required object field \"$fieldName\".")
|
||||
}
|
||||
return mapper.treeToValue(get(fieldName), RESULT::class.java)
|
||||
}
|
||||
|
||||
private object TransactionSignatureSerde : JsonSerde<TransactionSignature> {
|
||||
override val type: Class<TransactionSignature> = TransactionSignature::class.java
|
||||
|
||||
override val serializer = object : StdSerializer<TransactionSignature>(type) {
|
||||
override fun serialize(value: TransactionSignature, gen: JsonGenerator, serializers: SerializerProvider) {
|
||||
gen.jsonObject {
|
||||
writeObjectField("by", value.by)
|
||||
writeObjectField("signatureMetadata", value.signatureMetadata)
|
||||
writeObjectField("bytes", value.bytes)
|
||||
writeObjectField("partialMerkleTree", value.partialMerkleTree)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
override val deserializer = object : StdDeserializer<TransactionSignature>(type) {
|
||||
override fun deserialize(parser: JsonParser, context: DeserializationContext): TransactionSignature {
|
||||
private class SerializedBytesDeserializer : JsonDeserializer<SerializedBytes<*>>() {
|
||||
override fun deserialize(parser: JsonParser, context: DeserializationContext): SerializedBytes<Any> {
|
||||
return if (parser.currentToken == JsonToken.START_OBJECT) {
|
||||
val mapper = parser.codec as ObjectMapper
|
||||
val json = mapper.readTree<JsonNode>(parser)
|
||||
val by = mapper.convertValue<PublicKey>(json["by"])
|
||||
val signatureMetadata = json.get<SignatureMetadata>("signatureMetadata", JsonNode::isObject, mapper, parser)
|
||||
val bytes = json.get<ByteArray>("bytes", JsonNode::isObject, mapper, parser)
|
||||
val partialMerkleTree = json.get<PartialMerkleTree>("partialMerkleTree", JsonNode::isObject, mapper, parser)
|
||||
|
||||
return TransactionSignature(bytes, by, signatureMetadata, partialMerkleTree)
|
||||
val json = parser.readValueAsTree<ObjectNode>()
|
||||
val clazz = context.findClass(json["class"].textValue())
|
||||
val pojo = mapper.convertValue(json["deserialized"], clazz)
|
||||
pojo.serialize()
|
||||
} else {
|
||||
SerializedBytes(parser.binaryValue)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private object SignedTransactionSerde : JsonSerde<SignedTransaction> {
|
||||
override val type: Class<SignedTransaction> = SignedTransaction::class.java
|
||||
@ToStringSerialize
|
||||
private interface X500PrincipalMixin
|
||||
|
||||
override val serializer = object : StdSerializer<SignedTransaction>(type) {
|
||||
override fun serialize(value: SignedTransaction, gen: JsonGenerator, serializers: SerializerProvider) {
|
||||
gen.jsonObject {
|
||||
writeObjectField("txBits", value.txBits.bytes)
|
||||
writeObjectField("signatures", value.sigs)
|
||||
@JsonSerialize(using = X509CertificateSerializer::class)
|
||||
@JsonDeserialize(using = X509CertificateDeserializer::class)
|
||||
private interface X509CertificateMixin
|
||||
|
||||
private object X509CertificateSerializer : JsonSerializer<X509Certificate>() {
|
||||
val keyUsages = arrayOf(
|
||||
"digitalSignature",
|
||||
"nonRepudiation",
|
||||
"keyEncipherment",
|
||||
"dataEncipherment",
|
||||
"keyAgreement",
|
||||
"keyCertSign",
|
||||
"cRLSign",
|
||||
"encipherOnly",
|
||||
"decipherOnly"
|
||||
)
|
||||
|
||||
val keyPurposeIds = KeyPurposeId::class.java
|
||||
.fields
|
||||
.filter { Modifier.isStatic(it.modifiers) && it.type == KeyPurposeId::class.java }
|
||||
.associateBy({ (it.get(null) as KeyPurposeId).id }, { it.name })
|
||||
|
||||
val knownExtensions = setOf("2.5.29.15", "2.5.29.37", "2.5.29.19", "2.5.29.17", "2.5.29.18", CordaOID.X509_EXTENSION_CORDA_ROLE)
|
||||
|
||||
override fun serialize(value: X509Certificate, gen: JsonGenerator, serializers: SerializerProvider) {
|
||||
gen.jsonObject {
|
||||
writeNumberField("version", value.version)
|
||||
writeObjectField("serialNumber", value.serialNumber)
|
||||
writeObjectField("subject", value.subjectX500Principal)
|
||||
writeObjectField("publicKey", value.publicKey)
|
||||
writeObjectField("issuer", value.issuerX500Principal)
|
||||
writeObjectField("notBefore", value.notBefore)
|
||||
writeObjectField("notAfter", value.notAfter)
|
||||
writeObjectField("issuerUniqueID", value.issuerUniqueID)
|
||||
writeObjectField("subjectUniqueID", value.subjectUniqueID)
|
||||
writeObjectField("keyUsage", value.keyUsage?.asList()?.mapIndexedNotNull { i, flag -> if (flag) keyUsages[i] else null })
|
||||
writeObjectField("extendedKeyUsage", value.extendedKeyUsage.map { keyPurposeIds.getOrDefault(it, it) })
|
||||
jsonObject("basicConstraints") {
|
||||
writeBooleanField("isCA", value.basicConstraints != -1)
|
||||
writeObjectField("pathLength", value.basicConstraints.let { if (it != Int.MAX_VALUE) it else null })
|
||||
}
|
||||
writeObjectField("subjectAlternativeNames", value.subjectAlternativeNames)
|
||||
writeObjectField("issuerAlternativeNames", value.issuerAlternativeNames)
|
||||
writeObjectField("cordaCertRole", CertRole.extract(value))
|
||||
writeObjectField("otherCriticalExtensions", value.criticalExtensionOIDs - knownExtensions)
|
||||
writeObjectField("otherNonCriticalExtensions", value.nonCriticalExtensionOIDs - knownExtensions)
|
||||
writeBinaryField("encoded", value.encoded)
|
||||
}
|
||||
}
|
||||
|
||||
override val deserializer = object : StdDeserializer<SignedTransaction>(type) {
|
||||
override fun deserialize(parser: JsonParser, context: DeserializationContext): SignedTransaction {
|
||||
val mapper = parser.codec as ObjectMapper
|
||||
val json = mapper.readTree<JsonNode>(parser)
|
||||
|
||||
val txBits = json.get<ByteArray>("txBits", JsonNode::isTextual, mapper, parser)
|
||||
val signatures = json.get<TransactionSignatures>("signatures", JsonNode::isArray, mapper, parser)
|
||||
|
||||
return SignedTransaction(SerializedBytes(txBits), signatures)
|
||||
}
|
||||
}
|
||||
|
||||
private class TransactionSignatures : ArrayList<TransactionSignature>()
|
||||
}
|
||||
|
||||
|
||||
|
||||
//
|
||||
// The following should not have been made public and are thus deprecated with warnings.
|
||||
//
|
||||
|
||||
@Deprecated("No longer used as jackson already has a toString serializer",
|
||||
replaceWith = ReplaceWith("com.fasterxml.jackson.databind.ser.std.ToStringSerializer.instance"))
|
||||
object ToStringSerializer : JsonSerializer<Any>() {
|
||||
override fun serialize(obj: Any, generator: JsonGenerator, provider: SerializerProvider) {
|
||||
generator.writeString(obj.toString())
|
||||
private class X509CertificateDeserializer : JsonDeserializer<X509Certificate>() {
|
||||
private val certFactory = CertificateFactory.getInstance("X.509")
|
||||
override fun deserialize(parser: JsonParser, ctxt: DeserializationContext): X509Certificate {
|
||||
val encoded = parser.readValueAsTree<ObjectNode>()["encoded"]
|
||||
return certFactory.generateCertificate(encoded.binaryValue().inputStream()) as X509Certificate
|
||||
}
|
||||
}
|
||||
|
||||
@JsonSerialize(using = CertPathSerializer::class)
|
||||
@JsonDeserialize(using = CertPathDeserializer::class)
|
||||
private interface CertPathMixin
|
||||
|
||||
private class CertPathSerializer : JsonSerializer<CertPath>() {
|
||||
override fun serialize(value: CertPath, gen: JsonGenerator, serializers: SerializerProvider) {
|
||||
gen.writeObject(CertPathWrapper(value.type, uncheckedCast(value.certificates)))
|
||||
}
|
||||
}
|
||||
|
||||
private class CertPathDeserializer : JsonDeserializer<CertPath>() {
|
||||
private val certFactory = CertificateFactory.getInstance("X.509")
|
||||
override fun deserialize(parser: JsonParser, ctxt: DeserializationContext): CertPath {
|
||||
val wrapper = parser.readValueAs<CertPathWrapper>()
|
||||
return certFactory.generateCertPath(wrapper.certificates)
|
||||
}
|
||||
}
|
||||
|
||||
private data class CertPathWrapper(val type: String, val certificates: List<X509Certificate>) {
|
||||
init {
|
||||
require(type == "X.509") { "Only X.509 cert paths are supported" }
|
||||
}
|
||||
}
|
||||
|
||||
@JsonDeserialize(using = PartyDeserializer::class)
|
||||
private interface AbstractPartyMixin
|
||||
|
||||
@JsonSerialize(using = AnonymousPartySerializer::class)
|
||||
@JsonDeserialize(using = AnonymousPartyDeserializer::class)
|
||||
private interface AnonymousPartyMixin
|
||||
|
||||
@Deprecated("This is an internal class, do not use")
|
||||
object AnonymousPartySerializer : JsonSerializer<AnonymousParty>() {
|
||||
override fun serialize(value: AnonymousParty, generator: JsonGenerator, provider: SerializerProvider) {
|
||||
@ -320,6 +379,9 @@ object JacksonSupport {
|
||||
}
|
||||
}
|
||||
|
||||
@JsonSerialize(using = PartySerializer::class)
|
||||
private interface PartyMixin
|
||||
|
||||
@Deprecated("This is an internal class, do not use")
|
||||
object PartySerializer : JsonSerializer<Party>() {
|
||||
override fun serialize(value: Party, generator: JsonGenerator, provider: SerializerProvider) {
|
||||
@ -354,13 +416,9 @@ object JacksonSupport {
|
||||
}
|
||||
}
|
||||
|
||||
@Deprecated("This is an internal class, do not use")
|
||||
// This is no longer used
|
||||
object CordaX500NameSerializer : JsonSerializer<CordaX500Name>() {
|
||||
override fun serialize(obj: CordaX500Name, generator: JsonGenerator, provider: SerializerProvider) {
|
||||
generator.writeString(obj.toString())
|
||||
}
|
||||
}
|
||||
@ToStringSerialize
|
||||
@JsonDeserialize(using = CordaX500NameDeserializer::class)
|
||||
private interface CordaX500NameMixin
|
||||
|
||||
@Deprecated("This is an internal class, do not use")
|
||||
object CordaX500NameDeserializer : JsonDeserializer<CordaX500Name>() {
|
||||
@ -373,13 +431,9 @@ object JacksonSupport {
|
||||
}
|
||||
}
|
||||
|
||||
@Deprecated("This is an internal class, do not use")
|
||||
// This is no longer used
|
||||
object NodeInfoSerializer : JsonSerializer<NodeInfo>() {
|
||||
override fun serialize(value: NodeInfo, gen: JsonGenerator, serializers: SerializerProvider) {
|
||||
gen.writeString(Base58.encode(value.serialize().bytes))
|
||||
}
|
||||
}
|
||||
@JsonIgnoreProperties("legalIdentities") // This is already covered by legalIdentitiesAndCerts
|
||||
@JsonDeserialize(using = NodeInfoDeserializer::class)
|
||||
private interface NodeInfoMixin
|
||||
|
||||
@Deprecated("This is an internal class, do not use")
|
||||
object NodeInfoDeserializer : JsonDeserializer<NodeInfo>() {
|
||||
@ -390,17 +444,10 @@ object JacksonSupport {
|
||||
}
|
||||
}
|
||||
|
||||
@Deprecated("This is an internal class, do not use")
|
||||
// This is no longer used
|
||||
object SecureHashSerializer : JsonSerializer<SecureHash>() {
|
||||
override fun serialize(obj: SecureHash, generator: JsonGenerator, provider: SerializerProvider) {
|
||||
generator.writeString(obj.toString())
|
||||
}
|
||||
}
|
||||
@ToStringSerialize
|
||||
@JsonDeserialize(using = SecureHashDeserializer::class)
|
||||
private interface SecureHashSHA256Mixin
|
||||
|
||||
/**
|
||||
* Implemented as a class so that we can instantiate for T.
|
||||
*/
|
||||
@Deprecated("This is an internal class, do not use")
|
||||
class SecureHashDeserializer<T : SecureHash> : JsonDeserializer<T>() {
|
||||
override fun deserialize(parser: JsonParser, context: DeserializationContext): T {
|
||||
@ -412,6 +459,10 @@ object JacksonSupport {
|
||||
}
|
||||
}
|
||||
|
||||
@JsonSerialize(using = PublicKeySerializer::class)
|
||||
@JsonDeserialize(using = PublicKeyDeserializer::class)
|
||||
private interface PublicKeyMixin
|
||||
|
||||
@Deprecated("This is an internal class, do not use")
|
||||
object PublicKeySerializer : JsonSerializer<PublicKey>() {
|
||||
override fun serialize(value: PublicKey, generator: JsonGenerator, provider: SerializerProvider) {
|
||||
@ -430,13 +481,9 @@ object JacksonSupport {
|
||||
}
|
||||
}
|
||||
|
||||
@Deprecated("This is an internal class, do not use")
|
||||
// This is no longer used
|
||||
object AmountSerializer : JsonSerializer<Amount<*>>() {
|
||||
override fun serialize(value: Amount<*>, gen: JsonGenerator, serializers: SerializerProvider) {
|
||||
gen.writeString(value.toString())
|
||||
}
|
||||
}
|
||||
@ToStringSerialize
|
||||
@JsonDeserialize(using = AmountDeserializer::class)
|
||||
private interface AmountMixin
|
||||
|
||||
@Deprecated("This is an internal class, do not use")
|
||||
object AmountDeserializer : JsonDeserializer<Amount<*>>() {
|
||||
@ -444,20 +491,30 @@ object JacksonSupport {
|
||||
return if (parser.currentToken == JsonToken.VALUE_STRING) {
|
||||
Amount.parseCurrency(parser.text)
|
||||
} else {
|
||||
try {
|
||||
val tree = parser.readValueAsTree<ObjectNode>()
|
||||
val quantity = tree["quantity"].apply { require(canConvertToLong()) }
|
||||
val token = tree["token"]
|
||||
// Attempt parsing as a currency token. TODO: This needs thought about how to extend to other token types.
|
||||
val currency = (parser.codec as ObjectMapper).convertValue<Currency>(token)
|
||||
Amount(quantity.longValue(), currency)
|
||||
} catch (e: Exception) {
|
||||
throw JsonParseException(parser, "Invalid amount", e)
|
||||
}
|
||||
val wrapper = parser.readValueAs<CurrencyAmountWrapper>()
|
||||
Amount(wrapper.quantity, wrapper.token)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private data class CurrencyAmountWrapper(val quantity: Long, val token: Currency)
|
||||
|
||||
@JsonDeserialize(using = OpaqueBytesDeserializer::class)
|
||||
private interface ByteSequenceMixin {
|
||||
@Suppress("unused")
|
||||
@JsonValue
|
||||
fun copyBytes(): ByteArray
|
||||
}
|
||||
|
||||
@JsonIgnoreProperties("offset", "size")
|
||||
@JsonSerialize
|
||||
@JsonDeserialize
|
||||
private interface ByteSequenceWithPropertiesMixin {
|
||||
@Suppress("unused")
|
||||
@JsonValue(false)
|
||||
fun copyBytes(): ByteArray
|
||||
}
|
||||
|
||||
@Deprecated("This is an internal class, do not use")
|
||||
object OpaqueBytesDeserializer : JsonDeserializer<OpaqueBytes>() {
|
||||
override fun deserialize(parser: JsonParser, ctxt: DeserializationContext): OpaqueBytes {
|
||||
@ -465,6 +522,47 @@ object JacksonSupport {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
//
|
||||
// Everything below this point is no longer used but can't be deleted as they leaked into the public API
|
||||
//
|
||||
|
||||
@Deprecated("No longer used as jackson already has a toString serializer",
|
||||
replaceWith = ReplaceWith("com.fasterxml.jackson.databind.ser.std.ToStringSerializer.instance"))
|
||||
object ToStringSerializer : JsonSerializer<Any>() {
|
||||
override fun serialize(obj: Any, generator: JsonGenerator, provider: SerializerProvider) {
|
||||
generator.writeString(obj.toString())
|
||||
}
|
||||
}
|
||||
|
||||
@Deprecated("This is an internal class, do not use")
|
||||
object CordaX500NameSerializer : JsonSerializer<CordaX500Name>() {
|
||||
override fun serialize(obj: CordaX500Name, generator: JsonGenerator, provider: SerializerProvider) {
|
||||
generator.writeString(obj.toString())
|
||||
}
|
||||
}
|
||||
|
||||
@Deprecated("This is an internal class, do not use")
|
||||
object NodeInfoSerializer : JsonSerializer<NodeInfo>() {
|
||||
override fun serialize(value: NodeInfo, gen: JsonGenerator, serializers: SerializerProvider) {
|
||||
gen.writeString(Base58.encode(value.serialize().bytes))
|
||||
}
|
||||
}
|
||||
|
||||
@Deprecated("This is an internal class, do not use")
|
||||
object SecureHashSerializer : JsonSerializer<SecureHash>() {
|
||||
override fun serialize(obj: SecureHash, generator: JsonGenerator, provider: SerializerProvider) {
|
||||
generator.writeString(obj.toString())
|
||||
}
|
||||
}
|
||||
|
||||
@Deprecated("This is an internal class, do not use")
|
||||
object AmountSerializer : JsonSerializer<Amount<*>>() {
|
||||
override fun serialize(value: Amount<*>, gen: JsonGenerator, serializers: SerializerProvider) {
|
||||
gen.writeString(value.toString())
|
||||
}
|
||||
}
|
||||
|
||||
@Deprecated("This is an internal class, do not use")
|
||||
object OpaqueBytesSerializer : JsonSerializer<OpaqueBytes>() {
|
||||
override fun serialize(value: OpaqueBytes, gen: JsonGenerator, serializers: SerializerProvider) {
|
||||
@ -477,7 +575,7 @@ object JacksonSupport {
|
||||
abstract class SignedTransactionMixin {
|
||||
@JsonIgnore abstract fun getTxBits(): SerializedBytes<CoreTransaction>
|
||||
@JsonProperty("signatures") protected abstract fun getSigs(): List<TransactionSignature>
|
||||
@JsonProperty protected abstract fun getTransaction(): CoreTransaction // TODO It seems this should be coreTransaction
|
||||
@JsonProperty protected abstract fun getTransaction(): CoreTransaction
|
||||
@JsonIgnore abstract fun getTx(): WireTransaction
|
||||
@JsonIgnore abstract fun getNotaryChangeTx(): NotaryChangeWireTransaction
|
||||
@JsonIgnore abstract fun getInputs(): List<StateRef>
|
||||
|
@ -3,8 +3,11 @@ package net.corda.client.jackson.internal
|
||||
import com.fasterxml.jackson.core.JsonGenerator
|
||||
import com.fasterxml.jackson.core.JsonParser
|
||||
import com.fasterxml.jackson.databind.JsonDeserializer
|
||||
import com.fasterxml.jackson.databind.JsonNode
|
||||
import com.fasterxml.jackson.databind.JsonSerializer
|
||||
import com.fasterxml.jackson.databind.ObjectMapper
|
||||
import com.fasterxml.jackson.databind.module.SimpleModule
|
||||
import com.fasterxml.jackson.module.kotlin.convertValue
|
||||
|
||||
inline fun <reified T : Any> SimpleModule.addSerAndDeser(serializer: JsonSerializer<in T>, deserializer: JsonDeserializer<T>) {
|
||||
addSerializer(T::class.java, serializer)
|
||||
@ -19,3 +22,5 @@ inline fun JsonGenerator.jsonObject(fieldName: String? = null, gen: JsonGenerato
|
||||
}
|
||||
|
||||
inline fun <reified T> JsonParser.readValueAs(): T = readValueAs(T::class.java)
|
||||
|
||||
inline fun <reified T : Any> JsonNode.valueAs(mapper: ObjectMapper): T = mapper.convertValue(this)
|
||||
|
@ -10,14 +10,16 @@
|
||||
|
||||
package net.corda.client.jackson
|
||||
|
||||
import com.fasterxml.jackson.databind.SerializationFeature
|
||||
import com.fasterxml.jackson.databind.node.ArrayNode
|
||||
import com.fasterxml.jackson.core.JsonFactory
|
||||
import com.fasterxml.jackson.databind.JsonNode
|
||||
import com.fasterxml.jackson.databind.node.BinaryNode
|
||||
import com.fasterxml.jackson.databind.node.ObjectNode
|
||||
import com.fasterxml.jackson.databind.node.TextNode
|
||||
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
|
||||
import com.fasterxml.jackson.module.kotlin.convertValue
|
||||
import com.nhaarman.mockito_kotlin.doReturn
|
||||
import com.nhaarman.mockito_kotlin.whenever
|
||||
import net.corda.client.jackson.internal.valueAs
|
||||
import net.corda.core.contracts.Amount
|
||||
import net.corda.core.cordapp.CordappProvider
|
||||
import net.corda.core.crypto.*
|
||||
@ -26,14 +28,16 @@ import net.corda.core.identity.AbstractParty
|
||||
import net.corda.core.identity.AnonymousParty
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.internal.DigitalSignatureWithCert
|
||||
import net.corda.core.node.NodeInfo
|
||||
import net.corda.core.node.ServiceHub
|
||||
import net.corda.core.serialization.CordaSerializable
|
||||
import net.corda.core.serialization.SerializedBytes
|
||||
import net.corda.core.serialization.serialize
|
||||
import net.corda.core.transactions.SignedTransaction
|
||||
import net.corda.core.utilities.NetworkHostAndPort
|
||||
import net.corda.core.utilities.OpaqueBytes
|
||||
import net.corda.core.utilities.toBase58String
|
||||
import net.corda.core.utilities.toBase64
|
||||
import net.corda.core.utilities.*
|
||||
import net.corda.finance.USD
|
||||
import net.corda.nodeapi.internal.crypto.x509Certificates
|
||||
import net.corda.testing.common.internal.testNetworkParameters
|
||||
import net.corda.testing.contracts.DummyContract
|
||||
import net.corda.testing.core.*
|
||||
@ -44,19 +48,29 @@ import org.assertj.core.api.Assertions.assertThatThrownBy
|
||||
import org.junit.Before
|
||||
import org.junit.Rule
|
||||
import org.junit.Test
|
||||
import org.junit.runner.RunWith
|
||||
import org.junit.runners.Parameterized
|
||||
import org.junit.runners.Parameterized.Parameters
|
||||
import java.math.BigInteger
|
||||
import java.security.PublicKey
|
||||
import java.security.cert.CertPath
|
||||
import java.security.cert.X509Certificate
|
||||
import java.util.*
|
||||
import javax.security.auth.x500.X500Principal
|
||||
import kotlin.collections.ArrayList
|
||||
import kotlin.test.assertEquals
|
||||
|
||||
class JacksonSupportTest {
|
||||
@RunWith(Parameterized::class)
|
||||
class JacksonSupportTest(@Suppress("unused") private val name: String, factory: JsonFactory) {
|
||||
private companion object {
|
||||
val SEED: BigInteger = BigInteger.valueOf(20170922L)
|
||||
val ALICE_PUBKEY = TestIdentity(ALICE_NAME, 70).publicKey
|
||||
val BOB_PUBKEY = TestIdentity(BOB_NAME, 70).publicKey
|
||||
val DUMMY_NOTARY = TestIdentity(DUMMY_NOTARY_NAME, 20).party
|
||||
val MINI_CORP = TestIdentity(CordaX500Name("MiniCorp", "London", "GB"))
|
||||
|
||||
@Parameters(name = "{0}")
|
||||
@JvmStatic
|
||||
fun factories() = arrayOf(arrayOf("JSON", JsonFactory()), arrayOf("YAML", YAMLFactory()))
|
||||
}
|
||||
|
||||
@Rule
|
||||
@ -64,7 +78,7 @@ class JacksonSupportTest {
|
||||
val testSerialization = SerializationEnvironmentRule()
|
||||
|
||||
private val partyObjectMapper = TestPartyObjectMapper()
|
||||
private val mapper = JacksonSupport.createPartyObjectMapper(partyObjectMapper)
|
||||
private val mapper = JacksonSupport.createPartyObjectMapper(partyObjectMapper, factory)
|
||||
|
||||
private lateinit var services: ServiceHub
|
||||
private lateinit var cordappProvider: CordappProvider
|
||||
@ -76,44 +90,29 @@ class JacksonSupportTest {
|
||||
doReturn(cordappProvider).whenever(services).cordappProvider
|
||||
}
|
||||
|
||||
private class Dummy(val notional: Amount<Currency>)
|
||||
|
||||
@Test
|
||||
fun `read Amount`() {
|
||||
val oldJson = """
|
||||
{
|
||||
"notional": {
|
||||
"quantity": 2500000000,
|
||||
"token": "USD"
|
||||
}
|
||||
}
|
||||
"""
|
||||
val newJson = """ { "notional" : "$25000000" } """
|
||||
|
||||
assertEquals(Amount(2500000000L, USD), mapper.readValue(newJson, Dummy::class.java).notional)
|
||||
assertEquals(Amount(2500000000L, USD), mapper.readValue(oldJson, Dummy::class.java).notional)
|
||||
fun `Amount(Currency) serialization`() {
|
||||
assertThat(mapper.valueToTree<TextNode>(Amount.parseCurrency("£25000000")).textValue()).isEqualTo("25000000.00 GBP")
|
||||
assertThat(mapper.valueToTree<TextNode>(Amount.parseCurrency("$250000")).textValue()).isEqualTo("250000.00 USD")
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `write Amount`() {
|
||||
val writer = mapper.writer().without(SerializationFeature.INDENT_OUTPUT)
|
||||
assertEquals("""{"notional":"25000000.00 GBP"}""", writer.writeValueAsString(Dummy(Amount.parseCurrency("£25000000"))))
|
||||
assertEquals("""{"notional":"250000.00 USD"}""", writer.writeValueAsString(Dummy(Amount.parseCurrency("$250000"))))
|
||||
fun `Amount(Currency) deserialization`() {
|
||||
val old = mapOf(
|
||||
"quantity" to 2500000000,
|
||||
"token" to "USD"
|
||||
)
|
||||
assertThat(mapper.convertValue<Amount<Currency>>(old)).isEqualTo(Amount(2_500_000_000, USD))
|
||||
assertThat(mapper.convertValue<Amount<Currency>>(TextNode("$25000000"))).isEqualTo(Amount(2_500_000_000, USD))
|
||||
}
|
||||
|
||||
@Test
|
||||
fun SignedTransaction() {
|
||||
val attachmentRef = SecureHash.randomSHA256()
|
||||
doReturn(attachmentRef).whenever(cordappProvider).getContractAttachmentID(DummyContract.PROGRAM_ID)
|
||||
doReturn(testNetworkParameters()).whenever(services).networkParameters
|
||||
|
||||
val writer = mapper.writer()
|
||||
val stx = makeDummyStx()
|
||||
val json = writer.writeValueAsString(stx)
|
||||
|
||||
val deserializedTransaction = mapper.readValue(json, SignedTransaction::class.java)
|
||||
|
||||
assertThat(deserializedTransaction).isEqualTo(stx)
|
||||
fun ByteSequence() {
|
||||
val byteSequence: ByteSequence = OpaqueBytes.of(1, 2, 3, 4).subSequence(0, 2)
|
||||
val json = mapper.valueToTree<BinaryNode>(byteSequence)
|
||||
assertThat(json.binaryValue()).containsExactly(1, 2)
|
||||
assertThat(json.asText()).isEqualTo(byteArrayOf(1, 2).toBase64())
|
||||
assertThat(mapper.convertValue<ByteSequence>(json)).isEqualTo(byteSequence)
|
||||
}
|
||||
|
||||
@Test
|
||||
@ -125,6 +124,105 @@ class JacksonSupportTest {
|
||||
assertThat(mapper.convertValue<OpaqueBytes>(json)).isEqualTo(opaqueBytes)
|
||||
}
|
||||
|
||||
@Test
|
||||
fun SerializedBytes() {
|
||||
val data = TestData(BOB_NAME, "Summary", SubTestData(1234))
|
||||
val serializedBytes = data.serialize()
|
||||
val json = mapper.valueToTree<ObjectNode>(serializedBytes)
|
||||
println(mapper.writeValueAsString(json))
|
||||
assertThat(json["class"].textValue()).isEqualTo(TestData::class.java.name)
|
||||
assertThat(json["deserialized"].valueAs<TestData>(mapper)).isEqualTo(data)
|
||||
// Check that the entire JSON object can be converted back to the same SerializedBytes
|
||||
assertThat(mapper.convertValue<SerializedBytes<*>>(json)).isEqualTo(serializedBytes)
|
||||
assertThat(mapper.convertValue<SerializedBytes<*>>(BinaryNode(serializedBytes.bytes))).isEqualTo(serializedBytes)
|
||||
}
|
||||
|
||||
// This is the class that was used to serialise the message for the test below. It's commented out so that it's no
|
||||
// longer on the classpath.
|
||||
// @CordaSerializable
|
||||
// data class ClassNotOnClasspath(val name: CordaX500Name, val value: Int)
|
||||
|
||||
@Test
|
||||
fun `SerializedBytes of class not on classpath`() {
|
||||
// The contents of the file were written out as follows:
|
||||
// ClassNotOnClasspath(BOB_NAME, 54321).serialize().open().copyTo("build" / "class-not-on-classpath-data")
|
||||
|
||||
val serializedBytes = SerializedBytes<Any>(javaClass.getResource("class-not-on-classpath-data").readBytes())
|
||||
val json = mapper.valueToTree<ObjectNode>(serializedBytes)
|
||||
println(mapper.writeValueAsString(json))
|
||||
assertThat(json["class"].textValue()).isEqualTo("net.corda.client.jackson.JacksonSupportTest\$ClassNotOnClasspath")
|
||||
assertThat(json["deserialized"].valueAs<Map<*, *>>(mapper)).isEqualTo(mapOf(
|
||||
"name" to BOB_NAME.toString(),
|
||||
"value" to 54321
|
||||
))
|
||||
assertThat(mapper.convertValue<SerializedBytes<*>>(BinaryNode(serializedBytes.bytes))).isEqualTo(serializedBytes)
|
||||
}
|
||||
|
||||
@Test
|
||||
fun DigitalSignature() {
|
||||
val digitalSignature = DigitalSignature(secureRandomBytes(128))
|
||||
val json = mapper.valueToTree<BinaryNode>(digitalSignature)
|
||||
assertThat(json.binaryValue()).isEqualTo(digitalSignature.bytes)
|
||||
assertThat(json.asText()).isEqualTo(digitalSignature.bytes.toBase64())
|
||||
assertThat(mapper.convertValue<DigitalSignature>(json)).isEqualTo(digitalSignature)
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `DigitalSignature WithKey`() {
|
||||
val digitalSignature = DigitalSignature.WithKey(BOB_PUBKEY, secureRandomBytes(128))
|
||||
val json = mapper.valueToTree<ObjectNode>(digitalSignature)
|
||||
val (by, bytes) = json.assertHasOnlyFields("by", "bytes")
|
||||
assertThat(by.valueAs<PublicKey>(mapper)).isEqualTo(BOB_PUBKEY)
|
||||
assertThat(bytes.binaryValue()).isEqualTo(digitalSignature.bytes)
|
||||
assertThat(mapper.convertValue<DigitalSignature.WithKey>(json)).isEqualTo(digitalSignature)
|
||||
}
|
||||
|
||||
@Test
|
||||
fun DigitalSignatureWithCert() {
|
||||
val digitalSignature = DigitalSignatureWithCert(MINI_CORP.identity.certificate, secureRandomBytes(128))
|
||||
val json = mapper.valueToTree<ObjectNode>(digitalSignature)
|
||||
val (by, bytes) = json.assertHasOnlyFields("by", "bytes")
|
||||
assertThat(by.valueAs<X509Certificate>(mapper)).isEqualTo(MINI_CORP.identity.certificate)
|
||||
assertThat(bytes.binaryValue()).isEqualTo(digitalSignature.bytes)
|
||||
assertThat(mapper.convertValue<DigitalSignatureWithCert>(json)).isEqualTo(digitalSignature)
|
||||
}
|
||||
|
||||
@Test
|
||||
fun TransactionSignature() {
|
||||
val metadata = SignatureMetadata(1, 1)
|
||||
val transactionSignature = TransactionSignature(secureRandomBytes(128), BOB_PUBKEY, metadata)
|
||||
val json = mapper.valueToTree<ObjectNode>(transactionSignature)
|
||||
val (bytes, by, signatureMetadata, partialMerkleTree) = json.assertHasOnlyFields(
|
||||
"bytes",
|
||||
"by",
|
||||
"signatureMetadata",
|
||||
"partialMerkleTree"
|
||||
)
|
||||
assertThat(bytes.binaryValue()).isEqualTo(transactionSignature.bytes)
|
||||
assertThat(by.valueAs<PublicKey>(mapper)).isEqualTo(BOB_PUBKEY)
|
||||
assertThat(signatureMetadata.valueAs<SignatureMetadata>(mapper)).isEqualTo(metadata)
|
||||
assertThat(partialMerkleTree.isNull).isTrue()
|
||||
assertThat(mapper.convertValue<TransactionSignature>(json)).isEqualTo(transactionSignature)
|
||||
}
|
||||
|
||||
// TODO Add test for PartialMerkleTree
|
||||
|
||||
@Test
|
||||
fun SignedTransaction() {
|
||||
val attachmentRef = SecureHash.randomSHA256()
|
||||
doReturn(attachmentRef).whenever(cordappProvider).getContractAttachmentID(DummyContract.PROGRAM_ID)
|
||||
doReturn(testNetworkParameters()).whenever(services).networkParameters
|
||||
|
||||
val stx = makeDummyStx()
|
||||
val json = mapper.valueToTree<ObjectNode>(stx)
|
||||
println(mapper.writeValueAsString(json))
|
||||
val (txBits, signatures) = json.assertHasOnlyFields("txBits", "signatures")
|
||||
assertThat(txBits.binaryValue()).isEqualTo(stx.txBits.bytes)
|
||||
val sigs = signatures.elements().asSequence().map { it.valueAs<TransactionSignature>(mapper) }.toList()
|
||||
assertThat(sigs).isEqualTo(stx.sigs)
|
||||
assertThat(mapper.convertValue<SignedTransaction>(json)).isEqualTo(stx)
|
||||
}
|
||||
|
||||
@Test
|
||||
fun CordaX500Name() {
|
||||
testToStringSerialisation(CordaX500Name(commonName = "COMMON", organisationUnit = "ORG UNIT", organisation = "ORG", locality = "NYC", state = "NY", country = "US"))
|
||||
@ -221,31 +319,40 @@ class JacksonSupportTest {
|
||||
|
||||
@Test
|
||||
fun AnonymousParty() {
|
||||
val anon = AnonymousParty(ALICE_PUBKEY)
|
||||
val json = mapper.valueToTree<TextNode>(anon)
|
||||
val anonymousParty = AnonymousParty(ALICE_PUBKEY)
|
||||
val json = mapper.valueToTree<TextNode>(anonymousParty)
|
||||
assertThat(json.textValue()).isEqualTo(ALICE_PUBKEY.toBase58String())
|
||||
assertThat(mapper.convertValue<AnonymousParty>(json)).isEqualTo(anon)
|
||||
assertThat(mapper.convertValue<AnonymousParty>(json)).isEqualTo(anonymousParty)
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `PartyAndCertificate serialisation`() {
|
||||
val json = mapper.valueToTree<ObjectNode>(MINI_CORP.identity)
|
||||
assertThat(json.fieldNames()).containsOnly("name", "owningKey")
|
||||
assertThat(mapper.convertValue<CordaX500Name>(json["name"])).isEqualTo(MINI_CORP.name)
|
||||
assertThat(mapper.convertValue<PublicKey>(json["owningKey"])).isEqualTo(MINI_CORP.publicKey)
|
||||
val (name, owningKey) = json.assertHasOnlyFields("name", "owningKey")
|
||||
assertThat(name.valueAs<CordaX500Name>(mapper)).isEqualTo(MINI_CORP.name)
|
||||
assertThat(owningKey.valueAs<PublicKey>(mapper)).isEqualTo(MINI_CORP.publicKey)
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `NodeInfo serialisation`() {
|
||||
val (nodeInfo) = createNodeInfoAndSigned(ALICE_NAME)
|
||||
val json = mapper.valueToTree<ObjectNode>(nodeInfo)
|
||||
assertThat(json.fieldNames()).containsOnly("addresses", "legalIdentitiesAndCerts", "platformVersion", "serial")
|
||||
val address = (json["addresses"] as ArrayNode).also { assertThat(it).hasSize(1) }[0]
|
||||
assertThat(mapper.convertValue<NetworkHostAndPort>(address)).isEqualTo(nodeInfo.addresses[0])
|
||||
val identity = (json["legalIdentitiesAndCerts"] as ArrayNode).also { assertThat(it).hasSize(1) }[0]
|
||||
assertThat(mapper.convertValue<CordaX500Name>(identity["name"])).isEqualTo(ALICE_NAME)
|
||||
assertThat(mapper.convertValue<Int>(json["platformVersion"])).isEqualTo(nodeInfo.platformVersion)
|
||||
assertThat(mapper.convertValue<Long>(json["serial"])).isEqualTo(nodeInfo.serial)
|
||||
val (addresses, legalIdentitiesAndCerts, platformVersion, serial) = json.assertHasOnlyFields(
|
||||
"addresses",
|
||||
"legalIdentitiesAndCerts",
|
||||
"platformVersion",
|
||||
"serial"
|
||||
)
|
||||
addresses.run {
|
||||
assertThat(this).hasSize(1)
|
||||
assertThat(this[0].valueAs<NetworkHostAndPort>(mapper)).isEqualTo(nodeInfo.addresses[0])
|
||||
}
|
||||
legalIdentitiesAndCerts.run {
|
||||
assertThat(this).hasSize(1)
|
||||
assertThat(this[0]["name"].valueAs<CordaX500Name>(mapper)).isEqualTo(ALICE_NAME)
|
||||
}
|
||||
assertThat(platformVersion.intValue()).isEqualTo(nodeInfo.platformVersion)
|
||||
assertThat(serial.longValue()).isEqualTo(nodeInfo.serial)
|
||||
}
|
||||
|
||||
@Test
|
||||
@ -274,6 +381,40 @@ class JacksonSupportTest {
|
||||
assertThat(convertToNodeInfo()).isEqualTo(nodeInfo)
|
||||
}
|
||||
|
||||
@Test
|
||||
fun CertPath() {
|
||||
val certPath = MINI_CORP.identity.certPath
|
||||
val json = mapper.valueToTree<ObjectNode>(certPath)
|
||||
println(mapper.writeValueAsString(json))
|
||||
val (type, certificates) = json.assertHasOnlyFields("type", "certificates")
|
||||
assertThat(type.textValue()).isEqualTo(certPath.type)
|
||||
certificates.run {
|
||||
val serialNumbers = elements().asSequence().map { it["serialNumber"].bigIntegerValue() }.toList()
|
||||
assertThat(serialNumbers).isEqualTo(certPath.x509Certificates.map { it.serialNumber })
|
||||
}
|
||||
assertThat(mapper.convertValue<CertPath>(json).encoded).isEqualTo(certPath.encoded)
|
||||
}
|
||||
|
||||
@Test
|
||||
fun X509Certificate() {
|
||||
val cert: X509Certificate = MINI_CORP.identity.certificate
|
||||
val json = mapper.valueToTree<ObjectNode>(cert)
|
||||
println(mapper.writeValueAsString(json))
|
||||
assertThat(json["serialNumber"].bigIntegerValue()).isEqualTo(cert.serialNumber)
|
||||
assertThat(json["issuer"].valueAs<X500Principal>(mapper)).isEqualTo(cert.issuerX500Principal)
|
||||
assertThat(json["subject"].valueAs<X500Principal>(mapper)).isEqualTo(cert.subjectX500Principal)
|
||||
assertThat(json["publicKey"].valueAs<PublicKey>(mapper)).isEqualTo(cert.publicKey)
|
||||
assertThat(json["notAfter"].valueAs<Date>(mapper)).isEqualTo(cert.notAfter)
|
||||
assertThat(json["notBefore"].valueAs<Date>(mapper)).isEqualTo(cert.notBefore)
|
||||
assertThat(json["encoded"].binaryValue()).isEqualTo(cert.encoded)
|
||||
assertThat(mapper.convertValue<X509Certificate>(json).encoded).isEqualTo(cert.encoded)
|
||||
}
|
||||
|
||||
@Test
|
||||
fun X500Principal() {
|
||||
testToStringSerialisation(X500Principal("CN=Common,L=London,O=Org,C=UK"))
|
||||
}
|
||||
|
||||
private fun makeDummyStx(): SignedTransaction {
|
||||
val wtx = DummyContract.generateInitial(1, DUMMY_NOTARY, MINI_CORP.ref(1))
|
||||
.toWireTransaction(services)
|
||||
@ -290,6 +431,17 @@ class JacksonSupportTest {
|
||||
assertThat(mapper.convertValue<T>(json)).isEqualTo(value)
|
||||
}
|
||||
|
||||
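// Asserts that the node contains exactly the given fields and returns them in the requested order, for destructuring in the tests above.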
private fun JsonNode.assertHasOnlyFields(vararg fieldNames: String): List<JsonNode> {
|
||||
assertThat(fieldNames()).containsOnly(*fieldNames)
|
||||
return fieldNames.map { this[it] }
|
||||
}
|
||||
|
||||
@CordaSerializable
|
||||
private data class TestData(val name: CordaX500Name, val summary: String, val subData: SubTestData)
|
||||
|
||||
@CordaSerializable
|
||||
private data class SubTestData(val value: Int)
|
||||
|
||||
private class TestPartyObjectMapper : JacksonSupport.PartyObjectMapper {
|
||||
val identities = ArrayList<Party>()
|
||||
val nodes = ArrayList<NodeInfo>()
|
||||
|
@ -22,7 +22,10 @@ Unreleased
|
||||
* ``NodeInfo`` objects are serialised as an object and can be looked up using the same mechanism as ``Party``
* ``NetworkHostAndPort`` serialised according to its ``toString()``
* ``PartyAndCertificate`` is serialised as an object containing the name and owning key
* ``SignedTransaction`` can now be serialised to JSON and deserialised back into an object.
* ``SerializedBytes`` is serialised by converting the bytes into the object it represents, which is then serialised into
  a JSON/YAML object
* ``CertPath`` and ``X509Certificate`` are serialised as objects and can be deserialised back
* ``SignedTransaction`` is serialised into its ``txBits`` and ``signatures`` and can be deserialised back

* Several members of ``JacksonSupport`` have been deprecated to highlight that they are internal and not to be used.
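
As an illustration of the new behaviour, a minimal round-trip sketch; the ``createNonRpcMapper()`` factory and the example name below are used purely for illustration and are not part of this change set:

```kotlin
import com.fasterxml.jackson.module.kotlin.readValue
import net.corda.client.jackson.JacksonSupport
import net.corda.core.identity.CordaX500Name

fun main() {
    // A mapper configured with Corda's Jackson module; no RPC connection is needed for this example.
    val mapper = JacksonSupport.createNonRpcMapper()

    val name = CordaX500Name(organisation = "Mega Corp", locality = "London", country = "GB")

    // CordaX500Name is serialised via its toString() form, e.g. "O=Mega Corp, L=London, C=GB" ...
    val json = mapper.writeValueAsString(name)

    // ... and can be deserialised back into an equal value.
    val roundTripped = mapper.readValue<CordaX500Name>(json)
    check(roundTripped == name)
}
```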
|
||||
|
||||
|
@ -15,7 +15,6 @@ The Corda repository comprises the following folders:
|
||||
* **finance** defines a range of elementary contracts (and associated schemas) and protocols, such as abstract fungible
|
||||
assets, cash, obligation and commercial paper
|
||||
* **gradle** contains the gradle wrapper which you'll use to execute gradle commands
|
||||
* **gradle-plugins** contains some additional plugins which we use to deploy Corda nodes
|
||||
* **lib** contains some dependencies
|
||||
* **node** contains the core code of the Corda node (eg: node driver, node services, messaging, persistence)
|
||||
* **node-api** contains data structures shared between the node and the client module, e.g. types sent via RPC
|
||||
|
@ -21,7 +21,7 @@ Please see the [design review process](design-review-process.md).
|
||||
|
||||
## Design Template
|
||||
|
||||
Please copy this [directory](./designTemplate) to a new location under `/docs/source/design` (use a meaningful short descriptive directory name) and use the [Design Template](./designTemplate/design.md) contained within to guide writing your Design Proposal. Whilst the section headings may be treated as placeholders for guidance, you are expected to be able to answer any questions related to pertinent section headings (where relevant to your design) at the design review stage. Use the [Design Decision Template](./designTemplate/decisions/decision.md) (as many times as needed) to record the pros and cons, and justification of any design decision recommendations where multiple options are available. These should be directly referenced from the *Design Decisions* section of the main design document.
|
||||
Please copy this [directory](template) to a new location under `/docs/source/design` (use a meaningful short descriptive directory name) and use the [Design Template](template/design.md) contained within to guide writing your Design Proposal. Whilst the section headings may be treated as placeholders for guidance, you are expected to be able to answer any questions related to pertinent section headings (where relevant to your design) at the design review stage. Use the [Design Decision Template](template/decisions/decision.md) (as many times as needed) to record the pros and cons, and justification of any design decision recommendations where multiple options are available. These should be directly referenced from the *Design Decisions* section of the main design document.
|
||||
|
||||
The design document may be completed in one or two iterations, by completing the following two main sections together or separately:
|
||||
|
||||
|
@ -1,62 +1,24 @@
|
||||
# Design review process
|
||||
|
||||
The Corda Design Review process defines a means of editing, storing, collaborating, reviewing and approving Corda documentation in a consistent, structured, easily accessible and open manner.
|
||||
The Corda design review process defines a means of collaborating on and approving Corda design thinking in a consistent,
|
||||
structured, easily accessible and open manner.
|
||||
|
||||
## Background
|
||||
The process has several steps:
|
||||
|
||||
Historically, Corda design documentation has been produced in an ad hoc fashion to include:
|
||||
* Multiple sources and formats of storage
|
||||
* Internal ([Tech/Arch technical discussion](https://r3-cev.atlassian.net/wiki/spaces/AR/pages/2588746/Internal+Technical+Discussion)) and External ([AWG design documents](https://r3-cev.atlassian.net/wiki/spaces/AWG/pages/56623418/Design+Documents)) facing wiki(s)
|
||||
* [Public github wiki](https://github.com/corda/corda/wiki)
|
||||
* [Discourse posts](https://discourse.corda.net/c/corda-discussion)
|
||||
* Multiple authored versions of same design with differing coverage
|
||||
* Elaboration and/or additions to scope
|
||||
* Differing opinions, proposals, suggestions.
|
||||
* Unstructured prose (no consistency in format and structure)
|
||||
* Lack of versioning (wiki documents typically evolve without versioned references)
|
||||
* Lack of traceability (audit) to original requirement(s)
|
||||
* Undefined review and approval process, leading to misunderstandings and open interpretations at time of implementation by platform development team
|
||||
* Lack of proposed implementation plan (time, resources, effort).
|
||||
* Often missing stakeholder collaboration and review in the feedback cycle.
|
||||
|
||||
## Process
|
||||
|
||||
This process specifies:
|
||||
|
||||
1. Usage of a design template to include:
|
||||
* Versioning: design documents can be referenced at a point in time, and evolve from such.
|
||||
* Review and approval history: incorporating relevant stakeholders from R3 (Platform, Product Management, Services) and
|
||||
other relevant review groups (community, partners, customers, key collaborators) as deemed appropriate to the request. Ensure design
|
||||
meets the requirements and is realizable within a proposed implementation timeframe.
|
||||
* Consistent structure and headings: top level headings should be preserved, second level headings provide guidance on
|
||||
content to include, and may be omitted where not relevant.
|
||||
* The design template includes both High Level (conceptual, logical) and Technical (implementation specific) sections.
|
||||
* Design decisions are clearly identified with pros/cons of proposed options, and agreed recommendation.
|
||||
|
||||
2. Document review and approval by relevant stakeholders and impacted parties to include R3 organisational units, such as Platform Engineering, Product Management and Services (where relevant), and key stakeholders, to include customers, partners, key collaborators, and community leaders.
|
||||
* Product owner (originator of requirements)
|
||||
* Design Approval Board (DAB)
|
||||
* Platform Development technical lead (and/or nominated developer(s))
|
||||
* Project Technical Lead / Solution Architect (if originating from an R3 Technical Services project)
|
||||
* Other identified stakeholders (community leaders, partners, customers, key collaborators)
|
||||
|
||||
3. Planning: allocation to Corda (open source) or Enterprise project JIRA epic(s) (and/or set of stories) and prioritisation within Product Backlog for future implementation within a Development Team Sprint.
|
||||
|
||||
4. Document repository locations, according to whether the design is related to Open Source or Enterprise (internal only).
|
||||
The recommended repository source is GitHub, and documents should be stored in [Markdown](https://en.wikipedia.org/wiki/Markdown).
|
||||
The collaboration and review process should follow the standard [GitHub Pull Request](https://confluence.atlassian.com/bitbucket/work-with-pull-requests-223220593.html) mechanism.
|
||||
* [Enterprise Github repository](https://github.com/corda/enterprise)
|
||||
* [Open Source Github repository](https://github.com/corda/corda)
|
||||
1. High level discussion with the community and developers on corda-dev.
|
||||
2. Writing a design doc and submitting it for review via a PR to this directory. See other design docs and the
|
||||
design doc template (below).
|
||||
3. Respond to feedback on the github discussion.
|
||||
4. You may be invited to a design review board meeting. This is a video conference in which design may be debated in
|
||||
real time. Notes will be sent afterwards to corda-dev.
|
||||
5. When the design is settled it will be approved and can be merged as normal.
|
||||
|
||||
The following diagram illustrates the process flow:
|
||||
|
||||
![Design Review Process](./designReviewProcess.png)
|
||||
|
||||
## Review Groups
|
||||
At least some of the following people will take part in a DRB meeting:
|
||||
|
||||
Design documents should include all relevant stakeholders in their distribution (mostly as PR reviewers in GitHub). This will often vary and depend on the origin of the Feature Request, particularly for high level business requirements. Technical Design Documents will tend to include a small set of stakeholders (Design Approval Board, Platform Development, DevOps). Final approval authority lies with at least one member of the Design Approval Board (DAB) or nominated delegate(s).
|
||||
|
||||
Design Approval Board (DAB)
|
||||
* Richard G Brown (CTO)
|
||||
* James Carlyle (Chief Engineer)
|
||||
* Mike Hearn (Lead Platform Engineer)
|
||||
@ -64,43 +26,10 @@ Design Approval Board (DAB)
|
||||
* Jonathan Sartin (Information Security manager)
|
||||
* Select external key contributors (directly involved in design process)
|
||||
|
||||
Other review groups include:
|
||||
The Corda Technical Advisory Committee may also be asked to review a design.
|
||||
|
||||
* Product Management
|
||||
Here's the outline of the design doc template:
|
||||
|
||||
* Developer Relations
|
||||
.. toctree::
|
||||
|
||||
* Platform Development Team Leads
|
||||
|
||||
(may nominate team members as design leads)
|
||||
|
||||
* DevOps
|
||||
|
||||
* Services – Project (Incubation & Acceleration)
|
||||
|
||||
* Nominated project leads
|
||||
|
||||
Services – Technical (Consulting)
|
||||
* Nominated solution architects
|
||||
|
||||
* External
|
||||
|
||||
* AWG (general)
|
||||
* Consortium members
|
||||
* ISV, SI, Partners
|
||||
* Customers
|
||||
* Key collaborators
|
||||
|
||||
## Applicability and Timing
|
||||
|
||||
This process should be applied to any major feature request gathered by the product management team or lead technologists that has been entered in the product backlog as a requirement, and has been prioritized for imminent execution.
|
||||
|
||||
Publication and distribution of a design document from initial review to full approval will vary on a case by case basis.
|
||||
|
||||
In general,
|
||||
* High Level designs may require a longer approval cycle as they may need to host a formal review meeting with the DAB in attendance,
|
||||
and will typically have larger stakeholder audiences (potentially including external reviewers), thus leading to multiple iterations of revision.
|
||||
In either case the High Level design must be raised as a GitHub PR and obtain formal approval by reviewers.
|
||||
* Technical designs are anticipated to go through a shorter cycle, with immediate feedback via the GitHub PR workflow.
|
||||
Once approved, a Technical Design should be decomposed into a set of implementable Epic/Stories for prioritization and
|
||||
scheduling as part of Development team(s) delivery cycle(s).
|
||||
template/design.md
|
@ -1,241 +0,0 @@
|
||||
![Corda](https://www.corda.net/wp-content/uploads/2016/11/fg005_corda_b.png)
|
||||
|
||||
# Design Template
|
||||
|
||||
Please read the [Design Review Process](../design-review-process.md) before completing a design.
|
||||
|
||||
This design template should be used for capturing new Corda feature requests that have been raised as JIRA requirements stories by the product management team. The design may be completed in two stages depending on the complexity and scope of the new feature.
|
||||
|
||||
1. High-level: conceptual designs based on business requirements and/or technical vision. Without detailing implementation, this level of design should position the overall solution within the Corda architecture from a logical perspective (independent from code implementation). It should illustrate and walk through the use case scenarios intended to be satisfied by this new feature request. The design should consider non-functional aspects of the system such as performance, scalability, high availability, security, and operational aspects such as management and monitoring.
|
||||
|
||||
This section of the document should go through a formal review process (eg. presentation of design at meeting and subsequent PR review workflow)
|
||||
|
||||
2. Technical: implementable designs with reference to Corda code. This level of design should focus on API specifications, service definitions, public library additions, data models and schemas, code modularity, configuration, execution and deployment of the new feature. It should also list any new software libraries, frameworks or development approaches to be adopted. The technical design should also consider all aspects of the test lifecycle (unit, integration, smoke tests, performance).
|
||||
|
||||
This section of the document should be raised as a PR for development team review.
|
||||
|
||||
An outcome of the Design Document should be an implementation plan that defines JIRA stories and tasks to be completed to produce shippable, demonstrable, executable code.
|
||||
|
||||
Please complete and/or remove section headings as appropriate to the design being proposed. These are provided as guidance and to structure the design in a consistent and coherent manner.
|
||||
|
||||
DOCUMENT MANAGEMENT
|
||||
---
|
||||
|
||||
Design documents should follow the standard GitHub version management and pull request (PR) review workflow mechanism.
|
||||
|
||||
## Document Control
|
||||
|
||||
| Title | |
|
||||
| -------------------- | ---------------------------------------- |
|
||||
| Date | |
|
||||
| Author | |
|
||||
| Distribution | (see review groups in design review process) |
|
||||
| Corda target version | (open source and/or enterprise) |
|
||||
| JIRA reference | (reference to primary Feature Request JIRA story outlining requirements) |
|
||||
|
||||
## Approvals
|
||||
|
||||
#### Document Sign-off
|
||||
|
||||
| Author | |
|
||||
| ----------------- | ---------------------------------------- |
|
||||
| Reviewer(s) | (GitHub PR reviewers) |
|
||||
| Final approver(s) | (GitHub PR approver(s) from Design Approval Board) |
|
||||
|
||||
#### Design Decisions
|
||||
|
||||
| Description | Recommendation | Approval |
|
||||
| ---------------------------------------- | --------------- | ----------------------- |
|
||||
| [Design Decision 1](decisions/decision.md) | Selected option | (Design Approval Board) |
|
||||
| [Design Decision 2](decisions/decision.md) | Selected option | (Design Approval Board) |
|
||||
| [Design Decision 3](decisions/decision.md) | Selected option | (Design Approval Board) |
|
||||
|
||||
|
||||
## Document History
|
||||
|
||||
To be managed by GitHub revision control
|
||||
(please use meaningful identifiers when committing a PR approved design to GitHub - eg. my super design V1.0)
|
||||
|
||||
HIGH LEVEL DESIGN
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
General overall of design proposal (goal, objectives, simple outline)
|
||||
|
||||
## Background
|
||||
|
||||
Description of existing solution (if any) and/or rationale for requirement.
|
||||
|
||||
* Reference(s) to discussions held elsewhere (slack, wiki, etc).
|
||||
* Definitions, acronyms and abbreviations
|
||||
|
||||
## Scope
|
||||
|
||||
* Goals
|
||||
* Non-goals (eg. out of scope)
|
||||
* Reference(s) to similar or related work
|
||||
|
||||
## Timeline
|
||||
|
||||
* Is this a short, medium or long-term solution?
|
||||
* Outline timeline expectations
|
||||
|
||||
Eg1. required for Customer Project X by end of Qy'2049
|
||||
|
||||
Eg2. required to release Enterprise Vx.y (reference roadmap)
|
||||
|
||||
* Where short-term design, is this evolvable / extensible or stop-gap (eg. potentially throwaway)?
|
||||
|
||||
## Requirements
|
||||
|
||||
* Reference(s) to any of following:
|
||||
|
||||
* Captured Product Backlog JIRA entry
|
||||
|
||||
* Internal White Paper feature item and/or visionary feature
|
||||
|
||||
* Project related requirement (POC, RFP, Pilot, Prototype) from
|
||||
|
||||
* Internal Incubator / Accelerator project
|
||||
|
||||
* Direct from Customer, ISV, SI, Partner
|
||||
* Use Cases
|
||||
* Assumptions
|
||||
|
||||
## Design Decisions
|
||||
|
||||
List of design decisions identified in defining the target solution:
|
||||
(for each item, please complete the attached [Design Decision template](decisions/decision.md))
|
||||
|
||||
| Heading (link to completed Decision document using template) | Recommendation |
|
||||
| ---------------------------------------- | -------------- |
|
||||
| [Design Decision 1](decisions/decision.md) | Option A |
|
||||
| [Design Decision 2](decisions/decision.md) | TBD* |
|
||||
| [Design Decision 3](decisions/decision.md) | Option B |
|
||||
|
||||
It is reasonable to expect decisions to be challenged prior to any formal review and approval.
|
||||
In certain scenarios the Design Decision itself may solicit a recommendation from reviewers.
|
||||
|
||||
## Target Solution
|
||||
|
||||
* Illustrate any business process with diagrams
|
||||
|
||||
* Business Process Flow (or formal BPMN 2.0), swimlane activity
|
||||
|
||||
* UML: activity, state, sequence
|
||||
|
||||
* Illustrate operational solutions with deployment diagrams
|
||||
|
||||
* Network
|
||||
|
||||
* Validation matrix (against requirements)
|
||||
|
||||
* Role, requirement, how design satisfies requirement
|
||||
|
||||
* Sample walk through (against Use Cases)
|
||||
|
||||
* Implications
|
||||
|
||||
* Technical
|
||||
* Operational
|
||||
* Security
|
||||
|
||||
* Adherence to existing industry standards or approaches
|
||||
* List any standards to be followed / adopted
|
||||
* Outstanding issues
|
||||
|
||||
## Complementary solutions
|
||||
|
||||
Other solutions that provide similar functionality and/or overlap with the proposed.
|
||||
Where overlap with existing solution(s), describe how this design fits in and complements the current state.
|
||||
|
||||
## Final recommendation
|
||||
|
||||
* Proposed solution (if more than one option presented)
|
||||
* Proceed direct to implementation
|
||||
* Proceed to Technical Design stage
|
||||
* Proposed Platform Technical team(s) to implement design (if not already decided)
|
||||
|
||||
TECHNICAL DESIGN
|
||||
---
|
||||
|
||||
## Interfaces
|
||||
|
||||
* Public APIs impact
|
||||
* Internal APIs impacted
|
||||
* Modules impacted
|
||||
|
||||
* Illustrate with Software Component diagrams
|
||||
|
||||
## Functional
|
||||
|
||||
* UI requirements
|
||||
|
||||
* Illustrate with UI Mockups and/or Wireframes
|
||||
|
||||
* (Subsystem) Components descriptions and interactions)
|
||||
|
||||
Consider and list existing impacted components and services within Corda:
|
||||
|
||||
* Doorman
|
||||
* Network Map
|
||||
* Public API's (ServiceHub, RPCOps)
|
||||
* Vault
|
||||
* Notaries
|
||||
* Identity services
|
||||
* Flow framework
|
||||
* Attachments
|
||||
* Core data structures, libraries or utilities
|
||||
* Testing frameworks
|
||||
* Pluggable infrastructure: DBs, Message Brokers, LDAP
|
||||
|
||||
* Data model & serialization impact and changes required
|
||||
|
||||
* Illustrate with ERD diagrams
|
||||
|
||||
* Infrastructure services: persistence (schemas), messaging
|
||||
|
||||
## Non-Functional
|
||||
|
||||
* Performance
|
||||
* Scalability
|
||||
* High Availability
|
||||
|
||||
## Operational
|
||||
|
||||
* Deployment
|
||||
|
||||
* Versioning
|
||||
|
||||
* Maintenance
|
||||
|
||||
* Upgradability, migration
|
||||
|
||||
* Management
|
||||
|
||||
* Audit, alerting, monitoring, backup/recovery, archiving
|
||||
|
||||
## Security
|
||||
|
||||
* Data privacy
|
||||
* Authentication
|
||||
* Access control
|
||||
|
||||
## Software Development Tools and Programming Standards to be adopted.
|
||||
|
||||
* languages
|
||||
* frameworks
|
||||
* 3rd party libraries
|
||||
* architectural / design patterns
|
||||
* supporting tools
|
||||
|
||||
## Testability
|
||||
|
||||
* Unit
|
||||
* Integration
|
||||
* Smoke
|
||||
* Non-functional (performance)
|
||||
|
||||
APPENDICES
|
||||
---
|
@ -1,12 +1,9 @@
|
||||
![Corda](https://www.corda.net/wp-content/uploads/2016/11/fg005_corda_b.png)
|
||||
|
||||
--------------------------------------------
|
||||
Design Decision: Notary Backend - Galera or Permazen Raft
|
||||
=========================================================
|
||||
# Design Decision: Notary Backend - Galera or Permazen Raft
|
||||
|
||||
## Background / Context
|
||||
|
||||
We have evaluated Galera and Permazen as a possible replacement for Atomix CopyCat for the storage backend of our Notary Service, more specifically the Uniqueness Provider.
|
||||
We have evaluated Galera and Permazen as a possible replacement for Atomix CopyCat for the storage backend of our Notary
|
||||
Service, more specifically the Uniqueness Provider.
|
||||
|
||||
## Options Analysis
|
||||
|
||||
@ -14,7 +11,8 @@ We have evaluated Galera and Permazen as a possible replacement for Atomix CopyC
|
||||
|
||||
#### Advantages
|
||||
|
||||
1. Wider user base. In a survey of 478 OpenStack deployments, 32% decided to use Galera Cluster in production, see p. 47 of the [survey](https://www.openstack.org/assets/survey/April2017SurveyReport.pdf).
|
||||
1. Wider user base. In a survey of 478 OpenStack deployments, 32% decided to use Galera Cluster in production, see p. 47
|
||||
of the [survey](https://www.openstack.org/assets/survey/April2017SurveyReport.pdf).
|
||||
|
||||
2. Very little additional work needed.
|
||||
|
226
docs/source/design/notary-service-ha/design.md
Normal file
@ -0,0 +1,226 @@
|
||||
# HA Notary Service
|
||||
|
||||
## Overview
|
||||
|
||||
The distributed notary service tracks spent contract states and prevents double spending. For high-availability (HA),
|
||||
the backing data store is replicated across a cluster of machines in different data centers. In this model, the cluster
|
||||
is meant to be operated by a single party, and only crash faults are tolerated.
|
||||
|
||||
## Background
|
||||
|
||||
We have an existing HA notary service based on Atomix CopyCat, which is an open source state machine replication library
that implements the Raft consensus algorithm. However, it doesn't scale well with the number of spent input states, since
|
||||
CopyCat takes periodic snapshots of the state machine and the snapshots have to fit in memory.
|
||||
|
||||
As an alternative, we propose using a more traditional MySQL database-based approach, using Galera Cluster, which
|
||||
provides synchronous multi-master replication. Galera Cluster is based on a MySQL server with Write-Set replication
|
||||
(wsrep) API, and the Galera Replication Plugin. Through the wsrep API Galera provides [certification-based replication](http://galeracluster.com/documentation-webpages/certificationbasedreplication.html). It works roughly as
|
||||
follows:
|
||||
|
||||
1. A single database node executes a transaction optimistically until it reaches the commit point.
|
||||
2. Changes made by the transaction are collected into a write-set.
|
||||
3. The write-set is broadcast to the cluster.
|
||||
4. Every other node determines whether it can apply the write-set without conflicts.
|
||||
5. In case of conflict, the initial node rolls back the transaction.
|
||||
|
||||
There are different Galera Cluster implementations, and we chose the Percona XtraDB cluster, as they were historically
|
||||
more focused on performance than the competition.
|
||||
|
||||
### Decisions
|
||||
|
||||
- We are replacing the Atomix CopyCat Raft service.
|
||||
- We are using a Percona cluster for Corda Connect.
|
||||
- We keep investigating a more scalable solution, based on Permazen or a custom implementation.
|
||||
- In the long term, we are interested in providing a BFT solution, perhaps leveraging SGX.
|
||||
|
||||
.. toctree::
|
||||
|
||||
decisions/decision.md
|
||||
|
||||
#### Advantages of Percona
|
||||
|
||||
- Production ready
|
||||
- Works out of the box
|
||||
- Backed by a company; enterprise and community support are available
|
||||
- Running stable at 30 tx/second (with 10 input states / tx), see figure below, in the section about the long running test
|
||||
|
||||
#### Disadvantages of Percona
|
||||
|
||||
- Performance deteriorates over time. This happens because Galera only works with the InnoDB storage engine, internally
|
||||
backed by a B+ tree. Since we use state references as primary keys, table inserts result in random B+ tree inserts,
|
||||
which doesn't scale well.
|
||||
|
||||
## Scope
|
||||
|
||||
### Goals
|
||||
|
||||
* We need a stable notary implementation.
|
||||
* The implementation has to be easy to operate.
|
||||
* We know that the switching costs to a more scalable solution are minimal.
|
||||
* We take periodic backups of the consumed states and we test the recovery.
|
||||
* We remain flexible and open to future requirements.
|
||||
|
||||
### Non-Goals
|
||||
|
||||
* For the time being, we don't need a solution that is shardable (for now, all replicas can hold all the state).
|
||||
* We don't require a solution that can handle throughput beyond 15 tx/second.
|
||||
* We don't design and implement a custom solution in the short term.
|
||||
* We don't need rate limiting and fairness.
|
||||
|
||||
## Design
|
||||
|
||||
![Overview](overview.svg)
|
||||
|
||||
The HA notary service relies on the underlying MySQL uniqueness provider on top of a Percona XtraDB Cluster to prevent
|
||||
double spending of input states. The exact data center locations are to be determined. Our notary service replicas
|
||||
connect via JDBC to the replicated MySQL service.
|
||||
|
||||
Percona XtraDB Cluster is based on Percona Server and the Galera replication library that provides a multi master
|
||||
cluster based on synchronous replication. The cluster is as good as its slowest node.
|
||||
|
||||
## Main Data Structure
|
||||
|
||||
The table below details the database schema.
|
||||
|
||||
| Field name | Type | Description |
|
||||
| --------------------- | ------------ | ---------------------------------------- |
|
||||
| issue_tx_id | Binary(32) | The ID of the transaction that created the state |
|
||||
| issue_tx_output_id | Int unsigned | Where in the transaction the state was created |
|
||||
| consuming_tx_id | Binary(32) | The ID of the transaction that consumes the input state |
|
||||
| consuming_tx_input_id | Int unsigned | Where in the transaction the state is consumed |
|
||||
| consuming_party | Blob | Who is requesting the notarisation (~1 kByte) |
|
||||
| commit_time | Timestamp | When this row is committed |
|
||||
|
||||
## Functional
|
||||
|
||||
The notary service relies on the MySQL uniqueness provider to prevent double spending. The MySQL database holds a single
|
||||
table as described above. For HA, the data is synchronously replicated to several nodes by the Galera replication
|
||||
plugin.
|
||||
|
||||
During notarisation, the uniqueness provider attempts to commit all input states of the Corda transaction in a single
|
||||
database transaction. If at least one input state has been previously spent, the entire database transaction fails with
|
||||
a batch exception. Unspent states can still be spent in a different later transaction. In case of double spend attempts,
|
||||
the uniqueness provider queries the database for details of where the conflicting states have been spent. The consuming
transaction ID, the position of the input in the transaction and the requesting party are collected for all conflicting
inputs, wrapped in a uniqueness exception and thrown by the uniqueness provider. This exception is handled by the notary
service and turned into a notary exception.
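
For illustration only, a sketch of how such a single-transaction commit could look over JDBC. This is not the production implementation; the table name `notary_committed_states` and the assumption that `commit_time` defaults to the current timestamp are illustrative:

```kotlin
import java.sql.BatchUpdateException
import java.sql.Connection

/**
 * Illustrative sketch: commit every input state of a Corda transaction in one database transaction.
 * A primary key violation on any row aborts the whole batch, which signals a double spend attempt.
 */
fun commitInputStates(
        connection: Connection,
        consumingTxId: ByteArray,
        requestingParty: ByteArray,
        inputs: List<Pair<ByteArray, Int>>   // (issue_tx_id, issue_tx_output_id) per input state
) {
    connection.autoCommit = false
    try {
        val sql = "INSERT INTO notary_committed_states " +
                "(issue_tx_id, issue_tx_output_id, consuming_tx_id, consuming_tx_input_id, consuming_party) " +
                "VALUES (?, ?, ?, ?, ?)"
        connection.prepareStatement(sql).use { stmt ->
            inputs.forEachIndexed { position, (issueTxId, outputIndex) ->
                stmt.setBytes(1, issueTxId)
                stmt.setInt(2, outputIndex)
                stmt.setBytes(3, consumingTxId)
                stmt.setInt(4, position)
                stmt.setBytes(5, requestingParty)
                stmt.addBatch()
            }
            stmt.executeBatch()
        }
        connection.commit()
    } catch (e: BatchUpdateException) {
        connection.rollback()
        // The real service would now query for the conflicting rows and raise a notary exception.
        throw e
    }
}
```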
|
||||
|
||||
We are using the Hikari connection pool to connect the notary services to all nodes of our Percona cluster. The
|
||||
connection pool can be monitored via JMX.
|
||||
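A minimal sketch of such a pool configuration; the JDBC URL, credentials and pool sizing below are placeholders, not our production settings:

```kotlin
import com.zaxxer.hikari.HikariConfig
import com.zaxxer.hikari.HikariDataSource

// Placeholder values throughout; the real hosts and credentials live in the node configuration.
fun notaryDataSource(): HikariDataSource {
    val config = HikariConfig().apply {
        jdbcUrl = "jdbc:mysql://percona-1:3306,percona-2:3306,percona-3:3306/corda_notary"
        username = "notary"
        password = "change-me"
        maximumPoolSize = 10
        isRegisterMbeans = true   // expose pool statistics over JMX, as noted above
    }
    return HikariDataSource(config)
}
```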
|
||||
### Deployment
|
||||
|
||||
We are planning to run a five node Percona cluster that can tolerate two simultaneous node failures. In case we need to
|
||||
provide more storage or upgrade to better hardware we can take a single node down for maintenance and still tolerate one
|
||||
unplanned failure.
|
||||
|
||||
#### Monitoring cluster membership changes
|
||||
|
||||
We set up a [notification command](http://galeracluster.com/documentation-webpages/notificationcmd.html) that gets called
|
||||
whenever the node registers a change.
|
||||
|
||||
### Management
|
||||
|
||||
#### Disaster Recovery
|
||||
|
||||
Our disaster recovery strategy covers the following risks:
|
||||
1. **Host Failure**. For the 5 node cluster we can tolerate 2 host failures without interrupting operation. This includes both machine and disk failures.
|
||||
2. **DC Failure**. The cluster will be distributed across 3 data centers in a 2+2+1 configuration. A loss of one data center can be tolerated without interrupting service operation.
|
||||
3. **Data Corruption/Loss**. In cases of data corruption or loss that is replicated across the cluster (for example, accidental data deletion or modification by an administrator) backups will be used to restore the cluster state. In this scenario service downtime will be incurred.
|
||||
|
||||
#### Backup and Recovery
|
||||
|
||||
Recovery Point Objective: 0
|
||||
|
||||
Recovery Time Objective: 1h
|
||||
|
||||
Any data loss incurred by the notary service will lead to a compromised ledger, since participants would be able to
|
||||
double-spend already notarised states. Note that the backup & recovery procedure is only required for mitigating data
|
||||
loss that gets replicated to the entire cluster.
|
||||
|
||||
This can be achieved by combining periodic backups of the entire database state, and the MySQL [binary
|
||||
log](https://dev.mysql.com/doc/refman/5.7/en/binary-log.html). The binary log contains a log of all executed SQL
|
||||
statements, which can be replayed onto a backup to restore the most up-to-date state. In case of an accidental statement
|
||||
that removes data (e.g. DROP TABLE), the binary log can be replayed only up to the offending statement.
|
||||
|
||||
Scenarios where data corruption is caused by a malicious administrator selectively modifying or removing table rows are
|
||||
out of scope.
|
||||
|
||||
See [Galera's backup documentation](http://galeracluster.com/documentation-webpages/backingupthecluster.html)
|
||||
|
||||
#### Monitoring
|
||||
|
||||
See the [Percona Management and Monitoring](https://www.percona.com/doc/percona-monitoring-and-management/index.html) documentation.
|
||||
|
||||
* Throughput in Tx / second
|
||||
* Throughput in Input states / second
|
||||
* Double spend attempts / time
|
||||
* High level statistics, e.g. number of double spend attempts in the last 24 hours by two parties
|
||||
* Double spend attempts per party
|
||||
* Latency p50, p99
|
||||
* Number of input states in DB
|
||||
* Size of DB
|
||||
* Replication Queues, see [monitoring Galera](http://galeracluster.com/documentation-webpages/monitoringthecluster.html)
|
||||
|
||||
#### Alerting
|
||||
|
||||
Alerts are triggered based on relevant metrics, like number of active members in the cluster and size of write queues of
|
||||
individual nodes. We are configuring PMM to forward alerts to PagerDuty, where we do the routing to the operators who
|
||||
are on call. We configure email alerting and slack integration as additional channels.
|
||||
|
||||
## Security
|
||||
|
||||
SSL encryption is used for the links between the nodes of the Galera cluster, and between the notary service and the Galera cluster. See the [SSL
|
||||
config documentation](http://galeracluster.com/documentation-webpages/sslconfig.html).
|
||||
|
||||
The managed disks on Azure [are encrypted](https://azure.microsoft.com/en-gb/blog/azure-managed-disks-sse/) with keys
|
||||
managed by Microsoft. We have to trust our cloud provider anyway, so we don't do our own disk encryption.
|
||||
|
||||
## Testing the throughput of the uniqueness provider
|
||||
|
||||
We are using a custom load test flow that includes double spend attempts. The application metrics are forwarded to
|
||||
Graphite and our Percona cluster is monitored by Percona's metrics and monitoring tool (PMM).
|
||||
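A sketch of the kind of wiring used to forward application metrics to Graphite; the metric names, host and reporting interval below are illustrative assumptions:

```kotlin
import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.graphite.Graphite
import com.codahale.metrics.graphite.GraphiteReporter
import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

val metrics = MetricRegistry()
val notarisedInputStates = metrics.meter("notary.inputStates")        // marked once per committed input state
val doubleSpendAttempts = metrics.meter("notary.doubleSpendAttempts") // marked once per detected conflict

fun startGraphiteReporting(host: String, port: Int) {
    val reporter = GraphiteReporter.forRegistry(metrics)
            .convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .build(Graphite(InetSocketAddress(host, port)))
    reporter.start(10, TimeUnit.SECONDS)   // push a snapshot every 10 seconds
}
```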
|
||||
In our tests, the number of input states is Poisson-distributed with an average four input states per transaction. To
|
||||
increase throughput in terms of notarised input states per second, we could batch transactions in the future. We tested
|
||||
batching with batch sizes of up to 1000 input states per batch. And reached a throughput of 2k input states / second for
|
||||
batch sizes 250-1000. When we detect a double spend attempt, we could send through individual transactions to find the
|
||||
source of the conflict or bisect the batch.
|
||||
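A sketch of the bisection idea; the `tryCommit` callback stands in for a call into the uniqueness provider and is purely illustrative:

```kotlin
/**
 * Bisect a failed batch to isolate the conflicting (double spent) transactions.
 * tryCommit returns true if the given sub-batch commits cleanly; committed work is kept.
 */
fun <T> findConflicts(batch: List<T>, tryCommit: (List<T>) -> Boolean): List<T> {
    if (batch.isEmpty() || tryCommit(batch)) return emptyList()
    if (batch.size == 1) return batch                       // a single failing transaction is a conflict
    val mid = batch.size / 2
    return findConflicts(batch.subList(0, mid), tryCommit) +
            findConflicts(batch.subList(mid, batch.size), tryCommit)
}
```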
|
||||
## Long running test
|
||||
|
||||
![throughput](txs.png)
|
||||
|
||||
The figure above shows the throughput in transactions per second over four days, while writing to the cluster with up to
|
||||
three clients. The dips occur while we take nodes off-line to simulate failure and to upgrade the disks. In the last
|
||||
phase of the test all nodes were equipped with managed 1TB SSDs and the cluster notarised at more than 300 input
|
||||
states per second while holding more than 100 M input states in the DB.
|
||||
|
||||
Glitches in throughput can occur when the write queue of a node is filling up. I'm assuming this is due to increased
|
||||
disk latency when the cloud SAN disk is busy with other operations. When the maximum write queue size is reached, the
|
||||
slow node isn't accepting writes any more and sends out flow control messages to its peers to stop replicating (I'm
|
||||
assuming this leads to messages being queued in their send queue). The queue sizes are monitored by the PMM tool and we
|
||||
can set up alerts based on a configured maximum write queue size or when we see "flow control messages".
|
||||
|
||||
We found that managed SSDs of 1TB in size performed better than a RAID 10 array of four 128GB SSDs. The latency of the
|
||||
1TB SSDs was stable around 8ms, while we have observed latency spikes up to 64ms on the smaller SSDs. The disk load on
|
||||
the slowest node in terms of disk latency was around 6-8 outstanding writes during the last phase of the test. Setting
|
||||
up a RAID 10 was a mistake, for best performance we should have used a RAID 0 configuration, since the Azure disks are
|
||||
replicated.
|
||||
|
||||
![disk load](diskload.png)
|
||||
|
||||
### Recommended Cloud Server Configuration
|
||||
|
||||
We recommend `Standard DS13 v2 (8 cores, 56 GB memory)` servers with 1 TB managed SSD disks attached. To make the setup
|
||||
more cost effective, we can run on more affordable cloud instances, when we have lower demands in terms of throughput.
|
||||
The optimum is yet to be found. It is possible to upgrade or downgrade the nodes of the cluster, one node at a time.
|
||||
|
||||
Be prepared to kill and replace the slowest node of the cluster, especially in the cloud, since the Galera cluster will
|
||||
not perform better than the slowest node. The same goes for SAN disks. If you are unlucky and your disk has high
|
||||
latency, try replacing it with a new one. You may get better performance with the new disk.
|
||||
|
||||
### Disk upgrade using LVM
|
||||
|
||||
We recommend using LVM in production for convenience and flexibility. During our long running test we performed a hot
|
||||
disk upgrade using LVM.
|
Before Width: | Height: | Size: 126 KiB After Width: | Height: | Size: 126 KiB |
Before Width: | Height: | Size: 40 KiB After Width: | Height: | Size: 40 KiB |
Before Width: | Height: | Size: 111 KiB After Width: | Height: | Size: 111 KiB |
Before Width: | Height: | Size: 11 KiB After Width: | Height: | Size: 11 KiB |
Before Width: | Height: | Size: 538 KiB After Width: | Height: | Size: 538 KiB |
@ -1,201 +0,0 @@
|
||||
![Corda](https://www.corda.net/wp-content/uploads/2016/11/fg005_corda_b.png)
|
||||
|
||||
# HA Notary Service Design
|
||||
|
||||
DOCUMENT MANAGEMENT
|
||||
---
|
||||
|
||||
## Document Control
|
||||
|
||||
| Title | HA Notary Design |
|
||||
| -------------------- | ---------------------------------------- |
|
||||
| Date | 13 December 2017 |
|
||||
| Author | Thomas Schroeter and Andrius Dagys |
|
||||
| Distribution | Design Review Board, Product Management, Services - Technical (Consulting), Platform Delivery |
|
||||
| Corda target version | Enterprise |
|
||||
| JIRA reference | https://r3-cev.atlassian.net/browse/ENT-1232 |
|
||||
|
||||
## Approvals
|
||||
|
||||
#### Document Sign-off
|
||||
|
||||
| Author | |
|
||||
| ----------------- | ---------------------------------------- |
|
||||
| Reviewer(s) | (GitHub PR reviewers) |
|
||||
| Final approver(s) | (GitHub PR approver(s) from Design Approval Board) |
|
||||
|
||||
#### Design Decisions
|
||||
|
||||
| Description | Recommendation |
|
||||
| ---------------------------------------- | -------------- |
|
||||
| [Galera or Permazen](decisions/decision.md) | Galera |
|
||||
|
||||
HIGH LEVEL DESIGN
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
The distributed notary service tracks spent contract states and prevents double spending. For high-availability (HA), the backing data store is replicated across a cluster of machines in different data centers. In this model, the cluster is meant to be operated by a single party, and only crash faults are tolerated.
|
||||
|
||||
## Background
|
||||
|
||||
We have an existing HA notary service based on Atomix CopyCat, which is an open source state machine replication library that implements the Raft consensus algorithm. However, it doesn't scale well with the number of spent input states, since CopyCat takes periodic snapshots of the state machine and the snapshots have to fit in memory.
|
||||
|
||||
As an alternative, we propose using a more traditional MySQL database-based approach, using Galera Cluster, which provides synchronous multi-master replication. Galera Cluster is based on a MySQL server with Write-Set replication (wsrep) API, and the Galera Replication Plugin. Through the wsrep API Galera provides [certification-based replication](http://galeracluster.com/documentation-webpages/certificationbasedreplication.html). It works roughly as follows:
|
||||
1. A single database node executes a transaction optimistically until it reaches the commit point.
|
||||
2. Changes made by the trasaction are collected into a write-set.
|
||||
3. The write-set broadcasted to the cluster.
|
||||
4. Every other node determines whether it can apply the write-set without conflicts.
|
||||
5. In case of conflict, the initial node rolls back the transaction.
|
||||
|
||||
There are different Galera Cluster implementations, and we chose the Percona XtraDB cluster, as they were historically more focused on performance than the competition.
|
||||
|
||||
### Decisions
|
||||
|
||||
- We are replacing the Atomix CopyCat Raft service.
|
||||
- We are using a Percona cluster for Corda Connect.
|
||||
- We keep investigating a more scalable solution, based on Permazen or a custom implementation.
|
||||
- In the long term, we are interested in providing a BFT solution, perhaps leveraging SGX.
|
||||
|
||||
#### Advantages of Percona
|
||||
|
||||
- Production ready
|
||||
- Works out of the box
|
||||
- Backed by a company, enterprise and a community support are available
|
||||
- Running stable at 30 tx/second (with 10 input states / tx), see figure below, in the section about the long running test
|
||||
|
||||
#### Disadvantages of Percona
|
||||
|
||||
- Performance deteriorates over time. This happens because Galera only works with the InnoDB storage engine, internally backed by a B+ tree. Since we use state references as primary keys, table inserts result in random B+ tree inserts, which doesn't scale well.
|
||||
|
||||
## Scope
|
||||
|
||||
### Goals
|
||||
|
||||
* We need a stable notary implementation.
|
||||
* The implementation has to be easy to operate.
|
||||
* We know that the switching costs to a more scalable solution are minimal.
|
||||
* We take periodic backups of the consumed states and we test the recovery.
|
||||
* We remain flexible and open to future requirements.
|
||||
|
||||
### Non-Goals
|
||||
|
||||
* For the time being, we don't need a solution that is shardable (for now, all replicas can hold all the state).
|
||||
* We don't require a solution that can handle throughput beyond 15 tx/second.
|
||||
* We don't design and implement a custom solution in the short term.
|
||||
* We don't need rate limiting and fairness.
|
||||
|
||||
## Design Decisions
|
||||
|
||||
![Overview](overview.svg)
|
||||
|
||||
The HA notary service relies on the underlying MySQL uniqueness provider on top of a Percona XtraDB Cluster to prevent double spending of input states. The exact data center locations are to be determined. Our notary service replicas connect via JDBC to the replicated MySQL service.
|
||||
|
||||
Percona XtraDB Cluster is based on Percona Server and the Galera replication library that provides a multi master cluster based on synchronous replication. The cluster is as good as its slowest node.
|
||||
|
||||
TECHNICAL DETAILS
|
||||
---
|
||||
|
||||
## Main Data Structure
|
||||
|
||||
The table below details the data base schema.
|
||||
|
||||
| Field name | Type | Description |
|
||||
| --------------------- | ------------ | ---------------------------------------- |
|
||||
| issue_tx_id | Binary(32) | The ID of the transaction that created the state |
|
||||
| issue_tx_output_id | Int unsigned | Where in the transaction the state was created |
|
||||
| consuming_tx_id | Binary(32) | The ID of the transaction that consumes the input state |
|
||||
| consuming_tx_input_id | Int unsigned | Where in the transaction the state is consumed |
|
||||
| consuming_party | Blob | Who is requesting the notarisation (~1 kByte) |
|
||||
| commit_time | Timestamp | When this row is committed |
|
||||
|
||||
## Functional
|
||||
|
||||
The notary service relies on the MySQL uniqueness provider to prevent double spending. The MySQL database holds a single table as described above. For HA, the data is synchronously replicated to several nodes by the Galera replication plugin.
|
||||
|
||||
During notarisation, the uniqueness provider attempts to commit all input states of the Corda transaction in a single database transaction. If at least one input state has been previously spent, the entire database transaction fails with a batch exception. Unspent states can still be spent in a different later transaction. In case of double spend attempts, the uniqueness provider queries the database for details where the conflicting states have been spent. The consuming transaction ID, position of the input in the transaction and the requesting party are collected for all conflicting inputs, wrapped in a uniqueness exception, thrown by the uniqueness provider. This exception is handled by the notary service and turned into a notary exception.
|
||||
|
||||
We are using the Hikari connection pool to connect the notary services to all nodes of our Percona cluster. The connection pool can be monitored via JMX.
|
||||
|
||||
### Deployment
|
||||
|
||||
We are planning to run a five node Percona cluster that can tolerate two simultaneous node failures. In case we need to provide more storage or upgrade to better hardware we can take a single node down for maintenance and still tolerate one unplanned failure.
|
||||
|
||||
#### Monitoring cluster membership changes
|
||||
|
||||
We setup a [notification command](http://galeracluster.com/documentation-webpages/notificationcmd.html) that gets called whenever the node registers a change.
|
||||
|
||||
### Management
|
||||
|
||||
#### Disaster Recovery
|
||||
|
||||
Our disaster recovery strategy covers the following risks:
|
||||
1. **Host Failure**. For the 5 node cluster we can tolerate 2 host failures without interrupting operation. This includes both machine and disk failures.
|
||||
2. **DC Failure**. The cluster will be distributed across 3 data centers in a 2+2+1 configuration. A loss of one data center can be tolerated without interrupting service operation.
|
||||
3. **Data Corruption/Loss**. In cases of data corruption or loss that is replicated across the cluster (for example, accidental data deletion or modification by an administrator) backups will be used to restore the cluster state. In this scenario service downtime will be incurred.
|
||||
|
||||
##### Backup and Recovery
|
||||
|
||||
Recovery Point Objective: 0
|
||||
|
||||
Recovery Time Objective: 1h
|
||||
|
||||
Any data loss incurred by the notary service will lead to a compromised ledger, since participants would be able to double-spend already notarised states. Note that the backup & recovery procedure is only required for mitigating data loss that gets replicated to the entire cluster.
|
||||
|
||||
This can be achieved by combining periodic backups of the entire database state, and the MySQL [binary log](https://dev.mysql.com/doc/refman/5.7/en/binary-log.html). The binary log contains a log of all executed SQL statements, which can be replayed onto a backup to restore the most up-to-date state. In case of an accidental statement that removes data (e.g. DROP TABLE), the binary log can be replayed only up to the offending statement.
|
||||
|
||||
Scenarios where data corruption is caused by a malicious administrator selectively modifying or removing table rows are out of scope.
|
||||
|
||||
See [Galera's backup documentation](http://galeracluster.com/documentation-webpages/backingupthecluster.html)
|
||||
|
||||
#### Monitoring
|
||||
|
||||
See the [Percona Management and Monitoring](https://www.percona.com/doc/percona-monitoring-and-management/index.html) documentation.
|
||||
|
||||
* Throughput in Tx / second
|
||||
* Throughput in Input states / second
|
||||
* Double spend attempts / time
|
||||
* High level statistics, e.g. number of double spend attempts in the last 24 hours by two parties
|
||||
* Double spend attempts per party
|
||||
* Latency p50, p99
|
||||
* Number of input states in DB
|
||||
* Size of DB
|
||||
* Replication Queues, see [monitoring Galera](http://galeracluster.com/documentation-webpages/monitoringthecluster.html)
|
||||
|
||||
#### Alerting
|
||||
|
||||
Alerts are triggered based on relevant metrics, like number of active members in the cluster and size of write queues of individual nodes. We are configuring PMM to forward alerts to PagerDuty, where we do the routing to the operators who are on call. We configure email alerting and slack integration as additional channels.
|
||||
|
||||
## Security
|
||||
|
||||
SSL encrypted links between the nodes of the Galera cluster and the notary service and the Galera cluster. See the [SSL config documentation](http://galeracluster.com/documentation-webpages/sslconfig.html).
|
||||
|
||||
The managed disks on Azure [are encrypted](https://azure.microsoft.com/en-gb/blog/azure-managed-disks-sse/) with keys managed by Microsoft. We have to trust our cloud provider anyways, so we don't do our own disk encryption.
|
||||
|
||||
## Testing the throughput of the uniqueness provider
|
||||
|
||||
We are using a custom load test flow that includes double spend attempts. The application metrics are forwarded to Graphite and our Percona cluster is monitored by Percona's metrics and monitoring tool (PMM).
|
||||
|
||||
In our tests, the number of input states is Poisson-distributed with an average four input states per transaction. To increase throughput in terms of notarised input states per second, we could batch transactions in the future. We tested batching with batch sizes of up to 1000 input states per batch. And reached a throughput of 2k input states / second for batch sizes 250-1000. When we detect a double spend attempt, we could send through individual transactions to find the source of the conflict or bisect the batch.
|
||||
|
||||
## Long running test
|
||||
|
||||
![throughput](txs.png)
|
||||
|
||||
The figure above shows the throughput in transactions per second over four days, while writing to the cluster with up to three clients. The dips occur while we take nodes off-line to simulate failure and to upgrade the disks. In the last phase of the test all nodes were equipped with managed 1TB SSDs and the cluster notarised at more than 300 input states per second while holding more than 100 M input states in the DB.
|
||||
|
||||
Glitches in throughput can occur when the write queue of a node is filling up. I'm assuming this is due to increased disk latency when the cloud SAN disk is busy with other operations. When the maximum write queue size is reached, the slow node isn't accepting writes any more and sends out flow control messages to its peers to stop replicating (I'm assuming this leads to messages being queued in their send queue). The queue sizes are monitored by the PMM tool and we can setup alerts based on a configured maximum write queue size or when we see "flow control messages".
|
||||
|
||||
We found that managed SSDs of 1TB in size performed better than a RAID 10 array of four 128GB SSDs. The latency of the 1TB SSDs was stable around 8ms, while we have observed latency spikes up to 64ms on the smaller SSDs. The disk load on the slowest node in terms of disk latency was around 6-8 outstanding writes during the last phase of the test. Setting up a RAID 10 was a mistake, for best performance we should have used a RAID 0 configuration, since the Azure disks are replicated.
|
||||
|
||||
![disk load](diskload.png)
|
||||
|
||||
### Recommended Cloud Server Configuration
|
||||
|
||||
We recommend `Standard DS13 v2 (8 cores, 56 GB memory)` servers with 1 TB managed SSD disks attached. To make the setup more cost effective, we can run on more affordable cloud instances, when we have lower demands in terms of throughput. The optimum is yet to be found. It is possible to upgrade or downgrade the nodes of the cluster, one node at a time.
|
||||
|
||||
Be prepared to kill and replace the slowest node of the cluster, especially in the cloud, since the Galera cluster will not perform better than the slowest node. The same goes for SAN disks. If you are unlucky and your disk has high latency, try replacing it with a new one. You may get better performance with the new disk.
|
||||
|
||||
### Disk upgrade using LVM
|
||||
|
||||
We recommend using LVM in production for convenience and flexibility. During our long running test we performed a hot disk upgrade using LVM.
|
76
docs/source/design/template/design.md
Normal file
@ -0,0 +1,76 @@
|
||||
# Design doc template
|
||||
|
||||
## Overview
|
||||
|
||||
Please read the [Design Review Process](../design-review-process.md) before completing a design.
|
||||
|
||||
Each section of the document should be at the second level (two hashes at the start of a line).
|
||||
|
||||
This section should describe the desired change or feature, along with background on why it's needed and what problem
|
||||
it solves.
|
||||
|
||||
An outcome of the design document should be an implementation plan that defines JIRA stories and tasks to be completed
|
||||
to produce shippable, demonstrable, executable code.
|
||||
|
||||
Please complete and/or remove section headings as appropriate to the design being proposed. These are provided as
|
||||
guidance and to structure the design in a consistent and coherent manner.
|
||||
|
||||
## Background
|
||||
|
||||
Description of existing solution (if any) and/or rationale for requirement.
|
||||
|
||||
* Reference(s) to discussions held elsewhere (slack, wiki, etc).
|
||||
* Definitions, acronyms and abbreviations
|
||||
|
||||
## Goals
|
||||
|
||||
What's in scope to be solved.
|
||||
|
||||
## Non-goals
|
||||
|
||||
What won't be tackled as part of this design, either because it's not needed/wanted, or because it will be tackled later
|
||||
as part of a separate design effort. Figuring out what you will *not* do is frequently a useful exercise.
|
||||
|
||||
## Timeline
|
||||
|
||||
* Is this a short, medium or long-term solution?
|
||||
* Where short-term design, is this evolvable / extensible or stop-gap (eg. potentially throwaway)?
|
||||
|
||||
## Requirements
|
||||
|
||||
* Reference(s) to any of following:
|
||||
* Captured Product Backlog JIRA entry
|
||||
* Internal White Paper feature item and/or visionary feature
|
||||
* Project related requirement (POC, RFP, Pilot, Prototype) from
|
||||
* Internal Incubator / Accelerator project
|
||||
* Direct from Customer, ISV, SI, Partner
|
||||
* Use Cases
|
||||
* Assumptions
|
||||
|
||||
## Design Decisions
|
||||
|
||||
List of design decisions identified in defining the target solution.
|
||||
|
||||
For each item, please complete the attached [Design Decision template](decisions/decision.html)
|
||||
|
||||
Use the ``.. toctree::`` feature to list out the design decision docs here (see the source of this file for an example).
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
decisions/decision.md
|
||||
|
||||
## Design
|
||||
|
||||
Think about:
|
||||
|
||||
* Public API, backwards compatibility impact.
|
||||
* UI requirements, if any. Illustrate with UI Mockups and/or wireframes.
|
||||
* Data model & serialization impact and changes required.
|
||||
* Infrastructure services: persistence (schemas), messaging.
|
||||
* Impact on performance, scalability, high availability
|
||||
* Versioning, upgradability, migration=
|
||||
* Management: audit, alerting, monitoring, backup/recovery, archiving
|
||||
* Data privacy, authentication, access control
|
||||
* Logging
|
||||
* Testability
|
@ -64,6 +64,7 @@ We look forward to seeing what you can do with Corda!
|
||||
design/hadr/design.md
|
||||
design/kafka-notary/design.md
|
||||
design/monitoring-management/design.md
|
||||
design/notary-service-ha/design.md
|
||||
|
||||
.. toctree::
|
||||
:caption: Participate
|
||||
|
@ -269,8 +269,8 @@ SecureHash
|
||||
~~~~~~~~~~
|
||||
A parameter of type ``SecureHash`` can be written as a hexadecimal string: ``F69A7626ACC27042FEEAE187E6BFF4CE666E6F318DC2B32BE9FAF87DF687930C``
|
||||
|
||||
OpaqueBytes
|
||||
~~~~~~~~~~~
|
||||
OpaqueBytes and SerializedBytes
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
A parameter of type ``OpaqueBytes`` can be provided as a string in Base64.
|
||||
|
||||
PublicKey and CompositeKey
|
||||
|
@ -133,8 +133,6 @@ class RpcExceptionHandlingProxy(private val delegate: SecureCordaRPCOps) : Corda
|
||||
|
||||
override fun isFlowsDrainingModeEnabled() = wrap(delegate::isFlowsDrainingModeEnabled)
|
||||
|
||||
override fun killFlow(id: StateMachineRunId) = wrap { delegate.killFlow(id) }
|
||||
|
||||
override fun shutdown() = wrap(delegate::shutdown)
|
||||
|
||||
private fun <RESULT> wrap(call: () -> RESULT): RESULT {
|
||||
|