Mirror of https://github.com/corda/corda.git, synced 2024-12-18 20:47:57 +00:00
CORDA-3009 - Migrate identity service to use to string short (#5217)
* migrate PersistentIdentityService to use key.toStringShort(); update the definition of the PublicKeyToExternalId mapping to allow fast lookup by externalId/publicKey
* fix misspelled table name
* add test of migration script
* add design document for the proposal to move IdentityService to using the correct PK.toStringShort() method for hashing a public key
* add Enterprise testing considerations to the design
* address review comments
* fix compilation errors
* modify PublicKeyToOwningIdentityCache to use toStringShort() as its lookup key
* address Szymon's code review comments
Parent commit: 7f89577f83 — this commit: e35c0c1df7
@@ -243,7 +243,7 @@ open class StringToMethodCallParser<in T : Any> @JvmOverloads constructor(
if (args.parameterCount == 0) {
Pair(name, "")
} else {
methodParamNames[name]?. let { params ->
methodParamNames[name]?.let { params ->
val typeNames = args.parameters.map { it.type.simpleName }
val paramTypes = params.zip(typeNames)
val paramNames = paramTypes.joinToString(", ") { "${it.first}: ${it.second}" }

@@ -153,5 +153,4 @@ class CordaRPCClientReconnectionTest {
}
}
}

}
@@ -66,7 +66,7 @@ class CordappSmokeTest {

@Before
fun setUp() {
notary = factory.create(notaryConfig)
notary = factory.create(notaryConfig)
}

@After
@@ -6,8 +6,8 @@ import net.corda.testing.core.SerializationEnvironmentRule;
import org.junit.Rule;
import org.junit.Test;

import static net.corda.core.serialization.internal.CheckpointSerializationAPIKt.checkpointSerialize;
import static net.corda.core.serialization.SerializationAPIKt.serialize;
import static net.corda.core.serialization.internal.CheckpointSerializationAPIKt.checkpointSerialize;
import static org.junit.Assert.assertNull;

/**
@ -120,19 +120,19 @@ class ConstraintsPropagationTests {
|
||||
fun `Happy path for Hash to Signature Constraint migration`() {
|
||||
val cordapps = (ledgerServices.cordappProvider as MockCordappProvider).cordapps
|
||||
val cordappAttachmentIds =
|
||||
cordapps.map { cordapp ->
|
||||
val unsignedAttId =
|
||||
cordapp.jarPath.toPath().inputStream().use { unsignedJarStream ->
|
||||
ledgerServices.attachments.importContractAttachment(cordapp.contractClassNames, "rpc", unsignedJarStream,null)
|
||||
}
|
||||
val jarAndSigner = ContractJarTestUtils.signContractJar(cordapp.jarPath, copyFirst = true, keyStoreDir = keyStoreDir.path)
|
||||
val signedJar = jarAndSigner.first
|
||||
val signedAttId =
|
||||
signedJar.inputStream().use { signedJarStream ->
|
||||
ledgerServices.attachments.importContractAttachment(cordapp.contractClassNames, "rpc", signedJarStream,null, listOf(jarAndSigner.second))
|
||||
}
|
||||
Pair(unsignedAttId, signedAttId)
|
||||
}
|
||||
cordapps.map { cordapp ->
|
||||
val unsignedAttId =
|
||||
cordapp.jarPath.toPath().inputStream().use { unsignedJarStream ->
|
||||
ledgerServices.attachments.importContractAttachment(cordapp.contractClassNames, "rpc", unsignedJarStream, null)
|
||||
}
|
||||
val jarAndSigner = ContractJarTestUtils.signContractJar(cordapp.jarPath, copyFirst = true, keyStoreDir = keyStoreDir.path)
|
||||
val signedJar = jarAndSigner.first
|
||||
val signedAttId =
|
||||
signedJar.inputStream().use { signedJarStream ->
|
||||
ledgerServices.attachments.importContractAttachment(cordapp.contractClassNames, "rpc", signedJarStream, null, listOf(jarAndSigner.second))
|
||||
}
|
||||
Pair(unsignedAttId, signedAttId)
|
||||
}
|
||||
|
||||
val unsignedAttachmentId = cordappAttachmentIds.first().first
|
||||
println("Unsigned: $unsignedAttachmentId")
|
||||
@ -142,10 +142,10 @@ class ConstraintsPropagationTests {
|
||||
ledgerServices.ledger(DUMMY_NOTARY) {
|
||||
ledgerServices.recordTransaction(
|
||||
unverifiedTransaction {
|
||||
attachment(Cash.PROGRAM_ID, unsignedAttachmentId)
|
||||
output(Cash.PROGRAM_ID, "c1", DUMMY_NOTARY, null, HashAttachmentConstraint(unsignedAttachmentId), Cash.State(1000.POUNDS `issued by` ALICE_PARTY.ref(1), ALICE_PARTY))
|
||||
command(ALICE_PUBKEY, Cash.Commands.Issue())
|
||||
})
|
||||
attachment(Cash.PROGRAM_ID, unsignedAttachmentId)
|
||||
output(Cash.PROGRAM_ID, "c1", DUMMY_NOTARY, null, HashAttachmentConstraint(unsignedAttachmentId), Cash.State(1000.POUNDS `issued by` ALICE_PARTY.ref(1), ALICE_PARTY))
|
||||
command(ALICE_PUBKEY, Cash.Commands.Issue())
|
||||
})
|
||||
unverifiedTransaction {
|
||||
attachment(Cash.PROGRAM_ID, signedAttachmentId)
|
||||
input("c1")
|
||||
@ -316,7 +316,7 @@ class ConstraintsPropagationTests {
|
||||
|
||||
// network parameters
|
||||
val netParams = testNetworkParameters(minimumPlatformVersion = 4,
|
||||
packageOwnership = mapOf( "net.corda.core.contracts" to ALICE_PARTY.owningKey))
|
||||
packageOwnership = mapOf("net.corda.core.contracts" to ALICE_PARTY.owningKey))
|
||||
|
||||
ledgerServices.attachments.importContractAttachment(attachmentIdSigned, attachmentSigned)
|
||||
ledgerServices.attachments.importContractAttachment(attachmentIdUnsigned, attachmentUnsigned)
|
||||
@ -371,23 +371,24 @@ class ConstraintsPropagationTests {
|
||||
assertFailsWith<IllegalArgumentException> { AutomaticPlaceholderConstraint.canBeTransitionedFrom(AutomaticPlaceholderConstraint, attachment) }
|
||||
}
|
||||
|
||||
private fun MockServices.recordTransaction(wireTransaction: WireTransaction){
|
||||
private fun MockServices.recordTransaction(wireTransaction: WireTransaction) {
|
||||
val nodeKey = ALICE_PUBKEY
|
||||
val sigs = listOf(keyManagementService.sign(
|
||||
SignableData(wireTransaction.id, SignatureMetadata(4, Crypto.findSignatureScheme(nodeKey).schemeNumberID)), nodeKey))
|
||||
recordTransactions(SignedTransaction(wireTransaction, sigs))
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `Input state contract version may be incompatible with lower version`() {
|
||||
ledgerServices.ledger(DUMMY_NOTARY) {
|
||||
ledgerServices.recordTransaction(transaction {
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.allOnesHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "2"))
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.allOnesHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "2"))
|
||||
output(Cash.PROGRAM_ID, "c1", DUMMY_NOTARY, null, SignatureAttachmentConstraint(hashToSignatureConstraintsKey), Cash.State(1000.POUNDS `issued by` ALICE_PARTY.ref(1), ALICE_PARTY))
|
||||
command(ALICE_PUBKEY, Cash.Commands.Issue())
|
||||
verifies()
|
||||
})
|
||||
transaction {
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.zeroHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "1"))
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.zeroHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "1"))
|
||||
input("c1")
|
||||
output(Cash.PROGRAM_ID, "c2", DUMMY_NOTARY, null, SignatureAttachmentConstraint(hashToSignatureConstraintsKey), Cash.State(1000.POUNDS `issued by` ALICE_PARTY.ref(1), BOB_PARTY))
|
||||
command(ALICE_PUBKEY, Cash.Commands.Move())
|
||||
@ -400,13 +401,13 @@ class ConstraintsPropagationTests {
|
||||
fun `Input state contract version is compatible with the same version`() {
|
||||
ledgerServices.ledger(DUMMY_NOTARY) {
|
||||
ledgerServices.recordTransaction(transaction {
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.allOnesHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "3"))
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.allOnesHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "3"))
|
||||
output(Cash.PROGRAM_ID, "c1", DUMMY_NOTARY, null, SignatureAttachmentConstraint(hashToSignatureConstraintsKey), Cash.State(1000.POUNDS `issued by` ALICE_PARTY.ref(1), ALICE_PARTY))
|
||||
command(ALICE_PUBKEY, Cash.Commands.Issue())
|
||||
verifies()
|
||||
})
|
||||
transaction {
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.zeroHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "3"))
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.zeroHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "3"))
|
||||
input("c1")
|
||||
output(Cash.PROGRAM_ID, "c2", DUMMY_NOTARY, null, SignatureAttachmentConstraint(hashToSignatureConstraintsKey), Cash.State(1000.POUNDS `issued by` ALICE_PARTY.ref(1), BOB_PARTY))
|
||||
command(ALICE_PUBKEY, Cash.Commands.Move())
|
||||
@ -419,13 +420,13 @@ class ConstraintsPropagationTests {
|
||||
fun `Input state contract version is compatible with higher version`() {
|
||||
ledgerServices.ledger(DUMMY_NOTARY) {
|
||||
ledgerServices.recordTransaction(transaction {
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.allOnesHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "1"))
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.allOnesHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "1"))
|
||||
output(Cash.PROGRAM_ID, "c1", DUMMY_NOTARY, null, SignatureAttachmentConstraint(hashToSignatureConstraintsKey), Cash.State(1000.POUNDS `issued by` ALICE_PARTY.ref(1), ALICE_PARTY))
|
||||
command(ALICE_PUBKEY, Cash.Commands.Issue())
|
||||
verifies()
|
||||
})
|
||||
transaction {
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.zeroHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "2"))
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.zeroHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "2"))
|
||||
input("c1")
|
||||
output(Cash.PROGRAM_ID, "c2", DUMMY_NOTARY, null, SignatureAttachmentConstraint(hashToSignatureConstraintsKey), Cash.State(1000.POUNDS `issued by` ALICE_PARTY.ref(1), BOB_PARTY))
|
||||
command(ALICE_PUBKEY, Cash.Commands.Move())
|
||||
@ -438,13 +439,13 @@ class ConstraintsPropagationTests {
|
||||
fun `Input states contract version may be lower that current contract version`() {
|
||||
ledgerServices.ledger(DUMMY_NOTARY) {
|
||||
ledgerServices.recordTransaction(transaction {
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.allOnesHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "1"))
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.allOnesHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "1"))
|
||||
output(Cash.PROGRAM_ID, "c1", DUMMY_NOTARY, null, SignatureAttachmentConstraint(hashToSignatureConstraintsKey), Cash.State(1000.POUNDS `issued by` ALICE_PARTY.ref(1), ALICE_PARTY))
|
||||
command(ALICE_PUBKEY, Cash.Commands.Issue())
|
||||
verifies()
|
||||
})
|
||||
ledgerServices.recordTransaction(transaction {
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.zeroHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "2"))
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.zeroHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "2"))
|
||||
output(Cash.PROGRAM_ID, "c2", DUMMY_NOTARY, null, SignatureAttachmentConstraint(hashToSignatureConstraintsKey), Cash.State(1000.POUNDS `issued by` ALICE_PARTY.ref(1), ALICE_PARTY))
|
||||
command(ALICE_PUBKEY, Cash.Commands.Issue())
|
||||
verifies()
|
||||
@ -463,7 +464,7 @@ class ConstraintsPropagationTests {
|
||||
fun `Input state with contract version can be downgraded to no version`() {
|
||||
ledgerServices.ledger(DUMMY_NOTARY) {
|
||||
ledgerServices.recordTransaction(transaction {
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.allOnesHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "2"))
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.allOnesHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "2"))
|
||||
output(Cash.PROGRAM_ID, "c1", DUMMY_NOTARY, null, SignatureAttachmentConstraint(hashToSignatureConstraintsKey), Cash.State(1000.POUNDS `issued by` ALICE_PARTY.ref(1), ALICE_PARTY))
|
||||
command(ALICE_PUBKEY, Cash.Commands.Issue())
|
||||
verifies()
|
||||
@ -488,7 +489,7 @@ class ConstraintsPropagationTests {
|
||||
verifies()
|
||||
})
|
||||
transaction {
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.zeroHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "2"))
|
||||
attachment(Cash.PROGRAM_ID, SecureHash.zeroHash, listOf(hashToSignatureConstraintsKey), mapOf(Attributes.Name.IMPLEMENTATION_VERSION.toString() to "2"))
|
||||
input("c1")
|
||||
output(Cash.PROGRAM_ID, "c2", DUMMY_NOTARY, null, SignatureAttachmentConstraint(hashToSignatureConstraintsKey), Cash.State(1000.POUNDS `issued by` ALICE_PARTY.ref(1), BOB_PARTY))
|
||||
command(ALICE_PUBKEY, Cash.Commands.Move())
|
||||
|
@ -31,7 +31,6 @@ class X509NameConstraintsTest {
|
||||
private fun makeKeyStores(subjectName: X500Name, nameConstraints: NameConstraints): Pair<X509KeyStore, X509KeyStore> {
|
||||
val (rootCa, intermediateCa) = createDevIntermediateCaCertPath()
|
||||
|
||||
|
||||
val trustStore = X509KeyStore(storePassword).apply {
|
||||
setCertificate(X509Utilities.CORDA_ROOT_CA, rootCa.certificate)
|
||||
}
|
||||
|
@ -52,7 +52,7 @@ class AttachmentTests : WithMockNet {
|
||||
|
||||
// Get node one to run a flow to fetch it and insert it.
|
||||
assertThat(
|
||||
bobNode.startAttachmentFlow(id, alice),
|
||||
bobNode.startAttachmentFlow(id, alice),
|
||||
willReturn(noAttachments()))
|
||||
|
||||
// Verify it was inserted into node one's store.
|
||||
@ -63,7 +63,7 @@ class AttachmentTests : WithMockNet {
|
||||
aliceNode.dispose()
|
||||
|
||||
assertThat(
|
||||
bobNode.startAttachmentFlow(id, alice),
|
||||
bobNode.startAttachmentFlow(id, alice),
|
||||
willReturn(soleAttachment(attachment)))
|
||||
}
|
||||
|
||||
@ -73,7 +73,7 @@ class AttachmentTests : WithMockNet {
|
||||
|
||||
// Get node one to fetch a non-existent attachment.
|
||||
assertThat(
|
||||
bobNode.startAttachmentFlow(hash, alice),
|
||||
bobNode.startAttachmentFlow(hash, alice),
|
||||
willThrow(withRequestedHash(hash)))
|
||||
}
|
||||
|
||||
@ -101,7 +101,7 @@ class AttachmentTests : WithMockNet {
|
||||
|
||||
// Get n1 to fetch the attachment. Should receive corrupted bytes.
|
||||
assertThat(
|
||||
bobNode.startAttachmentFlow(id, badAlice),
|
||||
bobNode.startAttachmentFlow(id, badAlice),
|
||||
willThrow<FetchDataFlow.DownloadedVsRequestedDataMismatch>()
|
||||
)
|
||||
}
|
||||
@ -123,9 +123,9 @@ class AttachmentTests : WithMockNet {
|
||||
|
||||
//region Generators
|
||||
override fun makeNode(name: CordaX500Name) =
|
||||
mockNet.createPartyNode(makeUnique(name)).apply {
|
||||
registerInitiatedFlow(FetchAttachmentsResponse::class.java)
|
||||
}
|
||||
mockNet.createPartyNode(makeUnique(name)).apply {
|
||||
registerInitiatedFlow(FetchAttachmentsResponse::class.java)
|
||||
}
|
||||
|
||||
// Makes a node that doesn't do sanity checking at load time.
|
||||
private fun makeBadNode(name: CordaX500Name) = mockNet.createNode(
|
||||
@ -140,10 +140,10 @@ class AttachmentTests : WithMockNet {
|
||||
|
||||
//region Operations
|
||||
private fun TestStartedNode.importAttachment(attachment: ByteArray) =
|
||||
attachments.importAttachment(attachment.inputStream(), "test", null)
|
||||
.andRunNetwork()
|
||||
attachments.importAttachment(attachment.inputStream(), "test", null)
|
||||
.andRunNetwork()
|
||||
|
||||
private fun TestStartedNode.updateAttachment(attachment: NodeAttachmentService.DBAttachment) = database.transaction {
|
||||
private fun TestStartedNode.updateAttachment(attachment: NodeAttachmentService.DBAttachment) = database.transaction {
|
||||
session.update(attachment)
|
||||
}.andRunNetwork()
|
||||
|
||||
@ -151,19 +151,20 @@ class AttachmentTests : WithMockNet {
|
||||
InitiatingFetchAttachmentsFlow(otherSide, setOf(hash)))
|
||||
|
||||
private fun TestStartedNode.getAttachmentWithId(id: SecureHash) =
|
||||
attachments.openAttachment(id)!!
|
||||
attachments.openAttachment(id)!!
|
||||
//endregion
|
||||
|
||||
//region Matchers
|
||||
private fun noAttachments() = has(FetchDataFlow.Result<Attachment>::fromDisk, isEmpty)
|
||||
|
||||
private fun soleAttachment(attachment: Attachment) = has(FetchDataFlow.Result<Attachment>::fromDisk,
|
||||
hasSize(equalTo(1)) and
|
||||
hasElement(attachment))
|
||||
|
||||
private fun hashesTo(hash: SecureHash) = has<Attachment, SecureHash>(
|
||||
"hash",
|
||||
{ it.open().hash() },
|
||||
equalTo(hash))
|
||||
"hash",
|
||||
{ it.open().hash() },
|
||||
equalTo(hash))
|
||||
//endregion
|
||||
|
||||
}
|
||||
|
@ -5,12 +5,8 @@ import com.natpryce.hamkrest.assertion.assertThat
|
||||
import net.corda.core.contracts.Command
|
||||
import net.corda.core.contracts.StateAndContract
|
||||
import net.corda.core.contracts.requireThat
|
||||
import net.corda.core.identity.*
|
||||
import net.corda.core.flows.*
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.identity.excludeHostNode
|
||||
import net.corda.core.identity.groupAbstractPartyByWellKnownParty
|
||||
import net.corda.core.identity.*
|
||||
import net.corda.core.node.services.IdentityService
|
||||
import net.corda.core.transactions.SignedTransaction
|
||||
import net.corda.core.transactions.TransactionBuilder
|
||||
@ -106,7 +102,8 @@ class CollectSignaturesFlowTests : WithContracts {
|
||||
val keysToLookup = listOf(bConfidentialIdentity1.owningKey, bConfidentialIdentity2.owningKey, cConfidentialIdentity1.owningKey)
|
||||
val keysToKeepAnonymous = listOf(cConfidentialIdentity2.owningKey)
|
||||
|
||||
val future = aliceNode.startFlow(MixAndMatchAnonymousSessionTestFlow(owners, keysToLookup.toSet(), keysToKeepAnonymous.toSet())).resultFuture
|
||||
val future = aliceNode.startFlow(MixAndMatchAnonymousSessionTestFlow(owners, keysToLookup.toSet(), keysToKeepAnonymous.toSet()))
|
||||
.resultFuture
|
||||
mockNet.runNetwork()
|
||||
val stx = future.get()
|
||||
val missingSigners = stx.getMissingSigners()
|
||||
@ -214,7 +211,6 @@ class AnonymousSessionTestFlow(private val cis: List<PartyAndCertificate>) : Flo
|
||||
.addOutputState(state)
|
||||
.addCommand(create, cis.map { it.owningKey })
|
||||
|
||||
|
||||
val ourKey = cis.single { it.name == ourIdentity.name }.owningKey
|
||||
val signedByUsTx = serviceHub.signInitialTransaction(txBuilder, ourKey)
|
||||
val sessionsToCollectFrom = cis.filter { it.name != ourIdentity.name }.map { initiateFlow(AnonymousParty(it.owningKey)) }
|
||||
@ -242,7 +238,7 @@ class MixAndMatchAnonymousSessionTestFlow(private val cis: List<PartyAndCertific
|
||||
@Suspendable
|
||||
override fun call(): SignedTransaction {
|
||||
|
||||
for (ci in cis) {
|
||||
for (ci in cis) {
|
||||
if (ci.name != ourIdentity.name) {
|
||||
(serviceHub.identityService as IdentityServiceInternal).verifyAndRegisterIdentity(ci)
|
||||
}
|
||||
@ -253,7 +249,6 @@ class MixAndMatchAnonymousSessionTestFlow(private val cis: List<PartyAndCertific
|
||||
.addOutputState(state)
|
||||
.addCommand(create, cis.map { it.owningKey })
|
||||
|
||||
|
||||
val ourKey = cis.single { it.name == ourIdentity.name }.owningKey
|
||||
val signedByUsTx = serviceHub.signInitialTransaction(txBuilder, ourKey)
|
||||
|
||||
|
@ -75,7 +75,7 @@ class ContractUpgradeFlowRPCTest : WithContracts, WithFinality {
|
||||
|
||||
// Party A initiates contract upgrade flow, expected to succeed this time.
|
||||
assertThat(
|
||||
rpcA.initiateDummyContractUpgrade(atx),
|
||||
rpcA.initiateDummyContractUpgrade(atx),
|
||||
willReturn(
|
||||
aliceNode.hasDummyContractUpgradeTransaction()
|
||||
and bobNode.hasDummyContractUpgradeTransaction()))
|
||||
@ -113,23 +113,23 @@ class ContractUpgradeFlowRPCTest : WithContracts, WithFinality {
|
||||
private fun TestStartedNode.hasDummyContractUpgradeTransaction() =
|
||||
hasContractUpgradeTransaction<DummyContract.State, DummyContractV2.State>()
|
||||
|
||||
private inline fun <reified FROM : Any, reified TO: Any> TestStartedNode.hasContractUpgradeTransaction() =
|
||||
has<StateAndRef<ContractState>, ContractUpgradeLedgerTransaction>(
|
||||
"a contract upgrade transaction",
|
||||
{ getContractUpgradeTransaction(it) },
|
||||
isUpgrade<FROM, TO>())
|
||||
private inline fun <reified FROM : Any, reified TO : Any> TestStartedNode.hasContractUpgradeTransaction() =
|
||||
has<StateAndRef<ContractState>, ContractUpgradeLedgerTransaction>(
|
||||
"a contract upgrade transaction",
|
||||
{ getContractUpgradeTransaction(it) },
|
||||
isUpgrade<FROM, TO>())
|
||||
|
||||
private fun TestStartedNode.getContractUpgradeTransaction(state: StateAndRef<ContractState>) =
|
||||
services.validatedTransactions.getTransaction(state.ref.txhash)!!
|
||||
.resolveContractUpgradeTransaction(services)
|
||||
services.validatedTransactions.getTransaction(state.ref.txhash)!!
|
||||
.resolveContractUpgradeTransaction(services)
|
||||
|
||||
private inline fun <reified FROM : Any, reified TO : Any> isUpgrade() =
|
||||
isUpgradeFrom<FROM>() and isUpgradeTo<TO>()
|
||||
|
||||
private inline fun <reified T: Any> isUpgradeFrom() =
|
||||
private inline fun <reified T : Any> isUpgradeFrom() =
|
||||
has<ContractUpgradeLedgerTransaction, Any>("input data", { it.inputs.single().state.data }, isA<T>(anything))
|
||||
|
||||
private inline fun <reified T: Any> isUpgradeTo() =
|
||||
private inline fun <reified T : Any> isUpgradeTo() =
|
||||
has<ContractUpgradeLedgerTransaction, Any>("output data", { it.outputs.single().data }, isA<T>(anything))
|
||||
//endregion
|
||||
}
|
||||
|
@ -61,12 +61,12 @@ class FastThreadLocalTest {
|
||||
@Test
|
||||
fun `FastThreadLocal with FastThreadLocalThread is not fiber-local`() =
|
||||
scheduled(3, ::FastThreadLocalThread) {
|
||||
val threadLocal = object : FastThreadLocal<ExpensiveObj>() {
|
||||
override fun initialValue() = ExpensiveObj()
|
||||
}
|
||||
runFibers(100, threadLocal::get) // Return value could be anything.
|
||||
assertThat(expensiveObjCount.get(), lessThanOrEqualTo(3))
|
||||
}
|
||||
val threadLocal = object : FastThreadLocal<ExpensiveObj>() {
|
||||
override fun initialValue() = ExpensiveObj()
|
||||
}
|
||||
runFibers(100, threadLocal::get) // Return value could be anything.
|
||||
assertThat(expensiveObjCount.get(), lessThanOrEqualTo(3))
|
||||
}
|
||||
|
||||
/** @return the number of times a different expensive object was obtained post-suspend. */
|
||||
private fun SchedulerContext.runFibers(fiberCount: Int, threadLocalGet: () -> ExpensiveObj): Int {
|
||||
|
@ -3,12 +3,12 @@ package net.corda.coretests.flows
|
||||
import com.natpryce.hamkrest.and
|
||||
import com.natpryce.hamkrest.assertion.assertThat
|
||||
import net.corda.core.flows.FinalityFlow
|
||||
import net.corda.coretests.flows.WithFinality.FinalityInvoker
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.internal.cordapp.CordappResolver
|
||||
import net.corda.core.transactions.SignedTransaction
|
||||
import net.corda.core.transactions.TransactionBuilder
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.coretests.flows.WithFinality.FinalityInvoker
|
||||
import net.corda.finance.POUNDS
|
||||
import net.corda.finance.contracts.asset.Cash
|
||||
import net.corda.finance.issuedBy
|
||||
@ -40,7 +40,7 @@ class FinalityFlowTests : WithFinality {
|
||||
val stx = aliceNode.issuesCashTo(bob)
|
||||
|
||||
assertThat(
|
||||
aliceNode.finalise(stx, bob.info.singleIdentity()),
|
||||
aliceNode.finalise(stx, bob.info.singleIdentity()),
|
||||
willReturn(
|
||||
requiredSignatures(1)
|
||||
and visibleTo(bob)))
|
||||
@ -52,7 +52,7 @@ class FinalityFlowTests : WithFinality {
|
||||
val stx = aliceNode.issuesCashTo(CHARLIE)
|
||||
|
||||
assertThat(
|
||||
aliceNode.finalise(stx),
|
||||
aliceNode.finalise(stx),
|
||||
willThrow<IllegalArgumentException>())
|
||||
}
|
||||
|
||||
|
@ -9,9 +9,9 @@ import net.corda.core.utilities.OpaqueBytes
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.finance.GBP
|
||||
import net.corda.finance.POUNDS
|
||||
import net.corda.finance.workflows.getCashBalance
|
||||
import net.corda.finance.flows.CashIssueAndPaymentFlow
|
||||
import net.corda.finance.flows.CashPaymentReceiverFlow
|
||||
import net.corda.finance.workflows.getCashBalance
|
||||
import net.corda.node.services.statemachine.StaffedFlowHospital.*
|
||||
import net.corda.node.services.statemachine.StaffedFlowHospital.MedicalRecord.Flow
|
||||
import net.corda.testing.core.ALICE_NAME
|
||||
|
@ -59,7 +59,8 @@ class ReferencedStatesFlowTests {
|
||||
|
||||
// 4. Try to use the old reference state. This will throw a NotaryException.
|
||||
val nodeOneIdentity = nodes[1].info.legalIdentities.first()
|
||||
val useRefTx = nodes[1].services.startFlow(WithReferencedStatesFlow { UseRefState(nodeOneIdentity, newRefState.state.data.linearId) }).resultFuture
|
||||
val useRefTx = nodes[1].services.startFlow(WithReferencedStatesFlow { UseRefState(nodeOneIdentity, newRefState.state.data.linearId) })
|
||||
.resultFuture
|
||||
|
||||
// 5. Share the update reference state.
|
||||
nodes[0].services.startFlow(Initiator(updatedRefState)).resultFuture.getOrThrow()
|
||||
@ -75,7 +76,8 @@ class ReferencedStatesFlowTests {
|
||||
val newRefTx = nodes[0].services.startFlow(CreateRefState()).resultFuture.getOrThrow()
|
||||
val newRefState = newRefTx.tx.outRefsOfType<RefState.State>().single()
|
||||
// 2. Use the "newRefState" a transaction involving another party (nodes[1]) which creates a new state. They should store the new state and the reference state.
|
||||
val newTx = nodes[0].services.startFlow(UseRefState(nodes[1].info.legalIdentities.first(), newRefState.state.data.linearId)).resultFuture.getOrThrow()
|
||||
val newTx = nodes[0].services.startFlow(UseRefState(nodes[1].info.legalIdentities.first(), newRefState.state.data.linearId))
|
||||
.resultFuture.getOrThrow()
|
||||
// Wait until node 1 stores the new tx.
|
||||
nodes[1].services.validatedTransactions.trackTransaction(newTx.id).getOrThrow()
|
||||
// Check that nodes[1] has finished recording the transaction (and updating the vault.. hopefully!).
|
||||
@ -106,7 +108,8 @@ class ReferencedStatesFlowTests {
|
||||
val newRefTx = nodes[0].services.startFlow(CreateRefState()).resultFuture.getOrThrow()
|
||||
val newRefState = newRefTx.tx.outRefsOfType<RefState.State>().single()
|
||||
// 2. Use the "newRefState" a transaction involving another party (nodes[1]) which creates a new state. They should store the new state and the reference state.
|
||||
val newTx = nodes[0].services.startFlow(UseRefState(nodes[1].info.legalIdentities.first(), newRefState.state.data.linearId)).resultFuture.getOrThrow()
|
||||
val newTx = nodes[0].services.startFlow(UseRefState(nodes[1].info.legalIdentities.first(), newRefState.state.data.linearId))
|
||||
.resultFuture.getOrThrow()
|
||||
// Wait until node 1 stores the new tx.
|
||||
nodes[1].services.validatedTransactions.trackTransaction(newTx.id).getOrThrow()
|
||||
// Check that nodes[1] has finished recording the transaction (and updating the vault.. hopefully!).
|
||||
|
@ -50,8 +50,8 @@ interface WithContracts : WithMockNet {
|
||||
startFlowAndRunNetwork(ContractUpgradeFlow.Initiate(stateAndRef, toClass.java))
|
||||
|
||||
fun <T : UpgradedContract<*, *>> TestStartedNode.authoriseContractUpgrade(
|
||||
tx: SignedTransaction, toClass: KClass<T>) =
|
||||
authoriseContractUpgrade(tx.tx.outRef(0), toClass)
|
||||
tx: SignedTransaction, toClass: KClass<T>) =
|
||||
authoriseContractUpgrade(tx.tx.outRef(0), toClass)
|
||||
|
||||
fun <T : UpgradedContract<*, *>> TestStartedNode.authoriseContractUpgrade(
|
||||
stateAndRef: StateAndRef<ContractState>, toClass: KClass<T>) =
|
||||
@ -60,28 +60,28 @@ interface WithContracts : WithMockNet {
|
||||
)
|
||||
|
||||
fun TestStartedNode.deauthoriseContractUpgrade(tx: SignedTransaction) = startFlow(
|
||||
ContractUpgradeFlow.Deauthorise(tx.tx.outRef<ContractState>(0).ref)
|
||||
ContractUpgradeFlow.Deauthorise(tx.tx.outRef<ContractState>(0).ref)
|
||||
)
|
||||
|
||||
// RPC versions of the above
|
||||
fun <S : ContractState, T : UpgradedContract<S, *>> CordaRPCOps.initiateContractUpgrade(
|
||||
tx: SignedTransaction, toClass: KClass<T>) =
|
||||
startFlow(
|
||||
{ stateAndRef, upgrade -> ContractUpgradeFlow.Initiate(stateAndRef, upgrade) },
|
||||
tx.tx.outRef<S>(0),
|
||||
toClass.java)
|
||||
.andRunNetwork()
|
||||
tx: SignedTransaction, toClass: KClass<T>) =
|
||||
startFlow(
|
||||
{ stateAndRef, upgrade -> ContractUpgradeFlow.Initiate(stateAndRef, upgrade) },
|
||||
tx.tx.outRef<S>(0),
|
||||
toClass.java)
|
||||
.andRunNetwork()
|
||||
|
||||
fun <S : ContractState, T : UpgradedContract<S, *>> CordaRPCOps.authoriseContractUpgrade(
|
||||
tx: SignedTransaction, toClass: KClass<T>) =
|
||||
startFlow(
|
||||
{ stateAndRef, upgrade -> ContractUpgradeFlow.Authorise(stateAndRef, upgrade) },
|
||||
tx.tx.outRef<S>(0),
|
||||
toClass.java)
|
||||
tx: SignedTransaction, toClass: KClass<T>) =
|
||||
startFlow(
|
||||
{ stateAndRef, upgrade -> ContractUpgradeFlow.Authorise(stateAndRef, upgrade) },
|
||||
tx.tx.outRef<S>(0),
|
||||
toClass.java)
|
||||
|
||||
fun CordaRPCOps.deauthoriseContractUpgrade(tx: SignedTransaction) =
|
||||
startFlow(
|
||||
{ stateRef -> ContractUpgradeFlow.Deauthorise(stateRef) },
|
||||
tx.tx.outRef<ContractState>(0).ref)
|
||||
startFlow(
|
||||
{ stateRef -> ContractUpgradeFlow.Deauthorise(stateRef) },
|
||||
tx.tx.outRef<ContractState>(0).ref)
|
||||
//region
|
||||
}
|
@ -30,7 +30,7 @@ interface WithMockNet {
|
||||
/**
|
||||
* Run the mock network before proceeding
|
||||
*/
|
||||
fun <T: Any> T.andRunNetwork(): T = apply { mockNet.runNetwork() }
|
||||
fun <T : Any> T.andRunNetwork(): T = apply { mockNet.runNetwork() }
|
||||
|
||||
//region Operations
|
||||
/**
|
||||
@ -42,8 +42,8 @@ interface WithMockNet {
|
||||
/**
|
||||
* Retrieve the sole instance of a state of a particular class from the node's vault
|
||||
*/
|
||||
fun <S: ContractState> TestStartedNode.getStateFromVault(stateClass: KClass<S>) =
|
||||
services.vaultService.queryBy(stateClass.java).states.single()
|
||||
fun <S : ContractState> TestStartedNode.getStateFromVault(stateClass: KClass<S>) =
|
||||
services.vaultService.queryBy(stateClass.java).states.single()
|
||||
|
||||
/**
|
||||
* Start a flow
|
||||
@ -57,12 +57,12 @@ interface WithMockNet {
|
||||
startFlow(logic).andRunNetwork()
|
||||
|
||||
fun TestStartedNode.createConfidentialIdentity(party: Party) =
|
||||
services.keyManagementService.freshKeyAndCert(
|
||||
services.myInfo.legalIdentitiesAndCerts.single { it.name == party.name },
|
||||
false)
|
||||
services.keyManagementService.freshKeyAndCert(
|
||||
services.myInfo.legalIdentitiesAndCerts.single { it.name == party.name },
|
||||
false)
|
||||
|
||||
fun TestStartedNode.verifyAndRegister(identity: PartyAndCertificate) =
|
||||
services.identityService.verifyAndRegisterIdentity(identity)
|
||||
services.identityService.verifyAndRegisterIdentity(identity)
|
||||
|
||||
//endregion
|
||||
|
||||
@ -85,7 +85,7 @@ interface WithMockNet {
|
||||
* The exception has the expected error message
|
||||
*/
|
||||
fun errorMessage(expected: String) = has(
|
||||
Exception::message,
|
||||
equalTo(expected))
|
||||
Exception::message,
|
||||
equalTo(expected))
|
||||
//endregion
|
||||
}
|
@ -28,7 +28,7 @@ class CertRoleTests {
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `check cert roles verify for various cert hierarchies`(){
|
||||
fun `check cert roles verify for various cert hierarchies`() {
|
||||
|
||||
// Testing for various certificate hierarchies (with or without NodeCA).
|
||||
// ROOT -> Intermediate Root -> Doorman -> NodeCA -> Legal Identity cert -> Confidential key cert
|
||||
|
@ -430,6 +430,7 @@ class ResolveTransactionsFlowTest {
|
||||
subFlow(resolveTransactionsFlow)
|
||||
}
|
||||
}
|
||||
|
||||
@Suppress("unused")
|
||||
@InitiatedBy(TestFlow::class)
|
||||
class TestResponseFlow(private val otherSideSession: FlowSession) : FlowLogic<Void?>() {
|
||||
@ -447,6 +448,7 @@ class ResolveTransactionsFlowTest {
|
||||
subFlow(DataVendingFlow(session, toVend))
|
||||
}
|
||||
}
|
||||
|
||||
@Suppress("unused")
|
||||
@InitiatedBy(TestNoRightsVendingFlow::class)
|
||||
private open class TestResponseResolveNoRightsFlow(val otherSideSession: FlowSession) : FlowLogic<Unit>() {
|
||||
@ -454,7 +456,8 @@ class ResolveTransactionsFlowTest {
|
||||
override fun call() {
|
||||
val noRightsTx = otherSideSession.receive<SignedTransaction>().unwrap { it }
|
||||
otherSideSession.receive<Any>().unwrap { it }
|
||||
otherSideSession.sendAndReceive<Any>(FetchDataFlow.Request.Data(NonEmptySet.of(noRightsTx.inputs.first().txhash), FetchDataFlow.DataType.TRANSACTION)).unwrap { it }
|
||||
otherSideSession.sendAndReceive<Any>(FetchDataFlow.Request.Data(NonEmptySet.of(noRightsTx.inputs.first().txhash), FetchDataFlow.DataType.TRANSACTION))
|
||||
.unwrap { it }
|
||||
otherSideSession.send(FetchDataFlow.Request.End)
|
||||
}
|
||||
}
|
||||
@ -468,6 +471,7 @@ class ResolveTransactionsFlowTest {
|
||||
subFlow(DataVendingFlow(session, tx))
|
||||
}
|
||||
}
|
||||
|
||||
@Suppress("unused")
|
||||
@InitiatedBy(TestResolveTwiceVendingFlow::class)
|
||||
private open class TestResponseResolveTwiceFlow(val otherSideSession: FlowSession) : FlowLogic<Unit>() {
|
||||
@ -475,8 +479,10 @@ class ResolveTransactionsFlowTest {
|
||||
override fun call() {
|
||||
val tx = otherSideSession.receive<SignedTransaction>().unwrap { it }
|
||||
val parent1 = tx.inputs.first().txhash
|
||||
otherSideSession.sendAndReceive<Any>(FetchDataFlow.Request.Data(NonEmptySet.of(parent1), FetchDataFlow.DataType.TRANSACTION)).unwrap { it }
|
||||
otherSideSession.sendAndReceive<Any>(FetchDataFlow.Request.Data(NonEmptySet.of(parent1), FetchDataFlow.DataType.TRANSACTION)).unwrap { it }
|
||||
otherSideSession.sendAndReceive<Any>(FetchDataFlow.Request.Data(NonEmptySet.of(parent1), FetchDataFlow.DataType.TRANSACTION))
|
||||
.unwrap { it }
|
||||
otherSideSession.sendAndReceive<Any>(FetchDataFlow.Request.Data(NonEmptySet.of(parent1), FetchDataFlow.DataType.TRANSACTION))
|
||||
.unwrap { it }
|
||||
otherSideSession.send(FetchDataFlow.Request.End)
|
||||
}
|
||||
}
|
||||
|
@ -11,7 +11,6 @@ import org.junit.Test
|
||||
import kotlin.test.assertEquals
|
||||
import kotlin.test.assertFailsWith
|
||||
|
||||
|
||||
class VaultUpdateTests {
|
||||
private companion object {
|
||||
const val DUMMY_PROGRAM_ID = "net.corda.coretests.node.VaultUpdateTests\$DummyContract"
|
||||
|
@ -3,7 +3,9 @@ package net.corda.coretests.serialization
|
||||
import co.paralleluniverse.fibers.Suspendable
|
||||
import net.corda.core.contracts.Attachment
|
||||
import net.corda.core.crypto.SecureHash
|
||||
import net.corda.core.flows.*
|
||||
import net.corda.core.flows.FlowLogic
|
||||
import net.corda.core.flows.FlowSession
|
||||
import net.corda.core.flows.InitiatingFlow
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.internal.FetchAttachmentsFlow
|
||||
import net.corda.core.internal.FetchDataFlow
|
||||
|
@ -2,13 +2,17 @@ package net.corda.coretests.serialization
|
||||
|
||||
import com.nhaarman.mockito_kotlin.mock
|
||||
import net.corda.core.contracts.*
|
||||
import net.corda.core.crypto.*
|
||||
import net.corda.core.crypto.Crypto
|
||||
import net.corda.core.crypto.SignatureMetadata
|
||||
import net.corda.core.crypto.TransactionSignature
|
||||
import net.corda.core.crypto.generateKeyPair
|
||||
import net.corda.core.identity.AbstractParty
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.core.node.NotaryInfo
|
||||
import net.corda.core.serialization.deserialize
|
||||
import net.corda.core.serialization.serialize
|
||||
import net.corda.core.transactions.*
|
||||
import net.corda.core.transactions.LedgerTransaction
|
||||
import net.corda.core.transactions.TransactionBuilder
|
||||
import net.corda.core.utilities.seconds
|
||||
import net.corda.finance.POUNDS
|
||||
import net.corda.testing.common.internal.testNetworkParameters
|
||||
@ -68,7 +72,7 @@ class TransactionSerializationTests {
|
||||
val depositRef = MINI_CORP.ref(1)
|
||||
val signatures = listOf(TransactionSignature(ByteArray(1), MEGA_CORP_KEY.public, SignatureMetadata(1, Crypto.findSignatureScheme(MEGA_CORP_KEY.public).schemeNumberID)))
|
||||
|
||||
lateinit var inputState : StateAndRef<ContractState>
|
||||
lateinit var inputState: StateAndRef<ContractState>
|
||||
val outputState = TransactionState(TestCash.State(depositRef, 600.POUNDS, MEGA_CORP), TEST_CASH_PROGRAM_ID, DUMMY_NOTARY)
|
||||
val changeState = TransactionState(TestCash.State(depositRef, 400.POUNDS, MEGA_CORP), TEST_CASH_PROGRAM_ID, DUMMY_NOTARY)
|
||||
|
||||
@ -84,7 +88,7 @@ class TransactionSerializationTests {
|
||||
//record fake transaction which created inputState
|
||||
val fakeTx = megaCorpServices.signInitialTransaction(TransactionBuilder(DUMMY_NOTARY).withItems(outputState, Command(TestCash.Commands.Issue(), arrayListOf(MEGA_CORP.owningKey))))
|
||||
megaCorpServices.recordTransactions(fakeTx)
|
||||
val fakeStateRef = StateRef(fakeTx.id,0)
|
||||
val fakeStateRef = StateRef(fakeTx.id, 0)
|
||||
inputState = StateAndRef(TransactionState(TestCash.State(depositRef, 100.POUNDS, MEGA_CORP), TEST_CASH_PROGRAM_ID, DUMMY_NOTARY, constraint = AlwaysAcceptAttachmentConstraint), fakeStateRef)
|
||||
tx = TransactionBuilder(DUMMY_NOTARY).withItems(inputState, outputState, changeState, Command(TestCash.Commands.Move(), arrayListOf(MEGA_CORP.owningKey)))
|
||||
}
|
||||
|
@ -108,7 +108,7 @@ class AttachmentsClassLoaderTests {
|
||||
|
||||
@Test
|
||||
fun `Test valid overlapping file condition`() {
|
||||
val att1 = importAttachment(fakeAttachment("file1.txt", "same data", "file2.txt", "same other data" ).inputStream(), "app", "file1.jar")
|
||||
val att1 = importAttachment(fakeAttachment("file1.txt", "same data", "file2.txt", "same other data").inputStream(), "app", "file1.jar")
|
||||
val att2 = importAttachment(fakeAttachment("file1.txt", "same data", "file3.txt", "same totally different").inputStream(), "app", "file2.jar")
|
||||
|
||||
val cl = make(arrayOf(att1, att2).map { storage.openAttachment(it)!! })
|
||||
@ -206,28 +206,28 @@ class AttachmentsClassLoaderTests {
|
||||
val keyPairA = Crypto.generateKeyPair()
|
||||
val keyPairB = Crypto.generateKeyPair()
|
||||
val classJar = fakeAttachment(
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone trusted"
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone trusted"
|
||||
).inputStream()
|
||||
classJar.use {
|
||||
storage.importContractAttachment(
|
||||
listOf("UntrustedClass.class"),
|
||||
"rpc",
|
||||
it,
|
||||
signers = listOf(keyPairA.public, keyPairB.public)
|
||||
listOf("UntrustedClass.class"),
|
||||
"rpc",
|
||||
it,
|
||||
signers = listOf(keyPairA.public, keyPairB.public)
|
||||
)
|
||||
}
|
||||
|
||||
val untrustedClassJar = fakeAttachment(
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone untrusted"
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone untrusted"
|
||||
).inputStream()
|
||||
val untrustedAttachment = untrustedClassJar.use {
|
||||
storage.importContractAttachment(
|
||||
listOf("UntrustedClass.class"),
|
||||
"untrusted",
|
||||
it,
|
||||
signers = listOf(keyPairA.public, keyPairB.public)
|
||||
listOf("UntrustedClass.class"),
|
||||
"untrusted",
|
||||
it,
|
||||
signers = listOf(keyPairA.public, keyPairB.public)
|
||||
)
|
||||
}
|
||||
|
||||
@ -240,28 +240,28 @@ class AttachmentsClassLoaderTests {
|
||||
val keyPairB = Crypto.generateKeyPair()
|
||||
val keyPairC = Crypto.generateKeyPair()
|
||||
val classJar = fakeAttachment(
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone trusted"
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone trusted"
|
||||
).inputStream()
|
||||
classJar.use {
|
||||
storage.importContractAttachment(
|
||||
listOf("UntrustedClass.class"),
|
||||
"rpc",
|
||||
it,
|
||||
signers = listOf(keyPairA.public, keyPairC.public)
|
||||
listOf("UntrustedClass.class"),
|
||||
"rpc",
|
||||
it,
|
||||
signers = listOf(keyPairA.public, keyPairC.public)
|
||||
)
|
||||
}
|
||||
|
||||
val untrustedClassJar = fakeAttachment(
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone untrusted"
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone untrusted"
|
||||
).inputStream()
|
||||
val untrustedAttachment = untrustedClassJar.use {
|
||||
storage.importContractAttachment(
|
||||
listOf("UntrustedClass.class"),
|
||||
"untrusted",
|
||||
it,
|
||||
signers = listOf(keyPairA.public, keyPairB.public)
|
||||
listOf("UntrustedClass.class"),
|
||||
"untrusted",
|
||||
it,
|
||||
signers = listOf(keyPairA.public, keyPairB.public)
|
||||
)
|
||||
}
|
||||
|
||||
@ -273,15 +273,15 @@ class AttachmentsClassLoaderTests {
|
||||
val keyPairA = Crypto.generateKeyPair()
|
||||
val keyPairB = Crypto.generateKeyPair()
|
||||
val untrustedClassJar = fakeAttachment(
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone untrusted"
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone untrusted"
|
||||
).inputStream()
|
||||
val untrustedAttachment = untrustedClassJar.use {
|
||||
storage.importContractAttachment(
|
||||
listOf("UntrustedClass.class"),
|
||||
"untrusted",
|
||||
it,
|
||||
signers = listOf(keyPairA.public, keyPairB.public)
|
||||
listOf("UntrustedClass.class"),
|
||||
"untrusted",
|
||||
it,
|
||||
signers = listOf(keyPairA.public, keyPairB.public)
|
||||
)
|
||||
}
|
||||
|
||||
@ -295,28 +295,28 @@ class AttachmentsClassLoaderTests {
|
||||
val keyPairA = Crypto.generateKeyPair()
|
||||
val keyPairB = Crypto.generateKeyPair()
|
||||
val classJar = fakeAttachment(
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone untrusted with the same keys"
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone untrusted with the same keys"
|
||||
).inputStream()
|
||||
classJar.use {
|
||||
storage.importContractAttachment(
|
||||
listOf("UntrustedClass.class"),
|
||||
"untrusted",
|
||||
it,
|
||||
signers = listOf(keyPairA.public, keyPairB.public)
|
||||
listOf("UntrustedClass.class"),
|
||||
"untrusted",
|
||||
it,
|
||||
signers = listOf(keyPairA.public, keyPairB.public)
|
||||
)
|
||||
}
|
||||
|
||||
val untrustedClassJar = fakeAttachment(
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone untrusted"
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone untrusted"
|
||||
).inputStream()
|
||||
val untrustedAttachment = untrustedClassJar.use {
|
||||
storage.importContractAttachment(
|
||||
listOf("UntrustedClass.class"),
|
||||
"untrusted",
|
||||
it,
|
||||
signers = listOf(keyPairA.public, keyPairB.public)
|
||||
listOf("UntrustedClass.class"),
|
||||
"untrusted",
|
||||
it,
|
||||
signers = listOf(keyPairA.public, keyPairB.public)
|
||||
)
|
||||
}
|
||||
|
||||
@ -331,41 +331,41 @@ class AttachmentsClassLoaderTests {
|
||||
val keyPairB = Crypto.generateKeyPair()
|
||||
val keyPairC = Crypto.generateKeyPair()
|
||||
val classJar = fakeAttachment(
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone untrusted with the same keys"
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone untrusted with the same keys"
|
||||
).inputStream()
|
||||
classJar.use {
|
||||
storage.importContractAttachment(
|
||||
listOf("UntrustedClass.class"),
|
||||
"app",
|
||||
it,
|
||||
signers = listOf(keyPairA.public)
|
||||
listOf("UntrustedClass.class"),
|
||||
"app",
|
||||
it,
|
||||
signers = listOf(keyPairA.public)
|
||||
)
|
||||
}
|
||||
|
||||
val inheritedTrustClassJar = fakeAttachment(
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone who inherits trust"
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone who inherits trust"
|
||||
).inputStream()
|
||||
val inheritedTrustAttachment = inheritedTrustClassJar.use {
|
||||
storage.importContractAttachment(
|
||||
listOf("UntrustedClass.class"),
|
||||
"untrusted",
|
||||
it,
|
||||
signers = listOf(keyPairB.public, keyPairA.public)
|
||||
listOf("UntrustedClass.class"),
|
||||
"untrusted",
|
||||
it,
|
||||
signers = listOf(keyPairB.public, keyPairA.public)
|
||||
)
|
||||
}
|
||||
|
||||
val untrustedClassJar = fakeAttachment(
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone untrusted"
|
||||
"/com/example/something/UntrustedClass.class",
|
||||
"Signed by someone untrusted"
|
||||
).inputStream()
|
||||
val untrustedAttachment = untrustedClassJar.use {
|
||||
storage.importContractAttachment(
|
||||
listOf("UntrustedClass.class"),
|
||||
"untrusted",
|
||||
it,
|
||||
signers = listOf(keyPairB.public, keyPairC.public)
|
||||
listOf("UntrustedClass.class"),
|
||||
"untrusted",
|
||||
it,
|
||||
signers = listOf(keyPairB.public, keyPairC.public)
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -3,10 +3,10 @@ package net.corda.coretests.transactions
|
||||
import net.corda.core.contracts.*
|
||||
import net.corda.core.contracts.ComponentGroupEnum.*
|
||||
import net.corda.core.crypto.*
|
||||
import net.corda.core.internal.createComponentGroups
|
||||
import net.corda.core.internal.accessAvailableComponentHashes
|
||||
import net.corda.core.internal.accessGroupHashes
|
||||
import net.corda.core.internal.accessGroupMerkleRoots
|
||||
import net.corda.core.internal.createComponentGroups
|
||||
import net.corda.core.serialization.serialize
|
||||
import net.corda.core.transactions.*
|
||||
import net.corda.core.utilities.OpaqueBytes
|
||||
@ -17,7 +17,6 @@ import org.junit.Rule
|
||||
import org.junit.Test
|
||||
import java.time.Instant
|
||||
import java.util.function.Predicate
|
||||
import kotlin.reflect.KVisibility
|
||||
import kotlin.test.*
|
||||
|
||||
class CompatibleTransactionTests {
|
||||
@ -420,7 +419,6 @@ class CompatibleTransactionTests {
|
||||
// Required to call the private constructor.
|
||||
val ftxConstructor = FilteredTransaction::class.constructors.first()
|
||||
|
||||
|
||||
// 1st and 3rd commands require a signature from KEY_1.
|
||||
val twoCommandsforKey1 = listOf(dummyCommand(DUMMY_KEY_1.public, DUMMY_KEY_2.public), dummyCommand(DUMMY_KEY_2.public), dummyCommand(DUMMY_KEY_1.public))
|
||||
val componentGroups = listOf(
|
||||
|
@ -22,7 +22,7 @@ import net.corda.testing.core.SerializationEnvironmentRule
|
||||
import net.corda.testing.core.TestIdentity
|
||||
import net.corda.testing.node.MockServices
|
||||
import net.corda.testing.node.ledger
|
||||
import org.assertj.core.api.Assertions.*
|
||||
import org.assertj.core.api.Assertions.assertThatIllegalArgumentException
|
||||
import org.junit.Rule
|
||||
import org.junit.Test
|
||||
|
||||
|
@ -171,7 +171,7 @@ class TransactionTests {
|
||||
val inputs = listOf(StateAndRef(inState, StateRef(SecureHash.randomSHA256(), 0)))
|
||||
val outputs = listOf(outState)
|
||||
val commands = emptyList<CommandWithParties<CommandData>>()
|
||||
val attachments = listOf(object : AbstractAttachment( {
|
||||
val attachments = listOf(object : AbstractAttachment({
|
||||
AttachmentsClassLoaderTests::class.java.getResource("isolated-4.0.jar").openStream().readBytes()
|
||||
}, TESTDSL_UPLOADER) {
|
||||
@Suppress("OverridingDeprecatedMember")
|
||||
|
@ -2,7 +2,8 @@ package net.corda.coretests.utilities
|
||||
|
||||
import com.esotericsoftware.kryo.KryoException
|
||||
import net.corda.core.crypto.random63BitValue
|
||||
import net.corda.core.serialization.*
|
||||
import net.corda.core.serialization.ClassWhitelist
|
||||
import net.corda.core.serialization.CordaSerializable
|
||||
import net.corda.core.serialization.internal.checkpointDeserialize
|
||||
import net.corda.core.serialization.internal.checkpointSerialize
|
||||
import net.corda.core.utilities.transient
|
||||
|
@ -2,7 +2,6 @@ package net.corda.core.internal
|
||||
|
||||
import net.corda.core.DeleteForDJVM
|
||||
import net.corda.core.contracts.Attachment
|
||||
import net.corda.core.contracts.ContractAttachment
|
||||
import net.corda.core.contracts.ContractClassName
|
||||
import net.corda.core.flows.DataVendingFlow
|
||||
import net.corda.core.flows.FlowLogic
|
||||
|
@ -118,8 +118,8 @@ sealed class FetchDataFlow<T : NamedByHash, in W : Any>(
|
||||
if (stx == null)
|
||||
toFetch += txid
|
||||
else
|
||||
// Although the full object is loaded here, only return the id. This prevents the full set of objects already present from
|
||||
// being checkpointed every time a request is made to download an object the node does not yet have.
|
||||
// Although the full object is loaded here, only return the id. This prevents the full set of objects already present from
|
||||
// being checkpointed every time a request is made to download an object the node does not yet have.
|
||||
fromDisk += txid
|
||||
}
|
||||
return Pair(fromDisk, toFetch)
|
||||
|
@ -186,7 +186,7 @@ fun FlowLogic<*>.checkParameterHash(networkParametersHash: SecureHash?) {
|
||||
|
||||
// A cache for caching whether a particular set of signers are trusted
|
||||
private val trustedKeysCache: MutableMap<PublicKey, Boolean> =
|
||||
createSimpleCache<PublicKey, Boolean>(100).toSynchronised()
|
||||
createSimpleCache<PublicKey, Boolean>(100).toSynchronised()
|
||||
|
||||
/**
|
||||
* Establishes whether an attachment should be trusted. This logic is required in order to verify transactions, as transaction
|
||||
@ -210,7 +210,7 @@ fun isAttachmentTrusted(attachment: Attachment, service: AttachmentStorage?): Bo
|
||||
attachment.signerKeys.any { signer ->
|
||||
trustedKeysCache.computeIfAbsent(signer) {
|
||||
val queryCriteria = AttachmentQueryCriteria.AttachmentsQueryCriteria(
|
||||
signersCondition = Builder.equal(listOf(signer)),
|
||||
signersCondition = Builder.equal(listOf(signer)),
|
||||
uploaderCondition = Builder.`in`(TRUSTED_UPLOADERS)
|
||||
)
|
||||
service.queryAttachments(queryCriteria).isNotEmpty()
|
||||
|
@ -4,7 +4,10 @@ import co.paralleluniverse.strands.Strand
|
||||
import net.corda.core.CordaInternal
|
||||
import net.corda.core.DeleteForDJVM
|
||||
import net.corda.core.contracts.*
|
||||
import net.corda.core.crypto.*
|
||||
import net.corda.core.crypto.CompositeKey
|
||||
import net.corda.core.crypto.SecureHash
|
||||
import net.corda.core.crypto.SignableData
|
||||
import net.corda.core.crypto.SignatureMetadata
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.internal.*
|
||||
import net.corda.core.node.NetworkParameters
|
||||
@ -268,14 +271,14 @@ open class TransactionBuilder(
|
||||
// Filter out all contracts that might have been already used by 'normal' input or output states.
|
||||
val referenceStateGroups: Map<ContractClassName, List<TransactionState<ContractState>>> = referencesWithTransactionState.groupBy { it.contract }
|
||||
val refStateContractAttachments: List<AttachmentId> = referenceStateGroups
|
||||
.filterNot { it.key in allContracts }
|
||||
.map { refStateEntry ->
|
||||
getInstalledContractAttachmentId(
|
||||
refStateEntry.key,
|
||||
refStateEntry.value,
|
||||
services
|
||||
)
|
||||
}
|
||||
.filterNot { it.key in allContracts }
|
||||
.map { refStateEntry ->
|
||||
getInstalledContractAttachmentId(
|
||||
refStateEntry.key,
|
||||
refStateEntry.value,
|
||||
services
|
||||
)
|
||||
}
|
||||
|
||||
// For each contract, resolve the AutomaticPlaceholderConstraint, and select the attachment.
|
||||
val contractAttachmentsAndResolvedOutputStates: List<Pair<AttachmentId, List<TransactionState<ContractState>>?>> = allContracts.toSet()
|
||||
@ -321,9 +324,9 @@ open class TransactionBuilder(
|
||||
val inputsAndOutputs = (inputStates ?: emptyList()) + (outputStates ?: emptyList())
|
||||
|
||||
fun selectAttachment() = getInstalledContractAttachmentId(
|
||||
contractClassName,
|
||||
inputsAndOutputs.filterNot { it.constraint in automaticConstraints },
|
||||
services
|
||||
contractClassName,
|
||||
inputsAndOutputs.filterNot { it.constraint in automaticConstraints },
|
||||
services
|
||||
)
|
||||
|
||||
/*
|
||||
@ -340,7 +343,7 @@ open class TransactionBuilder(
|
||||
require(attachment != null) { "Contract attachment $attachmentId for $contractClassName is missing." }
|
||||
if ((attachment as ContractAttachment).isSigned && (explicitContractAttachment == null || explicitContractAttachment == attachment.id)) {
|
||||
val signatureConstraint =
|
||||
makeSignatureAttachmentConstraint(attachment.signerKeys)
|
||||
makeSignatureAttachmentConstraint(attachment.signerKeys)
|
||||
require(signatureConstraint.isSatisfiedBy(attachment)) { "Selected output constraint: $signatureConstraint not satisfying ${attachment.id}" }
|
||||
val resolvedOutputStates = outputStates?.map {
|
||||
if (it.constraint in automaticConstraints) {
|
||||
@ -432,9 +435,9 @@ open class TransactionBuilder(
|
||||
* any possibility of transition off of existing [HashAttachmentConstraint]s.
|
||||
*/
|
||||
private fun canMigrateFromHashToSignatureConstraint(
|
||||
inputStates: List<TransactionState<ContractState>>?,
|
||||
outputStates: List<TransactionState<ContractState>>?,
|
||||
services: ServicesForResolution
|
||||
inputStates: List<TransactionState<ContractState>>?,
|
||||
outputStates: List<TransactionState<ContractState>>?,
|
||||
services: ServicesForResolution
|
||||
): Boolean {
|
||||
return HashAttachmentConstraint.disableHashConstraints
|
||||
&& services.networkParameters.minimumPlatformVersion >= 4
|
||||
@ -478,9 +481,9 @@ open class TransactionBuilder(
|
||||
* TODO - once support for third party signing is added, it should be implemented here. ( a constraint with 2 signatures is less restrictive than a constraint with 1 more signature)
|
||||
*/
|
||||
private fun attachmentConstraintsTransition(
|
||||
constraints: Set<AttachmentConstraint>,
|
||||
attachmentToUse: ContractAttachment,
|
||||
services: ServicesForResolution
|
||||
constraints: Set<AttachmentConstraint>,
|
||||
attachmentToUse: ContractAttachment,
|
||||
services: ServicesForResolution
|
||||
): AttachmentConstraint = when {
|
||||
|
||||
// Sanity check.
|
||||
@ -529,12 +532,12 @@ open class TransactionBuilder(
|
||||
SignatureAttachmentConstraint(CompositeKey.Builder().addKeys(attachmentSigners).build())
|
||||
|
||||
private fun getInstalledContractAttachmentId(
|
||||
contractClassName: String,
|
||||
states: List<TransactionState<ContractState>>,
|
||||
services: ServicesForResolution
|
||||
contractClassName: String,
|
||||
states: List<TransactionState<ContractState>>,
|
||||
services: ServicesForResolution
|
||||
): AttachmentId {
|
||||
return services.cordappProvider.getContractAttachmentID(contractClassName)
|
||||
?: throw MissingContractAttachments(states, contractClassName)
|
||||
?: throw MissingContractAttachments(states, contractClassName)
|
||||
}
|
||||
|
||||
private fun useWhitelistedByZoneAttachmentConstraint(contractClassName: ContractClassName, networkParameters: NetworkParameters) = contractClassName in networkParameters.whitelistedContractImplementations.keys
|
||||
|
@@ -1,11 +1,9 @@
package net.corda.core.internal

import net.corda.core.contracts.*
import net.corda.core.crypto.MerkleTree
import net.corda.core.crypto.PartialMerkleTree
import net.corda.core.crypto.SecureHash
import net.corda.core.identity.Party
import net.corda.core.internal.SerializedStateAndRef
import net.corda.core.node.NetworkParameters
import net.corda.core.transactions.ComponentGroup
import net.corda.core.transactions.LedgerTransaction
@@ -16,6 +14,7 @@ import net.corda.core.transactions.WireTransaction
*/

fun WireTransaction.accessGroupHashes() = this.groupHashes

fun WireTransaction.accessGroupMerkleRoots() = this.groupsMerkleRoots
fun WireTransaction.accessAvailableComponentHashes() = this.availableComponentHashes
@ -0,0 +1,62 @@
# Design doc: migrating the identity service to `PublicKey.toStringShort()`

## Overview

We wish to move the `PersistentIdentityService` away from using `PublicKey.hash.toHexString()` and over to the correct method,
`PublicKey.toStringShort()`.

This requires modifying the `PersistentIdentityService` and an accompanying database migration.

**It is important to note that the underlying hash function is SHA-256 in both the old and new implementations; the only difference is the stored representation.**
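To make the difference in representation concrete, the following minimal sketch prints both forms for the same key. The exact textual output is indicative only, and `Crypto.generateKeyPair()` is used purely as a convenient way to obtain a key pair.

```kotlin
import net.corda.core.crypto.Crypto
import net.corda.core.crypto.toStringShort
import net.corda.core.internal.hash

fun main() {
    val key = Crypto.generateKeyPair().public
    // Old stored representation: hex string of the SHA-256 of the encoded key.
    println(key.hash.toString())
    // New stored representation: the same SHA-256 digest, rendered by toStringShort().
    println(key.toStringShort())
}
```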
## Background

In Corda 4 we introduced the ability to map a given `PublicKey` to a UUID. Internally this builds a database table which maintains a mapping
from `H(PublicKey)` to UUID, where `H()` is `PublicKey.toStringShort()`.

There is a reasonable requirement that, for a given UUID, you would want to find all the keys associated with that UUID.
To do this, we would need to join the `PublicKeyHashToExternalId` table with the `PersistentIdentity` table.

This is currently impossible because the two tables use different hashes.
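For illustration, the kind of query this proposal unlocks might look roughly as follows. This is a hedged sketch: the table and column names (`node_identities.pk_hash`, `pk_hash_to_ext_id_map.public_key_hash` and `external_id`) are taken from the entity definitions elsewhere in this change and should be treated as assumptions rather than a documented, stable schema.

```kotlin
import java.sql.Connection
import java.util.UUID

// Find every public key hash mapped to a given external id by joining the two
// tables on the (now identical) hash representation. Table/column names are assumptions.
fun keyHashesForExternalId(connection: Connection, externalId: UUID): List<String> {
    val sql = """
        SELECT i.pk_hash
        FROM node_identities i
        JOIN pk_hash_to_ext_id_map e ON e.public_key_hash = i.pk_hash
        WHERE e.external_id = ?
    """.trimIndent()
    return connection.prepareStatement(sql).use { statement ->
        statement.setString(1, externalId.toString())
        statement.executeQuery().use { rs ->
            generateSequence { if (rs.next()) rs.getString("pk_hash") else null }.toList()
        }
    }
}
```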
## Goals

* Migrate `PersistentIdentityService` to use `PublicKey.toStringShort()`
* Migrate the existing stored data

## Timeline

* This is required for usage of Accounts, so it must be included in any release that is intended for clients using Accounts.

## Requirements

* It must be possible to join the `PublicKeyHashToExternalId` table with the `PersistentIdentity` table.
* Existing identities must be safely migrated to the new hashing approach.

## Design Decisions

* We will use Liquibase to perform the migration.
## Design

The intention is to remove the use of `SecureHash` as an intermediary step in hashing a `PublicKey` and instead invoke `toStringShort()` directly.
This ensures that, for the same public key, all the tables within the node share a joinable column.

Luckily, the `PersistentIdentity` table stores the full `PartyAndCertificate`, which allows us to load each previously stored record
and obtain the correct hash. We can then use the same `PartyAndCertificate` to calculate the originally stored value,
and perform a simple `UPDATE RECORD SET RECORD.PK_HASH = <new_value> WHERE RECORD.PK_HASH = <old_value>` to safely migrate the data.
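A rough outline of that rewrite is shown below, simplified to plain JDBC rather than the Liquibase `CustomSqlChange` used by the reference implementation linked at the end of this document; the `node_identities`, `pk_hash` and `identity_value` names are assumptions based on the entity definitions elsewhere in this change.

```kotlin
import net.corda.core.crypto.toStringShort
import net.corda.core.identity.PartyAndCertificate
import net.corda.nodeapi.internal.crypto.X509CertificateFactory
import java.sql.Connection

// For each stored identity, rebuild the PartyAndCertificate from the serialised cert path,
// derive the new-style hash and rewrite the pk_hash column in place.
fun rehashStoredIdentities(connection: Connection) {
    val rows = connection.createStatement().use { statement ->
        statement.executeQuery("SELECT pk_hash, identity_value FROM node_identities").use { rs ->
            generateSequence {
                if (rs.next()) rs.getString("pk_hash") to rs.getBytes("identity_value") else null
            }.toList()
        }
    }
    connection.prepareStatement("UPDATE node_identities SET pk_hash = ? WHERE pk_hash = ?").use { update ->
        for ((oldHash, certPathBytes) in rows) {
            val identity = PartyAndCertificate(
                X509CertificateFactory().delegate.generateCertPath(certPathBytes.inputStream())
            )
            update.setString(1, identity.owningKey.toStringShort())
            update.setString(2, oldHash)
            update.addBatch()
        }
        update.executeBatch()
    }
}
```

The reference implementation additionally applies the same rewrite to the name-to-hash table, keyed on both the hash and the X.500 name.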
### Testing

It is possible to write a simple unit test that inserts records into the table using the old hashing mechanism, executes the migration,
and then checks that the expected value is present within the updated rows. This gives a level of confidence that the migration is safe.
For more extensive testing, we propose to start a node using an unfixed Corda 4 version, insert some identities (both well known and confidential),
shut down the node, install a fixed version, and then check that the identities are resolvable and present in the database with the correct form of hash.

For Enterprise, the testing performed with H2 (start, shutdown, migrate, restart) must be repeated for all the supported database engines,
both using the DbMigrationTool and by allowing the node to migrate itself.

To see a reference implementation of this design, check out: https://github.com/corda/corda/pull/5217/files
@ -5,14 +5,13 @@ import com.typesafe.config.ConfigFactory
|
||||
import com.typesafe.config.ConfigParseOptions
|
||||
import net.corda.cliutils.CordaCliWrapper
|
||||
import net.corda.cliutils.start
|
||||
import net.corda.core.crypto.Crypto
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.internal.*
|
||||
import net.corda.core.internal.copyTo
|
||||
import net.corda.core.internal.readObject
|
||||
import net.corda.core.internal.signWithCertPath
|
||||
import net.corda.core.node.NetworkParameters
|
||||
import net.corda.core.node.NotaryInfo
|
||||
import net.corda.core.serialization.SerializationContext
|
||||
import net.corda.core.serialization.SerializedBytes
|
||||
import net.corda.core.serialization.deserialize
|
||||
import net.corda.core.serialization.internal.SerializationEnvironment
|
||||
import net.corda.core.serialization.internal.nodeSerializationEnv
|
||||
import net.corda.core.serialization.serialize
|
||||
@ -20,9 +19,12 @@ import net.corda.nodeapi.internal.SignedNodeInfo
|
||||
import net.corda.nodeapi.internal.createDevNetworkMapCa
|
||||
import net.corda.nodeapi.internal.crypto.CertificateAndKeyPair
|
||||
import net.corda.nodeapi.internal.crypto.X509KeyStore
|
||||
import net.corda.serialization.internal.*
|
||||
import net.corda.serialization.internal.amqp.*
|
||||
import picocli.CommandLine.*
|
||||
import net.corda.serialization.internal.AMQP_P2P_CONTEXT
|
||||
import net.corda.serialization.internal.CordaSerializationMagic
|
||||
import net.corda.serialization.internal.SerializationFactoryImpl
|
||||
import net.corda.serialization.internal.amqp.AbstractAMQPSerializationScheme
|
||||
import net.corda.serialization.internal.amqp.amqpMagic
|
||||
import picocli.CommandLine.Option
|
||||
import java.io.File
|
||||
import java.nio.file.Path
|
||||
import java.nio.file.StandardCopyOption
|
||||
|
@ -1,8 +1,8 @@
|
||||
package net.corda.nodeapi.exceptions
|
||||
|
||||
import net.corda.core.ClientRelevantError
|
||||
import net.corda.core.CordaRuntimeException
|
||||
import net.corda.core.crypto.SecureHash
|
||||
import net.corda.core.ClientRelevantError
|
||||
import net.corda.core.flows.IdentifiableException
|
||||
import net.corda.core.serialization.CordaSerializable
|
||||
|
||||
|
@ -3,7 +3,6 @@ package net.corda.nodeapi.internal.config
|
||||
import net.corda.core.crypto.internal.AliasPrivateKey
|
||||
import net.corda.core.internal.outputStream
|
||||
import net.corda.nodeapi.internal.crypto.X509KeyStore
|
||||
import net.corda.nodeapi.internal.crypto.X509Utilities
|
||||
import net.corda.nodeapi.internal.crypto.addOrReplaceCertificate
|
||||
import java.io.InputStream
|
||||
import java.io.OutputStream
|
||||
|
@ -3,15 +3,10 @@
|
||||
package net.corda.nodeapi.internal.crypto
|
||||
|
||||
import net.corda.core.crypto.Crypto
|
||||
import net.corda.core.internal.createDirectories
|
||||
import net.corda.core.internal.exists
|
||||
import net.corda.core.internal.read
|
||||
import net.corda.core.internal.write
|
||||
import net.corda.core.internal.safeSymbolicRead
|
||||
import net.corda.core.internal.*
|
||||
import java.io.IOException
|
||||
import java.io.InputStream
|
||||
import java.nio.file.Path
|
||||
import java.nio.file.Files
|
||||
import java.security.*
|
||||
import java.security.cert.Certificate
|
||||
import java.security.cert.X509Certificate
|
||||
|
@ -2,7 +2,6 @@ package net.corda.nodeapi.internal.network
|
||||
|
||||
import net.corda.core.internal.*
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.core.internal.NODE_INFO_DIRECTORY
|
||||
import net.corda.core.utilities.debug
|
||||
import rx.Observable
|
||||
import rx.Scheduler
|
||||
|
@ -102,16 +102,16 @@ class SignatureConstraintVersioningTests {
|
||||
fun `auto migration from WhitelistConstraint to SignatureConstraint`() {
|
||||
assumeFalse(System.getProperty("os.name").toLowerCase().startsWith("win")) // See NodeStatePersistenceTests.kt.
|
||||
val (issuanceTransaction, consumingTransaction) = upgradeCorDappBetweenTransactions(
|
||||
cordapp = oldUnsignedCordapp,
|
||||
newCordapp = newCordapp,
|
||||
whiteListedCordapps = mapOf(
|
||||
TEST_MESSAGE_CONTRACT_PROGRAM_ID to listOf(
|
||||
oldUnsignedCordapp,
|
||||
newCordapp
|
||||
)
|
||||
),
|
||||
systemProperties = emptyMap(),
|
||||
startNodesInProcess = false
|
||||
cordapp = oldUnsignedCordapp,
|
||||
newCordapp = newCordapp,
|
||||
whiteListedCordapps = mapOf(
|
||||
TEST_MESSAGE_CONTRACT_PROGRAM_ID to listOf(
|
||||
oldUnsignedCordapp,
|
||||
newCordapp
|
||||
)
|
||||
),
|
||||
systemProperties = emptyMap(),
|
||||
startNodesInProcess = false
|
||||
)
|
||||
assertEquals(1, issuanceTransaction.outputs.size)
|
||||
assertTrue(issuanceTransaction.outputs.single().constraint is WhitelistedByZoneAttachmentConstraint)
|
||||
@ -123,17 +123,17 @@ class SignatureConstraintVersioningTests {
|
||||
fun `WhitelistConstraint cannot be migrated to SignatureConstraint if platform version is not 4 or greater`() {
|
||||
assumeFalse(System.getProperty("os.name").toLowerCase().startsWith("win")) // See NodeStatePersistenceTests.kt.
|
||||
val (issuanceTransaction, consumingTransaction) = upgradeCorDappBetweenTransactions(
|
||||
cordapp = oldUnsignedCordapp,
|
||||
newCordapp = newCordapp,
|
||||
whiteListedCordapps = mapOf(
|
||||
TEST_MESSAGE_CONTRACT_PROGRAM_ID to listOf(
|
||||
oldUnsignedCordapp,
|
||||
newCordapp
|
||||
)
|
||||
),
|
||||
systemProperties = emptyMap(),
|
||||
startNodesInProcess = false,
|
||||
minimumPlatformVersion = 3
|
||||
cordapp = oldUnsignedCordapp,
|
||||
newCordapp = newCordapp,
|
||||
whiteListedCordapps = mapOf(
|
||||
TEST_MESSAGE_CONTRACT_PROGRAM_ID to listOf(
|
||||
oldUnsignedCordapp,
|
||||
newCordapp
|
||||
)
|
||||
),
|
||||
systemProperties = emptyMap(),
|
||||
startNodesInProcess = false,
|
||||
minimumPlatformVersion = 3
|
||||
)
|
||||
assertEquals(1, issuanceTransaction.outputs.size)
|
||||
assertTrue(issuanceTransaction.outputs.single().constraint is WhitelistedByZoneAttachmentConstraint)
|
||||
@ -146,32 +146,32 @@ class SignatureConstraintVersioningTests {
|
||||
assumeFalse(System.getProperty("os.name").toLowerCase().startsWith("win")) // See NodeStatePersistenceTests.kt.
|
||||
assertThatExceptionOfType(CordaRuntimeException::class.java).isThrownBy {
|
||||
upgradeCorDappBetweenTransactions(
|
||||
cordapp = oldUnsignedCordapp,
|
||||
newCordapp = newCordapp,
|
||||
whiteListedCordapps = mapOf(TEST_MESSAGE_CONTRACT_PROGRAM_ID to emptyList()),
|
||||
systemProperties = emptyMap(),
|
||||
startNodesInProcess = true
|
||||
cordapp = oldUnsignedCordapp,
|
||||
newCordapp = newCordapp,
|
||||
whiteListedCordapps = mapOf(TEST_MESSAGE_CONTRACT_PROGRAM_ID to emptyList()),
|
||||
systemProperties = emptyMap(),
|
||||
startNodesInProcess = true
|
||||
)
|
||||
}
|
||||
.withMessageContaining("Selected output constraint: $WhitelistedByZoneAttachmentConstraint not satisfying")
|
||||
.withMessageContaining("Selected output constraint: $WhitelistedByZoneAttachmentConstraint not satisfying")
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `auto migration from WhitelistConstraint to SignatureConstraint will only transition states that do not have a constraint specified`() {
|
||||
assumeFalse(System.getProperty("os.name").toLowerCase().startsWith("win")) // See NodeStatePersistenceTests.kt.
|
||||
val (issuanceTransaction, consumingTransaction) = upgradeCorDappBetweenTransactions(
|
||||
cordapp = oldUnsignedCordapp,
|
||||
newCordapp = newCordapp,
|
||||
whiteListedCordapps = mapOf(
|
||||
TEST_MESSAGE_CONTRACT_PROGRAM_ID to listOf(
|
||||
oldUnsignedCordapp,
|
||||
newCordapp
|
||||
)
|
||||
),
|
||||
systemProperties = emptyMap(),
|
||||
startNodesInProcess = true,
|
||||
specifyExistingConstraint = true,
|
||||
addAnotherAutomaticConstraintState = true
|
||||
cordapp = oldUnsignedCordapp,
|
||||
newCordapp = newCordapp,
|
||||
whiteListedCordapps = mapOf(
|
||||
TEST_MESSAGE_CONTRACT_PROGRAM_ID to listOf(
|
||||
oldUnsignedCordapp,
|
||||
newCordapp
|
||||
)
|
||||
),
|
||||
systemProperties = emptyMap(),
|
||||
startNodesInProcess = true,
|
||||
specifyExistingConstraint = true,
|
||||
addAnotherAutomaticConstraintState = true
|
||||
)
|
||||
assertEquals(1, issuanceTransaction.outputs.size)
|
||||
assertTrue(issuanceTransaction.outputs.single().constraint is WhitelistedByZoneAttachmentConstraint)
|
||||
@ -179,9 +179,9 @@ class SignatureConstraintVersioningTests {
|
||||
assertTrue(consumingTransaction.outputs[0].constraint is WhitelistedByZoneAttachmentConstraint)
|
||||
assertTrue(consumingTransaction.outputs[1].constraint is SignatureAttachmentConstraint)
|
||||
assertEquals(
|
||||
issuanceTransaction.outputs.single().constraint,
|
||||
consumingTransaction.outputs.first().constraint,
|
||||
"The constraint from the issuance transaction should be the same constraint used in the consuming transaction for the first state"
|
||||
issuanceTransaction.outputs.single().constraint,
|
||||
consumingTransaction.outputs.first().constraint,
|
||||
"The constraint from the issuance transaction should be the same constraint used in the consuming transaction for the first state"
|
||||
)
|
||||
}
|
||||
|
||||
@ -189,11 +189,11 @@ class SignatureConstraintVersioningTests {
|
||||
fun `auto migration from HashConstraint to SignatureConstraint`() {
|
||||
assumeFalse(System.getProperty("os.name").toLowerCase().startsWith("win")) // See NodeStatePersistenceTests.kt.
|
||||
val (issuanceTransaction, consumingTransaction) = upgradeCorDappBetweenTransactions(
|
||||
cordapp = oldUnsignedCordapp,
|
||||
newCordapp = newCordapp,
|
||||
whiteListedCordapps = emptyMap(),
|
||||
systemProperties = mapOf("net.corda.node.disableHashConstraints" to true.toString()),
|
||||
startNodesInProcess = false
|
||||
cordapp = oldUnsignedCordapp,
|
||||
newCordapp = newCordapp,
|
||||
whiteListedCordapps = emptyMap(),
|
||||
systemProperties = mapOf("net.corda.node.disableHashConstraints" to true.toString()),
|
||||
startNodesInProcess = false
|
||||
)
|
||||
assertEquals(1, issuanceTransaction.outputs.size)
|
||||
assertTrue(issuanceTransaction.outputs.single().constraint is HashAttachmentConstraint)
|
||||
@ -205,11 +205,11 @@ class SignatureConstraintVersioningTests {
|
||||
fun `HashConstraint cannot be migrated if 'disableHashConstraints' system property is not set to true`() {
|
||||
assumeFalse(System.getProperty("os.name").toLowerCase().startsWith("win")) // See NodeStatePersistenceTests.kt.
|
||||
val (issuanceTransaction, consumingTransaction) = upgradeCorDappBetweenTransactions(
|
||||
cordapp = oldUnsignedCordapp,
|
||||
newCordapp = newCordapp,
|
||||
whiteListedCordapps = emptyMap(),
|
||||
systemProperties = emptyMap(),
|
||||
startNodesInProcess = false
|
||||
cordapp = oldUnsignedCordapp,
|
||||
newCordapp = newCordapp,
|
||||
whiteListedCordapps = emptyMap(),
|
||||
systemProperties = emptyMap(),
|
||||
startNodesInProcess = false
|
||||
)
|
||||
assertEquals(1, issuanceTransaction.outputs.size)
|
||||
assertTrue(issuanceTransaction.outputs.single().constraint is HashAttachmentConstraint)
|
||||
@ -221,11 +221,11 @@ class SignatureConstraintVersioningTests {
|
||||
fun `HashConstraint cannot be migrated to SignatureConstraint if new jar is not signed`() {
|
||||
assumeFalse(System.getProperty("os.name").toLowerCase().startsWith("win")) // See NodeStatePersistenceTests.kt.
|
||||
val (issuanceTransaction, consumingTransaction) = upgradeCorDappBetweenTransactions(
|
||||
cordapp = oldUnsignedCordapp,
|
||||
newCordapp = newUnsignedCordapp,
|
||||
whiteListedCordapps = emptyMap(),
|
||||
systemProperties = mapOf("net.corda.node.disableHashConstraints" to true.toString()),
|
||||
startNodesInProcess = false
|
||||
cordapp = oldUnsignedCordapp,
|
||||
newCordapp = newUnsignedCordapp,
|
||||
whiteListedCordapps = emptyMap(),
|
||||
systemProperties = mapOf("net.corda.node.disableHashConstraints" to true.toString()),
|
||||
startNodesInProcess = false
|
||||
)
|
||||
assertEquals(1, issuanceTransaction.outputs.size)
|
||||
assertTrue(issuanceTransaction.outputs.single().constraint is HashAttachmentConstraint)
|
||||
@ -237,12 +237,12 @@ class SignatureConstraintVersioningTests {
|
||||
fun `HashConstraint cannot be migrated to SignatureConstraint if platform version is not 4 or greater`() {
|
||||
assumeFalse(System.getProperty("os.name").toLowerCase().startsWith("win")) // See NodeStatePersistenceTests.kt.
|
||||
val (issuanceTransaction, consumingTransaction) = upgradeCorDappBetweenTransactions(
|
||||
cordapp = oldUnsignedCordapp,
|
||||
newCordapp = newCordapp,
|
||||
whiteListedCordapps = emptyMap(),
|
||||
systemProperties = mapOf("net.corda.node.disableHashConstraints" to true.toString()),
|
||||
startNodesInProcess = false,
|
||||
minimumPlatformVersion = 3
|
||||
cordapp = oldUnsignedCordapp,
|
||||
newCordapp = newCordapp,
|
||||
whiteListedCordapps = emptyMap(),
|
||||
systemProperties = mapOf("net.corda.node.disableHashConstraints" to true.toString()),
|
||||
startNodesInProcess = false,
|
||||
minimumPlatformVersion = 3
|
||||
)
|
||||
assertEquals(1, issuanceTransaction.outputs.size)
|
||||
assertTrue(issuanceTransaction.outputs.single().constraint is HashAttachmentConstraint)
|
||||
@ -254,13 +254,13 @@ class SignatureConstraintVersioningTests {
|
||||
fun `HashConstraint cannot be migrated to SignatureConstraint if a HashConstraint is specified for one state and another uses an AutomaticPlaceholderConstraint`() {
|
||||
assumeFalse(System.getProperty("os.name").toLowerCase().startsWith("win")) // See NodeStatePersistenceTests.kt.
|
||||
val (issuanceTransaction, consumingTransaction) = upgradeCorDappBetweenTransactions(
|
||||
cordapp = oldUnsignedCordapp,
|
||||
newCordapp = newCordapp,
|
||||
whiteListedCordapps = emptyMap(),
|
||||
systemProperties = mapOf("net.corda.node.disableHashConstraints" to true.toString()),
|
||||
startNodesInProcess = false,
|
||||
specifyExistingConstraint = true,
|
||||
addAnotherAutomaticConstraintState = true
|
||||
cordapp = oldUnsignedCordapp,
|
||||
newCordapp = newCordapp,
|
||||
whiteListedCordapps = emptyMap(),
|
||||
systemProperties = mapOf("net.corda.node.disableHashConstraints" to true.toString()),
|
||||
startNodesInProcess = false,
|
||||
specifyExistingConstraint = true,
|
||||
addAnotherAutomaticConstraintState = true
|
||||
)
|
||||
assertEquals(1, issuanceTransaction.outputs.size)
|
||||
assertTrue(issuanceTransaction.outputs.single().constraint is HashAttachmentConstraint)
|
||||
@ -268,14 +268,14 @@ class SignatureConstraintVersioningTests {
|
||||
assertTrue(consumingTransaction.outputs[0].constraint is HashAttachmentConstraint)
|
||||
assertTrue(consumingTransaction.outputs[1].constraint is HashAttachmentConstraint)
|
||||
assertEquals(
|
||||
issuanceTransaction.outputs.single().constraint,
|
||||
consumingTransaction.outputs.first().constraint,
|
||||
"The constraint from the issuance transaction should be the same constraint used in the consuming transaction"
|
||||
issuanceTransaction.outputs.single().constraint,
|
||||
consumingTransaction.outputs.first().constraint,
|
||||
"The constraint from the issuance transaction should be the same constraint used in the consuming transaction"
|
||||
)
|
||||
assertEquals(
|
||||
consumingTransaction.outputs[0].constraint,
|
||||
consumingTransaction.outputs[1].constraint,
|
||||
"The AutomaticPlaceholderConstraint of the second state should become the same HashConstraint used in other state"
|
||||
consumingTransaction.outputs[0].constraint,
|
||||
consumingTransaction.outputs[1].constraint,
|
||||
"The AutomaticPlaceholderConstraint of the second state should become the same HashConstraint used in other state"
|
||||
)
|
||||
}
|
||||
|
||||
@ -284,14 +284,14 @@ class SignatureConstraintVersioningTests {
|
||||
* Upgrade the cordapp and create a consuming transaction using it
|
||||
*/
|
||||
private fun upgradeCorDappBetweenTransactions(
|
||||
cordapp: CustomCordapp,
|
||||
newCordapp: CustomCordapp,
|
||||
whiteListedCordapps: Map<ContractClassName, List<CustomCordapp>>,
|
||||
systemProperties: Map<String, String>,
|
||||
startNodesInProcess: Boolean,
|
||||
minimumPlatformVersion: Int = 4,
|
||||
specifyExistingConstraint: Boolean = false,
|
||||
addAnotherAutomaticConstraintState: Boolean = false
|
||||
cordapp: CustomCordapp,
|
||||
newCordapp: CustomCordapp,
|
||||
whiteListedCordapps: Map<ContractClassName, List<CustomCordapp>>,
|
||||
systemProperties: Map<String, String>,
|
||||
startNodesInProcess: Boolean,
|
||||
minimumPlatformVersion: Int = 4,
|
||||
specifyExistingConstraint: Boolean = false,
|
||||
addAnotherAutomaticConstraintState: Boolean = false
|
||||
): Pair<CoreTransaction, CoreTransaction> {
|
||||
|
||||
val whitelistedAttachmentHashes = whiteListedCordapps.mapValues { (_, cordapps) ->
|
||||
@ -302,13 +302,13 @@ class SignatureConstraintVersioningTests {
|
||||
|
||||
return internalDriver(
|
||||
inMemoryDB = false,
|
||||
startNodesInProcess = startNodesInProcess,
|
||||
startNodesInProcess = startNodesInProcess,
|
||||
networkParameters = testNetworkParameters(
|
||||
notaries = emptyList(),
|
||||
minimumPlatformVersion = minimumPlatformVersion,
|
||||
whitelistedContractImplementations = whitelistedAttachmentHashes
|
||||
minimumPlatformVersion = minimumPlatformVersion,
|
||||
whitelistedContractImplementations = whitelistedAttachmentHashes
|
||||
),
|
||||
systemProperties = systemProperties
|
||||
systemProperties = systemProperties
|
||||
) {
|
||||
// create transaction using first Cordapp
|
||||
val (nodeName, baseDirectory, issuanceTransaction) = createIssuanceTransaction(cordapp)
|
||||
@ -316,10 +316,10 @@ class SignatureConstraintVersioningTests {
|
||||
deleteCorDapp(baseDirectory, cordapp)
|
||||
// create transaction using the upgraded cordapp, reusing the input for the transaction
|
||||
val consumingTransaction = createConsumingTransaction(
|
||||
nodeName,
|
||||
newCordapp,
|
||||
specifyExistingConstraint,
|
||||
addAnotherAutomaticConstraintState
|
||||
nodeName,
|
||||
newCordapp,
|
||||
specifyExistingConstraint,
|
||||
addAnotherAutomaticConstraintState
|
||||
).coreTransaction
|
||||
issuanceTransaction to consumingTransaction
|
||||
}
|
||||
@ -327,15 +327,15 @@ class SignatureConstraintVersioningTests {
|
||||
|
||||
private fun DriverDSL.createIssuanceTransaction(cordapp: CustomCordapp): Triple<CordaX500Name, Path, CoreTransaction> {
|
||||
val nodeHandle = startNode(
|
||||
NodeParameters(
|
||||
rpcUsers = listOf(user),
|
||||
additionalCordapps = listOf(cordapp)
|
||||
)
|
||||
NodeParameters(
|
||||
rpcUsers = listOf(user),
|
||||
additionalCordapps = listOf(cordapp)
|
||||
)
|
||||
).getOrThrow()
|
||||
val nodeName = nodeHandle.nodeInfo.singleIdentity().name
|
||||
val tx = CordaRPCClient(nodeHandle.rpcAddress).start(user.username, user.password).use {
|
||||
it.proxy.startFlow(::CreateMessage, message, defaultNotaryIdentity)
|
||||
.returnValue.getOrThrow().coreTransaction
|
||||
.returnValue.getOrThrow().coreTransaction
|
||||
}
|
||||
nodeHandle.stop()
|
||||
return Triple(nodeName, nodeHandle.baseDirectory, tx)
|
||||
@ -343,15 +343,15 @@ class SignatureConstraintVersioningTests {
|
||||
|
||||
private fun deleteCorDapp(baseDirectory: Path, cordapp: CustomCordapp) {
|
||||
val cordappPath =
|
||||
baseDirectory.resolve(Paths.get("cordapps")).resolve(cordapp.jarFile.fileName)
|
||||
baseDirectory.resolve(Paths.get("cordapps")).resolve(cordapp.jarFile.fileName)
|
||||
cordappPath.delete()
|
||||
}
|
||||
|
||||
private fun DriverDSL.createConsumingTransaction(
|
||||
nodeName: CordaX500Name,
|
||||
cordapp: CustomCordapp,
|
||||
specifyExistingConstraint: Boolean,
|
||||
addAnotherAutomaticConstraintState: Boolean
|
||||
nodeName: CordaX500Name,
|
||||
cordapp: CustomCordapp,
|
||||
specifyExistingConstraint: Boolean,
|
||||
addAnotherAutomaticConstraintState: Boolean
|
||||
): SignedTransaction {
|
||||
val nodeHandle = startNode(
|
||||
NodeParameters(
|
||||
@ -361,21 +361,21 @@ class SignatureConstraintVersioningTests {
|
||||
)
|
||||
).getOrThrow()
|
||||
val result: StateAndRef<MessageState>? =
|
||||
CordaRPCClient(nodeHandle.rpcAddress).start(user.username, user.password).use {
|
||||
val page = it.proxy.vaultQuery(MessageState::class.java)
|
||||
page.states.singleOrNull()
|
||||
}
|
||||
CordaRPCClient(nodeHandle.rpcAddress).start(user.username, user.password).use {
|
||||
val page = it.proxy.vaultQuery(MessageState::class.java)
|
||||
page.states.singleOrNull()
|
||||
}
|
||||
val transaction =
|
||||
CordaRPCClient(nodeHandle.rpcAddress).start(user.username, user.password).use {
|
||||
it.proxy.startFlow(
|
||||
::ConsumeMessage,
|
||||
result!!,
|
||||
defaultNotaryIdentity,
|
||||
specifyExistingConstraint,
|
||||
addAnotherAutomaticConstraintState
|
||||
)
|
||||
.returnValue.getOrThrow()
|
||||
}
|
||||
CordaRPCClient(nodeHandle.rpcAddress).start(user.username, user.password).use {
|
||||
it.proxy.startFlow(
|
||||
::ConsumeMessage,
|
||||
result!!,
|
||||
defaultNotaryIdentity,
|
||||
specifyExistingConstraint,
|
||||
addAnotherAutomaticConstraintState
|
||||
)
|
||||
.returnValue.getOrThrow()
|
||||
}
|
||||
nodeHandle.stop()
|
||||
return transaction
|
||||
}
|
||||
@ -383,18 +383,18 @@ class SignatureConstraintVersioningTests {
|
||||
|
||||
@StartableByRPC
|
||||
class CreateMessage(private val message: Message, private val notary: Party) :
|
||||
FlowLogic<SignedTransaction>() {
|
||||
FlowLogic<SignedTransaction>() {
|
||||
@Suspendable
|
||||
override fun call(): SignedTransaction {
|
||||
val messageState = MessageState(message = message, by = ourIdentity)
|
||||
val txCommand = Command(
|
||||
DummyMessageContract.Commands.Send(),
|
||||
messageState.participants.map { it.owningKey })
|
||||
DummyMessageContract.Commands.Send(),
|
||||
messageState.participants.map { it.owningKey })
|
||||
val txBuilder = TransactionBuilder(notary).withItems(
|
||||
StateAndContract(
|
||||
messageState,
|
||||
TEST_MESSAGE_CONTRACT_PROGRAM_ID
|
||||
), txCommand
|
||||
StateAndContract(
|
||||
messageState,
|
||||
TEST_MESSAGE_CONTRACT_PROGRAM_ID
|
||||
), txCommand
|
||||
)
|
||||
txBuilder.toLedgerTransaction(serviceHub).verify()
|
||||
val signedTx = serviceHub.signInitialTransaction(txBuilder)
|
||||
@ -406,22 +406,22 @@ class CreateMessage(private val message: Message, private val notary: Party) :
|
||||
//TODO merge both flows?
|
||||
@StartableByRPC
|
||||
class ConsumeMessage(
|
||||
private val stateRef: StateAndRef<MessageState>,
|
||||
private val notary: Party,
|
||||
private val specifyExistingConstraint: Boolean,
|
||||
private val addAnotherAutomaticConstraintState: Boolean
|
||||
private val stateRef: StateAndRef<MessageState>,
|
||||
private val notary: Party,
|
||||
private val specifyExistingConstraint: Boolean,
|
||||
private val addAnotherAutomaticConstraintState: Boolean
|
||||
) : FlowLogic<SignedTransaction>() {
|
||||
@Suspendable
|
||||
override fun call(): SignedTransaction {
|
||||
val oldMessageState = stateRef.state.data
|
||||
val messageState = MessageState(
|
||||
Message(oldMessageState.message.value + "A"),
|
||||
ourIdentity,
|
||||
stateRef.state.data.linearId
|
||||
Message(oldMessageState.message.value + "A"),
|
||||
ourIdentity,
|
||||
stateRef.state.data.linearId
|
||||
)
|
||||
val txCommand = Command(
|
||||
DummyMessageContract.Commands.Send(),
|
||||
messageState.participants.map { it.owningKey })
|
||||
DummyMessageContract.Commands.Send(),
|
||||
messageState.participants.map { it.owningKey })
|
||||
val txBuilder = TransactionBuilder(notary).apply {
|
||||
if (specifyExistingConstraint) {
|
||||
addOutputState(messageState, stateRef.state.constraint)
|
||||
@ -430,11 +430,11 @@ class ConsumeMessage(
|
||||
}
|
||||
if (addAnotherAutomaticConstraintState) {
|
||||
addOutputState(
|
||||
MessageState(
|
||||
Message("Another message"),
|
||||
ourIdentity,
|
||||
UniqueIdentifier()
|
||||
)
|
||||
MessageState(
|
||||
Message("Another message"),
|
||||
ourIdentity,
|
||||
UniqueIdentifier()
|
||||
)
|
||||
)
|
||||
}
|
||||
addInputState(stateRef)
|
||||
@ -452,9 +452,9 @@ data class Message(val value: String)
|
||||
|
||||
@BelongsToContract(DummyMessageContract::class)
|
||||
data class MessageState(
|
||||
val message: Message,
|
||||
val by: Party,
|
||||
override val linearId: UniqueIdentifier = UniqueIdentifier()
|
||||
val message: Message,
|
||||
val by: Party,
|
||||
override val linearId: UniqueIdentifier = UniqueIdentifier()
|
||||
) : LinearState {
|
||||
override val participants: List<AbstractParty> = listOf(by)
|
||||
}
|
||||
|
@ -3,18 +3,13 @@ package net.corda.node.logging
|
||||
import net.corda.core.flows.FlowLogic
|
||||
import net.corda.core.flows.InitiatingFlow
|
||||
import net.corda.core.flows.StartableByRPC
|
||||
import net.corda.core.internal.div
|
||||
import net.corda.core.messaging.FlowHandle
|
||||
import net.corda.core.messaging.startFlow
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.core.utilities.loggerFor
|
||||
import net.corda.testing.driver.DriverParameters
|
||||
import net.corda.testing.driver.NodeHandle
|
||||
import net.corda.testing.driver.driver
|
||||
import org.assertj.core.api.Assertions.assertThat
|
||||
import org.junit.Test
|
||||
import java.io.File
|
||||
|
||||
class ErrorCodeLoggingTests {
|
||||
@Test
|
||||
@ -43,11 +38,11 @@ class ErrorCodeLoggingTests {
|
||||
val linesWithoutError = logFile.useLines { lines ->
|
||||
lines.filterNot {
|
||||
it.contains("[ERROR")
|
||||
}.filter{
|
||||
}.filter {
|
||||
it.contains("[INFO")
|
||||
.or(it.contains("[WARN"))
|
||||
.or(it.contains("[DEBUG"))
|
||||
.or(it.contains("[TRACE"))
|
||||
.or(it.contains("[WARN"))
|
||||
.or(it.contains("[DEBUG"))
|
||||
.or(it.contains("[TRACE"))
|
||||
}.toList()
|
||||
}
|
||||
assertThat(linesWithoutError.isEmpty()).isTrue()
|
||||
|
@ -17,6 +17,7 @@ import net.corda.finance.contracts.asset.Cash
|
||||
import net.corda.finance.flows.CashIssueAndPaymentFlow
|
||||
import net.corda.finance.schemas.CashSchemaV1
|
||||
import net.corda.node.services.Permissions
|
||||
import net.corda.node.services.rpc.RpcReconnectTests.Companion.NUMBER_OF_FLOWS_TO_RUN
|
||||
import net.corda.testing.core.DUMMY_BANK_A_NAME
|
||||
import net.corda.testing.core.DUMMY_BANK_B_NAME
|
||||
import net.corda.testing.driver.DriverParameters
|
||||
|
@ -176,8 +176,9 @@ internal class CordaRPCOpsImpl(
|
||||
platformVersion = CordaVersion.platformVersion,
|
||||
vendor = CordaVersion.vendor,
|
||||
cordapps = services.cordappProvider.cordapps
|
||||
.filter { !it.jarPath.toString().endsWith("corda-core-${CordaVersion.releaseVersion}.jar") }
|
||||
.map { CordappInfo(
|
||||
.filter { !it.jarPath.toString().endsWith("corda-core-${CordaVersion.releaseVersion}.jar") }
|
||||
.map {
|
||||
CordappInfo(
|
||||
type = when (it.info) {
|
||||
is Cordapp.Info.Contract -> "Contract CorDapp"
|
||||
is Cordapp.Info.Workflow -> "Workflow CorDapp"
|
||||
@ -191,7 +192,7 @@ internal class CordaRPCOpsImpl(
|
||||
vendor = it.info.vendor,
|
||||
licence = it.info.licence,
|
||||
jarHash = it.jarHash)
|
||||
}
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -2,7 +2,10 @@ package net.corda.node.internal
|
||||
|
||||
import net.corda.core.crypto.SecureHash
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.internal.*
|
||||
import net.corda.core.internal.DigitalSignatureWithCert
|
||||
import net.corda.core.internal.NamedCacheFactory
|
||||
import net.corda.core.internal.NetworkParametersStorage
|
||||
import net.corda.core.internal.SignedDataWithCert
|
||||
import net.corda.core.node.NetworkParameters
|
||||
import net.corda.core.node.NotaryInfo
|
||||
import net.corda.core.serialization.SerializedBytes
|
||||
|
@ -304,13 +304,13 @@ open class Node(configuration: NodeConfiguration,
|
||||
)
|
||||
}
|
||||
|
||||
private fun makeBridgeControlListener(serverAddress: NetworkHostAndPort, networkParameters: NetworkParameters) : BridgeControlListener {
|
||||
private fun makeBridgeControlListener(serverAddress: NetworkHostAndPort, networkParameters: NetworkParameters): BridgeControlListener {
|
||||
val artemisMessagingClientFactory = {
|
||||
ArtemisMessagingClient(
|
||||
configuration.p2pSslOptions,
|
||||
serverAddress,
|
||||
networkParameters.maxMessageSize,
|
||||
failoverCallback = { errorAndTerminate("ArtemisMessagingClient failed. Shutting down.", null) }
|
||||
failoverCallback = { errorAndTerminate("ArtemisMessagingClient failed. Shutting down.", null) }
|
||||
)
|
||||
}
|
||||
return BridgeControlListener(configuration.p2pSslOptions, networkParameters.maxMessageSize, configuration.crlCheckSoftFail, artemisMessagingClientFactory)
|
||||
|
@ -1,10 +1,10 @@
|
||||
package net.corda.node.internal
|
||||
|
||||
import io.netty.channel.unix.Errors
|
||||
import net.corda.cliutils.printError
|
||||
import net.corda.cliutils.CliWrapperBase
|
||||
import net.corda.cliutils.CordaCliWrapper
|
||||
import net.corda.cliutils.ExitCodes
|
||||
import net.corda.cliutils.printError
|
||||
import net.corda.common.logging.CordaVersion
|
||||
import net.corda.core.contracts.HashAttachmentConstraint
|
||||
import net.corda.core.crypto.Crypto
|
||||
@ -12,7 +12,6 @@ import net.corda.core.internal.*
|
||||
import net.corda.core.internal.concurrent.thenMatch
|
||||
import net.corda.core.internal.cordapp.CordappImpl
|
||||
import net.corda.core.internal.errors.AddressBindingException
|
||||
import net.corda.core.internal.safeSymbolicRead
|
||||
import net.corda.core.utilities.Try
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.core.utilities.loggerFor
|
||||
@ -39,7 +38,6 @@ import java.io.RandomAccessFile
|
||||
import java.lang.management.ManagementFactory
|
||||
import java.net.InetAddress
|
||||
import java.nio.channels.UnresolvedAddressException
|
||||
import java.nio.file.Files
|
||||
import java.nio.file.Path
|
||||
import java.time.DayOfWeek
|
||||
import java.time.ZonedDateTime
|
||||
|
@ -13,7 +13,7 @@ import java.lang.reflect.Proxy
|
||||
* without sensible fallbacks to the classloader of the current instance.
|
||||
* If clients' CorDapps use one of these libraries, this temporary adjustment can ensure that any provided classes from these libraries will be available during RPC calls.
|
||||
*/
|
||||
internal class ThreadContextAdjustingRpcOpsProxy(private val delegate: InternalCordaRPCOps, private val classLoader: ClassLoader): InternalCordaRPCOps by proxy(delegate, classLoader) {
|
||||
internal class ThreadContextAdjustingRpcOpsProxy(private val delegate: InternalCordaRPCOps, private val classLoader: ClassLoader) : InternalCordaRPCOps by proxy(delegate, classLoader) {
|
||||
private companion object {
|
||||
private fun proxy(delegate: InternalCordaRPCOps, classLoader: ClassLoader): InternalCordaRPCOps {
|
||||
val handler = ThreadContextAdjustingRpcOpsProxy.ThreadContextAdjustingInvocationHandler(delegate, classLoader)
|
||||
|
@ -0,0 +1,71 @@
package net.corda.node.migration

import liquibase.change.custom.CustomSqlChange
import liquibase.database.Database
import liquibase.exception.ValidationErrors
import liquibase.resource.ResourceAccessor
import liquibase.statement.SqlStatement
import liquibase.statement.core.UpdateStatement
import net.corda.core.crypto.toStringShort
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.PartyAndCertificate
import net.corda.core.utilities.contextLogger
import net.corda.node.services.identity.PersistentIdentityService
import net.corda.nodeapi.internal.crypto.X509CertificateFactory

class PersistentIdentityMigration : CustomSqlChange {

    companion object {
        private val logger = contextLogger()
        const val PUB_KEY_HASH_TO_PARTY_AND_CERT_TABLE = PersistentIdentityService.HASH_TO_IDENTITY_TABLE_NAME
        const val X500_NAME_TO_PUB_KEY_HASH_TABLE = PersistentIdentityService.NAME_TO_HASH_TABLE_NAME
    }

    override fun validate(database: Database?): ValidationErrors? {
        return null
    }

    override fun getConfirmationMessage(): String? {
        return null
    }

    override fun setFileOpener(resourceAccessor: ResourceAccessor?) {
    }

    override fun setUp() {
    }

    override fun generateStatements(database: Database?): Array<SqlStatement> {
        val dataSource = MigrationDataSource(database!!)
        val connection = dataSource.connection
        val statement = connection.prepareStatement("SELECT * FROM $PUB_KEY_HASH_TO_PARTY_AND_CERT_TABLE")
        val resultSet = statement.executeQuery()
        val generatedStatements = mutableListOf<SqlStatement>()
        while (resultSet.next()) {
            val oldPkHash = resultSet.getString(1)
            val identityBytes = resultSet.getBytes(2)
            val partyAndCertificate = PartyAndCertificate(X509CertificateFactory().delegate.generateCertPath(identityBytes.inputStream()))
            generatedStatements.addAll(MigrationData(oldPkHash, partyAndCertificate).let { listOf(updateHashToIdentityRow(it, dataSource), updateNameToHashRow(it, dataSource)) })
        }
        return generatedStatements.toTypedArray()
    }

    private fun updateHashToIdentityRow(migrationData: MigrationData, dataSource: MigrationDataSource): SqlStatement {
        return UpdateStatement(dataSource.connection.catalog, dataSource.connection.schema, PUB_KEY_HASH_TO_PARTY_AND_CERT_TABLE)
                .setWhereClause("pk_hash=?")
                .addNewColumnValue("pk_hash", migrationData.newPkHash)
                .addWhereParameter(migrationData.oldPkHash)
    }

    private fun updateNameToHashRow(migrationData: MigrationData, dataSource: MigrationDataSource): UpdateStatement {
        return UpdateStatement(dataSource.connection.catalog, dataSource.connection.schema, X500_NAME_TO_PUB_KEY_HASH_TABLE)
                .setWhereClause("pk_hash=? AND name=?")
                .addNewColumnValue("pk_hash", migrationData.newPkHash)
                .addWhereParameters(migrationData.oldPkHash, migrationData.x500.toString())
    }

    data class MigrationData(val oldPkHash: String,
                             val partyAndCertificate: PartyAndCertificate,
                             val x500: CordaX500Name = partyAndCertificate.name,
                             val newPkHash: String = partyAndCertificate.owningKey.toStringShort())
}
@ -1,7 +1,6 @@
|
||||
package net.corda.node.services.api
|
||||
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.identity.PartyAndCertificate
|
||||
import net.corda.core.internal.CertRole
|
||||
import net.corda.core.node.services.IdentityService
|
||||
@ -9,7 +8,6 @@ import net.corda.core.utilities.contextLogger
|
||||
import net.corda.nodeapi.internal.crypto.X509Utilities
|
||||
import net.corda.nodeapi.internal.crypto.x509Certificates
|
||||
import java.security.InvalidAlgorithmParameterException
|
||||
import java.security.PublicKey
|
||||
import java.security.cert.CertPathValidatorException
|
||||
import java.security.cert.CertificateExpiredException
|
||||
import java.security.cert.CertificateNotYetValidException
|
||||
|
@ -3,14 +3,11 @@ package net.corda.node.services.config.schema.v1
|
||||
import com.typesafe.config.Config
|
||||
import com.typesafe.config.ConfigException
|
||||
import net.corda.common.configuration.parsing.internal.*
|
||||
import net.corda.common.validation.internal.Validated
|
||||
import net.corda.common.validation.internal.Validated.Companion.invalid
|
||||
import net.corda.common.validation.internal.Validated.Companion.valid
|
||||
import net.corda.node.services.config.JmxReporterType
|
||||
import net.corda.node.services.config.NodeConfiguration
|
||||
import net.corda.node.services.config.NodeConfigurationImpl
|
||||
import net.corda.node.services.config.*
|
||||
import net.corda.node.services.config.NodeConfigurationImpl.Defaults
|
||||
import net.corda.node.services.config.Valid
|
||||
import net.corda.node.services.config.VerifierType
|
||||
import net.corda.node.services.config.schema.parsers.*
|
||||
|
||||
internal object V1NodeConfigurationSpec : Configuration.Specification<NodeConfiguration>("NodeConfiguration") {
|
||||
@ -65,7 +62,7 @@ internal object V1NodeConfigurationSpec : Configuration.Specification<NodeConfig
|
||||
@Suppress("unused")
|
||||
private val systemProperties by nestedObject().optional()
|
||||
|
||||
override fun parseValid(configuration: Config): Valid<NodeConfiguration> {
|
||||
override fun parseValid(configuration: Config): Validated<NodeConfiguration, Configuration.Validation.Error> {
|
||||
|
||||
val messagingServerExternal = configuration[messagingServerExternal] ?: Defaults.messagingServerExternal(configuration[messagingServerAddress])
|
||||
val database = configuration[database] ?: Defaults.database(configuration[devMode])
|
||||
@ -127,7 +124,7 @@ internal object V1NodeConfigurationSpec : Configuration.Specification<NodeConfig
|
||||
else -> throw e
|
||||
}
|
||||
}
|
||||
return result.mapValid { conf -> Valid.withResult(conf as NodeConfiguration, conf.validate().map(::toError).toSet()) }
|
||||
return result.mapValid { conf -> Validated.withResult(conf as NodeConfiguration, conf.validate().map(::toError).toSet()) }
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,9 +1,8 @@
|
||||
package net.corda.node.services.identity
|
||||
|
||||
import net.corda.core.crypto.SecureHash
|
||||
import net.corda.core.crypto.toStringShort
|
||||
import net.corda.core.identity.*
|
||||
import net.corda.core.internal.NamedCacheFactory
|
||||
import net.corda.core.internal.hash
|
||||
import net.corda.core.internal.toSet
|
||||
import net.corda.core.node.services.UnknownAnonymousPartyException
|
||||
import net.corda.core.serialization.SingletonSerializeAsToken
|
||||
@ -16,7 +15,7 @@ import net.corda.nodeapi.internal.crypto.X509CertificateFactory
|
||||
import net.corda.nodeapi.internal.crypto.x509Certificates
|
||||
import net.corda.nodeapi.internal.persistence.CordaPersistence
|
||||
import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX
|
||||
import org.apache.commons.lang3.ArrayUtils.EMPTY_BYTE_ARRAY
|
||||
import org.hibernate.internal.util.collections.ArrayHelper.EMPTY_BYTE_ARRAY
|
||||
import java.security.InvalidAlgorithmParameterException
|
||||
import java.security.PublicKey
|
||||
import java.security.cert.*
|
||||
@ -36,62 +35,69 @@ class PersistentIdentityService(cacheFactory: NamedCacheFactory) : SingletonSeri
|
||||
companion object {
|
||||
private val log = contextLogger()
|
||||
|
||||
fun createPKMap(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<SecureHash, PartyAndCertificate, PersistentIdentity, String> {
|
||||
const val HASH_TO_IDENTITY_TABLE_NAME = "${NODE_DATABASE_PREFIX}identities"
|
||||
const val NAME_TO_HASH_TABLE_NAME = "${NODE_DATABASE_PREFIX}named_identities"
|
||||
const val PK_HASH_COLUMN_NAME = "pk_hash"
|
||||
const val IDENTITY_COLUMN_NAME = "identity_value"
|
||||
const val NAME_COLUMN_NAME = "name"
|
||||
|
||||
fun createPKMap(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<String, PartyAndCertificate, PersistentIdentity, String> {
|
||||
return AppendOnlyPersistentMap(
|
||||
cacheFactory = cacheFactory,
|
||||
name = "PersistentIdentityService_partyByKey",
|
||||
toPersistentEntityKey = { it.toString() },
|
||||
toPersistentEntityKey = { it },
|
||||
fromPersistentEntity = {
|
||||
Pair(
|
||||
SecureHash.parse(it.publicKeyHash),
|
||||
it.publicKeyHash,
|
||||
PartyAndCertificate(X509CertificateFactory().delegate.generateCertPath(it.identity.inputStream()))
|
||||
)
|
||||
},
|
||||
toPersistentEntity = { key: SecureHash, value: PartyAndCertificate ->
|
||||
PersistentIdentity(key.toString(), value.certPath.encoded)
|
||||
toPersistentEntity = { key: String, value: PartyAndCertificate ->
|
||||
PersistentIdentity(key, value.certPath.encoded)
|
||||
},
|
||||
persistentEntityClass = PersistentIdentity::class.java
|
||||
)
|
||||
}
|
||||
|
||||
fun createX500Map(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<CordaX500Name, SecureHash, PersistentIdentityNames, String> {
|
||||
fun createX500Map(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<CordaX500Name, String, PersistentIdentityNames, String> {
|
||||
return AppendOnlyPersistentMap(
|
||||
cacheFactory = cacheFactory,
|
||||
name = "PersistentIdentityService_partyByName",
|
||||
toPersistentEntityKey = { it.toString() },
|
||||
fromPersistentEntity = { Pair(CordaX500Name.parse(it.name), SecureHash.parse(it.publicKeyHash)) },
|
||||
toPersistentEntity = { key: CordaX500Name, value: SecureHash ->
|
||||
PersistentIdentityNames(key.toString(), value.toString())
|
||||
fromPersistentEntity = {
|
||||
Pair(CordaX500Name.parse(it.name), it.publicKeyHash)
|
||||
},
|
||||
toPersistentEntity = { key: CordaX500Name, value: String ->
|
||||
PersistentIdentityNames(key.toString(), value)
|
||||
},
|
||||
persistentEntityClass = PersistentIdentityNames::class.java
|
||||
)
|
||||
}
|
||||
|
||||
private fun mapToKey(owningKey: PublicKey) = owningKey.hash
|
||||
private fun mapToKey(party: PartyAndCertificate) = mapToKey(party.owningKey)
|
||||
private fun mapToKey(party: PartyAndCertificate) = party.owningKey.toStringShort()
|
||||
}
|
||||
|
||||
@Entity
|
||||
@javax.persistence.Table(name = "${NODE_DATABASE_PREFIX}identities")
|
||||
@javax.persistence.Table(name = HASH_TO_IDENTITY_TABLE_NAME)
|
||||
class PersistentIdentity(
|
||||
@Id
|
||||
@Column(name = "pk_hash", length = MAX_HASH_HEX_SIZE, nullable = false)
|
||||
@Column(name = PK_HASH_COLUMN_NAME, length = MAX_HASH_HEX_SIZE, nullable = false)
|
||||
var publicKeyHash: String = "",
|
||||
|
||||
@Lob
|
||||
@Column(name = "identity_value", nullable = false)
|
||||
@Column(name = IDENTITY_COLUMN_NAME, nullable = false)
|
||||
var identity: ByteArray = EMPTY_BYTE_ARRAY
|
||||
)
|
||||
|
||||
@Entity
|
||||
@javax.persistence.Table(name = "${NODE_DATABASE_PREFIX}named_identities")
|
||||
@javax.persistence.Table(name = NAME_TO_HASH_TABLE_NAME)
|
||||
class PersistentIdentityNames(
|
||||
@Id
|
||||
@Column(name = "name", length = 128, nullable = false)
|
||||
@Column(name = NAME_COLUMN_NAME, length = 128, nullable = false)
|
||||
var name: String = "",
|
||||
|
||||
@Column(name = "pk_hash", length = MAX_HASH_HEX_SIZE, nullable = true)
|
||||
var publicKeyHash: String? = ""
|
||||
@Column(name = PK_HASH_COLUMN_NAME, length = MAX_HASH_HEX_SIZE, nullable = false)
|
||||
var publicKeyHash: String = ""
|
||||
)
|
||||
|
||||
private lateinit var _caCertStore: CertStore
|
||||
@ -156,11 +162,11 @@ class PersistentIdentityService(cacheFactory: NamedCacheFactory) : SingletonSeri
|
||||
principalToParties.addWithDuplicatesAllowed(identity.name, key, false)
|
||||
}
|
||||
|
||||
val parentId = mapToKey(identityCertChain[1].publicKey)
|
||||
val parentId = identityCertChain[1].publicKey.toStringShort()
|
||||
return keyToParties[parentId]
|
||||
}
|
||||
|
||||
override fun certificateFromKey(owningKey: PublicKey): PartyAndCertificate? = database.transaction { keyToParties[mapToKey(owningKey)] }
|
||||
override fun certificateFromKey(owningKey: PublicKey): PartyAndCertificate? = database.transaction { keyToParties[owningKey.toStringShort()] }
|
||||
|
||||
private fun certificateFromCordaX500Name(name: CordaX500Name): PartyAndCertificate? {
|
||||
return database.transaction {
|
||||
|
@ -81,13 +81,14 @@ class BasicHSMKeyManagementService(cacheFactory: NamedCacheFactory,
|
||||
}
|
||||
}
|
||||
|
||||
override val keys: Set<PublicKey> get() {
|
||||
return database.transaction {
|
||||
val set = LinkedHashSet<PublicKey>(originalKeysMap.keys)
|
||||
keysMap.allPersisted.use { it.forEach { set += it.first } }
|
||||
set
|
||||
override val keys: Set<PublicKey>
|
||||
get() {
|
||||
return database.transaction {
|
||||
val set = LinkedHashSet<PublicKey>(originalKeysMap.keys)
|
||||
keysMap.allPersisted.use { it.forEach { set += it.first } }
|
||||
set
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private fun containsPublicKey(publicKey: PublicKey): Boolean {
|
||||
return (publicKey in originalKeysMap || publicKey in keysMap)
|
||||
|
@ -7,7 +7,6 @@ import net.corda.core.serialization.serialize
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.core.utilities.debug
|
||||
import net.corda.core.utilities.seconds
|
||||
import net.corda.core.internal.NODE_INFO_DIRECTORY
|
||||
import net.corda.nodeapi.internal.NodeInfoAndSigned
|
||||
import net.corda.nodeapi.internal.network.NodeInfoFilesCopier
|
||||
import rx.Observable
|
||||
@ -82,8 +81,10 @@ class NodeInfoWatcher(private val nodePath: Path,
|
||||
val processedPaths = HashSet<Path>()
|
||||
val result = nodeInfosDir.list { paths ->
|
||||
paths
|
||||
.filter { logger.debug { "Examining $it" }
|
||||
true}
|
||||
.filter {
|
||||
logger.debug { "Examining $it" }
|
||||
true
|
||||
}
|
||||
.filter { it.isRegularFile() }
|
||||
.filter { file ->
|
||||
val lastModifiedTime = file.lastModifiedTime()
|
||||
|
@ -41,7 +41,6 @@ open class PersistentNetworkMapCache(cacheFactory: NamedCacheFactory,
|
||||
private val database: CordaPersistence,
|
||||
private val identityService: IdentityService) : NetworkMapCacheInternal, SingletonSerializeAsToken() {
|
||||
|
||||
|
||||
companion object {
|
||||
private val logger = contextLogger()
|
||||
}
|
||||
|
@ -66,7 +66,7 @@ class DBTransactionStorage(private val database: CordaPersistence, cacheFactory:
|
||||
|
||||
companion object {
|
||||
fun fromDatabaseValue(databaseValue: String): TransactionStatus {
|
||||
return when(databaseValue) {
|
||||
return when (databaseValue) {
|
||||
"V" -> VERIFIED
|
||||
"U" -> UNVERIFIED
|
||||
else -> throw UnexpectedStatusValueException(databaseValue)
|
||||
@ -74,7 +74,7 @@ class DBTransactionStorage(private val database: CordaPersistence, cacheFactory:
|
||||
}
|
||||
}
|
||||
|
||||
private class UnexpectedStatusValueException(status: String): Exception("Found unexpected status value $status in transaction store")
|
||||
private class UnexpectedStatusValueException(status: String) : Exception("Found unexpected status value $status in transaction store")
|
||||
}
|
||||
|
||||
@Converter
|
||||
@ -223,7 +223,8 @@ class DBTransactionStorage(private val database: CordaPersistence, cacheFactory:
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
val transactions: List<SignedTransaction> get() = database.transaction { snapshot() }
|
||||
val transactions: List<SignedTransaction>
|
||||
get() = database.transaction { snapshot() }
|
||||
|
||||
private fun snapshot(): List<SignedTransaction> {
|
||||
return txStorage.content.allPersisted.use {
|
||||
@ -241,6 +242,7 @@ class DBTransactionStorage(private val database: CordaPersistence, cacheFactory:
|
||||
stx.txBits,
|
||||
Collections.unmodifiableList(stx.sigs),
|
||||
status)
|
||||
|
||||
fun toSignedTx() = SignedTransaction(txBits, sigs)
|
||||
}
|
||||
}
|
||||
|
@ -7,20 +7,19 @@ import java.util.*
|
||||
import javax.persistence.*
|
||||
|
||||
@Entity
|
||||
@Table(name = "pk_hash_to_ext_id_map", indexes = [Index(name = "pk_hash_to_xid_idx", columnList = "public_key_hash")])
|
||||
@Table(name = "pk_hash_to_ext_id_map", indexes = [
|
||||
Index(name = "ext_id_idx", columnList = "external_id")
|
||||
])
|
||||
class PublicKeyHashToExternalId(
|
||||
@Id
|
||||
@GeneratedValue
|
||||
@Column(name = "id", unique = true, nullable = false)
|
||||
val key: Long?,
|
||||
|
||||
@Column(name = "external_id", nullable = false)
|
||||
@Type(type = "uuid-char")
|
||||
val externalId: UUID,
|
||||
|
||||
@Id
|
||||
@Column(name = "public_key_hash", nullable = false)
|
||||
val publicKeyHash: String
|
||||
|
||||
) {
|
||||
constructor(accountId: UUID, publicKey: PublicKey)
|
||||
: this(null, accountId, publicKey.toStringShort())
|
||||
: this(accountId, publicKey.toStringShort())
|
||||
}
|
@ -35,7 +35,7 @@ class PublicKeyToOwningIdentityCacheImpl(private val database: CordaPersistence,
|
||||
val queryRoot = criteriaQuery.from(PersistentIdentityService.PersistentIdentity::class.java)
|
||||
criteriaQuery.select(criteriaBuilder.count(queryRoot))
|
||||
criteriaQuery.where(
|
||||
criteriaBuilder.equal(queryRoot.get<String>(PersistentIdentityService.PersistentIdentity::publicKeyHash.name), key.hash.toString())
|
||||
criteriaBuilder.equal(queryRoot.get<String>(PersistentIdentityService.PersistentIdentity::publicKeyHash.name), key.toStringShort())
|
||||
)
|
||||
val query = session.createQuery(criteriaQuery)
|
||||
query.uniqueResult() > 0
|
||||
|
@ -111,7 +111,7 @@ class CheckpointDumper(private val checkpointStorage: CheckpointStorage, private
|
||||
|
||||
val (bytes, fileName) = try {
|
||||
val checkpoint =
|
||||
serialisedCheckpoint.checkpointDeserialize(context = checkpointSerializationContext)
|
||||
serialisedCheckpoint.checkpointDeserialize(context = checkpointSerializationContext)
|
||||
val json = checkpoint.toJson(runId.uuid, now)
|
||||
val jsonBytes = writer.writeValueAsBytes(json)
|
||||
jsonBytes to "${json.topLevelFlowClass.simpleName}-${runId.uuid}.json"
|
||||
@ -144,8 +144,7 @@ class CheckpointDumper(private val checkpointStorage: CheckpointStorage, private
|
||||
val instance = checkpointHook.objectOrNewInstance()
|
||||
val checkpointIdField = instance.declaredField<UUID>(instance.javaClass, "checkpointId")
|
||||
checkpointIdField.value = checkpointId.uuid
|
||||
}
|
||||
catch (e: Exception) {
|
||||
} catch (e: Exception) {
|
||||
log.error("Checkpoint agent instrumentation failed for checkpointId: $checkpointId\n. ${e.message}")
|
||||
}
|
||||
}
|
||||
@ -178,66 +177,66 @@ class CheckpointDumper(private val checkpointStorage: CheckpointStorage, private
|
||||
}
|
||||
|
||||
return CheckpointJson(
|
||||
flowId = id,
|
||||
flowId = id,
topLevelFlowClass = flowLogic.javaClass,
topLevelFlowLogic = flowLogic,
flowCallStackSummary = flowCallStack.toSummary(),
flowCallStack = flowCallStack,
suspendedOn = (flowState as? FlowState.Started)?.flowIORequest?.toSuspendedOn(
suspendedTimestamp(),
now
),
origin = invocationContext.origin.toOrigin(),
ourIdentity = ourIdentity,
activeSessions = sessions.mapNotNull { it.value.toActiveSession(it.key) },
errored = errorState as? ErrorState.Errored
)
}

private fun Checkpoint.suspendedTimestamp(): Instant = invocationContext.trace.invocationId.timestamp

private fun checkpointDeserializationErrorMessage(
checkpointId: StateMachineRunId,
exception: Exception
): String {
return """
*** Unable to deserialise checkpoint: ${exception.message} ***
*** Check logs for further information, checkpoint flowId: ${checkpointId.uuid} ***
"""
.trimIndent()
}

private fun FlowStateMachineImpl<*>.getQuasarStack() =
declaredField<Stack>("stack").value.declaredField<Array<*>>("dataObject").value

private fun SubFlow.toJson(stackObjects: Array<*>): FlowCall {
val subFlowLogic = stackObjects.find(flowClass::isInstance) as? FlowLogic<*>
val currentStep = subFlowLogic?.progressTracker?.currentStep
return FlowCall(
flowClass = flowClass,
progressStep = if (currentStep == ProgressTracker.UNSTARTED) null else currentStep?.label,
flowLogic = subFlowLogic
)
}

private fun List<FlowCall>.toSummary() = map {
FlowCallSummary(
it.flowClass,
it.progressStep
)
}

@Suppress("unused")
private class FlowCallSummary(
val flowClass: Class<*>,
val progressStep: String?
)

@Suppress("unused")
private class FlowCall(
val flowClass: Class<*>,
val progressStep: String?,
val flowLogic: FlowLogic<*>?
)

@Suppress("unused")
@@ -262,16 +261,16 @@ class CheckpointDumper(private val checkpointStorage: CheckpointStorage, private

@Suppress("unused")
private class CheckpointJson(
val flowId: UUID,
val topLevelFlowClass: Class<FlowLogic<*>>,
val topLevelFlowLogic: FlowLogic<*>,
val flowCallStackSummary: List<FlowCallSummary>,
val suspendedOn: SuspendedOn?,
val flowCallStack: List<FlowCall>,
val origin: Origin,
val ourIdentity: Party,
val activeSessions: List<ActiveSession>,
val errored: ErrorState.Errored?
)

@Suppress("unused")
@@ -383,6 +382,7 @@ class CheckpointDumper(private val checkpointStorage: CheckpointStorage, private
writeObjectField("ourSessionId", value.sourceSessionId)
}
}

override fun handledType(): Class<FlowSessionImpl> = FlowSessionImpl::class.java
}

@@ -402,6 +402,7 @@ class CheckpointDumper(private val checkpointStorage: CheckpointStorage, private
}
gen.writeEndArray()
}

override fun handledType(): Class<Map<Any, Any>> = uncheckedCast(Map::class.java)
}
}
@@ -49,7 +49,7 @@ import rx.Observable
import rx.subjects.PublishSubject
import java.lang.Integer.min
import java.security.SecureRandom
import java.util.HashSet
import java.util.*
import java.util.concurrent.*
import javax.annotation.concurrent.ThreadSafe
import kotlin.collections.ArrayList
@@ -209,7 +209,7 @@ class NodeVaultService(

/** Groups adjacent transactions into batches to generate separate net updates per transaction type. */
override fun notifyAll(statesToRecord: StatesToRecord, txns: Iterable<CoreTransaction>, previouslySeenTxns: Iterable<CoreTransaction>) {
if (statesToRecord == StatesToRecord.NONE || (!txns.any() && !previouslySeenTxns.any())) return
val batch = mutableListOf<CoreTransaction>()

fun flushBatch(previouslySeen: Boolean) {
@@ -217,6 +217,7 @@ class NodeVaultService(
processAndNotify(updates, previouslySeen)
batch.clear()
}

fun processTransactions(txs: Iterable<CoreTransaction>, previouslySeen: Boolean) {
for (tx in txs) {
if (batch.isNotEmpty() && tx.javaClass != batch.last().javaClass) {
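The doc comment above describes the batching rule but the hunk cuts off before the batch is flushed. A minimal standalone sketch of the adjacent-grouping idea (Kotlin, illustrative only, not the node's implementation):

// Walk a list and start a new batch whenever the element's runtime type changes,
// so each batch can be flushed as its own update.
fun <T : Any> batchByAdjacentType(items: List<T>): List<List<T>> {
    val batches = mutableListOf<List<T>>()
    val current = mutableListOf<T>()
    for (item in items) {
        if (current.isNotEmpty() && item.javaClass != current.last().javaClass) {
            batches += current.toList()
            current.clear()
        }
        current += item
    }
    if (current.isNotEmpty()) batches += current.toList()
    return batches
}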
@@ -138,17 +138,18 @@ abstract class AppendOnlyPersistentMapBase<K, V, E, out EK>(
* underlying storage if this races with another database transaction to store a value for the same key.
* @return true if added key was unique, otherwise false
*/
fun addWithDuplicatesAllowed(key: K, value: V, logWarning: Boolean = true): Boolean =
set(key, value, logWarning) { k, v ->
val session = currentDBSession()
val existingEntry = session.find(persistentEntityClass, toPersistentEntityKey(k))
if (existingEntry == null) {
session.save(toPersistentEntity(k, v))
null
} else {
fromPersistentEntity(existingEntry).second
}
fun addWithDuplicatesAllowed(key: K, value: V, logWarning: Boolean = true): Boolean {
return set(key, value, logWarning) { k, v ->
val session = currentDBSession()
val existingEntry = session.find(persistentEntityClass, toPersistentEntityKey(k))
if (existingEntry == null) {
session.save(toPersistentEntity(k, v))
null
} else {
fromPersistentEntity(existingEntry).second
}
}
}

/**
* Associates the specified value with the specified key in this map and persists it.
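For context, a hedged usage sketch of the contract documented above, where `map` is a hypothetical stand-in for some AppendOnlyPersistentMapBase-backed map used inside a database transaction (names are illustrative, not from this diff):

// First insert of a key reports true (key was unique); a repeat insert of the same
// key is tolerated but reports false, returning the already-stored value internally.
val wasUnique = map.addWithDuplicatesAllowed("key-1", "value-1")      // expected: true
val wasUniqueAgain = map.addWithDuplicatesAllowed("key-1", "value-2") // expected: false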
@@ -6,11 +6,11 @@ import net.corda.core.internal.notary.NotaryService
import net.corda.core.utilities.contextLogger
import net.corda.node.SerialFilter
import net.corda.node.VersionInfo
import net.corda.nodeapi.internal.cordapp.CordappLoader
import net.corda.node.internal.cordapp.VirtualCordapp
import net.corda.node.services.api.ServiceHubInternal
import net.corda.node.services.config.NotaryConfig
import net.corda.node.services.transactions.SimpleNotaryService
import net.corda.nodeapi.internal.cordapp.CordappLoader
import net.corda.notary.experimental.bftsmart.BFTSmartNotaryService
import net.corda.notary.experimental.raft.RaftNotaryService
import java.lang.reflect.InvocationTargetException
@@ -1,13 +1,12 @@
package net.corda.node.utilities.registration

import net.corda.core.crypto.internal.AliasPrivateKey
import net.corda.core.crypto.Crypto
import net.corda.core.crypto.internal.AliasPrivateKey
import net.corda.core.identity.CordaX500Name
import net.corda.core.internal.*
import net.corda.core.utilities.contextLogger
import net.corda.node.NodeRegistrationOption
import net.corda.node.services.config.NodeConfiguration
import net.corda.nodeapi.internal.cryptoservice.bouncycastle.BCCryptoService
import net.corda.nodeapi.internal.config.CertificateStore
import net.corda.nodeapi.internal.config.MutualSslConfiguration
import net.corda.nodeapi.internal.crypto.CertificateType
@@ -18,9 +17,10 @@ import net.corda.nodeapi.internal.crypto.X509Utilities.CORDA_CLIENT_CA
import net.corda.nodeapi.internal.crypto.X509Utilities.CORDA_CLIENT_TLS
import net.corda.nodeapi.internal.crypto.X509Utilities.CORDA_ROOT_CA
import net.corda.nodeapi.internal.crypto.X509Utilities.DEFAULT_VALIDITY_WINDOW
import net.corda.nodeapi.internal.cryptoservice.CryptoService
import net.corda.nodeapi.internal.cryptoservice.CryptoServiceFactory
import net.corda.nodeapi.internal.cryptoservice.SupportedCryptoServices
import net.corda.nodeapi.internal.cryptoservice.CryptoService
import net.corda.nodeapi.internal.cryptoservice.bouncycastle.BCCryptoService
import org.bouncycastle.asn1.x500.X500Name
import org.bouncycastle.openssl.jcajce.JcaPEMWriter
import org.bouncycastle.operator.ContentSigner
@@ -271,10 +271,10 @@ open class NetworkRegistrationHelper(
protected open fun isTlsCrlIssuerCertRequired(): Boolean = false
}

class NodeRegistrationConfiguration (
class NodeRegistrationConfiguration(
val p2pSslOptions: MutualSslConfiguration,
val myLegalName : CordaX500Name,
val tlsCertCrlIssuer : X500Principal?,
val myLegalName: CordaX500Name,
val tlsCertCrlIssuer: X500Principal?,
val tlsCertCrlDistPoint: URL?,
val certificatesDirectory: Path,
val emailAddress: String,
@@ -1,5 +1,4 @@
# Build constants exported as resource file to make them visible in Node program
# Note: sadly, due to present limitation of IntelliJ-IDEA in processing resource files, these constants cannot be
# imported from top-level 'constants.properties' file

jolokiaAgentVersion=1.6.1
@@ -1,8 +1,7 @@
<?xml version="1.1" encoding="UTF-8" standalone="no"?>
<databaseChangeLog xmlns="http://www.liquibase.org/xml/ns/dbchangelog"
xmlns:ext="http://www.liquibase.org/xml/ns/dbchangelog-ext"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog-ext http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-ext.xsd http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.5.xsd">
xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.5.xsd">

<changeSet author="R3.Corda" id="1511451595465-1.1" dbms="h2">
<preConditions onFail="MARK_RAN" onSqlOutput="TEST">
@@ -11,7 +10,7 @@
<createSequence sequenceName="hibernate_sequence"/>
</changeSet>

<changeSet author="R3.Corda" id="1511451595465-1.3" onValidationFail="MARK_RAN" dbms="postgresql,mssql">
<preConditions onFail="MARK_RAN" onSqlOutput="TEST">
<not><sequenceExists sequenceName="hibernate_sequence"/></not>
</preConditions>
@@ -14,6 +14,7 @@
<include file="migration/node-core.changelog-v9.xml"/>
<include file="migration/node-core.changelog-v10.xml"/>
<include file="migration/node-core.changelog-v11.xml"/>
<include file="migration/node-core.changelog-v12.xml"/>
<!-- This changeset (which creates extra columns in the transactions tables), must be run before the vault state migration (in
vault-schema.changelog-v9.xml), as that will use the current hibernate mappings, and those require all DB columns to be
created. -->
@@ -0,0 +1,29 @@
<?xml version="1.1" encoding="UTF-8" standalone="no"?>
<databaseChangeLog xmlns="http://www.liquibase.org/xml/ns/dbchangelog"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.5.xsd"
logicalFilePath="migration/node-services.changelog-init.xml">

<changeSet author="R3.Corda" id="migrate_pk_hash_to_ext_id">
<!-- drop existing "ID" column -->
<dropColumn columnName="id"
tableName="pk_hash_to_ext_id_map"/>

<dropIndex tableName="pk_hash_to_ext_id_map" indexName="pk_hash_to_xid_idx"/>

<!-- create new primary key constraint on key hash -->
<addPrimaryKey columnNames="public_key_hash" constraintName="pubkey_hash_to_external_id_pk" tableName="pk_hash_to_ext_id_map"/>

<createIndex indexName="ext_id_idx" tableName="pk_hash_to_ext_id_map">
<column name="external_id"/>
</createIndex>

</changeSet>


<changeSet author="R3.Corda" id="migrate_identity_service_to_use_publicKey.toShortString()">
<customChange class="net.corda.node.migration.PersistentIdentityMigration">
</customChange>
</changeSet>

</databaseChangeLog>
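The changesets above re-key the identity tables because rows written under the old hashing convention cannot be looked up with the new one. A hedged illustration of that difference in Kotlin (both extension functions appear elsewhere in this diff; the key-generation call here is only an example, not part of the change):

import net.corda.core.crypto.Crypto
import net.corda.core.crypto.toStringShort
import net.corda.core.internal.hash

val key = Crypto.generateKeyPair(Crypto.DEFAULT_SIGNATURE_SCHEME).public
val oldColumnValue = key.hash.toString()   // value previously stored in the pk_hash columns
val newColumnValue = key.toStringShort()   // value the identity service now uses for lookups
check(oldColumnValue != newColumnValue)    // hence the PersistentIdentityMigration custom change above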
@@ -14,8 +14,7 @@ class ThreadContextAdjustingRpcOpsProxyTest {
private val mockClassloader = mock<ClassLoader>()
private val proxy = ThreadContextAdjustingRpcOpsProxy(coreOps, mockClassloader)


private interface InstrumentedCordaRPCOps: InternalCordaRPCOps {
private interface InstrumentedCordaRPCOps : InternalCordaRPCOps {
fun getThreadContextClassLoader(): ClassLoader = Thread.currentThread().contextClassLoader
}

@@ -0,0 +1,120 @@
package net.corda.node.migration

import liquibase.database.core.H2Database
import liquibase.database.jvm.JdbcConnection
import net.corda.core.crypto.toStringShort
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.PartyAndCertificate
import net.corda.core.internal.hash
import net.corda.core.utilities.contextLogger
import net.corda.node.services.identity.PersistentIdentityService
import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.DatabaseConfig
import net.corda.nodeapi.internal.persistence.contextTransactionOrNull
import net.corda.testing.core.*
import net.corda.testing.internal.configureDatabase
import net.corda.testing.node.MockServices.Companion.makeTestDataSourceProperties
import org.hamcrest.CoreMatchers
import org.hamcrest.Matcher
import org.hamcrest.Matchers.*
import org.junit.After
import org.junit.Assert
import org.junit.Before
import org.junit.Test

/**
*/
class IdentityServiceToStringShortMigrationTest {
companion object {
val alice = TestIdentity(ALICE_NAME, 70)
val bankOfCorda = TestIdentity(BOC_NAME)
val bob = TestIdentity(BOB_NAME, 80)
val dummyNotary = TestIdentity(DUMMY_NOTARY_NAME, 20)
val ALICE_IDENTITY get() = alice.identity
val BOB get() = bob.party
val BOB_IDENTITY get() = bob.identity
val BOC_IDENTITY get() = bankOfCorda.identity
val bob2 = TestIdentity(BOB_NAME, 40)
val BOB2_IDENTITY = bob2.identity
val logger = contextLogger()
}

lateinit var liquibaseDB: H2Database
lateinit var cordaDB: CordaPersistence

@Before
fun setUp() {
cordaDB = configureDatabase(
makeTestDataSourceProperties(),
DatabaseConfig(),
{ null },
{ null },
ourName = BOB_IDENTITY.name)
liquibaseDB = H2Database()
liquibaseDB.connection = JdbcConnection(cordaDB.dataSource.connection)
liquibaseDB.isAutoCommit = true
}

@After
fun close() {
contextTransactionOrNull?.close()
cordaDB.close()
liquibaseDB.close()
}

private fun saveAllIdentitiesWithOldHashString(identities: List<PartyAndCertificate>) {
cordaDB.transaction {
val groupedIdentities = identities.groupBy { it.name }
groupedIdentities.forEach { name, certs ->
val persistentIDs = certs.map { PersistentIdentityService.PersistentIdentity(it.owningKey.hash.toString(), it.certPath.encoded) }
val persistentName = PersistentIdentityService.PersistentIdentityNames(name.toString(), certs.first().owningKey.hash.toString())
persistentIDs.forEach {
session.persist(it)
}
session.persist(persistentName)
}
}
}

@Test
fun `it should be possible to migrate all existing identities to new hash function`() {
val identities = listOf(BOB_IDENTITY, ALICE_IDENTITY, BOC_IDENTITY, dummyNotary.identity, BOB2_IDENTITY)
val groupedByNameIdentities = identities.groupBy { it.name }
saveAllIdentitiesWithOldHashString(identities)
val migration = PersistentIdentityMigration()
liquibaseDB.execute(migration.generateStatements(liquibaseDB), listOf())
val listOfNamesWithoutPkHash = mutableListOf<CordaX500Name>()
identities.forEach {
logger.info("Checking: ${it.name}")
cordaDB.transaction {
val hashToIdentityStatement = database.dataSource.connection.prepareStatement("SELECT ${PersistentIdentityService.PK_HASH_COLUMN_NAME} FROM ${PersistentIdentityService.HASH_TO_IDENTITY_TABLE_NAME} WHERE pk_hash=?")
hashToIdentityStatement.setString(1, it.owningKey.toStringShort())
val hashToIdentityResultSet = hashToIdentityStatement.executeQuery()

//check that there is a row for every "new" hash
Assert.assertThat(hashToIdentityResultSet.next(), `is`(true))
//check that the pk_hash actually matches what we expect (kinda redundant, but deserializing the whole PartyAndCertificate feels like overkill)
Assert.assertThat(hashToIdentityResultSet.getString(1), `is`(it.owningKey.toStringShort()))

val nameToHashStatement = connection.prepareStatement("SELECT ${PersistentIdentityService.NAME_COLUMN_NAME} FROM ${PersistentIdentityService.NAME_TO_HASH_TABLE_NAME} WHERE pk_hash=?")
nameToHashStatement.setString(1, it.owningKey.toStringShort())
val nameToHashResultSet = nameToHashStatement.executeQuery()

//if there is no result for this key, this means its an identity that is not stored in the DB (IE, it's been seen after another identity has already been mapped to it)
if (nameToHashResultSet.next()) {
Assert.assertThat(nameToHashResultSet.getString(1), `is`(anyOf(groupedByNameIdentities.getValue(it.name).map<PartyAndCertificate, Matcher<String>?> { identity -> CoreMatchers.equalTo(identity.name.toString()) })))
} else {
logger.warn("did not find a PK_HASH for ${it.name}")
listOfNamesWithoutPkHash.add(it.name)
}
}
}


listOfNamesWithoutPkHash.forEach {
//the only time an identity name does not have a PK_HASH is if there are multiple identities associated with that name
Assert.assertThat(groupedByNameIdentities[it]?.size, `is`(greaterThan(1)))
}
}
}
@@ -3,15 +3,11 @@ package net.corda.node.migration
import liquibase.database.Database
import liquibase.database.jvm.JdbcConnection
import net.corda.core.contracts.*
import net.corda.core.crypto.Crypto
import net.corda.core.crypto.SecureHash
import net.corda.core.crypto.SignableData
import net.corda.core.crypto.SignatureMetadata
import net.corda.core.crypto.*
import net.corda.core.identity.AbstractParty
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.PartyAndCertificate
import net.corda.core.internal.NotaryChangeTransactionBuilder
import net.corda.core.internal.hash
import net.corda.core.internal.packageName
import net.corda.core.internal.signWithCert
import net.corda.core.node.NetworkParameters
@@ -198,8 +194,8 @@ class VaultStateMigrationTest {
private fun saveAllIdentities(identities: List<PartyAndCertificate>) {
cordaDB.transaction {
identities.groupBy { it.name }.forEach { name, certs ->
val persistentIDs = certs.map { PersistentIdentityService.PersistentIdentity(it.owningKey.hash.toString(), it.certPath.encoded) }
val persistentName = PersistentIdentityService.PersistentIdentityNames(name.toString(), certs.first().owningKey.hash.toString())
val persistentIDs = certs.map { PersistentIdentityService.PersistentIdentity(it.owningKey.toStringShort(), it.certPath.encoded) }
val persistentName = PersistentIdentityService.PersistentIdentityNames(name.toString(), certs.first().owningKey.toStringShort())
persistentIDs.forEach { session.save(it) }
session.save(persistentName)
}
@@ -274,7 +270,7 @@ class VaultStateMigrationTest {

private fun addLinearStates(statesToAdd: Int, parties: List<AbstractParty>) {
cordaDB.transaction {
(1..statesToAdd).map { createLinearStateTransaction("A".repeat(it), parties)}.forEach {
(1..statesToAdd).map { createLinearStateTransaction("A".repeat(it), parties) }.forEach {
storeTransaction(it)
createVaultStatesFromTransaction(it)
}
@@ -289,7 +285,7 @@ class VaultStateMigrationTest {

private fun addCommodityStates(statesToAdd: Int, owner: AbstractParty) {
cordaDB.transaction {
(1..statesToAdd).map{
(1..statesToAdd).map {
createCommodityTransaction(Amount(it.toLong(), Issued(bankOfCorda.ref(2), Commodity.getInstance("FCOJ")!!)), owner)
}.forEach {
storeTransaction(it)
@ -14,8 +14,10 @@ import net.corda.core.internal.*
|
||||
import net.corda.core.internal.cordapp.CordappImpl.Companion.DEFAULT_CORDAPP_VERSION
|
||||
import net.corda.core.node.ServicesForResolution
|
||||
import net.corda.core.node.services.AttachmentId
|
||||
import net.corda.core.node.services.vault.*
|
||||
import net.corda.core.node.services.vault.AttachmentQueryCriteria.AttachmentsQueryCriteria
|
||||
import net.corda.core.node.services.vault.AttachmentSort
|
||||
import net.corda.core.node.services.vault.Builder
|
||||
import net.corda.core.node.services.vault.Sort
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.node.services.transactions.PersistentUniquenessProvider
|
||||
import net.corda.nodeapi.exceptions.DuplicateAttachmentException
|
||||
@ -1082,39 +1084,39 @@ class NodeAttachmentServiceTest {
|
||||
val attachmentD = jarSignedByABCD.read { storage.privilegedImportAttachment(it, "app", "D.jar") }
|
||||
|
||||
assertEquals(
|
||||
listOf(attachmentA, attachmentB, attachmentC, attachmentD),
|
||||
storage.queryAttachments(
|
||||
AttachmentsQueryCriteria(
|
||||
signersCondition = Builder.equal(listOf(keyA))
|
||||
listOf(attachmentA, attachmentB, attachmentC, attachmentD),
|
||||
storage.queryAttachments(
|
||||
AttachmentsQueryCriteria(
|
||||
signersCondition = Builder.equal(listOf(keyA))
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
assertEquals(
|
||||
listOf(attachmentB, attachmentC, attachmentD),
|
||||
storage.queryAttachments(
|
||||
AttachmentsQueryCriteria(
|
||||
signersCondition = Builder.equal(listOf(keyB))
|
||||
listOf(attachmentB, attachmentC, attachmentD),
|
||||
storage.queryAttachments(
|
||||
AttachmentsQueryCriteria(
|
||||
signersCondition = Builder.equal(listOf(keyB))
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
assertEquals(
|
||||
listOf(attachmentC, attachmentD),
|
||||
storage.queryAttachments(
|
||||
AttachmentsQueryCriteria(
|
||||
signersCondition = Builder.equal(listOf(keyC))
|
||||
listOf(attachmentC, attachmentD),
|
||||
storage.queryAttachments(
|
||||
AttachmentsQueryCriteria(
|
||||
signersCondition = Builder.equal(listOf(keyC))
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
assertEquals(
|
||||
listOf(attachmentD),
|
||||
storage.queryAttachments(
|
||||
AttachmentsQueryCriteria(
|
||||
signersCondition = Builder.equal(listOf(keyD))
|
||||
listOf(attachmentD),
|
||||
storage.queryAttachments(
|
||||
AttachmentsQueryCriteria(
|
||||
signersCondition = Builder.equal(listOf(keyD))
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
@ -1153,21 +1155,21 @@ class NodeAttachmentServiceTest {
|
||||
val attachmentD = jarSignedByCD.read { storage.privilegedImportAttachment(it, "app", "D.jar") }
|
||||
|
||||
storage.queryAttachments(
|
||||
AttachmentsQueryCriteria(
|
||||
signersCondition = Builder.equal(listOf(keyA, keyC))
|
||||
)
|
||||
AttachmentsQueryCriteria(
|
||||
signersCondition = Builder.equal(listOf(keyA, keyC))
|
||||
)
|
||||
).let { result ->
|
||||
assertEquals(4, result.size)
|
||||
assertEquals(
|
||||
listOf(attachmentA, attachmentB, attachmentC, attachmentD),
|
||||
result
|
||||
listOf(attachmentA, attachmentB, attachmentC, attachmentD),
|
||||
result
|
||||
)
|
||||
}
|
||||
|
||||
storage.queryAttachments(
|
||||
AttachmentsQueryCriteria(
|
||||
signersCondition = Builder.equal(listOf(keyA, keyB))
|
||||
)
|
||||
AttachmentsQueryCriteria(
|
||||
signersCondition = Builder.equal(listOf(keyA, keyB))
|
||||
)
|
||||
).let { result ->
|
||||
// made a [Set] due to [NodeAttachmentService.queryAttachments] not returning distinct results
|
||||
assertEquals(3, result.toSet().size)
|
||||
@ -1175,9 +1177,9 @@ class NodeAttachmentServiceTest {
|
||||
}
|
||||
|
||||
storage.queryAttachments(
|
||||
AttachmentsQueryCriteria(
|
||||
signersCondition = Builder.equal(listOf(keyB, keyC, keyD))
|
||||
)
|
||||
AttachmentsQueryCriteria(
|
||||
signersCondition = Builder.equal(listOf(keyB, keyC, keyD))
|
||||
)
|
||||
).let { result ->
|
||||
// made a [Set] due to [NodeAttachmentService.queryAttachments] not returning distinct results
|
||||
assertEquals(3, result.toSet().size)
|
||||
|
@ -48,7 +48,7 @@ class ObserverNodeTransactionTests {
|
||||
it.state.data.message.value.startsWith(initialMessage.value)
|
||||
}
|
||||
|
||||
for (_i in 0.until(chainLength -1 )) {
|
||||
for (_i in 0.until(chainLength - 1)) {
|
||||
node.services.startFlow(ContinueMessageChainFlow(result!!, notary)).resultFuture.getOrThrow()
|
||||
result = node.services.vaultService.queryBy(MessageChainState::class.java).states.singleOrNull {
|
||||
it.state.data.message.value.startsWith(initialMessage.value)
|
||||
@ -58,7 +58,8 @@ class ObserverNodeTransactionTests {
|
||||
|
||||
fun sendTransactionToObserver(transactionIdx: Int, node: TestStartedNode, regulator: TestStartedNode) {
|
||||
val transactionList = node.services.validatedTransactions.track().snapshot
|
||||
node.services.startFlow(ReportToCounterparty(regulator.info.singleIdentity(), transactionList[transactionIdx])).resultFuture.getOrThrow()
|
||||
node.services.startFlow(ReportToCounterparty(regulator.info.singleIdentity(), transactionList[transactionIdx]))
|
||||
.resultFuture.getOrThrow()
|
||||
}
|
||||
|
||||
fun sendTransactionToObserverOnlyRelevant(transactionIdx: Int, node: TestStartedNode, regulator: TestStartedNode) {
|
||||
@ -79,7 +80,6 @@ class ObserverNodeTransactionTests {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
fun `Broadcasting an old transaction does not cause 2 unconsumed states`() {
|
||||
val node = mockNet.createPartyNode(ALICE_NAME)
|
||||
@ -242,7 +242,7 @@ class ObserverNodeTransactionTests {
|
||||
@InitiatingFlow
|
||||
class SplitMessagesFlow(private val message: MessageData,
|
||||
private val counterparty: Party,
|
||||
private val notary: Party): FlowLogic<SignedTransaction>() {
|
||||
private val notary: Party) : FlowLogic<SignedTransaction>() {
|
||||
companion object {
|
||||
object GENERATING_TRANSACTION : ProgressTracker.Step("Generating transaction based on the message.")
|
||||
object VERIFYING_TRANSACTION : ProgressTracker.Step("Verifying contract constraints.")
|
||||
@ -285,14 +285,13 @@ class ObserverNodeTransactionTests {
|
||||
}
|
||||
|
||||
@InitiatedBy(SplitMessagesFlow::class)
|
||||
class ReceiveSplitMessagesFlow(private val otherSideSession: FlowSession): FlowLogic<SignedTransaction>() {
|
||||
class ReceiveSplitMessagesFlow(private val otherSideSession: FlowSession) : FlowLogic<SignedTransaction>() {
|
||||
|
||||
@Suspendable
|
||||
override fun call(): SignedTransaction {
|
||||
val flow = object : SignTransactionFlow(otherSideSession) {
|
||||
@Suspendable
|
||||
override fun checkTransaction(stx: SignedTransaction) {
|
||||
|
||||
}
|
||||
}
|
||||
subFlow(flow)
|
||||
|
@ -465,7 +465,8 @@ class FlowFrameworkTests {
|
||||
|
||||
@Test
|
||||
fun `initiating flow using unknown AnonymousParty`() {
|
||||
val anonymousBob = bobNode.services.keyManagementService.freshKeyAndCert(bobNode.info.legalIdentitiesAndCerts.single(), false).party.anonymise()
|
||||
val anonymousBob = bobNode.services.keyManagementService.freshKeyAndCert(bobNode.info.legalIdentitiesAndCerts.single(), false)
|
||||
.party.anonymise()
|
||||
bobNode.registerCordappFlowFactory(SendAndReceiveFlow::class) { SingleInlinedSubFlow(it) }
|
||||
val result = aliceNode.services.startFlow(SendAndReceiveFlow(anonymousBob, "Hello")).resultFuture
|
||||
mockNet.runNetwork()
|
||||
|
@ -3,7 +3,10 @@ package net.corda.node.services.vault
|
||||
import co.paralleluniverse.fibers.Suspendable
|
||||
import com.nhaarman.mockito_kotlin.*
|
||||
import net.corda.core.contracts.*
|
||||
import net.corda.core.flows.*
|
||||
import net.corda.core.flows.FinalityFlow
|
||||
import net.corda.core.flows.FlowLogic
|
||||
import net.corda.core.flows.FlowSession
|
||||
import net.corda.core.flows.InitiatingFlow
|
||||
import net.corda.core.identity.AbstractParty
|
||||
import net.corda.core.internal.FlowStateMachine
|
||||
import net.corda.core.internal.uncheckedCast
|
||||
@ -19,8 +22,8 @@ import net.corda.core.utilities.NonEmptySet
|
||||
import net.corda.core.utilities.OpaqueBytes
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.core.utilities.unwrap
|
||||
import net.corda.nodeapi.internal.cordapp.CordappLoader
|
||||
import net.corda.node.services.api.VaultServiceInternal
|
||||
import net.corda.nodeapi.internal.cordapp.CordappLoader
|
||||
import net.corda.nodeapi.internal.persistence.CordaPersistence
|
||||
import net.corda.testing.core.singleIdentity
|
||||
import net.corda.testing.flows.registerCoreFlowFactory
|
||||
|
@ -33,7 +33,7 @@ data class NodeParameters(
|
||||
val maximumHeapSize: String = System.getenv("DRIVER_NODE_MEMORY") ?: "512m",
|
||||
val additionalCordapps: Collection<TestCordapp> = emptySet(),
|
||||
val flowOverrides: Map<out Class<out FlowLogic<*>>, Class<out FlowLogic<*>>> = emptyMap(),
|
||||
val logLevelOverride : String? = null
|
||||
val logLevelOverride: String? = null
|
||||
) {
|
||||
/**
|
||||
* Create a new node parameters object with default values. Each parameter can be specified with its wither method which returns a copy
|
||||
@ -49,7 +49,7 @@ data class NodeParameters(
|
||||
fun withMaximumHeapSize(maximumHeapSize: String): NodeParameters = copy(maximumHeapSize = maximumHeapSize)
|
||||
fun withAdditionalCordapps(additionalCordapps: Set<TestCordapp>): NodeParameters = copy(additionalCordapps = additionalCordapps)
|
||||
fun withFlowOverrides(flowOverrides: Map<Class<out FlowLogic<*>>, Class<out FlowLogic<*>>>): NodeParameters = copy(flowOverrides = flowOverrides)
|
||||
fun withLogLevelOverride(logLevelOverride : String?) : NodeParameters = copy(logLevelOverride = logLevelOverride)
|
||||
fun withLogLevelOverride(logLevelOverride: String?): NodeParameters = copy(logLevelOverride = logLevelOverride)
|
||||
|
||||
constructor(
|
||||
providedName: CordaX500Name?,
|
||||
@ -83,6 +83,7 @@ data class NodeParameters(
|
||||
maximumHeapSize = maximumHeapSize,
|
||||
additionalCordapps = additionalCordapps
|
||||
)
|
||||
|
||||
constructor(
|
||||
providedName: CordaX500Name?,
|
||||
rpcUsers: List<User>,
|
||||
|
@ -39,7 +39,6 @@ import net.corda.nodeapi.internal.crypto.X509Utilities
|
||||
import net.corda.nodeapi.internal.network.NetworkParametersCopier
|
||||
import net.corda.nodeapi.internal.network.NodeInfoFilesCopier
|
||||
import net.corda.notary.experimental.raft.RaftConfig
|
||||
import net.corda.serialization.internal.amqp.AbstractAMQPSerializationScheme
|
||||
import net.corda.testing.core.ALICE_NAME
|
||||
import net.corda.testing.core.BOB_NAME
|
||||
import net.corda.testing.core.DUMMY_BANK_A_NAME
|
||||
|
@ -33,7 +33,7 @@ class MockCryptoService(initialKeyPairs: Map<String, KeyPair>) : CryptoService {
|
||||
|
||||
override fun sign(alias: String, data: ByteArray, signAlgorithm: String?): ByteArray {
|
||||
try {
|
||||
return when(signAlgorithm) {
|
||||
return when (signAlgorithm) {
|
||||
null -> Crypto.doSign(aliasToKey[alias]!!.private, data)
|
||||
else -> signWithAlgorithm(alias, data, signAlgorithm)
|
||||
}
|
||||
|
@ -20,7 +20,7 @@ class PortAllocationTest {
|
||||
companion object {
|
||||
val logger = contextLogger()
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
fun `should allocate a port whilst cycling back round if exceeding start of ephemeral range`() {
|
||||
val startingPoint = PortAllocation.DEFAULT_START_PORT
|
||||
@ -48,7 +48,7 @@ class PortAllocationTest {
|
||||
logger.info("Starting multiprocess port allocation test")
|
||||
val spinnerFile = Files.newTemporaryFile().also { it.deleteOnExit() }.absolutePath
|
||||
val iterCount = 8_000 // Default port range 10000-30000 since we will have 2 processes we want to make sure there is enough leg room
|
||||
// If we rollover, we may well receive the ports that were already given to a different process
|
||||
// If we rollover, we may well receive the ports that were already given to a different process
|
||||
val process1 = buildJvmProcess(spinnerFile, 1, iterCount)
|
||||
val process2 = buildJvmProcess(spinnerFile, 2, iterCount)
|
||||
|
||||
@ -73,12 +73,12 @@ class PortAllocationTest {
|
||||
logger.info("Instructing child processes to start allocating ports")
|
||||
spinnerBuffer.putShort(0, 8)
|
||||
logger.info("Waiting for child processes to terminate")
|
||||
val terminationStatuses = processes.parallelStream().map { if(it.waitFor(1, TimeUnit.MINUTES)) "OK" else "STILL RUNNING" }.toList()
|
||||
val terminationStatuses = processes.parallelStream().map { if (it.waitFor(1, TimeUnit.MINUTES)) "OK" else "STILL RUNNING" }.toList()
|
||||
logger.info("child processes terminated: $terminationStatuses")
|
||||
|
||||
fun List<String>.setOfPorts() : Set<Int> {
|
||||
fun List<String>.setOfPorts(): Set<Int> {
|
||||
// May include warnings when ports are busy
|
||||
return map { Try.on { Integer.parseInt(it)} }.filter { it.isSuccess }.map { it.getOrThrow() }.toSet()
|
||||
return map { Try.on { Integer.parseInt(it) } }.filter { it.isSuccess }.map { it.getOrThrow() }.toSet()
|
||||
}
|
||||
|
||||
val lines1 = process1.inputStream.reader().readLines()
|
||||
|
@ -58,8 +58,8 @@ class DBRunnerExtension : Extension, BeforeAllCallback, AfterAllCallback, Before
|
||||
val rootContext = context?.root ?: return null
|
||||
|
||||
val testClass = context.testClass.orElse(null) ?: return null
|
||||
val annotation = testClass.requiredDb ?:
|
||||
throw IllegalStateException("Test run with DBRunnerExtension is not annotated with @RequiresDb")
|
||||
val annotation = testClass.requiredDb
|
||||
?: throw IllegalStateException("Test run with DBRunnerExtension is not annotated with @RequiresDb")
|
||||
val groupName = annotation.group
|
||||
val defaultContextClassName = annotation.defaultContextClassName
|
||||
|
||||
@ -84,13 +84,14 @@ class DBRunnerExtension : Extension, BeforeAllCallback, AfterAllCallback, Before
|
||||
private fun <T : Any> AnnotatedElement.findAnnotations(annotationClass: Class<T>): Sequence<T> = declaredAnnotations.asSequence()
|
||||
.filterNot { it.isInternal }
|
||||
.flatMap { annotation ->
|
||||
if (annotationClass.isAssignableFrom(annotation::class.java))sequenceOf(annotationClass.cast(annotation))
|
||||
if (annotationClass.isAssignableFrom(annotation::class.java)) sequenceOf(annotationClass.cast(annotation))
|
||||
else annotation.annotationClass.java.findAnnotations(annotationClass)
|
||||
}
|
||||
|
||||
private val Annotation.isInternal: Boolean get() = annotationClass.java.name.run {
|
||||
startsWith("java.lang") ||
|
||||
startsWith("org.junit") ||
|
||||
startsWith("kotlin")
|
||||
}
|
||||
private val Annotation.isInternal: Boolean
|
||||
get() = annotationClass.java.name.run {
|
||||
startsWith("java.lang") ||
|
||||
startsWith("org.junit") ||
|
||||
startsWith("kotlin")
|
||||
}
|
||||
}
|
@ -16,5 +16,4 @@ class NoOpTestDatabaseContext : TestDatabaseContext {
|
||||
override fun afterTest(teardownSql: List<String>) {}
|
||||
|
||||
override fun close() {}
|
||||
|
||||
}
|
@ -23,7 +23,7 @@ interface TestDatabaseContext : ExtensionContext.Store.CloseableResource {
|
||||
get() = _usingRemoteDatabase.get() ?: false
|
||||
set(value) = _usingRemoteDatabase.set(value)
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Called once when the context is first instantiated, i.e. at the start of the test run, before any tests at all have been executed.
|
||||
*
|
||||
|
@ -3,7 +3,6 @@ package net.corda.testing.internal.db
|
||||
import org.assertj.core.api.Assertions.assertThat
|
||||
import org.slf4j.Logger
|
||||
import org.slf4j.LoggerFactory
|
||||
import java.lang.IllegalStateException
|
||||
|
||||
class AssertingTestDatabaseContext : TestDatabaseContext {
|
||||
|
||||
@ -57,5 +56,4 @@ class AssertingTestDatabaseContext : TestDatabaseContext {
|
||||
throw IllegalStateException("Assertion failed: ${e.message}")
|
||||
}
|
||||
}
|
||||
|
||||
}
|
@ -15,7 +15,5 @@ class GroupAMoreTests {
|
||||
@SpecialSql1
|
||||
@SpecialSql2
|
||||
fun moreSpecialSqlRequired() {
|
||||
|
||||
}
|
||||
|
||||
}
|
@ -14,13 +14,10 @@ class GroupATests {
|
||||
|
||||
@Test
|
||||
fun noSpecialSqlRequired() {
|
||||
|
||||
}
|
||||
|
||||
@Test
|
||||
@SpecialSql1
|
||||
fun someSpecialSqlRequired() {
|
||||
|
||||
}
|
||||
|
||||
}
|
@ -13,13 +13,10 @@ class GroupBTests {
|
||||
|
||||
@Test
|
||||
fun noSpecialSqlRequired() {
|
||||
|
||||
}
|
||||
|
||||
@Test
|
||||
@SpecialSql1
|
||||
fun someSpecialSqlRequired() {
|
||||
|
||||
}
|
||||
|
||||
}
|
@ -13,7 +13,8 @@
|
||||
<Appenders>
|
||||
<Console name="Console-Appender" target="SYSTEM_OUT">
|
||||
<PatternLayout>
|
||||
<ScriptPatternSelector defaultPattern="%highlight{[%level{length=5}] %date{HH:mm:ss,SSS} [%t] %c{2}.%method - %msg%n}{INFO=white,WARN=red,FATAL=bright red}">
|
||||
<ScriptPatternSelector
|
||||
defaultPattern="%highlight{[%level{length=5}] %date{HH:mm:ss,SSS} [%t] %c{2}.%method - %msg%n}{INFO=white,WARN=red,FATAL=bright red}">
|
||||
<Script name="MDCSelector" language="javascript"><![CDATA[
|
||||
result = null;
|
||||
if (!logEvent.getContextData().size() == 0) {
|
||||
@ -24,7 +25,8 @@
|
||||
result;
|
||||
]]>
|
||||
</Script>
|
||||
<PatternMatch key="WithMDC" pattern="%highlight{[%level{length=5}] %date{HH:mm:ss,SSS} [%t] %c{2}.%method - %msg %X%n}{INFO=white,WARN=red,FATAL=bright red}"/>
|
||||
<PatternMatch key="WithMDC"
|
||||
pattern="%highlight{[%level{length=5}] %date{HH:mm:ss,SSS} [%t] %c{2}.%method - %msg %X%n}{INFO=white,WARN=red,FATAL=bright red}"/>
|
||||
</ScriptPatternSelector>
|
||||
</PatternLayout>
|
||||
<ThresholdFilter level="trace"/>
|
||||
@ -32,14 +34,14 @@
|
||||
|
||||
<!-- Required for printBasicInfo -->
|
||||
<Console name="Console-Appender-Println" target="SYSTEM_OUT">
|
||||
<PatternLayout pattern="%msg%n" />
|
||||
<PatternLayout pattern="%msg%n"/>
|
||||
</Console>
|
||||
|
||||
<!-- Will generate up to 100 log files for a given day. During every rollover it will delete
|
||||
those that are older than 60 days, but keep the most recent 10 GB -->
|
||||
<RollingRandomAccessFile name="RollingFile-Appender"
|
||||
fileName="${log-path}/${log-name}.log"
|
||||
filePattern="${archive}/${log-name}.%date{yyyy-MM-dd}-%i.log.gz">
|
||||
fileName="${log-path}/${log-name}.log"
|
||||
filePattern="${archive}/${log-name}.%date{yyyy-MM-dd}-%i.log.gz">
|
||||
|
||||
<PatternLayout pattern="[%-5level] %date{ISO8601}{UTC}Z [%t] %c{2}.%method - %msg %X%n"/>
|
||||
|
||||
@ -83,7 +85,7 @@
|
||||
</Root>
|
||||
<Logger name="net.corda" level="${defaultLogLevel}" additivity="false">
|
||||
<AppenderRef ref="Console-ErrorCode-Appender"/>
|
||||
<AppenderRef ref="RollingFile-ErrorCode-Appender" />
|
||||
<AppenderRef ref="RollingFile-ErrorCode-Appender"/>
|
||||
</Logger>
|
||||
</Loggers>
|
||||
</Configuration>
|
||||
|
@ -15,7 +15,6 @@ class DummyContractV2 : UpgradedContractWithLegacyConstraint<DummyContract.State
|
||||
companion object {
|
||||
const val PROGRAM_ID: ContractClassName = "net.corda.testing.contracts.DummyContractV2"
|
||||
|
||||
|
||||
/**
|
||||
* An overload of move for just one input state.
|
||||
*/
|
||||
@ -27,7 +26,7 @@ class DummyContractV2 : UpgradedContractWithLegacyConstraint<DummyContract.State
|
||||
*/
|
||||
@JvmStatic
|
||||
fun move(priors: List<StateAndRef<State>>, newOwner: AbstractParty): TransactionBuilder {
|
||||
require(priors.isNotEmpty()){"States to move to new owner must not be empty"}
|
||||
require(priors.isNotEmpty()) { "States to move to new owner must not be empty" }
|
||||
val priorState = priors[0].state.data
|
||||
val (cmd, state) = priorState.withNewOwner(newOwner)
|
||||
return TransactionBuilder(notary = priors[0].state.notary).withItems(
|
||||
|
@ -235,7 +235,7 @@ fun <R> withTestSerializationEnvIfNotSet(block: () -> R): R {
|
||||
/**
|
||||
* Used to check if particular port is already bound i.e. not vacant
|
||||
*/
|
||||
fun isLocalPortBound(port: Int) : Boolean {
|
||||
fun isLocalPortBound(port: Int): Boolean {
|
||||
return try {
|
||||
ServerSocket(port).use {
|
||||
// Successful means that the port was vacant
|
||||
|
@ -8,13 +8,13 @@ import javassist.ClassPool
|
||||
import javassist.CtClass
|
||||
import net.corda.core.internal.ThreadBox
|
||||
import net.corda.core.utilities.debug
|
||||
import net.corda.tools.CheckpointAgent.Companion.graphDepth
|
||||
import net.corda.tools.CheckpointAgent.Companion.instrumentClassname
|
||||
import net.corda.tools.CheckpointAgent.Companion.instrumentType
|
||||
import net.corda.tools.CheckpointAgent.Companion.log
|
||||
import net.corda.tools.CheckpointAgent.Companion.maximumSize
|
||||
import net.corda.tools.CheckpointAgent.Companion.minimumSize
|
||||
import net.corda.tools.CheckpointAgent.Companion.printOnce
|
||||
import net.corda.tools.CheckpointAgent.Companion.graphDepth
|
||||
import org.slf4j.LoggerFactory
|
||||
import java.io.ByteArrayInputStream
|
||||
import java.lang.instrument.ClassFileTransformer
|
||||
@ -65,24 +65,34 @@ class CheckpointAgent {
|
||||
if (nvpItem.size == 2) {
|
||||
when (nvpItem[0].trim()) {
|
||||
"instrumentClassname" -> instrumentClassname = nvpItem[1]
|
||||
"instrumentType" -> try { instrumentType = InstrumentationType.valueOf(nvpItem[1].toUpperCase()) } catch (e: Exception) {
|
||||
"instrumentType" -> try {
|
||||
instrumentType = InstrumentationType.valueOf(nvpItem[1].toUpperCase())
|
||||
} catch (e: Exception) {
|
||||
display("Invalid value: ${nvpItem[1]}. Please specify read or write.")
|
||||
}
|
||||
"minimumSize" -> try { minimumSize = nvpItem[1].toInt() } catch (e: NumberFormatException) {
|
||||
display("Invalid value: ${nvpItem[1]}. Please specify an integer value.") }
|
||||
"maximumSize" -> try { maximumSize = nvpItem[1].toInt() } catch (e: NumberFormatException) {
|
||||
"minimumSize" -> try {
|
||||
minimumSize = nvpItem[1].toInt()
|
||||
} catch (e: NumberFormatException) {
|
||||
display("Invalid value: ${nvpItem[1]}. Please specify an integer value.")
|
||||
}
|
||||
"graphDepth" -> try { graphDepth = nvpItem[1].toInt() } catch (e: NumberFormatException) {
|
||||
"maximumSize" -> try {
|
||||
maximumSize = nvpItem[1].toInt()
|
||||
} catch (e: NumberFormatException) {
|
||||
display("Invalid value: ${nvpItem[1]}. Please specify an integer value.")
|
||||
}
|
||||
"printOnce" -> try { printOnce = nvpItem[1].toBoolean() } catch (e: Exception) {
|
||||
"graphDepth" -> try {
|
||||
graphDepth = nvpItem[1].toInt()
|
||||
} catch (e: NumberFormatException) {
|
||||
display("Invalid value: ${nvpItem[1]}. Please specify an integer value.")
|
||||
}
|
||||
"printOnce" -> try {
|
||||
printOnce = nvpItem[1].toBoolean()
|
||||
} catch (e: Exception) {
|
||||
display("Invalid value: ${nvpItem[1]}. Please specify true or false.")
|
||||
}
|
||||
else -> display("Invalid argument: $nvpItem")
|
||||
}
|
||||
}
|
||||
else display("Missing value for argument: $nvpItem")
|
||||
} else display("Missing value for argument: $nvpItem")
|
||||
}
|
||||
}
|
||||
println("Running Checkpoint agent with following arguments: instrumentClassname=$instrumentClassname, instrumentType=$instrumentType, minimumSize=$minimumSize, maximumSize=$maximumSize, graphDepth=$graphDepth, printOnce=$printOnce\n")
|
||||
@ -207,8 +217,7 @@ object CheckpointHook : ClassFileTransformer {
|
||||
val numberValue = value as Array<Number>
|
||||
log.debug { "readFieldExit array of number: $clazz = ${numberValue.joinToString(",")}" }
|
||||
return numberValue.joinToString(",")
|
||||
}
|
||||
else if (clazz == Array<Boolean>::class.java) {
|
||||
} else if (clazz == Array<Boolean>::class.java) {
|
||||
val arrayValue = value as Array<Boolean>
|
||||
log.debug { "readFieldExit array of boolean: $clazz = ${arrayValue.joinToString(",")}" }
|
||||
return arrayValue.joinToString(",")
|
||||
@ -226,38 +235,31 @@ object CheckpointHook : ClassFileTransformer {
|
||||
val arrayValue = value as CharArray
|
||||
log.debug { "readFieldExit char array: $clazz = ${arrayValue.joinToString("")}" }
|
||||
return arrayValue.joinToString("")
|
||||
}
|
||||
else if (clazz == ByteArray::class.java) {
|
||||
} else if (clazz == ByteArray::class.java) {
|
||||
val arrayValue = value as ByteArray
|
||||
log.debug { "readFieldExit byte array: $clazz = ${byteArrayToHex(arrayValue)}" }
|
||||
return byteArrayToHex(arrayValue)
|
||||
}
|
||||
else if (clazz == ShortArray::class.java) {
|
||||
} else if (clazz == ShortArray::class.java) {
|
||||
val arrayValue = value as ShortArray
|
||||
log.debug { "readFieldExit short array: $clazz = ${arrayValue.joinToString(",")}" }
|
||||
return arrayValue.joinToString(",")
|
||||
}
|
||||
else if (clazz == IntArray::class.java) {
|
||||
} else if (clazz == IntArray::class.java) {
|
||||
val arrayValue = value as IntArray
|
||||
log.debug { "readFieldExit int array: $clazz = ${arrayValue.joinToString(",")}" }
|
||||
return arrayValue.joinToString(",")
|
||||
}
|
||||
else if (clazz == LongArray::class.java) {
|
||||
} else if (clazz == LongArray::class.java) {
|
||||
val arrayValue = value as LongArray
|
||||
log.debug { "readFieldExit long array: $clazz = ${arrayValue.joinToString(",")}" }
|
||||
return arrayValue.joinToString(",")
|
||||
}
|
||||
else if (clazz == FloatArray::class.java) {
|
||||
} else if (clazz == FloatArray::class.java) {
|
||||
val arrayValue = value as FloatArray
|
||||
log.debug { "readFieldExit float array: $clazz = ${arrayValue.joinToString(",")}" }
|
||||
return arrayValue.joinToString(",")
|
||||
}
|
||||
else if (clazz == DoubleArray::class.java) {
|
||||
} else if (clazz == DoubleArray::class.java) {
|
||||
val arrayValue = value as DoubleArray
|
||||
log.debug { "readFieldExit double array: $clazz = ${arrayValue.joinToString(",")}" }
|
||||
return arrayValue.joinToString(",")
|
||||
}
|
||||
else if (clazz == BooleanArray::class.java) {
|
||||
} else if (clazz == BooleanArray::class.java) {
|
||||
val arrayValue = value as BooleanArray
|
||||
log.debug { "readFieldExit boolean array: $clazz = ${arrayValue.joinToString(",")}" }
|
||||
return arrayValue.joinToString(",")
|
||||
@ -340,22 +342,18 @@ object CheckpointHook : ClassFileTransformer {
|
||||
is StatsTree.Object -> {
|
||||
if (printOnce && identityInfo.refCount > 1) {
|
||||
log.debug { "Skipping $statsInfo, $statsTree (count:${identityInfo.refCount})" }
|
||||
}
|
||||
else if (indent/2 < graphDepth) {
|
||||
} else if (indent / 2 < graphDepth) {
|
||||
builder.append(String.format("%03d:", indent / 2))
|
||||
builder.append(CharArray(indent) { ' ' })
|
||||
builder.append(" ${statsInfo.fieldName} ")
|
||||
if (statsInfo.fieldType != null && statsInfo.fieldType.isArray) {
|
||||
val arrayValue = (statsTree.value as Array<Any?>)
|
||||
builder.append("${statsInfo.fieldType} (array length:${arrayValue.size})")
|
||||
}
|
||||
else if (statsInfo.fieldType != null && statsTree.value is Collection<*>) {
|
||||
} else if (statsInfo.fieldType != null && statsTree.value is Collection<*>) {
|
||||
builder.append("${statsInfo.fieldType} (collection size:${statsTree.value.size})")
|
||||
}
|
||||
else if (statsInfo.fieldType != null && statsTree.value is Map<*,*>) {
|
||||
} else if (statsInfo.fieldType != null && statsTree.value is Map<*, *>) {
|
||||
builder.append("${statsInfo.fieldType} (map size:${statsTree.value.size})")
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
builder.append("${statsTree.className} (hash:${statsTree.value?.hashCode()}) (count:${identityInfo.refCount})")
|
||||
}
|
||||
builder.append(" ")
|
||||
@ -367,7 +365,7 @@ object CheckpointHook : ClassFileTransformer {
|
||||
}
|
||||
}
|
||||
is StatsTree.BasicType -> {
|
||||
if (indent/2 < graphDepth) {
|
||||
if (indent / 2 < graphDepth) {
|
||||
builder.append(String.format("%03d:", indent / 2))
|
||||
builder.append(CharArray(indent) { ' ' })
|
||||
builder.append(" ${statsInfo.fieldName} ")
|
||||
@ -471,15 +469,14 @@ fun readTrees(events: List<StatsEvent>, index: Int, idMap: IdentityHashMap<Any,
|
||||
}
|
||||
is StatsEvent.ObjectField -> {
|
||||
val identityInfo =
|
||||
if (idMap.containsKey(event.value)) {
|
||||
val identityInfo = idMap[event.value]!!
|
||||
idMap[event.value] = IdentityInfo(identityInfo.tree, identityInfo.refCount + 1)
|
||||
log.debug { "Skipping repeated StatsEvent.ObjectField: ${event.value} (hashcode:${event.value!!.hashCode()}) (count:${idMap[event.value]?.refCount})" }
|
||||
identityInfo
|
||||
}
|
||||
else {
|
||||
IdentityInfo(StatsTree.Loop(0), 1)
|
||||
}
|
||||
if (idMap.containsKey(event.value)) {
|
||||
val identityInfo = idMap[event.value]!!
|
||||
idMap[event.value] = IdentityInfo(identityInfo.tree, identityInfo.refCount + 1)
|
||||
log.debug { "Skipping repeated StatsEvent.ObjectField: ${event.value} (hashcode:${event.value!!.hashCode()}) (count:${idMap[event.value]?.refCount})" }
|
||||
identityInfo
|
||||
} else {
|
||||
IdentityInfo(StatsTree.Loop(0), 1)
|
||||
}
|
||||
trees += StatsInfo(event.fieldName, event.fieldType) to identityInfo
|
||||
i++
|
||||
inField = false
|
||||
|
@ -22,6 +22,7 @@ interface NetworkBuilder {
|
||||
fun onNodeInstance(callback: (NodeInstance) -> Unit): NetworkBuilder
|
||||
/** Sets network name */
|
||||
fun withNetworkName(networkName: String): NetworkBuilder
|
||||
|
||||
fun withBasedir(baseDir: File): NetworkBuilder
|
||||
fun withBackend(backendType: Backend.BackendType): NetworkBuilder
|
||||
fun withBackendOptions(options: Map<String, String>): NetworkBuilder
|
||||
|
@ -1,6 +1,8 @@
|
||||
package net.corda.networkbuilder.cli
|
||||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.networkbuilder.Constants
|
||||
import net.corda.networkbuilder.NetworkBuilder
|
||||
import net.corda.networkbuilder.backends.Backend
|
||||
@ -8,8 +10,6 @@ import net.corda.networkbuilder.context.Context
|
||||
import net.corda.networkbuilder.nodes.NodeAdder
|
||||
import net.corda.networkbuilder.nodes.NodeInstantiator
|
||||
import net.corda.networkbuilder.toSingleFuture
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import java.io.File
|
||||
|
||||
class CommandLineInterface {
|
||||
|
@ -1,9 +1,9 @@
|
||||
package net.corda.networkbuilder.context
|
||||
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.networkbuilder.Constants
|
||||
import net.corda.networkbuilder.backends.Backend
|
||||
import net.corda.networkbuilder.nodes.NodeInstanceRequest
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import org.apache.activemq.artemis.utils.collections.ConcurrentHashSet
|
||||
import java.util.concurrent.ConcurrentHashMap
|
||||
|
||||
|
@ -14,6 +14,7 @@ import javafx.scene.layout.HBox
|
||||
import javafx.scene.layout.Priority
|
||||
import javafx.scene.layout.VBox
|
||||
import javafx.stage.DirectoryChooser
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.networkbuilder.Constants
|
||||
import net.corda.networkbuilder.GuiUtils
|
||||
import net.corda.networkbuilder.NetworkBuilder
|
||||
@ -21,7 +22,6 @@ import net.corda.networkbuilder.backends.Backend
|
||||
import net.corda.networkbuilder.baseArgs
|
||||
import net.corda.networkbuilder.context.Context
|
||||
import net.corda.networkbuilder.nodes.*
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import org.apache.commons.lang3.RandomStringUtils
|
||||
import org.controlsfx.control.SegmentedButton
|
||||
import tornadofx.*
|
||||
@ -104,6 +104,7 @@ class BootstrapperView : View("Corda Network Builder") {
|
||||
override fun get(index: Int): String {
|
||||
return controller.foundNodes[index].id
|
||||
}
|
||||
|
||||
override val size: Int
|
||||
get() = controller.foundNodes.size
|
||||
}
|
||||
@ -291,7 +292,6 @@ class BootstrapperView : View("Corda Network Builder") {
|
||||
unsortedNodes.add(NodeTemplateInfo(it.name, NodeType.NOTARY))
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
var baseDir = SimpleObjectProperty<File>(null)
|
||||
@ -363,7 +363,6 @@ class BootstrapperView : View("Corda Network Builder") {
|
||||
val locallyReachableAddress: String,
|
||||
val rpcPort: Int,
|
||||
val sshPort: Int)
|
||||
|
||||
}
|
||||
|
||||
data class FoundNodeTableEntry(val id: String, @Volatile var count: Int = 1)
|
@ -1,7 +1,7 @@
|
||||
package net.corda.networkbuilder.gui
|
||||
|
||||
import javafx.stage.Stage
|
||||
import tornadofx.App
|
||||
import tornadofx.*
|
||||
|
||||
class Gui : App(BootstrapperView::class) {
|
||||
override fun start(stage: Stage) {
|
||||
|
@ -19,7 +19,6 @@ open class CopiedNode(configFile: File, baseDirectory: File,
|
||||
return copiedNodeConfig
|
||||
}
|
||||
|
||||
|
||||
fun builtNode(nodeConfig: NodeConfiguration, imageId: String): BuiltNode {
|
||||
return BuiltNode(configFile, baseDirectory, copiedNodeConfig, copiedNodeDir, nodeConfig, imageId)
|
||||
}
|
||||
|
@ -1,8 +1,8 @@
|
||||
package net.corda.networkbuilder.nodes
|
||||
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.networkbuilder.containers.instance.InstanceInfo
|
||||
import net.corda.networkbuilder.context.Context
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import java.util.concurrent.CompletableFuture
|
||||
|
||||
class NodeAdder(val context: Context,
|
||||
@ -13,7 +13,8 @@ class NodeAdder(val context: Context,
|
||||
val nodeGroup = context.nodes[nodeGroupName]!!
|
||||
val nodeInfo = nodeGroup.iterator().next()
|
||||
val currentNodeSize = nodeGroup.size
|
||||
val newInstanceX500 = x500ToAdd?.toString() ?: nodeInfo.groupX500!!.copy(commonName = nodeInfo.groupX500.commonName + (currentNodeSize)).toString()
|
||||
val newInstanceX500 = x500ToAdd?.toString()
|
||||
?: nodeInfo.groupX500!!.copy(commonName = nodeInfo.groupX500.commonName + (currentNodeSize)).toString()
|
||||
val newInstanceName = nodeGroupName + (currentNodeSize)
|
||||
val nextNodeInfo = nodeInfo.copy(
|
||||
instanceX500 = newInstanceX500,
|
||||
|
@ -6,9 +6,9 @@ import com.github.dockerjava.core.command.BuildImageResultCallback
|
||||
import com.typesafe.config.Config
|
||||
import com.typesafe.config.ConfigFactory
|
||||
import com.typesafe.config.ConfigValueFactory
|
||||
import net.corda.networkbuilder.docker.DockerUtils
|
||||
import net.corda.common.configuration.parsing.internal.Configuration
|
||||
import net.corda.common.validation.internal.Validated
|
||||
import net.corda.networkbuilder.docker.DockerUtils
|
||||
import net.corda.node.services.config.NodeConfiguration
|
||||
import net.corda.node.services.config.parseAsNodeConfiguration
|
||||
import org.slf4j.LoggerFactory
|
||||
|
@ -17,7 +17,7 @@ open class NodeCopier(private val cacheDir: File) {
|
||||
LOG.info("copying: ${foundNode.baseDirectory} to $nodeCacheDir")
|
||||
foundNode.baseDirectory.copyRecursively(nodeCacheDir, overwrite = true)
|
||||
//docker-java lib doesn't copy an empty folder, so if it's empty add a dummy file
|
||||
ensureDirectoryIsNonEmpty(nodeCacheDir.toPath()/("cordapps"))
|
||||
ensureDirectoryIsNonEmpty(nodeCacheDir.toPath() / ("cordapps"))
|
||||
copyBootstrapperFiles(nodeCacheDir)
|
||||
val configInCacheDir = File(nodeCacheDir, "node.conf")
|
||||
LOG.info("Applying precanned config $configInCacheDir")
|
||||
@ -55,8 +55,8 @@ open class NodeCopier(private val cacheDir: File) {
|
||||
.classLoader
|
||||
.getResourceAsStream("rpc-settings.conf")
|
||||
.reader().use {
|
||||
ConfigFactory.parseReader(it)
|
||||
}.getValue("rpcSettings")
|
||||
ConfigFactory.parseReader(it)
|
||||
}.getValue("rpcSettings")
|
||||
}
|
||||
|
||||
internal fun getDefaultSshSettings(): ConfigValue {
|
||||
@ -64,8 +64,8 @@ open class NodeCopier(private val cacheDir: File) {
|
||||
.classLoader
|
||||
.getResourceAsStream("ssh.conf")
|
||||
.reader().use {
|
||||
ConfigFactory.parseReader(it)
|
||||
}.getValue("sshd")
|
||||
ConfigFactory.parseReader(it)
|
||||
}.getValue("sshd")
|
||||
}
|
||||
|
||||
internal fun mergeConfigs(configInCacheDir: File,
|
||||
|
@ -3,8 +3,8 @@ package net.corda.networkbuilder.nodes
|
||||
import com.typesafe.config.Config
|
||||
import com.typesafe.config.ConfigException
|
||||
import com.typesafe.config.ConfigFactory
|
||||
import net.corda.networkbuilder.Constants
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.networkbuilder.Constants
|
||||
import java.io.File
|
||||
|
||||
class NodeFinder(private val dirToSearch: File) {
|
||||
|
@ -1,10 +1,10 @@
|
||||
package net.corda.networkbuilder.nodes
|
||||
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.networkbuilder.Constants
|
||||
import net.corda.networkbuilder.containers.instance.InstanceInfo
|
||||
import net.corda.networkbuilder.containers.instance.Instantiator
|
||||
import net.corda.networkbuilder.context.Context
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import java.util.concurrent.CompletableFuture
|
||||
|
||||
class NodeInstantiator(val instantiator: Instantiator,
|
||||
|
@ -7,7 +7,6 @@ class CopiedNotary(configFile: File, baseDirectory: File,
|
||||
copiedNodeConfig: File, copiedNodeDir: File, val nodeInfoFile: File) :
|
||||
CopiedNode(configFile, baseDirectory, copiedNodeConfig, copiedNodeDir)
|
||||
|
||||
|
||||
fun CopiedNode.toNotary(nodeInfoFile: File): CopiedNotary {
|
||||
return CopiedNotary(this.configFile, this.baseDirectory, this.copiedNodeConfig, this.copiedNodeDir, nodeInfoFile)
|
||||
}
|
@ -15,7 +15,7 @@ class NotaryCopier(private val cacheDir: File) : NodeCopier(cacheDir) {
|
||||
LOG.info("copying: ${foundNotary.baseDirectory} to $nodeCacheDir")
|
||||
foundNotary.baseDirectory.copyRecursively(nodeCacheDir, overwrite = true)
|
||||
//docker-java lib doesn't copy an empty folder, so if it's empty add a dummy file
|
||||
ensureDirectoryIsNonEmpty(nodeCacheDir.toPath()/("cordapps"))
|
||||
ensureDirectoryIsNonEmpty(nodeCacheDir.toPath() / ("cordapps"))
|
||||
copyNotaryBootstrapperFiles(nodeCacheDir)
|
||||
val configInCacheDir = File(nodeCacheDir, "node.conf")
|
||||
LOG.info("Applying precanned config $configInCacheDir")
|
||||
|
@ -2,11 +2,11 @@ package net.corda.networkbuilder.volumes
|
||||
|
||||
import com.microsoft.azure.storage.file.CloudFile
|
||||
import com.typesafe.config.ConfigFactory
|
||||
import net.corda.networkbuilder.notaries.CopiedNotary
|
||||
import net.corda.networkbuilder.serialization.SerializationEngine
|
||||
import net.corda.core.node.NetworkParameters
|
||||
import net.corda.core.node.NotaryInfo
|
||||
import net.corda.core.serialization.deserialize
|
||||
import net.corda.networkbuilder.notaries.CopiedNotary
|
||||
import net.corda.networkbuilder.serialization.SerializationEngine
|
||||
import net.corda.nodeapi.internal.DEV_ROOT_CA
|
||||
import net.corda.nodeapi.internal.SignedNodeInfo
|
||||
import net.corda.nodeapi.internal.config.getBooleanCaseInsensitive
|
||||
@ -28,7 +28,6 @@ interface Volume {
|
||||
internal val keyPair = networkMapCa.keyPair
|
||||
}
|
||||
|
||||
|
||||
fun CloudFile.uploadFromByteArray(array: ByteArray) {
|
||||
this.uploadFromByteArray(array, 0, array.size)
|
||||
}
|
||||
|
Some files were not shown because too many files have changed in this diff.