Mirror of https://github.com/corda/corda.git (synced 2024-12-26 08:01:09 +00:00)
ENT-3801 Backport to OS (#5355)
* [ENT-3801] Store transactions in the database during transaction resolution (#2305)
* ENT-3801: Store downloaded txns as part of the backchain resolution into the db rather than the checkpoint
It's very inefficient to store the downloaded backchain in the checkpoint as more of it is downloaded. Instead, once a threshold is reached (which currently defaults to 0), the backchain is stored in the transactions table as unverified. A new is_verified column has been added to track this. Initial testing on the OS codebase has been very promising, but unfortunately this current code is not quite ready. I had to quickly port it to ENT as this is meant to be an ENT-only optimisation.
To that effect, there is a TransactionsResolver abstraction with two implementations: an in-memory one that keeps the old behaviour (and will remain the behaviour for OS), and a database-backed one. A condensed sketch of the abstraction follows the change list below.
DBTransactionStorage hasn't been fully updated and I had to comment out the optimistic path for now.
Most of these changes will need to be ported to OS to keep the merge conflicts in check, but obviously not DbTransactionsResolver and the "is_verified" changes in DBTransactionStorage. DBTransactionStorage does have other refactoring which will make sense to port though.
* [ENT-3801] Start work on allowing modifications in AppendOnlyPersistentMap
* [ENT-3801] Add transaction resolver tests
* [ENT-3801] Adjust suspendable annotations
* [ENT-3801] Fix the ResolveTransactionFlow tests
* [ENT-3801] Update ResolveTransactionsFlow tests
* [ENT-3801] Add a liquibase migration script for isVerified
* [ENT-3801] Ensure the migration runs in the correct place
* [ENT-3801] Handle resolution of already present transactions
* [ENT-3801] Fix compile error in performance test app
* [ENT-3801] Logging and comment updates, plus a test case
* [ENT-3801] Add a notary change resolution test
* [ENT-3801] Add a contract upgrade transaction test
* [ENT-3801] Change new column to be a character based status
* [ENT-3801] Migration script type change
* [ENT-3801] Address first round of review comments
* [ENT-3801] Update variable names in AppendOnlyPersistentMap
* [ENT-3801] Another variable name clarification
* [ENT-3801] Fix missing name changes
* [ENT-3801] Make the signature list immutable when constructing cache value
* [ENT-3801] Add a locking strategy for unverified transactions
* [ENT-3801] Address tidying up review comments
* [ENT-3801] First attempt at ensuring locks are released after commit
* [ENT-3801] Remove references to old cache name
* [ENT-3801] Update locking logic
* [ENT-3801] Fix potential deadlock with read/write transaction locks
* [ENT-3801] Remove read locks, and ensure minimal extra suspends
* [ENT-3801] Fix build issues in tests
* [ENT-3801] Use the correct clock when calculating sleep durations
* [ENT-3801] Add a pessimism flag for writing verified transactions
* [ENT-3801] Change logging statement to debug
(cherry picked from commit 8ab6a55e17)
* [NOTICK] Fix up imports for some changed files
* [NOTICK] Fix transaction resolution tests
* [NOTICK] Reinstate the DBTransactionsResolver
* [NOTICK] Add the topological sort back to recordTransactions
* [NOTICK] Adjust test case to remove dependency on query ordering
* [NOTICK] Make test code match that in ENT
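The following is a condensed, non-authoritative sketch of the resolver abstraction described above, pieced together from the new files in this commit (ServiceHubCoreInternal.kt and DbTransactionsResolver.kt in the diff below). It shows only the shape of the API and the threshold lookup, not a complete implementation.

import co.paralleluniverse.fibers.Suspendable
import net.corda.core.internal.ResolveTransactionsFlow
import net.corda.core.node.ServiceHub
import net.corda.core.node.StatesToRecord

// Service hub extension point: the node supplies whichever resolver implementation it uses
// (the in-memory/checkpoint one on OS, the database-backed one on ENT).
interface ServiceHubCoreInternal : ServiceHub {
    fun createTransactionsResolver(flow: ResolveTransactionsFlow): TransactionsResolver
}

// Strategy for fetching and then recording a transaction's backchain.
interface TransactionsResolver {
    @Suspendable
    fun downloadDependencies()

    fun recordDependencies(usedStatesToRecord: StatesToRecord)
}

// The DB-backed resolver stops accumulating transactions in the flow checkpoint and starts
// writing them to the node database as unverified once this many have been downloaded.
// It defaults to 0, so by default the whole backchain goes straight to the database.
private val MAX_CHECKPOINT_RESOLUTION: Int =
        Integer.getInteger("net.corda.node.dbtransactionsresolver.InMemoryResolutionLimit", 0)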
parent 39094f1918
commit 44428b6048
Changed paths:
core-tests/src/test/kotlin/net/corda/coretests/internal
core/src/main/kotlin/net/corda/core/internal
    FetchDataFlow.kt
    ResolveTransactionsFlow.kt
    ServiceHubCoreInternal.kt
    TopologicalSort.kt
    TransactionUtils.kt
node/src
    main
        kotlin/net/corda/node
        resources/migration
    test/kotlin/net/corda/node
        messaging
        migration
        services
        persistence
        statemachine
        transactions
        vault
testing
    node-driver/src/main/kotlin/net/corda/testing/node/internal
    test-utils/src/main/kotlin/net/corda/testing/contracts
core-tests/src/test/kotlin/net/corda/coretests/internal/ResolveTransactionsFlowTest.kt
@@ -1,28 +1,40 @@
 package net.corda.coretests.internal

 import co.paralleluniverse.fibers.Suspendable
+import junit.framework.TestCase.assertTrue
+import net.corda.core.contracts.ContractState
+import net.corda.core.contracts.StateRef
+import net.corda.core.crypto.Crypto
 import net.corda.core.crypto.SecureHash
+import net.corda.core.crypto.SignableData
+import net.corda.core.crypto.SignatureMetadata
 import net.corda.core.flows.*
 import net.corda.core.identity.CordaX500Name
 import net.corda.core.identity.Party
-import net.corda.core.internal.FetchDataFlow
-import net.corda.core.internal.ResolveTransactionsFlow
-import net.corda.core.internal.TESTDSL_UPLOADER
+import net.corda.core.internal.*
+import net.corda.core.transactions.ContractUpgradeWireTransaction
+import net.corda.core.transactions.NotaryChangeWireTransaction
 import net.corda.core.transactions.SignedTransaction
 import net.corda.core.utilities.NonEmptySet
 import net.corda.core.utilities.getOrThrow
 import net.corda.core.utilities.sequence
 import net.corda.core.utilities.unwrap
 import net.corda.coretests.flows.TestNoSecurityDataVendingFlow
+import net.corda.node.services.DbTransactionsResolver
 import net.corda.testing.contracts.DummyContract
+import net.corda.testing.contracts.DummyContractV2
+import net.corda.testing.core.DUMMY_BANK_A_NAME
+import net.corda.testing.core.DUMMY_NOTARY_NAME
 import net.corda.testing.core.singleIdentity
 import net.corda.testing.node.MockNetwork
+import net.corda.testing.node.MockNetworkNotarySpec
 import net.corda.testing.node.MockNetworkParameters
 import net.corda.testing.node.StartedMockNode
 import net.corda.testing.node.internal.DUMMY_CONTRACTS_CORDAPP
 import net.corda.testing.node.internal.enclosedCordapp
 import org.junit.After
 import org.junit.Before
+import org.junit.Ignore
 import org.junit.Test
 import java.io.ByteArrayOutputStream
 import java.io.InputStream
@@ -39,24 +51,36 @@ class ResolveTransactionsFlowTest {
     private lateinit var notaryNode: StartedMockNode
     private lateinit var megaCorpNode: StartedMockNode
     private lateinit var miniCorpNode: StartedMockNode
+    private lateinit var newNotaryNode: StartedMockNode
     private lateinit var megaCorp: Party
     private lateinit var miniCorp: Party
     private lateinit var notary: Party
+    private lateinit var newNotary: Party

     @Before
     fun setup() {
-        mockNet = MockNetwork(MockNetworkParameters(cordappsForAllNodes = listOf(DUMMY_CONTRACTS_CORDAPP, enclosedCordapp())))
-        notaryNode = mockNet.defaultNotaryNode
+        val mockNetworkParameters = MockNetworkParameters(
+                cordappsForAllNodes = listOf(DUMMY_CONTRACTS_CORDAPP, enclosedCordapp()),
+                notarySpecs = listOf(
+                        MockNetworkNotarySpec(DUMMY_NOTARY_NAME),
+                        MockNetworkNotarySpec(DUMMY_BANK_A_NAME)
+                )
+        )
+        mockNet = MockNetwork(mockNetworkParameters)
+        notaryNode = mockNet.notaryNodes.first()
         megaCorpNode = mockNet.createPartyNode(CordaX500Name("MegaCorp", "London", "GB"))
         miniCorpNode = mockNet.createPartyNode(CordaX500Name("MiniCorp", "London", "GB"))
-        notary = mockNet.defaultNotaryIdentity
+        notary = notaryNode.info.singleIdentity()
         megaCorp = megaCorpNode.info.singleIdentity()
         miniCorp = miniCorpNode.info.singleIdentity()
+        newNotaryNode = mockNet.notaryNodes[1]
+        newNotary = mockNet.notaryNodes[1].info.singleIdentity()
     }

     @After
     fun tearDown() {
         mockNet.stopNodes()
+        System.setProperty("net.corda.node.dbtransactionsresolver.InMemoryResolutionLimit", "0")
     }
     // DOCEND 3

@@ -98,26 +122,6 @@ class ResolveTransactionsFlowTest {
         }
     }

-    @Test
-    fun `denial of service check`() {
-        // Chain lots of txns together.
-        val stx2 = makeTransactions().second
-        val count = 50
-        var cursor = stx2
-        repeat(count) {
-            val builder = DummyContract.move(cursor.tx.outRef(0), miniCorp)
-            val stx = megaCorpNode.services.signInitialTransaction(builder)
-            megaCorpNode.transaction {
-                megaCorpNode.services.recordTransactions(stx)
-            }
-            cursor = stx
-        }
-        val p = TestFlow(setOf(cursor.id), megaCorp, 40)
-        val future = miniCorpNode.startFlow(p)
-        mockNet.runNetwork()
-        assertFailsWith<ResolveTransactionsFlow.ExcessivelyLargeTransactionGraph> { future.getOrThrow() }
-    }
-
     @Test
     fun `triangle of transactions resolves fine`() {
         val stx1 = makeTransactions().first
@@ -201,6 +205,85 @@ class ResolveTransactionsFlowTest {
         assertFailsWith<FetchDataFlow.IllegalTransactionRequest> { future.getOrThrow() }
     }

+    @Test
+    fun `Switches between checkpoint and DB based resolution correctly`() {
+        System.setProperty("${DbTransactionsResolver::class.java.name}.max-checkpoint-resolution", "20")
+        var numTransactions = 0
+        megaCorpNode.services.validatedTransactions.updates.subscribe {
+            numTransactions++
+        }
+        val txToResolve = makeLargeTransactionChain(50)
+        var numUpdates = 0
+        miniCorpNode.services.validatedTransactions.updates.subscribe {
+            numUpdates++
+        }
+        val p = TestFlow(txToResolve, megaCorp)
+        val future = miniCorpNode.startFlow(p)
+        mockNet.runNetwork()
+        future.getOrThrow()
+        // ResolveTransactionsFlow only stores transaction dependencies and not the requested transaction, so there will be one fewer
+        // transaction stored on the receiving node than on the sending one.
+        assertEquals(numTransactions - 1, numUpdates)
+    }
+
+    @Test
+    fun `resolution works when transaction in chain is already resolved`() {
+        val (tx1, tx2) = makeTransactions()
+        miniCorpNode.transaction {
+            miniCorpNode.services.recordTransactions(tx1)
+        }
+
+        val p = TestFlow(tx2, megaCorp)
+        val future = miniCorpNode.startFlow(p)
+        mockNet.runNetwork()
+        future.getOrThrow()
+    }
+
+    @Test
+    fun `can resolve a chain of transactions containing a notary change transaction`() {
+        val tx = notaryChangeChain()
+        var numUpdates = 0
+        var notaryChangeTxSeen = false
+        miniCorpNode.services.validatedTransactions.updates.subscribe {
+            numUpdates++
+            notaryChangeTxSeen = it.coreTransaction is NotaryChangeWireTransaction || notaryChangeTxSeen
+        }
+        val p = TestFlow(tx, megaCorp)
+        val future = miniCorpNode.startFlow(p)
+        mockNet.runNetwork()
+        future.getOrThrow()
+        assertEquals(2, numUpdates)
+        assertTrue(notaryChangeTxSeen)
+    }
+
+    @Test
+    fun `can resolve a chain of transactions containing a contract upgrade transaction`() {
+        val tx = contractUpgradeChain()
+        var numUpdates = 0
+        var upgradeTxSeen = false
+        miniCorpNode.services.validatedTransactions.updates.subscribe {
+            numUpdates++
+            upgradeTxSeen = it.coreTransaction is ContractUpgradeWireTransaction || upgradeTxSeen
+        }
+        val p = TestFlow(tx, megaCorp)
+        val future = miniCorpNode.startFlow(p)
+        mockNet.runNetwork()
+        future.getOrThrow()
+        assertEquals(2, numUpdates)
+        assertTrue(upgradeTxSeen)
+    }
+
+    // Used for checking larger chains resolve correctly. Note that this takes a long time to run, and so is not suitable for a CI gate.
+    @Test
+    @Ignore
+    fun `Can resolve large chain of transactions`() {
+        val txToResolve = makeLargeTransactionChain(2500)
+        val p = TestFlow(txToResolve, megaCorp)
+        val future = miniCorpNode.startFlow(p)
+        mockNet.runNetwork()
+        future.getOrThrow()
+    }
+
     // DOCSTART 2
     private fun makeTransactions(signFirstTX: Boolean = true, withAttachment: SecureHash? = null): Pair<SignedTransaction, SignedTransaction> {
         // Make a chain of custody of dummy states and insert into node A.
@@ -231,23 +314,125 @@ class ResolveTransactionsFlowTest {
     }
     // DOCEND 2

+    private fun makeLargeTransactionChain(chainLength: Int): SignedTransaction {
+        var currentTx = DummyContract.generateInitial(0, notary, megaCorp.ref(1)).let {
+            val ptx = megaCorpNode.services.signInitialTransaction(it)
+            notaryNode.services.addSignature(ptx, notary.owningKey)
+        }
+        megaCorpNode.transaction {
+            megaCorpNode.services.recordTransactions(currentTx)
+        }
+
+        for (i in 1 until chainLength) {
+            currentTx = DummyContract.move(currentTx.tx.outRef(0), miniCorp).let {
+                val ptx = megaCorpNode.services.signInitialTransaction(it)
+                val ptx2 = miniCorpNode.services.addSignature(ptx, miniCorp.owningKey)
+                notaryNode.services.addSignature(ptx2, notary.owningKey)
+            }
+            megaCorpNode.transaction {
+                megaCorpNode.services.recordTransactions(currentTx)
+            }
+        }
+        return currentTx
+    }
+
+    private fun createNotaryChangeTransaction(inputs: List<StateRef>): SignedTransaction {
+        val notaryTx = NotaryChangeTransactionBuilder(inputs, notary, newNotary, notaryNode.services.networkParametersService.defaultHash).build()
+        val notaryKey = notary.owningKey
+        val signableData = SignableData(notaryTx.id, SignatureMetadata(4, Crypto.findSignatureScheme(notaryKey).schemeNumberID))
+        val signature = notaryNode.services.keyManagementService.sign(signableData, notaryKey)
+        val newNotarySig = newNotaryNode.services.keyManagementService.sign(signableData, newNotary.owningKey)
+        val ownerSig = megaCorpNode.services.keyManagementService.sign(signableData, megaCorp.owningKey)
+        return SignedTransaction(notaryTx, listOf(signature, newNotarySig, ownerSig))
+    }
+
+    private fun notaryChangeChain(): SignedTransaction {
+        var currentTx = DummyContract.generateInitial(0, notary, megaCorp.ref(1)).let {
+            val ptx = megaCorpNode.services.signInitialTransaction(it)
+            notaryNode.services.addSignature(ptx, notary.owningKey)
+        }
+        megaCorpNode.transaction {
+            megaCorpNode.services.recordTransactions(currentTx)
+        }
+
+        currentTx = createNotaryChangeTransaction(currentTx.tx.outRefsOfType<ContractState>().map { it.ref })
+        megaCorpNode.transaction {
+            megaCorpNode.services.recordTransactions(currentTx)
+        }
+
+        val ledgerTx = (currentTx.coreTransaction as NotaryChangeWireTransaction).resolve(megaCorpNode.services, currentTx.sigs)
+        val outState = ledgerTx.outRef<DummyContract.SingleOwnerState>(0)
+
+        currentTx = DummyContract.move(outState, miniCorp).let {
+            val ptx = megaCorpNode.services.signInitialTransaction(it)
+            val ptx2 = miniCorpNode.services.addSignature(ptx, miniCorp.owningKey)
+            newNotaryNode.services.addSignature(ptx2, newNotary.owningKey)
+        }
+        megaCorpNode.transaction {
+            megaCorpNode.services.recordTransactions(currentTx)
+        }
+        return currentTx
+    }
+
+    private fun createContractUpgradeTransaction(inputs: List<StateRef>, previousTx: SignedTransaction): SignedTransaction {
+        val contractTx = ContractUpgradeTransactionBuilder(
+                inputs,
+                notary,
+                previousTx.tx.attachments.first(),
+                DummyContractV2.PROGRAM_ID,
+                previousTx.tx.attachments.first(),
+                networkParametersHash = notaryNode.services.networkParametersService.defaultHash
+        ).build()
+        val notaryKey = notary.owningKey
+        val signableData = SignableData(contractTx.id, SignatureMetadata(4, Crypto.findSignatureScheme(notaryKey).schemeNumberID))
+        val signature = notaryNode.services.keyManagementService.sign(signableData, notaryKey)
+        val ownerSig = megaCorpNode.services.keyManagementService.sign(signableData, megaCorp.owningKey)
+        return SignedTransaction(contractTx, listOf(signature, ownerSig))
+    }
+
+    private fun contractUpgradeChain(): SignedTransaction {
+        var currentTx = DummyContract.generateInitial(0, notary, megaCorp.ref(1)).let {
+            val ptx = megaCorpNode.services.signInitialTransaction(it)
+            notaryNode.services.addSignature(ptx, notary.owningKey)
+        }
+        megaCorpNode.transaction {
+            megaCorpNode.services.recordTransactions(currentTx)
+        }
+
+        currentTx = createContractUpgradeTransaction(currentTx.tx.outRefsOfType<ContractState>().map { it.ref }, currentTx)
+        megaCorpNode.transaction {
+            megaCorpNode.services.recordTransactions(currentTx)
+        }
+
+        val ledgerTx = (currentTx.coreTransaction as ContractUpgradeWireTransaction).resolve(megaCorpNode.services, currentTx.sigs)
+        val outState = ledgerTx.outRef<DummyContractV2.State>(0)
+
+        currentTx = DummyContractV2.move(outState, miniCorp).let {
+            val ptx = megaCorpNode.services.signInitialTransaction(it)
+            val ptx2 = miniCorpNode.services.addSignature(ptx, miniCorp.owningKey)
+            notaryNode.services.addSignature(ptx2, notary.owningKey)
+        }
+        megaCorpNode.transaction {
+            megaCorpNode.services.recordTransactions(currentTx)
+        }
+        return currentTx
+    }
+
     @InitiatingFlow
-    open class TestFlow(val otherSide: Party, private val resolveTransactionsFlowFactory: (FlowSession) -> ResolveTransactionsFlow, private val txCountLimit: Int? = null) : FlowLogic<Unit>() {
-        constructor(txHashes: Set<SecureHash>, otherSide: Party, txCountLimit: Int? = null) : this(otherSide, { ResolveTransactionsFlow(txHashes, it) }, txCountLimit = txCountLimit)
+    open class TestFlow(private val otherSide: Party, private val resolveTransactionsFlowFactory: (FlowSession) -> ResolveTransactionsFlow) : FlowLogic<Unit>() {
+        constructor(txHashes: Set<SecureHash>, otherSide: Party) : this(otherSide, { ResolveTransactionsFlow(txHashes, it) })
         constructor(stx: SignedTransaction, otherSide: Party) : this(otherSide, { ResolveTransactionsFlow(stx, it) })

         @Suspendable
         override fun call() {
             val session = initiateFlow(otherSide)
             val resolveTransactionsFlow = resolveTransactionsFlowFactory(session)
-            txCountLimit?.let { resolveTransactionsFlow.transactionCountLimit = it }
             subFlow(resolveTransactionsFlow)
         }
     }

     @Suppress("unused")
     @InitiatedBy(TestFlow::class)
-    class TestResponseFlow(val otherSideSession: FlowSession) : FlowLogic<Void?>() {
+    class TestResponseFlow(private val otherSideSession: FlowSession) : FlowLogic<Void?>() {
         @Suspendable
         override fun call() = subFlow(TestNoSecurityDataVendingFlow(otherSideSession))
     }
core-tests/src/test/kotlin/net/corda/coretests/internal/TopologicalSortTest.kt (deleted)
@@ -1,109 +0,0 @@
-package net.corda.coretests.internal
-
-import net.corda.client.mock.Generator
-import net.corda.core.contracts.*
-import net.corda.core.crypto.SecureHash
-import net.corda.core.crypto.SignatureMetadata
-import net.corda.core.crypto.TransactionSignature
-import net.corda.core.crypto.sign
-import net.corda.core.identity.AbstractParty
-import net.corda.core.identity.Party
-import net.corda.core.internal.topologicalSort
-import net.corda.core.serialization.serialize
-import net.corda.core.transactions.CoreTransaction
-import net.corda.core.transactions.SignedTransaction
-import net.corda.testing.common.internal.testNetworkParameters
-import net.corda.testing.core.SerializationEnvironmentRule
-import net.corda.testing.core.TestIdentity
-import org.junit.Rule
-import org.junit.Test
-import rx.Observable
-import java.util.*
-
-class TopologicalSortTest {
-    class DummyTransaction constructor(
-            override val id: SecureHash,
-            override val inputs: List<StateRef>,
-            @Suppress("CanBeParameter") val numberOfOutputs: Int,
-            override val notary: Party,
-            override val references: List<StateRef> = emptyList()
-    ) : CoreTransaction() {
-        override val outputs: List<TransactionState<ContractState>> = (1..numberOfOutputs).map {
-            TransactionState(DummyState(), Contract::class.java.name, notary)
-        }
-        override val networkParametersHash: SecureHash? = testNetworkParameters().serialize().hash
-    }
-
-    @BelongsToContract(Contract::class)
-    class DummyState : ContractState {
-        override val participants: List<AbstractParty> = emptyList()
-    }
-
-    @Rule
-    @JvmField
-    val testSerialization = SerializationEnvironmentRule()
-
-    @Test
-    fun topologicalObservableSort() {
-        val testIdentity = TestIdentity.fresh("asd")
-
-        val N = 10
-        // generate random tx DAG
-        val ids = (1..N).map { SecureHash.sha256("$it") }
-        val forwardsGenerators = (0 until ids.size).map { i ->
-            Generator.sampleBernoulli(ids.subList(i + 1, ids.size), 0.8).map { outputs -> ids[i] to outputs }
-        }
-        val transactions = Generator.sequence(forwardsGenerators).map { forwardGraph ->
-            val backGraph = forwardGraph.flatMap { it.second.map { output -> it.first to output } }.fold(HashMap<SecureHash, HashSet<SecureHash>>()) { backGraph, edge ->
-                backGraph.getOrPut(edge.second) { HashSet() }.add(edge.first)
-                backGraph
-            }
-            val outrefCounts = HashMap<SecureHash, Int>()
-            val transactions = ArrayList<SignedTransaction>()
-            for ((id, outputs) in forwardGraph) {
-                val inputs = (backGraph[id]?.toList() ?: emptyList()).map { inputTxId ->
-                    val ref = outrefCounts.compute(inputTxId) { _, count ->
-                        if (count == null) {
-                            0
-                        } else {
-                            count + 1
-                        }
-                    }!!
-                    StateRef(inputTxId, ref)
-                }
-                val tx = DummyTransaction(id, inputs, outputs.size, testIdentity.party)
-                val bits = tx.serialize().bytes
-                val sig = TransactionSignature(testIdentity.keyPair.private.sign(bits).bytes, testIdentity.publicKey, SignatureMetadata(0, 0))
-                val stx = SignedTransaction(tx, listOf(sig))
-                transactions.add(stx)
-            }
-            transactions
-        }
-
-        // Swap two random items
-        transactions.combine(Generator.intRange(0, N - 1), Generator.intRange(0, N - 2)) { txs, i, _ ->
-            val k = 0 // if (i == j) i + 1 else j
-            val tmp = txs[i]
-            txs[i] = txs[k]
-            txs[k] = tmp
-            txs
-        }
-
-        val random = SplittableRandom()
-        for (i in 1..100) {
-            val txs = transactions.generateOrFail(random)
-            val ordered = Observable.from(txs).topologicalSort(emptyList()).toList().toBlocking().first()
-            checkTopologicallyOrdered(ordered)
-        }
-    }
-
-    private fun checkTopologicallyOrdered(txs: List<SignedTransaction>) {
-        val outputs = HashSet<StateRef>()
-        for (tx in txs) {
-            if (!outputs.containsAll(tx.inputs)) {
-                throw IllegalStateException("Transaction $tx's inputs ${tx.inputs} are not satisfied by $outputs")
-            }
-            outputs.addAll(tx.coreTransaction.outputs.mapIndexed { i, _ -> StateRef(tx.id, i) })
-        }
-    }
-}
core/src/main/kotlin/net/corda/core/internal/FetchDataFlow.kt
@@ -77,7 +77,8 @@ sealed class FetchDataFlow<T : NamedByHash, in W : Any>(
         val (fromDisk, toFetch) = loadWhatWeHave()

         return if (toFetch.isEmpty()) {
-            Result(fromDisk, emptyList())
+            val loadedFromDisk = loadExpected(fromDisk)
+            Result(loadedFromDisk, emptyList())
         } else {
             logger.debug { "Requesting ${toFetch.size} dependency(s) for verification from ${otherSideSession.counterparty.name}" }

@@ -99,7 +100,9 @@ sealed class FetchDataFlow<T : NamedByHash, in W : Any>(
             val downloaded = validateFetchResponse(UntrustworthyData(maybeItems), toFetch)
             logger.debug { "Fetched ${downloaded.size} elements from ${otherSideSession.counterparty.name}" }
             maybeWriteToDisk(downloaded)
-            Result(fromDisk, downloaded)
+            // Re-load items already present before the download procedure. This ensures these objects are not unnecessarily checkpointed.
+            val loadedFromDisk = loadExpected(fromDisk)
+            Result(loadedFromDisk, downloaded)
         }
     }

@@ -107,19 +110,29 @@ sealed class FetchDataFlow<T : NamedByHash, in W : Any>(
         // Do nothing by default.
     }

-    private fun loadWhatWeHave(): Pair<List<T>, List<SecureHash>> {
-        val fromDisk = ArrayList<T>()
+    private fun loadWhatWeHave(): Pair<List<SecureHash>, List<SecureHash>> {
+        val fromDisk = ArrayList<SecureHash>()
         val toFetch = ArrayList<SecureHash>()
         for (txid in requests) {
             val stx = load(txid)
             if (stx == null)
                 toFetch += txid
             else
-                fromDisk += stx
+                // Although the full object is loaded here, only return the id. This prevents the full set of objects already present from
+                // being checkpointed every time a request is made to download an object the node does not yet have.
+                fromDisk += txid
         }
         return Pair(fromDisk, toFetch)
     }

+    private fun loadExpected(ids: List<SecureHash>): List<T> {
+        val loaded = ids.mapNotNull { load(it) }
+        require(ids.size == loaded.size) {
+            "Expected to find ${ids.size} items in database but only found ${loaded.size} items"
+        }
+        return loaded
+    }
+
     protected abstract fun load(txid: SecureHash): T?

     protected open fun convert(wire: W): T = uncheckedCast(wire)

core/src/main/kotlin/net/corda/core/internal/ResolveTransactionsFlow.kt
@@ -2,37 +2,28 @@ package net.corda.core.internal

 import co.paralleluniverse.fibers.Suspendable
 import net.corda.core.DeleteForDJVM
-import net.corda.core.contracts.TransactionResolutionException
-import net.corda.core.contracts.TransactionVerificationException
 import net.corda.core.crypto.SecureHash
-import net.corda.core.flows.FlowException
 import net.corda.core.flows.FlowLogic
 import net.corda.core.flows.FlowSession
-import net.corda.core.node.ServiceHub
 import net.corda.core.node.StatesToRecord
-import net.corda.core.serialization.CordaSerializable
 import net.corda.core.transactions.ContractUpgradeWireTransaction
 import net.corda.core.transactions.SignedTransaction
 import net.corda.core.transactions.WireTransaction
-import net.corda.core.utilities.exactAdd
-import java.util.*
-import kotlin.collections.ArrayList
-import kotlin.math.min

-// TODO: This code is currently unit tested by TwoPartyTradeFlowTests, it should have its own tests.
 /**
  * Resolves transactions for the specified [txHashes] along with their full history (dependency graph) from [otherSide].
  * Each retrieved transaction is validated and inserted into the local transaction storage.
  */
 @DeleteForDJVM
-class ResolveTransactionsFlow(txHashesArg: Set<SecureHash>,
-                              private val otherSide: FlowSession,
-                              private val statesToRecord: StatesToRecord = StatesToRecord.NONE) : FlowLogic<Unit>() {
+class ResolveTransactionsFlow private constructor(
+        val initialTx: SignedTransaction?,
+        val txHashes: Set<SecureHash>,
+        val otherSide: FlowSession,
+        val statesToRecord: StatesToRecord
+) : FlowLogic<Unit>() {

-    // Need it ordered in terms of iteration. Needs to be a variable for the check-pointing logic to work.
-    private val txHashes = txHashesArg.toList()
-    /** Transaction to fetch attachments for. */
-    private var signedTransaction: SignedTransaction? = null
+    constructor(txHashes: Set<SecureHash>, otherSide: FlowSession, statesToRecord: StatesToRecord = StatesToRecord.NONE)
+            : this(null, txHashes, otherSide, statesToRecord)

     /**
      * Resolves and validates the dependencies of the specified [SignedTransaction]. Fetches the attachments, but does
@@ -40,163 +31,74 @@ class ResolveTransactionsFlow(txHashesArg: Set<SecureHash>,
      *
      * @return a list of verified [SignedTransaction] objects, in a depth-first order.
      */
-    constructor(signedTransaction: SignedTransaction, otherSide: FlowSession) : this(dependencyIDs(signedTransaction), otherSide) {
-        this.signedTransaction = signedTransaction
-    }
-
-    constructor(signedTransaction: SignedTransaction, otherSide: FlowSession, statesToRecord: StatesToRecord) : this(dependencyIDs(signedTransaction), otherSide, statesToRecord) {
-        this.signedTransaction = signedTransaction
-    }
-
-    @DeleteForDJVM
-    companion object {
-        private fun dependencyIDs(stx: SignedTransaction) = stx.inputs.map { it.txhash }.toSet() + stx.references.map { it.txhash }.toSet()
-
-        private const val RESOLUTION_PAGE_SIZE = 100
-
-        /** Topologically sorts the given transactions such that dependencies are listed before dependers. */
-        @JvmStatic
-        fun topologicalSort(transactions: Collection<SignedTransaction>): List<SignedTransaction> {
-            val sort = TopologicalSort()
-            for (tx in transactions) {
-                sort.add(tx)
-            }
-            return sort.complete()
-        }
-    }
-
-    class ExcessivelyLargeTransactionGraph : FlowException()
-
-    // TODO: Figure out a more appropriate DOS limit here, 5000 is simply a very bad guess.
-    /** The maximum number of transactions this flow will try to download before bailing out. */
-    var transactionCountLimit = 5000
-        set(value) {
-            require(value > 0) { "$value is not a valid count limit" }
-            field = value
-        }
+    constructor(transaction: SignedTransaction, otherSide: FlowSession, statesToRecord: StatesToRecord = StatesToRecord.NONE)
+            : this(transaction, transaction.dependencies, otherSide, statesToRecord)
+
+    private var fetchNetParamsFromCounterpart = false

     @Suspendable
-    @Throws(FetchDataFlow.HashNotFound::class, FetchDataFlow.IllegalTransactionRequest::class)
     override fun call() {
-        val counterpartyPlatformVersion = serviceHub.networkMapCache.getNodeByLegalIdentity(otherSide.counterparty)?.platformVersion
-                ?: throw FlowException("Couldn't retrieve party's ${otherSide.counterparty} platform version from NetworkMapCache")
-        val newTxns = ArrayList<SignedTransaction>(txHashes.size)
-        // Start fetching data.
-        for (pageNumber in 0..(txHashes.size - 1) / RESOLUTION_PAGE_SIZE) {
-            val page = page(pageNumber, RESOLUTION_PAGE_SIZE)
-
-            newTxns += downloadDependencies(page)
-            val txsWithMissingAttachments = if (pageNumber == 0) signedTransaction?.let { newTxns + it }
-                    ?: newTxns else newTxns
-            fetchMissingAttachments(txsWithMissingAttachments)
-            // Fetch missing parameters flow was added in version 4. This check is needed so we don't end up with node V4 sending parameters
-            // request to node V3 that doesn't know about this protocol.
-            if (counterpartyPlatformVersion >= 4) {
-                fetchMissingParameters(txsWithMissingAttachments)
-            }
-        }
-        otherSide.send(FetchDataFlow.Request.End)
-        // Finish fetching data.
-
-        val result = topologicalSort(newTxns)
+        // TODO This error should actually cause the flow to be sent to the flow hospital to be retried
+        val counterpartyPlatformVersion = checkNotNull(serviceHub.networkMapCache.getNodeByLegalIdentity(otherSide.counterparty)?.platformVersion) {
+            "Couldn't retrieve party's ${otherSide.counterparty} platform version from NetworkMapCache"
+        }
+        // Fetch missing parameters flow was added in version 4. This check is needed so we don't end up with node V4 sending parameters
+        // request to node V3 that doesn't know about this protocol.
+        fetchNetParamsFromCounterpart = counterpartyPlatformVersion >= 4
+        if (initialTx != null) {
+            fetchMissingAttachments(initialTx)
+            fetchMissingNetworkParameters(initialTx)
+        }
+
+        val resolver = (serviceHub as ServiceHubCoreInternal).createTransactionsResolver(this)
+        resolver.downloadDependencies()
+
+        otherSide.send(FetchDataFlow.Request.End) // Finish fetching data.
+
         // If transaction resolution is performed for a transaction where some states are relevant, then those should be
         // recorded if this has not already occurred.
         val usedStatesToRecord = if (statesToRecord == StatesToRecord.NONE) StatesToRecord.ONLY_RELEVANT else statesToRecord
-        result.forEach {
-            // For each transaction, verify it and insert it into the database. As we are iterating over them in a
-            // depth-first order, we should not encounter any verification failures due to missing data. If we fail
-            // half way through, it's no big deal, although it might result in us attempting to re-download data
-            // redundantly next time we attempt verification.
-            it.verify(serviceHub)
-            serviceHub.recordTransactions(usedStatesToRecord, listOf(it))
-        }
-    }
-
-    private fun page(pageNumber: Int, pageSize: Int): Set<SecureHash> {
-        val offset = pageNumber * pageSize
-        val limit = min(offset + pageSize, txHashes.size)
-        // call toSet() is needed because sub-lists are not checkpoint-friendly.
-        return txHashes.subList(offset, limit).toSet()
-    }
-
-    @Suspendable
-    // TODO use paging here (we literally get the entire dependencies graph in memory)
-    private fun downloadDependencies(depsToCheck: Set<SecureHash>): List<SignedTransaction> {
-        // Maintain a work queue of all hashes to load/download, initialised with our starting set. Then do a breadth
-        // first traversal across the dependency graph.
-        //
-        // TODO: This approach has two problems. Analyze and resolve them:
-        //
-        // (1) This flow leaks private data. If you download a transaction and then do NOT request a
-        // dependency, it means you already have it, which in turn means you must have been involved with it before
-        // somehow, either in the tx itself or in any following spend of it. If there were no following spends, then
-        // your peer knows for sure that you were involved ... this is bad! The only obvious ways to fix this are
-        // something like onion routing of requests, secure hardware, or both.
-        //
-        // (2) If the identity service changes the assumed identity of one of the public keys, it's possible
-        // that the "tx in db is valid" invariant is violated if one of the contracts checks the identity! Should
-        // the db contain the identities that were resolved when the transaction was first checked, or should we
-        // accept this kind of change is possible? Most likely solution is for identity data to be an attachment.
-
-        val nextRequests = LinkedHashSet<SecureHash>() // Keep things unique but ordered, for unit test stability.
-        nextRequests.addAll(depsToCheck)
-        val resultQ = LinkedHashMap<SecureHash, SignedTransaction>()
-
-        val limit = transactionCountLimit
-        var limitCounter = 0
-        while (nextRequests.isNotEmpty()) {
-            // Don't re-download the same tx when we haven't verified it yet but it's referenced multiple times in the
-            // graph we're traversing.
-            val notAlreadyFetched: Set<SecureHash> = nextRequests - resultQ.keys
-            nextRequests.clear()
-
-            if (notAlreadyFetched.isEmpty()) // Done early.
-                break
-
-            // Request the standalone transaction data (which may refer to things we don't yet have).
-            // TODO use paging here
-            val downloads: List<SignedTransaction> = subFlow(FetchTransactionsFlow(notAlreadyFetched, otherSide)).downloaded
-
-            for (stx in downloads)
-                check(resultQ.putIfAbsent(stx.id, stx) == null) // Assert checks the filter at the start.
-
-            // Add all input states and reference input states to the work queue.
-            val inputHashes = downloads.flatMap { it.inputs + it.references }.map { it.txhash }
-
-            nextRequests.addAll(inputHashes)
-
-            limitCounter = limitCounter exactAdd nextRequests.size
-            if (limitCounter > limit)
-                throw ExcessivelyLargeTransactionGraph()
-        }
-        return resultQ.values.toList()
+        resolver.recordDependencies(usedStatesToRecord)
     }

     /**
-     * Returns a list of all the dependencies of the given transactions, deepest first i.e. the last downloaded comes
-     * first in the returned list and thus doesn't have any unverified dependencies.
+     * Fetches the set of attachments required to verify the given transaction. If these are not already present, they will be fetched from
+     * a remote peer.
+     *
+     * @param transaction The transaction to fetch attachments for
+     * @return True if any attachments were fetched from a remote peer, false otherwise
      */
     // TODO: This could be done in parallel with other fetches for extra speed.
     @Suspendable
-    private fun fetchMissingAttachments(downloads: List<SignedTransaction>) {
-        val attachments = downloads.map(SignedTransaction::coreTransaction).flatMap { tx ->
-            when (tx) {
-                is WireTransaction -> tx.attachments
-                is ContractUpgradeWireTransaction -> listOf(tx.legacyContractAttachmentId, tx.upgradedContractAttachmentId)
-                else -> emptyList()
-            }
-        }
-        val missingAttachments = attachments.filter { serviceHub.attachments.openAttachment(it) == null }
-        if (missingAttachments.isNotEmpty())
-            subFlow(FetchAttachmentsFlow(missingAttachments.toSet(), otherSide))
+    fun fetchMissingAttachments(transaction: SignedTransaction): Boolean {
+        val tx = transaction.coreTransaction
+        val attachmentIds = when (tx) {
+            is WireTransaction -> tx.attachments.toSet()
+            is ContractUpgradeWireTransaction -> setOf(tx.legacyContractAttachmentId, tx.upgradedContractAttachmentId)
+            else -> return false
+        }
+        val downloads = subFlow(FetchAttachmentsFlow(attachmentIds, otherSide)).downloaded
+        return (downloads.isNotEmpty())
     }

+    /**
+     * Fetches the network parameters under which the given transaction was created. Note that if the transaction was created pre-V4, or if
+     * the counterparty does not understand that network parameters may need to be fetched, no parameters will be requested.
+     *
+     * @param transaction The transaction to fetch the network parameters for, if the parameters are not already present
+     * @return True if the network parameters were fetched from a remote peer, false otherwise
+     */
     // TODO This can also be done in parallel. See comment to [fetchMissingAttachments] above.
     @Suspendable
-    private fun fetchMissingParameters(downloads: List<SignedTransaction>) {
-        val parameters = downloads.mapNotNull { it.networkParametersHash }
-        val missingParameters = parameters.filter { !(serviceHub.networkParametersService as NetworkParametersStorage).hasParameters(it) }
-        if (missingParameters.isNotEmpty())
-            subFlow(FetchNetworkParametersFlow(missingParameters.toSet(), otherSide))
+    fun fetchMissingNetworkParameters(transaction: SignedTransaction): Boolean {
+        return if (fetchNetParamsFromCounterpart) {
+            transaction.networkParametersHash?.let {
+                val downloads = subFlow(FetchNetworkParametersFlow(setOf(it), otherSide)).downloaded
+                downloads.isNotEmpty()
+            } ?: false
+        } else {
+            false
+        }
     }
 }
core/src/main/kotlin/net/corda/core/internal/ServiceHubCoreInternal.kt (new file)
@@ -0,0 +1,17 @@
+package net.corda.core.internal
+
+import co.paralleluniverse.fibers.Suspendable
+import net.corda.core.node.ServiceHub
+import net.corda.core.node.StatesToRecord
+
+// TODO: This should really be called ServiceHubInternal but that name is already taken by net.corda.node.services.api.ServiceHubInternal.
+interface ServiceHubCoreInternal : ServiceHub {
+    fun createTransactionsResolver(flow: ResolveTransactionsFlow): TransactionsResolver
+}
+
+interface TransactionsResolver {
+    @Suspendable
+    fun downloadDependencies()
+
+    fun recordDependencies(usedStatesToRecord: StatesToRecord)
+}
core/src/main/kotlin/net/corda/core/internal/TopologicalSort.kt (deleted)
@@ -1,118 +0,0 @@
-package net.corda.core.internal
-
-import net.corda.core.contracts.StateRef
-import net.corda.core.crypto.SecureHash
-import net.corda.core.transactions.SignedTransaction
-import rx.Observable
-
-/**
- * Provides a way to topologically sort SignedTransactions. This means that given any two transactions T1 and T2 in the
- * list returned by [complete] if T1 is a dependency of T2 then T1 will occur earlier than T2.
- */
-class TopologicalSort {
-    private val forwardGraph = HashMap<SecureHash, LinkedHashSet<SignedTransaction>>()
-    private val transactions = ArrayList<SignedTransaction>()
-
-    /**
-     * Add a transaction to the to-be-sorted set of transactions.
-     */
-    fun add(stx: SignedTransaction) {
-        val stateRefs = stx.references + stx.inputs
-        stateRefs.forEach { (txhash) ->
-            // Note that we use a LinkedHashSet here to make the traversal deterministic (as long as the input list is).
-            forwardGraph.getOrPut(txhash) { LinkedHashSet() }.add(stx)
-        }
-        transactions.add(stx)
-    }
-
-    /**
-     * Return the sorted list of signed transactions.
-     */
-    fun complete(): List<SignedTransaction> {
-        val visited = HashSet<SecureHash>(transactions.size)
-        val result = ArrayList<SignedTransaction>(transactions.size)
-
-        fun visit(transaction: SignedTransaction) {
-            if (transaction.id !in visited) {
-                visited.add(transaction.id)
-                forwardGraph[transaction.id]?.forEach(::visit)
-                result.add(transaction)
-            }
-        }
-
-        transactions.forEach(::visit)
-        return result.reversed()
-    }
-}
-
-private fun getOutputStateRefs(stx: SignedTransaction): List<StateRef> {
-    return stx.coreTransaction.outputs.mapIndexed { i, _ -> StateRef(stx.id, i) }
-}
-
-/**
- * Topologically sort a SignedTransaction Observable on the fly by buffering transactions until all dependencies are met.
- * @param initialUnspentRefs the list of unspent references that may be spent by transactions in the observable. This is
- * the initial set of references the sort uses to decide whether to buffer transactions or not. For example if this
- * is empty then the Observable should start with issue transactions that don't have inputs.
- */
-fun Observable<SignedTransaction>.topologicalSort(initialUnspentRefs: Collection<StateRef>): Observable<SignedTransaction> {
-    data class State(
-            val unspentRefs: HashSet<StateRef>,
-            val bufferedTopologicalSort: TopologicalSort,
-            val bufferedInputs: HashSet<StateRef>,
-            val bufferedOutputs: HashSet<StateRef>
-    )
-
-    var state = State(
-            unspentRefs = HashSet(initialUnspentRefs),
-            bufferedTopologicalSort = TopologicalSort(),
-            bufferedInputs = HashSet(),
-            bufferedOutputs = HashSet()
-    )
-
-    return concatMapIterable { stx ->
-        val results = ArrayList<SignedTransaction>()
-        if (state.unspentRefs.containsAll(stx.inputs)) {
-            // Dependencies are satisfied
-            state.unspentRefs.removeAll(stx.inputs)
-            state.unspentRefs.addAll(getOutputStateRefs(stx))
-            results.add(stx)
-        } else {
-            // Dependencies are not satisfied, buffer
-            state.bufferedTopologicalSort.add(stx)
-            state.bufferedInputs.addAll(stx.inputs)
-            for (outputRef in getOutputStateRefs(stx)) {
-                if (!state.bufferedInputs.remove(outputRef)) {
-                    state.bufferedOutputs.add(outputRef)
-                }
-            }
-            for (inputRef in stx.inputs) {
-                if (!state.bufferedOutputs.remove(inputRef)) {
-                    state.bufferedInputs.add(inputRef)
-                }
-            }
-        }
-        if (state.unspentRefs.containsAll(state.bufferedInputs)) {
-            // Buffer satisfied
-            results.addAll(state.bufferedTopologicalSort.complete())
-            state.unspentRefs.removeAll(state.bufferedInputs)
-            state.unspentRefs.addAll(state.bufferedOutputs)
-            state = State(
-                    unspentRefs = state.unspentRefs,
-                    bufferedTopologicalSort = TopologicalSort(),
-                    bufferedInputs = HashSet(),
-                    bufferedOutputs = HashSet()
-            )
-            results
-        } else {
-            // Buffer not satisfied
-            state = State(
-                    unspentRefs = state.unspentRefs,
-                    bufferedTopologicalSort = state.bufferedTopologicalSort,
-                    bufferedInputs = state.bufferedInputs,
-                    bufferedOutputs = state.bufferedOutputs
-            )
-            results
-        }
-    }
-}
core/src/main/kotlin/net/corda/core/internal/TransactionUtils.kt
@@ -229,3 +229,6 @@ fun isAttachmentTrusted(attachment: Attachment, service: AttachmentStorage?): Bo
         false
     }
 }
+
+val SignedTransaction.dependencies: Set<SecureHash>
+    get() = (inputs.asSequence() + references.asSequence()).map { it.txhash }.toSet()
node/src/main/kotlin/net/corda/node/services/DbTransactionsResolver.kt (new file)
@@ -0,0 +1,204 @@
+package net.corda.node.services
+
+import co.paralleluniverse.fibers.Suspendable
+import net.corda.core.crypto.SecureHash
+import net.corda.core.flows.FlowLogic
+import net.corda.core.internal.FetchTransactionsFlow
+import net.corda.core.internal.ResolveTransactionsFlow
+import net.corda.core.internal.TransactionsResolver
+import net.corda.core.internal.dependencies
+import net.corda.core.node.StatesToRecord
+import net.corda.core.transactions.SignedTransaction
+import net.corda.core.utilities.debug
+import net.corda.core.utilities.seconds
+import net.corda.node.services.api.WritableTransactionStorage
+import java.util.*
+
+private const val IN_MEMORY_RESOLUTION_LIMIT_PROP_NAME = "net.corda.node.dbtransactionsresolver.InMemoryResolutionLimit"
+
+class DbTransactionsResolver(private val flow: ResolveTransactionsFlow) : TransactionsResolver {
+    companion object {
+        private val MAX_CHECKPOINT_RESOLUTION: Int = Integer.getInteger(IN_MEMORY_RESOLUTION_LIMIT_PROP_NAME, 0)
+    }
+
+    private var downloadedTxs: MutableMap<SecureHash, SignedTransaction>? = HashMap()
+    private var sortedDependencies: List<SecureHash>? = null
+    private val logger = flow.logger
+
+    @Suspendable
+    override fun downloadDependencies() {
+        logger.debug { "Downloading dependencies for transactions ${flow.txHashes}" }
+        val transactionStorage = flow.serviceHub.validatedTransactions as WritableTransactionStorage
+
+        // Maintain a work queue of all hashes to load/download, initialised with our starting set. Then do a breadth
+        // first traversal across the dependency graph.
+        //
+        // TODO: This approach has two problems. Analyze and resolve them:
+        //
+        // (1) This flow leaks private data. If you download a transaction and then do NOT request a
+        // dependency, it means you already have it, which in turn means you must have been involved with it before
+        // somehow, either in the tx itself or in any following spend of it. If there were no following spends, then
+        // your peer knows for sure that you were involved ... this is bad! The only obvious ways to fix this are
+        // something like onion routing of requests, secure hardware, or both.
+        //
+        // (2) If the identity service changes the assumed identity of one of the public keys, it's possible
+        // that the "tx in db is valid" invariant is violated if one of the contracts checks the identity! Should
+        // the db contain the identities that were resolved when the transaction was first checked, or should we
+        // accept this kind of change is possible? Most likely solution is for identity data to be an attachment.
+
+        val nextRequests = LinkedHashSet<SecureHash>(flow.txHashes) // Keep things unique but ordered, for unit test stability.
+        val topologicalSort = TopologicalSort()
+
+        while (nextRequests.isNotEmpty()) {
+            // Don't re-download the same tx when we haven't verified it yet but it's referenced multiple times in the
+            // graph we're traversing.
+            nextRequests.removeAll(topologicalSort.transactionIds)
+            if (nextRequests.isEmpty()) {
+                // Done early.
+                break
+            }
+
+            // Request the standalone transaction data (which may refer to things we don't yet have).
+            val requestedTxs = fetchRequiredTransactions(nextRequests)
+
+            // When acquiring the write locks for the transaction chain, it is important that all required locks are acquired in the same
+            // order when recording both verified and unverified transactions. In the verified case, the transactions must be recorded in
+            // back chain order (i.e. oldest first), so this must also happen for unverified transactions. This sort ensures that locks are
+            // acquired in the right order in the case the transactions should be stored in the database as unverified. The main topological
+            // sort is also updated here to ensure that this contains everything that needs locking in cases where the resolver switches
+            // from checkpointing to storing unverified transactions in the database.
+            val lockingSort = TopologicalSort()
+            for (tx in requestedTxs.second) {
+                val dependencies = tx.dependencies
+                lockingSort.add(tx.id, dependencies)
+                topologicalSort.add(tx.id, dependencies)
+            }
+
+            var suspended = true
+            for (downloaded in requestedTxs.second) {
+                suspended = false
+                val dependencies = downloaded.dependencies
+                val downloadedTxs = this.downloadedTxs
+                if (downloadedTxs != null) {
+                    if (downloadedTxs.size < MAX_CHECKPOINT_RESOLUTION) {
+                        downloadedTxs[downloaded.id] = downloaded
+                    } else {
+                        logger.debug {
+                            "Resolving transaction dependencies has reached a checkpoint limit of $MAX_CHECKPOINT_RESOLUTION " +
+                                    "transactions. Switching to the node database for storing the unverified transactions."
+                        }
+                        downloadedTxs.values.forEach(transactionStorage::addUnverifiedTransaction)
+                        // This acts as both a flag that we've switched over to storing the backchain into the db, and to remove what's been
+                        // built up in the checkpoint
+                        this.downloadedTxs = null
+                        transactionStorage.addUnverifiedTransaction(downloaded)
+                    }
+                } else {
+                    transactionStorage.addUnverifiedTransaction(downloaded)
+                }
+
+                // The write locks are only released over a suspend, so need to keep track of whether the flow has been suspended to ensure
+                // that locks are not held beyond each while loop iteration (as doing this would result in a deadlock due to claiming locks
+                // in the wrong order)
+                suspended = suspended || flow.fetchMissingAttachments(downloaded)
+                suspended = suspended || flow.fetchMissingNetworkParameters(downloaded)
+
+                // Add all input states and reference input states to the work queue.
+                nextRequests.addAll(dependencies)
+            }
+
+            // If the flow did not suspend on the last iteration of the downloaded loop above, perform a suspend here to ensure no write
+            // locks are held going into the next while loop iteration.
+            if (!suspended) {
+                FlowLogic.sleep(0.seconds)
+            }
+
+            // It's possible that the node has a transaction in storage already. Dependencies should also be present for this transaction,
+            // so just remove these IDs from the set of next requests.
+            nextRequests.removeAll(requestedTxs.first)
+        }
+
+        sortedDependencies = topologicalSort.complete()
+        logger.debug { "Downloaded ${sortedDependencies?.size ?: 0} dependencies from remote peer for transactions ${flow.txHashes}" }
+    }
+
+    override fun recordDependencies(usedStatesToRecord: StatesToRecord) {
+        logger.debug { "Recording ${this.sortedDependencies?.size ?: 0} dependencies for ${flow.txHashes.size} transactions" }
|
||||||
|
val downloadedTxs = this.downloadedTxs
|
||||||
|
val sortedDependencies = checkNotNull(this.sortedDependencies)
|
||||||
|
val transactionStorage = flow.serviceHub.validatedTransactions as WritableTransactionStorage
|
||||||
|
if (downloadedTxs != null) {
|
||||||
|
for (txId in sortedDependencies) {
|
||||||
|
val tx = downloadedTxs.getValue(txId)
|
||||||
|
// For each transaction, verify it and insert it into the database. As we are iterating over them in a
|
||||||
|
// depth-first order, we should not encounter any verification failures due to missing data. If we fail
|
||||||
|
// half way through, it's no big deal, although it might result in us attempting to re-download data
|
||||||
|
// redundantly next time we attempt verification.
|
||||||
|
tx.verify(flow.serviceHub)
|
||||||
|
flow.serviceHub.recordTransactions(usedStatesToRecord, listOf(tx))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for (txId in sortedDependencies) {
|
||||||
|
// Retrieve and delete the transaction from the unverified store.
|
||||||
|
val (tx, isVerified) = checkNotNull(transactionStorage.getTransactionInternal(txId)) {
|
||||||
|
"Somehow the unverified transaction ($txId) that we stored previously is no longer there."
|
||||||
|
}
|
||||||
|
if (!isVerified) {
|
||||||
|
tx.verify(flow.serviceHub)
|
||||||
|
flow.serviceHub.recordTransactions(usedStatesToRecord, listOf(tx))
|
||||||
|
} else {
|
||||||
|
logger.debug { "No need to record $txId as it's already been verified" }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The transactions already present in the database do not need to be checkpointed on every iteration of downloading
|
||||||
|
// dependencies for other transactions, so strip these down to just the IDs here.
|
||||||
|
@Suspendable
|
||||||
|
private fun fetchRequiredTransactions(requests: Set<SecureHash>): Pair<List<SecureHash>, List<SignedTransaction>> {
|
||||||
|
val requestedTxs = flow.subFlow(FetchTransactionsFlow(requests, flow.otherSide))
|
||||||
|
return Pair(requestedTxs.fromDisk.map { it.id }, requestedTxs.downloaded)
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Provides a way to topologically sort SignedTransactions represented just their [SecureHash] IDs. This means that given any two transactions
|
||||||
|
* T1 and T2 in the list returned by [complete] if T1 is a dependency of T2 then T1 will occur earlier than T2.
|
||||||
|
*/
|
||||||
|
class TopologicalSort {
|
||||||
|
private val forwardGraph = HashMap<SecureHash, MutableSet<SecureHash>>()
|
||||||
|
val transactionIds = LinkedHashSet<SecureHash>()
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add a transaction to the to-be-sorted set of transactions.
|
||||||
|
* @param txId The ID of the transaction.
|
||||||
|
* @param dependentIds the IDs of all the transactions [txId] depends on.
|
||||||
|
*/
|
||||||
|
fun add(txId: SecureHash, dependentIds: Set<SecureHash>) {
|
||||||
|
require(transactionIds.add(txId)) { "Transaction ID $txId already seen" }
|
||||||
|
dependentIds.forEach {
|
||||||
|
// Note that we use a LinkedHashSet here to make the traversal deterministic (as long as the input list is).
|
||||||
|
forwardGraph.computeIfAbsent(it) { LinkedHashSet() }.add(txId)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Return the sorted list of transaction IDs.
|
||||||
|
*/
|
||||||
|
fun complete(): List<SecureHash> {
|
||||||
|
val visited = HashSet<SecureHash>(transactionIds.size)
|
||||||
|
val result = ArrayList<SecureHash>(transactionIds.size)
|
||||||
|
|
||||||
|
fun visit(txId: SecureHash) {
|
||||||
|
if (visited.add(txId)) {
|
||||||
|
forwardGraph[txId]?.forEach(::visit)
|
||||||
|
result += txId
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
transactionIds.forEach(::visit)
|
||||||
|
|
||||||
|
return result.apply(Collections::reverse)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
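Note: the resolver above keeps at most MAX_CHECKPOINT_RESOLUTION transactions in the flow checkpoint before switching to unverified storage in the database; that limit is read once from the net.corda.node.dbtransactionsresolver.InMemoryResolutionLimit system property and defaults to 0, so for example -Dnet.corda.node.dbtransactionsresolver.InMemoryResolutionLimit=500 on the node JVM would allow up to 500 checkpointed transactions first. The sketch below is a standalone, dependency-free model of the TopologicalSort defined above, using plain String IDs in place of SecureHash so it runs without corda-core on the classpath; it is illustration only and is not part of this commit.

import java.util.Collections

// Minimal model of TopologicalSort: forwardGraph maps a dependency to the transactions that use it,
// and complete() returns IDs with every dependency earlier than the transactions that depend on it.
class SimpleTopologicalSort {
    private val forwardGraph = HashMap<String, MutableSet<String>>()
    private val transactionIds = LinkedHashSet<String>()

    fun add(txId: String, dependentIds: Set<String>) {
        require(transactionIds.add(txId)) { "Transaction ID $txId already seen" }
        dependentIds.forEach { forwardGraph.computeIfAbsent(it) { LinkedHashSet() }.add(txId) }
    }

    fun complete(): List<String> {
        val visited = HashSet<String>()
        val result = ArrayList<String>()
        fun visit(txId: String) {
            if (visited.add(txId)) {
                forwardGraph[txId]?.forEach(::visit)
                result += txId
            }
        }
        transactionIds.forEach(::visit)
        return result.apply(Collections::reverse)
    }
}

fun main() {
    // spend depends on transfer, which depends on issuance (oldest transaction in the backchain).
    val sort = SimpleTopologicalSort()
    sort.add("spend", setOf("transfer"))
    sort.add("transfer", setOf("issuance"))
    sort.add("issuance", emptySet())
    // Prints [issuance, transfer, spend]: dependencies always come before the transactions that use them.
    println(sort.complete())
}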
@@ -0,0 +1,142 @@
package net.corda.node.services

import co.paralleluniverse.fibers.Suspendable
import net.corda.core.crypto.SecureHash
import net.corda.core.flows.FlowException
import net.corda.core.internal.FetchTransactionsFlow
import net.corda.core.internal.ResolveTransactionsFlow
import net.corda.core.internal.TransactionsResolver
import net.corda.core.internal.dependencies
import net.corda.core.node.StatesToRecord
import net.corda.core.transactions.SignedTransaction
import net.corda.core.utilities.debug
import java.util.*

class InMemoryTransactionsResolver(private val flow: ResolveTransactionsFlow) : TransactionsResolver {
    companion object {
        /** The maximum number of transactions this flow will try to download before bailing out. */
        var transactionCountLimit = 5000
            set(value) {
                require(value > 0) { "$value is not a valid count limit" }
                field = value
            }
    }

    private var sortedDependencies: List<SignedTransaction>? = null
    private val logger = flow.logger

    @Suspendable
    override fun downloadDependencies() {
        // Maintain a work queue of all hashes to load/download, initialised with our starting set. Then do a breadth
        // first traversal across the dependency graph.
        //
        // TODO: This approach has two problems. Analyze and resolve them:
        //
        // (1) This flow leaks private data. If you download a transaction and then do NOT request a
        // dependency, it means you already have it, which in turn means you must have been involved with it before
        // somehow, either in the tx itself or in any following spend of it. If there were no following spends, then
        // your peer knows for sure that you were involved ... this is bad! The only obvious ways to fix this are
        // something like onion routing of requests, secure hardware, or both.
        //
        // (2) If the identity service changes the assumed identity of one of the public keys, it's possible
        // that the "tx in db is valid" invariant is violated if one of the contracts checks the identity! Should
        // the db contain the identities that were resolved when the transaction was first checked, or should we
        // accept this kind of change is possible? Most likely solution is for identity data to be an attachment.

        logger.debug { "Downloading dependencies for transactions ${flow.txHashes}" }
        val nextRequests = LinkedHashSet<SecureHash>(flow.txHashes) // Keep things unique but ordered, for unit test stability.
        val topologicalSort = TopologicalSort()
        val seenIds = HashSet<SecureHash>()

        while (nextRequests.isNotEmpty()) {
            // Don't re-download the same tx when we haven't verified it yet but it's referenced multiple times in the
            // graph we're traversing.
            nextRequests.removeAll(seenIds)
            if (nextRequests.isEmpty()) {
                // Done early.
                break
            }

            // Request the standalone transaction data (which may refer to things we don't yet have).
            val requestedTxs = flow.subFlow(FetchTransactionsFlow(nextRequests, flow.otherSide))
            val freshDownloads = requestedTxs.downloaded
            val existingTxs = requestedTxs.fromDisk

            for (downloaded in freshDownloads) {
                require(seenIds.add(downloaded.id)) { "Transaction ID ${downloaded.id} already seen" }
                if (seenIds.size > transactionCountLimit) {
                    throw ExcessivelyLargeTransactionGraph()
                }

                val dependencies = downloaded.dependencies
                topologicalSort.add(downloaded, dependencies)

                flow.fetchMissingAttachments(downloaded)
                flow.fetchMissingNetworkParameters(downloaded)

                // Add all input states and reference input states to the work queue.
                nextRequests.addAll(dependencies)
            }

            // It's possible that the node has a transaction in storage already. Dependencies should also be present for this transaction,
            // so just remove these IDs from the set of next requests.
            nextRequests.removeAll(existingTxs.map { it.id })
        }

        sortedDependencies = topologicalSort.complete()
        logger.debug { "Downloaded ${sortedDependencies?.size ?: 0} dependencies from remote peer for transactions ${flow.txHashes}" }
    }

    override fun recordDependencies(usedStatesToRecord: StatesToRecord) {
        logger.debug { "Recording ${this.sortedDependencies?.size ?: 0} dependencies for ${flow.txHashes.size} transactions" }
        for (tx in checkNotNull(sortedDependencies)) {
            // For each transaction, verify it and insert it into the database. As we are iterating over them in a
            // depth-first order, we should not encounter any verification failures due to missing data. If we fail
            // half way through, it's no big deal, although it might result in us attempting to re-download data
            // redundantly next time we attempt verification.
            tx.verify(flow.serviceHub)
            flow.serviceHub.recordTransactions(usedStatesToRecord, listOf(tx))
        }
    }

    class ExcessivelyLargeTransactionGraph : FlowException()

    /**
     * Provides a way to topologically sort SignedTransactions. This means that given any two transactions T1 and T2 in the
     * list returned by [complete], if T1 is a dependency of T2 then T1 will occur earlier than T2.
     */
    class TopologicalSort {
        private val forwardGraph = HashMap<SecureHash, MutableSet<SignedTransaction>>()
        private val transactions = ArrayList<SignedTransaction>()

        /**
         * Add a transaction to the to-be-sorted set of transactions.
         */
        fun add(stx: SignedTransaction, dependencies: Set<SecureHash>) {
            dependencies.forEach {
                // Note that we use a LinkedHashSet here to make the traversal deterministic (as long as the input list is).
                forwardGraph.computeIfAbsent(it) { LinkedHashSet() }.add(stx)
            }
            transactions += stx
        }

        /**
         * Return the sorted list of signed transactions.
         */
        fun complete(): List<SignedTransaction> {
            val visited = HashSet<SecureHash>(transactions.size)
            val result = ArrayList<SignedTransaction>(transactions.size)

            fun visit(transaction: SignedTransaction) {
                if (visited.add(transaction.id)) {
                    forwardGraph[transaction.id]?.forEach(::visit)
                    result += transaction
                }
            }

            transactions.forEach(::visit)

            return result.apply(Collections::reverse)
        }
    }
}
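Note: transactionCountLimit guards the in-memory resolver against unbounded backchains; once more than this many transactions have been seen, resolution aborts with ExcessivelyLargeTransactionGraph. Below is a hypothetical, self-contained illustration of the same validated-setter idiom used for that companion-object property; the ResolutionLimits object is invented purely for the example and is not part of this commit.

// Standalone sketch of a property whose setter rejects non-positive values, as transactionCountLimit does.
object ResolutionLimits {
    var transactionCountLimit = 5000
        set(value) {
            require(value > 0) { "$value is not a valid count limit" }
            field = value
        }
}

fun main() {
    ResolutionLimits.transactionCountLimit = 10_000        // accepted
    runCatching { ResolutionLimits.transactionCountLimit = 0 }
            .onFailure { println(it.message) }              // prints "0 is not a valid count limit"
}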
@@ -5,13 +5,11 @@ import net.corda.core.context.InvocationContext
 import net.corda.core.crypto.SecureHash
 import net.corda.core.flows.FlowLogic
 import net.corda.core.flows.StateMachineRunId
-import net.corda.core.internal.FlowStateMachine
-import net.corda.core.internal.NamedCacheFactory
+import net.corda.core.internal.*
 import net.corda.core.internal.concurrent.OpenFuture
 import net.corda.core.messaging.DataFeed
 import net.corda.core.messaging.StateMachineTransactionMapping
 import net.corda.core.node.NodeInfo
-import net.corda.core.node.ServiceHub
 import net.corda.core.node.StatesToRecord
 import net.corda.core.node.services.NetworkMapCache
 import net.corda.core.node.services.NetworkMapCacheBase
@@ -20,6 +18,8 @@ import net.corda.core.transactions.SignedTransaction
 import net.corda.core.utilities.contextLogger
 import net.corda.node.internal.InitiatedFlowFactory
 import net.corda.node.internal.cordapp.CordappProviderInternal
+import net.corda.node.services.DbTransactionsResolver
+import net.corda.node.services.InMemoryTransactionsResolver
 import net.corda.node.services.config.NodeConfiguration
 import net.corda.node.services.messaging.MessagingService
 import net.corda.node.services.network.NetworkMapUpdater
@@ -49,11 +49,22 @@ interface NetworkMapCacheInternal : NetworkMapCache, NetworkMapCacheBase {
     fun removeNode(node: NodeInfo)
 }
 
-interface ServiceHubInternal : ServiceHub {
+interface ServiceHubInternal : ServiceHubCoreInternal {
     companion object {
         private val log = contextLogger()
 
-        fun recordTransactions(statesToRecord: StatesToRecord, txs: Iterable<SignedTransaction>,
+        private fun topologicalSort(transactions: Iterable<SignedTransaction>): List<SignedTransaction> {
+            if ((transactions as? List)?.size == 1) return transactions
+            val sort = InMemoryTransactionsResolver.TopologicalSort()
+            for (tx in transactions) {
+                sort.add(tx, tx.dependencies)
+            }
+            return sort.complete()
+        }
+
+        // TODO Why is txs an Iterable and not a Collection??
+        fun recordTransactions(statesToRecord: StatesToRecord,
+                               txs: Iterable<SignedTransaction>,
                                validatedTransactions: WritableTransactionStorage,
                                stateMachineRecordedTransactionMapping: StateMachineRecordedTransactionMappingStorage,
                                vaultService: VaultServiceInternal,
@@ -62,14 +73,15 @@ interface ServiceHubInternal : ServiceHub
             database.transaction {
                 require(txs.any()) { "No transactions passed in for recording" }
 
+                val orderedTxs = topologicalSort(txs)
                 // Divide transactions into those seen before and those that are new to this node if ALL_VISIBLE states are being recorded.
                 // This allows the node to re-record transactions that have previously only been seen at the ONLY_RELEVANT level. Note that
                 // for transactions being recorded at ONLY_RELEVANT, if this transaction has been seen before its outputs should already
                 // have been recorded at ONLY_RELEVANT, so there shouldn't be anything to re-record here.
                 val (recordedTransactions, previouslySeenTxs) = if (statesToRecord != StatesToRecord.ALL_VISIBLE) {
-                    Pair(txs.filter { validatedTransactions.addTransaction(it) }, emptyList())
+                    Pair(orderedTxs.filter { validatedTransactions.addTransaction(it) }, emptyList())
                 } else {
-                    txs.partition { validatedTransactions.addTransaction(it) }
+                    orderedTxs.partition { validatedTransactions.addTransaction(it) }
                 }
                 val stateMachineRunId = FlowStateMachineImpl.currentStateMachine()?.id
                 if (stateMachineRunId != null) {
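Note: in the reworked recordTransactions, the incoming transactions are first ordered with InMemoryTransactionsResolver.TopologicalSort and then split by whether addTransaction reports them as newly recorded. The following standalone sketch (plain strings and a mutable set standing in for the node's transaction store, not Corda code) shows the partition semantics used above: entries whose add call returns true are "recorded", the rest were "previously seen".

fun main() {
    // A plain mutable set stands in for the transaction store; add() returns false for duplicates,
    // just as addTransaction() returns false for a transaction that is already recorded as verified.
    val alreadyStored = mutableSetOf("tx-1")
    val orderedTxs = listOf("tx-1", "tx-2", "tx-3") // assume these are already in dependency order
    val (recordedTransactions, previouslySeenTxs) = orderedTxs.partition { alreadyStored.add(it) }
    println(recordedTransactions) // [tx-2, tx-3]
    println(previouslySeenTxs)    // [tx-1]
}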
@@ -139,12 +151,17 @@ interface ServiceHubInternal : ServiceHub
     val nodeProperties: NodePropertiesStore
     val networkMapUpdater: NetworkMapUpdater
     override val cordappProvider: CordappProviderInternal
+
+    fun getFlowFactory(initiatingFlowClass: Class<out FlowLogic<*>>): InitiatedFlowFactory<*>?
+    val cacheFactory: NamedCacheFactory
+
     override fun recordTransactions(statesToRecord: StatesToRecord, txs: Iterable<SignedTransaction>) {
         recordTransactions(statesToRecord, txs, validatedTransactions, stateMachineRecordedTransactionMapping, vaultService, database)
     }
 
-    fun getFlowFactory(initiatingFlowClass: Class<out FlowLogic<*>>): InitiatedFlowFactory<*>?
-    val cacheFactory: NamedCacheFactory
+    override fun createTransactionsResolver(flow: ResolveTransactionsFlow): TransactionsResolver {
+        return DbTransactionsResolver(flow)
+    }
 }
 
 interface FlowStarter {
@@ -177,18 +194,29 @@ interface FlowStarter
 }
 
 interface StartedNodeServices : ServiceHubInternal, FlowStarter
 
 /**
  * Thread-safe storage of transactions.
  */
 interface WritableTransactionStorage : TransactionStorage {
     /**
-     * Add a new transaction to the store. If the store already has a transaction with the same id it will be
-     * overwritten.
+     * Add a new *verified* transaction to the store, or convert the existing unverified transaction into a verified one.
      * @param transaction The transaction to be recorded.
-     * @return true if the transaction was recorded successfully, false if it was already recorded.
+     * @return true if the transaction was recorded as a *new verified* transaction, false if the transaction already exists.
      */
     // TODO: Throw an exception if trying to add a transaction with fewer signatures than an existing entry.
     fun addTransaction(transaction: SignedTransaction): Boolean
+
+    /**
+     * Add a new *unverified* transaction to the store.
+     */
+    fun addUnverifiedTransaction(transaction: SignedTransaction)
+
+    /**
+     * Return the transaction with the given ID from the store, and a flag of whether it's verified. Returns null if no transaction with the
+     * ID exists.
+     */
+    fun getTransactionInternal(id: SecureHash): Pair<SignedTransaction, Boolean>?
 }
 
 /**
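Note: the extended WritableTransactionStorage above distinguishes unverified from verified records. addUnverifiedTransaction stores the raw download, addTransaction later promotes it (returning true only the first time the transaction becomes verified), and getTransactionInternal exposes the verification flag. A toy in-memory model of that contract, written only for illustration with String payloads instead of SignedTransaction and not taken from the node, might look like this:

// Self-contained model of the unverified -> verified promotion described in the interface docs above.
class ToyTransactionStore {
    private enum class Status { UNVERIFIED, VERIFIED }
    private val store = HashMap<String, Pair<String, Status>>()   // txId -> (payload, status)

    fun addUnverifiedTransaction(txId: String, payload: String) {
        store.putIfAbsent(txId, payload to Status.UNVERIFIED)
    }

    // Returns true only when the transaction becomes verified for the first time.
    fun addTransaction(txId: String, payload: String): Boolean {
        val existing = store[txId]
        return when {
            existing == null -> { store[txId] = payload to Status.VERIFIED; true }
            existing.second == Status.UNVERIFIED -> { store[txId] = existing.first to Status.VERIFIED; true }
            else -> false
        }
    }

    fun getTransactionInternal(txId: String): Pair<String, Boolean>? =
            store[txId]?.let { it.first to (it.second == Status.VERIFIED) }
}

fun main() {
    val store = ToyTransactionStore()
    store.addUnverifiedTransaction("tx1", "payload")
    println(store.getTransactionInternal("tx1"))    // (payload, false)
    println(store.addTransaction("tx1", "payload")) // true: promoted to verified
    println(store.addTransaction("tx1", "payload")) // false: already verified
}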
@@ -14,26 +14,20 @@ import net.corda.core.serialization.internal.effectiveSerializationEnv
 import net.corda.core.toFuture
 import net.corda.core.transactions.CoreTransaction
 import net.corda.core.transactions.SignedTransaction
+import net.corda.core.utilities.contextLogger
+import net.corda.core.utilities.debug
 import net.corda.node.services.api.WritableTransactionStorage
 import net.corda.node.services.statemachine.FlowStateMachineImpl
 import net.corda.node.utilities.AppendOnlyPersistentMapBase
 import net.corda.node.utilities.WeightBasedAppendOnlyPersistentMap
-import net.corda.nodeapi.internal.persistence.CordaPersistence
-import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX
-import net.corda.nodeapi.internal.persistence.bufferUntilDatabaseCommit
-import net.corda.nodeapi.internal.persistence.wrapWithDatabaseTransaction
-import org.apache.commons.lang3.ArrayUtils.EMPTY_BYTE_ARRAY
+import net.corda.nodeapi.internal.persistence.*
+import net.corda.serialization.internal.CordaSerializationEncoding.SNAPPY
 import rx.Observable
 import rx.subjects.PublishSubject
+import java.util.*
 import javax.persistence.*
 import kotlin.streams.toList
 
-// cache value type to just store the immutable bits of a signed transaction plus conversion helpers
-typealias TxCacheValue = Pair<SerializedBytes<CoreTransaction>, List<TransactionSignature>>
-
-fun TxCacheValue.toSignedTx() = SignedTransaction(this.first, this.second)
-fun SignedTransaction.toTxCacheValue() = TxCacheValue(this.txBits, this.sigs)
-
 class DBTransactionStorage(private val database: CordaPersistence, cacheFactory: NamedCacheFactory) : WritableTransactionStorage, SingletonSerializeAsToken() {
 
     @Entity
@@ -41,17 +35,55 @@ class DBTransactionStorage(private val database: CordaPersistence, cacheFactory:
     class DBTransaction(
             @Id
            @Column(name = "tx_id", length = 64, nullable = false)
-            var txId: String = "",
+            val txId: String,
 
             @Column(name = "state_machine_run_id", length = 36, nullable = true)
-            var stateMachineRunId: String? = "",
+            val stateMachineRunId: String?,
 
             @Lob
             @Column(name = "transaction_value", nullable = false)
-            var transaction: ByteArray = EMPTY_BYTE_ARRAY
+            val transaction: ByteArray,
+
+            @Column(name = "status", nullable = false)
+            val status: Char
     )
 
+    private enum class TransactionStatus {
+        UNVERIFIED,
+        VERIFIED;
+
+        fun toDatabaseChar(): Char {
+            return when (this) {
+                UNVERIFIED -> 'U'
+                VERIFIED -> 'V'
+            }
+        }
+
+        fun isVerified(): Boolean {
+            return this == VERIFIED
+        }
+
+        companion object {
+            fun fromDatabaseChar(databaseValue: Char): TransactionStatus {
+                return when (databaseValue) {
+                    'V' -> VERIFIED
+                    'U' -> UNVERIFIED
+                    else -> throw UnexpectedStatusValueException(databaseValue)
+                }
+            }
+        }
+
+        private class UnexpectedStatusValueException(status: Char) : Exception("Found unexpected status value $status in transaction store")
+    }
+
     private companion object {
+        // Rough estimate for the average of a public key and the transaction metadata - hard to get exact figures here,
+        // as public keys can vary in size a lot, and if someone else is holding a reference to the key, it won't add
+        // to the memory pressure at all here.
+        private const val transactionSignatureOverheadEstimate = 1024
+
+        private val logger = contextLogger()
+
         private fun contextToUse(): SerializationContext {
             return if (effectiveSerializationEnv.serializationFactory.currentContext?.useCase == SerializationContext.UseCase.Storage) {
                 effectiveSerializationEnv.serializationFactory.currentContext!!
@@ -65,46 +97,94 @@ class DBTransactionStorage(private val database: CordaPersistence, cacheFactory:
             return WeightBasedAppendOnlyPersistentMap<SecureHash, TxCacheValue, DBTransaction, String>(
                     cacheFactory = cacheFactory,
                     name = "DBTransactionStorage_transactions",
-                    toPersistentEntityKey = { it.toString() },
+                    toPersistentEntityKey = SecureHash::toString,
                     fromPersistentEntity = {
-                        Pair(SecureHash.parse(it.txId),
-                                it.transaction.deserialize<SignedTransaction>(context = contextToUse())
-                                        .toTxCacheValue())
+                        SecureHash.parse(it.txId) to TxCacheValue(
+                                it.transaction.deserialize(context = contextToUse()),
+                                TransactionStatus.fromDatabaseChar(it.status))
                     },
                     toPersistentEntity = { key: SecureHash, value: TxCacheValue ->
-                        DBTransaction().apply {
-                            txId = key.toString()
-                            stateMachineRunId = FlowStateMachineImpl.currentStateMachine()?.id?.uuid?.toString()
-                            transaction = value.toSignedTx().serialize(context = contextToUse()).bytes
-                        }
+                        DBTransaction(
+                                txId = key.toString(),
+                                stateMachineRunId = FlowStateMachineImpl.currentStateMachine()?.id?.uuid?.toString(),
+                                transaction = value.toSignedTx().serialize(context = contextToUse().withEncoding(SNAPPY)).bytes,
+                                status = value.status.toDatabaseChar()
+                        )
                     },
                     persistentEntityClass = DBTransaction::class.java,
                     weighingFunc = { hash, tx -> hash.size + weighTx(tx) }
             )
         }
 
-        // Rough estimate for the average of a public key and the transaction metadata - hard to get exact figures here,
-        // as public keys can vary in size a lot, and if someone else is holding a reference to the key, it won't add
-        // to the memory pressure at all here.
-        private const val transactionSignatureOverheadEstimate = 1024
-
         private fun weighTx(tx: AppendOnlyPersistentMapBase.Transactional<TxCacheValue>): Int {
             val actTx = tx.peekableValue ?: return 0
-            return actTx.second.sumBy { it.size + transactionSignatureOverheadEstimate } + actTx.first.size
+            return actTx.sigs.sumBy { it.size + transactionSignatureOverheadEstimate } + actTx.txBits.size
        }
    }
 
    private val txStorage = ThreadBox(createTransactionsMap(cacheFactory))
 
-    override fun addTransaction(transaction: SignedTransaction): Boolean = database.transaction {
+    private fun updateTransaction(txId: SecureHash): Boolean {
+        val session = currentDBSession()
+        val criteriaBuilder = session.criteriaBuilder
+        val criteriaUpdate = criteriaBuilder.createCriteriaUpdate(DBTransaction::class.java)
+        val updateRoot = criteriaUpdate.from(DBTransaction::class.java)
+        criteriaUpdate.set(DBTransaction::status.name, TransactionStatus.VERIFIED.toDatabaseChar())
+        criteriaUpdate.where(criteriaBuilder.and(
+                criteriaBuilder.equal(updateRoot.get<String>(DBTransaction::txId.name), txId.toString()),
+                criteriaBuilder.equal(updateRoot.get<Boolean>(DBTransaction::status.name), TransactionStatus.UNVERIFIED.toDatabaseChar())
+        ))
+        val update = session.createQuery(criteriaUpdate)
+        val rowsUpdated = update.executeUpdate()
+        return rowsUpdated != 0
+    }
+
+    override fun addTransaction(transaction: SignedTransaction): Boolean {
+        return database.transaction {
             txStorage.locked {
-                addWithDuplicatesAllowed(transaction.id, transaction.toTxCacheValue()).apply {
-                    updatesPublisher.bufferUntilDatabaseCommit().onNext(transaction)
+                val cachedValue = TxCacheValue(transaction, TransactionStatus.VERIFIED)
+                val addedOrUpdated = addOrUpdate(transaction.id, cachedValue) { k, _ -> updateTransaction(k) }
+                if (addedOrUpdated) {
+                    logger.debug { "Transaction ${transaction.id} has been recorded as verified" }
+                    onNewTx(transaction)
+                } else {
+                    logger.debug { "Transaction ${transaction.id} is already recorded as verified, so no need to re-record" }
+                    false
                }
            }
        }
    }
 
-    override fun getTransaction(id: SecureHash): SignedTransaction? = database.transaction { txStorage.content[id]?.toSignedTx() }
+    private fun onNewTx(transaction: SignedTransaction): Boolean {
+        updatesPublisher.bufferUntilDatabaseCommit().onNext(transaction)
+        return true
+    }
+
+    override fun getTransaction(id: SecureHash): SignedTransaction? {
+        return database.transaction {
+            txStorage.content[id]?.let { if (it.status.isVerified()) it.toSignedTx() else null }
+        }
+    }
+
+    override fun addUnverifiedTransaction(transaction: SignedTransaction) {
+        database.transaction {
+            txStorage.locked {
+                val cacheValue = TxCacheValue(transaction, status = TransactionStatus.UNVERIFIED)
+                val added = addWithDuplicatesAllowed(transaction.id, cacheValue)
+                if (added) {
+                    logger.debug { "Transaction ${transaction.id} recorded as unverified." }
+                } else {
+                    logger.info("Transaction ${transaction.id} already exists so no need to record.")
+                }
+            }
+        }
+    }
+
+    override fun getTransactionInternal(id: SecureHash): Pair<SignedTransaction, Boolean>? {
+        return database.transaction {
+            txStorage.content[id]?.let { it.toSignedTx() to it.status.isVerified() }
+        }
+    }
+
    private val updatesPublisher = PublishSubject.create<SignedTransaction>().toSerialized()
    override val updates: Observable<SignedTransaction> = updatesPublisher.wrapWithDatabaseTransaction()
@@ -120,11 +200,11 @@ class DBTransactionStorage(private val database: CordaPersistence, cacheFactory:
     override fun trackTransaction(id: SecureHash): CordaFuture<SignedTransaction> {
         return database.transaction {
             txStorage.locked {
-                val existingTransaction = get(id)
+                val existingTransaction = getTransaction(id)
                 if (existingTransaction == null) {
                     updates.filter { it.id == id }.toFuture()
                 } else {
-                    doneFuture(existingTransaction.toSignedTx())
+                    doneFuture(existingTransaction)
                 }
             }
         }
@@ -133,5 +213,22 @@ class DBTransactionStorage(private val database: CordaPersistence, cacheFactory:
     @VisibleForTesting
     val transactions: List<SignedTransaction> get() = database.transaction { snapshot() }
 
-    private fun snapshot() = txStorage.content.allPersisted.use { it.map { it.second.toSignedTx() }.toList() }
+    private fun snapshot(): List<SignedTransaction> {
+        return txStorage.content.allPersisted.use {
+            it.filter { it.second.status.isVerified() }.map { it.second.toSignedTx() }.toList()
+        }
+    }
+
+    // Cache value type to just store the immutable bits of a signed transaction plus conversion helpers
+    private data class TxCacheValue(
+            val txBits: SerializedBytes<CoreTransaction>,
+            val sigs: List<TransactionSignature>,
+            val status: TransactionStatus
+    ) {
+        constructor(stx: SignedTransaction, status: TransactionStatus) : this(
+                stx.txBits,
+                Collections.unmodifiableList(stx.sigs),
+                status)
+        fun toSignedTx() = SignedTransaction(txBits, sigs)
+    }
 }
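Note: DBTransactionStorage persists the verification state as a single character in the new status column ('U' for unverified, 'V' for verified, with the migration defaulting existing rows to 'V'). Below is a small standalone sketch of that round trip; TxStatus is a stand-in enum written for the example, not the node's private enum.

// Illustration of mapping an enum to a one-character database value and back, rejecting unknown values.
enum class TxStatus(val dbChar: Char) {
    UNVERIFIED('U'),
    VERIFIED('V');

    companion object {
        fun fromDatabaseChar(c: Char): TxStatus =
                values().firstOrNull { it.dbChar == c }
                        ?: throw IllegalArgumentException("Found unexpected status value $c in transaction store")
    }
}

fun main() {
    println(TxStatus.VERIFIED.dbChar)               // V
    println(TxStatus.fromDatabaseChar('U'))         // UNVERIFIED
    runCatching { TxStatus.fromDatabaseChar('X') }  // unknown characters are rejected
            .onFailure { println(it.message) }
}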
@@ -4,7 +4,7 @@ import co.paralleluniverse.fibers.Fiber
 import co.paralleluniverse.fibers.Suspendable
 import com.codahale.metrics.*
 import net.corda.core.internal.concurrent.thenMatch
-import net.corda.core.serialization.*
+import net.corda.core.serialization.SerializedBytes
 import net.corda.core.serialization.internal.CheckpointSerializationContext
 import net.corda.core.serialization.internal.checkpointSerialize
 import net.corda.core.utilities.contextLogger
@@ -15,7 +15,6 @@ import net.corda.nodeapi.internal.persistence.contextDatabase
 import net.corda.nodeapi.internal.persistence.contextTransaction
 import net.corda.nodeapi.internal.persistence.contextTransactionOrNull
 import java.time.Duration
-import java.time.Instant
 import java.util.concurrent.TimeUnit
 import java.util.concurrent.atomic.AtomicLong
 
@@ -157,7 +156,7 @@ class ActionExecutorImpl(
     private fun executeSleepUntil(action: Action.SleepUntil) {
         // TODO introduce explicit sleep state + wakeup event instead of relying on Fiber.sleep. This is so shutdown
         // conditions may "interrupt" the sleep instead of waiting until wakeup.
-        val duration = Duration.between(Instant.now(), action.time)
+        val duration = Duration.between(services.clock.instant(), action.time)
         Fiber.sleep(duration.toNanos(), TimeUnit.NANOSECONDS)
     }
 
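Note: the sleep fix above derives the wake-up delay from the node's injected clock (services.clock) rather than Instant.now(), so a test clock can drive the calculation deterministically. A minimal, self-contained illustration of that idea follows; sleepDuration is a hypothetical helper written for this sketch, not the node's API.

import java.time.Clock
import java.time.Duration
import java.time.Instant
import java.time.ZoneOffset

// Compute the sleep duration from an injectable Clock instead of the wall clock.
fun sleepDuration(clock: Clock, wakeUpAt: Instant): Duration = Duration.between(clock.instant(), wakeUpAt)

fun main() {
    val fixed = Clock.fixed(Instant.parse("2019-01-01T00:00:00Z"), ZoneOffset.UTC)
    // With a fixed test clock the result is deterministic: exactly PT30S.
    println(sleepDuration(fixed, Instant.parse("2019-01-01T00:00:30Z")))
}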
@@ -7,7 +7,6 @@ import net.corda.core.utilities.contextLogger
 import net.corda.nodeapi.internal.persistence.DatabaseTransaction
 import net.corda.nodeapi.internal.persistence.contextTransaction
 import net.corda.nodeapi.internal.persistence.currentDBSession
-import java.lang.ref.WeakReference
 import java.util.*
 import java.util.concurrent.ConcurrentHashMap
 import java.util.concurrent.atomic.AtomicBoolean
@@ -58,39 +57,59 @@ abstract class AppendOnlyPersistentMapBase<K, V, E, out EK>(
 
     private fun set(key: K, value: V, logWarning: Boolean, store: (K, V) -> V?): Boolean {
         // Will be set to true if store says it isn't in the database.
-        var isUnique = false
+        var wasWritten = false
         cache.asMap().compute(key) { _, oldValueInCache ->
             // Always write to the database, unless we can see it's already committed.
             when (oldValueInCache) {
                 is Transactional.InFlight<*, V> -> {
                     // Someone else is writing, so store away!
-                    val oldValueInDB = store(key, value)
-                    isUnique = (oldValueInDB == null)
+                    val retainedValueFromDB = store(key, value)
+                    wasWritten = (retainedValueFromDB == null)
+                    // If the store function claims the value is new in the DB, then either the value is brand new or updated. In this case,
+                    // update the old value in the cache with the new value. Otherwise, leave it as it was before.
+                    if (wasWritten) {
                         oldValueInCache.apply { alsoWrite(value) }
+                    } else {
+                        oldValueInCache
+                    }
+                }
+                is Transactional.Committed<V> -> if (oldValueInCache.value == value) {
+                    oldValueInCache // The value is already globally visible and cached. So do nothing since the values are always the same.
+                } else {
+                    val retainedValueFromDB = store(key, value)
+                    wasWritten = (retainedValueFromDB == null)
+                    // If the value has been updated, then wasWritten will be true. In this case, the Committed value needs to be replaced
+                    // with InFlight to indicate that this transaction has changed the value associated with this key. Note that this allows
+                    // for cases where the value passed to set differs from that in the cache, but an update function has decided that this
+                    // differing value should not be written to the database.
+                    if (wasWritten) {
+                        Transactional.InFlight(this, key, _readerValueLoader = { loadValue(key) }).apply { alsoWrite(value) }
+                    } else {
+                        oldValueInCache
+                    }
                 }
-                is Transactional.Committed<V> -> oldValueInCache // The value is already globally visible and cached. So do nothing since the values are always the same.
                 is Transactional.Unknown<*, V> -> {
                     if (oldValueInCache.isResolved && oldValueInCache.isPresent) {
                         Transactional.Committed(oldValueInCache.value)
                     } else {
                         // Unknown. Store away!
-                        val oldValueInDB = store(key, value)
-                        isUnique = (oldValueInDB == null)
-                        transactionalForStoreResult(key, value, oldValueInDB)
+                        val retainedValueInDB = store(key, value)
+                        wasWritten = (retainedValueInDB == null)
+                        transactionalForStoreResult(key, value, retainedValueInDB)
                     }
                 }
                 else -> {
                     // Missing or null. Store away!
-                    val oldValueInDB = store(key, value)
-                    isUnique = (oldValueInDB == null)
-                    transactionalForStoreResult(key, value, oldValueInDB)
+                    val retainedValueInDB = store(key, value)
+                    wasWritten = (retainedValueInDB == null)
+                    transactionalForStoreResult(key, value, retainedValueInDB)
                 }
             }
         }
-        if (logWarning && !isUnique) {
+        if (logWarning && !wasWritten) {
             log.warn("Double insert in ${this.javaClass.name} for entity class $persistentEntityClass key $key, not inserting the second time")
         }
-        return isUnique
+        return wasWritten
     }
 
     private fun transactionalForStoreResult(key: K, value: V, oldValue: V?): Transactional<V> {
@@ -131,6 +150,38 @@ abstract class AppendOnlyPersistentMapBase<K, V, E, out EK>(
         }
     }
 
+    /**
+     * Associates the specified value with the specified key in this map and persists it.
+     * This function will first attempt to update the value currently in the map using the update function. If the update function returns
+     * true, this indicates that the persisted value has been updated and the corresponding cached value should also be updated. If it
+     * returns false, then this function attempts to add the value to the database, taking into account the possibility that the value
+     * already exists but an update is undesirable.
+     *
+     * @param key The key to associate the value with
+     * @param value The value to associate to the key
+     * @param updateFn A function to calculate an update to the persisted value. This should return true if the persisted value has been
+     * updated, and false if it has not or the value is not currently present in the database.
+     * @return true if the value was added or updated, false otherwise.
+     */
+    fun addOrUpdate(key: K, value: V, updateFn: (K, V) -> Boolean): Boolean {
+        return set(key, value, logWarning = false) { k, v ->
+            val updated = updateFn(k, v)
+            if (updated) {
+                // This needs to be null to ensure that set returns true when a value is updated.
+                null
+            } else {
+                val session = currentDBSession()
+                val existingEntry = session.find(persistentEntityClass, toPersistentEntityKey(k))
+                if (existingEntry == null) {
+                    session.save(toPersistentEntity(k, v))
+                    null
+                } else {
+                    fromPersistentEntity(existingEntry).second
+                }
+            }
+        }
+    }
+
     fun putAll(entries: Map<K, V>) {
         entries.forEach {
             set(it.key, it.value)
@@ -288,17 +339,22 @@ abstract class AppendOnlyPersistentMapBase<K, V, E, out EK>(
             // retaining what could be a large memory footprint object.
             val tx = contextTransaction
             val strongKey = key
-            val weakValue = WeakReference<T>(_value)
-            val strongComitted = committed
             val strongMap = map
             if (map.addPendingKey(key, tx)) {
                 // If the transaction commits, update cache to make globally visible if we're first for this key,
                 // and then stop saying the transaction is writing the key.
                 tx.onCommit {
-                    if (strongComitted.compareAndSet(false, true)) {
-                        val dereferencedValue = weakValue.get()
-                        if (dereferencedValue != null) {
-                            strongMap.cache.put(strongKey, Committed(dereferencedValue))
+                    strongMap.cache.asMap().computeIfPresent(strongKey) { _, transactional: Transactional<T> ->
+                        if (transactional is Transactional.InFlight<*, T>) {
+                            transactional.committed.set(true)
+                            val value = transactional.peekableValue
+                            if (value != null) {
+                                Transactional.Committed(value)
+                            } else {
+                                transactional
+                            }
+                        } else {
+                            transactional
                         }
                     }
                     strongMap.removePendingKey(strongKey, tx)
@@ -345,7 +401,7 @@ abstract class AppendOnlyPersistentMapBase<K, V, E, out EK>(
 
         // The value from the perspective of the eviction algorithm of the cache. i.e. we want to reveal memory footprint to it etc.
         override val peekableValue: T?
-            get() = if (writerValueLoader.get() != _writerValueLoader) writerValueLoader.get()() else if (readerValueLoader.get() != _writerValueLoader) readerValueLoader.get()() else null
+            get() = if (writerValueLoader.get() != _writerValueLoader) writerValueLoader.get()() else if (readerValueLoader.get() != _readerValueLoader) readerValueLoader.get()() else null
     }
 }
 }
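Note: addOrUpdate above layers an update-then-insert strategy on top of set(...), where the store callback returning null means "this call wrote the value". The sketch below models that contract in isolation with a plain HashMap; ToyAppendOnlyMap and its update helper are invented for the example and are not the node's classes.

// Standalone model of the addOrUpdate contract: try the caller-supplied update first, and only
// insert when the update reports that nothing was changed. A null "retained" value signals a write.
class ToyAppendOnlyMap {
    private val backing = HashMap<Long, String>()

    fun addOrUpdate(key: Long, value: String, updateFn: (Long, String) -> Boolean): Boolean {
        val retained: String? = if (updateFn(key, value)) {
            null                               // update succeeded, so this call wrote the value
        } else {
            val existing = backing[key]
            if (existing == null) {
                backing[key] = value           // nothing there: insert
                null
            } else {
                existing                       // already present and not updated: keep the old value
            }
        }
        return retained == null
    }

    fun update(key: Long, value: String): Boolean =
            if (backing.containsKey(key)) { backing[key] = value; true } else false

    operator fun get(key: Long) = backing[key]
}

fun main() {
    val map = ToyAppendOnlyMap()
    println(map.addOrUpdate(1, "1") { k, v -> map.update(k, v) })        // true: inserted
    println(map.addOrUpdate(1, "updated") { k, v -> map.update(k, v) })  // true: updated in place
    println(map[1])                                                      // updated
}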
@@ -14,6 +14,10 @@
     <include file="migration/node-core.changelog-v9.xml"/>
     <include file="migration/node-core.changelog-v10.xml"/>
     <include file="migration/node-core.changelog-v11.xml"/>
+    <!-- This changeset (which creates extra columns in the transactions tables), must be run before the vault state migration (in
+         vault-schema.changelog-v9.xml), as that will use the current hibernate mappings, and those require all DB columns to be
+         created. -->
+    <include file="migration/node-core.changelog-v13.xml"/>
     <!-- This must run after node-core.changelog-init.xml, to prevent database columns being created twice. -->
     <include file="migration/vault-schema.changelog-v9.xml"/>
 </databaseChangeLog>
@@ -0,0 +1,14 @@
<?xml version="1.1" encoding="UTF-8" standalone="no"?>
<databaseChangeLog xmlns="http://www.liquibase.org/xml/ns/dbchangelog"
                   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                   xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.5.xsd"
                   logicalFilePath="migration/node-services.changelog-init.xml">

    <changeSet author="R3.Corda" id="add_is_verified_column">
        <addColumn tableName="node_transactions">
            <column name="status" type="NCHAR(255)" defaultValue="V">
                <constraints nullable="false"/>
            </column>
        </addColumn>
    </changeSet>
</databaseChangeLog>
@@ -751,12 +751,24 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
             return true
         }
 
+        override fun addUnverifiedTransaction(transaction: SignedTransaction) {
+            database.transaction {
+                delegate.addUnverifiedTransaction(transaction)
+            }
+        }
+
         override fun getTransaction(id: SecureHash): SignedTransaction? {
             return database.transaction {
                 records.add(TxRecord.Get(id))
                 delegate.getTransaction(id)
             }
         }
+
+        override fun getTransactionInternal(id: SecureHash): Pair<SignedTransaction, Boolean>? {
+            return database.transaction {
+                delegate.getTransactionInternal(id)
+            }
+        }
     }
 
 interface TxRecord {
@@ -10,7 +10,10 @@ import net.corda.core.crypto.SignatureMetadata
 import net.corda.core.identity.AbstractParty
 import net.corda.core.identity.CordaX500Name
 import net.corda.core.identity.PartyAndCertificate
-import net.corda.core.internal.*
+import net.corda.core.internal.NotaryChangeTransactionBuilder
+import net.corda.core.internal.hash
+import net.corda.core.internal.packageName
+import net.corda.core.internal.signWithCert
 import net.corda.core.node.NetworkParameters
 import net.corda.core.node.NotaryInfo
 import net.corda.core.node.services.Vault
@@ -208,7 +211,8 @@ class VaultStateMigrationTest {
         val persistentTx = DBTransactionStorage.DBTransaction(
                 txId = tx.id.toString(),
                 stateMachineRunId = null,
-                transaction = tx.serialize(context = SerializationDefaults.STORAGE_CONTEXT).bytes
+                transaction = tx.serialize(context = SerializationDefaults.STORAGE_CONTEXT).bytes,
+                status = 'V'
         )
         session.save(persistentTx)
     }
@@ -4,6 +4,7 @@ import net.corda.core.schemas.MappedSchema
 import net.corda.node.services.schema.NodeSchemaService
 import net.corda.node.utilities.AppendOnlyPersistentMap
 import net.corda.nodeapi.internal.persistence.DatabaseConfig
+import net.corda.nodeapi.internal.persistence.currentDBSession
 import net.corda.testing.internal.TestingNamedCacheFactory
 import net.corda.testing.internal.configureDatabase
 import net.corda.testing.node.MockServices
@@ -73,4 +74,60 @@ class AppendOnlyPersistentMapNonConcurrentTest
         assertThat(result).isEqualTo("1")
     }

+    private fun testUpdate(key: Long, value: String): Boolean {
+        val session = currentDBSession()
+        val criteriaBuilder = session.criteriaBuilder
+        val criteriaUpdate = criteriaBuilder.createCriteriaUpdate(PersistentMapEntry::class.java)
+        val queryRoot = criteriaUpdate.from(PersistentMapEntry::class.java)
+        criteriaUpdate.set(PersistentMapEntry::value.name, value)
+        criteriaUpdate.where(criteriaBuilder.equal(queryRoot.get<Long>("key"), key))
+        val update = session.createQuery(criteriaUpdate)
+        val rowsUpdated = update.executeUpdate()
+        return rowsUpdated != 0
+    }
+
+    @Test
+    fun `can update entry in map`() {
+        val map = createMap(1)
+
+        database.transaction {
+            map[1] = "1"
+        }
+
+        database.transaction {
+            map.addOrUpdate(1, "updated") { k, v -> testUpdate(k, v) }
+        }
+
+        val result = database.transaction { map[1] }
+        assertThat(result).isEqualTo("updated")
+    }
+
+    @Test
+    fun `update succeeds if value not in cache but in database`() {
+        val map = createMap(1)
+        database.transaction {
+            map[1] = "1"
+            map[2] = "2"
+            map[3] = "3"
+        }
+
+        database.transaction {
+            map.addOrUpdate(1, "updated") { k, v -> testUpdate(k, v) }
+        }
+
+        val result = database.transaction { map[1] }
+        assertThat(result).isEqualTo("updated")
+    }
+
+    @Test
+    fun `update succeeds if in same transaction as create`() {
+        val map = createMap(1)
+        database.transaction {
+            map[1] = "1"
+            map.addOrUpdate(1, "updated") { k, v -> testUpdate(k, v) }
+        }
+
+        val result = database.transaction { map[1] }
+        assertThat(result).isEqualTo("updated")
+    }
 }
@@ -1,5 +1,6 @@
 package net.corda.node.services.persistence

+import junit.framework.TestCase.assertTrue
 import net.corda.core.contracts.StateRef
 import net.corda.core.crypto.Crypto
 import net.corda.core.crypto.SecureHash
@@ -8,11 +9,11 @@ import net.corda.core.crypto.TransactionSignature
 import net.corda.core.toFuture
 import net.corda.core.transactions.SignedTransaction
 import net.corda.node.services.transactions.PersistentUniquenessProvider
-import net.corda.testing.internal.TestingNamedCacheFactory
 import net.corda.nodeapi.internal.persistence.CordaPersistence
 import net.corda.nodeapi.internal.persistence.DatabaseConfig
 import net.corda.testing.core.*
 import net.corda.testing.internal.LogHelper
+import net.corda.testing.internal.TestingNamedCacheFactory
 import net.corda.testing.internal.configureDatabase
 import net.corda.testing.internal.createWireTransaction
 import net.corda.testing.node.MockServices.Companion.makeTestDataSourceProperties
@@ -153,6 +154,50 @@ class DBTransactionStorageTests
         }
     }
+
+    @Test
+    fun `unverified transaction is correctly added in add transaction`() {
+        val transaction = newTransaction()
+        val added = database.transaction {
+            transactionStorage.addUnverifiedTransaction(transaction)
+            transactionStorage.addTransaction(transaction)
+        }
+        assertTransactionIsRetrievable(transaction)
+        assertTrue(added)
+
+        val secondTransaction = newTransaction()
+        database.transaction {
+            transactionStorage.addUnverifiedTransaction(secondTransaction)
+        }
+
+        val secondAdded = database.transaction {
+            transactionStorage.addTransaction(secondTransaction)
+        }
+
+        assertTransactionIsRetrievable(secondTransaction)
+        assertTrue(secondAdded)
+    }
+
+    @Test
+    fun `cannot move transaction from verified to unverified`() {
+        val transaction = newTransaction()
+        database.transaction {
+            transactionStorage.addTransaction(transaction)
+            transactionStorage.addUnverifiedTransaction(transaction)
+        }
+
+        assertTransactionIsRetrievable(transaction)
+
+        val secondTransaction = newTransaction()
+        database.transaction {
+            transactionStorage.addTransaction(secondTransaction)
+        }
+
+        database.transaction {
+            transactionStorage.addUnverifiedTransaction(secondTransaction)
+        }
+        assertTransactionIsRetrievable(secondTransaction)
+    }

     private fun newTransactionStorage(cacheSizeBytesOverride: Long? = null) {
         transactionStorage = DBTransactionStorage(database, TestingNamedCacheFactory(cacheSizeBytesOverride
                 ?: 1024))
@@ -20,6 +20,7 @@ import net.corda.testing.core.TestIdentity
 import net.corda.testing.node.internal.*
 import org.assertj.core.api.Assertions.assertThat
 import org.assertj.core.api.Assertions.assertThatThrownBy
+import org.h2.util.Utils
 import org.hibernate.exception.ConstraintViolationException
 import org.junit.After
 import org.junit.Assert.assertTrue
@@ -283,7 +284,7 @@ class RetryFlowMockTest
     }

     private fun doInsert() {
-        val tx = DBTransactionStorage.DBTransaction("Foo")
+        val tx = DBTransactionStorage.DBTransaction("Foo", null, Utils.EMPTY_BYTES, 'V')
         contextTransaction.session.save(tx)
     }
 }
@@ -0,0 +1,59 @@
+package net.corda.node.services.transactions
+
+import net.corda.core.crypto.SecureHash
+import net.corda.node.services.DbTransactionsResolver
+import org.assertj.core.api.Assertions.assertThat
+import org.junit.Test
+
+class DbTransactionsResolverTopologicalSortTest {
+    private val topologicalSort = DbTransactionsResolver.TopologicalSort()
+    private val t1 = SecureHash.randomSHA256()
+    private val t2 = SecureHash.randomSHA256()
+    private val t3 = SecureHash.randomSHA256()
+    private val t4 = SecureHash.randomSHA256()
+
+    @Test
+    fun issuance() {
+        topologicalSort.add(t1, emptySet())
+        assertThat(topologicalSort.complete()).containsExactly(t1)
+    }
+
+    @Test
+    fun `T1 to T2`() {
+        topologicalSort.add(t2, setOf(t1))
+        topologicalSort.add(t1, emptySet())
+        assertThat(topologicalSort.complete()).containsExactly(t1, t2)
+    }
+
+    @Test
+    fun `T1 to T2, T1 to T3`() {
+        topologicalSort.add(t3, setOf(t1))
+        topologicalSort.add(t2, setOf(t1))
+        topologicalSort.add(t1, emptySet())
+        val sorted = topologicalSort.complete()
+        assertThat(listOf(t1, t2).map(sorted::indexOf)).isSorted
+        assertThat(listOf(t1, t3).map(sorted::indexOf)).isSorted
+    }
+
+    @Test
+    fun `T1 to T2 to T4, T1 to T3 to T4`() {
+        topologicalSort.add(t4, setOf(t2, t3))
+        topologicalSort.add(t3, setOf(t1))
+        topologicalSort.add(t2, setOf(t1))
+        topologicalSort.add(t1, emptySet())
+        val sorted = topologicalSort.complete()
+        assertThat(listOf(t1, t2, t4).map(sorted::indexOf)).isSorted
+        assertThat(listOf(t1, t3, t4).map(sorted::indexOf)).isSorted
+    }
+
+    @Test
+    fun `T1 to T2 to T3 to T4, T1 to T4`() {
+        topologicalSort.add(t4, setOf(t2, t1))
+        topologicalSort.add(t3, setOf(t2))
+        topologicalSort.add(t2, setOf(t1))
+        topologicalSort.add(t1, emptySet())
+        val sorted = topologicalSort.complete()
+        assertThat(listOf(t1, t2, t3, t4).map(sorted::indexOf)).isSorted
+        assertThat(listOf(t1, t4).map(sorted::indexOf)).isSorted
+    }
+}
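Editor's note: the new test above drives the resolver's TopologicalSort purely through add(id, dependencies) and complete(). As a rough, hypothetical sketch of what that contract implies (not the implementation shipped in this commit, which works on SecureHash ids inside DbTransactionsResolver), a dependency-first ordering can be produced with a post-order walk; SimpleTopologicalSort and TxId below are illustrative names only:

// Illustrative stand-in for the transaction id type; the real tests use SecureHash.
typealias TxId = String

class SimpleTopologicalSort {
    // Transactions in insertion order, each mapped to the ids it directly depends on.
    private val dependencies = LinkedHashMap<TxId, Set<TxId>>()

    /** Record a transaction together with the ids of its direct dependencies. */
    fun add(id: TxId, deps: Set<TxId>) {
        dependencies[id] = deps
    }

    /** Return the ids ordered so that every dependency appears before its dependents. */
    fun complete(): List<TxId> {
        val result = ArrayList<TxId>(dependencies.size)
        val visited = HashSet<TxId>()

        fun visit(id: TxId) {
            if (!visited.add(id)) return
            // Dependencies outside the recorded batch are assumed to be already stored and verified.
            dependencies[id]?.forEach { dep -> if (dep in dependencies) visit(dep) }
            result += id
        }

        dependencies.keys.forEach { visit(it) }
        return result
    }
}

Under this sketch the assertions above hold: in the diamond case (T1 to T2 and T3, both to T4), t1 always precedes t2 and t3 and all three precede t4, while the relative order of t2 and t3 is left unspecified, which is exactly what the isSorted checks allow.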
@@ -110,11 +110,17 @@ class VaultWithCashTest
             val w = vaultService.queryBy<Cash.State>().states
             assertEquals(3, w.size)

-            val state = w[0].state.data
+            // topological sort of transactions when writing them means the order in the
+            // vault may be different from the generated order in the vault filler (also
+            // bad practice to rely on the order of db records). We're only interested
+            // that the correct states exist, so sort them by amount for consistency.
+            val states = w.map { it.state.data }.sortedBy { it.amount }
+
+            val state = states[0]
             assertEquals(30.45.DOLLARS `issued by` DUMMY_CASH_ISSUER, state.amount)
             assertEquals(servicesKey.public, state.owner.owningKey)
-            assertEquals(34.70.DOLLARS `issued by` DUMMY_CASH_ISSUER, (w[2].state.data).amount)
-            assertEquals(34.85.DOLLARS `issued by` DUMMY_CASH_ISSUER, (w[1].state.data).amount)
+            assertEquals(34.70.DOLLARS `issued by` DUMMY_CASH_ISSUER, states[1].amount)
+            assertEquals(34.85.DOLLARS `issued by` DUMMY_CASH_ISSUER, states[2].amount)
         }
     }

@@ -4,12 +4,15 @@ import net.corda.core.crypto.SecureHash
 import net.corda.core.identity.Party
 import net.corda.core.internal.NetworkParametersStorage
 import net.corda.core.internal.SignedDataWithCert
+import net.corda.core.internal.signWithCert
 import net.corda.core.node.NetworkParameters
 import net.corda.core.node.NotaryInfo
 import net.corda.core.serialization.serialize
 import net.corda.nodeapi.internal.network.SignedNetworkParameters
 import net.corda.nodeapi.internal.network.verifiedNetworkMapCert
 import net.corda.testing.common.internal.testNetworkParameters
+import net.corda.testing.core.ALICE_NAME
+import net.corda.testing.core.TestIdentity
 import net.corda.testing.internal.withTestSerializationEnvIfNotSet
 import java.security.cert.X509Certificate
 import java.time.Instant
@@ -66,5 +69,10 @@ class MockNetworkParametersStorage(private var currentParameters: NetworkParamet

     private fun storeCurrentParameters() {
         hashToParametersMap[currentHash] = currentParameters
+        val testIdentity = TestIdentity(ALICE_NAME, 20)
+        val signedData = withTestSerializationEnvIfNotSet {
+            currentParameters.signWithCert(testIdentity.keyPair.private, testIdentity.identity.certificate)
+        }
+        hashToSignedParametersMap[currentHash] = signedData
     }
 }
@@ -8,6 +8,7 @@ import net.corda.core.serialization.SingletonSerializeAsToken
 import net.corda.core.toFuture
 import net.corda.core.transactions.SignedTransaction
 import net.corda.node.services.api.WritableTransactionStorage
+import net.corda.testing.node.MockServices
 import rx.Observable
 import rx.subjects.PublishSubject
 import java.util.*
@@ -17,29 +18,44 @@
  */
 open class MockTransactionStorage : WritableTransactionStorage, SingletonSerializeAsToken() {
     override fun trackTransaction(id: SecureHash): CordaFuture<SignedTransaction> {
-        return txns[id]?.let { doneFuture(it) } ?: _updatesPublisher.filter { it.id == id }.toFuture()
+        return getTransaction(id)?.let { doneFuture(it) } ?: _updatesPublisher.filter { it.id == id }.toFuture()
     }

     override fun track(): DataFeed<List<SignedTransaction>, SignedTransaction> {
-        return DataFeed(txns.values.toList(), _updatesPublisher)
+        return DataFeed(txns.values.mapNotNull { if (it.isVerified) it.stx else null }, _updatesPublisher)
     }

-    private val txns = HashMap<SecureHash, SignedTransaction>()
+    private val txns = HashMap<SecureHash, TxHolder>()

     private val _updatesPublisher = PublishSubject.create<SignedTransaction>()

     override val updates: Observable<SignedTransaction>
         get() = _updatesPublisher

-    private fun notify(transaction: SignedTransaction) = _updatesPublisher.onNext(transaction)
+    private fun notify(transaction: SignedTransaction): Boolean {
+        _updatesPublisher.onNext(transaction)
+        return true
+    }

     override fun addTransaction(transaction: SignedTransaction): Boolean {
-        val recorded = txns.putIfAbsent(transaction.id, transaction) == null
-        if (recorded) {
+        val current = txns.putIfAbsent(transaction.id, TxHolder(transaction, isVerified = true))
+        return if (current == null) {
             notify(transaction)
+        } else if (!current.isVerified) {
+            current.isVerified = true
+            notify(transaction)
+        } else {
+            false
         }
-        return recorded
     }

-    override fun getTransaction(id: SecureHash): SignedTransaction? = txns[id]
+    override fun addUnverifiedTransaction(transaction: SignedTransaction) {
+        txns.putIfAbsent(transaction.id, TxHolder(transaction, isVerified = false))
+    }
+
+    override fun getTransaction(id: SecureHash): SignedTransaction? = txns[id]?.let { if (it.isVerified) it.stx else null }
+
+    override fun getTransactionInternal(id: SecureHash): Pair<SignedTransaction, Boolean>? = txns[id]?.let { Pair(it.stx, it.isVerified) }
+
+    private class TxHolder(val stx: SignedTransaction, var isVerified: Boolean)
 }
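Editor's note: a quick, hypothetical usage sketch of the reworked mock above (not test code from this commit; stx stands for any SignedTransaction already built in a test) shows the unverified-to-verified lifecycle that the real DBTransactionStorage now supports:

val storage = MockTransactionStorage()

// A transaction pulled down during backchain resolution can be recorded as unverified first ...
storage.addUnverifiedTransaction(stx)
check(storage.getTransaction(stx.id) == null)                      // hidden from ordinary lookups
check(storage.getTransactionInternal(stx.id) == Pair(stx, false))  // still visible to the resolver

// ... and is promoted once verification succeeds; addTransaction reports the promotion.
check(storage.addTransaction(stx))
check(storage.getTransaction(stx.id) == stx)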
@@ -1,14 +1,9 @@
 package net.corda.testing.contracts

-import net.corda.core.contracts.AlwaysAcceptAttachmentConstraint
-import net.corda.core.contracts.AttachmentConstraint
-import net.corda.core.contracts.CommandData
-import net.corda.core.contracts.ContractClassName
-import net.corda.core.contracts.ContractState
-import net.corda.core.contracts.TypeOnlyCommandData
-import net.corda.core.contracts.UpgradedContractWithLegacyConstraint
+import net.corda.core.contracts.*
 import net.corda.core.identity.AbstractParty
 import net.corda.core.transactions.LedgerTransaction
+import net.corda.core.transactions.TransactionBuilder

 // The dummy contract doesn't do anything useful. It exists for testing purposes.

@@ -19,6 +14,28 @@ import net.corda.core.transactions.LedgerTransaction
 class DummyContractV2 : UpgradedContractWithLegacyConstraint<DummyContract.State, DummyContractV2.State> {
     companion object {
         const val PROGRAM_ID: ContractClassName = "net.corda.testing.contracts.DummyContractV2"
+
+        /**
+         * An overload of move for just one input state.
+         */
+        @JvmStatic
+        fun move(prior: StateAndRef<State>, newOwner: AbstractParty) = move(listOf(prior), newOwner)
+
+        /**
+         * Returns a [TransactionBuilder] that takes the given input states and transfers them to the newOwner.
+         */
+        @JvmStatic
+        fun move(priors: List<StateAndRef<State>>, newOwner: AbstractParty): TransactionBuilder {
+            require(priors.isNotEmpty()) { "States to move to new owner must not be empty" }
+            val priorState = priors[0].state.data
+            val (cmd, state) = priorState.withNewOwner(newOwner)
+            return TransactionBuilder(notary = priors[0].state.notary).withItems(
+                    /* INPUTS  */ *priors.toTypedArray(),
+                    /* COMMAND */ Command(cmd, priorState.owners.map { it.owningKey }),
+                    /* OUTPUT  */ StateAndContract(state, DummyContractV2.PROGRAM_ID)
+            )
+        }
     }

     override val legacyContract: String = DummyContract::class.java.name
@@ -26,6 +43,11 @@ class DummyContractV2 : UpgradedContractWithLegacyConstraint<DummyContract.State

     data class State(val magicNumber: Int = 0, val owners: List<AbstractParty>) : ContractState {
         override val participants: List<AbstractParty> = owners
+
+        fun withNewOwner(newOwner: AbstractParty): Pair<Commands, State> {
+            val newState = this.copy(owners = listOf(newOwner))
+            return Pair(Commands.Move(), newState)
+        }
     }

     interface Commands : CommandData {