Merge pull request #7617 from corda/merge-release/os/4.11-release/os/4.12-2023-12-15-79

ENT-11281: Merging forward updates from release/os/4.11 to release/os/4.12 - 2023-12-15
Adel El-Beik 2023-12-19 13:15:51 +00:00 committed by GitHub
commit e815b381ef
16 changed files with 133 additions and 59 deletions

View File

@@ -3052,21 +3052,21 @@ public final class net.corda.core.flows.FlowRecoveryException extends net.corda.
@CordaSerializable
public final class net.corda.core.flows.FlowRecoveryQuery extends java.lang.Object
public <init>()
public <init>(net.corda.core.flows.FlowTimeWindow, net.corda.core.identity.CordaX500Name, java.util.List)
public <init>(net.corda.core.flows.FlowTimeWindow, net.corda.core.identity.CordaX500Name, java.util.List, int, kotlin.jvm.internal.DefaultConstructorMarker)
public <init>(net.corda.core.flows.FlowTimeWindow, java.util.List, java.util.List)
public <init>(net.corda.core.flows.FlowTimeWindow, java.util.List, java.util.List, int, kotlin.jvm.internal.DefaultConstructorMarker)
@Nullable
public final net.corda.core.flows.FlowTimeWindow component1()
@Nullable
public final net.corda.core.identity.CordaX500Name component2()
public final java.util.List component2()
@Nullable
public final java.util.List component3()
@NotNull
public final net.corda.core.flows.FlowRecoveryQuery copy(net.corda.core.flows.FlowTimeWindow, net.corda.core.identity.CordaX500Name, java.util.List)
public final net.corda.core.flows.FlowRecoveryQuery copy(net.corda.core.flows.FlowTimeWindow, java.util.List, java.util.List)
public boolean equals(Object)
@Nullable
public final java.util.List getCounterParties()
@Nullable
public final net.corda.core.identity.CordaX500Name getInitiatedBy()
public final java.util.List getInitiatedBy()
@Nullable
public final net.corda.core.flows.FlowTimeWindow getTimeframe()
public int hashCode()

View File

@@ -28,4 +28,5 @@ forwardMerger(
targetBranch: targetBranch,
originBranch: originBranch,
slackChannel: '#c4-forward-merge-bot-notifications',
cloneTickets: true,
)

View File

@@ -29,7 +29,8 @@ String COMMON_GRADLE_PARAMS = [
'--info',
'-Pcompilation.warningsAsErrors=false',
'-Ptests.failFast=true',
'-DexcludeShell',
'--build-cache',
'-DexcludeShell'
].join(' ')
pipeline {
@@ -55,8 +56,12 @@ pipeline {
environment {
ARTIFACTORY_BUILD_NAME = "Corda :: Publish :: Publish Release to Artifactory :: ${env.BRANCH_NAME}"
ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
BUILD_CACHE_CREDENTIALS = credentials('gradle-ent-cache-credentials')
BUILD_CACHE_PASSWORD = "${env.BUILD_CACHE_CREDENTIALS_PSW}"
BUILD_CACHE_USERNAME = "${env.BUILD_CACHE_CREDENTIALS_USR}"
CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
CORDA_GRADLE_SCAN_KEY = credentials('gradle-build-scans-key')
CORDA_BUILD_EDITION = "${buildEdition}"
CORDA_USE_CACHE = "corda-remotes"
DOCKER_URL = "https://index.docker.io/v1/"
@@ -76,7 +81,8 @@ pipeline {
'./gradlew',
COMMON_GRADLE_PARAMS,
'clean',
'jar'
'jar',
'--parallel'
].join(' ')
}
}
@@ -136,8 +142,8 @@ pipeline {
}
post {
always {
archiveArtifacts artifacts: '**/*.log', fingerprint: false
junit testResults: '**/build/test-results/**/*.xml', keepLongStdio: true
archiveArtifacts artifacts: '**/*.log', allowEmptyArchive: true, fingerprint: false
junit testResults: '**/build/test-results/**/*.xml', keepLongStdio: true, allowEmptyResults: true
/*
* Copy all JUnit results files into a single top level directory.
* This is necessary to stop the allure plugin from hitting out
@@ -173,7 +179,8 @@ pipeline {
sh script: [
'./gradlew',
COMMON_GRADLE_PARAMS,
'jar'
'jar',
'--parallel'
].join(' ')
}
}
@@ -209,8 +216,8 @@ pipeline {
stage('Same agent') {
post {
always {
archiveArtifacts artifacts: '**/*.log', fingerprint: false
junit testResults: '**/build/test-results/**/*.xml', keepLongStdio: true
archiveArtifacts artifacts: '**/*.log', allowEmptyArchive: true, fingerprint: false
junit testResults: '**/build/test-results/**/*.xml', keepLongStdio: true, allowEmptyResults: true
/*
* Copy all JUnit results files into a single top level directory.
* This is necessary to stop the allure plugin from hitting out
@@ -332,6 +339,7 @@ pipeline {
post {
always {
script {
findBuildScans()
if (gitUtils.isReleaseTag()) {
gitUtils.getGitLog(env.TAG_NAME, env.GIT_URL.replace('https://github.com/corda/', ''))
}

View File

@@ -8,12 +8,18 @@ jobs:
sync_assigned:
runs-on: ubuntu-latest
steps:
- name: Generate a token
id: generate_token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.AUTH_APP_ID }}
private-key: ${{ secrets.AUTH_APP_PK }}
- name: Assign
uses: corda/jira-sync-assigned-action@master
with:
jiraBaseUrl: ${{ secrets.JIRA_BASE_URL }}
jiraEmail: ${{ secrets.JIRA_USER_EMAIL }}
jiraToken: ${{ secrets.JIRA_API_TOKEN }}
token: ${{ secrets.GH_TOKEN }}
token: ${{ steps.generate_token.outputs.token }}
owner: corda
repository: corda

View File

@@ -8,12 +8,18 @@ jobs:
sync_closed:
runs-on: ubuntu-latest
steps:
- name: Generate a token
id: generate_token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.AUTH_APP_ID }}
private-key: ${{ secrets.AUTH_APP_PK }}
- name: Close
uses: corda/jira-sync-closed-action@master
with:
jiraBaseUrl: https://r3-cev.atlassian.net
jiraEmail: ${{ secrets.JIRA_USER_EMAIL }}
jiraToken: ${{ secrets.JIRA_API_TOKEN }}
token: ${{ secrets.GH_TOKEN }}
token: ${{ steps.generate_token.outputs.token }}
owner: corda
repository: corda

View File

@@ -10,6 +10,13 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Generate a token
id: generate_token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.AUTH_APP_ID }}
private-key: ${{ secrets.AUTH_APP_PK }}
- name: Jira Create issue
id: create
uses: corda/jira-create-issue-action@master
@@ -30,7 +37,7 @@ jobs:
- name: Create comment
uses: peter-evans/create-or-update-comment@v1
with:
token: ${{ secrets.GH_TOKEN }}
token: ${{ steps.generate_token.outputs.token }}
issue-number: ${{ github.event.issue.number }}
body: |
Automatically created Jira issue: ${{ steps.create.outputs.issue }}

Jenkinsfile vendored
View File

@@ -24,6 +24,7 @@ String COMMON_GRADLE_PARAMS = [
'-Ptests.failFast=true',
'-Ddependx.branch.origin="${GIT_COMMIT}"', // DON'T change quotation - GIT_COMMIT variable is substituted by SHELL!!!!
'-Ddependx.branch.target="${CHANGE_TARGET}"', // DON'T change quotation - CHANGE_TARGET variable is substituted by SHELL!!!!
'--build-cache',
].join(' ')
pipeline {
@@ -45,8 +46,12 @@ pipeline {
*/
environment {
ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
BUILD_CACHE_CREDENTIALS = credentials('gradle-ent-cache-credentials')
BUILD_CACHE_PASSWORD = "${env.BUILD_CACHE_CREDENTIALS_PSW}"
BUILD_CACHE_USERNAME = "${env.BUILD_CACHE_CREDENTIALS_USR}"
CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
CORDA_GRADLE_SCAN_KEY = credentials('gradle-build-scans-key')
CORDA_USE_CACHE = "corda-remotes"
JAVA_HOME="/usr/lib/jvm/java-17-amazon-corretto"
}
@@ -59,7 +64,8 @@ pipeline {
'./gradlew',
COMMON_GRADLE_PARAMS,
'clean',
'jar'
'jar',
'--parallel'
].join(' ')
}
}
@@ -81,8 +87,8 @@ pipeline {
}
post {
always {
archiveArtifacts artifacts: '**/*.log', fingerprint: false
junit testResults: '**/build/test-results/**/*.xml', keepLongStdio: true
archiveArtifacts artifacts: '**/*.log', allowEmptyArchive: true, fingerprint: true
junit testResults: '**/build/test-results/**/*.xml', keepLongStdio: true, allowEmptyResults: true
}
cleanup {
deleteDir() /* clean up our workspace */
@@ -100,7 +106,8 @@ pipeline {
sh script: [
'./gradlew',
COMMON_GRADLE_PARAMS,
'jar'
'jar',
'--parallel'
].join(' ')
}
}
@@ -136,8 +143,8 @@ pipeline {
stage('Same agent') {
post {
always {
archiveArtifacts artifacts: '**/*.log', fingerprint: false
junit testResults: '**/build/test-results/**/*.xml', keepLongStdio: true
archiveArtifacts artifacts: '**/*.log', allowEmptyArchive: true, fingerprint: true
junit testResults: '**/build/test-results/**/*.xml', keepLongStdio: true, allowEmptyResults: true
}
}
stages {
@@ -155,8 +162,10 @@ pipeline {
}
}
}
post {
always {
findBuildScans()
}
cleanup {
deleteDir() /* clean up our workspace */
}

View File

@@ -56,7 +56,7 @@ class FlowRecoveryException(message: String, cause: Throwable? = null) : FlowExc
@CordaSerializable
data class FlowRecoveryQuery(
val timeframe: FlowTimeWindow? = null,
val initiatedBy: CordaX500Name? = null,
val initiatedBy: List<CordaX500Name>? = null,
val counterParties: List<CordaX500Name>? = null) {
init {
require(timeframe != null || initiatedBy != null || counterParties != null) {
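For context on the change above: the initiatedBy field of FlowRecoveryQuery widens from a single CordaX500Name? to a List<CordaX500Name>?, so a recovery query can now target several initiating parties at once. A minimal Kotlin sketch of building a query against the new signature (the party names are invented for illustration and the optional timeframe is left at its default):

import net.corda.core.flows.FlowRecoveryQuery
import net.corda.core.identity.CordaX500Name

// Sketch only: hypothetical party names, timeframe omitted.
val query = FlowRecoveryQuery(
        initiatedBy = listOf(CordaX500Name.parse("O=PartyA,L=London,C=GB")),
        counterParties = listOf(CordaX500Name.parse("O=PartyB,L=New York,C=US"))
)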

View File

@@ -11,8 +11,42 @@ import net.corda.core.utilities.ProgressTracker
*/
@StartableByRPC
class LedgerRecoveryFlow(
private val parameters: LedgerRecoveryParameters,
override val progressTracker: ProgressTracker = ProgressTracker()) : FlowLogic<LedgerRecoveryResult>() {
private val parameters: LedgerRecoveryParameters,
override val progressTracker: ProgressTracker = ProgressTracker()) : FlowLogic<LedgerRecoveryResult>() {
// constructors added to aid Corda Node Shell flow command invocation
constructor(recoveryPeer: Party) : this(LedgerRecoveryParameters(setOf(recoveryPeer)))
constructor(recoveryPeers: Collection<Party>) : this(LedgerRecoveryParameters(recoveryPeers))
constructor(useAllNetworkNodes: Boolean) : this(LedgerRecoveryParameters(emptySet(), useAllNetworkNodes = useAllNetworkNodes))
constructor(recoveryPeer: Party, timeWindow: RecoveryTimeWindow) :
this(LedgerRecoveryParameters(setOf(recoveryPeer), timeWindow))
constructor(recoveryPeer: Party, timeWindow: RecoveryTimeWindow, dryRun: Boolean) :
this(LedgerRecoveryParameters(setOf(recoveryPeer), timeWindow, dryRun = dryRun))
constructor(recoveryPeer: Party, timeWindow: RecoveryTimeWindow, dryRun: Boolean, verboseLogging: Boolean) :
this(LedgerRecoveryParameters(setOf(recoveryPeer), timeWindow, dryRun = dryRun, verboseLogging = verboseLogging))
constructor(recoveryPeer: Party, timeWindow: RecoveryTimeWindow, dryRun: Boolean, verboseLogging: Boolean, alsoFinalize: Boolean) :
this(LedgerRecoveryParameters(setOf(recoveryPeer), timeWindow, dryRun = dryRun, verboseLogging = verboseLogging, alsoFinalize = alsoFinalize))
constructor(recoveryPeers: Collection<Party>, timeWindow: RecoveryTimeWindow) :
this(LedgerRecoveryParameters(recoveryPeers, timeWindow))
constructor(recoveryPeers: Collection<Party>, timeWindow: RecoveryTimeWindow, dryRun: Boolean) :
this(LedgerRecoveryParameters(recoveryPeers, timeWindow, dryRun = dryRun))
constructor(recoveryPeers: Collection<Party>, timeWindow: RecoveryTimeWindow, dryRun: Boolean, verboseLogging: Boolean) :
this(LedgerRecoveryParameters(recoveryPeers, timeWindow, dryRun = dryRun, verboseLogging = verboseLogging))
constructor(recoveryPeers: Collection<Party>, timeWindow: RecoveryTimeWindow, dryRun: Boolean, verboseLogging: Boolean, alsoFinalize: Boolean) :
this(LedgerRecoveryParameters(recoveryPeers, timeWindow, dryRun = dryRun, verboseLogging = verboseLogging, alsoFinalize = alsoFinalize))
constructor(useAllNetworkNodes: Boolean, timeWindow: RecoveryTimeWindow) :
this(LedgerRecoveryParameters(emptySet(), timeWindow, useAllNetworkNodes = useAllNetworkNodes))
constructor(useAllNetworkNodes: Boolean, timeWindow: RecoveryTimeWindow, dryRun: Boolean) :
this(LedgerRecoveryParameters(emptySet(), timeWindow, useAllNetworkNodes = useAllNetworkNodes, dryRun = dryRun))
constructor(useAllNetworkNodes: Boolean, timeWindow: RecoveryTimeWindow, dryRun: Boolean, verboseLogging: Boolean) :
this(LedgerRecoveryParameters(emptySet(), timeWindow, useAllNetworkNodes = useAllNetworkNodes, dryRun = dryRun, verboseLogging = verboseLogging))
constructor(useAllNetworkNodes: Boolean, timeWindow: RecoveryTimeWindow, dryRun: Boolean, verboseLogging: Boolean, recoveryBatchSize: Int, alsoFinalize: Boolean) :
this(LedgerRecoveryParameters(emptySet(), timeWindow, useAllNetworkNodes = useAllNetworkNodes, dryRun = dryRun, verboseLogging = verboseLogging, recoveryBatchSize = recoveryBatchSize, alsoFinalize = alsoFinalize))
constructor(useAllNetworkNodes: Boolean, timeWindow: RecoveryTimeWindow, dryRun: Boolean, verboseLogging: Boolean, recoveryBatchSize: Int) :
this(LedgerRecoveryParameters(emptySet(), timeWindow, useAllNetworkNodes = useAllNetworkNodes, dryRun = dryRun, verboseLogging = verboseLogging, recoveryBatchSize = recoveryBatchSize))
constructor(recoveryPeers: Collection<Party>, timeWindow: RecoveryTimeWindow, useAllNetworkNodes: Boolean, dryRun: Boolean, useTimeWindowNarrowing: Boolean, verboseLogging: Boolean, recoveryBatchSize: Int) :
this(LedgerRecoveryParameters(recoveryPeers, timeWindow, useAllNetworkNodes,
dryRun = dryRun, useTimeWindowNarrowing = useTimeWindowNarrowing, verboseLogging = verboseLogging, recoveryBatchSize = recoveryBatchSize))
@CordaInternal
data class ExtraConstructorArgs(val parameters: LedgerRecoveryParameters)
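The convenience constructors above mirror combinations of the LedgerRecoveryParameters fields so the flow can be started with simple positional arguments, for example from the node shell, instead of a fully built parameters object. A rough sketch of calling a few of the new overloads directly (the import path for LedgerRecoveryFlow is assumed, since the package is not shown in this diff, and peer stands for a Party already resolved from the network map):

import net.corda.core.flows.LedgerRecoveryFlow // package assumed; not shown in this diff
import net.corda.core.identity.Party

// Sketch only: `peer` is an already-resolved Party supplied by the caller.
fun recoveryFlowExamples(peer: Party): List<LedgerRecoveryFlow> = listOf(
        LedgerRecoveryFlow(peer),                      // recover from a single peer
        LedgerRecoveryFlow(setOf(peer)),               // same peer, via the Collection<Party> overload
        LedgerRecoveryFlow(useAllNetworkNodes = true)  // ask every node on the network
)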

View File

@@ -80,7 +80,8 @@ sealed class FetchDataFlow<T : NamedByHash, in W : Any>(
// against not having unknown by using the platform version as a guard.
@CordaSerializationTransformEnumDefaults(
CordaSerializationTransformEnumDefault("BATCH_TRANSACTION", "TRANSACTION"),
CordaSerializationTransformEnumDefault("UNKNOWN", "TRANSACTION")
CordaSerializationTransformEnumDefault("UNKNOWN", "TRANSACTION"),
CordaSerializationTransformEnumDefault("TRANSACTION_RECOVERY", "TRANSACTION")
)
@CordaSerializable
enum class DataType {

View File

@@ -4,7 +4,6 @@ import co.paralleluniverse.strands.Strand
import junit.framework.TestCase.assertEquals
import junit.framework.TestCase.assertNotNull
import junit.framework.TestCase.assertTrue
import net.corda.core.flows.UnexpectedFlowEndException
import net.corda.core.internal.InputStreamAndHash
import net.corda.core.internal.deleteRecursively
import net.corda.core.messaging.startFlow
@@ -55,13 +54,15 @@ class VaultUpdateDeserializationTest {
/*
* Transaction sent from A -> B with Notarisation
* Test that a deserialization error is raised where the receiver node of a transaction has an incompatible contract jar.
* In the case of a notarised transaction, a deserialisation error is thrown in the receiver SignTransactionFlow (before finality)
* upon receiving the transaction to be signed and attempting to record its dependencies.
* The ledger will not record any transactions, and the flow must be retried by the sender upon installing the correct contract jar
* But only on new transactions, and not in the back chain.
* In the case of a notarised transaction, a deserialisation error is thrown in the receiver in the second phase of finality
* when updating the vault. The sender will not block, and the back chain is successfully recorded
* on the receiver even though those states have deserialization errors too. The flow on the receiver is hospitalised.
* The flow is retried by the receiver after installing the correct contract jar
* version and re-starting the node.
*/
@Test(timeout=300_000)
fun `Notarised transaction fails completely upon receiver deserialization failure collecting signatures when using incompatible contract jar`() {
@Test(timeout = 300_000)
fun `Notarised transaction fails but back chain succeeds upon receiver deserialization failure when using incompatible contract jar`() {
driver(driverParameters(listOf(flowVersion1, contractVersion1))) {
val alice = startNode(NodeParameters(additionalCordapps = listOf(flowVersion1, contractVersion1)),
providedName = ALICE_NAME).getOrThrow()
@@ -75,20 +76,15 @@ class VaultUpdateDeserializationTest {
val stx = alice.rpc.startFlow(::AttachmentIssueFlow, hash, defaultNotaryIdentity).returnValue.getOrThrow(30.seconds)
val spendableState = stx.coreTransaction.outRef<AttachmentContractV1.State>(0)
// NOTE: exception is propagated from Receiver
try {
alice.rpc.startFlow(::AttachmentFlowV1, bob.nodeInfo.singleIdentity(), defaultNotaryIdentity, hash, spendableState).returnValue.getOrThrow(30.seconds)
}
catch(e: UnexpectedFlowEndException) {
println("Bob fails to deserialise transaction upon receipt of transaction for signing.")
}
alice.rpc.startFlow(::AttachmentFlowV1, bob.nodeInfo.singleIdentity(), defaultNotaryIdentity, hash, spendableState).returnValue.getOrThrow(30.seconds)
assertEquals(0, bob.rpc.vaultQueryBy<AttachmentContractV1.State>().states.size)
assertEquals(1, alice.rpc.vaultQueryBy<AttachmentContractV1.State>().states.size)
// check transaction records
@Suppress("DEPRECATION")
assertEquals(1, alice.rpc.internalVerifiedTransactionsSnapshot().size) // issuance only
assertEquals(2, alice.rpc.internalVerifiedTransactionsSnapshot().size) // both
@Suppress("DEPRECATION")
assertTrue(bob.rpc.internalVerifiedTransactionsSnapshot().isEmpty())
assertEquals(1, bob.rpc.internalVerifiedTransactionsSnapshot().size) // issuance only
// restart Bob with correct contract jar version
(bob as OutOfProcess).process.destroyForcibly()
@@ -97,13 +93,12 @@ class VaultUpdateDeserializationTest {
val restartedBob = startNode(NodeParameters(additionalCordapps = listOf(flowVersion1, contractVersion1)),
providedName = BOB_NAME).getOrThrow()
// re-run failed flow
alice.rpc.startFlow(::AttachmentFlowV1, restartedBob.nodeInfo.singleIdentity(), defaultNotaryIdentity, hash, spendableState).returnValue.getOrThrow(30.seconds)
// original hospitalized transaction should now have been re-processed with correct contract jar
assertEquals(1, waitForVaultUpdate(restartedBob))
assertEquals(1, alice.rpc.vaultQueryBy<AttachmentContractV1.State>().states.size)
@Suppress("DEPRECATION")
assertTrue(restartedBob.rpc.internalVerifiedTransactionsSnapshot().isNotEmpty())
assertEquals(2, restartedBob.rpc.internalVerifiedTransactionsSnapshot().size) // both
assertEquals(1, restartedBob.rpc.vaultQueryBy<AttachmentContractV1.State>().states.size)
}
}

View File

@@ -13,6 +13,7 @@ import net.corda.core.transactions.SignedTransaction
import net.corda.core.utilities.debug
import net.corda.core.utilities.seconds
import net.corda.core.utilities.trace
import net.corda.node.services.api.ServiceHubInternal
import net.corda.node.services.api.WritableTransactionStorage
import java.util.*
@@ -107,7 +108,7 @@ class DbTransactionsResolver(private val flow: ResolveTransactionsFlow) : Transa
}
if (txStatus == TransactionStatus.UNVERIFIED) {
tx.verify(flow.serviceHub)
flow.serviceHub.recordTransactions(usedStatesToRecord, listOf(tx))
(flow.serviceHub as ServiceHubInternal).recordTransactions(usedStatesToRecord, listOf(tx), false, disableSoftLocking = true)
} else {
logger.debug { "No need to record $txId as it's already been verified" }
}

View File

@@ -88,6 +88,7 @@ interface ServiceHubInternal : ServiceHubCoreInternal {
stateMachineRecordedTransactionMapping: StateMachineRecordedTransactionMappingStorage,
vaultService: VaultServiceInternal,
database: CordaPersistence,
disableSoftLocking: Boolean = false,
updateFn: (SignedTransaction) -> Boolean = validatedTransactions::addTransaction
) {
@@ -147,7 +148,7 @@ interface ServiceHubInternal : ServiceHubCoreInternal {
//
// Because the primary use case for recording irrelevant states is observer/regulator nodes, who are unlikely
// to make writes to the ledger very often or at all, we choose to punt this issue for the time being.
vaultService.notifyAll(statesToRecord, recordedTransactions.map { it.coreTransaction }, previouslySeenTxs.map { it.coreTransaction })
vaultService.notifyAll(statesToRecord, recordedTransactions.map { it.coreTransaction }, previouslySeenTxs.map { it.coreTransaction }, disableSoftLocking)
}
}
@@ -205,15 +206,14 @@ interface ServiceHubInternal : ServiceHubCoreInternal {
@Suppress("NestedBlockDepth")
@VisibleForTesting
fun recordTransactions(statesToRecord: StatesToRecord, txs: Iterable<SignedTransaction>, disableSignatureVerification: Boolean) {
fun recordTransactions(statesToRecord: StatesToRecord, txs: Iterable<SignedTransaction>, disableSignatureVerification: Boolean, disableSoftLocking: Boolean = false) {
txs.forEach {
requireSupportedHashType(it)
if (it.coreTransaction is WireTransaction) {
if (disableSignatureVerification) {
log.warnOnce("The current usage of recordTransactions is unsafe." +
"Recording transactions without signature verification may lead to severe problems with ledger consistency.")
}
else {
} else {
try {
it.verifyRequiredSignatures()
}
@@ -229,7 +229,8 @@ interface ServiceHubInternal : ServiceHubCoreInternal {
validatedTransactions,
stateMachineRecordedTransactionMapping,
vaultService,
database
database,
disableSoftLocking
)
}

View File

@@ -15,7 +15,8 @@ interface VaultServiceInternal : VaultService {
* indicate whether an update consists entirely of regular or notary change transactions, which may require
* different processing logic.
*/
fun notifyAll(statesToRecord: StatesToRecord, txns: Iterable<CoreTransaction>, previouslySeenTxns: Iterable<CoreTransaction> = emptyList())
fun notifyAll(statesToRecord: StatesToRecord, txns: Iterable<CoreTransaction>, previouslySeenTxns: Iterable<CoreTransaction> = emptyList(),
disableSoftLocking: Boolean = false)
/**
* Same as notifyAll but with a single transaction.

View File

@@ -70,7 +70,7 @@ import java.security.PublicKey
import java.sql.SQLException
import java.time.Clock
import java.time.Instant
import java.util.*
import java.util.UUID
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.CopyOnWriteArraySet
import java.util.stream.Stream
@@ -283,12 +283,13 @@ class NodeVaultService(
internal val publishUpdates get() = mutex.locked { updatesPublisher }
/** Groups adjacent transactions into batches to generate separate net updates per transaction type. */
override fun notifyAll(statesToRecord: StatesToRecord, txns: Iterable<CoreTransaction>, previouslySeenTxns: Iterable<CoreTransaction>) {
override fun notifyAll(statesToRecord: StatesToRecord, txns: Iterable<CoreTransaction>, previouslySeenTxns: Iterable<CoreTransaction>,
disableSoftLocking: Boolean) {
if (statesToRecord == StatesToRecord.NONE || (!txns.any() && !previouslySeenTxns.any())) return
val batch = mutableListOf<CoreTransaction>()
fun flushBatch(previouslySeen: Boolean) {
val updates = makeUpdates(batch, statesToRecord, previouslySeen)
val updates = makeUpdates(batch, statesToRecord, previouslySeen, disableSoftLocking)
processAndNotify(updates)
batch.clear()
}
@@ -307,7 +308,8 @@
processTransactions(txns, false)
}
private fun makeUpdates(batch: Iterable<CoreTransaction>, statesToRecord: StatesToRecord, previouslySeen: Boolean): List<Vault.Update<ContractState>> {
@Suppress("ComplexMethod", "ThrowsCount")
private fun makeUpdates(batch: Iterable<CoreTransaction>, statesToRecord: StatesToRecord, previouslySeen: Boolean, disableSoftLocking: Boolean): List<Vault.Update<ContractState>> {
fun <T> withValidDeserialization(list: List<T>, txId: SecureHash): Map<Int, T> {
var error: TransactionDeserialisationException? = null
@@ -319,13 +321,15 @@
// This will cause a failure as we can't deserialize such states in the context of the `appClassloader`.
// For now we ignore these states.
// In the future we will use the AttachmentsClassloader to correctly deserialize and asses the relevancy.
if (IGNORE_TRANSACTION_DESERIALIZATION_ERRORS) {
// Also disabled when soft locking is disabled, on the assumption that these states belong to the back chain and are
// less important than the top-level transaction.
if (IGNORE_TRANSACTION_DESERIALIZATION_ERRORS || disableSoftLocking) {
log.warnOnce("The current usage of transaction deserialization for the vault is unsafe." +
"Ignoring vault updates due to failed deserialized states may lead to severe problems with ledger consistency. ")
log.warn("Could not deserialize state $idx from transaction $txId. Cause: $e")
} else {
log.error("Could not deserialize state $idx from transaction $txId. Cause: $e")
if(error == null) error = e
if (error == null) error = e
}
null
}

View File

@@ -75,7 +75,7 @@
</changeSet>
<changeSet author="R3.Corda" id="node_receiver_distr_recs_add_indexes">
<createIndex indexName="node_receiver_distr_recs_keyinfo_idx" tableName="node_receiver_distr_recs">
<createIndex indexName="node_receiver_distr_recs_idx1" tableName="node_receiver_distr_recs">
<column name="transaction_id"/>
<column name="timestamp"/>
<column name="timestamp_discriminator"/>