Mirror of https://github.com/corda/corda.git
Fix support for large attachments by de-batching tx/attachment fetch. This is a workaround until the upstream Artemis large message streaming bugs are fixed.
Commit a56540a3d6 (parent e83deb78ec), committed by Andrius Dagys.
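The shape of the workaround, as a minimal sketch in plain Kotlin (illustrative only: sendMessage, MAX_MESSAGE_BYTES and the ~10 MB figure are assumed stand-ins, not Corda or Artemis APIs): instead of serialising every fetched item into a single response, which can blow past the broker's maximum message size, each item goes out in its own message, so only an individual attachment has to fit under the limit.

// Illustrative sketch only; the messaging layer and the exact limit are assumptions.
const val MAX_MESSAGE_BYTES = 10 * 1024 * 1024  // assumed ~10 MB broker message size limit

fun sendMessage(payload: ByteArray) {
    require(payload.size <= MAX_MESSAGE_BYTES) { "Payload of ${payload.size} bytes exceeds the limit" }
    // ... hand the payload to the messaging layer here ...
}

// Batched response: everything in one message; four 3 MB attachments would fail the size check.
fun respondBatched(items: List<ByteArray>) {
    val combined = items.reduce { acc, bytes -> acc + bytes }  // crude stand-in for serialising a list
    sendMessage(combined)
}

// De-batched response: one message per item; each 3 MB attachment fits under the limit on its own.
fun respondPerItem(items: List<ByteArray>) {
    items.forEach { sendMessage(it) }
}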
@@ -0,0 +1,73 @@
package net.corda.node.services.statemachine

import co.paralleluniverse.fibers.Suspendable
import net.corda.core.InputStreamAndHash
import net.corda.core.crypto.SecureHash
import net.corda.core.flows.*
import net.corda.core.identity.Party
import net.corda.core.messaging.startFlow
import net.corda.core.transactions.SignedTransaction
import net.corda.core.transactions.TransactionBuilder
import net.corda.core.utilities.unwrap
import net.corda.testing.BOB
import net.corda.testing.DUMMY_NOTARY
import net.corda.testing.aliceBobAndNotary
import net.corda.testing.contracts.DummyState
import net.corda.testing.driver.driver
import org.junit.Test
import kotlin.test.assertEquals

/**
 * Check that we can add lots of large attachments to a transaction and that it works OK, e.g. does not hit the
 * transaction size limit (which should only consider the hashes).
 */
class LargeTransactionsTest {
    @StartableByRPC @InitiatingFlow
    class SendLargeTransactionFlow(val hash1: SecureHash, val hash2: SecureHash, val hash3: SecureHash, val hash4: SecureHash) : FlowLogic<Unit>() {
        @Suspendable
        override fun call() {
            val tx = TransactionBuilder(notary = DUMMY_NOTARY)
                    .addOutputState(DummyState())
                    .addAttachment(hash1)
                    .addAttachment(hash2)
                    .addAttachment(hash3)
                    .addAttachment(hash4)
            val stx = serviceHub.signInitialTransaction(tx, serviceHub.legalIdentityKey)
            // Send to the other side and wait for it to trigger resolution from us.
            sendAndReceive<Unit>(serviceHub.networkMapCache.getNodeByLegalName(BOB.name)!!.legalIdentity, stx)
        }
    }

    @InitiatedBy(SendLargeTransactionFlow::class) @Suppress("UNUSED")
    class ReceiveLargeTransactionFlow(private val counterParty: Party) : FlowLogic<Unit>() {
        @Suspendable
        override fun call() {
            val stx = receive<SignedTransaction>(counterParty).unwrap { it }
            subFlow(ResolveTransactionsFlow(stx, counterParty))
            // Unblock the other side by sending some dummy object (Unit is fine here as it's a singleton).
            send(counterParty, Unit)
        }
    }

    @Test
    fun checkCanSendLargeTransactions() {
        // These 4 attachments yield a transaction that's got >10mb attached, so it'd push us over the Artemis
        // max message size.
        val bigFile1 = InputStreamAndHash.createInMemoryTestZip(1024*1024*3, 0)
        val bigFile2 = InputStreamAndHash.createInMemoryTestZip(1024*1024*3, 1)
        val bigFile3 = InputStreamAndHash.createInMemoryTestZip(1024*1024*3, 2)
        val bigFile4 = InputStreamAndHash.createInMemoryTestZip(1024*1024*3, 3)
        driver(startNodesInProcess = true) {
            val (alice, _, _) = aliceBobAndNotary()
            alice.useRPC {
                val hash1 = it.uploadAttachment(bigFile1.inputStream)
                val hash2 = it.uploadAttachment(bigFile2.inputStream)
                val hash3 = it.uploadAttachment(bigFile3.inputStream)
                val hash4 = it.uploadAttachment(bigFile4.inputStream)
                assertEquals(hash1, bigFile1.sha256)
                // Should not throw any exceptions.
                it.startFlow(::SendLargeTransactionFlow, hash1, hash2, hash3, hash4).returnValue.get()
            }
        }
    }
}
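The numbers behind the test comment, as a quick sanity check (the ~10 MB limit is an assumption taken from the comment above, not a value read from Corda's configuration; the transaction itself only carries the 32-byte SHA-256 hashes of the attachments):

// Back-of-the-envelope arithmetic for the sizes the test relies on.
fun main() {
    val oneAttachment = 3L * 1024 * 1024              // each test zip is 3 MB
    val batchedAttachments = 4 * oneAttachment        // ~12 MB if all four were sent in one message
    val assumedMaxMessageBytes = 10L * 1024 * 1024    // assumed Artemis/Corda message size limit
    val hashBytesInTx = 4L * 32                       // four SHA-256 hashes referenced by the transaction

    println("Batched:  $batchedAttachments bytes, over the limit: ${batchedAttachments > assumedMaxMessageBytes}")
    println("Per item: $oneAttachment bytes, over the limit: ${oneAttachment > assumedMaxMessageBytes}")
    println("Hashes carried in the transaction: $hashBytesInTx bytes")
}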
@@ -31,7 +31,6 @@ class FetchTransactionsHandler(otherParty: Party) : FetchDataHandler<SignedTrans
     }
 }

-// TODO: Use Artemis message streaming support here, called "large messages". This avoids the need to buffer.
 class FetchAttachmentsHandler(otherParty: Party) : FetchDataHandler<ByteArray>(otherParty) {
     override fun getData(id: SecureHash): ByteArray? {
         return serviceHub.attachments.openAttachment(id)?.open()?.readBytes()
@@ -46,10 +45,13 @@ abstract class FetchDataHandler<out T>(val otherParty: Party) : FlowLogic<Unit>(
             if (it.hashes.isEmpty()) throw FlowException("Empty hash list")
             it
         }
-        val response = request.hashes.map {
-            getData(it) ?: throw FetchDataFlow.HashNotFound(it)
+        // TODO: Use Artemis message streaming support here, called "large messages". This avoids the need to buffer.
+        // See the discussion in FetchDataFlow. We send each item individually here in a separate asynchronous send
+        // call, and the other side picks them up with a straight receive call, because batching would push us over
+        // the (current) Artemis message size limit.
+        request.hashes.forEach {
+            send(otherParty, getData(it) ?: throw FetchDataFlow.HashNotFound(it))
         }
-        send(otherParty, response)
     }

     protected abstract fun getData(id: SecureHash): T?
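The added comment says the requesting side now picks the items up with one receive per hash rather than a single batched list. A hypothetical sketch of what that counterpart loop could look like with the flow API used elsewhere in this commit (assumed names; not the actual FetchDataFlow implementation, which wraps the hashes in its own request type):

import co.paralleluniverse.fibers.Suspendable
import net.corda.core.crypto.SecureHash
import net.corda.core.flows.FlowLogic
import net.corda.core.identity.Party
import net.corda.core.utilities.unwrap

class DebatchedFetchExample(private val otherParty: Party,
                            private val hashes: List<SecureHash>) : FlowLogic<List<ByteArray>>() {
    @Suspendable
    override fun call(): List<ByteArray> {
        send(otherParty, hashes)  // the real flow sends its own request type containing the hashes; a plain list stands in here
        // One receive per requested item, so no single message has to carry every attachment.
        return hashes.map {
            receive<ByteArray>(otherParty).unwrap { bytes -> bytes }
        }
    }
}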