Mirror of https://github.com/corda/corda.git (synced 2025-06-21 00:23:09 +00:00)
Merge remote-tracking branch 'open/master' into colljos-merge-171117
# Conflicts:
#	.idea/compiler.xml
#	build.gradle
#	client/rpc/src/integration-test/kotlin/net/corda/client/rpc/CordaRPCClientTest.kt
#	docs/source/changelog.rst
#	node/src/integration-test/kotlin/net/corda/node/services/AttachmentLoadingTests.kt
#	node/src/main/kotlin/net/corda/node/internal/StartedNode.kt
#	node/src/main/kotlin/net/corda/node/utilities/registration/HTTPNetworkRegistrationService.kt
#	samples/network-visualiser/build.gradle
#	samples/simm-valuation-demo/src/integration-test/kotlin/net/corda/vega/SimmValuationTest.kt
#	testing/node-driver/src/integration-test/kotlin/net/corda/testing/driver/DriverTests.kt
#	testing/node-driver/src/main/kotlin/net/corda/testing/driver/Driver.kt
#	testing/node-driver/src/main/kotlin/net/corda/testing/node/MockNode.kt
@@ -19,10 +19,10 @@ import net.corda.testing.IntegrationTest
 import net.corda.testing.driver.NodeHandle
 import net.corda.testing.driver.driver
 import net.corda.testing.node.NotarySpec
-import net.corda.testing.performance.div
-import net.corda.testing.performance.startPublishingFixedRateInjector
-import net.corda.testing.performance.startReporter
-import net.corda.testing.performance.startTightLoopInjector
+import net.corda.testing.internal.performance.div
+import net.corda.testing.internal.performance.startPublishingFixedRateInjector
+import net.corda.testing.internal.performance.startReporter
+import net.corda.testing.internal.performance.startTightLoopInjector
 import org.junit.Before
 import org.junit.Ignore
 import org.junit.Test
@@ -17,6 +17,7 @@ import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.loggerFor
import net.corda.node.internal.cordapp.CordappLoader
import net.corda.node.internal.cordapp.CordappProviderImpl
import net.corda.testing.*
import net.corda.testing.DUMMY_BANK_A
import net.corda.testing.DUMMY_NOTARY
import net.corda.testing.SerializationEnvironmentRule
@@ -27,17 +28,12 @@ import net.corda.testing.driver.driver
import net.corda.testing.node.MockServices
import org.junit.Assert.assertEquals
import org.junit.Before
import org.junit.Rule
import org.junit.Test
import java.net.URLClassLoader
import java.nio.file.Files
import kotlin.test.assertFailsWith

class AttachmentLoadingTests : IntegrationTest() {
    @Rule
    @JvmField
    val testSerialization = SerializationEnvironmentRule()

    private class Services : MockServices() {
        private val provider = CordappProviderImpl(CordappLoader.createDevMode(listOf(isolatedJAR)), attachments)
        private val cordapp get() = provider.cordapps.first()
@@ -85,7 +81,7 @@ class AttachmentLoadingTests : IntegrationTest() {
    }

    @Test
-   fun `test a wire transaction has loaded the correct attachment`() {
+   fun `test a wire transaction has loaded the correct attachment`() = withTestSerialization {
        val appClassLoader = services.appContext.classLoader
        val contractClass = appClassLoader.loadClass(ISOLATED_CONTRACT_ID).asSubclass(Contract::class.java)
        val generateInitialMethod = contractClass.getDeclaredMethod("generateInitial", PartyAndReference::class.java, Integer.TYPE, Party::class.java)
@@ -103,7 +99,7 @@ class AttachmentLoadingTests : IntegrationTest() {

    @Test
    fun `test that attachments retrieved over the network are not used for code`() {
-       driver(initialiseSerialization = false) {
+       driver {
            installIsolatedCordappTo(bankAName)
            val (bankA, bankB) = createTwoNodes()
            assertFailsWith<UnexpectedFlowEndException>("Party C=CH,L=Zurich,O=BankB rejected session request: Don't know net.corda.finance.contracts.isolated.IsolatedDummyFlow\$Initiator") {
@@ -114,7 +110,7 @@ class AttachmentLoadingTests : IntegrationTest() {

    @Test
    fun `tests that if the attachment is loaded on both sides already that a flow can run`() {
-       driver(initialiseSerialization = false) {
+       driver {
            installIsolatedCordappTo(bankAName)
            installIsolatedCordappTo(bankBName)
            val (bankA, bankB) = createTwoNodes()
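The driver calls above drop `initialiseSerialization = false` and the test class instead carries a `SerializationEnvironmentRule`. A minimal self-contained sketch of how such a JUnit rule can wrap each test in a set-up/tear-down pair; the environment type here is a stand-in, not Corda's implementation:

    import org.junit.rules.TestRule
    import org.junit.runner.Description
    import org.junit.runners.model.Statement

    // Stand-in for whatever the rule manages; the real SerializationEnvironmentRule wires up the
    // node's serialization machinery instead of printing.
    class FakeSerializationEnvironment {
        fun install() = println("serialization environment installed")
        fun remove() = println("serialization environment removed")
    }

    // Minimal JUnit TestRule sketch: install the environment before each test and remove it after,
    // which is the job the `testSerialization` rule above performs for serialization.
    class SerializationEnvironmentRuleSketch : TestRule {
        val env = FakeSerializationEnvironment()
        override fun apply(base: Statement, description: Description): Statement = object : Statement() {
            override fun evaluate() {
                env.install()
                try {
                    base.evaluate()
                } finally {
                    env.remove()
                }
            }
        }
    }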
@@ -13,8 +13,6 @@ import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
import net.corda.core.internal.deleteIfExists
import net.corda.core.internal.div
import net.corda.core.node.NotaryInfo
import net.corda.core.node.services.NotaryService
import net.corda.core.transactions.SignedTransaction
import net.corda.core.transactions.TransactionBuilder
import net.corda.core.utilities.NetworkHostAndPort
@@ -23,18 +21,18 @@ import net.corda.core.utilities.getOrThrow
import net.corda.node.internal.StartedNode
import net.corda.node.services.config.BFTSMaRtConfiguration
import net.corda.node.services.config.NotaryConfig
import net.corda.node.services.transactions.BFTNonValidatingNotaryService
import net.corda.node.services.transactions.minClusterSize
import net.corda.node.services.transactions.minCorrectReplicas
import net.corda.node.utilities.ServiceIdentityGenerator
import net.corda.testing.IntegrationTest
import net.corda.testing.chooseIdentity
import net.corda.testing.common.internal.NetworkParametersCopier
import net.corda.testing.common.internal.testNetworkParameters
import net.corda.testing.contracts.DummyContract
import net.corda.testing.dummyCommand
import net.corda.testing.node.MockNetwork
import net.corda.testing.node.MockNetwork.MockNode
import net.corda.testing.node.MockNodeParameters
import net.corda.testing.startFlow
import org.junit.After
import org.junit.Before
import org.junit.Test
@@ -63,26 +61,19 @@ class BFTNotaryServiceTests : IntegrationTest() {

        notary = ServiceIdentityGenerator.generateToDisk(
                replicaIds.map { mockNet.baseDirectory(mockNet.nextNodeId + it) },
-               CordaX500Name("BFT", "Zurich", "CH"),
-               NotaryService.constructId(validating = false, bft = true))
-
-       val networkParameters = NetworkParametersCopier(testNetworkParameters(listOf(NotaryInfo(notary, false))))
+               CordaX500Name(BFTNonValidatingNotaryService.id, "BFT", "Zurich", "CH")
+       )

        val clusterAddresses = replicaIds.map { NetworkHostAndPort("localhost", 11000 + it * 10) }

-       val nodes = replicaIds.map { replicaId ->
-           mockNet.createUnstartedNode(MockNodeParameters(configOverrides = {
+       replicaIds.forEach { replicaId ->
+           mockNet.createNode(MockNodeParameters(configOverrides = {
                val notary = NotaryConfig(validating = false, bftSMaRt = BFTSMaRtConfiguration(replicaId, clusterAddresses, exposeRaces = exposeRaces))
                doReturn(notary).whenever(it).notary
            }))
-       } + mockNet.createUnstartedNode()
+       }

-       // MockNetwork doesn't support BFT clusters, so we create all the nodes we need unstarted, and then install the
-       // network-parameters in their directories before they're started.
-       node = nodes.map { node ->
-           networkParameters.install(mockNet.baseDirectory(node.id))
-           node.start()
-       }.last()
+       node = mockNet.createNode()
    }

    /** Failure mode is the redundant replica gets stuck in startup, so we can't dispose it cleanly at the end. */
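The rewritten setup derives every replica's directory and cluster address from its replica id before creating the mock nodes. A small stand-alone sketch of that derivation, using the same port scheme as the test (11000 + replicaId * 10); the directory layout and function names are illustrative assumptions, not Corda APIs:

    import java.nio.file.Path
    import java.nio.file.Paths

    // Compute per-replica node directories and BFT cluster addresses for a cluster of the given size.
    fun bftClusterLayout(baseDir: Path, clusterSize: Int, firstNodeId: Int): Pair<List<Path>, List<String>> {
        val replicaIds = 0 until clusterSize
        val nodeDirectories = replicaIds.map { baseDir.resolve("nodes").resolve((firstNodeId + it).toString()) }
        val clusterAddresses = replicaIds.map { "localhost:${11000 + it * 10}" }
        return Pair(nodeDirectories, clusterAddresses)
    }

    fun main() {
        val (dirs, addresses) = bftClusterLayout(Paths.get("build", "mock-network"), clusterSize = 4, firstNodeId = 1)
        dirs.zip(addresses).forEach { (dir, address) -> println("$address -> $dir") }
    }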
@@ -13,6 +13,7 @@ import net.corda.finance.flows.CashIssueFlow
import net.corda.finance.flows.CashPaymentFlow
import net.corda.node.services.Permissions.Companion.invokeRpc
import net.corda.node.services.Permissions.Companion.startFlow
+import net.corda.node.services.transactions.RaftValidatingNotaryService
import net.corda.nodeapi.User
import net.corda.testing.*
import net.corda.testing.driver.NodeHandle
@@ -41,7 +42,7 @@ class DistributedServiceTests : IntegrationTest() {

        driver(
                extraCordappPackagesToScan = listOf("net.corda.finance.contracts"),
-               notarySpecs = listOf(NotarySpec(DUMMY_NOTARY.name, rpcUsers = listOf(testUser), cluster = ClusterSpec.Raft(clusterSize = 3))))
+               notarySpecs = listOf(NotarySpec(DUMMY_NOTARY.name.copy(commonName = RaftValidatingNotaryService.id), rpcUsers = listOf(testUser), cluster = ClusterSpec.Raft(clusterSize = 3))))
        {
            alice = startNode(providedName = ALICE.name, rpcUsers = listOf(testUser)).getOrThrow()
            raftNotaryIdentity = defaultNotaryIdentity
@@ -19,6 +19,7 @@ import net.corda.testing.contracts.DummyContract
import net.corda.testing.driver.NodeHandle
import net.corda.testing.driver.driver
import net.corda.testing.dummyCommand
import net.corda.testing.startFlow
import net.corda.testing.node.ClusterSpec
import net.corda.testing.node.NotarySpec
import org.junit.Test
@@ -3,18 +3,22 @@ package net.corda.node.services.network
import com.google.common.jimfs.Configuration
import com.google.common.jimfs.Jimfs
import net.corda.cordform.CordformNode
import net.corda.core.crypto.SignedData
import net.corda.core.internal.createDirectories
import net.corda.core.internal.div
import net.corda.core.node.NodeInfo
import net.corda.core.node.services.KeyManagementService
import net.corda.core.serialization.serialize
import net.corda.node.services.identity.InMemoryIdentityService
import net.corda.nodeapi.NodeInfoFilesCopier
import net.corda.testing.ALICE
import net.corda.testing.ALICE_KEY
import net.corda.testing.getTestPartyAndCertificate
import net.corda.testing.internal.NodeBasedTest
import net.corda.testing.*
import net.corda.testing.node.MockKeyManagementService
import org.assertj.core.api.Assertions.assertThat
import org.assertj.core.api.Assertions.contentOf
import org.junit.Before
import org.junit.Rule
import org.junit.Test
import org.junit.rules.TemporaryFolder
import rx.observers.TestSubscriber
import rx.schedulers.TestScheduler
import java.nio.file.Path
@@ -22,20 +26,29 @@ import java.util.concurrent.TimeUnit
import kotlin.test.assertEquals
import kotlin.test.assertTrue

-class NodeInfoWatcherTest : NodeBasedTest() {
+class NodeInfoWatcherTest {
    companion object {
        val nodeInfo = NodeInfo(listOf(), listOf(getTestPartyAndCertificate(ALICE)), 0, 0)
    }

    @Rule
    @JvmField
    val testSerialization = SerializationEnvironmentRule()
    @Rule
    @JvmField
    val tempFolder = TemporaryFolder()
    private lateinit var nodeInfoPath: Path
    private val scheduler = TestScheduler()
    private val testSubscriber = TestSubscriber<NodeInfo>()
    private lateinit var keyManagementService: KeyManagementService

    // Object under test
    private lateinit var nodeInfoWatcher: NodeInfoWatcher

    @Before
    fun start() {
        val identityService = InMemoryIdentityService(trustRoot = DEV_TRUST_ROOT)
        keyManagementService = MockKeyManagementService(identityService, ALICE_KEY)
        nodeInfoWatcher = NodeInfoWatcher(tempFolder.root.toPath(), scheduler = scheduler)
        nodeInfoPath = tempFolder.root.toPath() / CordformNode.NODE_INFO_DIRECTORY
    }
@@ -44,7 +57,8 @@ class NodeInfoWatcherTest : NodeBasedTest() {
    fun `save a NodeInfo`() {
        assertEquals(0,
                tempFolder.root.list().filter { it.startsWith(NodeInfoFilesCopier.NODE_INFO_FILE_NAME_PREFIX) }.size)
-       NodeInfoWatcher.saveToFile(tempFolder.root.toPath(), nodeInfo, ALICE_KEY)
+       val signedNodeInfo = SignedData(nodeInfo.serialize(), keyManagementService.sign(nodeInfo.serialize().bytes, nodeInfo.legalIdentities.first().owningKey))
+       NodeInfoWatcher.saveToFile(tempFolder.root.toPath(), signedNodeInfo)

        val nodeInfoFiles = tempFolder.root.list().filter { it.startsWith(NodeInfoFilesCopier.NODE_INFO_FILE_NAME_PREFIX) }
        assertEquals(1, nodeInfoFiles.size)
@@ -59,7 +73,8 @@ class NodeInfoWatcherTest : NodeBasedTest() {
    fun `save a NodeInfo to JimFs`() {
        val jimFs = Jimfs.newFileSystem(Configuration.unix())
        val jimFolder = jimFs.getPath("/nodeInfo")
-       NodeInfoWatcher.saveToFile(jimFolder, nodeInfo, ALICE_KEY)
+       val signedNodeInfo = SignedData(nodeInfo.serialize(), keyManagementService.sign(nodeInfo.serialize().bytes, nodeInfo.legalIdentities.first().owningKey))
+       NodeInfoWatcher.saveToFile(jimFolder, signedNodeInfo)
    }

    @Test
@@ -128,6 +143,7 @@ class NodeInfoWatcherTest : NodeBasedTest() {

    // Write a nodeInfo under the right path.
    private fun createNodeInfoFileInPath(nodeInfo: NodeInfo) {
-       NodeInfoWatcher.saveToFile(nodeInfoPath, nodeInfo, ALICE_KEY)
+       val signedNodeInfo = SignedData(nodeInfo.serialize(), keyManagementService.sign(nodeInfo.serialize().bytes, nodeInfo.legalIdentities.first().owningKey))
+       NodeInfoWatcher.saveToFile(nodeInfoPath, signedNodeInfo)
    }
}
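All three call sites above now build a SignedData<NodeInfo> the same way: serialize the node info, sign the bytes with the legal identity's key, and hand the pair to saveToFile. That shared shape could be captured in a helper like the sketch below, which reuses only the types and calls visible in the diff (their exact signatures are assumed from that usage, and a serialization environment such as the rule above must be in place at runtime):

    import net.corda.core.crypto.SignedData
    import net.corda.core.node.NodeInfo
    import net.corda.core.node.services.KeyManagementService
    import net.corda.core.serialization.serialize

    // Serialize the NodeInfo, sign the serialized bytes with the first legal identity's key,
    // and wrap both into a SignedData ready for NodeInfoWatcher.saveToFile(...).
    fun signNodeInfo(nodeInfo: NodeInfo, keyManagementService: KeyManagementService): SignedData<NodeInfo> {
        val serialised = nodeInfo.serialize()
        val signature = keyManagementService.sign(serialised.bytes, nodeInfo.legalIdentities.first().owningKey)
        return SignedData(serialised, signature)
    }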
@@ -11,6 +11,7 @@ import net.corda.testing.ALICE
import net.corda.testing.BOB
import net.corda.testing.chooseIdentity
import net.corda.testing.internal.NodeBasedTest
import net.corda.testing.startFlow
import org.assertj.core.api.Assertions.assertThat
import org.junit.Test

@@ -3,8 +3,8 @@ package net.corda.services.messaging
import net.corda.core.crypto.Crypto
import net.corda.core.internal.*
import net.corda.node.utilities.*
-import net.corda.nodeapi.ArtemisMessagingComponent.Companion.NODE_USER
-import net.corda.nodeapi.ArtemisMessagingComponent.Companion.PEER_USER
+import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.NODE_USER
+import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.PEER_USER
import net.corda.nodeapi.RPCApi
import net.corda.nodeapi.config.SSLConfiguration
import net.corda.testing.MEGA_CORP
@@ -2,6 +2,7 @@ package net.corda.services.messaging

import co.paralleluniverse.fibers.Suspendable
import net.corda.client.rpc.CordaRPCClient
+import net.corda.client.rpc.CordaRPCConnection
import net.corda.core.crypto.generateKeyPair
import net.corda.core.crypto.random63BitValue
import net.corda.core.flows.FlowLogic
@@ -16,18 +17,14 @@ import net.corda.core.utilities.toBase58String
import net.corda.core.utilities.unwrap
import net.corda.node.internal.Node
import net.corda.node.internal.StartedNode
-import net.corda.nodeapi.ArtemisMessagingComponent.Companion.INTERNAL_PREFIX
-import net.corda.nodeapi.ArtemisMessagingComponent.Companion.NETWORK_MAP_QUEUE
-import net.corda.nodeapi.ArtemisMessagingComponent.Companion.NOTIFICATIONS_ADDRESS
-import net.corda.nodeapi.ArtemisMessagingComponent.Companion.P2P_QUEUE
-import net.corda.nodeapi.ArtemisMessagingComponent.Companion.PEERS_PREFIX
+import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.INTERNAL_PREFIX
+import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.NOTIFICATIONS_ADDRESS
+import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.P2P_QUEUE
+import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.PEERS_PREFIX
import net.corda.nodeapi.RPCApi
import net.corda.nodeapi.User
import net.corda.nodeapi.config.SSLConfiguration
-import net.corda.testing.ALICE
-import net.corda.testing.BOB
-import net.corda.testing.chooseIdentity
-import net.corda.testing.configureTestSSL
+import net.corda.testing.*
import net.corda.testing.internal.NodeBasedTest
import net.corda.testing.messaging.SimpleMQClient
import org.apache.activemq.artemis.api.core.ActiveMQNonExistentQueueException
@@ -97,16 +94,6 @@ abstract class MQSecurityTest : NodeBasedTest() {
        assertAllQueueCreationAttacksFail(invalidPeerQueue)
    }

-   @Test
-   fun `consume message from network map queue`() {
-       assertConsumeAttackFails(NETWORK_MAP_QUEUE)
-   }
-
-   @Test
-   fun `send message to network map address`() {
-       assertSendAttackFails(NETWORK_MAP_QUEUE)
-   }
-
    @Test
    fun `consume message from RPC requests queue`() {
        assertConsumeAttackFails(RPCApi.RPC_SERVER_QUEUE_NAME)
@@ -153,8 +140,14 @@ abstract class MQSecurityTest : NodeBasedTest() {
        return client
    }

-   fun loginToRPC(target: NetworkHostAndPort, rpcUser: User): CordaRPCOps {
-       return CordaRPCClient(target).start(rpcUser.username, rpcUser.password).proxy
+   private val rpcConnections = mutableListOf<CordaRPCConnection>()
+
+   private fun loginToRPC(target: NetworkHostAndPort, rpcUser: User): CordaRPCOps {
+       return CordaRPCClient(target).start(rpcUser.username, rpcUser.password).also { rpcConnections.add(it) }.proxy
    }

+   @After
+   fun closeRPCConnections() {
+       rpcConnections.forEach { it.forceClose() }
+   }

    fun loginToRPCAndGetClientQueue(): String {
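The reworked loginToRPC records every CordaRPCConnection it opens so a single @After hook can force-close them all. The same track-then-close-in-teardown pattern, reduced to plain JUnit and Closeable so it stands alone (only the shape is taken from the diff):

    import org.junit.After
    import java.io.Closeable

    abstract class ResourceTrackingTest {
        private val openResources = mutableListOf<Closeable>()

        // Register a freshly opened resource so teardown can close it, then hand it back to the caller.
        protected fun <T : Closeable> track(resource: T): T = resource.also { openResources.add(it) }

        @After
        fun closeTrackedResources() {
            openResources.forEach { it.close() }
            openResources.clear()
        }
    }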
@@ -4,6 +4,7 @@ import joptsimple.OptionParser
import joptsimple.util.EnumConverter
import net.corda.core.internal.div
import net.corda.node.services.config.ConfigHelper
+import net.corda.node.services.config.NodeConfiguration
import net.corda.node.services.config.parseAsNodeConfiguration
import org.slf4j.event.Level
import java.io.PrintStream
@@ -35,6 +36,7 @@ class ArgsParser {
    private val isVersionArg = optionParser.accepts("version", "Print the version and exit")
    private val justGenerateNodeInfoArg = optionParser.accepts("just-generate-node-info",
            "Perform the node start-up task necessary to generate its nodeInfo, save it to disk, then quit")
+   private val bootstrapRaftClusterArg = optionParser.accepts("bootstrap-raft-cluster", "Bootstraps Raft cluster. The node forms a single node cluster (ignoring otherwise configured peer addresses), acting as a seed for other nodes to join the cluster.")
    private val helpArg = optionParser.accepts("help").forHelp()

    fun parse(vararg args: String): CmdLineOptions {
@@ -52,8 +54,9 @@ class ArgsParser {
        val noLocalShell = optionSet.has(noLocalShellArg)
        val sshdServer = optionSet.has(sshdServerArg)
        val justGenerateNodeInfo = optionSet.has(justGenerateNodeInfoArg)
+       val bootstrapRaftCluster = optionSet.has(bootstrapRaftClusterArg)
        return CmdLineOptions(baseDirectory, configFile, help, loggingLevel, logToConsole, isRegistration, isVersion,
-               noLocalShell, sshdServer, justGenerateNodeInfo)
+               noLocalShell, sshdServer, justGenerateNodeInfo, bootstrapRaftCluster)
    }

    fun printHelp(sink: PrintStream) = optionParser.printHelpOn(sink)
@@ -68,7 +71,13 @@ data class CmdLineOptions(val baseDirectory: Path,
                          val isVersion: Boolean,
                          val noLocalShell: Boolean,
                          val sshdServer: Boolean,
-                         val justGenerateNodeInfo: Boolean) {
-   fun loadConfig() = ConfigHelper
-           .loadConfig(baseDirectory, configFile).parseAsNodeConfiguration()
+                         val justGenerateNodeInfo: Boolean,
+                         val bootstrapRaftCluster: Boolean) {
+   fun loadConfig(): NodeConfiguration {
+       val config = ConfigHelper.loadConfig(baseDirectory, configFile).parseAsNodeConfiguration()
+       if (isRegistration) {
+           requireNotNull(config.compatibilityZoneURL) { "Compatibility Zone Url must be provided in registration mode." }
+       }
+       return config
+   }
}
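The new --bootstrap-raft-cluster switch is a plain boolean option: declare it on the parser, then test for its presence after parsing. A runnable sketch of just that wiring (the surrounding ArgsParser/CmdLineOptions plumbing is left out):

    import joptsimple.OptionParser

    fun main(args: Array<String>) {
        val optionParser = OptionParser()
        val bootstrapRaftClusterArg = optionParser.accepts(
                "bootstrap-raft-cluster",
                "Bootstraps Raft cluster: the node forms a single-node cluster, acting as a seed for other nodes to join.")
        val optionSet = optionParser.parse(*args)
        // has(...) is true only when the flag was passed on the command line.
        val bootstrapRaftCluster = optionSet.has(bootstrapRaftClusterArg)
        println("bootstrap-raft-cluster = $bootstrapRaftCluster")
    }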
@@ -7,7 +7,9 @@ import net.corda.confidential.SwapIdentitiesFlow
import net.corda.confidential.SwapIdentitiesHandler
import net.corda.core.CordaException
import net.corda.core.concurrent.CordaFuture
+import net.corda.core.context.InvocationContext
import net.corda.core.crypto.SignedData
+import net.corda.core.crypto.sign
import net.corda.core.flows.*
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
@@ -21,7 +23,7 @@ import net.corda.core.node.services.*
import net.corda.core.serialization.SerializationWhitelist
import net.corda.core.serialization.SerializeAsToken
import net.corda.core.serialization.SingletonSerializeAsToken
-import net.corda.core.serialization.deserialize
+import net.corda.core.serialization.serialize
import net.corda.core.transactions.SignedTransaction
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.core.utilities.debug
@@ -44,13 +46,8 @@ import net.corda.node.services.events.ScheduledActivityObserver
import net.corda.node.services.identity.PersistentIdentityService
import net.corda.node.services.keys.PersistentKeyManagementService
import net.corda.node.services.messaging.MessagingService
-import net.corda.node.services.network.NetworkMapCacheImpl
-import net.corda.node.services.network.NodeInfoWatcher
-import net.corda.node.services.network.PersistentNetworkMapCache
-import net.corda.node.services.persistence.DBCheckpointStorage
-import net.corda.node.services.persistence.DBTransactionMappingStorage
-import net.corda.node.services.persistence.DBTransactionStorage
-import net.corda.node.services.persistence.NodeAttachmentService
+import net.corda.node.services.network.*
+import net.corda.node.services.persistence.*
import net.corda.node.services.schema.HibernateObserver
import net.corda.node.services.schema.NodeSchemaService
import net.corda.node.services.statemachine.*
@@ -62,6 +59,7 @@ import net.corda.node.utilities.*
import org.apache.activemq.artemis.utils.ReusableLatch
import org.slf4j.Logger
import rx.Observable
import rx.subjects.PublishSubject
import java.io.IOException
import java.lang.reflect.InvocationTargetException
import java.security.KeyPair
@@ -71,6 +69,7 @@ import java.security.cert.CertificateFactory
import java.security.cert.X509Certificate
import java.sql.Connection
import java.time.Clock
+import java.time.Duration
import java.util.*
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.ExecutorService
@@ -86,8 +85,6 @@ import net.corda.core.crypto.generateKeyPair as cryptoGenerateKeyPair
 * Marked as SingletonSerializeAsToken to prevent the invisible reference to AbstractNode in the ServiceHub accidentally
 * sweeping up the Node into the Kryo checkpoint serialization via any flows holding a reference to ServiceHub.
 */
-// TODO Log warning if this node is a notary but not one of the ones specified in the network parameters, both for core and custom

// In theory the NodeInfo for the node should be passed in, instead, however currently this is constructed by the
// AbstractNode. It should be possible to generate the NodeInfo outside of AbstractNode, so it can be passed in.
abstract class AbstractNode(val configuration: NodeConfiguration,
@@ -107,7 +104,7 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
                            override val database: CordaPersistence,
                            override val rpcOps: CordaRPCOps,
                            flowStarter: FlowStarter,
-                           internal val schedulerService: NodeSchedulerService) : StartedNode<N> {
+                           override val notaryService: NotaryService?) : StartedNode<N> {
    override val services: StartedNodeServices = object : StartedNodeServices, ServiceHubInternal by services, FlowStarter by flowStarter {}
}

@@ -117,13 +114,12 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
    // low-performance prototyping period.
    protected abstract val serverThread: AffinityExecutor

-   protected lateinit var networkParameters: NetworkParameters
    private val cordappServices = MutableClassToInstanceMap.create<SerializeAsToken>()
    private val flowFactories = ConcurrentHashMap<Class<out FlowLogic<*>>, InitiatedFlowFactory<*>>()

    protected val services: ServiceHubInternal get() = _services
    private lateinit var _services: ServiceHubInternalImpl
    protected lateinit var info: NodeInfo
    protected val nodeStateObservable: PublishSubject<NodeState> = PublishSubject.create<NodeState>()
    protected var myNotaryIdentity: PartyAndCertificate? = null
    protected lateinit var checkpointStorage: CheckpointStorage
    protected lateinit var smm: StateMachineManager
@@ -131,8 +127,9 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
    protected lateinit var attachments: NodeAttachmentService
    protected lateinit var network: MessagingService
    protected val runOnStop = ArrayList<() -> Any?>()
-   protected lateinit var database: CordaPersistence
    protected val _nodeReadyFuture = openFuture<Unit>()
+   protected val networkMapClient: NetworkMapClient? by lazy { configuration.compatibilityZoneURL?.let(::NetworkMapClient) }

    /** Completes once the node has successfully registered with the network map service
     * or has loaded network map data from local database */
    val nodeReadyFuture: CordaFuture<Unit>
@@ -152,7 +149,7 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
    @Volatile private var _started: StartedNode<AbstractNode>? = null

    /** The implementation of the [CordaRPCOps] interface used by this node. */
-   open fun makeRPCOps(flowStarter: FlowStarter): CordaRPCOps {
+   open fun makeRPCOps(flowStarter: FlowStarter, database: CordaPersistence): CordaRPCOps {
        return SecureCordaRPCOps(services, smm, database, flowStarter)
    }

@@ -168,26 +165,31 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
        check(started == null) { "Node has already been started" }
        log.info("Generating nodeInfo ...")
        initCertificate()
-       initNodeInfo()
+       val (keyPairs, info) = initNodeInfo()
+       val identityKeypair = keyPairs.first { it.public == info.legalIdentities.first().owningKey }
+       val serialisedNodeInfo = info.serialize()
+       val signature = identityKeypair.sign(serialisedNodeInfo)
+       // TODO: Signed data might not be sufficient for multiple identities, as it only contains one signature.
+       NodeInfoWatcher.saveToFile(configuration.baseDirectory, SignedData(serialisedNodeInfo, signature))
    }

    open fun start(): StartedNode<AbstractNode> {
        check(started == null) { "Node has already been started" }
        log.info("Node starting up ...")
        initCertificate()
-       val keyPairs = initNodeInfo()
-       readNetworkParameters()
+       val (keyPairs, info) = initNodeInfo()
        val schemaService = NodeSchemaService(cordappLoader)
        // Do all of this in a database transaction so anything that might need a connection has one.
-       val startedImpl = initialiseDatabasePersistence(schemaService) {
-           val transactionStorage = makeTransactionStorage()
+       val (startedImpl, schedulerService) = initialiseDatabasePersistence(schemaService) { database ->
+           val transactionStorage = makeTransactionStorage(database)
            val stateLoader = StateLoaderImpl(transactionStorage)
-           val services = makeServices(keyPairs, schemaService, transactionStorage, stateLoader)
-           smm = makeStateMachineManager()
+           val nodeServices = makeServices(keyPairs, schemaService, transactionStorage, stateLoader, database, info)
+           val notaryService = makeNotaryService(nodeServices, database)
+           smm = makeStateMachineManager(database)
            val flowStarter = FlowStarterImpl(serverThread, smm)
            val schedulerService = NodeSchedulerService(
                    platformClock,
-                   this@AbstractNode.database,
+                   database,
                    flowStarter,
                    stateLoader,
                    unfinishedSchedules = busyNodeLatch,
@@ -200,19 +202,32 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
                MoreExecutors.shutdownAndAwaitTermination(serverThread as ExecutorService, 50, SECONDS)
                }
            }
-           makeVaultObservers(schedulerService)
-           val rpcOps = makeRPCOps(flowStarter)
+           makeVaultObservers(schedulerService, database.hibernateConfig)
+           val rpcOps = makeRPCOps(flowStarter, database)
            startMessagingService(rpcOps)
            installCoreFlows()
            val cordaServices = installCordaServices(flowStarter)
-           tokenizableServices = services + cordaServices + schedulerService
+           tokenizableServices = nodeServices + cordaServices + schedulerService
            registerCordappFlows()
            _services.rpcFlows += cordappLoader.cordapps.flatMap { it.rpcFlows }
            FlowLogicRefFactoryImpl.classloader = cordappLoader.appClassLoader

            runOnStop += network::stop
-           StartedNodeImpl(this, _services, info, checkpointStorage, smm, attachments, network, database, rpcOps, flowStarter, schedulerService)
+           Pair(StartedNodeImpl(this, _services, info, checkpointStorage, smm, attachments, network, database, rpcOps, flowStarter, notaryService), schedulerService)
        }

+       val networkMapUpdater = NetworkMapUpdater(services.networkMapCache,
+               NodeInfoWatcher(configuration.baseDirectory, Duration.ofMillis(configuration.additionalNodeInfoPollingFrequencyMsec)),
+               networkMapClient)
+       runOnStop += networkMapUpdater::close
+
+       networkMapUpdater.updateNodeInfo(services.myInfo) {
+           val serialisedNodeInfo = it.serialize()
+           val signature = services.keyManagementService.sign(serialisedNodeInfo.bytes, it.legalIdentities.first().owningKey)
+           SignedData(serialisedNodeInfo, signature)
+       }
+       networkMapUpdater.subscribeToNetworkMap()
+
        // If we successfully loaded network data from database, we set this future to Unit.
        services.networkMapCache.addNode(info)
        _nodeReadyFuture.captureLater(services.networkMapCache.nodeReady.map { Unit })
@@ -228,36 +243,26 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
        }
    }

-   private fun initNodeInfo(): Set<KeyPair> {
+   private fun initNodeInfo(): Pair<Set<KeyPair>, NodeInfo> {
        val (identity, identityKeyPair) = obtainIdentity(notaryConfig = null)
        val keyPairs = mutableSetOf(identityKeyPair)

        myNotaryIdentity = configuration.notary?.let {
-           if (it.isClusterConfig) {
-               val (notaryIdentity, notaryIdentityKeyPair) = obtainIdentity(it)
-               keyPairs += notaryIdentityKeyPair
-               notaryIdentity
-           } else {
-               // In case of a single notary service myNotaryIdentity will be the node's single identity.
-               identity
-           }
+           val (notaryIdentity, notaryIdentityKeyPair) = obtainIdentity(it)
+           keyPairs += notaryIdentityKeyPair
+           notaryIdentity
        }

-       info = NodeInfo(
+       val info = NodeInfo(
                myAddresses(),
-               setOf(identity, myNotaryIdentity).filterNotNull(),
+               listOf(identity, myNotaryIdentity).filterNotNull(),
                versionInfo.platformVersion,
                platformClock.instant().toEpochMilli()
        )
-
-       NodeInfoWatcher.saveToFile(configuration.baseDirectory, info, identityKeyPair)
-
-       return keyPairs
+       return Pair(keyPairs, info)
    }

    protected abstract fun myAddresses(): List<NetworkHostAndPort>

-   protected open fun makeStateMachineManager(): StateMachineManager {
+   protected open fun makeStateMachineManager(database: CordaPersistence): StateMachineManager {
        return StateMachineManagerImpl(
                services,
                checkpointStorage,
@@ -329,8 +334,10 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
        private fun <T> startFlowChecked(flow: FlowLogic<T>): FlowStateMachine<T> {
            val logicType = flow.javaClass
            require(logicType.isAnnotationPresent(StartableByService::class.java)) { "${logicType.name} was not designed for starting by a CordaService" }
-           val currentUser = FlowInitiator.Service(serviceInstance.javaClass.name)
-           return flowStarter.startFlow(flow, currentUser).getOrThrow()
+           // TODO check service permissions
+           // TODO switch from myInfo.legalIdentities[0].name to current node's identity as soon as available
+           val context = InvocationContext.service(serviceInstance.javaClass.name, myInfo.legalIdentities[0].name)
+           return flowStarter.startFlow(flow, context).getOrThrow()
        }

        override fun equals(other: Any?): Boolean {
@@ -375,12 +382,6 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
        return service
    }

-   fun <T : Any> findTokenizableService(clazz: Class<T>): T? {
-       return tokenizableServices.firstOrNull { clazz.isAssignableFrom(it.javaClass) }?.let { uncheckedCast(it) }
-   }
-
-   inline fun <reified T : Any> findTokenizableService() = findTokenizableService(T::class.java)
-
    private fun handleCustomNotaryService(service: NotaryService) {
        runOnStop += service::stop
        service.start()
@@ -483,12 +484,12 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
     * Builds node internal, advertised, and plugin services.
     * Returns a list of tokenizable services to be added to the serialisation context.
     */
-   private fun makeServices(keyPairs: Set<KeyPair>, schemaService: SchemaService, transactionStorage: WritableTransactionStorage, stateLoader: StateLoader): MutableList<Any> {
+   private fun makeServices(keyPairs: Set<KeyPair>, schemaService: SchemaService, transactionStorage: WritableTransactionStorage, stateLoader: StateLoader, database: CordaPersistence, info: NodeInfo): MutableList<Any> {
        checkpointStorage = DBCheckpointStorage()
        val metrics = MetricRegistry()
        attachments = NodeAttachmentService(metrics)
        val cordappProvider = CordappProviderImpl(cordappLoader, attachments)
-       val identityService = makeIdentityService()
+       val identityService = makeIdentityService(info)
        val keyManagementService = makeKeyManagementService(identityService, keyPairs)
        _services = ServiceHubInternalImpl(
                identityService,
@@ -497,23 +498,23 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
                transactionStorage,
                stateLoader,
                MonitoringService(metrics),
-               cordappProvider)
-       network = makeMessagingService()
+               cordappProvider,
+               database,
+               info)
+       network = makeMessagingService(database, info)
        val tokenizableServices = mutableListOf(attachments, network, services.vaultService,
                services.keyManagementService, services.identityService, platformClock,
                services.auditService, services.monitoringService, services.networkMapCache, services.schemaService,
                services.transactionVerifierService, services.validatedTransactions, services.contractUpgradeService,
                services, cordappProvider, this)
        makeNetworkServices(tokenizableServices)
        return tokenizableServices
    }

-   protected open fun makeTransactionStorage(): WritableTransactionStorage = DBTransactionStorage()
-
-   private fun makeVaultObservers(schedulerService: SchedulerService) {
+   protected open fun makeTransactionStorage(database: CordaPersistence): WritableTransactionStorage = DBTransactionStorage()
+   private fun makeVaultObservers(schedulerService: SchedulerService, hibernateConfig: HibernateConfiguration) {
        VaultSoftLockManager.install(services.vaultService, smm)
        ScheduledActivityObserver.install(services.vaultService, schedulerService)
-       HibernateObserver.install(services.vaultService.rawUpdates, database.hibernateConfig)
+       HibernateObserver.install(services.vaultService.rawUpdates, hibernateConfig)
    }

    @VisibleForTesting
@@ -542,31 +543,32 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
    // Specific class so that MockNode can catch it.
    class DatabaseConfigurationException(msg: String) : CordaException(msg)

-   protected open fun <T> initialiseDatabasePersistence(schemaService: SchemaService, insideTransaction: () -> T): T {
+   protected open fun <T> initialiseDatabasePersistence(schemaService: SchemaService, insideTransaction: (CordaPersistence) -> T): T {
        val props = configuration.dataSourceProperties
        if (props.isNotEmpty()) {
-           this.database = configureDatabase(props, configuration.database, { _services.identityService }, schemaService)
+           val database = configureDatabase(props, configuration.database, { _services.identityService }, schemaService)
            // Now log the vendor string as this will also cause a connection to be tested eagerly.
            database.transaction {
                log.info("Connected to ${database.dataSource.connection.metaData.databaseProductName} database.")
            }
            runOnStop += database::close
            return database.transaction {
-               insideTransaction()
+               insideTransaction(database)
            }
        } else {
            throw DatabaseConfigurationException("There must be a database configured.")
        }
    }

-   private fun makeNetworkServices(tokenizableServices: MutableList<Any>) {
-       configuration.notary?.let {
-           val notaryService = makeCoreNotaryService(it)
-           tokenizableServices.add(notaryService)
-           runOnStop += notaryService::stop
-           installCoreFlow(NotaryFlow.Client::class, notaryService::createServiceFlow)
-           log.info("Running core notary: ${notaryService.javaClass.name}")
-           notaryService.start()
+   private fun makeNotaryService(tokenizableServices: MutableList<Any>, database: CordaPersistence): NotaryService? {
+       return configuration.notary?.let {
+           makeCoreNotaryService(it, database).also {
+               tokenizableServices.add(it)
+               runOnStop += it::stop
+               installCoreFlow(NotaryFlow.Client::class, it::createServiceFlow)
+               log.info("Running core notary: ${it.javaClass.name}")
+               it.start()
+           }
        }
    }

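initialiseDatabasePersistence now passes the freshly configured CordaPersistence into the caller's block instead of stashing it in a field first. The shape of that change, reduced to stand-in types so it can run on its own (none of these classes are Corda's):

    // Pretend database: transaction() just runs the block, close() is what runOnStop would call.
    class SketchDatabase : AutoCloseable {
        fun <T> transaction(block: () -> T): T = block()
        override fun close() = println("database closed")
    }

    // The block receives the database it runs against, so callers no longer reach for a shared field.
    fun <T> initialisePersistence(configure: () -> SketchDatabase, insideTransaction: (SketchDatabase) -> T): T {
        val database = configure()
        return database.transaction { insideTransaction(database) }
    }

    fun main() {
        val answer = initialisePersistence({ SketchDatabase() }) { db -> "ran inside a transaction on $db" }
        println(answer)
    }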
@@ -581,31 +583,17 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
        return PersistentKeyManagementService(identityService, keyPairs)
    }

-   private fun readNetworkParameters() {
-       val file = configuration.baseDirectory / "network-parameters"
-       networkParameters = file.readAll().deserialize<SignedData<NetworkParameters>>().verified()
-       log.info(networkParameters.toString())
-       check(networkParameters.minimumPlatformVersion <= versionInfo.platformVersion) { "Node is too old for the network" }
-   }
-
-   private fun makeCoreNotaryService(notaryConfig: NotaryConfig): NotaryService {
+   private fun makeCoreNotaryService(notaryConfig: NotaryConfig, database: CordaPersistence): NotaryService {
        val notaryKey = myNotaryIdentity?.owningKey ?: throw IllegalArgumentException("No notary identity initialized when creating a notary service")
-       return if (notaryConfig.validating) {
-           if (notaryConfig.raft != null) {
-               RaftValidatingNotaryService(services, notaryKey, notaryConfig.raft)
-           } else if (notaryConfig.bftSMaRt != null) {
-               throw IllegalArgumentException("Validating BFTSMaRt notary not supported")
+       return notaryConfig.run {
+           if (raft != null) {
+               val uniquenessProvider = RaftUniquenessProvider(configuration, database, services.monitoringService.metrics, raft)
+               (if (validating) ::RaftValidatingNotaryService else ::RaftNonValidatingNotaryService)(services, notaryKey, uniquenessProvider)
+           } else if (bftSMaRt != null) {
+               if (validating) throw IllegalArgumentException("Validating BFTSMaRt notary not supported")
+               BFTNonValidatingNotaryService(services, notaryKey, bftSMaRt, makeBFTCluster(notaryKey, bftSMaRt))
            } else {
-               ValidatingNotaryService(services, notaryKey)
-           }
-       } else {
-           if (notaryConfig.raft != null) {
-               RaftNonValidatingNotaryService(services, notaryKey, notaryConfig.raft)
-           } else if (notaryConfig.bftSMaRt != null) {
-               val cluster = makeBFTCluster(notaryKey, notaryConfig.bftSMaRt)
-               BFTNonValidatingNotaryService(services, notaryKey, notaryConfig.bftSMaRt, cluster)
-           } else {
-               SimpleNotaryService(services, notaryKey)
+               (if (validating) ::ValidatingNotaryService else ::SimpleNotaryService)(services, notaryKey)
            }
        }
    }
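The rewritten makeCoreNotaryService picks a constructor with a conditional expression and invokes it directly, (if (validating) ::A else ::B)(args). A self-contained sketch of that Kotlin idiom with invented service classes (only the idiom is taken from the diff):

    open class NotarySketch(val description: String)
    class ValidatingSketch(name: String) : NotarySketch("validating:$name")
    class SimpleSketch(name: String) : NotarySketch("simple:$name")

    // The if-expression evaluates to a constructor reference; calling it builds the chosen service.
    fun makeNotary(validating: Boolean, name: String): NotarySketch =
            (if (validating) ::ValidatingSketch else ::SimpleSketch)(name)

    fun main() {
        println(makeNotary(validating = true, name = "Alice").description)   // validating:Alice
        println(makeNotary(validating = false, name = "Bob").description)    // simple:Bob
    }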
@@ -618,7 +606,7 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
        }
    }

-   private fun makeIdentityService(): IdentityService {
+   private fun makeIdentityService(info: NodeInfo): IdentityService {
        val trustStore = KeyStoreWrapper(configuration.trustStoreFile, configuration.trustStorePassword)
        val caKeyStore = KeyStoreWrapper(configuration.nodeKeystore, configuration.keyStorePassword)
        val trustRoot = trustStore.getX509Certificate(X509Utilities.CORDA_ROOT_CA)
@@ -636,6 +624,9 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
        // Meanwhile, we let the remote service send us updates until the acknowledgment buffer overflows and it
        // unsubscribes us forcibly, rather than blocking the shutdown process.

+       // Notify observers that the node is shutting down
+       nodeStateObservable.onNext(NodeState.SHUTTING_DOWN)
+
        // Run shutdown hooks in opposite order to starting
        for (toRun in runOnStop.reversed()) {
            toRun()
@@ -644,23 +635,28 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
        _started = null
    }

-   protected abstract fun makeMessagingService(): MessagingService
-
+   protected abstract fun makeMessagingService(database: CordaPersistence, info: NodeInfo): MessagingService
    protected abstract fun startMessagingService(rpcOps: RPCOps)

    private fun obtainIdentity(notaryConfig: NotaryConfig?): Pair<PartyAndCertificate, KeyPair> {
        val keyStore = KeyStoreWrapper(configuration.nodeKeystore, configuration.keyStorePassword)

-       val (id, singleName) = if (notaryConfig == null || !notaryConfig.isClusterConfig) {
-           // Node's main identity or if it's a single node notary
+       val (id, singleName) = if (notaryConfig == null) {
+           // Node's main identity
            Pair("identity", myLegalName)
        } else {
            val notaryId = notaryConfig.run {
                NotaryService.constructId(validating, raft != null, bftSMaRt != null, custom)
            }
-           // The node is part of a distributed notary whose identity must already be generated beforehand.
-           Pair(notaryId, null)
+           if (!notaryConfig.isClusterConfig) {
+               // Node's notary identity
+               Pair(notaryId, myLegalName.copy(commonName = notaryId))
+           } else {
+               // The node is part of a distributed notary whose identity must already be generated beforehand
+               Pair(notaryId, null)
+           }
        }

        // TODO: Integrate with Key management service?
        val privateKeyAlias = "$id-private-key"

@@ -703,8 +699,8 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
    }

    protected open fun generateKeyPair() = cryptoGenerateKeyPair()
-   protected open fun makeVaultService(keyManagementService: KeyManagementService, stateLoader: StateLoader): VaultServiceInternal {
-       return NodeVaultService(platformClock, keyManagementService, stateLoader, database.hibernateConfig)
+   protected open fun makeVaultService(keyManagementService: KeyManagementService, stateLoader: StateLoader, hibernateConfig: HibernateConfiguration): VaultServiceInternal {
+       return NodeVaultService(platformClock, keyManagementService, stateLoader, hibernateConfig)
    }

private inner class ServiceHubInternalImpl(
|
||||
@ -717,27 +713,21 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
|
||||
override val validatedTransactions: WritableTransactionStorage,
|
||||
private val stateLoader: StateLoader,
|
||||
override val monitoringService: MonitoringService,
|
||||
override val cordappProvider: CordappProviderInternal
|
||||
override val cordappProvider: CordappProviderInternal,
|
||||
override val database: CordaPersistence,
|
||||
override val myInfo: NodeInfo
|
||||
) : SingletonSerializeAsToken(), ServiceHubInternal, StateLoader by stateLoader {
|
||||
override val rpcFlows = ArrayList<Class<out FlowLogic<*>>>()
|
||||
override val stateMachineRecordedTransactionMapping = DBTransactionMappingStorage()
|
||||
override val auditService = DummyAuditService()
|
||||
override val transactionVerifierService by lazy { makeTransactionVerifierService() }
|
||||
override val networkMapCache by lazy {
|
||||
NetworkMapCacheImpl(
|
||||
PersistentNetworkMapCache(
|
||||
this@AbstractNode.database,
|
||||
this@AbstractNode.configuration,
|
||||
networkParameters.notaries),
|
||||
identityService)
|
||||
}
|
||||
override val vaultService by lazy { makeVaultService(keyManagementService, stateLoader) }
|
||||
override val networkMapCache by lazy { NetworkMapCacheImpl(PersistentNetworkMapCache(database), identityService) }
|
||||
override val vaultService by lazy { makeVaultService(keyManagementService, stateLoader, database.hibernateConfig) }
|
||||
override val contractUpgradeService by lazy { ContractUpgradeServiceImpl() }
|
||||
override val attachments: AttachmentStorage get() = this@AbstractNode.attachments
|
||||
override val networkService: MessagingService get() = network
|
||||
override val clock: Clock get() = platformClock
|
||||
override val myInfo: NodeInfo get() = info
|
||||
override val database: CordaPersistence get() = this@AbstractNode.database
|
||||
override val myNodeStateObservable: Observable<NodeState> get() = nodeStateObservable
|
||||
override val configuration: NodeConfiguration get() = this@AbstractNode.configuration
|
||||
override fun <T : SerializeAsToken> cordaService(type: Class<T>): T {
|
||||
require(type.isAnnotationPresent(CordaService::class.java)) { "${type.name} is not a Corda service" }
|
||||
@ -759,12 +749,13 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
|
||||
}
|
||||
|
||||
internal class FlowStarterImpl(private val serverThread: AffinityExecutor, private val smm: StateMachineManager) : FlowStarter {
|
||||
override fun <T> startFlow(logic: FlowLogic<T>, flowInitiator: FlowInitiator, ourIdentity: Party?): CordaFuture<FlowStateMachine<T>> {
|
||||
return serverThread.fetchFrom { smm.startFlow(logic, flowInitiator, ourIdentity) }
|
||||
override fun <T> startFlow(logic: FlowLogic<T>, context: InvocationContext): CordaFuture<FlowStateMachine<T>> {
|
||||
return serverThread.fetchFrom { smm.startFlow(logic, context) }
|
||||
}
|
||||
}
|
||||
|
||||
class ConfigurationException(message: String) : CordaException(message)
|
||||
/**
|
||||
* Thrown when a node is about to start and its network map cache doesn't contain any node.
|
||||
*/
|
||||
internal class NetworkMapCacheEmptyException: Exception()
|
||||
internal class NetworkMapCacheEmptyException : Exception()
|
@@ -2,6 +2,8 @@ package net.corda.node.internal

import net.corda.client.rpc.notUsed
import net.corda.core.concurrent.CordaFuture
+import net.corda.core.context.InvocationContext
+import net.corda.core.context.Origin
import net.corda.core.contracts.ContractState
import net.corda.core.crypto.SecureHash
import net.corda.core.flows.FlowInitiator
@@ -13,16 +15,16 @@ import net.corda.core.identity.Party
import net.corda.core.internal.FlowStateMachine
import net.corda.core.messaging.*
import net.corda.core.node.NodeInfo
import net.corda.core.node.services.AttachmentId
import net.corda.core.node.services.NetworkMapCache
import net.corda.core.node.services.Vault
-import net.corda.core.node.services.vault.PageSpecification
-import net.corda.core.node.services.vault.QueryCriteria
-import net.corda.core.node.services.vault.Sort
+import net.corda.core.node.services.vault.*
import net.corda.core.transactions.SignedTransaction
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.loggerFor
import net.corda.node.services.api.FlowStarter
import net.corda.node.services.api.ServiceHubInternal
-import net.corda.node.services.messaging.rpcContext
+import net.corda.node.services.messaging.context
import net.corda.node.services.statemachine.StateMachineManager
import net.corda.node.utilities.CordaPersistence
import rx.Observable
@@ -115,6 +117,10 @@ internal class CordaRPCOpsImpl(
        return services.myInfo
    }

+   override fun nodeStateObservable(): Observable<NodeState> {
+       return services.myNodeStateObservable
+   }
+
    override fun notaryIdentities(): List<Party> {
        return services.networkMapCache.notaryIdentities
    }
@@ -147,9 +153,7 @@ internal class CordaRPCOpsImpl(

    private fun <T> startFlow(logicType: Class<out FlowLogic<T>>, args: Array<out Any?>): FlowStateMachine<T> {
        require(logicType.isAnnotationPresent(StartableByRPC::class.java)) { "${logicType.name} was not designed for RPC" }
-       val currentUser = FlowInitiator.RPC(rpcContext().currentUser.username)
-       // TODO RPC flows should have mapping user -> identity that should be resolved automatically on starting flow.
-       return flowStarter.invokeFlowAsync(logicType, currentUser, *args).getOrThrow()
+       return flowStarter.invokeFlowAsync(logicType, context(), *args).getOrThrow()
    }

    override fun attachmentExists(id: SecureHash): Boolean {
@@ -173,6 +177,25 @@ internal class CordaRPCOpsImpl(
        }
    }

+   override fun uploadAttachmentWithMetadata(jar: InputStream, uploader:String, filename:String): SecureHash {
+       // TODO: this operation should not require an explicit transaction
+       return database.transaction {
+           services.attachments.importAttachment(jar, uploader, filename)
+       }
+   }
+
+   override fun queryAttachments(query: AttachmentQueryCriteria, sorting: AttachmentSort?): List<AttachmentId> {
+       try {
+           return database.transaction {
+               services.attachments.queryAttachments(query, sorting)
+           }
+       } catch (e: Exception) {
+           // log and rethrow exception so we keep a copy server side
+           log.error(e.message)
+           throw e.cause ?: e
+       }
+   }
+
    override fun currentNodeTime(): Instant = Instant.now(services.clock)

    override fun waitUntilNetworkReady(): CordaFuture<Void?> = services.networkMapCache.nodeReady
@@ -249,16 +272,30 @@ internal class CordaRPCOpsImpl(
        return vaultTrackBy(criteria, PageSpecification(), sorting, contractStateType)
    }

-   companion object {
-       private fun stateMachineInfoFromFlowLogic(flowLogic: FlowLogic<*>): StateMachineInfo {
-           return StateMachineInfo(flowLogic.runId, flowLogic.javaClass.name, flowLogic.stateMachine.flowInitiator, flowLogic.track())
-       }
+   private fun stateMachineInfoFromFlowLogic(flowLogic: FlowLogic<*>): StateMachineInfo {
+       return StateMachineInfo(flowLogic.runId, flowLogic.javaClass.name, flowLogic.stateMachine.context.toFlowInitiator(), flowLogic.track(), flowLogic.stateMachine.context)
+   }

-       private fun stateMachineUpdateFromStateMachineChange(change: StateMachineManager.Change): StateMachineUpdate {
-           return when (change) {
-               is StateMachineManager.Change.Add -> StateMachineUpdate.Added(stateMachineInfoFromFlowLogic(change.logic))
-               is StateMachineManager.Change.Removed -> StateMachineUpdate.Removed(change.logic.runId, change.result)
-           }
+   private fun stateMachineUpdateFromStateMachineChange(change: StateMachineManager.Change): StateMachineUpdate {
+       return when (change) {
+           is StateMachineManager.Change.Add -> StateMachineUpdate.Added(stateMachineInfoFromFlowLogic(change.logic))
+           is StateMachineManager.Change.Removed -> StateMachineUpdate.Removed(change.logic.runId, change.result)
+       }
+   }
+
+   private fun InvocationContext.toFlowInitiator(): FlowInitiator {
+
+       val principal = origin.principal().name
+       return when (origin) {
+           is Origin.RPC -> FlowInitiator.RPC(principal)
+           is Origin.Peer -> services.identityService.wellKnownPartyFromX500Name((origin as Origin.Peer).party)?.let { FlowInitiator.Peer(it) } ?: throw IllegalStateException("Unknown peer with name ${(origin as Origin.Peer).party}.")
+           is Origin.Service -> FlowInitiator.Service(principal)
+           is Origin.Shell -> FlowInitiator.Shell
+           is Origin.Scheduled -> FlowInitiator.Scheduled((origin as Origin.Scheduled).scheduledState)
+       }
+   }
+
+   companion object {
        private val log = loggerFor<CordaRPCOpsImpl>()
    }
}
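toFlowInitiator() above is an exhaustive when over the invocation origin, mapping the new InvocationContext back onto the legacy FlowInitiator values. The same shape with an invented sealed hierarchy, so the exhaustiveness check is visible in isolation (names are illustrative, not Corda's):

    sealed class OriginSketch {
        data class Rpc(val user: String) : OriginSketch()
        data class Peer(val party: String) : OriginSketch()
        object Shell : OriginSketch()
    }

    // Because OriginSketch is sealed, the compiler checks that every origin kind is handled.
    fun describe(origin: OriginSketch): String = when (origin) {
        is OriginSketch.Rpc -> "started over RPC by ${origin.user}"
        is OriginSketch.Peer -> "started by peer ${origin.party}"
        OriginSketch.Shell -> "started from the shell"
    }

    fun main() {
        println(describe(OriginSketch.Rpc("demo-user")))
        println(describe(OriginSketch.Shell))
    }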
@ -1,17 +1,17 @@
|
||||
package net.corda.node.internal
|
||||
|
||||
import com.codahale.metrics.JmxReporter
|
||||
import net.corda.core.CordaException
|
||||
import net.corda.core.concurrent.CordaFuture
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.core.internal.concurrent.openFuture
|
||||
import net.corda.core.internal.concurrent.thenMatch
|
||||
import net.corda.core.internal.uncheckedCast
|
||||
import net.corda.core.messaging.RPCOps
|
||||
import net.corda.core.node.NodeInfo
|
||||
import net.corda.core.node.ServiceHub
|
||||
import net.corda.core.serialization.SerializationDefaults
|
||||
import net.corda.core.utilities.NetworkHostAndPort
|
||||
import net.corda.core.utilities.loggerFor
|
||||
import net.corda.core.serialization.internal.SerializationEnvironmentImpl
|
||||
import net.corda.core.serialization.internal.nodeSerializationEnv
|
||||
import net.corda.node.VersionInfo
|
||||
import net.corda.node.internal.cordapp.CordappLoader
|
||||
import net.corda.node.serialization.KryoServerSerializationScheme
|
||||
@ -24,8 +24,8 @@ import net.corda.node.services.messaging.MessagingService
|
||||
import net.corda.node.services.messaging.NodeMessagingClient
|
||||
import net.corda.node.utilities.AddressUtils
|
||||
import net.corda.node.utilities.AffinityExecutor
|
||||
import net.corda.node.utilities.CordaPersistence
|
||||
import net.corda.node.utilities.DemoClock
|
||||
import net.corda.nodeapi.ArtemisMessagingComponent
|
||||
import net.corda.nodeapi.internal.ShutdownHook
|
||||
import net.corda.nodeapi.internal.addShutdownHook
|
||||
import net.corda.nodeapi.internal.serialization.*
|
||||
@ -128,8 +128,7 @@ open class Node(configuration: NodeConfiguration,
|
||||
private var shutdownHook: ShutdownHook? = null
|
||||
|
||||
private lateinit var userService: RPCUserService
|
||||
|
||||
override fun makeMessagingService(): MessagingService {
|
||||
override fun makeMessagingService(database: CordaPersistence, info: NodeInfo): MessagingService {
|
||||
userService = RPCUserServiceImpl(configuration.rpcUsers)
|
||||
|
||||
val serverAddress = configuration.messagingServerAddress ?: makeLocalMessageBroker()
|
||||
@ -144,7 +143,7 @@ open class Node(configuration: NodeConfiguration,
|
||||
info.legalIdentities[0].owningKey,
|
||||
serverThread,
|
||||
database,
|
||||
services.monitoringService,
|
||||
services.monitoringService.metrics,
|
||||
advertisedAddress)
|
||||
}
|
||||
|
||||
@ -180,15 +179,21 @@ open class Node(configuration: NodeConfiguration,
     * TODO this code used to rely on the networkmap node, we might want to look at a different solution.
     */
    private fun tryDetectIfNotPublicHost(host: String): String? {
        if (!AddressUtils.isPublic(host)) {
        return if (!AddressUtils.isPublic(host)) {
            val foundPublicIP = AddressUtils.tryDetectPublicIP()

            if (foundPublicIP != null) {
            if (foundPublicIP == null) {
                val retrievedHostName = networkMapClient?.myPublicHostname()
                if (retrievedHostName != null) {
                    log.info("Retrieved public IP from Network Map Service: $this. This will be used instead of the provided \"$host\" as the advertised address.")
                }
                retrievedHostName
            } else {
                log.info("Detected public IP: ${foundPublicIP.hostAddress}. This will be used instead of the provided \"$host\" as the advertised address.")
                return foundPublicIP.hostAddress
                foundPublicIP.hostAddress
            }
        } else {
            null
        }
        return null
    }

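The hunk above changes tryDetectIfNotPublicHost so that, when no public IP can be detected locally, the node falls back to the network map service's view of its hostname. Below is a minimal, self-contained sketch of that fallback order; the function and its parameters are hypothetical stand-ins for AddressUtils.isPublic, AddressUtils.tryDetectPublicIP and NetworkMapClient.myPublicHostname, not the node's actual implementation.

fun chooseAdvertisedHost(
        configuredHost: String,
        isPublic: (String) -> Boolean,     // stand-in for AddressUtils.isPublic
        detectPublicIP: () -> String?,     // stand-in for AddressUtils.tryDetectPublicIP
        askNetworkMap: () -> String?       // stand-in for NetworkMapClient.myPublicHostname
): String {
    if (isPublic(configuredHost)) return configuredHost    // the configured host is already routable
    val detected = detectPublicIP()
    if (detected != null) return detected                  // prefer a locally detected public IP
    return askNetworkMap() ?: configuredHost                // otherwise ask the network map, else keep the original
}
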
override fun startMessagingService(rpcOps: RPCOps) {
|
||||
@ -212,7 +217,7 @@ open class Node(configuration: NodeConfiguration,
|
||||
* This is not using the H2 "automatic mixed mode" directly but leans on many of the underpinnings. For more details
|
||||
* on H2 URLs and configuration see: http://www.h2database.com/html/features.html#database_url
|
||||
*/
|
||||
override fun <T> initialiseDatabasePersistence(schemaService: SchemaService, insideTransaction: () -> T): T {
|
||||
override fun <T> initialiseDatabasePersistence(schemaService: SchemaService, insideTransaction: (CordaPersistence) -> T): T {
|
||||
val databaseUrl = configuration.dataSourceProperties.getProperty("dataSource.url")
|
||||
val h2Prefix = "jdbc:h2:file:"
|
||||
if (databaseUrl != null && databaseUrl.startsWith(h2Prefix)) {
|
||||
@ -278,14 +283,15 @@ open class Node(configuration: NodeConfiguration,

    private fun initialiseSerialization() {
        val classloader = cordappLoader.appClassLoader
        SerializationDefaults.SERIALIZATION_FACTORY = SerializationFactoryImpl().apply {
            registerScheme(KryoServerSerializationScheme())
            registerScheme(AMQPServerSerializationScheme())
        }
        SerializationDefaults.P2P_CONTEXT = KRYO_P2P_CONTEXT.withClassLoader(classloader)
        SerializationDefaults.RPC_SERVER_CONTEXT = KRYO_RPC_SERVER_CONTEXT.withClassLoader(classloader)
        SerializationDefaults.STORAGE_CONTEXT = KRYO_STORAGE_CONTEXT.withClassLoader(classloader)
        SerializationDefaults.CHECKPOINT_CONTEXT = KRYO_CHECKPOINT_CONTEXT.withClassLoader(classloader)
        nodeSerializationEnv = SerializationEnvironmentImpl(
                SerializationFactoryImpl().apply {
                    registerScheme(KryoServerSerializationScheme())
                    registerScheme(AMQPServerSerializationScheme())
                },
                KRYO_P2P_CONTEXT.withClassLoader(classloader),
                rpcServerContext = KRYO_RPC_SERVER_CONTEXT.withClassLoader(classloader),
                storageContext = KRYO_STORAGE_CONTEXT.withClassLoader(classloader),
                checkpointContext = KRYO_CHECKPOINT_CONTEXT.withClassLoader(classloader))
    }
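
This hunk replaces assignments to the mutable SerializationDefaults globals with a single SerializationEnvironmentImpl assigned to nodeSerializationEnv, which keeps all the contexts in one immutable object. A sketch of the same wiring factored into a helper, using only names that appear in the diff above (the helper itself, createNodeSerializationEnv, is hypothetical):

private fun createNodeSerializationEnv(classloader: ClassLoader): SerializationEnvironmentImpl =
        SerializationEnvironmentImpl(
                SerializationFactoryImpl().apply {
                    registerScheme(KryoServerSerializationScheme())    // Kryo-based scheme registered by the node
                    registerScheme(AMQPServerSerializationScheme())    // AMQP-based scheme registered by the node
                },
                KRYO_P2P_CONTEXT.withClassLoader(classloader),
                rpcServerContext = KRYO_RPC_SERVER_CONTEXT.withClassLoader(classloader),
                storageContext = KRYO_STORAGE_CONTEXT.withClassLoader(classloader),
                checkpointContext = KRYO_CHECKPOINT_CONTEXT.withClassLoader(classloader))
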

    /** Starts a blocking event loop for message dispatch. */
@ -314,7 +320,3 @@ open class Node(configuration: NodeConfiguration,
|
||||
log.info("Shutdown complete")
|
||||
}
|
||||
}
|
||||
|
||||
class ConfigurationException(message: String) : CordaException(message)
|
||||
|
||||
data class NetworkMapInfo(val address: NetworkHostAndPort, val legalName: CordaX500Name)
|
||||
|
@ -8,6 +8,7 @@ import net.corda.core.internal.concurrent.thenMatch
|
||||
import net.corda.core.utilities.loggerFor
|
||||
import net.corda.node.*
|
||||
import net.corda.node.services.config.NodeConfiguration
|
||||
import net.corda.node.services.config.NodeConfigurationImpl
|
||||
import net.corda.node.services.transactions.bftSMaRtSerialFilter
|
||||
import net.corda.node.shell.InteractiveShell
|
||||
import net.corda.node.utilities.registration.HTTPNetworkRegistrationService
|
||||
@ -62,7 +63,21 @@ open class NodeStartup(val args: Array<String>) {
|
||||
|
||||
drawBanner(versionInfo)
|
||||
Node.printBasicNodeInfo(LOGS_CAN_BE_FOUND_IN_STRING, System.getProperty("log-path"))
|
||||
val conf = loadConfigFile(cmdlineOptions)
|
||||
val conf0 = loadConfigFile(cmdlineOptions)
|
||||
|
||||
val conf = if (cmdlineOptions.bootstrapRaftCluster) {
|
||||
if (conf0 is NodeConfigurationImpl) {
|
||||
println("Bootstrapping raft cluster (starting up as seed node).")
|
||||
// Ignore the configured clusterAddresses to make the node bootstrap a cluster instead of joining.
|
||||
conf0.copy(notary = conf0.notary?.copy(raft = conf0.notary?.raft?.copy(clusterAddresses = emptyList())))
|
||||
} else {
|
||||
println("bootstrap-raft-notaries flag not recognized, exiting...")
|
||||
exitProcess(1)
|
||||
}
|
||||
} else {
|
||||
conf0
|
||||
}
|
||||
|
||||
banJavaSerialisation(conf)
|
||||
preNetworkRegistration(conf)
|
||||
maybeRegisterWithNetworkAndExit(cmdlineOptions, conf)
|
||||
@ -141,14 +156,15 @@ open class NodeStartup(val args: Array<String>) {
|
||||
}
|
||||
|
||||
open protected fun maybeRegisterWithNetworkAndExit(cmdlineOptions: CmdLineOptions, conf: NodeConfiguration) {
|
||||
if (!cmdlineOptions.isRegistration) return
|
||||
val compatibilityZoneURL = conf.compatibilityZoneURL
|
||||
if (!cmdlineOptions.isRegistration || compatibilityZoneURL == null) return
|
||||
println()
|
||||
println("******************************************************************")
|
||||
println("* *")
|
||||
println("* Registering as a new participant with Corda network *")
|
||||
println("* *")
|
||||
println("******************************************************************")
|
||||
NetworkRegistrationHelper(conf, HTTPNetworkRegistrationService(conf.certificateSigningService)).buildKeystore()
|
||||
NetworkRegistrationHelper(conf, HTTPNetworkRegistrationService(compatibilityZoneURL)).buildKeystore()
|
||||
exitProcess(0)
|
||||
}
|
||||
|
||||
|
@ -8,19 +8,27 @@ import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.messaging.CordaRPCOps
|
||||
import net.corda.core.messaging.DataFeed
|
||||
import net.corda.core.messaging.NodeState
|
||||
import net.corda.core.node.NodeInfo
|
||||
import net.corda.core.node.services.AttachmentId
|
||||
import net.corda.core.node.services.NetworkMapCache
|
||||
import net.corda.core.node.services.Vault
|
||||
import net.corda.core.node.services.vault.PageSpecification
|
||||
import net.corda.core.node.services.vault.QueryCriteria
|
||||
import net.corda.core.node.services.vault.Sort
|
||||
import net.corda.node.services.messaging.RpcContext
|
||||
import net.corda.node.services.messaging.requireEitherPermission
|
||||
import net.corda.core.node.services.vault.*
|
||||
import net.corda.node.services.messaging.RpcAuthContext
|
||||
import rx.Observable
|
||||
import java.io.InputStream
|
||||
import java.security.PublicKey
|
||||
|
||||
// TODO change to KFunction reference after Kotlin fixes https://youtrack.jetbrains.com/issue/KT-12140
|
||||
class RpcAuthorisationProxy(private val implementation: CordaRPCOps, private val context: () -> RpcContext, private val permissionsAllowing: (methodName: String, args: List<Any?>) -> Set<String>) : CordaRPCOps {
|
||||
class RpcAuthorisationProxy(private val implementation: CordaRPCOps, private val context: () -> RpcAuthContext, private val permissionsAllowing: (methodName: String, args: List<Any?>) -> Set<String>) : CordaRPCOps {
|
||||
|
||||
override fun uploadAttachmentWithMetadata(jar: InputStream, uploader: String, filename: String): SecureHash = guard("uploadAttachmentWithMetadata") {
|
||||
implementation.uploadAttachmentWithMetadata(jar, uploader, filename)
|
||||
}
|
||||
|
||||
override fun queryAttachments(query: AttachmentQueryCriteria, sorting: AttachmentSort?): List<AttachmentId> = guard("queryAttachments") {
|
||||
implementation.queryAttachments(query, sorting)
|
||||
}
|
||||
|
||||
override fun stateMachinesSnapshot() = guard("stateMachinesSnapshot") {
|
||||
implementation.stateMachinesSnapshot()
|
||||
@ -60,6 +68,8 @@ class RpcAuthorisationProxy(private val implementation: CordaRPCOps, private val
|
||||
|
||||
override fun nodeInfo(): NodeInfo = guard("nodeInfo", implementation::nodeInfo)
|
||||
|
||||
override fun nodeStateObservable(): Observable<NodeState> = guard("nodeStateObservable", implementation::nodeStateObservable)
|
||||
|
||||
override fun notaryIdentities(): List<Party> = guard("notaryIdentities", implementation::notaryIdentities)
|
||||
|
||||
override fun addVaultTransactionNote(txnId: SecureHash, txnNote: String) = guard("addVaultTransactionNote") {
|
||||
@ -152,7 +162,7 @@ class RpcAuthorisationProxy(private val implementation: CordaRPCOps, private val
    // TODO change to KFunction reference after Kotlin fixes https://youtrack.jetbrains.com/issue/KT-12140
    private inline fun <RESULT> guard(methodName: String, args: List<Any?>, action: () -> RESULT): RESULT {

        context.invoke().requireEitherPermission(permissionsAllowing.invoke(methodName, args))
        return action.invoke()
        context().requireEitherPermission(permissionsAllowing.invoke(methodName, args))
        return action()
    }
}
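
RpcAuthorisationProxy wraps every CordaRPCOps method in guard(), which resolves the caller's context and checks the granted permissions before delegating to the real implementation. A stripped-down, self-contained sketch of the same proxy pattern, with a hypothetical Greeter interface standing in for CordaRPCOps and a simplified permission policy:

interface Greeter {
    fun greet(name: String): String
}

class AuthorisingGreeter(
        private val delegate: Greeter,
        private val grantedPermissions: () -> Set<String>   // resolved per call, like context() above
) : Greeter {
    override fun greet(name: String): String = guard("greet") { delegate.greet(name) }

    private inline fun <T> guard(methodName: String, action: () -> T): T {
        // Deny unless the caller holds a permission naming the method (a simplified policy).
        require(methodName in grantedPermissions()) { "Not permissioned to call $methodName" }
        return action()
    }
}
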
@ -1,13 +1,12 @@
|
||||
package net.corda.node.internal
|
||||
|
||||
import net.corda.core.contracts.StateRef
|
||||
import net.corda.core.contracts.TransactionResolutionException
|
||||
import net.corda.core.contracts.TransactionState
|
||||
import net.corda.core.contracts.*
|
||||
import net.corda.core.flows.FlowLogic
|
||||
import net.corda.core.messaging.CordaRPCOps
|
||||
import net.corda.core.node.NodeInfo
|
||||
import net.corda.core.node.StateLoader
|
||||
import net.corda.core.node.services.CordaService
|
||||
import net.corda.core.node.services.NotaryService
|
||||
import net.corda.core.node.services.TransactionStorage
|
||||
import net.corda.core.serialization.SerializeAsToken
|
||||
import net.corda.node.services.api.CheckpointStorage
|
||||
@ -27,6 +26,7 @@ interface StartedNode<out N : AbstractNode> {
|
||||
val network: MessagingService
|
||||
val database: CordaPersistence
|
||||
val rpcOps: CordaRPCOps
|
||||
val notaryService: NotaryService?
|
||||
fun dispose() = internals.stop()
|
||||
fun <T : FlowLogic<*>> registerInitiatedFlow(initiatedFlowClass: Class<T>) = internals.registerInitiatedFlow(initiatedFlowClass)
|
||||
}
|
||||
@ -37,4 +37,10 @@ class StateLoaderImpl(private val validatedTransactions: TransactionStorage) : S
        val stx = validatedTransactions.getTransaction(stateRef.txhash) ?: throw TransactionResolutionException(stateRef.txhash)
        return stx.resolveBaseTransaction(this).outputs[stateRef.index]
    }

    @Throws(TransactionResolutionException::class)
    // TODO: future implementation to retrieve contract states from a Vault BLOB store
    override fun loadStates(stateRefs: Set<StateRef>): Set<StateAndRef<ContractState>> {
        return (stateRefs.map { StateAndRef(loadState(it), it) }).toSet()
    }
}

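The new loadStates overload resolves each StateRef through loadState and pairs it back up with its reference. A hedged usage sketch (stateLoader and the two refs are placeholders):

// Resolves each reference and pairs it with its StateRef; throws TransactionResolutionException
// if any referenced transaction is unknown to the node's transaction storage.
val inputs: Set<StateAndRef<ContractState>> = stateLoader.loadStates(setOf(inputRef1, inputRef2))
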
@ -18,7 +18,7 @@ class KryoServerSerializationScheme : AbstractKryoSerializationScheme() {
|
||||
|
||||
override fun rpcServerKryoPool(context: SerializationContext): KryoPool {
|
||||
return KryoPool.Builder {
|
||||
DefaultKryoCustomizer.customize(RPCKryo(RpcServerObservableSerializer, context)).apply {
|
||||
DefaultKryoCustomizer.customize(RPCKryo(RpcServerObservableSerializer, context), publicKeySerializer).apply {
|
||||
classLoader = context.deserializationClassLoader
|
||||
}
|
||||
}.build()
|
||||
|
@ -1,5 +1,6 @@
|
||||
package net.corda.node.services
|
||||
|
||||
import net.corda.core.context.AuthServiceId
|
||||
import net.corda.nodeapi.User
|
||||
|
||||
/**
|
||||
@ -11,12 +12,17 @@ interface RPCUserService {
|
||||
|
||||
fun getUser(username: String): User?
|
||||
val users: List<User>
|
||||
|
||||
val id: AuthServiceId
|
||||
}
|
||||
|
||||
// TODO Store passwords as salted hashes
|
||||
// TODO Or ditch this and consider something like Apache Shiro
|
||||
// TODO Need access to permission checks from inside flows and at other point during audit checking.
|
||||
class RPCUserServiceImpl(override val users: List<User>) : RPCUserService {
|
||||
|
||||
override val id: AuthServiceId = AuthServiceId("NODE_FILE_CONFIGURATION")
|
||||
|
||||
init {
|
||||
users.forEach {
|
||||
require(it.username.matches("\\w+".toRegex())) { "Username ${it.username} contains invalid characters" }
|
||||
|
@ -1,56 +0,0 @@
|
||||
package net.corda.node.services.api
|
||||
|
||||
import net.corda.core.serialization.SingletonSerializeAsToken
|
||||
import net.corda.core.serialization.deserialize
|
||||
import net.corda.core.serialization.serialize
|
||||
import net.corda.node.services.messaging.*
|
||||
import javax.annotation.concurrent.ThreadSafe
|
||||
|
||||
/**
|
||||
* Abstract superclass for services that a node can host, which provides helper functions.
|
||||
*/
|
||||
@ThreadSafe
|
||||
abstract class AbstractNodeService(val network: MessagingService) : SingletonSerializeAsToken() {
|
||||
/**
|
||||
* Register a handler for a message topic. In comparison to using net.addMessageHandler() this manages a lot of
|
||||
* common boilerplate code. Exceptions are caught and passed to the provided consumer. If you just want a simple
|
||||
* acknowledgement response with no content, use [net.corda.core.messaging.Ack].
|
||||
*
|
||||
* @param topic the topic, without the default session ID postfix (".0).
|
||||
* @param handler a function to handle the deserialised request and return an optional response (if return type not Unit)
|
||||
* @param exceptionConsumer a function to which any thrown exception is passed.
|
||||
*/
|
||||
protected inline fun <reified Q : ServiceRequestMessage, reified R : Any>
|
||||
addMessageHandler(topic: String,
|
||||
crossinline handler: (Q) -> R,
|
||||
crossinline exceptionConsumer: (Message, Exception) -> Unit): MessageHandlerRegistration {
|
||||
return network.addMessageHandler(topic, MessagingService.DEFAULT_SESSION_ID) { message, _ ->
|
||||
try {
|
||||
val request = message.data.deserialize<Q>()
|
||||
val response = handler(request)
|
||||
// If the return type R is Unit, then do not send a response
|
||||
if (response.javaClass != Unit.javaClass) {
|
||||
val msg = network.createMessage(topic, request.sessionID, response.serialize().bytes)
|
||||
network.send(msg, request.replyTo)
|
||||
}
|
||||
} catch (e: Exception) {
|
||||
exceptionConsumer(message, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Register a handler for a message topic. In comparison to using net.addMessageHandler() this manages a lot of
|
||||
* common boilerplate code. Exceptions are propagated to the messaging layer. If you just want a simple
|
||||
* acknowledgement response with no content, use [net.corda.core.messaging.Ack].
|
||||
*
|
||||
* @param topic the topic, without the default session ID postfix (".0).
|
||||
* @param handler a function to handle the deserialised request and return an optional response (if return type not Unit).
|
||||
*/
|
||||
protected inline fun <reified Q : ServiceRequestMessage, reified R : Any>
|
||||
addMessageHandler(topic: String,
|
||||
crossinline handler: (Q) -> R): MessageHandlerRegistration {
|
||||
return addMessageHandler(topic, handler, { _: Message, exception: Exception -> throw exception })
|
||||
}
|
||||
|
||||
}
|
@ -1,10 +1,10 @@
|
||||
package net.corda.node.services.api
|
||||
|
||||
import net.corda.core.context.InvocationContext
|
||||
import net.corda.core.flows.FlowLogic
|
||||
import net.corda.core.flows.StateMachineRunId
|
||||
import net.corda.core.serialization.SingletonSerializeAsToken
|
||||
import net.corda.core.utilities.ProgressTracker
|
||||
import java.security.Principal
|
||||
import java.time.Instant
|
||||
|
||||
/**
|
||||
@ -17,9 +17,9 @@ sealed class AuditEvent {
|
||||
*/
|
||||
abstract val timestamp: Instant
|
||||
/**
|
||||
* The responsible individual, node, or subsystem to which the audit event can be mapped.
|
||||
* The invocation context at the time the event was generated.
|
||||
*/
|
||||
abstract val principal: Principal
|
||||
abstract val context: InvocationContext
|
||||
/**
|
||||
* A human readable description of audit event including any permission check results.
|
||||
*/
|
||||
@ -36,7 +36,7 @@ sealed class AuditEvent {
|
||||
* Sealed data class to mark system related events as a distinct category.
|
||||
*/
|
||||
data class SystemAuditEvent(override val timestamp: Instant,
|
||||
override val principal: Principal,
|
||||
override val context: InvocationContext,
|
||||
override val description: String,
|
||||
override val contextData: Map<String, String>) : AuditEvent()
|
||||
|
||||
@ -60,7 +60,7 @@ interface FlowAuditInfo {
|
||||
*/
|
||||
data class FlowAppAuditEvent(
|
||||
override val timestamp: Instant,
|
||||
override val principal: Principal,
|
||||
override val context: InvocationContext,
|
||||
override val description: String,
|
||||
override val contextData: Map<String, String>,
|
||||
override val flowType: Class<out FlowLogic<*>>,
|
||||
@ -73,7 +73,7 @@ data class FlowAppAuditEvent(
|
||||
*/
|
||||
data class FlowStartEvent(
|
||||
override val timestamp: Instant,
|
||||
override val principal: Principal,
|
||||
override val context: InvocationContext,
|
||||
override val description: String,
|
||||
override val contextData: Map<String, String>,
|
||||
override val flowType: Class<out FlowLogic<*>>,
|
||||
@ -86,7 +86,7 @@ data class FlowStartEvent(
|
||||
*/
|
||||
data class FlowProgressAuditEvent(
|
||||
override val timestamp: Instant,
|
||||
override val principal: Principal,
|
||||
override val context: InvocationContext,
|
||||
override val description: String,
|
||||
override val flowType: Class<out FlowLogic<*>>,
|
||||
override val flowId: StateMachineRunId,
|
||||
@ -98,7 +98,7 @@ data class FlowProgressAuditEvent(
|
||||
* Sealed data class to record any FlowExceptions, or other unexpected terminations of a Flow.
|
||||
*/
|
||||
data class FlowErrorAuditEvent(override val timestamp: Instant,
|
||||
override val principal: Principal,
|
||||
override val context: InvocationContext,
|
||||
override val description: String,
|
||||
override val contextData: Map<String, String>,
|
||||
override val flowType: Class<out FlowLogic<*>>,
|
||||
@ -111,7 +111,7 @@ data class FlowErrorAuditEvent(override val timestamp: Instant,
|
||||
* after recording the FlowPermissionAuditEvent. This may cause an extra FlowErrorAuditEvent to be recorded too.
|
||||
*/
|
||||
data class FlowPermissionAuditEvent(override val timestamp: Instant,
|
||||
override val principal: Principal,
|
||||
override val context: InvocationContext,
|
||||
override val description: String,
|
||||
override val contextData: Map<String, String>,
|
||||
override val flowType: Class<out FlowLogic<*>>,
|
||||
|
@ -2,12 +2,10 @@ package net.corda.node.services.api
|
||||
|
||||
import net.corda.core.concurrent.CordaFuture
|
||||
import net.corda.core.crypto.SecureHash
|
||||
import net.corda.core.flows.FlowInitiator
|
||||
import net.corda.core.flows.FlowLogic
|
||||
import net.corda.core.flows.StateMachineRunId
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.internal.FlowStateMachine
|
||||
import net.corda.core.internal.VisibleForTesting
|
||||
import net.corda.core.context.InvocationContext
|
||||
import net.corda.core.internal.uncheckedCast
|
||||
import net.corda.core.messaging.DataFeed
|
||||
import net.corda.core.messaging.StateMachineTransactionMapping
|
||||
@ -18,7 +16,6 @@ import net.corda.core.node.services.NetworkMapCache
|
||||
import net.corda.core.node.services.NetworkMapCacheBase
|
||||
import net.corda.core.node.services.TransactionStorage
|
||||
import net.corda.core.transactions.SignedTransaction
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.core.utilities.loggerFor
|
||||
import net.corda.node.internal.InitiatedFlowFactory
|
||||
import net.corda.node.internal.cordapp.CordappProviderInternal
|
||||
@ -30,6 +27,10 @@ import net.corda.node.utilities.CordaPersistence
|
||||
|
||||
interface NetworkMapCacheInternal : NetworkMapCache, NetworkMapCacheBaseInternal
|
||||
interface NetworkMapCacheBaseInternal : NetworkMapCacheBase {
|
||||
val allNodeHashes: List<SecureHash>
|
||||
|
||||
fun getNodeByHash(nodeHash: SecureHash): NodeInfo?
|
||||
|
||||
/** Adds a node to the local cache (generally only used for adding ourselves). */
|
||||
fun addNode(node: NodeInfo)
|
||||
|
||||
@ -118,34 +119,28 @@ interface ServiceHubInternal : ServiceHub {
}

interface FlowStarter {
    /**
     * Starts an already constructed flow. Note that you must be on the server thread to call this method. [FlowInitiator]
     * defaults to [FlowInitiator.RPC] with username "Only For Testing".
     */
    @VisibleForTesting
    fun <T> startFlow(logic: FlowLogic<T>): FlowStateMachine<T> = startFlow(logic, FlowInitiator.RPC("Only For Testing")).getOrThrow()

    /**
     * Starts an already constructed flow. Note that you must be on the server thread to call this method.
     * @param flowInitiator indicates who started the flow, see: [FlowInitiator].
     * @param context indicates who started the flow, see: [InvocationContext].
     */
    fun <T> startFlow(logic: FlowLogic<T>, flowInitiator: FlowInitiator, ourIdentity: Party? = null): CordaFuture<FlowStateMachine<T>>
    fun <T> startFlow(logic: FlowLogic<T>, context: InvocationContext): CordaFuture<FlowStateMachine<T>>

    /**
     * Will check [logicType] and [args] against a whitelist and if acceptable then construct and initiate the flow.
     * Note that you must be on the server thread to call this method. [flowInitiator] points how flow was started,
     * See: [FlowInitiator].
     * Note that you must be on the server thread to call this method. [context] points how flow was started,
     * See: [InvocationContext].
     *
     * @throws net.corda.core.flows.IllegalFlowLogicException or IllegalArgumentException if there are problems with the
     * [logicType] or [args].
     */
    fun <T> invokeFlowAsync(
            logicType: Class<out FlowLogic<T>>,
            flowInitiator: FlowInitiator,
            context: InvocationContext,
            vararg args: Any?): CordaFuture<FlowStateMachine<T>> {
        val logicRef = FlowLogicRefFactoryImpl.createForRPC(logicType, *args)
        val logic: FlowLogic<T> = uncheckedCast(FlowLogicRefFactoryImpl.toFlowLogic(logicRef))
        return startFlow(logic, flowInitiator, ourIdentity = null)
        return startFlow(logic, context)
    }
}

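FlowStarter now takes a full InvocationContext instead of a bare FlowInitiator, so origin, tracing and actor information travel with the flow. A hedged sketch of kicking off a flow this way; flowStarter and MyFlow are placeholders, and it assumes InvocationContext.newInstance accepts just an Origin (the pattern the scheduler change later in this diff uses):

// Start a flow whose origin is recorded as 'Shell'. The outer future completes once the state
// machine exists; resultFuture then tracks the flow's eventual result.
val context = InvocationContext.newInstance(Origin.Shell)
val machineFuture: CordaFuture<FlowStateMachine<Unit>> = flowStarter.startFlow(MyFlow(), context)
val result = machineFuture.getOrThrow().resultFuture.getOrThrow()
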
@ -25,7 +25,7 @@ interface NodeConfiguration : NodeSSLConfiguration {
|
||||
val rpcUsers: List<User>
|
||||
val devMode: Boolean
|
||||
val devModeOptions: DevModeOptions?
|
||||
val certificateSigningService: URL
|
||||
val compatibilityZoneURL: URL?
|
||||
val certificateChainCheckPolicies: List<CertChainPolicyConfig>
|
||||
val verifierType: VerifierType
|
||||
val messageRedeliveryDelaySeconds: Int
|
||||
@ -42,6 +42,10 @@ interface NodeConfiguration : NodeSSLConfiguration {
|
||||
val relay: RelayConfiguration?
|
||||
}
|
||||
|
||||
fun NodeConfiguration.shouldCheckCheckpoints(): Boolean {
|
||||
return this.devMode && this.devModeOptions?.disableCheckpointChecker != true
|
||||
}
|
||||
|
||||
data class NotaryConfig(val validating: Boolean,
|
||||
val raft: RaftConfig? = null,
|
||||
val bftSMaRt: BFTSMaRtConfiguration? = null,
|
||||
@ -86,7 +90,7 @@ data class NodeConfigurationImpl(
|
||||
override val trustStorePassword: String,
|
||||
override val dataSourceProperties: Properties,
|
||||
override val database: Properties?,
|
||||
override val certificateSigningService: URL,
|
||||
override val compatibilityZoneURL: URL? = null,
|
||||
override val rpcUsers: List<User>,
|
||||
override val verifierType: VerifierType,
|
||||
// TODO typesafe config supports the notion of durations. Make use of that by mapping it to java.time.Duration.
|
||||
@ -115,6 +119,7 @@ data class NodeConfigurationImpl(
|
||||
// This is a sanity feature do not remove.
|
||||
require(!useTestClock || devMode) { "Cannot use test clock outside of dev mode" }
|
||||
require(devModeOptions == null || devMode) { "Cannot use devModeOptions outside of dev mode" }
|
||||
require(myLegalName.commonName == null) { "Common name must be null: $myLegalName" }
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2,17 +2,17 @@ package net.corda.node.services.events
|
||||
|
||||
import co.paralleluniverse.fibers.Suspendable
|
||||
import com.google.common.util.concurrent.ListenableFuture
|
||||
import com.google.common.util.concurrent.SettableFuture
|
||||
import net.corda.core.context.InvocationContext
|
||||
import net.corda.core.contracts.SchedulableState
|
||||
import net.corda.core.contracts.ScheduledActivity
|
||||
import net.corda.core.contracts.ScheduledStateRef
|
||||
import net.corda.core.contracts.StateRef
|
||||
import net.corda.core.crypto.SecureHash
|
||||
import net.corda.core.flows.FlowInitiator
|
||||
import net.corda.core.flows.FlowLogic
|
||||
import net.corda.core.internal.ThreadBox
|
||||
import net.corda.core.internal.VisibleForTesting
|
||||
import net.corda.core.internal.concurrent.flatMap
|
||||
import net.corda.core.context.Origin
|
||||
import net.corda.core.internal.until
|
||||
import net.corda.core.node.StateLoader
|
||||
import net.corda.core.schemas.PersistentStateRef
|
||||
@ -247,7 +247,9 @@ class NodeSchedulerService(private val clock: Clock,
|
||||
val scheduledFlow = getScheduledFlow(scheduledState)
|
||||
if (scheduledFlow != null) {
|
||||
flowName = scheduledFlow.javaClass.name
|
||||
val future = flowStarter.startFlow(scheduledFlow, FlowInitiator.Scheduled(scheduledState)).flatMap { it.resultFuture }
|
||||
// TODO refactor the scheduler to store and propagate the original invocation context
|
||||
val context = InvocationContext.newInstance(Origin.Scheduled(scheduledState))
|
||||
val future = flowStarter.startFlow(scheduledFlow, context).flatMap { it.resultFuture }
|
||||
future.then {
|
||||
unfinishedSchedules.countDown()
|
||||
}
|
||||
|
@ -16,6 +16,7 @@ import java.security.PublicKey
|
||||
import java.security.cert.*
|
||||
import java.util.concurrent.ConcurrentHashMap
|
||||
import javax.annotation.concurrent.ThreadSafe
|
||||
import javax.security.auth.x500.X500Principal
|
||||
|
||||
/**
|
||||
* Simple identity service which caches parties and provides functionality for efficient lookup.
|
||||
@ -68,6 +69,21 @@ class InMemoryIdentityService(identities: Iterable<PartyAndCertificate> = emptyS
            }
            throw e
        }

        // Ensure we record the first identity of the same name, first
        val identityPrincipal = identity.name.x500Principal
        val firstCertWithThisName: Certificate = identity.certPath.certificates.last { it ->
            val principal = (it as? X509Certificate)?.subjectX500Principal
            principal == identityPrincipal
        }
        if (firstCertWithThisName != identity.certificate) {
            val certificates = identity.certPath.certificates
            val idx = certificates.lastIndexOf(firstCertWithThisName)
            val certFactory = CertificateFactory.getInstance("X509")
            val firstPath = certFactory.generateCertPath(certificates.slice(idx..certificates.size - 1))
            verifyAndRegisterIdentity(PartyAndCertificate(firstPath))
        }

        log.trace { "Registering identity $identity" }
        keyToParties[identity.owningKey] = identity
        // Always keep the first party we registered, as that's the well known identity
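Both identity services now walk the certificate path to find the earliest certificate bearing the registering party's X.500 name and, if it is not the leaf, register that ancestor first. A self-contained sketch of just the path-trimming step on plain JCA types (the function name is hypothetical; the logic mirrors the lastIndexOf/slice code above):

import java.security.cert.CertPath
import java.security.cert.CertificateFactory
import java.security.cert.X509Certificate
import javax.security.auth.x500.X500Principal

// Returns the sub-path starting at the last certificate whose subject matches [subject],
// i.e. the earliest certificate in the chain issued under that name (paths are leaf-first).
fun trimToFirstCertWithSubject(path: CertPath, subject: X500Principal): CertPath {
    val certificates = path.certificates
    val anchorIdx = certificates.indexOfLast { (it as? X509Certificate)?.subjectX500Principal == subject }
    require(anchorIdx >= 0) { "No certificate in the path has subject $subject" }
    val certFactory = CertificateFactory.getInstance("X509")
    return certFactory.generateCertPath(certificates.subList(anchorIdx, certificates.size))
}
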
@ -14,6 +14,7 @@ import net.corda.core.utilities.loggerFor
|
||||
import net.corda.core.utilities.MAX_HASH_HEX_SIZE
|
||||
import net.corda.node.utilities.AppendOnlyPersistentMap
|
||||
import net.corda.node.utilities.NODE_DATABASE_PREFIX
|
||||
import net.corda.node.utilities.X509Utilities
|
||||
import org.bouncycastle.cert.X509CertificateHolder
|
||||
import java.io.ByteArrayInputStream
|
||||
import java.security.InvalidAlgorithmParameterException
|
||||
@ -77,7 +78,7 @@ class PersistentIdentityService(identities: Iterable<PartyAndCertificate> = empt
|
||||
var publicKeyHash: String,
|
||||
|
||||
@Lob
|
||||
@Column
|
||||
@Column(name = "identity_value")
|
||||
var identity: ByteArray = ByteArray(0)
|
||||
)
|
||||
|
||||
@ -126,6 +127,20 @@ class PersistentIdentityService(identities: Iterable<PartyAndCertificate> = empt
|
||||
throw e
|
||||
}
|
||||
|
||||
// Ensure we record the first identity of the same name, first
|
||||
val identityPrincipal = identity.name.x500Principal
|
||||
val firstCertWithThisName: Certificate = identity.certPath.certificates.last { it ->
|
||||
val principal = (it as? X509Certificate)?.subjectX500Principal
|
||||
principal == identityPrincipal
|
||||
}
|
||||
if (firstCertWithThisName != identity.certificate) {
|
||||
val certificates = identity.certPath.certificates
|
||||
val idx = certificates.lastIndexOf(firstCertWithThisName)
|
||||
val certFactory = CertificateFactory.getInstance("X509")
|
||||
val firstPath = certFactory.generateCertPath(certificates.slice(idx..certificates.size - 1))
|
||||
verifyAndRegisterIdentity(PartyAndCertificate(firstPath))
|
||||
}
|
||||
|
||||
log.debug { "Registering identity $identity" }
|
||||
val key = mapToKey(identity)
|
||||
keyToParties.addWithDuplicatesAllowed(key, identity)
|
||||
|
@ -0,0 +1,28 @@
package net.corda.node.services.logging

import net.corda.core.context.InvocationContext
import org.slf4j.MDC

internal fun InvocationContext.pushToLoggingContext() {

    MDC.put("invocation_id", trace.invocationId.value)
    MDC.put("invocation_timestamp", trace.invocationId.timestamp.toString())
    MDC.put("session_id", trace.sessionId.value)
    MDC.put("session_timestamp", trace.sessionId.timestamp.toString())
    actor?.let {
        MDC.put("actor_id", it.id.value)
        MDC.put("actor_store_id", it.serviceId.value)
        MDC.put("actor_owningIdentity", it.owningLegalIdentity.toString())
    }
    externalTrace?.let {
        MDC.put("external_invocation_id", it.invocationId.value)
        MDC.put("external_invocation_timestamp", it.invocationId.timestamp.toString())
        MDC.put("external_session_id", it.sessionId.value)
        MDC.put("external_session_timestamp", it.sessionId.timestamp.toString())
    }
    impersonatedActor?.let {
        MDC.put("impersonating_actor_id", it.id.value)
        MDC.put("impersonating_actor_store_id", it.serviceId.value)
        MDC.put("impersonating_actor_owningIdentity", it.owningLegalIdentity.toString())
    }
}
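pushToLoggingContext copies the invocation's identifiers into SLF4J's MDC so they can be attached to every log line produced while handling that request. A general-purpose sketch of the same idea, scoped to a block and restoring the previous MDC state afterwards (this helper is illustrative, not part of the node):

import org.slf4j.MDC

// Runs [block] with extra MDC entries visible to any logging done inside it, then restores the
// previous state. An appender can surface the values with a pattern such as
// "%X{invocation_id} %X{session_id}" (exact syntax depends on the logging backend in use).
inline fun <T> withLoggingContext(entries: Map<String, String>, block: () -> T): T {
    val previous = MDC.getCopyOfContextMap()
    entries.forEach { (key, value) -> MDC.put(key, value) }
    try {
        return block()
    } finally {
        if (previous != null) MDC.setContextMap(previous) else MDC.clear()
    }
}
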
@ -11,6 +11,7 @@ import net.corda.core.internal.uncheckedCast
|
||||
import net.corda.core.node.NodeInfo
|
||||
import net.corda.core.node.services.NetworkMapCache
|
||||
import net.corda.core.node.services.NetworkMapCache.MapChange
|
||||
import net.corda.core.serialization.SingletonSerializeAsToken
|
||||
import net.corda.core.utilities.NetworkHostAndPort
|
||||
import net.corda.core.utilities.debug
|
||||
import net.corda.core.utilities.loggerFor
|
||||
@ -27,8 +28,14 @@ import net.corda.node.utilities.X509Utilities.CORDA_CLIENT_TLS
|
||||
import net.corda.node.utilities.X509Utilities.CORDA_ROOT_CA
|
||||
import net.corda.node.utilities.loadKeyStore
|
||||
import net.corda.nodeapi.*
|
||||
import net.corda.nodeapi.ArtemisMessagingComponent.Companion.NODE_USER
|
||||
import net.corda.nodeapi.ArtemisMessagingComponent.Companion.PEER_USER
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.INTERNAL_PREFIX
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.NODE_USER
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.NOTIFICATIONS_ADDRESS
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.P2P_QUEUE
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.PEERS_PREFIX
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.PEER_USER
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.ArtemisPeerAddress
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.NodeAddress
|
||||
import org.apache.activemq.artemis.api.core.SimpleString
|
||||
import org.apache.activemq.artemis.api.core.management.ActiveMQServerControl
|
||||
import org.apache.activemq.artemis.core.config.BridgeConfiguration
|
||||
@ -86,19 +93,16 @@ import javax.security.cert.CertificateException
|
||||
* a fully connected network, trusted network or on localhost.
|
||||
*/
|
||||
@ThreadSafe
|
||||
class ArtemisMessagingServer(override val config: NodeConfiguration,
|
||||
val p2pPort: Int,
|
||||
class ArtemisMessagingServer(private val config: NodeConfiguration,
|
||||
private val p2pPort: Int,
|
||||
val rpcPort: Int?,
|
||||
val networkMapCache: NetworkMapCache,
|
||||
val userService: RPCUserService) : ArtemisMessagingComponent() {
|
||||
val userService: RPCUserService) : SingletonSerializeAsToken() {
|
||||
companion object {
|
||||
private val log = loggerFor<ArtemisMessagingServer>()
|
||||
/** 10 MiB maximum allowed file size for attachments, including message headers. TODO: acquire this value from Network Map when supported. */
|
||||
@JvmStatic
|
||||
val MAX_FILE_SIZE = 10485760
|
||||
|
||||
val ipDetectRequestProperty = "ip-request-id"
|
||||
val ipDetectResponseProperty = "ip-address"
|
||||
}
|
||||
|
||||
private class InnerState {
|
||||
@ -183,7 +187,6 @@ class ArtemisMessagingServer(override val config: NodeConfiguration,
|
||||
// by having its password be an unknown securely random 128-bit value.
|
||||
clusterPassword = BigInteger(128, newSecureRandom()).toString(16)
|
||||
queueConfigurations = listOf(
|
||||
queueConfig(NETWORK_MAP_QUEUE, durable = true),
|
||||
queueConfig(P2P_QUEUE, durable = true),
|
||||
// Create an RPC queue: this will service locally connected clients only (not via a bridge) and those
|
||||
// clients must have authenticated. We could use a single consumer for everything and perhaps we should,
|
||||
|
@ -205,19 +205,6 @@ fun MessagingService.send(topic: String, sessionID: Long, payload: Any, to: Mess
|
||||
fun MessagingService.send(topicSession: TopicSession, payload: Any, to: MessageRecipients, uuid: UUID = UUID.randomUUID(), retryId: Long? = null)
|
||||
= send(createMessage(topicSession, payload.serialize().bytes, uuid), to, retryId)
|
||||
|
||||
/**
|
||||
* This class lets you start up a [MessagingService]. Its purpose is to stop you from getting access to the methods
|
||||
* on the messaging service interface until you have successfully started up the system. One of these objects should
|
||||
* be the only way to obtain a reference to a [MessagingService]. Startup may be a slow process: some implementations
|
||||
* may let you cast the returned future to an object that lets you get status info.
|
||||
*
|
||||
* A specific implementation of the controller class will have extra features that let you customise it before starting
|
||||
* it up.
|
||||
*/
|
||||
interface MessagingServiceBuilder<out T : MessagingService> {
|
||||
fun start(): ListenableFuture<out T>
|
||||
}
|
||||
|
||||
interface MessageHandlerRegistration
|
||||
|
||||
/**
|
||||
|
@ -1,5 +1,6 @@
|
||||
package net.corda.node.services.messaging
|
||||
|
||||
import com.codahale.metrics.MetricRegistry
|
||||
import net.corda.core.crypto.random63BitValue
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.core.internal.ThreadBox
|
||||
@ -10,7 +11,9 @@ import net.corda.core.messaging.SingleMessageRecipient
|
||||
import net.corda.core.node.services.PartyInfo
|
||||
import net.corda.core.node.services.TransactionVerifierService
|
||||
import net.corda.core.serialization.SerializationDefaults
|
||||
import net.corda.core.serialization.SingletonSerializeAsToken
|
||||
import net.corda.core.serialization.deserialize
|
||||
import net.corda.core.serialization.internal.nodeSerializationEnv
|
||||
import net.corda.core.serialization.serialize
|
||||
import net.corda.core.transactions.LedgerTransaction
|
||||
import net.corda.core.utilities.NetworkHostAndPort
|
||||
@ -19,14 +22,17 @@ import net.corda.core.utilities.sequence
|
||||
import net.corda.core.utilities.trace
|
||||
import net.corda.node.VersionInfo
|
||||
import net.corda.node.services.RPCUserService
|
||||
import net.corda.node.services.api.MonitoringService
|
||||
import net.corda.node.services.config.NodeConfiguration
|
||||
import net.corda.node.services.config.VerifierType
|
||||
import net.corda.node.services.statemachine.StateMachineManagerImpl
|
||||
import net.corda.node.services.transactions.InMemoryTransactionVerifierService
|
||||
import net.corda.node.services.transactions.OutOfProcessTransactionVerifierService
|
||||
import net.corda.node.utilities.*
|
||||
import net.corda.nodeapi.ArtemisMessagingComponent
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.NODE_USER
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.P2P_QUEUE
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.ArtemisAddress
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.NodeAddress
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.ServiceAddress
|
||||
import net.corda.nodeapi.ArtemisTcpTransport
|
||||
import net.corda.nodeapi.ConnectionDirection
|
||||
import net.corda.nodeapi.VerifierApi
|
||||
@ -71,15 +77,15 @@ import javax.persistence.Lob
|
||||
* If not provided, will default to [serverAddress].
|
||||
*/
|
||||
@ThreadSafe
|
||||
class NodeMessagingClient(override val config: NodeConfiguration,
|
||||
class NodeMessagingClient(private val config: NodeConfiguration,
|
||||
private val versionInfo: VersionInfo,
|
||||
private val serverAddress: NetworkHostAndPort,
|
||||
private val myIdentity: PublicKey,
|
||||
private val nodeExecutor: AffinityExecutor.ServiceAffinityExecutor,
|
||||
val database: CordaPersistence,
|
||||
val monitoringService: MonitoringService,
|
||||
private val database: CordaPersistence,
|
||||
private val metrics: MetricRegistry,
|
||||
advertisedAddress: NetworkHostAndPort = serverAddress
|
||||
) : ArtemisMessagingComponent(), MessagingService {
|
||||
) : SingletonSerializeAsToken(), MessagingService {
|
||||
companion object {
|
||||
private val log = loggerFor<NodeMessagingClient>()
|
||||
|
||||
@ -211,12 +217,14 @@ class NodeMessagingClient(override val config: NodeConfiguration,
|
||||
log.info("Connecting to message broker: $serverAddress")
|
||||
// TODO Add broker CN to config for host verification in case the embedded broker isn't used
|
||||
val tcpTransport = ArtemisTcpTransport.tcpTransport(ConnectionDirection.Outbound(), serverAddress, config)
|
||||
val locator = ActiveMQClient.createServerLocatorWithoutHA(tcpTransport)
|
||||
// Never time out on our loopback Artemis connections. If we switch back to using the InVM transport this
|
||||
// would be the default and the two lines below can be deleted.
|
||||
locator.connectionTTL = -1
|
||||
locator.clientFailureCheckPeriod = -1
|
||||
locator.minLargeMessageSize = ArtemisMessagingServer.MAX_FILE_SIZE
|
||||
val locator = ActiveMQClient.createServerLocatorWithoutHA(tcpTransport).apply {
|
||||
// Never time out on our loopback Artemis connections. If we switch back to using the InVM transport this
|
||||
// would be the default and the two lines below can be deleted.
|
||||
connectionTTL = -1
|
||||
clientFailureCheckPeriod = -1
|
||||
minLargeMessageSize = ArtemisMessagingServer.MAX_FILE_SIZE
|
||||
isUseGlobalPools = nodeSerializationEnv != null
|
||||
}
|
||||
sessionFactory = locator.createSessionFactory()
|
||||
|
||||
// Login using the node username. The broker will authentiate us as its node (as opposed to another peer)
|
||||
@ -557,7 +565,7 @@ class NodeMessagingClient(override val config: NodeConfiguration,
|
||||
}
|
||||
|
||||
private fun createOutOfProcessVerifierService(): TransactionVerifierService {
|
||||
return object : OutOfProcessTransactionVerifierService(monitoringService) {
|
||||
return object : OutOfProcessTransactionVerifierService(metrics) {
|
||||
override fun sendRequest(nonce: Long, transaction: LedgerTransaction) {
|
||||
messagingExecutor.fetchFrom {
|
||||
state.locked {
|
||||
|
@ -12,7 +12,11 @@ import com.google.common.collect.Multimaps
|
||||
import com.google.common.collect.SetMultimap
|
||||
import com.google.common.util.concurrent.ThreadFactoryBuilder
|
||||
import net.corda.client.rpc.RPCException
|
||||
import net.corda.core.crypto.random63BitValue
|
||||
import net.corda.core.context.Actor
|
||||
import net.corda.core.context.Actor.Id
|
||||
import net.corda.core.context.InvocationContext
|
||||
import net.corda.core.context.Trace
|
||||
import net.corda.core.context.Trace.InvocationId
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.core.internal.LazyStickyPool
|
||||
import net.corda.core.internal.LifeCycle
|
||||
@ -25,8 +29,9 @@ import net.corda.core.utilities.debug
|
||||
import net.corda.core.utilities.loggerFor
|
||||
import net.corda.core.utilities.seconds
|
||||
import net.corda.node.services.RPCUserService
|
||||
import net.corda.node.services.logging.pushToLoggingContext
|
||||
import net.corda.nodeapi.*
|
||||
import net.corda.nodeapi.ArtemisMessagingComponent.Companion.NODE_USER
|
||||
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.NODE_USER
|
||||
import org.apache.activemq.artemis.api.core.Message
|
||||
import org.apache.activemq.artemis.api.core.SimpleString
|
||||
import org.apache.activemq.artemis.api.core.client.ActiveMQClient.DEFAULT_ACK_BATCH_SIZE
|
||||
@ -37,6 +42,7 @@ import org.apache.activemq.artemis.api.core.client.ServerLocator
|
||||
import org.apache.activemq.artemis.api.core.management.ActiveMQServerControl
|
||||
import org.apache.activemq.artemis.api.core.management.CoreNotificationType
|
||||
import org.apache.activemq.artemis.api.core.management.ManagementHelper
|
||||
import org.slf4j.MDC
|
||||
import rx.Notification
|
||||
import rx.Observable
|
||||
import rx.Subscriber
|
||||
@ -106,7 +112,7 @@ class RPCServer(
|
||||
/** The observable subscription mapping. */
|
||||
private val observableMap = createObservableSubscriptionMap()
|
||||
/** A mapping from client addresses to IDs of associated Observables */
|
||||
private val clientAddressToObservables = Multimaps.synchronizedSetMultimap(HashMultimap.create<SimpleString, RPCApi.ObservableId>())
|
||||
private val clientAddressToObservables = Multimaps.synchronizedSetMultimap(HashMultimap.create<SimpleString, InvocationId>())
|
||||
/** The scheduled reaper handle. */
|
||||
private var reaperScheduledFuture: ScheduledFuture<*>? = null
|
||||
|
||||
@ -138,7 +144,7 @@ class RPCServer(
|
||||
}
|
||||
|
||||
private fun createObservableSubscriptionMap(): ObservableSubscriptionMap {
|
||||
val onObservableRemove = RemovalListener<RPCApi.ObservableId, ObservableSubscription> {
|
||||
val onObservableRemove = RemovalListener<InvocationId, ObservableSubscription> {
|
||||
log.debug { "Unsubscribing from Observable with id ${it.key} because of ${it.cause}" }
|
||||
it.value.subscription.unsubscribe()
|
||||
}
|
||||
@ -269,18 +275,19 @@ class RPCServer(
|
||||
val arguments = Try.on {
|
||||
clientToServer.serialisedArguments.deserialize<List<Any?>>(context = RPC_SERVER_CONTEXT)
|
||||
}
|
||||
val context = artemisMessage.context(clientToServer.sessionId)
|
||||
context.invocation.pushToLoggingContext()
|
||||
when (arguments) {
|
||||
is Try.Success -> {
|
||||
val rpcContext = RpcContext(currentUser = getUser(artemisMessage))
|
||||
rpcExecutor!!.submit {
|
||||
val result = invokeRpc(rpcContext, clientToServer.methodName, arguments.value)
|
||||
sendReply(clientToServer.id, clientToServer.clientAddress, result)
|
||||
val result = invokeRpc(context, clientToServer.methodName, arguments.value)
|
||||
sendReply(clientToServer.replyId, clientToServer.clientAddress, result)
|
||||
}
|
||||
}
|
||||
is Try.Failure -> {
|
||||
// We failed to deserialise the arguments, route back the error
|
||||
log.warn("Inbound RPC failed", arguments.exception)
|
||||
sendReply(clientToServer.id, clientToServer.clientAddress, arguments)
|
||||
sendReply(clientToServer.replyId, clientToServer.clientAddress, arguments)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -291,10 +298,10 @@ class RPCServer(
|
||||
artemisMessage.acknowledge()
|
||||
}
|
||||
|
||||
private fun invokeRpc(rpcContext: RpcContext, methodName: String, arguments: List<Any?>): Try<Any> {
|
||||
private fun invokeRpc(context: RpcAuthContext, methodName: String, arguments: List<Any?>): Try<Any> {
|
||||
return Try.on {
|
||||
try {
|
||||
CURRENT_RPC_CONTEXT.set(rpcContext)
|
||||
CURRENT_RPC_CONTEXT.set(context)
|
||||
log.debug { "Calling $methodName" }
|
||||
val method = methodTable[methodName] ?:
|
||||
throw RPCException("Received RPC for unknown method $methodName - possible client/server version skew?")
|
||||
@ -307,10 +314,10 @@ class RPCServer(
|
||||
}
|
||||
}
|
||||
|
||||
private fun sendReply(requestId: RPCApi.RpcRequestId, clientAddress: SimpleString, result: Try<Any>) {
|
||||
val reply = RPCApi.ServerToClient.RpcReply(requestId, result)
|
||||
private fun sendReply(replyId: InvocationId, clientAddress: SimpleString, result: Try<Any>) {
|
||||
val reply = RPCApi.ServerToClient.RpcReply(replyId, result)
|
||||
val observableContext = ObservableContext(
|
||||
requestId,
|
||||
replyId,
|
||||
observableMap,
|
||||
clientAddressToObservables,
|
||||
clientAddress,
|
||||
@ -352,51 +359,83 @@ class RPCServer(
|
||||
// TODO remove this User once webserver doesn't need it
|
||||
private val nodeUser = User(NODE_USER, NODE_USER, setOf())
|
||||
|
||||
    private fun getUser(message: ClientMessage): User {
    private fun ClientMessage.context(sessionId: Trace.SessionId): RpcAuthContext {
        val trace = Trace.newInstance(sessionId = sessionId)
        val externalTrace = externalTrace()
        val rpcActor = actorFrom(this)
        val impersonatedActor = impersonatedActor()
        return RpcAuthContext(InvocationContext.rpc(rpcActor.first, trace, externalTrace, impersonatedActor), rpcActor.second)
    }

    private fun actorFrom(message: ClientMessage): Pair<Actor, RpcPermissions> {
        val validatedUser = message.getStringProperty(Message.HDR_VALIDATED_USER) ?: throw IllegalArgumentException("Missing validated user from the Artemis message")
        val targetLegalIdentity = message.getStringProperty(RPCApi.RPC_TARGET_LEGAL_IDENTITY)?.let(CordaX500Name.Companion::parse) ?: nodeLegalName
        // TODO switch userService based on targetLegalIdentity
        val rpcUser = userService.getUser(validatedUser)
        if (rpcUser != null) {
            return rpcUser
        return if (rpcUser != null) {
            Actor(Id(rpcUser.username), userService.id, targetLegalIdentity) to RpcPermissions(rpcUser.permissions)
        } else if (CordaX500Name.parse(validatedUser) == nodeLegalName) {
            return nodeUser
            // TODO remove this after Shell and WebServer will no longer need it
            Actor(Id(nodeUser.username), userService.id, targetLegalIdentity) to RpcPermissions(nodeUser.permissions)
        } else {
            throw IllegalArgumentException("Validated user '$validatedUser' is not an RPC user nor the NODE user")
        }
    }
}

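The RPC server now derives a full RpcAuthContext (actor, permissions and tracing) from the Artemis message instead of a bare User. A hedged sketch of assembling such a context by hand, e.g. for a test. The types used (Actor, Id, AuthServiceId, Trace, InvocationContext, RpcAuthContext, RpcPermissions, CordaX500Name) all appear in this diff; the literal values, the permission string, and the assumption that Trace.newInstance() and InvocationContext.rpc accept these argument combinations are illustrative only.

val nodeLegalName = CordaX500Name.parse("O=Demo Corp, L=London, C=GB")
val actor = Actor(Id("demo"), AuthServiceId("NODE_FILE_CONFIGURATION"), nodeLegalName)
val authContext = RpcAuthContext(
        InvocationContext.rpc(actor, Trace.newInstance(), null, null),
        RpcPermissions(setOf("StartFlow.net.corda.examples.SomeFlow")))
// Passes because the permission is granted; otherwise a PermissionException is thrown.
authContext.requirePermission("StartFlow.net.corda.examples.SomeFlow")
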
// TODO replace this by creating a new CordaRPCImpl for each request, passing the context, after we fix Shell and WebServer
|
||||
@JvmField
|
||||
internal val CURRENT_RPC_CONTEXT: ThreadLocal<RpcContext> = ThreadLocal()
|
||||
internal val CURRENT_RPC_CONTEXT: ThreadLocal<RpcAuthContext> = CurrentRpcContext()
|
||||
|
||||
internal class CurrentRpcContext : ThreadLocal<RpcAuthContext>() {
|
||||
|
||||
override fun remove() {
|
||||
super.remove()
|
||||
MDC.clear()
|
||||
}
|
||||
|
||||
override fun set(context: RpcAuthContext?) {
|
||||
when {
|
||||
context != null -> {
|
||||
super.set(context)
|
||||
// this is needed here as well because the Shell sets the context without going through the RpcServer
|
||||
context.invocation.pushToLoggingContext()
|
||||
}
|
||||
else -> remove()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a context specific to the current RPC call. Note that trying to call this function outside of an RPC will
|
||||
* throw. If you'd like to use the context outside of the call (e.g. in another thread) then pass the returned reference
|
||||
* around explicitly.
|
||||
* The [InvocationContext] does not include permissions.
|
||||
*/
|
||||
fun rpcContext(): RpcContext = CURRENT_RPC_CONTEXT.get()
|
||||
internal fun context(): InvocationContext = rpcContext().invocation
|
||||
|
||||
/**
|
||||
* @param currentUser This is available to RPC implementations to query the validated [User] that is calling it. Each
|
||||
* user has a set of permissions they're entitled to which can be used to control access.
|
||||
* Returns a context specific to the current RPC call. Note that trying to call this function outside of an RPC will
|
||||
* throw. If you'd like to use the context outside of the call (e.g. in another thread) then pass the returned reference
|
||||
* around explicitly.
|
||||
* The [RpcAuthContext] includes permissions.
|
||||
*/
|
||||
data class RpcContext(
|
||||
val currentUser: User
|
||||
)
|
||||
fun rpcContext(): RpcAuthContext = CURRENT_RPC_CONTEXT.get()
|
||||
|
||||
class ObservableSubscription(
|
||||
val subscription: Subscription
|
||||
)
|
||||
|
||||
typealias ObservableSubscriptionMap = Cache<RPCApi.ObservableId, ObservableSubscription>
|
||||
typealias ObservableSubscriptionMap = Cache<InvocationId, ObservableSubscription>
|
||||
|
||||
// We construct an observable context on each RPC request. If subsequently a nested Observable is
|
||||
// encountered this same context is propagated by the instrumented KryoPool. This way all
|
||||
// observations rooted in a single RPC will be muxed correctly. Note that the context construction
|
||||
// itself is quite cheap.
|
||||
class ObservableContext(
|
||||
val rpcRequestId: RPCApi.RpcRequestId,
|
||||
val invocationId: InvocationId,
|
||||
val observableMap: ObservableSubscriptionMap,
|
||||
val clientAddressToObservables: SetMultimap<SimpleString, RPCApi.ObservableId>,
|
||||
val clientAddressToObservables: SetMultimap<SimpleString, InvocationId>,
|
||||
val clientAddress: SimpleString,
|
||||
val serverControl: ActiveMQServerControl,
|
||||
val sessionAndProducerPool: LazyStickyPool<ArtemisProducer>,
|
||||
@ -410,7 +449,7 @@ class ObservableContext(
|
||||
|
||||
fun sendMessage(serverToClient: RPCApi.ServerToClient) {
|
||||
try {
|
||||
sessionAndProducerPool.run(rpcRequestId) {
|
||||
sessionAndProducerPool.run(invocationId) {
|
||||
val artemisMessage = it.session.createMessage(false)
|
||||
serverToClient.writeToClientMessage(serializationContextWithObservableContext, artemisMessage)
|
||||
it.producer.send(clientAddress, artemisMessage)
|
||||
@ -437,9 +476,9 @@ object RpcServerObservableSerializer : Serializer<Observable<*>>() {
|
||||
}
|
||||
|
||||
override fun write(kryo: Kryo, output: Output, observable: Observable<*>) {
|
||||
val observableId = RPCApi.ObservableId(random63BitValue())
|
||||
val observableId = InvocationId.newInstance()
|
||||
val observableContext = kryo.context[RpcObservableContextKey] as ObservableContext
|
||||
output.writeLong(observableId.toLong, true)
|
||||
output.writeInvocationId(observableId)
|
||||
val observableWithSubscription = ObservableSubscription(
|
||||
// We capture [observableContext] in the subscriber. Note that all synchronisation/kryo borrowing
|
||||
// must be done again within the subscriber
|
||||
@ -465,4 +504,10 @@ object RpcServerObservableSerializer : Serializer<Observable<*>>() {
|
||||
observableContext.clientAddressToObservables.put(observableContext.clientAddress, observableId)
|
||||
observableContext.observableMap.put(observableId, observableWithSubscription)
|
||||
}
|
||||
|
||||
private fun Output.writeInvocationId(id: InvocationId) {
|
||||
|
||||
writeString(id.value)
|
||||
writeLong(id.timestamp.toEpochMilli())
|
||||
}
|
||||
}
|
||||
|
@ -1,20 +0,0 @@
|
||||
@file:JvmName("RPCServerStructures")
|
||||
|
||||
package net.corda.node.services.messaging
|
||||
|
||||
import net.corda.client.rpc.PermissionException
|
||||
import net.corda.node.services.Permissions.Companion.all
|
||||
import net.corda.nodeapi.ArtemisMessagingComponent
|
||||
|
||||
/** Helper method which checks that the current RPC user is entitled for the given permission. Throws a [PermissionException] otherwise. */
|
||||
fun RpcContext.requirePermission(permission: String): RpcContext = requireEitherPermission(setOf(permission))
|
||||
|
||||
/** Helper method which checks that the current RPC user is entitled with any of the given permissions. Throws a [PermissionException] otherwise. */
|
||||
fun RpcContext.requireEitherPermission(permissions: Set<String>): RpcContext {
|
||||
// TODO remove the NODE_USER condition once webserver doesn't need it
|
||||
val currentUserPermissions = currentUser.permissions
|
||||
if (currentUser.username != ArtemisMessagingComponent.NODE_USER && currentUserPermissions.intersect(permissions + all()).isEmpty()) {
|
||||
throw PermissionException("User not permissioned with any of $permissions, permissions are $currentUserPermissions")
|
||||
}
|
||||
return this
|
||||
}
|
@ -0,0 +1,29 @@
package net.corda.node.services.messaging

import net.corda.client.rpc.PermissionException
import net.corda.core.context.InvocationContext
import net.corda.node.services.Permissions
import net.corda.nodeapi.internal.ArtemisMessagingComponent

data class RpcAuthContext(val invocation: InvocationContext, val grantedPermissions: RpcPermissions) {

    fun requirePermission(permission: String) = requireEitherPermission(setOf(permission))

    fun requireEitherPermission(permissions: Set<String>): RpcAuthContext {

        // TODO remove the NODE_USER condition once webserver and shell won't need it anymore
        if (invocation.principal().name != ArtemisMessagingComponent.NODE_USER && !grantedPermissions.coverAny(permissions)) {
            throw PermissionException("User not permissioned with any of $permissions, permissions are ${this.grantedPermissions}.")
        }
        return this
    }
}

data class RpcPermissions(private val values: Set<String> = emptySet()) {

    companion object {
        val NONE = RpcPermissions()
    }

    fun coverAny(permissions: Set<String>) = !values.intersect(permissions + Permissions.all()).isEmpty()
}
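RpcPermissions.coverAny is pure set logic: the requested permissions are unioned with Permissions.all() before intersecting with the grant, so a grant containing the all() token covers any request. A small sketch of that behaviour (the specific permission strings are placeholders; only the set semantics matter):

// A grant holding the all() token intersects every request, because the request set is
// always extended with Permissions.all() before the intersection.
val wildcard = RpcPermissions(setOf(Permissions.all()))
check(wildcard.coverAny(setOf("anyPermissionAtAll")))

// A specific grant only covers requests it actually intersects with.
val specific = RpcPermissions(setOf("InvokeRpc.nodeInfo"))
check(specific.coverAny(setOf("InvokeRpc.nodeInfo")))
check(!specific.coverAny(setOf("InvokeRpc.networkMapSnapshot")))
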
@ -1,70 +1,152 @@
package net.corda.node.services.network

import com.fasterxml.jackson.databind.ObjectMapper
import com.google.common.util.concurrent.MoreExecutors
import net.corda.core.crypto.SecureHash
import net.corda.core.crypto.SignedData
import net.corda.core.internal.openHttpConnection
import net.corda.core.node.NodeInfo
import net.corda.core.serialization.deserialize
import net.corda.core.serialization.serialize
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.minutes
import net.corda.core.utilities.seconds
import net.corda.node.services.api.NetworkMapCacheInternal
import net.corda.node.utilities.NamedThreadFactory
import okhttp3.CacheControl
import okhttp3.Headers
import rx.Subscription
import java.io.BufferedReader
import java.io.Closeable
import java.net.HttpURLConnection
import java.net.URL
import java.time.Duration
import java.util.concurrent.Executors
import java.util.concurrent.TimeUnit

interface NetworkMapClient {
/**
* Publish node info to network map service.
*/
fun publish(signedNodeInfo: SignedData<NodeInfo>)
class NetworkMapClient(compatibilityZoneURL: URL) {
companion object {
val logger = loggerFor<NetworkMapClient>()
}

/**
* Retrieve [NetworkMap] from the network map service containing list of node info hashes and network parameter hash.
*/
// TODO: Use NetworkMap object when available.
fun getNetworkMap(): List<SecureHash>
private val networkMapUrl = URL("$compatibilityZoneURL/network-map")

/**
* Retrieve [NodeInfo] from network map service using the node info hash.
*/
fun getNodeInfo(nodeInfoHash: SecureHash): NodeInfo?

// TODO: Implement getNetworkParameter when it's available.
//fun getNetworkParameter(networkParameterHash: SecureHash): NetworkParameter
}

class HTTPNetworkMapClient(private val networkMapUrl: String) : NetworkMapClient {
override fun publish(signedNodeInfo: SignedData<NodeInfo>) {
fun publish(signedNodeInfo: SignedData<NodeInfo>) {
val publishURL = URL("$networkMapUrl/publish")
val conn = publishURL.openConnection() as HttpURLConnection
val conn = publishURL.openHttpConnection()
conn.doOutput = true
conn.requestMethod = "POST"
conn.setRequestProperty("Content-Type", "application/octet-stream")
conn.outputStream.write(signedNodeInfo.serialize().bytes)
when (conn.responseCode) {
HttpURLConnection.HTTP_OK -> return
HttpURLConnection.HTTP_UNAUTHORIZED -> throw IllegalArgumentException(conn.errorStream.bufferedReader().readLine())
else -> throw IllegalArgumentException("Unexpected response code ${conn.responseCode}, response error message: '${conn.errorStream.bufferedReader().readLines()}'")
conn.outputStream.use { it.write(signedNodeInfo.serialize().bytes) }

// This will throw IOException if the response code is not HTTP 200.
// This gives a much better exception than reading the error stream.
conn.inputStream.close()
}

fun getNetworkMap(): NetworkMapResponse {
val conn = networkMapUrl.openHttpConnection()
val response = conn.inputStream.bufferedReader().use(BufferedReader::readLine)
val networkMap = ObjectMapper().readValue(response, List::class.java).map { SecureHash.parse(it.toString()) }
val timeout = CacheControl.parse(Headers.of(conn.headerFields.filterKeys { it != null }.mapValues { it.value.first() })).maxAgeSeconds().seconds
return NetworkMapResponse(networkMap, timeout)
}

fun getNodeInfo(nodeInfoHash: SecureHash): NodeInfo? {
val conn = URL("$networkMapUrl/$nodeInfoHash").openHttpConnection()
return if (conn.responseCode == HttpURLConnection.HTTP_NOT_FOUND) {
null
} else {
conn.inputStream.use { it.readBytes() }.deserialize()
}
}

override fun getNetworkMap(): List<SecureHash> {
val conn = URL(networkMapUrl).openConnection() as HttpURLConnection
fun myPublicHostname(): String {
val conn = URL("$networkMapUrl/my-hostname").openHttpConnection()
return conn.inputStream.bufferedReader().use(BufferedReader::readLine)
}
}

return when (conn.responseCode) {
HttpURLConnection.HTTP_OK -> {
val response = conn.inputStream.bufferedReader().use { it.readLine() }
ObjectMapper().readValue(response, List::class.java).map { SecureHash.parse(it.toString()) }
data class NetworkMapResponse(val networkMap: List<SecureHash>, val cacheMaxAge: Duration)

class NetworkMapUpdater(private val networkMapCache: NetworkMapCacheInternal,
private val fileWatcher: NodeInfoWatcher,
private val networkMapClient: NetworkMapClient?) : Closeable {
companion object {
private val logger = loggerFor<NetworkMapUpdater>()
private val retryInterval = 1.minutes
}

private val executor = Executors.newSingleThreadScheduledExecutor(NamedThreadFactory("Network Map Updater Thread", Executors.defaultThreadFactory()))
private var fileWatcherSubscription: Subscription? = null

override fun close() {
fileWatcherSubscription?.unsubscribe()
MoreExecutors.shutdownAndAwaitTermination(executor, 50, TimeUnit.SECONDS)
}

fun updateNodeInfo(newInfo: NodeInfo, signNodeInfo: (NodeInfo) -> SignedData<NodeInfo>) {
val oldInfo = networkMapCache.getNodeByLegalIdentity(newInfo.legalIdentities.first())
// Compare node info without timestamp.
if (newInfo.copy(serial = 0L) == oldInfo?.copy(serial = 0L)) return

// Only publish and write to disk if there are changes to the node info.
val signedNodeInfo = signNodeInfo(newInfo)
fileWatcher.saveToFile(signedNodeInfo)

if (networkMapClient != null) {
tryPublishNodeInfoAsync(signedNodeInfo, networkMapClient)
}
}

fun subscribeToNetworkMap() {
require(fileWatcherSubscription == null) { "Should not call this method twice." }
// Subscribe to file based networkMap
fileWatcherSubscription = fileWatcher.nodeInfoUpdates().subscribe(networkMapCache::addNode)

if (networkMapClient == null) return
// Subscribe to remote network map if configured.
val task = object : Runnable {
override fun run() {
val nextScheduleDelay = try {
val (networkMap, cacheTimeout) = networkMapClient.getNetworkMap()
val currentNodeHashes = networkMapCache.allNodeHashes
(networkMap - currentNodeHashes).mapNotNull {
// Download new node info from network map
networkMapClient.getNodeInfo(it)
}.forEach {
// Add new node info to the network map cache, these could be new node info or modification of node info for existing nodes.
networkMapCache.addNode(it)
}
// Remove node info from network map.
(currentNodeHashes - networkMap - fileWatcher.processedNodeInfoHashes)
.mapNotNull(networkMapCache::getNodeByHash)
.forEach(networkMapCache::removeNode)

cacheTimeout
} catch (t: Throwable) {
logger.warn("Error encountered while updating network map, will retry in $retryInterval", t)
retryInterval
}
// Schedule the next update.
executor.schedule(this, nextScheduleDelay.toMillis(), TimeUnit.MILLISECONDS)
}
else -> throw IllegalArgumentException("Unexpected response code ${conn.responseCode}, response error message: '${conn.errorStream.bufferedReader().readLines()}'")
}
executor.submit(task) // The check may be expensive, so always run it in the background even the first time.
}

override fun getNodeInfo(nodeInfoHash: SecureHash): NodeInfo? {
val nodeInfoURL = URL("$networkMapUrl/$nodeInfoHash")
val conn = nodeInfoURL.openConnection() as HttpURLConnection

return when (conn.responseCode) {
HttpURLConnection.HTTP_OK -> conn.inputStream.readBytes().deserialize()
HttpURLConnection.HTTP_NOT_FOUND -> null
else -> throw IllegalArgumentException("Unexpected response code ${conn.responseCode}, response error message: '${conn.errorStream.bufferedReader().readLines()}'")
private fun tryPublishNodeInfoAsync(signedNodeInfo: SignedData<NodeInfo>, networkMapClient: NetworkMapClient) {
val task = object : Runnable {
override fun run() {
try {
networkMapClient.publish(signedNodeInfo)
} catch (t: Throwable) {
logger.warn("Error encountered while publishing node info, will retry in $retryInterval.", t)
// TODO: Exponential backoff?
executor.schedule(this, retryInterval.toMillis(), TimeUnit.MILLISECONDS)
}
}
}
executor.submit(task)
}
}
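For orientation, a sketch of how the pieces above are intended to fit together (not from the commit; the cache, watcher, node info and signing callback are assumed to come from the node's startup code):

// Hypothetical wiring of the new network map polling components.
val client = NetworkMapClient(URL("https://cz.example.com"))               // compatibility zone URL is an example
val updater = NetworkMapUpdater(networkMapCache, nodeInfoWatcher, client)
updater.updateNodeInfo(myNodeInfo) { info -> signNodeInfo(info) }          // publishes only if the info actually changed
updater.subscribeToNetworkMap()                                            // re-polls at the server-advertised Cache-Control max-age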
@ -1,8 +1,8 @@
package net.corda.node.services.network

import net.corda.cordform.CordformNode
import net.corda.core.crypto.SecureHash
import net.corda.core.crypto.SignedData
import net.corda.core.crypto.sign
import net.corda.core.internal.*
import net.corda.core.node.NodeInfo
import net.corda.core.serialization.deserialize
@ -15,7 +15,7 @@ import rx.Scheduler
import rx.schedulers.Schedulers
import java.io.IOException
import java.nio.file.Path
import java.security.KeyPair
import java.time.Duration
import java.util.concurrent.TimeUnit
import kotlin.streams.toList

@ -24,19 +24,20 @@ import kotlin.streams.toList
* - Serialize and de-serialize a [NodeInfo] to disk and reading it back.
* - Poll a directory for new serialized [NodeInfo]
*
* @param path the base path of a node.
* @param pollFrequencyMsec how often to poll the filesystem in milliseconds. Any value smaller than 5 seconds will
* be treated as 5 seconds.
* @param nodePath the base path of a node.
* @param pollInterval how often to poll the filesystem. Must be 5 seconds or longer.
* @param scheduler a [Scheduler] for the rx [Observable] returned by [nodeInfoUpdates], this is mainly useful for
* testing. It defaults to the io scheduler which is the appropriate value for production uses.
*/
// TODO: Use NIO watch service instead?
class NodeInfoWatcher(private val nodePath: Path,
pollFrequencyMsec: Long = 5.seconds.toMillis(),
private val pollInterval: Duration = 5.seconds,
private val scheduler: Scheduler = Schedulers.io()) {

private val nodeInfoDirectory = nodePath / CordformNode.NODE_INFO_DIRECTORY
private val pollFrequencyMsec: Long = maxOf(pollFrequencyMsec, 5.seconds.toMillis())
private val successfullyProcessedFiles = mutableSetOf<Path>()
private val processedNodeInfoFiles = mutableSetOf<Path>()
private val _processedNodeInfoHashes = mutableSetOf<SecureHash>()
val processedNodeInfoHashes: Set<SecureHash> get() = _processedNodeInfoHashes.toSet()

companion object {
private val logger = loggerFor<NodeInfoWatcher>()
@ -48,17 +49,14 @@ class NodeInfoWatcher(private val nodePath: Path,
* is used so that one can freely copy these files without fearing to overwrite another one.
*
* @param path the path where to write the file, if non-existent it will be created.
* @param nodeInfo the NodeInfo to serialize.
* @param signingKey used to sign the NodeInfo data.
* @param signedNodeInfo the signed NodeInfo.
*/
fun saveToFile(path: Path, nodeInfo: NodeInfo, signingKey: KeyPair) {
fun saveToFile(path: Path, signedNodeInfo: SignedData<NodeInfo>) {
try {
path.createDirectories()
val serializedBytes = nodeInfo.serialize()
val regSig = signingKey.sign(serializedBytes.bytes)
val signedData = SignedData(serializedBytes, regSig)
signedData.serialize().open().copyTo(
path / "${NodeInfoFilesCopier.NODE_INFO_FILE_NAME_PREFIX}${serializedBytes.hash}")
signedNodeInfo.serialize()
.open()
.copyTo(path / "${NodeInfoFilesCopier.NODE_INFO_FILE_NAME_PREFIX}${signedNodeInfo.raw.hash}")
} catch (e: Exception) {
logger.warn("Couldn't write node info to file", e)
}
@ -66,6 +64,7 @@ class NodeInfoWatcher(private val nodePath: Path,
}

init {
require(pollInterval >= 5.seconds) { "Poll interval must be 5 seconds or longer." }
if (!nodeInfoDirectory.isDirectory()) {
try {
nodeInfoDirectory.createDirectories()
@ -85,10 +84,12 @@ class NodeInfoWatcher(private val nodePath: Path,
* @return an [Observable] returning [NodeInfo]s, at most one [NodeInfo] is returned for each processed file.
*/
fun nodeInfoUpdates(): Observable<NodeInfo> {
return Observable.interval(pollFrequencyMsec, TimeUnit.MILLISECONDS, scheduler)
return Observable.interval(pollInterval.toMillis(), TimeUnit.MILLISECONDS, scheduler)
.flatMapIterable { loadFromDirectory() }
}

fun saveToFile(signedNodeInfo: SignedData<NodeInfo>) = Companion.saveToFile(nodePath, signedNodeInfo)

/**
* Loads all the files contained in a given path and returns the deserialized [NodeInfo]s.
* Signatures are checked before returning a value.
@ -100,10 +101,13 @@ class NodeInfoWatcher(private val nodePath: Path,
return emptyList()
}
val result = nodeInfoDirectory.list { paths ->
paths.filter { it !in successfullyProcessedFiles }
paths.filter { it !in processedNodeInfoFiles }
.filter { it.isRegularFile() }
.map { path ->
processFile(path)?.apply { successfullyProcessedFiles.add(path) }
processFile(path)?.apply {
processedNodeInfoFiles.add(path)
_processedNodeInfoHashes.add(this.serialize().hash)
}
}
.toList()
.filterNotNull()
@ -115,13 +119,13 @@ class NodeInfoWatcher(private val nodePath: Path,
}

private fun processFile(file: Path): NodeInfo? {
try {
return try {
logger.info("Reading NodeInfo from file: $file")
val signedData = file.readAll().deserialize<SignedData<NodeInfo>>()
return signedData.verified()
signedData.verified()
} catch (e: Exception) {
logger.warn("Exception parsing NodeInfo from file. $file", e)
return null
null
}
}
}
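A short usage sketch for the reworked watcher (not from the commit; the base directory, network map cache and signed node info are assumed):

// Hypothetical usage of NodeInfoWatcher after the pollFrequencyMsec -> pollInterval change.
val watcher = NodeInfoWatcher(baseDirectory, pollInterval = 10.seconds)
watcher.nodeInfoUpdates().subscribe { nodeInfo -> networkMapCache.addNode(nodeInfo) }
watcher.saveToFile(signedNodeInfo)   // written under the node's node-info directory, named by the signed data's hash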
@ -1,6 +1,7 @@
package net.corda.node.services.network

import net.corda.core.concurrent.CordaFuture
import net.corda.core.crypto.SecureHash
import net.corda.core.crypto.toStringShort
import net.corda.core.identity.AbstractParty
import net.corda.core.identity.CordaX500Name
@ -11,16 +12,16 @@ import net.corda.core.internal.concurrent.openFuture
import net.corda.core.internal.schemas.NodeInfoSchemaV1
import net.corda.core.messaging.DataFeed
import net.corda.core.node.NodeInfo
import net.corda.core.node.NotaryInfo
import net.corda.core.node.services.IdentityService
import net.corda.core.node.services.NetworkMapCache.MapChange
import net.corda.core.node.services.NotaryService
import net.corda.core.node.services.PartyInfo
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.serialization.serialize
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.core.utilities.loggerFor
import net.corda.node.services.api.NetworkMapCacheBaseInternal
import net.corda.node.services.api.NetworkMapCacheInternal
import net.corda.node.services.config.NodeConfiguration
import net.corda.node.utilities.CordaPersistence
import net.corda.node.utilities.bufferUntilDatabaseCommit
import net.corda.node.utilities.wrapWithDatabaseTransaction
@ -31,7 +32,6 @@ import java.security.PublicKey
import java.util.*
import javax.annotation.concurrent.ThreadSafe
import kotlin.collections.HashMap
import kotlin.collections.HashSet

class NetworkMapCacheImpl(
networkMapCacheBase: NetworkMapCacheBaseInternal,
@ -61,16 +61,13 @@ class NetworkMapCacheImpl(
* Extremely simple in-memory cache of the network map.
*/
@ThreadSafe
open class PersistentNetworkMapCache(
private val database: CordaPersistence,
val configuration: NodeConfiguration,
notaries: List<NotaryInfo>
) : SingletonSerializeAsToken(), NetworkMapCacheBaseInternal {
open class PersistentNetworkMapCache(private val database: CordaPersistence) : SingletonSerializeAsToken(), NetworkMapCacheBaseInternal {
companion object {
val logger = loggerFor<PersistentNetworkMapCache>()
}

// TODO Cleanup registered and party nodes
// TODO Small explanation, partyNodes and registeredNodes is left in memory as it was before, because it will be removed in
// next PR that gets rid of services. These maps are used only for queries by service.
protected val registeredNodes: MutableMap<PublicKey, NodeInfo> = Collections.synchronizedMap(HashMap())
protected val partyNodes: MutableList<NodeInfo> get() = registeredNodes.map { it.value }.toMutableList()
private val _changed = PublishSubject.create<MapChange>()
@ -84,24 +81,53 @@ open class PersistentNetworkMapCache(
override val nodeReady: CordaFuture<Void?> get() = _registrationFuture
private var _loadDBSuccess: Boolean = false
override val loadDBSuccess get() = _loadDBSuccess

override val notaryIdentities: List<Party> = notaries.map { it.identity }
private val validatingNotaries = notaries.mapNotNullTo(HashSet()) { if (it.validating) it.identity else null }

private val nodeInfoSerializer = NodeInfoWatcher(configuration.baseDirectory,
configuration.additionalNodeInfoPollingFrequencyMsec)
// TODO From the NetworkMapService redesign doc: Remove the concept of network services.
// As a temporary hack, just assume for now that every network has a notary service named "Notary Service" that can be looked up in the map.
// This should eliminate the only required usage of services.
// It is ensured on node startup when constructing a notary that the name contains "notary".
override val notaryIdentities: List<Party>
get() {
return partyNodes
.flatMap {
// TODO: validate notary identity certificates before loading into network map cache.
// Notary certificates have to be signed by the doorman directly
it.legalIdentities
}
.filter { it.name.commonName?.startsWith(NotaryService.ID_PREFIX) ?: false }
.toSet() // Distinct, because of distributed service nodes
.sortedBy { it.name.toString() }
}

init {
loadFromFiles()
database.transaction { loadFromDB(session) }
}

private fun loadFromFiles() {
logger.info("Loading network map from files..")
nodeInfoSerializer.nodeInfoUpdates().subscribe { node -> addNode(node) }
override val allNodeHashes: List<SecureHash>
get() {
return database.transaction {
val builder = session.criteriaBuilder
val query = builder.createQuery(String::class.java).run {
from(NodeInfoSchemaV1.PersistentNodeInfo::class.java).run {
select(get<String>(NodeInfoSchemaV1.PersistentNodeInfo::hash.name))
}
}
session.createQuery(query).resultList.map { SecureHash.sha256(it) }
}
}

override fun getNodeByHash(nodeHash: SecureHash): NodeInfo? {
return database.transaction {
val builder = session.criteriaBuilder
val query = builder.createQuery(NodeInfoSchemaV1.PersistentNodeInfo::class.java).run {
from(NodeInfoSchemaV1.PersistentNodeInfo::class.java).run {
where(builder.equal(get<String>(NodeInfoSchemaV1.PersistentNodeInfo::hash.name), nodeHash.toString()))
}
}
session.createQuery(query).resultList.singleOrNull()?.toNodeInfo()
}
}

override fun isValidatingNotary(party: Party): Boolean = party in validatingNotaries
override fun isValidatingNotary(party: Party): Boolean = isNotary(party) && "validating" in party.name.commonName!!

override fun getPartyInfo(party: Party): PartyInfo? {
val nodes = database.transaction { queryByIdentityKey(session, party.owningKey) }
@ -277,11 +303,14 @@ open class PersistentNetworkMapCache(
else result.map { it.toNodeInfo() }.singleOrNull() ?: throw IllegalStateException("More than one node with the same host and port")
}

/** Object Relational Mapping support. */
private fun generateMappedObject(nodeInfo: NodeInfo): NodeInfoSchemaV1.PersistentNodeInfo {
return NodeInfoSchemaV1.PersistentNodeInfo(
id = 0,
hash = nodeInfo.serialize().hash.toString(),
addresses = nodeInfo.addresses.map { NodeInfoSchemaV1.DBHostAndPort.fromHostAndPort(it) },
// TODO Another ugly hack with special first identity...
legalIdentitiesAndCerts = nodeInfo.legalIdentitiesAndCerts.mapIndexed { idx, elem ->
NodeInfoSchemaV1.DBPartyAndCertificate(elem, isMain = idx == 0)
},
@ -0,0 +1,68 @@
package net.corda.node.services.persistence

import net.corda.core.identity.AbstractParty
import net.corda.core.identity.CordaX500Name
import net.corda.core.node.services.IdentityService
import net.corda.core.utilities.loggerFor
import org.hibernate.type.descriptor.WrapperOptions
import org.hibernate.type.descriptor.java.AbstractTypeDescriptor
import org.hibernate.type.descriptor.java.ImmutableMutabilityPlan
import org.hibernate.type.descriptor.java.MutabilityPlan

class AbstractPartyDescriptor(identitySvc: () -> IdentityService) : AbstractTypeDescriptor<AbstractParty>(AbstractParty::class.java) {
companion object {
private val log = loggerFor<AbstractPartyDescriptor>()
}

private val identityService: IdentityService by lazy(identitySvc)

override fun fromString(dbData: String?): AbstractParty? {
return if (dbData != null) {
val party = identityService.wellKnownPartyFromX500Name(CordaX500Name.parse(dbData))
if (party == null) log.warn("Identity service unable to resolve X500name: $dbData")
party
} else {
null
}
}

override fun getMutabilityPlan(): MutabilityPlan<AbstractParty> = ImmutableMutabilityPlan()

override fun toString(party: AbstractParty?): String? {
return if (party != null) {
val partyName = party.nameOrNull() ?: identityService.wellKnownPartyFromAnonymous(party)?.name
if (partyName == null) log.warn("Identity service unable to resolve AbstractParty: $party")
partyName.toString()
} else {
return null // non resolvable anonymous parties
}
}

override fun <X : Any> wrap(value: X?, options: WrapperOptions): AbstractParty? {
return if (value != null) {
if (String::class.java.isInstance(value)) {
return fromString(value as String)!!
}
if (AbstractParty::class.java.isInstance(value)) {
return value as AbstractParty
}
throw unknownWrap(value::class.java)
} else {
null
}
}

override fun <X : Any> unwrap(value: AbstractParty?, type: Class<X>, options: WrapperOptions): X? {
return if (value != null) {
if (AbstractParty::class.java.isAssignableFrom(type)) {
return value as X
}
if (String::class.java.isAssignableFrom(type)) {
return toString(value) as X
}
throw unknownUnwrap(type)
} else {
null
}
}
}
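A sketch of the round trip the descriptor performs on Hibernate's behalf (not from the commit; identityService and someParty are assumed to exist):

// The descriptor persists an AbstractParty as its well-known X.500 name and resolves it back on load.
val descriptor = AbstractPartyDescriptor { identityService }
val columnValue: String? = descriptor.toString(someParty)          // null only for unresolvable anonymous parties
val reloaded: AbstractParty? = descriptor.fromString(columnValue)  // looked up via wellKnownPartyFromX500Name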
@ -23,7 +23,7 @@ class DBCheckpointStorage : CheckpointStorage {
var checkpointId: String = "",

@Lob
@Column(name = "checkpoint")
@Column(name = "checkpoint_value")
var checkpoint: ByteArray = ByteArray(0)
)

@ -22,7 +22,7 @@ class DBTransactionStorage : WritableTransactionStorage, SingletonSerializeAsTok
var txId: String = "",

@Lob
@Column
@Column(name = "transaction_value")
var transaction: ByteArray = ByteArray(0)
)

@ -3,6 +3,7 @@ package net.corda.node.services.persistence
import net.corda.core.internal.castIfPossible
import net.corda.core.node.services.IdentityService
import net.corda.core.schemas.MappedSchema
import net.corda.core.utilities.OpaqueBytes
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.toHexString
import net.corda.node.services.api.SchemaService
@ -18,8 +19,10 @@ import org.hibernate.engine.jdbc.connections.spi.ConnectionProvider
import org.hibernate.engine.jdbc.env.spi.JdbcEnvironment
import org.hibernate.service.UnknownUnwrapTypeException
import org.hibernate.type.AbstractSingleColumnStandardBasicType
import org.hibernate.type.descriptor.java.JavaTypeDescriptorRegistry
import org.hibernate.type.descriptor.java.PrimitiveByteArrayTypeDescriptor
import org.hibernate.type.descriptor.sql.BlobTypeDescriptor
import org.hibernate.type.descriptor.sql.VarbinaryTypeDescriptor
import java.sql.Connection
import java.util.*
import java.util.concurrent.ConcurrentHashMap
@ -35,6 +38,11 @@ class HibernateConfiguration(val schemaService: SchemaService, private val datab
private val transactionIsolationLevel = parserTransactionIsolationLevel(databaseProperties.getProperty("transactionIsolationLevel") ?: "")
val sessionFactoryForRegisteredSchemas = schemaService.schemaOptions.keys.let {
logger.info("Init HibernateConfiguration for schemas: $it")
// Register the AbstractPartyDescriptor so Hibernate doesn't warn when encountering AbstractParty. Unfortunately
// Hibernate warns about not being able to find a descriptor if we don't provide one, but won't use it by default
// so we end up providing both descriptor and converter. We should re-examine this in later versions to see if
// either Hibernate can be convinced to stop warning, use the descriptor by default, or something else.
JavaTypeDescriptorRegistry.INSTANCE.addDescriptor(AbstractPartyDescriptor(createIdentityService))
sessionFactoryForSchemas(it)
}

@ -77,6 +85,7 @@ class HibernateConfiguration(val schemaService: SchemaService, private val datab
// Register a tweaked version of `org.hibernate.type.MaterializedBlobType` that truncates logged messages.
// to avoid OOM when large blobs might get logged.
applyBasicType(CordaMaterializedBlobType, CordaMaterializedBlobType.name)
applyBasicType(CordaWrapperBinaryType, CordaWrapperBinaryType.name)
build()
}

@ -133,4 +142,15 @@ class HibernateConfiguration(val schemaService: SchemaService, private val datab
}
}
}

// A tweaked version of `org.hibernate.type.WrapperBinaryType` that deals with ByteArray (java primitive byte[] type).
private object CordaWrapperBinaryType : AbstractSingleColumnStandardBasicType<ByteArray>(VarbinaryTypeDescriptor.INSTANCE, PrimitiveByteArrayTypeDescriptor.INSTANCE) {
override fun getRegistrationKeys(): Array<String> {
return arrayOf(name, "ByteArray", ByteArray::class.java.name)
}

override fun getName(): String {
return "corda-wrapper-binary"
}
}
}
@ -7,19 +7,26 @@ import com.google.common.hash.Hashing
import com.google.common.hash.HashingInputStream
import com.google.common.io.CountingInputStream
import net.corda.core.CordaRuntimeException
import net.corda.core.contracts.*
import net.corda.core.internal.AbstractAttachment
import net.corda.core.contracts.Attachment
import net.corda.core.crypto.SecureHash
import net.corda.core.node.services.AttachmentId
import net.corda.core.node.services.AttachmentStorage
import net.corda.core.node.services.vault.*
import net.corda.core.serialization.*
import net.corda.core.utilities.loggerFor
import net.corda.node.services.vault.HibernateAttachmentQueryCriteriaParser
import net.corda.node.utilities.DatabaseTransactionManager
import net.corda.node.utilities.NODE_DATABASE_PREFIX
import net.corda.node.utilities.currentDBSession
import java.io.*
import java.lang.Exception
import java.nio.file.Paths
import java.time.Instant
import java.util.jar.JarInputStream
import javax.annotation.concurrent.ThreadSafe
import javax.persistence.*
import javax.persistence.Column

/**
* Stores attachments using Hibernate to database.
@ -32,12 +39,21 @@ class NodeAttachmentService(metrics: MetricRegistry) : AttachmentStorage, Single
indexes = arrayOf(Index(name = "att_id_idx", columnList = "att_id")))
class DBAttachment(
@Id
@Column(name = "att_id", length = 65535)
@Column(name = "att_id")
var attId: String,

@Column(name = "content")
@Lob
var content: ByteArray
var content: ByteArray,

@Column(name = "insertion_date", nullable = false, updatable = false)
var insertionDate: Instant = Instant.now(),

@Column(name = "uploader", updatable = false)
var uploader: String? = null,

@Column(name = "filename", updatable = false)
var filename: String? = null
) : Serializable

companion object {
@ -147,8 +163,16 @@ class NodeAttachmentService(metrics: MetricRegistry) : AttachmentStorage, Single
return null
}

override fun importAttachment(jar: InputStream): AttachmentId {
return import(jar, null, null)
}

override fun importAttachment(jar: InputStream, uploader: String, filename: String): AttachmentId {
return import(jar, uploader, filename)
}

// TODO: PLT-147: The attachment should be randomised to prevent brute force guessing and thus privacy leaks.
override fun importAttachment(jar: InputStream): SecureHash {
private fun import(jar: InputStream, uploader: String?, filename: String?): AttachmentId {
require(jar !is JarInputStream)

// Read the file into RAM, hashing it to find the ID as we go. The attachment must fit into memory.
@ -169,7 +193,7 @@ class NodeAttachmentService(metrics: MetricRegistry) : AttachmentStorage, Single
criteriaQuery.where(criteriaBuilder.equal(attachments.get<String>(DBAttachment::attId.name), id.toString()))
val count = session.createQuery(criteriaQuery).singleResult
if (count == 0L) {
val attachment = NodeAttachmentService.DBAttachment(attId = id.toString(), content = bytes)
val attachment = NodeAttachmentService.DBAttachment(attId = id.toString(), content = bytes, uploader = uploader, filename = filename)
session.save(attachment)

attachmentCount.inc()
@ -179,6 +203,30 @@ class NodeAttachmentService(metrics: MetricRegistry) : AttachmentStorage, Single
return id
}

override fun queryAttachments(criteria: AttachmentQueryCriteria, sorting: AttachmentSort?): List<AttachmentId> {
log.info("Attachment query criteria: $criteria, sorting: $sorting")

val session = DatabaseTransactionManager.current().session
val criteriaBuilder = session.criteriaBuilder

val criteriaQuery = criteriaBuilder.createQuery(DBAttachment::class.java)
val root = criteriaQuery.from(DBAttachment::class.java)

val criteriaParser = HibernateAttachmentQueryCriteriaParser(criteriaBuilder, criteriaQuery, root)

// parse criteria and build where predicates
criteriaParser.parse(criteria, sorting)

// prepare query for execution
val query = session.createQuery(criteriaQuery)

// execution
val results = query.resultList

return results.map { AttachmentId.parse(it.attId) }
}

private fun checkIsAValidJAR(stream: InputStream) {
// Just iterate over the entries with verification enabled: should be good enough to catch mistakes.
// Note that JarInputStream won't throw any kind of error at all if the file stream is in fact not
@ -15,6 +15,7 @@ import net.corda.core.identity.PartyAndCertificate
import net.corda.core.internal.*
import net.corda.core.internal.concurrent.OpenFuture
import net.corda.core.internal.concurrent.openFuture
import net.corda.core.context.InvocationContext
import net.corda.core.serialization.SerializationDefaults
import net.corda.core.serialization.serialize
import net.corda.core.transactions.SignedTransaction
@ -22,6 +23,7 @@ import net.corda.core.utilities.*
import net.corda.node.services.api.FlowAppAuditEvent
import net.corda.node.services.api.FlowPermissionAuditEvent
import net.corda.node.services.api.ServiceHubInternal
import net.corda.node.services.logging.pushToLoggingContext
import net.corda.node.services.statemachine.FlowSessionState.Initiating
import net.corda.node.utilities.CordaPersistence
import net.corda.node.utilities.DatabaseTransaction
@ -40,9 +42,8 @@ class FlowPermissionException(message: String) : FlowException(message)
class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
override val logic: FlowLogic<R>,
scheduler: FiberScheduler,
override val flowInitiator: FlowInitiator,
// Store the Party rather than the full cert path with PartyAndCertificate
val ourIdentity: Party) : Fiber<Unit>(id.toString(), scheduler), FlowStateMachine<R> {
val ourIdentity: Party,
override val context: InvocationContext) : Fiber<Unit>(id.toString(), scheduler), FlowStateMachine<R> {

companion object {
// Used to work around a small limitation in Quasar.
@ -254,7 +255,7 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
val permissionGranted = true // TODO define permission control service on ServiceHubInternal and actually check authorization.
val checkPermissionEvent = FlowPermissionAuditEvent(
serviceHub.clock.instant(),
flowInitiator,
context,
"Flow Permission Required: $permissionName",
extraAuditData,
logic.javaClass,
@ -264,7 +265,7 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
serviceHub.auditService.recordAuditEvent(checkPermissionEvent)
@Suppress("ConstantConditionIf")
if (!permissionGranted) {
throw FlowPermissionException("User $flowInitiator not permissioned for $permissionName on flow $id")
throw FlowPermissionException("User ${context.principal()} not permissioned for $permissionName on flow $id")
}
}

@ -272,7 +273,7 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
override fun recordAuditEvent(eventType: String, comment: String, extraAuditData: Map<String, String>) {
val flowAuditEvent = FlowAppAuditEvent(
serviceHub.clock.instant(),
flowInitiator,
context,
comment,
extraAuditData,
logic.javaClass,
@ -306,6 +307,8 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
return result
}

internal fun pushToLoggingContext() = context.pushToLoggingContext()

/**
* This method will suspend the state machine and wait for incoming session init response from other party.
*/
@ -392,6 +395,7 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
session.retryable = retryable
val (version, initiatingFlowClass) = session.flow.javaClass.flowVersionAndInitiatingClass
val payloadBytes = firstPayload?.serialize(context = SerializationDefaults.P2P_CONTEXT)
logger.info("Initiating flow session with party ${otherParty.name}. Session id for tracing purposes is ${session.ourSessionId}.")
val sessionInit = SessionInit(session.ourSessionId, initiatingFlowClass.name, version, session.flow.javaClass.appName, payloadBytes)
sendInternal(session, sessionInit)
if (waitForConfirmation) {
@ -1,10 +1,9 @@
package net.corda.node.services.statemachine

import net.corda.core.concurrent.CordaFuture
import net.corda.core.flows.FlowInitiator
import net.corda.core.flows.FlowLogic
import net.corda.core.identity.Party
import net.corda.core.internal.FlowStateMachine
import net.corda.core.context.InvocationContext
import net.corda.core.messaging.DataFeed
import net.corda.core.utilities.Try
import rx.Observable
@ -20,8 +19,7 @@ import rx.Observable
* A flow is a class with a single call method. The call method and any others it invokes are rewritten by a bytecode
* rewriting engine called Quasar, to ensure the code can be suspended and resumed at any point.
*
* TODO: Consider the issue of continuation identity more deeply: is it a safe assumption that a serialised
* continuation is always unique?
* TODO: Consider the issue of continuation identity more deeply: is it a safe assumption that a serialised continuation is always unique?
* TODO: Think about how to bring the system to a clean stop so it can be upgraded without any serialised stacks on disk
* TODO: Timeouts
* TODO: Surfacing of exceptions via an API and/or management UI
@ -43,9 +41,9 @@ interface StateMachineManager {
* Starts a new flow.
*
* @param flowLogic The flow's code.
* @param flowInitiator The initiator of the flow.
* @param context The context of the flow.
*/
fun <A> startFlow(flowLogic: FlowLogic<A>, flowInitiator: FlowInitiator, ourIdentity: Party? = null): CordaFuture<FlowStateMachine<A>>
fun <A> startFlow(flowLogic: FlowLogic<A>, context: InvocationContext): CordaFuture<FlowStateMachine<A>>

/**
* Represents an addition/removal of a state machine.
@ -11,9 +11,13 @@ import com.google.common.collect.HashMultimap
import com.google.common.util.concurrent.MoreExecutors
import net.corda.core.CordaException
import net.corda.core.concurrent.CordaFuture
import net.corda.core.context.InvocationContext
import net.corda.core.crypto.SecureHash
import net.corda.core.crypto.random63BitValue
import net.corda.core.flows.*
import net.corda.core.flows.FlowException
import net.corda.core.flows.FlowInfo
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.StateMachineRunId
import net.corda.core.identity.Party
import net.corda.core.internal.*
import net.corda.core.internal.concurrent.doneFuture
@ -31,6 +35,7 @@ import net.corda.node.internal.InitiatedFlowFactory
import net.corda.node.services.api.Checkpoint
import net.corda.node.services.api.CheckpointStorage
import net.corda.node.services.api.ServiceHubInternal
import net.corda.node.services.config.shouldCheckCheckpoints
import net.corda.node.services.messaging.ReceivedMessage
import net.corda.node.services.messaging.TopicSession
import net.corda.node.utilities.*
@ -88,7 +93,7 @@ class StateMachineManagerImpl(
private val scheduler = FiberScheduler()
private val mutex = ThreadBox(InnerState())
// This thread (only enabled in dev mode) deserialises checkpoints in the background to shake out bugs in checkpoint restore.
private val checkpointCheckerThread = if (serviceHub.configuration.devModeOptions?.disableCheckpointChecker != true) {
private val checkpointCheckerThread = if (serviceHub.configuration.shouldCheckCheckpoints()) {
newNamedSingleThreadExecutor("CheckpointChecker")
} else {
null
@ -261,26 +266,28 @@ class StateMachineManagerImpl(
}

private fun onSessionMessage(message: ReceivedMessage) {
val peer = message.peer
val sessionMessage = try {
message.data.deserialize<SessionMessage>()
} catch (ex: Exception) {
logger.error("Received corrupt SessionMessage data from ${message.peer}")
logger.error("Received corrupt SessionMessage data from $peer")
return
}
val sender = serviceHub.networkMapCache.getPeerByLegalName(message.peer)
val sender = serviceHub.networkMapCache.getPeerByLegalName(peer)
if (sender != null) {
when (sessionMessage) {
is ExistingSessionMessage -> onExistingSessionMessage(sessionMessage, sender)
is SessionInit -> onSessionInit(sessionMessage, message, sender)
}
} else {
logger.error("Unknown peer ${message.peer} in $sessionMessage")
logger.error("Unknown peer $peer in $sessionMessage")
}
}

private fun onExistingSessionMessage(message: ExistingSessionMessage, sender: Party) {
val session = openSessions[message.recipientSessionId]
if (session != null) {
session.fiber.pushToLoggingContext()
session.fiber.logger.trace { "Received $message on $session from $sender" }
if (session.retryable) {
if (message is SessionConfirm && session.state is FlowSessionState.Initiated) {
@ -326,6 +333,7 @@ class StateMachineManagerImpl(
}

private fun onSessionInit(sessionInit: SessionInit, receivedMessage: ReceivedMessage, sender: Party) {

logger.trace { "Received $sessionInit from $sender" }
val senderSessionId = sessionInit.initiatorSessionId

@ -349,9 +357,10 @@ class StateMachineManagerImpl(
session.receivedMessages += ReceivedSessionMessage(sender, SessionData(session.ourSessionId, sessionInit.firstPayload))
}
openSessions[session.ourSessionId] = session
// TODO Perhaps the session-init will specify which of our multiple identities to use, which we would have to
// double-check is actually ours. However, what if we want to control how our identities get used?
val fiber = createFiber(flow, FlowInitiator.Peer(sender))
val context = InvocationContext.peer(sender.name)
val fiber = createFiber(flow, context)
fiber.pushToLoggingContext()
logger.info("Accepting flow session from party ${sender.name}. Session id for tracing purposes is ${sessionInit.initiatorSessionId}.")
flowSession.sessionFlow = flow
flowSession.stateMachine = fiber
fiber.openSessions[Pair(flow, sender)] = session
@ -406,13 +415,13 @@ class StateMachineManagerImpl(
}
}

private fun <T> createFiber(logic: FlowLogic<T>, flowInitiator: FlowInitiator, ourIdentity: Party? = null): FlowStateMachineImpl<T> {
private fun <T> createFiber(logic: FlowLogic<T>, context: InvocationContext, ourIdentity: Party? = null): FlowStateMachineImpl<T> {
val fsm = FlowStateMachineImpl(
StateMachineRunId.createRandom(),
logic,
scheduler,
flowInitiator,
ourIdentity ?: serviceHub.myInfo.legalIdentities[0])
ourIdentity ?: serviceHub.myInfo.legalIdentities[0],
context)
initFiber(fsm)
return fsm
}
@ -422,7 +431,7 @@ class StateMachineManagerImpl(
fiber.database = database
fiber.serviceHub = serviceHub
fiber.ourIdentityAndCert = serviceHub.myInfo.legalIdentitiesAndCerts.find { it.party == fiber.ourIdentity }
?: throw IllegalStateException("Identity specified by ${fiber.id} (${fiber.ourIdentity}) is not one of ours!")
?: throw IllegalStateException("Identity specified by ${fiber.id} (${fiber.ourIdentity.name}) is not one of ours!")
fiber.actionOnSuspend = { ioRequest ->
updateCheckpoint(fiber)
// We commit on the fibers transaction that was copied across ThreadLocals during suspend
@ -468,7 +477,7 @@ class StateMachineManagerImpl(
private fun endAllFiberSessions(fiber: FlowStateMachineImpl<*>, result: Try<*>, propagated: Boolean) {
openSessions.values.removeIf { session ->
if (session.fiber == fiber) {
session.endSession((result as? Try.Failure)?.exception, propagated)
session.endSession(fiber.context, (result as? Try.Failure)?.exception, propagated)
true
} else {
false
@ -476,7 +485,7 @@ class StateMachineManagerImpl(
}
}

private fun FlowSessionInternal.endSession(exception: Throwable?, propagated: Boolean) {
private fun FlowSessionInternal.endSession(context: InvocationContext, exception: Throwable?, propagated: Boolean) {
val initiatedState = state as? FlowSessionState.Initiated ?: return
val sessionEnd = if (exception == null) {
NormalSessionEnd(initiatedState.peerSessionId)
@ -501,11 +510,11 @@ class StateMachineManagerImpl(
*
* Note that you must be on the [executor] thread.
*/
override fun <A> startFlow(flowLogic: FlowLogic<A>, flowInitiator: FlowInitiator, ourIdentity: Party?): CordaFuture<FlowStateMachine<A>> {
override fun <A> startFlow(flowLogic: FlowLogic<A>, context: InvocationContext): CordaFuture<FlowStateMachine<A>> {
// TODO: Check that logic has @Suspendable on its call method.
executor.checkOnThread()
val fiber = database.transaction {
val fiber = createFiber(flowLogic, flowInitiator, ourIdentity)
val fiber = createFiber(flowLogic, context)
updateCheckpoint(fiber)
fiber
}
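For orientation, a sketch of starting a flow against the new signature (not from the commit; the flow class and counterparty are assumed):

// startFlow now takes an InvocationContext instead of a FlowInitiator; InvocationContext.peer is the
// same factory used in onSessionInit above.
val context = InvocationContext.peer(counterparty.name)
val stateMachine = stateMachineManager.startFlow(ExampleFlowLogic(), context).getOrThrow()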
@ -60,7 +60,7 @@ class BFTNonValidatingNotaryService(override val services: ServiceHubInternal,
log.info("BFT SMaRt replica $replicaId is running.")
}
}
BFTSMaRt.Client(it, replicaId, cluster)
BFTSMaRt.Client(it, replicaId, cluster, this)
}
}

@ -95,7 +95,7 @@ class BFTNonValidatingNotaryService(override val services: ServiceHubInternal,
}

@Entity
@Table(name = "${NODE_DATABASE_PREFIX}bft_smart_notary_committed_states")
@Table(name = "${NODE_DATABASE_PREFIX}bft_committed_states")
class PersistedCommittedState(id: PersistentStateRef, consumingTxHash: String, consumingIndex: Int, party: PersistentUniquenessProvider.PersistentParty)
: PersistentUniquenessProvider.PersistentUniqueness(id, consumingTxHash, consumingIndex, party)

@ -75,7 +75,7 @@ object BFTSMaRt {
fun waitUntilAllReplicasHaveInitialized()
}

class Client(config: BFTSMaRtConfig, private val clientId: Int, private val cluster: Cluster) : SingletonSerializeAsToken() {
class Client(config: BFTSMaRtConfig, private val clientId: Int, private val cluster: Cluster, private val notaryService: BFTNonValidatingNotaryService) : SingletonSerializeAsToken() {
companion object {
private val log = loggerFor<Client>()
}
@ -3,10 +3,7 @@ package net.corda.node.services.transactions
|
||||
import io.atomix.copycat.Command
|
||||
import io.atomix.copycat.Query
|
||||
import io.atomix.copycat.server.Commit
|
||||
import io.atomix.copycat.server.Snapshottable
|
||||
import io.atomix.copycat.server.StateMachine
|
||||
import io.atomix.copycat.server.storage.snapshot.SnapshotReader
|
||||
import io.atomix.copycat.server.storage.snapshot.SnapshotWriter
|
||||
import net.corda.core.utilities.loggerFor
|
||||
import net.corda.node.utilities.*
|
||||
import java.util.LinkedHashMap
|
||||
@ -15,11 +12,10 @@ import java.util.LinkedHashMap
|
||||
* A distributed map state machine that doesn't allow overriding values. The state machine is replicated
|
||||
* across a Copycat Raft cluster.
|
||||
*
|
||||
* The map contents are backed by a JDBC table. State re-synchronisation is achieved by periodically persisting snapshots
|
||||
* to disk, and sharing them across the cluster. A new node joining the cluster will have to obtain and install a snapshot
|
||||
* containing the entire JDBC table contents.
|
||||
* The map contents are backed by a JDBC table. State re-synchronisation is achieved by replaying the command log to the
|
||||
* new (or re-joining) cluster member.
|
||||
*/
|
||||
class DistributedImmutableMap<K : Any, V : Any, E, EK>(val db: CordaPersistence, createMap: () -> AppendOnlyPersistentMap<K, V, E, EK>) : StateMachine(), Snapshottable {
|
||||
class DistributedImmutableMap<K : Any, V : Any, E, EK>(val db: CordaPersistence, createMap: () -> AppendOnlyPersistentMap<K, Pair<Long, V>, E, EK>) : StateMachine() {
|
||||
companion object {
|
||||
private val log = loggerFor<DistributedImmutableMap<*, *, *, *>>()
|
||||
}
|
||||
@ -27,9 +23,16 @@ class DistributedImmutableMap<K : Any, V : Any, E, EK>(val db: CordaPersistence,
|
||||
object Commands {
|
||||
class PutAll<K, V>(val entries: Map<K, V>) : Command<Map<K, V>> {
|
||||
override fun compaction(): Command.CompactionMode {
|
||||
// The SNAPSHOT compaction mode indicates that a command can be removed from the Raft log once
|
||||
// a snapshot of the state machine has been written to disk
|
||||
return Command.CompactionMode.SNAPSHOT
|
||||
// The FULL compaction mode retains the command in the log until it has been stored and applied on all
|
||||
// servers in the cluster. Once the commit has been applied to a state machine and closed it may be
|
||||
// removed from the log during minor or major compaction.
|
||||
//
|
||||
// Note that we are not closing the commits, thus our log grows without bounds. We let the log grow on
|
||||
// purpose to be able to increase the size of a running cluster, e.g. to add and decommission nodes.
|
||||
// TODO: Cluster membership changes need testing.
|
||||
// TODO: I'm wondering if we should support resizing notary clusters, or if we could require users to
|
||||
// setup a new cluster of the desired size and transfer the data.
|
||||
return Command.CompactionMode.FULL
|
||||
}
|
||||
}
|
||||
|
||||
@ -43,7 +46,7 @@ class DistributedImmutableMap<K : Any, V : Any, E, EK>(val db: CordaPersistence,
|
||||
fun get(commit: Commit<Commands.Get<K, V>>): V? {
|
||||
commit.use {
|
||||
val key = it.operation().key
|
||||
return db.transaction { map[key] }
|
||||
return db.transaction { map[key]?.second }
|
||||
}
|
||||
}
|
||||
|
||||
@ -54,12 +57,13 @@ class DistributedImmutableMap<K : Any, V : Any, E, EK>(val db: CordaPersistence,
|
||||
*/
|
||||
fun put(commit: Commit<Commands.PutAll<K, V>>): Map<K, V> {
|
||||
commit.use {
|
||||
val index = commit.index()
|
||||
val conflicts = LinkedHashMap<K, V>()
|
||||
db.transaction {
|
||||
val entries = commit.operation().entries
|
||||
log.debug("State machine commit: storing entries with keys (${entries.keys.joinToString()})")
|
||||
for (key in entries.keys) map[key]?.let { conflicts[key] = it }
|
||||
if (conflicts.isEmpty()) map.putAll(entries)
|
||||
for (key in entries.keys) map[key]?.let { conflicts[key] = it.second }
|
||||
if (conflicts.isEmpty()) map.putAll(entries.mapValues { Pair(index, it.value) })
|
||||
}
|
||||
return conflicts
|
||||
}
|
||||
@ -70,29 +74,4 @@ class DistributedImmutableMap<K : Any, V : Any, E, EK>(val db: CordaPersistence,
|
||||
return db.transaction { map.size }
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes out all [map] entries to disk. Note that this operation does not load all entries into memory, as the
|
||||
* [SnapshotWriter] is using a disk-backed buffer internally, and iterating map entries results in only a
|
||||
* fixed number of recently accessed entries to ever be kept in memory.
|
||||
*/
|
||||
override fun snapshot(writer: SnapshotWriter) {
|
||||
db.transaction {
|
||||
writer.writeInt(map.size)
|
||||
map.allPersisted().forEach { writer.writeObject(it.first to it.second) }
|
||||
}
|
||||
}
|
||||
|
||||
/** Reads entries from disk and adds them to [map]. */
|
||||
override fun install(reader: SnapshotReader) {
|
||||
val size = reader.readInt()
|
||||
db.transaction {
|
||||
map.clear()
|
||||
// TODO: read & put entries in batches
|
||||
for (i in 1..size) {
|
||||
val (key, value) = reader.readObject<Pair<K, V>>()
|
||||
map[key] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,6 +1,5 @@
|
||||
package net.corda.node.services.transactions
|
||||
|
||||
import com.google.common.util.concurrent.MoreExecutors
|
||||
import net.corda.core.internal.concurrent.fork
|
||||
import net.corda.core.node.services.TransactionVerifierService
|
||||
import net.corda.core.serialization.SingletonSerializeAsToken
|
||||
@ -8,7 +7,6 @@ import net.corda.core.transactions.LedgerTransaction
|
||||
import java.util.concurrent.Executors
|
||||
|
||||
class InMemoryTransactionVerifierService(numberOfWorkers: Int) : SingletonSerializeAsToken(), TransactionVerifierService {
|
||||
private val workerPool = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(numberOfWorkers))
|
||||
|
||||
private val workerPool = Executors.newFixedThreadPool(numberOfWorkers)
|
||||
override fun verify(transaction: LedgerTransaction) = workerPool.fork(transaction::verify)
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
package net.corda.node.services.transactions
|
||||
|
||||
import com.codahale.metrics.Gauge
|
||||
import com.codahale.metrics.MetricRegistry
|
||||
import com.codahale.metrics.Timer
|
||||
import net.corda.core.concurrent.CordaFuture
|
||||
import net.corda.core.crypto.SecureHash
|
||||
@ -11,13 +12,12 @@ import net.corda.core.internal.concurrent.openFuture
|
||||
import net.corda.core.serialization.SingletonSerializeAsToken
|
||||
import net.corda.core.transactions.LedgerTransaction
|
||||
import net.corda.core.utilities.loggerFor
|
||||
import net.corda.node.services.api.MonitoringService
|
||||
import net.corda.nodeapi.VerifierApi
|
||||
import org.apache.activemq.artemis.api.core.client.ClientConsumer
|
||||
import java.util.concurrent.ConcurrentHashMap
|
||||
|
||||
abstract class OutOfProcessTransactionVerifierService(
|
||||
val monitoringService: MonitoringService
|
||||
private val metrics: MetricRegistry
|
||||
) : SingletonSerializeAsToken(), TransactionVerifierService {
|
||||
companion object {
|
||||
val log = loggerFor<OutOfProcessTransactionVerifierService>()
|
||||
@ -34,16 +34,16 @@ abstract class OutOfProcessTransactionVerifierService(
|
||||
// Metrics
|
||||
private fun metric(name: String) = "OutOfProcessTransactionVerifierService.$name"
|
||||
|
||||
private val durationTimer = monitoringService.metrics.timer(metric("Verification.Duration"))
|
||||
private val successMeter = monitoringService.metrics.meter(metric("Verification.Success"))
|
||||
private val failureMeter = monitoringService.metrics.meter(metric("Verification.Failure"))
|
||||
private val durationTimer = metrics.timer(metric("Verification.Duration"))
|
||||
private val successMeter = metrics.meter(metric("Verification.Success"))
|
||||
private val failureMeter = metrics.meter(metric("Verification.Failure"))
|
||||
|
||||
class VerificationResultForUnknownTransaction(nonce: Long) :
|
||||
Exception("Verification result arrived for unknown transaction nonce $nonce")
|
||||
|
||||
fun start(responseConsumer: ClientConsumer) {
|
||||
log.info("Starting out of process verification service")
|
||||
monitoringService.metrics.register(metric("VerificationsInFlight"), Gauge { verificationHandles.size })
|
||||
metrics.register(metric("VerificationsInFlight"), Gauge { verificationHandles.size })
|
||||
responseConsumer.setMessageHandler { message ->
|
||||
val response = VerifierApi.VerificationResponse.fromClientMessage(message)
|
||||
val handle = verificationHandles.remove(response.verificationId) ?:
|
||||
|
@ -2,23 +2,20 @@ package net.corda.node.services.transactions
|
||||
|
||||
import net.corda.core.flows.FlowSession
|
||||
import net.corda.core.flows.NotaryFlow
|
||||
import net.corda.core.node.ServiceHub
|
||||
import net.corda.core.node.services.TimeWindowChecker
|
||||
import net.corda.core.node.services.TrustedAuthorityNotaryService
|
||||
import net.corda.node.services.api.ServiceHubInternal
|
||||
import net.corda.node.services.config.RaftConfig
|
||||
import java.security.PublicKey
|
||||
|
||||
/** A non-validating notary service operated by a group of mutually trusting parties, uses the Raft algorithm to achieve consensus. */
|
||||
class RaftNonValidatingNotaryService(override val services: ServiceHubInternal,
|
||||
class RaftNonValidatingNotaryService(override val services: ServiceHub,
|
||||
override val notaryIdentityKey: PublicKey,
|
||||
raftConfig: RaftConfig) : TrustedAuthorityNotaryService() {
|
||||
override val uniquenessProvider: RaftUniquenessProvider) : TrustedAuthorityNotaryService() {
|
||||
companion object {
|
||||
val id = constructId(validating = false, raft = true)
|
||||
}
|
||||
|
||||
override val timeWindowChecker: TimeWindowChecker = TimeWindowChecker(services.clock)
|
||||
override val uniquenessProvider: RaftUniquenessProvider = RaftUniquenessProvider(services, raftConfig)
|
||||
|
||||
override fun createServiceFlow(otherPartySession: FlowSession): NotaryFlow.Service {
|
||||
return NonValidatingNotaryFlow(otherPartySession, this)
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
package net.corda.node.services.transactions

import com.codahale.metrics.Gauge
import com.codahale.metrics.MetricRegistry
import io.atomix.catalyst.buffer.BufferInput
import io.atomix.catalyst.buffer.BufferOutput
import io.atomix.catalyst.serializer.Serializer
@ -13,6 +14,7 @@ import io.atomix.copycat.client.ConnectionStrategies
import io.atomix.copycat.client.CopycatClient
import io.atomix.copycat.client.RecoveryStrategies
import io.atomix.copycat.server.CopycatServer
import io.atomix.copycat.server.cluster.Member
import io.atomix.copycat.server.storage.Storage
import io.atomix.copycat.server.storage.StorageLevel
import net.corda.core.contracts.StateRef
@ -25,10 +27,11 @@ import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.serialization.deserialize
import net.corda.core.serialization.serialize
import net.corda.core.utilities.loggerFor
import net.corda.node.services.api.ServiceHubInternal
import net.corda.node.services.config.RaftConfig
import net.corda.node.utilities.AppendOnlyPersistentMap
import net.corda.node.utilities.CordaPersistence
import net.corda.node.utilities.NODE_DATABASE_PREFIX
import net.corda.nodeapi.config.NodeSSLConfiguration
import net.corda.nodeapi.config.SSLConfiguration
import java.nio.file.Path
import java.util.concurrent.CompletableFuture
@ -44,20 +47,21 @@ import javax.persistence.*
* to the cluster leader to be actioned.
*/
@ThreadSafe
class RaftUniquenessProvider(private val services: ServiceHubInternal, private val raftConfig: RaftConfig) : UniquenessProvider, SingletonSerializeAsToken() {
class RaftUniquenessProvider(private val transportConfiguration: NodeSSLConfiguration, private val db: CordaPersistence, private val metrics: MetricRegistry, private val raftConfig: RaftConfig) : UniquenessProvider, SingletonSerializeAsToken() {
companion object {
private val log = loggerFor<RaftUniquenessProvider>()

fun createMap(): AppendOnlyPersistentMap<String, Any, RaftState, String> =
fun createMap(): AppendOnlyPersistentMap<String, Pair<Long, Any>, RaftState, String> =
AppendOnlyPersistentMap(
toPersistentEntityKey = { it },
fromPersistentEntity = {
Pair(it.key, it.value.deserialize(context = SerializationDefaults.STORAGE_CONTEXT))
Pair(it.key, Pair(it.index, it.value.deserialize(context = SerializationDefaults.STORAGE_CONTEXT)))
},
toPersistentEntity = { k: String, v: Any ->
toPersistentEntity = { k: String, v: Pair<Long, Any> ->
RaftState().apply {
key = k
value = v.serialize(context = SerializationDefaults.STORAGE_CONTEXT).bytes
value = v.second.serialize(context = SerializationDefaults.STORAGE_CONTEXT).bytes
index = v.first
}
},
persistentEntityClass = RaftState::class.java
@ -65,25 +69,22 @@ class RaftUniquenessProvider(private val services: ServiceHubInternal, private v
}

@Entity
@Table(name = "notary_committed_states")
@Table(name = "${NODE_DATABASE_PREFIX}raft_committed_states")
class RaftState(
@Id
@Column
@Column(name = "id")
var key: String = "",

@Lob
@Column
var value: ByteArray = ByteArray(0)
var value: ByteArray = ByteArray(0),

@Column
var index: Long = 0
)

/** Directory storing the Raft log and state machine snapshots */
private val storagePath: Path = services.configuration.baseDirectory
/** Address of the Copycat node run by this Corda node */
/** The database to store the state machine state in */
private val db: CordaPersistence = services.database
/** SSL configuration */
private val transportConfiguration: SSLConfiguration = services.configuration

private val storagePath: Path = transportConfiguration.baseDirectory
private lateinit var _clientFuture: CompletableFuture<CopycatClient>
private lateinit var server: CopycatServer

@ -177,17 +178,23 @@ class RaftUniquenessProvider(private val services: ServiceHubInternal, private v
}

private fun registerMonitoring() {
services.monitoringService.metrics.register("RaftCluster.ThisServerStatus", Gauge<String> {
metrics.register("RaftCluster.ThisServerStatus", Gauge<String> {
server.state().name
})

services.monitoringService.metrics.register("RaftCluster.MembersCount", Gauge<Int> {
metrics.register("RaftCluster.MembersCount", Gauge<Int> {
server.cluster().members().size
})

services.monitoringService.metrics.register("RaftCluster.Members", Gauge<List<String>> {
metrics.register("RaftCluster.Members", Gauge<List<String>> {
server.cluster().members().map { it.address().toString() }
})

metrics.register("RaftCluster.AvailableMembers", Gauge<List<String>> {
server.cluster().members().filter { it.status() == Member.Status.AVAILABLE }.map { it.address().toString() }
})

metrics.register("RaftCluster.AvailableMembersCount", Gauge<Int> {
server.cluster().members().filter { it.status() == Member.Status.AVAILABLE }.size
})
}
@ -2,23 +2,20 @@ package net.corda.node.services.transactions

import net.corda.core.flows.FlowSession
import net.corda.core.flows.NotaryFlow
import net.corda.core.node.ServiceHub
import net.corda.core.node.services.TimeWindowChecker
import net.corda.core.node.services.TrustedAuthorityNotaryService
import net.corda.node.services.api.ServiceHubInternal
import net.corda.node.services.config.RaftConfig
import java.security.PublicKey

/** A validating notary service operated by a group of mutually trusting parties, uses the Raft algorithm to achieve consensus. */
class RaftValidatingNotaryService(override val services: ServiceHubInternal,
class RaftValidatingNotaryService(override val services: ServiceHub,
override val notaryIdentityKey: PublicKey,
raftConfig: RaftConfig) : TrustedAuthorityNotaryService() {
override val uniquenessProvider: RaftUniquenessProvider) : TrustedAuthorityNotaryService() {
companion object {
val id = constructId(validating = true, raft = true)
}

override val timeWindowChecker: TimeWindowChecker = TimeWindowChecker(services.clock)
override val uniquenessProvider: RaftUniquenessProvider = RaftUniquenessProvider(services, raftConfig)

override fun createServiceFlow(otherPartySession: FlowSession): NotaryFlow.Service {
return ValidatingNotaryFlow(otherPartySession, this)
}
@ -12,97 +12,45 @@ import net.corda.core.schemas.PersistentState
import net.corda.core.schemas.PersistentStateRef
import net.corda.core.utilities.OpaqueBytes
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.toHexString
import net.corda.core.utilities.trace
import net.corda.node.services.persistence.NodeAttachmentService
import org.hibernate.query.criteria.internal.expression.LiteralExpression
import org.hibernate.query.criteria.internal.path.SingularAttributePath
import org.hibernate.query.criteria.internal.predicate.ComparisonPredicate
import org.hibernate.query.criteria.internal.predicate.InPredicate
import java.time.Instant
import java.util.*
import javax.persistence.Tuple
import javax.persistence.criteria.*

class HibernateQueryCriteriaParser(val contractStateType: Class<out ContractState>,
val contractStateTypeMappings: Map<String, Set<String>>,
val criteriaBuilder: CriteriaBuilder,
val criteriaQuery: CriteriaQuery<Tuple>,
val vaultStates: Root<VaultSchemaV1.VaultStates>) : IQueryCriteriaParser {
private companion object {
val log = loggerFor<HibernateQueryCriteriaParser>()
}
abstract class AbstractQueryCriteriaParser<Q : GenericQueryCriteria<Q,P>, in P: BaseQueryCriteriaParser<Q, P, S>, in S: BaseSort> : BaseQueryCriteriaParser<Q, P, S> {

// incrementally build list of join predicates
private val joinPredicates = mutableListOf<Predicate>()
// incrementally build list of root entities (for later use in Sort parsing)
private val rootEntities = mutableMapOf<Class<out PersistentState>, Root<*>>(Pair(VaultSchemaV1.VaultStates::class.java, vaultStates))
private val aggregateExpressions = mutableListOf<Expression<*>>()
private val commonPredicates = mutableMapOf<Pair<String, Operator>, Predicate>() // schema attribute Name, operator -> predicate
abstract val criteriaBuilder: CriteriaBuilder

var stateTypes: Vault.StateStatus = Vault.StateStatus.UNCONSUMED

override fun parseCriteria(criteria: QueryCriteria.VaultQueryCriteria): Collection<Predicate> {
log.trace { "Parsing VaultQueryCriteria: $criteria" }
override fun parseOr(left: Q, right: Q): Collection<Predicate> {
val predicateSet = mutableSetOf<Predicate>()
val leftPredicates = parse(left)
val rightPredicates = parse(right)

// soft locking
criteria.softLockingCondition?.let {
val softLocking = criteria.softLockingCondition
val type = softLocking!!.type
when (type) {
QueryCriteria.SoftLockingType.UNLOCKED_ONLY ->
predicateSet.add(criteriaBuilder.and(vaultStates.get<String>("lockId").isNull))
QueryCriteria.SoftLockingType.LOCKED_ONLY ->
predicateSet.add(criteriaBuilder.and(vaultStates.get<String>("lockId").isNotNull))
QueryCriteria.SoftLockingType.UNLOCKED_AND_SPECIFIED -> {
require(softLocking.lockIds.isNotEmpty()) { "Must specify one or more lockIds" }
predicateSet.add(criteriaBuilder.or(vaultStates.get<String>("lockId").isNull,
vaultStates.get<String>("lockId").`in`(softLocking.lockIds.map { it.toString() })))
}
QueryCriteria.SoftLockingType.SPECIFIED -> {
require(softLocking.lockIds.isNotEmpty()) { "Must specify one or more lockIds" }
predicateSet.add(criteriaBuilder.and(vaultStates.get<String>("lockId").`in`(softLocking.lockIds.map { it.toString() })))
}
}
}
val orPredicate = criteriaBuilder.or(*leftPredicates.toTypedArray(), *rightPredicates.toTypedArray())
predicateSet.add(orPredicate)

// notary names
criteria.notary?.let {
predicateSet.add(criteriaBuilder.and(vaultStates.get<AbstractParty>("notary").`in`(criteria.notary)))
}

// state references
criteria.stateRefs?.let {
val persistentStateRefs = (criteria.stateRefs as List<StateRef>).map { PersistentStateRef(it.txhash.bytes.toHexString(), it.index) }
val compositeKey = vaultStates.get<PersistentStateRef>("stateRef")
predicateSet.add(criteriaBuilder.and(compositeKey.`in`(persistentStateRefs)))
}

// time constraints (recorded, consumed)
criteria.timeCondition?.let {
val timeCondition = criteria.timeCondition
val timeInstantType = timeCondition!!.type
val timeColumn = when (timeInstantType) {
QueryCriteria.TimeInstantType.RECORDED -> Column(VaultSchemaV1.VaultStates::recordedTime)
QueryCriteria.TimeInstantType.CONSUMED -> Column(VaultSchemaV1.VaultStates::consumedTime)
}
val expression = CriteriaExpression.ColumnPredicateExpression(timeColumn, timeCondition.predicate)
predicateSet.add(parseExpression(vaultStates, expression) as Predicate)
}
return predicateSet
}

private fun deriveContractStateTypes(contractStateTypes: Set<Class<out ContractState>>? = null): Set<String> {
log.trace { "Contract types to be derived: primary ($contractStateType), additional ($contractStateTypes)" }
val combinedContractStateTypes = contractStateTypes?.plus(contractStateType) ?: setOf(contractStateType)
combinedContractStateTypes.filter { it.name != ContractState::class.java.name }.let {
val interfaces = it.flatMap { contractStateTypeMappings[it.name] ?: setOf(it.name) }
val concrete = it.filter { !it.isInterface }.map { it.name }
log.trace { "Derived contract types: ${interfaces.union(concrete)}" }
return interfaces.union(concrete)
}
override fun parseAnd(left: Q, right: Q): Collection<Predicate> {
val predicateSet = mutableSetOf<Predicate>()
val leftPredicates = parse(left)
val rightPredicates = parse(right)

val andPredicate = criteriaBuilder.and(*leftPredicates.toTypedArray(), *rightPredicates.toTypedArray())
predicateSet.add(andPredicate)

return predicateSet
}

private fun columnPredicateToPredicate(column: Path<out Any?>, columnPredicate: ColumnPredicate<*>): Predicate {
protected fun columnPredicateToPredicate(column: Path<out Any?>, columnPredicate: ColumnPredicate<*>): Predicate {
return when (columnPredicate) {
is ColumnPredicate.EqualityComparison -> {
val literal = columnPredicate.rightLiteral
@ -152,6 +100,150 @@ class HibernateQueryCriteriaParser(val contractStateType: Class<out ContractStat
else -> throw VaultQueryException("Not expecting $columnPredicate")
}
}
}
class HibernateAttachmentQueryCriteriaParser(override val criteriaBuilder: CriteriaBuilder,
private val criteriaQuery: CriteriaQuery<NodeAttachmentService.DBAttachment>, val root: Root<NodeAttachmentService.DBAttachment>) :
AbstractQueryCriteriaParser<AttachmentQueryCriteria, AttachmentsQueryCriteriaParser, AttachmentSort>(), AttachmentsQueryCriteriaParser {

private companion object {
val log = loggerFor<HibernateAttachmentQueryCriteriaParser>()
}

init {
criteriaQuery.select(root)
}

override fun parse(criteria: AttachmentQueryCriteria, sorting: AttachmentSort?): Collection<Predicate> {
val predicateSet = criteria.visit(this)

sorting?.let {
if (sorting.columns.isNotEmpty())
parse(sorting)
}

criteriaQuery.where(*predicateSet.toTypedArray())

return predicateSet
}

private fun parse(sorting: AttachmentSort) {
log.trace { "Parsing sorting specification: $sorting" }

val orderCriteria = mutableListOf<Order>()

sorting.columns.map { (sortAttribute, direction) ->
when (direction) {
Sort.Direction.ASC -> orderCriteria.add(criteriaBuilder.asc(root.get<String>(sortAttribute.columnName)))
Sort.Direction.DESC -> orderCriteria.add(criteriaBuilder.desc(root.get<String>(sortAttribute.columnName)))
}
}
if (orderCriteria.isNotEmpty()) {
criteriaQuery.orderBy(orderCriteria)
}
}

override fun parseCriteria(criteria: AttachmentQueryCriteria.AttachmentsQueryCriteria): Collection<Predicate> {
log.trace { "Parsing AttachmentsQueryCriteria: $criteria" }

val predicateSet = mutableSetOf<Predicate>()

criteria.filenameCondition?.let {
predicateSet.add(columnPredicateToPredicate(root.get<String>("filename"), it))
}

criteria.uploaderCondition?.let {
predicateSet.add(columnPredicateToPredicate(root.get<String>("uploader"), it))
}

criteria.uploadDateCondition?.let {
predicateSet.add(columnPredicateToPredicate(root.get<Instant>("upload_date"), it))
}

return predicateSet
}
}
class HibernateQueryCriteriaParser(val contractStateType: Class<out ContractState>,
val contractStateTypeMappings: Map<String, Set<String>>,
override val criteriaBuilder: CriteriaBuilder,
val criteriaQuery: CriteriaQuery<Tuple>,
val vaultStates: Root<VaultSchemaV1.VaultStates>) : AbstractQueryCriteriaParser<QueryCriteria, IQueryCriteriaParser, Sort>(), IQueryCriteriaParser {
private companion object {
val log = loggerFor<HibernateQueryCriteriaParser>()
}

// incrementally build list of join predicates
private val joinPredicates = mutableListOf<Predicate>()
// incrementally build list of root entities (for later use in Sort parsing)
private val rootEntities = mutableMapOf<Class<out PersistentState>, Root<*>>(Pair(VaultSchemaV1.VaultStates::class.java, vaultStates))
private val aggregateExpressions = mutableListOf<Expression<*>>()
private val commonPredicates = mutableMapOf<Pair<String, Operator>, Predicate>() // schema attribute Name, operator -> predicate

var stateTypes: Vault.StateStatus = Vault.StateStatus.UNCONSUMED

override fun parseCriteria(criteria: QueryCriteria.VaultQueryCriteria): Collection<Predicate> {
log.trace { "Parsing VaultQueryCriteria: $criteria" }
val predicateSet = mutableSetOf<Predicate>()

// soft locking
criteria.softLockingCondition?.let {
val softLocking = criteria.softLockingCondition
val type = softLocking!!.type
when (type) {
QueryCriteria.SoftLockingType.UNLOCKED_ONLY ->
predicateSet.add(criteriaBuilder.and(vaultStates.get<String>("lockId").isNull))
QueryCriteria.SoftLockingType.LOCKED_ONLY ->
predicateSet.add(criteriaBuilder.and(vaultStates.get<String>("lockId").isNotNull))
QueryCriteria.SoftLockingType.UNLOCKED_AND_SPECIFIED -> {
require(softLocking.lockIds.isNotEmpty()) { "Must specify one or more lockIds" }
predicateSet.add(criteriaBuilder.or(vaultStates.get<String>("lockId").isNull,
vaultStates.get<String>("lockId").`in`(softLocking.lockIds.map { it.toString() })))
}
QueryCriteria.SoftLockingType.SPECIFIED -> {
require(softLocking.lockIds.isNotEmpty()) { "Must specify one or more lockIds" }
predicateSet.add(criteriaBuilder.and(vaultStates.get<String>("lockId").`in`(softLocking.lockIds.map { it.toString() })))
}
}
}

// notary names
criteria.notary?.let {
predicateSet.add(criteriaBuilder.and(vaultStates.get<AbstractParty>("notary").`in`(criteria.notary)))
}

// state references
criteria.stateRefs?.let {
val persistentStateRefs = (criteria.stateRefs as List<StateRef>).map(::PersistentStateRef)
val compositeKey = vaultStates.get<PersistentStateRef>("stateRef")
predicateSet.add(criteriaBuilder.and(compositeKey.`in`(persistentStateRefs)))
}

// time constraints (recorded, consumed)
criteria.timeCondition?.let {
val timeCondition = criteria.timeCondition
val timeInstantType = timeCondition!!.type
val timeColumn = when (timeInstantType) {
QueryCriteria.TimeInstantType.RECORDED -> Column(VaultSchemaV1.VaultStates::recordedTime)
QueryCriteria.TimeInstantType.CONSUMED -> Column(VaultSchemaV1.VaultStates::consumedTime)
}
val expression = CriteriaExpression.ColumnPredicateExpression(timeColumn, timeCondition.predicate)
predicateSet.add(parseExpression(vaultStates, expression) as Predicate)
}
return predicateSet
}

private fun deriveContractStateTypes(contractStateTypes: Set<Class<out ContractState>>? = null): Set<String> {
log.trace { "Contract types to be derived: primary ($contractStateType), additional ($contractStateTypes)" }
val combinedContractStateTypes = contractStateTypes?.plus(contractStateType) ?: setOf(contractStateType)
combinedContractStateTypes.filter { it.name != ContractState::class.java.name }.let {
val interfaces = it.flatMap { contractStateTypeMappings[it.name] ?: setOf(it.name) }
val concrete = it.filter { !it.isInterface }.map { it.name }
log.trace { "Derived contract types: ${interfaces.union(concrete)}" }
return interfaces.union(concrete)
}
}

private fun <O> parseExpression(entityRoot: Root<O>, expression: CriteriaExpression<O, Boolean>, predicateSet: MutableSet<Predicate>) {
if (expression is CriteriaExpression.AggregateFunctionExpression<O, *>) {
@ -195,6 +287,7 @@ class HibernateQueryCriteriaParser(val contractStateType: Class<out ContractStat
AggregateFunctionType.MAX -> criteriaBuilder.max(column)
AggregateFunctionType.MIN -> criteriaBuilder.min(column)
}
//TODO investigate possibility to avoid producing redundant joins in SQL for multiple aggregate functions against the same table
aggregateExpressions.add(aggregateExpression)
// optionally order by this aggregate function
expression.orderBy?.let {
@ -210,6 +303,10 @@ class HibernateQueryCriteriaParser(val contractStateType: Class<out ContractStat
val groupByExpressions =
columns.map { _column ->
val path = root.get<Any?>(getColumnName(_column))
if (path is SingularAttributePath) //remove the same columns from different joins to match the single column in 'group by' only (from the last join)
aggregateExpressions.removeAll {
elem -> if (elem is SingularAttributePath) elem.attribute.javaMember == path.attribute.javaMember else false
}
aggregateExpressions.add(path)
path
}
@ -326,32 +423,6 @@ class HibernateQueryCriteriaParser(val contractStateType: Class<out ContractStat
return predicateSet
}

override fun parseOr(left: QueryCriteria, right: QueryCriteria): Collection<Predicate> {
log.trace { "Parsing OR QueryCriteria composition: $left OR $right" }

val predicateSet = mutableSetOf<Predicate>()
val leftPredicates = parse(left)
val rightPredicates = parse(right)

val orPredicate = criteriaBuilder.or(*leftPredicates.toTypedArray(), *rightPredicates.toTypedArray())
predicateSet.add(orPredicate)

return predicateSet
}

override fun parseAnd(left: QueryCriteria, right: QueryCriteria): Collection<Predicate> {
log.trace { "Parsing AND QueryCriteria composition: $left AND $right" }

val predicateSet = mutableSetOf<Predicate>()
val leftPredicates = parse(left)
val rightPredicates = parse(right)

val andPredicate = criteriaBuilder.and(*leftPredicates.toTypedArray(), *rightPredicates.toTypedArray())
predicateSet.add(andPredicate)

return predicateSet
}

override fun parse(criteria: QueryCriteria, sorting: Sort?): Collection<Predicate> {
val predicateSet = criteria.visit(this)
@ -5,22 +5,13 @@ import co.paralleluniverse.strands.Strand
import net.corda.core.contracts.*
import net.corda.core.crypto.SecureHash
import net.corda.core.internal.*
import net.corda.core.node.StateLoader
import net.corda.core.node.services.*
import net.corda.core.node.services.StatesNotAvailableException
import net.corda.core.node.services.Vault
import net.corda.core.node.services.vault.QueryCriteria
import net.corda.core.node.services.vault.Sort
import net.corda.core.node.services.vault.SortAttribute
import net.corda.core.messaging.DataFeed
import net.corda.core.node.StateLoader
import net.corda.core.node.StatesToRecord
import net.corda.core.node.services.VaultQueryException
import net.corda.core.node.services.*
import net.corda.core.node.services.vault.*
import net.corda.core.schemas.PersistentStateRef
import net.corda.core.serialization.SerializationDefaults.STORAGE_CONTEXT
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.serialization.deserialize
import net.corda.core.serialization.serialize
import net.corda.core.transactions.CoreTransaction
import net.corda.core.transactions.NotaryChangeWireTransaction
import net.corda.core.transactions.WireTransaction
@ -92,7 +83,6 @@ class NodeVaultService(
val state = VaultSchemaV1.VaultStates(
notary = stateAndRef.value.state.notary,
contractStateClassName = stateAndRef.value.state.data.javaClass.name,
contractState = stateAndRef.value.state.serialize(context = STORAGE_CONTEXT).bytes,
stateStatus = Vault.StateStatus.UNCONSUMED,
recordedTime = clock.instant())
state.stateRef = PersistentStateRef(stateAndRef.key)
@ -172,7 +162,7 @@ class NodeVaultService(
return Vault.NoUpdate
}

return Vault.Update(consumedStates, ourNewStates.toHashSet())
return Vault.Update(consumedStates.toSet(), ourNewStates.toSet())
}

val netDelta = txns.fold(Vault.NoUpdate) { netDelta, txn -> netDelta + makeUpdate(txn) }
@ -210,28 +200,10 @@ class NodeVaultService(
processAndNotify(netDelta)
}

// TODO: replace this method in favour of a VaultQuery query
private fun loadStates(refs: Collection<StateRef>): HashSet<StateAndRef<ContractState>> {
val states = HashSet<StateAndRef<ContractState>>()
if (refs.isNotEmpty()) {
val session = currentDBSession()
val criteriaBuilder = session.criteriaBuilder
val criteriaQuery = criteriaBuilder.createQuery(VaultSchemaV1.VaultStates::class.java)
val vaultStates = criteriaQuery.from(VaultSchemaV1.VaultStates::class.java)
val statusPredicate = criteriaBuilder.equal(vaultStates.get<Vault.StateStatus>(VaultSchemaV1.VaultStates::stateStatus.name), Vault.StateStatus.UNCONSUMED)
val persistentStateRefs = refs.map { PersistentStateRef(it.txhash.bytes.toHexString(), it.index) }
val compositeKey = vaultStates.get<PersistentStateRef>(VaultSchemaV1.VaultStates::stateRef.name)
val stateRefsPredicate = criteriaBuilder.and(compositeKey.`in`(persistentStateRefs))
criteriaQuery.where(statusPredicate, stateRefsPredicate)
val results = session.createQuery(criteriaQuery).resultList
results.asSequence().forEach {
val txHash = SecureHash.parse(it.stateRef?.txId!!)
val index = it.stateRef?.index!!
val state = it.contractState.deserialize<TransactionState<ContractState>>(context = STORAGE_CONTEXT)
states.add(StateAndRef(state, StateRef(txHash, index)))
}
}
return states
private fun loadStates(refs: Collection<StateRef>): Collection<StateAndRef<ContractState>> {
return if (refs.isNotEmpty())
queryBy<ContractState>(QueryCriteria.VaultQueryCriteria(stateRefs = refs.toList())).states
else emptySet()
}

private fun processAndNotify(update: Vault.Update<ContractState>) {
@ -431,70 +403,70 @@ class NodeVaultService(

val session = getSession()

session.use {
val criteriaQuery = criteriaBuilder.createQuery(Tuple::class.java)
val queryRootVaultStates = criteriaQuery.from(VaultSchemaV1.VaultStates::class.java)
val criteriaQuery = criteriaBuilder.createQuery(Tuple::class.java)
val queryRootVaultStates = criteriaQuery.from(VaultSchemaV1.VaultStates::class.java)

// TODO: revisit (use single instance of parser for all queries)
val criteriaParser = HibernateQueryCriteriaParser(contractStateType, contractStateTypeMappings, criteriaBuilder, criteriaQuery, queryRootVaultStates)
// TODO: revisit (use single instance of parser for all queries)
val criteriaParser = HibernateQueryCriteriaParser(contractStateType, contractStateTypeMappings, criteriaBuilder, criteriaQuery, queryRootVaultStates)

try {
// parse criteria and build where predicates
criteriaParser.parse(criteria, sorting)
try {
// parse criteria and build where predicates
criteriaParser.parse(criteria, sorting)

// prepare query for execution
val query = session.createQuery(criteriaQuery)
// prepare query for execution
val query = session.createQuery(criteriaQuery)

// pagination checks
if (!paging.isDefault) {
// pagination
if (paging.pageNumber < DEFAULT_PAGE_NUM) throw VaultQueryException("Page specification: invalid page number ${paging.pageNumber} [page numbers start from $DEFAULT_PAGE_NUM]")
if (paging.pageSize < 1) throw VaultQueryException("Page specification: invalid page size ${paging.pageSize} [must be a value between 1 and $MAX_PAGE_SIZE]")
}

query.firstResult = (paging.pageNumber - 1) * paging.pageSize
query.maxResults = paging.pageSize + 1 // detection too many results

// execution
val results = query.resultList

// final pagination check (fail-fast on too many results when no pagination specified)
if (paging.isDefault && results.size > DEFAULT_PAGE_SIZE)
throw VaultQueryException("Please specify a `PageSpecification` as there are more results [${results.size}] than the default page size [$DEFAULT_PAGE_SIZE]")

val statesAndRefs: MutableList<StateAndRef<T>> = mutableListOf()
val statesMeta: MutableList<Vault.StateMetadata> = mutableListOf()
val otherResults: MutableList<Any> = mutableListOf()

results.asSequence()
.forEachIndexed { index, result ->
if (result[0] is VaultSchemaV1.VaultStates) {
if (!paging.isDefault && index == paging.pageSize) // skip last result if paged
return@forEachIndexed
val vaultState = result[0] as VaultSchemaV1.VaultStates
val stateRef = StateRef(SecureHash.parse(vaultState.stateRef!!.txId!!), vaultState.stateRef!!.index!!)
val state = vaultState.contractState.deserialize<TransactionState<T>>(context = STORAGE_CONTEXT)
statesMeta.add(Vault.StateMetadata(stateRef,
vaultState.contractStateClassName,
vaultState.recordedTime,
vaultState.consumedTime,
vaultState.stateStatus,
vaultState.notary,
vaultState.lockId,
vaultState.lockUpdateTime))
statesAndRefs.add(StateAndRef(state, stateRef))
} else {
// TODO: improve typing of returned other results
log.debug { "OtherResults: ${Arrays.toString(result.toArray())}" }
otherResults.addAll(result.toArray().asList())
}
}

return Vault.Page(states = statesAndRefs, statesMetadata = statesMeta, stateTypes = criteriaParser.stateTypes, totalStatesAvailable = totalStates, otherResults = otherResults)
} catch (e: java.lang.Exception) {
log.error(e.message)
throw e.cause ?: e
// pagination checks
if (!paging.isDefault) {
// pagination
if (paging.pageNumber < DEFAULT_PAGE_NUM) throw VaultQueryException("Page specification: invalid page number ${paging.pageNumber} [page numbers start from $DEFAULT_PAGE_NUM]")
if (paging.pageSize < 1) throw VaultQueryException("Page specification: invalid page size ${paging.pageSize} [must be a value between 1 and $MAX_PAGE_SIZE]")
}

query.firstResult = (paging.pageNumber - 1) * paging.pageSize
query.maxResults = paging.pageSize + 1 // detection too many results

// execution
val results = query.resultList

// final pagination check (fail-fast on too many results when no pagination specified)
if (paging.isDefault && results.size > DEFAULT_PAGE_SIZE)
throw VaultQueryException("Please specify a `PageSpecification` as there are more results [${results.size}] than the default page size [$DEFAULT_PAGE_SIZE]")

val statesAndRefs: MutableList<StateAndRef<T>> = mutableListOf()
val statesMeta: MutableList<Vault.StateMetadata> = mutableListOf()
val otherResults: MutableList<Any> = mutableListOf()
val stateRefs = mutableSetOf<StateRef>()

results.asSequence()
.forEachIndexed { index, result ->
if (result[0] is VaultSchemaV1.VaultStates) {
if (!paging.isDefault && index == paging.pageSize) // skip last result if paged
return@forEachIndexed
val vaultState = result[0] as VaultSchemaV1.VaultStates
val stateRef = StateRef(SecureHash.parse(vaultState.stateRef!!.txId!!), vaultState.stateRef!!.index!!)
stateRefs.add(stateRef)
statesMeta.add(Vault.StateMetadata(stateRef,
vaultState.contractStateClassName,
vaultState.recordedTime,
vaultState.consumedTime,
vaultState.stateStatus,
vaultState.notary,
vaultState.lockId,
vaultState.lockUpdateTime))
} else {
// TODO: improve typing of returned other results
log.debug { "OtherResults: ${Arrays.toString(result.toArray())}" }
otherResults.addAll(result.toArray().asList())
}
}
if (stateRefs.isNotEmpty())
statesAndRefs.addAll(stateLoader.loadStates(stateRefs) as Collection<StateAndRef<T>>)

return Vault.Page(states = statesAndRefs, statesMetadata = statesMeta, stateTypes = criteriaParser.stateTypes, totalStatesAvailable = totalStates, otherResults = otherResults)
} catch (e: java.lang.Exception) {
log.error(e.message)
throw e.cause ?: e
}
}

@ -507,11 +479,7 @@ class NodeVaultService(
}
}

private fun getSession(): Session {
return sessionFactory.withOptions().
connection(DatabaseTransactionManager.current().connection).
openSession()
}
private fun getSession() = DatabaseTransactionManager.currentOrNew().session

/**
* Derive list from existing vault states and then incrementally update using vault observables
@ -521,22 +489,21 @@ class NodeVaultService(
val vaultStates = criteria.from(VaultSchemaV1.VaultStates::class.java)
criteria.select(vaultStates.get("contractStateClassName")).distinct(true)
val session = getSession()
session.use {
val query = session.createQuery(criteria)
val results = query.resultList
val distinctTypes = results.map { it }

val contractInterfaceToConcreteTypes = mutableMapOf<String, MutableSet<String>>()
distinctTypes.forEach { type ->
val concreteType: Class<ContractState> = uncheckedCast(Class.forName(type))
val contractInterfaces = deriveContractInterfaces(concreteType)
contractInterfaces.map {
val contractInterface = contractInterfaceToConcreteTypes.getOrPut(it.name, { mutableSetOf() })
contractInterface.add(concreteType.name)
}
val query = session.createQuery(criteria)
val results = query.resultList
val distinctTypes = results.map { it }

val contractInterfaceToConcreteTypes = mutableMapOf<String, MutableSet<String>>()
distinctTypes.forEach { type ->
val concreteType: Class<ContractState> = uncheckedCast(Class.forName(type))
val contractInterfaces = deriveContractInterfaces(concreteType)
contractInterfaces.map {
val contractInterface = contractInterfaceToConcreteTypes.getOrPut(it.name, { mutableSetOf() })
contractInterface.add(concreteType.name)
}
return contractInterfaceToConcreteTypes
}
return contractInterfaceToConcreteTypes
}

private fun <T : ContractState> deriveContractInterfaces(clazz: Class<T>): Set<Class<T>> {
@ -1,6 +1,7 @@
package net.corda.node.services.vault

import net.corda.core.contracts.ContractState
import net.corda.core.contracts.MAX_ISSUER_REF_SIZE
import net.corda.core.contracts.UniqueIdentifier
import net.corda.core.identity.AbstractParty
import net.corda.core.identity.Party
@ -9,6 +10,7 @@ import net.corda.core.schemas.MappedSchema
import net.corda.core.schemas.PersistentState
import net.corda.core.serialization.CordaSerializable
import net.corda.core.utilities.OpaqueBytes
import org.hibernate.annotations.Type
import java.io.Serializable
import java.time.Instant
import java.util.*
@ -29,6 +31,9 @@ object VaultSchemaV1 : MappedSchema(schemaFamily = VaultSchema.javaClass, versio
@Table(name = "vault_states",
indexes = arrayOf(Index(name = "state_status_idx", columnList = "state_status")))
class VaultStates(
/** NOTE: serialized transaction state (including contract state) is now resolved from transaction store */
// TODO: create a distinct table to hold serialized state data (once DBTransactionStore is encrypted)

/** refers to the X500Name of the notary a state is attached to */
@Column(name = "notary_name")
var notary: Party,
@ -37,11 +42,6 @@ object VaultSchemaV1 : MappedSchema(schemaFamily = VaultSchema.javaClass, versio
@Column(name = "contract_state_class_name")
var contractStateClassName: String,

/** refers to serialized transaction Contract State */
@Lob
@Column(name = "contract_state")
var contractState: ByteArray,

/** state lifecycle: unconsumed, consumed */
@Column(name = "state_status")
var stateStatus: Vault.StateStatus,
@ -73,6 +73,10 @@ object VaultSchemaV1 : MappedSchema(schemaFamily = VaultSchema.javaClass, versio

/** X500Name of participant parties **/
@ElementCollection
@CollectionTable(name = "vault_linear_states_parts",
joinColumns = arrayOf(
JoinColumn(name = "output_index", referencedColumnName = "output_index"),
JoinColumn(name = "transaction_id", referencedColumnName = "transaction_id")))
@Column(name = "participants")
var participants: MutableSet<AbstractParty>? = null,
// Reason for not using Set is described here:
@ -100,6 +104,10 @@ object VaultSchemaV1 : MappedSchema(schemaFamily = VaultSchema.javaClass, versio

/** X500Name of participant parties **/
@ElementCollection
@CollectionTable(name = "vault_fungible_states_parts",
joinColumns = arrayOf(
JoinColumn(name = "output_index", referencedColumnName = "output_index"),
JoinColumn(name = "transaction_id", referencedColumnName = "transaction_id")))
@Column(name = "participants")
var participants: MutableSet<AbstractParty>? = null,

@ -125,7 +133,8 @@ object VaultSchemaV1 : MappedSchema(schemaFamily = VaultSchema.javaClass, versio
@Column(name = "issuer_name")
var issuer: AbstractParty,

@Column(name = "issuer_reference")
@Column(name = "issuer_ref", length = MAX_ISSUER_REF_SIZE)
@Type(type = "corda-wrapper-binary")
var issuerRef: ByteArray
) : PersistentState() {
constructor(_owner: AbstractParty, _quantity: Long, _issuerParty: AbstractParty, _issuerRef: OpaqueBytes, _participants: List<AbstractParty>) :
@ -138,8 +147,7 @@ object VaultSchemaV1 : MappedSchema(schemaFamily = VaultSchema.javaClass, versio

@Entity
@Table(name = "vault_transaction_notes",
indexes = arrayOf(Index(name = "seq_no_index", columnList = "seq_no"),
Index(name = "transaction_id_index", columnList = "transaction_id")))
indexes = arrayOf(Index(name = "transaction_id_index", columnList = "transaction_id")))
class VaultTxnNote(
@Id
@GeneratedValue
@ -1,8 +1,8 @@
package net.corda.node.shell

import net.corda.core.flows.FlowInitiator
import net.corda.core.flows.StateMachineRunId
import net.corda.core.internal.concurrent.openFuture
import net.corda.core.context.InvocationContext
import net.corda.core.messaging.StateMachineUpdate
import net.corda.core.messaging.StateMachineUpdate.Added
import net.corda.core.messaging.StateMachineUpdate.Removed
@ -72,7 +72,7 @@ class FlowWatchPrintingSubscriber(private val toStream: RenderPrintWriter) : Sub
table.add(RowElement().add(
LabelElement(formatFlowId(smmUpdate.id)),
LabelElement(formatFlowName(smmUpdate.stateMachineInfo.flowLogicClassName)),
LabelElement(formatFlowInitiator(smmUpdate.stateMachineInfo.initiator)),
LabelElement(formatInvocationContext(smmUpdate.stateMachineInfo.context())),
LabelElement("In progress")
).style(stateColor(smmUpdate).fg()))
indexMap[smmUpdate.id] = table.rows.size - 1
@ -105,14 +105,8 @@ class FlowWatchPrintingSubscriber(private val toStream: RenderPrintWriter) : Sub
return flowId.toString().removeSurrounding("[", "]")
}

private fun formatFlowInitiator(flowInitiator: FlowInitiator): String {
return when (flowInitiator) {
is FlowInitiator.Scheduled -> flowInitiator.scheduledState.ref.toString()
is FlowInitiator.Shell -> "Shell" // TODO Change when we will have more information on shell user.
is FlowInitiator.Peer -> flowInitiator.party.name.organisation
is FlowInitiator.RPC -> "RPC: " + flowInitiator.username
is FlowInitiator.Service -> "Service: " + flowInitiator.name
}
private fun formatInvocationContext(context: InvocationContext): String {
return context.principal().name
}

private fun formatFlowResult(flowResult: Try<*>): String {

@ -12,7 +12,6 @@ import net.corda.client.jackson.StringToMethodCallParser
import net.corda.core.CordaException
import net.corda.core.concurrent.CordaFuture
import net.corda.core.contracts.UniqueIdentifier
import net.corda.core.flows.FlowInitiator
import net.corda.core.flows.FlowLogic
import net.corda.core.internal.*
import net.corda.core.internal.concurrent.OpenFuture
@ -25,12 +24,11 @@ import net.corda.core.utilities.loggerFor
import net.corda.node.internal.Node
import net.corda.node.internal.StartedNode
import net.corda.node.services.messaging.CURRENT_RPC_CONTEXT
import net.corda.node.services.messaging.RpcContext
import net.corda.node.services.messaging.RpcAuthContext
import net.corda.node.services.messaging.RpcPermissions
import net.corda.node.services.statemachine.FlowStateMachineImpl
import net.corda.node.utilities.ANSIProgressRenderer
import net.corda.node.utilities.CordaPersistence
import net.corda.nodeapi.ArtemisMessagingComponent
import net.corda.nodeapi.User
import org.crsh.command.InvocationContext
import org.crsh.console.jline.JLineProcessor
import org.crsh.console.jline.TerminalFactory
@ -130,7 +128,9 @@ object InteractiveShell {
InterruptHandler { jlineProcessor.interrupt() }.install()
thread(name = "Command line shell processor", isDaemon = true) {
// Give whoever has local shell access administrator access to the node.
CURRENT_RPC_CONTEXT.set(RpcContext(User(ArtemisMessagingComponent.NODE_USER, "", setOf())))
// TODO remove this after Shell switches to RPC
val context = RpcAuthContext(net.corda.core.context.InvocationContext.shell(), RpcPermissions.NONE)
CURRENT_RPC_CONTEXT.set(context)
Emoji.renderIfSupported {
jlineProcessor.run()
}
@ -235,7 +235,8 @@ object InteractiveShell {
val clazz: Class<FlowLogic<*>> = uncheckedCast(matches.single())
try {
// TODO Flow invocation should use startFlowDynamic.
val fsm = runFlowFromString({ node.services.startFlow(it, FlowInitiator.Shell).getOrThrow() }, inputData, clazz)
val context = net.corda.core.context.InvocationContext.shell()
val fsm = runFlowFromString({ node.services.startFlow(it, context).getOrThrow() }, inputData, clazz)
// Show the progress tracker on the console until the flow completes or is interrupted with a
// Ctrl-C keypress.
val latch = CountDownLatch(1)
@ -20,12 +20,11 @@ object ServiceIdentityGenerator {
* This method should be called *before* any of the nodes are started.
*
* @param dirs List of node directories to place the generated identity and key pairs in.
* @param serviceName The legal name of the distributed service.
* @param serviceName The legal name of the distributed service, with service id as CN.
* @param threshold The threshold for the generated group [CompositeKey].
*/
fun generateToDisk(dirs: List<Path>,
serviceName: CordaX500Name,
serviceId: String,
threshold: Int = 1): Party {
log.trace { "Generating a group identity \"serviceName\" for nodes: ${dirs.joinToString()}" }
val keyPairs = (1..dirs.size).map { generateKeyPair() }
@ -40,6 +39,7 @@ object ServiceIdentityGenerator {
val compositeKeyCert = X509Utilities.createCertificate(CertificateType.CLIENT_CA, issuer.certificate, issuer.keyPair, serviceName, notaryKey)
val certPath = (dir / "certificates").createDirectories() / "distributedService.jks"
val keystore = loadOrCreateKeyStore(certPath, "cordacadevpass")
val serviceId = serviceName.commonName
keystore.setCertificateEntry("$serviceId-composite-key", compositeKeyCert.cert)
keystore.setKeyEntry("$serviceId-private-key", keyPair.private, "cordacadevkeypass".toCharArray(), arrayOf(serviceKeyCert.cert, issuer.certificate.cert, rootCert))
keystore.save(certPath, "cordacadevpass")
@ -1,6 +1,7 @@
package net.corda.node.utilities.registration

import com.google.common.net.MediaType
import net.corda.core.internal.openHttpConnection
import net.corda.node.utilities.CertificateStream
import org.apache.commons.io.IOUtils
import org.bouncycastle.pkcs.PKCS10CertificationRequest
@ -12,7 +13,9 @@ import java.security.cert.Certificate
import java.util.*
import java.util.zip.ZipInputStream

class HTTPNetworkRegistrationService(val server: URL) : NetworkRegistrationService {
class HTTPNetworkRegistrationService(compatibilityZoneURL: URL) : NetworkRegistrationService {
private val registrationURL = URL("$compatibilityZoneURL/certificate")

companion object {
// TODO: Propagate version information from gradle
val clientVersion = "1.0"
@ -21,7 +24,7 @@ class HTTPNetworkRegistrationService(val server: URL) : NetworkRegistrationServi
@Throws(CertificateRequestException::class)
override fun retrieveCertificates(requestId: String): Array<Certificate>? {
// Poll server to download the signed certificate once request has been approved.
val url = URL("$server/api/certificate/$requestId")
val url = URL("$registrationURL/$requestId")
val conn = url.openConnection() as HttpURLConnection
conn.requestMethod = "GET"

@ -42,7 +45,7 @@ class HTTPNetworkRegistrationService(val server: URL) : NetworkRegistrationServi

override fun submitRequest(request: PKCS10CertificationRequest): String {
// Post request to certificate signing server via http.
val conn = URL("$server/api/certificate").openConnection() as HttpURLConnection
val conn = URL("$registrationURL").openHttpConnection()
conn.doOutput = true
conn.requestMethod = "POST"
conn.setRequestProperty("Content-Type", MediaType.OCTET_STREAM.toString())
@ -14,7 +14,6 @@ database = {
initDatabase = true
}
devMode = true
certificateSigningService = "https://cordaci-netperm.corda.r3cev.com"
useHTTPS = false
h2port = 0
useTestClock = false
@ -35,7 +35,7 @@ import static org.assertj.core.api.Assertions.*;

public class VaultQueryJavaTests {
@Rule
public SerializationEnvironmentRule testSerialization = new SerializationEnvironmentRule();
public final SerializationEnvironmentRule testSerialization = new SerializationEnvironmentRule();
private MockServices services;
private MockServices issuerServices;
private VaultService vaultService;
@ -50,7 +50,7 @@ public class VaultQueryJavaTests {
IdentityService identitySvc = makeTestIdentityService();
@SuppressWarnings("unchecked")
Pair<CordaPersistence, MockServices> databaseAndServices = makeTestDatabaseAndMockServices(keys, () -> identitySvc, cordappPackages);
issuerServices = new MockServices(cordappPackages, getDUMMY_CASH_ISSUER_KEY(), getBOC_KEY());
issuerServices = new MockServices(cordappPackages, getDUMMY_CASH_ISSUER_NAME(), getDUMMY_CASH_ISSUER_KEY(), getBOC_KEY());
database = databaseAndServices.getFirst();
services = databaseAndServices.getSecond();
vaultService = services.getVaultService();
@ -123,7 +123,6 @@ public class VaultQueryJavaTests {
3,
3,
new Random(),
new OpaqueBytes("1".getBytes()),
null,
CashUtilities.getDUMMY_CASH_ISSUER());
return tx;
@ -200,10 +199,10 @@ public class VaultQueryJavaTests {
Amount<Currency> dollars10 = new Amount<>(10, Currency.getInstance("USD"));
Amount<Currency> dollars1 = new Amount<>(1, Currency.getInstance("USD"));

VaultFiller.fillWithSomeTestCash(services, pounds, issuerServices, TestConstants.getDUMMY_NOTARY(), 1, 1, new Random(0L), new OpaqueBytes("1".getBytes()), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, dollars100, issuerServices, TestConstants.getDUMMY_NOTARY(), 1, 1, new Random(0L), new OpaqueBytes("1".getBytes()), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, dollars10, issuerServices, TestConstants.getDUMMY_NOTARY(), 1, 1, new Random(0L), new OpaqueBytes("1".getBytes()), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, dollars1, issuerServices, TestConstants.getDUMMY_NOTARY(), 1, 1, new Random(0L), new OpaqueBytes("1".getBytes()), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, pounds, issuerServices, TestConstants.getDUMMY_NOTARY(), 1, 1, new Random(0L), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, dollars100, issuerServices, TestConstants.getDUMMY_NOTARY(), 1, 1, new Random(0L), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, dollars10, issuerServices, TestConstants.getDUMMY_NOTARY(), 1, 1, new Random(0L), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, dollars1, issuerServices, TestConstants.getDUMMY_NOTARY(), 1, 1, new Random(0L), null, getDUMMY_CASH_ISSUER());
return tx;
});
database.transaction(tx -> {
@ -247,7 +246,6 @@ public class VaultQueryJavaTests {
3,
3,
new Random(),
new OpaqueBytes("1".getBytes()),
null,
getDUMMY_CASH_ISSUER());
return tx;
@ -323,11 +321,11 @@ public class VaultQueryJavaTests {
Amount<Currency> pounds = new Amount<>(400, Currency.getInstance("GBP"));
Amount<Currency> swissfrancs = new Amount<>(500, Currency.getInstance("CHF"));

VaultFiller.fillWithSomeTestCash(services, dollars100, issuerServices, TestConstants.getDUMMY_NOTARY(), 1, 1, new Random(0L), new OpaqueBytes("1".getBytes()), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, dollars200, issuerServices, TestConstants.getDUMMY_NOTARY(), 2, 2, new Random(0L), new OpaqueBytes("1".getBytes()), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, dollars300, issuerServices, TestConstants.getDUMMY_NOTARY(), 3, 3, new Random(0L), new OpaqueBytes("1".getBytes()), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, pounds, issuerServices, TestConstants.getDUMMY_NOTARY(), 4, 4, new Random(0L), new OpaqueBytes("1".getBytes()), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, swissfrancs, issuerServices, TestConstants.getDUMMY_NOTARY(), 5, 5, new Random(0L), new OpaqueBytes("1".getBytes()), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, dollars100, issuerServices, TestConstants.getDUMMY_NOTARY(), 1, 1, new Random(0L), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, dollars200, issuerServices, TestConstants.getDUMMY_NOTARY(), 2, 2, new Random(0L), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, dollars300, issuerServices, TestConstants.getDUMMY_NOTARY(), 3, 3, new Random(0L), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, pounds, issuerServices, TestConstants.getDUMMY_NOTARY(), 4, 4, new Random(0L), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, swissfrancs, issuerServices, TestConstants.getDUMMY_NOTARY(), 5, 5, new Random(0L), null, getDUMMY_CASH_ISSUER());

return tx;
});
@ -371,11 +369,11 @@ public class VaultQueryJavaTests {
Amount<Currency> pounds = new Amount<>(400, Currency.getInstance("GBP"));
Amount<Currency> swissfrancs = new Amount<>(500, Currency.getInstance("CHF"));

VaultFiller.fillWithSomeTestCash(services, dollars100, issuerServices, TestConstants.getDUMMY_NOTARY(), 1, 1, new Random(0L), new OpaqueBytes("1".getBytes()), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, dollars200, issuerServices, TestConstants.getDUMMY_NOTARY(), 2, 2, new Random(0L), new OpaqueBytes("1".getBytes()), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, dollars300, issuerServices, TestConstants.getDUMMY_NOTARY(), 3, 3, new Random(0L), new OpaqueBytes("1".getBytes()), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, pounds, issuerServices, TestConstants.getDUMMY_NOTARY(), 4, 4, new Random(0L), new OpaqueBytes("1".getBytes()), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, swissfrancs, issuerServices, TestConstants.getDUMMY_NOTARY(), 5, 5, new Random(0L), new OpaqueBytes("1".getBytes()), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, dollars100, issuerServices, TestConstants.getDUMMY_NOTARY(), 1, 1, new Random(0L), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, dollars200, issuerServices, TestConstants.getDUMMY_NOTARY(), 2, 2, new Random(0L), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, dollars300, issuerServices, TestConstants.getDUMMY_NOTARY(), 3, 3, new Random(0L), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, pounds, issuerServices, TestConstants.getDUMMY_NOTARY(), 4, 4, new Random(0L), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, swissfrancs, issuerServices, TestConstants.getDUMMY_NOTARY(), 5, 5, new Random(0L), null, getDUMMY_CASH_ISSUER());

return tx;
});
@ -395,37 +393,28 @@ public class VaultQueryJavaTests {
Vault.Page<Cash.State> results = vaultService.queryBy(Cash.State.class, criteria);
// DOCEND VaultJavaQueryExample22

assertThat(results.getOtherResults()).hasSize(27);
assertThat(results.getOtherResults()).hasSize(18);
/** CHF */
assertThat(results.getOtherResults().get(0)).isEqualTo(500L);
assertThat(results.getOtherResults().get(1)).isEqualTo("CHF");
assertThat(results.getOtherResults().get(2)).isEqualTo(5L);
assertThat(results.getOtherResults().get(3)).isEqualTo(102L);
assertThat(results.getOtherResults().get(4)).isEqualTo("CHF");
assertThat(results.getOtherResults().get(5)).isEqualTo(94L);
assertThat(results.getOtherResults().get(6)).isEqualTo("CHF");
assertThat(results.getOtherResults().get(7)).isEqualTo(100.00);
assertThat(results.getOtherResults().get(8)).isEqualTo("CHF");
assertThat(results.getOtherResults().get(1)).isEqualTo(5L);
assertThat(results.getOtherResults().get(2)).isEqualTo(102L);
assertThat(results.getOtherResults().get(3)).isEqualTo(94L);
assertThat(results.getOtherResults().get(4)).isEqualTo(100.00);
assertThat(results.getOtherResults().get(5)).isEqualTo("CHF");
/** GBP */
assertThat(results.getOtherResults().get(9)).isEqualTo(400L);
assertThat(results.getOtherResults().get(10)).isEqualTo("GBP");
assertThat(results.getOtherResults().get(11)).isEqualTo(4L);
assertThat(results.getOtherResults().get(12)).isEqualTo(103L);
assertThat(results.getOtherResults().get(13)).isEqualTo("GBP");
assertThat(results.getOtherResults().get(14)).isEqualTo(93L);
assertThat(results.getOtherResults().get(15)).isEqualTo("GBP");
assertThat(results.getOtherResults().get(16)).isEqualTo(100.0);
assertThat(results.getOtherResults().get(17)).isEqualTo("GBP");
assertThat(results.getOtherResults().get(6)).isEqualTo(400L);
assertThat(results.getOtherResults().get(7)).isEqualTo(4L);
assertThat(results.getOtherResults().get(8)).isEqualTo(103L);
assertThat(results.getOtherResults().get(9)).isEqualTo(93L);
assertThat(results.getOtherResults().get(10)).isEqualTo(100.0);
assertThat(results.getOtherResults().get(11)).isEqualTo("GBP");
/** USD */
assertThat(results.getOtherResults().get(18)).isEqualTo(600L);
assertThat(results.getOtherResults().get(19)).isEqualTo("USD");
assertThat(results.getOtherResults().get(20)).isEqualTo(6L);
assertThat(results.getOtherResults().get(21)).isEqualTo(113L);
assertThat(results.getOtherResults().get(22)).isEqualTo("USD");
assertThat(results.getOtherResults().get(23)).isEqualTo(87L);
assertThat(results.getOtherResults().get(24)).isEqualTo("USD");
assertThat(results.getOtherResults().get(25)).isEqualTo(100.0);
assertThat(results.getOtherResults().get(26)).isEqualTo("USD");
assertThat(results.getOtherResults().get(12)).isEqualTo(600L);
assertThat(results.getOtherResults().get(13)).isEqualTo(6L);
assertThat(results.getOtherResults().get(14)).isEqualTo(113L);
assertThat(results.getOtherResults().get(15)).isEqualTo(87L);
assertThat(results.getOtherResults().get(16)).isEqualTo(100.0);
assertThat(results.getOtherResults().get(17)).isEqualTo("USD");

} catch (NoSuchFieldException e) {
e.printStackTrace();
@ -443,10 +432,10 @@ public class VaultQueryJavaTests {
Amount<Currency> pounds300 = new Amount<>(300, Currency.getInstance("GBP"));
Amount<Currency> pounds400 = new Amount<>(400, Currency.getInstance("GBP"));

VaultFiller.fillWithSomeTestCash(services, dollars100, issuerServices, TestConstants.getDUMMY_NOTARY(), 1, 1, new Random(0L), new OpaqueBytes("1".getBytes()), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, dollars200, issuerServices, TestConstants.getDUMMY_NOTARY(), 2, 2, new Random(0L), new OpaqueBytes("1".getBytes()), null, getBOC().ref(new OpaqueBytes("1".getBytes())));
VaultFiller.fillWithSomeTestCash(services, pounds300, issuerServices, TestConstants.getDUMMY_NOTARY(), 3, 3, new Random(0L), new OpaqueBytes("1".getBytes()), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, pounds400, issuerServices, TestConstants.getDUMMY_NOTARY(), 4, 4, new Random(0L), new OpaqueBytes("1".getBytes()), null, getBOC().ref(new OpaqueBytes("1".getBytes())));
VaultFiller.fillWithSomeTestCash(services, dollars100, issuerServices, TestConstants.getDUMMY_NOTARY(), 1, 1, new Random(0L), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, dollars200, issuerServices, TestConstants.getDUMMY_NOTARY(), 2, 2, new Random(0L), null, getBOC().ref(new OpaqueBytes("1".getBytes())));
VaultFiller.fillWithSomeTestCash(services, pounds300, issuerServices, TestConstants.getDUMMY_NOTARY(), 3, 3, new Random(0L), null, getDUMMY_CASH_ISSUER());
VaultFiller.fillWithSomeTestCash(services, pounds400, issuerServices, TestConstants.getDUMMY_NOTARY(), 4, 4, new Random(0L), null, getBOC().ref(new OpaqueBytes("1".getBytes())));
|
||||
|
||||
return tx;
|
||||
});
|
||||
|
@ -24,7 +24,8 @@ class ArgsParserTest {
isVersion = false,
noLocalShell = false,
sshdServer = false,
justGenerateNodeInfo = false))
justGenerateNodeInfo = false,
bootstrapRaftCluster = false))
}

@Test

@ -124,4 +125,10 @@ class ArgsParserTest {
val cmdLineOptions = parser.parse("--just-generate-node-info")
assertThat(cmdLineOptions.justGenerateNodeInfo).isTrue()
}

@Test
fun `bootstrap raft cluster`() {
val cmdLineOptions = parser.parse("--bootstrap-raft-cluster")
assertThat(cmdLineOptions.bootstrapRaftCluster).isTrue()
}
}
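Not part of the diff above: the new test implies the parser gains a plain boolean switch. A minimal sketch of how such a valueless flag could be declared and read with joptsimple (the library Corda's command-line parsing is based on); the option name comes from the test, while the wrapper class and method names here are illustrative assumptions rather than the real ArgsParser.

import joptsimple.OptionParser

class BootstrapFlagParser {
    private val parser = OptionParser()
    // Declare --bootstrap-raft-cluster as a switch that takes no argument.
    private val bootstrapRaftClusterArg = parser.accepts("bootstrap-raft-cluster", "Bootstraps Raft cluster.")

    fun parse(vararg args: String): Boolean {
        val optionSet = parser.parse(*args)
        // has() is true only when the switch was supplied on the command line.
        return optionSet.has(bootstrapRaftClusterArg)
    }
}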
@ -2,6 +2,7 @@ package net.corda.node

import co.paralleluniverse.fibers.Suspendable
import net.corda.client.rpc.PermissionException
import net.corda.core.context.InvocationContext
import net.corda.core.contracts.Amount
import net.corda.core.contracts.ContractState
import net.corda.core.contracts.Issued

@ -25,17 +26,15 @@ import net.corda.finance.flows.CashIssueFlow
import net.corda.finance.flows.CashPaymentFlow
import net.corda.node.internal.SecureCordaRPCOps
import net.corda.node.internal.StartedNode
import net.corda.node.services.Permissions.Companion.startFlow
import net.corda.node.services.Permissions.Companion.invokeRpc
import net.corda.node.services.Permissions.Companion.startFlow
import net.corda.node.services.messaging.CURRENT_RPC_CONTEXT
import net.corda.node.services.messaging.RpcContext
import net.corda.nodeapi.User
import net.corda.testing.chooseIdentity
import net.corda.testing.expect
import net.corda.testing.expectEvents
import net.corda.node.services.messaging.RpcAuthContext
import net.corda.node.services.messaging.RpcPermissions
import net.corda.testing.*
import net.corda.testing.node.MockNetwork
import net.corda.testing.node.MockNetwork.MockNode
import net.corda.testing.sequence
import net.corda.testing.node.MockNodeParameters
import org.apache.commons.io.IOUtils
import org.assertj.core.api.Assertions.assertThatExceptionOfType
import org.junit.After

@ -56,25 +55,25 @@ class CordaRPCOpsImplTest {

private lateinit var mockNet: MockNetwork
private lateinit var aliceNode: StartedNode<MockNode>
private lateinit var alice: Party
private lateinit var notary: Party
private lateinit var rpc: CordaRPCOps
private lateinit var stateMachineUpdates: Observable<StateMachineUpdate>
private lateinit var transactions: Observable<SignedTransaction>
private lateinit var vaultTrackCash: Observable<Vault.Update<Cash.State>>

private val user = User("user", "pwd", permissions = emptySet())

@Before
fun setup() {
mockNet = MockNetwork(cordappPackages = listOf("net.corda.finance.contracts.asset"))
aliceNode = mockNet.createNode()
aliceNode = mockNet.createNode(MockNodeParameters(legalName = ALICE_NAME))
rpc = SecureCordaRPCOps(aliceNode.services, aliceNode.smm, aliceNode.database, aliceNode.services)
CURRENT_RPC_CONTEXT.set(RpcContext(user))
CURRENT_RPC_CONTEXT.set(RpcAuthContext(InvocationContext.rpc(testActor()), RpcPermissions.NONE))

mockNet.runNetwork()
withPermissions(invokeRpc(CordaRPCOps::notaryIdentities)) {
notary = rpc.notaryIdentities().first()
notary = rpc.notaryIdentities().single()
}
alice = aliceNode.services.myInfo.identityFromX500Name(ALICE_NAME)
}

@After

@ -119,7 +118,7 @@ class CordaRPCOpsImplTest {

val anonymisedRecipient = result.returnValue.getOrThrow().recipient!!
val expectedState = Cash.State(Amount(quantity,
Issued(aliceNode.info.chooseIdentity().ref(ref), GBP)),
Issued(alice.ref(ref), GBP)),
anonymisedRecipient)

// Query vault via RPC

@ -157,7 +156,7 @@ class CordaRPCOpsImplTest {

mockNet.runNetwork()

rpc.startFlow(::CashPaymentFlow, 100.DOLLARS, aliceNode.info.chooseIdentity())
rpc.startFlow(::CashPaymentFlow, 100.DOLLARS, alice)

mockNet.runNetwork()

@ -191,7 +190,7 @@ class CordaRPCOpsImplTest {
require(stx.tx.outputs.size == 1)
val signaturePubKeys = stx.sigs.map { it.by }.toSet()
// Only Alice signed, as issuer
val aliceKey = aliceNode.info.chooseIdentity().owningKey
val aliceKey = alice.owningKey
require(signaturePubKeys.size <= aliceKey.keys.size)
require(aliceKey.isFulfilledBy(signaturePubKeys))
},

@ -290,7 +289,7 @@ class CordaRPCOpsImplTest {

val previous = CURRENT_RPC_CONTEXT.get()
try {
CURRENT_RPC_CONTEXT.set(RpcContext(user.copy(permissions = permissions.toSet())))
CURRENT_RPC_CONTEXT.set(previous.copy(grantedPermissions = RpcPermissions(permissions.toSet())))
action.invoke()
} finally {
CURRENT_RPC_CONTEXT.set(previous)
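A sketch inferred from the hunk above rather than copied from the file: the helper that wraps an RPC call in a temporary set of granted permissions now swaps the whole RpcAuthContext instead of copying the User. Names and the CURRENT_RPC_CONTEXT / RpcPermissions imports follow the diff; the exact signature of the enclosing helper is an assumption.

private fun withPermissions(vararg permissions: String, action: () -> Unit) {
    // Remember the ambient RPC context so the test cannot leak permissions across cases.
    val previous = CURRENT_RPC_CONTEXT.get()
    try {
        // Grant only the requested permissions for the duration of the action.
        CURRENT_RPC_CONTEXT.set(previous.copy(grantedPermissions = RpcPermissions(permissions.toSet())))
        action.invoke()
    } finally {
        CURRENT_RPC_CONTEXT.set(previous)
    }
}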
@ -1,9 +1,10 @@
package net.corda.node.internal

import co.paralleluniverse.fibers.Suspendable
import net.corda.core.flows.FlowInitiator
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.StartableByService
import net.corda.core.context.InvocationContext
import net.corda.core.context.Origin
import net.corda.core.node.AppServiceHub
import net.corda.core.node.ServiceHub
import net.corda.core.node.services.CordaService

@ -24,18 +25,18 @@ import kotlin.test.assertNotEquals
import kotlin.test.assertTrue

@StartableByService
class DummyServiceFlow : FlowLogic<FlowInitiator>() {
class DummyServiceFlow : FlowLogic<InvocationContext>() {
companion object {
object TEST_STEP : ProgressTracker.Step("Custom progress step")
}
override val progressTracker: ProgressTracker = ProgressTracker(TEST_STEP)

@Suspendable
override fun call(): FlowInitiator {
override fun call(): InvocationContext {
// We call a subFlow, otherwise there is no chance to subscribe to the ProgressTracker
subFlow(CashIssueFlow(100.DOLLARS, OpaqueBytes.of(1), serviceHub.networkMapCache.notaryIdentities.first()))
progressTracker.currentStep = TEST_STEP
return stateMachine.flowInitiator
return stateMachine.context
}
}

@ -43,9 +44,8 @@ class DummyServiceFlow : FlowLogic<FlowInitiator>() {
class TestCordaService(val appServiceHub: AppServiceHub): SingletonSerializeAsToken() {
fun startServiceFlow() {
val handle = appServiceHub.startFlow(DummyServiceFlow())
val initiator = handle.returnValue.get()
initiator as FlowInitiator.Service
assertEquals(this.javaClass.name, initiator.serviceClassName)
val context = handle.returnValue.get()
assertEquals(this.javaClass.name, (context.origin as Origin.Service).serviceClassName)
}

fun startServiceFlowAndTrack() {
@ -68,9 +68,7 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
private val cordappPackages = listOf("net.corda.finance.contracts")
@JvmStatic
@Parameterized.Parameters(name = "Anonymous = {0}")
fun data(): Collection<Boolean> {
return listOf(true, false)
}
fun data(): Collection<Boolean> = listOf(true, false)
}

private lateinit var mockNet: MockNetwork

@ -92,14 +90,15 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
// we run in the unit test thread exclusively to speed things up, ensure deterministic results and
// allow interruption half way through.
mockNet = MockNetwork(threadPerNode = true, cordappPackages = cordappPackages)
ledger(MockServices(cordappPackages), initialiseSerialization = false) {
ledger(MockServices(cordappPackages)) {
val notaryNode = mockNet.defaultNotaryNode
val aliceNode = mockNet.createPartyNode(ALICE_NAME)
val bobNode = mockNet.createPartyNode(BOB_NAME)
val bankNode = mockNet.createPartyNode(BOC_NAME)
val alice = aliceNode.info.singleIdentity()
val bank = bankNode.info.singleIdentity()
val notary = notaryNode.services.getDefaultNotary()
val bob = bobNode.info.singleIdentity()
val notary = mockNet.defaultNotaryIdentity
val cashIssuer = bank.ref(1)
val cpIssuer = bank.ref(1, 2, 3)

@ -116,9 +115,9 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
1200.DOLLARS `issued by` bank.ref(0), null, notary).second
}

insertFakeTransactions(alicesFakePaper, aliceNode, notaryNode, bankNode)
insertFakeTransactions(alicesFakePaper, aliceNode, alice, notaryNode, bankNode)

val (bobStateMachine, aliceResult) = runBuyerAndSeller(notary, aliceNode, bobNode,
val (bobStateMachine, aliceResult) = runBuyerAndSeller(notary, bob, aliceNode, bobNode,
"alice's paper".outputStateAndRef())

// TODO: Verify that the result was inserted into the transaction database.

@ -142,15 +141,16 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
@Test(expected = InsufficientBalanceException::class)
fun `trade cash for commercial paper fails using soft locking`() {
mockNet = MockNetwork(threadPerNode = true, cordappPackages = cordappPackages)
ledger(MockServices(cordappPackages), initialiseSerialization = false) {
ledger(MockServices(cordappPackages)) {
val notaryNode = mockNet.defaultNotaryNode
val aliceNode = mockNet.createPartyNode(ALICE_NAME)
val bobNode = mockNet.createPartyNode(BOB_NAME)
val bankNode = mockNet.createPartyNode(BOC_NAME)
val alice = aliceNode.info.singleIdentity()
val bank = bankNode.info.singleIdentity()
val bob = bobNode.info.singleIdentity()
val issuer = bank.ref(1)
val notary = aliceNode.services.getDefaultNotary()
val notary = mockNet.defaultNotaryIdentity

aliceNode.internals.disableDBCloseOnStop()
bobNode.internals.disableDBCloseOnStop()

@ -165,7 +165,7 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
1200.DOLLARS `issued by` bank.ref(0), null, notary).second
}

insertFakeTransactions(alicesFakePaper, aliceNode, notaryNode, bankNode)
insertFakeTransactions(alicesFakePaper, aliceNode, alice, notaryNode, bankNode)

val cashLockId = UUID.randomUUID()
bobNode.database.transaction {

@ -176,7 +176,7 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
}
}

val (bobStateMachine, aliceResult) = runBuyerAndSeller(notary, aliceNode, bobNode,
val (bobStateMachine, aliceResult) = runBuyerAndSeller(notary, bob, aliceNode, bobNode,
"alice's paper".outputStateAndRef())

assertEquals(aliceResult.getOrThrow(), bobStateMachine.getOrThrow().resultFuture.getOrThrow())

@ -198,7 +198,7 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
@Test
fun `shutdown and restore`() {
mockNet = MockNetwork(cordappPackages = cordappPackages)
ledger(MockServices(cordappPackages), initialiseSerialization = false) {
ledger(MockServices(cordappPackages)) {
val notaryNode = mockNet.defaultNotaryNode
val aliceNode = mockNet.createPartyNode(ALICE_NAME)
var bobNode = mockNet.createPartyNode(BOB_NAME)

@ -209,9 +209,10 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
val bobAddr = bobNode.network.myAddress as InMemoryMessagingNetwork.PeerHandle
mockNet.runNetwork() // Clear network map registration messages

val notary = notaryNode.services.getDefaultNotary()
val notary = mockNet.defaultNotaryIdentity
val alice = aliceNode.info.singleIdentity()
val bank = bankNode.info.singleIdentity()
val bob = bobNode.info.singleIdentity()
val issuer = bank.ref(1, 2, 3)

bobNode.database.transaction {

@ -222,8 +223,8 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
fillUpForSeller(false, issuer, alice,
1200.DOLLARS `issued by` bank.ref(0), null, notary).second
}
insertFakeTransactions(alicesFakePaper, aliceNode, notaryNode, bankNode)
val aliceFuture = runBuyerAndSeller(notary, aliceNode, bobNode, "alice's paper".outputStateAndRef()).sellerResult
insertFakeTransactions(alicesFakePaper, aliceNode, alice, notaryNode, bankNode)
val aliceFuture = runBuyerAndSeller(notary, bob, aliceNode, bobNode, "alice's paper".outputStateAndRef()).sellerResult

// Everything is on this thread so we can now step through the flow one step at a time.
// Seller Alice already sent a message to Buyer Bob. Pump once:

@ -298,8 +299,8 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
return mockNet.createNode(MockNodeParameters(legalName = name), nodeFactory = { args ->
object : MockNetwork.MockNode(args) {
// That constructs a recording tx storage
override fun makeTransactionStorage(): WritableTransactionStorage {
return RecordingTransactionStorage(database, super.makeTransactionStorage())
override fun makeTransactionStorage(database: CordaPersistence): WritableTransactionStorage {
return RecordingTransactionStorage(database, super.makeTransactionStorage(database))
}
}
})

@ -313,13 +314,13 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
val bobNode = makeNodeWithTracking(BOB_NAME)
val bankNode = makeNodeWithTracking(BOC_NAME)
mockNet.runNetwork()
val notary = aliceNode.services.getDefaultNotary()
val notary = mockNet.defaultNotaryIdentity
val alice = aliceNode.info.singleIdentity()
val bob = bobNode.info.singleIdentity()
val bank = bankNode.info.singleIdentity()
val issuer = bank.ref(1, 2, 3)

ledger(aliceNode.services, initialiseSerialization = false) {
ledger(aliceNode.services) {

// Insert a prospectus type attachment into the commercial paper transaction.
val stream = ByteArrayOutputStream()

@ -335,16 +336,16 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
val bobsFakeCash = bobNode.database.transaction {
fillUpForBuyer(false, issuer, AnonymousParty(bob.owningKey), notary)
}.second
val bobsSignedTxns = insertFakeTransactions(bobsFakeCash, bobNode, notaryNode, bankNode)
val bobsSignedTxns = insertFakeTransactions(bobsFakeCash, bobNode, bob, notaryNode, bankNode)
val alicesFakePaper = aliceNode.database.transaction {
fillUpForSeller(false, issuer, alice,
1200.DOLLARS `issued by` bank.ref(0), attachmentID, notary).second
}
val alicesSignedTxns = insertFakeTransactions(alicesFakePaper, aliceNode, notaryNode, bankNode)
val alicesSignedTxns = insertFakeTransactions(alicesFakePaper, aliceNode, alice, notaryNode, bankNode)

mockNet.runNetwork() // Clear network map registration messages

runBuyerAndSeller(notary, aliceNode, bobNode, "alice's paper".outputStateAndRef())
runBuyerAndSeller(notary, bob, aliceNode, bobNode, "alice's paper".outputStateAndRef())

mockNet.runNetwork()
@ -419,13 +420,13 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
val bobNode = makeNodeWithTracking(BOB_NAME)
val bankNode = makeNodeWithTracking(BOC_NAME)

mockNet.runNetwork()
val notary = aliceNode.services.getDefaultNotary()
val notary = mockNet.defaultNotaryIdentity
val alice: Party = aliceNode.info.singleIdentity()
val bank: Party = bankNode.info.singleIdentity()
val bob = bobNode.info.singleIdentity()
val issuer = bank.ref(1, 2, 3)

ledger(aliceNode.services, initialiseSerialization = false) {
ledger(aliceNode.services) {
// Insert a prospectus type attachment into the commercial paper transaction.
val stream = ByteArrayOutputStream()
JarOutputStream(stream).use {

@ -441,22 +442,20 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
val bobsFakeCash = bobNode.database.transaction {
fillUpForBuyer(false, issuer, AnonymousParty(bobsKey), notary)
}.second
insertFakeTransactions(bobsFakeCash, bobNode, notaryNode, bankNode)
insertFakeTransactions(bobsFakeCash, bobNode, bob, notaryNode, bankNode)

val alicesFakePaper = aliceNode.database.transaction {
fillUpForSeller(false, issuer, alice,
1200.DOLLARS `issued by` bank.ref(0), attachmentID, notary).second
}

insertFakeTransactions(alicesFakePaper, aliceNode, notaryNode, bankNode)

mockNet.runNetwork() // Clear network map registration messages
insertFakeTransactions(alicesFakePaper, aliceNode, alice, notaryNode, bankNode)

val aliceTxStream = aliceNode.services.validatedTransactions.track().updates
val aliceTxMappings = with(aliceNode) {
database.transaction { services.stateMachineRecordedTransactionMapping.track().updates }
}
val aliceSmId = runBuyerAndSeller(notary, aliceNode, bobNode,
val aliceSmId = runBuyerAndSeller(notary, bob, aliceNode, bobNode,
"alice's paper".outputStateAndRef()).sellerId

mockNet.runNetwork()

@ -495,7 +494,7 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
@Test
fun `dependency with error on buyer side`() {
mockNet = MockNetwork(cordappPackages = cordappPackages)
ledger(MockServices(cordappPackages), initialiseSerialization = false) {
ledger(MockServices(cordappPackages)) {
runWithError(true, false, "at least one cash input")
}
}

@ -503,7 +502,7 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
@Test
fun `dependency with error on seller side`() {
mockNet = MockNetwork(cordappPackages = cordappPackages)
ledger(MockServices(cordappPackages), initialiseSerialization = false) {
ledger(MockServices(cordappPackages)) {
runWithError(false, true, "Issuances have a time-window")
}
}

@ -516,12 +515,13 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
)

private fun runBuyerAndSeller(notary: Party,
buyer: Party,
sellerNode: StartedNode<MockNetwork.MockNode>,
buyerNode: StartedNode<MockNetwork.MockNode>,
assetToSell: StateAndRef<OwnableState>): RunResult {
val buyerFlows: Observable<out FlowLogic<*>> = buyerNode.internals.registerInitiatedFlow(BuyerAcceptor::class.java)
val firstBuyerFiber = buyerFlows.toFuture().map { it.stateMachine }
val seller = SellerInitiator(buyerNode.info.chooseIdentity(), notary, assetToSell, 1000.DOLLARS, anonymous)
val seller = SellerInitiator(buyer, notary, assetToSell, 1000.DOLLARS, anonymous)
val sellerResult = sellerNode.services.startFlow(seller).resultFuture
return RunResult(firstBuyerFiber, sellerResult, seller.stateMachine.id)
}

@ -574,8 +574,7 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
val bobNode = mockNet.createPartyNode(BOB_NAME)
val bankNode = mockNet.createPartyNode(BOC_NAME)

mockNet.runNetwork()
val notary = aliceNode.services.getDefaultNotary()
val notary = mockNet.defaultNotaryIdentity
val alice = aliceNode.info.singleIdentity()
val bob = bobNode.info.singleIdentity()
val bank = bankNode.info.singleIdentity()

@ -588,12 +587,10 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
fillUpForSeller(aliceError, issuer, alice,1200.DOLLARS `issued by` issuer, null, notary).second
}

insertFakeTransactions(bobsBadCash, bobNode, notaryNode, bankNode)
insertFakeTransactions(alicesFakePaper, aliceNode, notaryNode, bankNode)
insertFakeTransactions(bobsBadCash, bobNode, bob, notaryNode, bankNode)
insertFakeTransactions(alicesFakePaper, aliceNode, alice, notaryNode, bankNode)

mockNet.runNetwork() // Clear network map registration messages

val (bobStateMachine, aliceResult) = runBuyerAndSeller(notary, aliceNode, bobNode, "alice's paper".outputStateAndRef())
val (bobStateMachine, aliceResult) = runBuyerAndSeller(notary, bob, aliceNode, bobNode, "alice's paper".outputStateAndRef())

mockNet.runNetwork()

@ -613,14 +610,14 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
private fun insertFakeTransactions(
wtxToSign: List<WireTransaction>,
node: StartedNode<*>,
identity: Party,
notaryNode: StartedNode<*>,
vararg extraSigningNodes: StartedNode<*>): Map<SecureHash, SignedTransaction> {

val notaryParty = notaryNode.info.legalIdentities[0]
val notaryParty = mockNet.defaultNotaryIdentity
val signed = wtxToSign.map {
val id = it.id
val sigs = mutableListOf<TransactionSignature>()
val nodeKey = node.info.chooseIdentity().owningKey
val nodeKey = identity.owningKey
sigs += node.services.keyManagementService.sign(
SignableData(id, SignatureMetadata(1, Crypto.findSignatureScheme(nodeKey).schemeNumberID)),
nodeKey

@ -630,11 +627,12 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
notaryParty.owningKey
)
extraSigningNodes.forEach { currentNode ->
val currentIdentity = currentNode.info.singleIdentity()
sigs += currentNode.services.keyManagementService.sign(
SignableData(id, SignatureMetadata(
1,
Crypto.findSignatureScheme(currentNode.info.chooseIdentity().owningKey).schemeNumberID)),
currentNode.info.chooseIdentity().owningKey)
Crypto.findSignatureScheme(currentIdentity.owningKey).schemeNumberID)),
currentIdentity.owningKey)
}
SignedTransaction(it, sigs)
}

@ -722,7 +720,10 @@ class TwoPartyTradeFlowTests(private val anonymous: Boolean) {
}

class RecordingTransactionStorage(val database: CordaPersistence, val delegate: WritableTransactionStorage) : WritableTransactionStorage, SingletonSerializeAsToken() {
class RecordingTransactionStorage(
private val database: CordaPersistence,
private val delegate: WritableTransactionStorage
) : WritableTransactionStorage, SingletonSerializeAsToken() {
override fun track(): DataFeed<List<SignedTransaction>, SignedTransaction> {
return database.transaction {
delegate.track()
@ -17,6 +17,7 @@ import net.corda.testing.*
import net.corda.testing.contracts.DummyContract
import net.corda.testing.node.MockNetwork
import net.corda.testing.node.MockNetwork.NotarySpec
import net.corda.testing.node.MockNodeParameters
import org.assertj.core.api.Assertions.assertThatExceptionOfType
import org.junit.After
import org.junit.Before

@ -27,26 +28,31 @@ import kotlin.test.assertEquals
import kotlin.test.assertTrue

class NotaryChangeTests {
companion object {
private val DUMMY_NOTARY_SERVICE_NAME: CordaX500Name = DUMMY_NOTARY.name.copy(commonName = "corda.notary.validating")
}

private lateinit var mockNet: MockNetwork
private lateinit var oldNotaryNode: StartedNode<MockNetwork.MockNode>
private lateinit var clientNodeA: StartedNode<MockNetwork.MockNode>
private lateinit var clientNodeB: StartedNode<MockNetwork.MockNode>
private lateinit var newNotaryParty: Party
private lateinit var oldNotaryParty: Party
private lateinit var clientA: Party

@Before
fun setUp() {
val oldNotaryName = DUMMY_REGULATOR.name
val oldNotaryName = DUMMY_NOTARY.name.copy(organisation = "Old Dummy Notary")
mockNet = MockNetwork(
notarySpecs = listOf(NotarySpec(DUMMY_NOTARY.name), NotarySpec(oldNotaryName)),
cordappPackages = listOf("net.corda.testing.contracts")
)
clientNodeA = mockNet.createNode()
clientNodeB = mockNet.createNode()
clientNodeA = mockNet.createNode(MockNodeParameters(legalName = ALICE_NAME))
clientNodeB = mockNet.createNode(MockNodeParameters(legalName = BOB_NAME))
clientA = clientNodeA.info.singleIdentity()
oldNotaryNode = mockNet.notaryNodes[1]
mockNet.runNetwork() // Clear network map registration messages
newNotaryParty = clientNodeA.services.networkMapCache.getNotary(DUMMY_NOTARY.name)!!
oldNotaryParty = clientNodeA.services.networkMapCache.getNotary(oldNotaryName)!!
newNotaryParty = clientNodeA.services.networkMapCache.getNotary(DUMMY_NOTARY_SERVICE_NAME)!!
oldNotaryParty = clientNodeA.services.networkMapCache.getNotary(DUMMY_NOTARY_SERVICE_NAME.copy(organisation = "Old Dummy Notary"))!!
}

@After

@ -56,7 +62,7 @@ class NotaryChangeTests {

@Test
fun `should change notary for a state with single participant`() {
val state = issueState(clientNodeA, oldNotaryParty)
val state = issueState(clientNodeA.services, clientA, oldNotaryParty)
assertEquals(state.state.notary, oldNotaryParty)
val newState = changeNotary(state, clientNodeA, newNotaryParty)
assertEquals(newState.state.notary, newNotaryParty)

@ -94,7 +100,7 @@ class NotaryChangeTests {

@Test
fun `should not break encumbrance links`() {
val issueTx = issueEncumberedState(clientNodeA, oldNotaryParty)
val issueTx = issueEncumberedState(clientNodeA.services, clientA, oldNotaryParty)

val state = StateAndRef(issueTx.outputs.first(), StateRef(issueTx.id, 0))
val newNotary = newNotaryParty

@ -128,7 +134,7 @@ class NotaryChangeTests {

@Test
fun `notary change and regular transactions are properly handled during resolution in longer chains`() {
val issued = issueState(clientNodeA, oldNotaryParty)
val issued = issueState(clientNodeA.services, clientA, oldNotaryParty)
val moved = moveState(issued, clientNodeA, clientNodeB)

// We don't do tx resolution when moving state to another node, so we need to add the issue transaction manually

@ -167,8 +173,8 @@ class NotaryChangeTests {
return finalTransaction.tx.outRef(0)
}

private fun issueEncumberedState(node: StartedNode<*>, notaryIdentity: Party): WireTransaction {
val owner = node.info.chooseIdentity().ref(0)
private fun issueEncumberedState(services: ServiceHub, nodeIdentity: Party, notaryIdentity: Party): WireTransaction {
val owner = nodeIdentity.ref(0)
val stateA = DummyContract.SingleOwnerState(Random().nextInt(), owner.party)
val stateB = DummyContract.SingleOwnerState(Random().nextInt(), owner.party)
val stateC = DummyContract.SingleOwnerState(Random().nextInt(), owner.party)

@ -179,9 +185,9 @@ class NotaryChangeTests {
addOutputState(stateC, DummyContract.PROGRAM_ID, notaryIdentity)
addOutputState(stateB, DummyContract.PROGRAM_ID, notaryIdentity, encumbrance = 1) // Encumbered by stateC
}
val stx = node.services.signInitialTransaction(tx)
node.services.recordTransactions(stx)
return tx.toWireTransaction(node.services)
val stx = services.signInitialTransaction(tx)
services.recordTransactions(stx)
return tx.toWireTransaction(services)
}

// TODO: Add more test cases once we have a general flow/service exception handling mechanism:

@ -193,10 +199,10 @@ class NotaryChangeTests {
// - The transaction type is not a notary change transaction at all.
}

fun issueState(node: StartedNode<*>, notaryIdentity: Party): StateAndRef<DummyContract.SingleOwnerState> {
val tx = DummyContract.generateInitial(Random().nextInt(), notaryIdentity, node.info.chooseIdentity().ref(0))
val stx = node.services.signInitialTransaction(tx)
node.services.recordTransactions(stx)
fun issueState(services: ServiceHub, nodeIdentity: Party, notaryIdentity: Party): StateAndRef<DummyContract.SingleOwnerState> {
val tx = DummyContract.generateInitial(Random().nextInt(), notaryIdentity, nodeIdentity.ref(0))
val stx = services.signInitialTransaction(tx)
services.recordTransactions(stx)
return stx.tx.outRef(0)
}
@ -23,10 +23,10 @@ class NodeConfigurationImplTest {

@Test
fun `check devModeOptions flag helper`() {
assertFalse { configDebugOptions(true, null).devModeOptions?.disableCheckpointChecker == true }
assertFalse { configDebugOptions(true, DevModeOptions()).devModeOptions?.disableCheckpointChecker == true }
assertFalse { configDebugOptions(true, DevModeOptions(false)).devModeOptions?.disableCheckpointChecker == true }
assertTrue { configDebugOptions(true, DevModeOptions(true)).devModeOptions?.disableCheckpointChecker == true }
assertTrue { configDebugOptions(true, null).shouldCheckCheckpoints() }
assertTrue { configDebugOptions(true, DevModeOptions()).shouldCheckCheckpoints() }
assertTrue { configDebugOptions(true, DevModeOptions(false)).shouldCheckCheckpoints() }
assertFalse { configDebugOptions(true, DevModeOptions(true)).shouldCheckCheckpoints() }
}

private fun configDebugOptions(devMode: Boolean, devModeOptions: DevModeOptions?) : NodeConfiguration {

@ -41,7 +41,6 @@ class NodeConfigurationImplTest {
trustStorePassword = "trustpass",
dataSourceProperties = makeTestDataSourceProperties(ALICE.name.organisation),
database = makeTestDatabaseProperties(),
certificateSigningService = URL("http://localhost"),
rpcUsers = emptyList(),
verifierType = VerifierType.InMemory,
useHTTPS = false,
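Not in the diff: a hedged reading of what the new shouldCheckCheckpoints() helper could reduce to, written as a standalone function so it can be checked against the four assertions above. How it is actually attached to NodeConfiguration, and whether it also consults devMode, are assumptions.

// Checkpoints are checked unless dev-mode options explicitly disable the checker.
fun shouldCheckCheckpoints(devModeOptions: DevModeOptions?): Boolean =
        devModeOptions?.disableCheckpointChecker != true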
@ -40,10 +40,7 @@ import net.corda.testing.node.MockServices.Companion.makeTestDataSourcePropertie
import net.corda.testing.node.MockServices.Companion.makeTestDatabaseProperties
import net.corda.testing.node.MockServices.Companion.makeTestIdentityService
import org.assertj.core.api.Assertions.assertThat
import org.junit.After
import org.junit.Before
import org.junit.Rule
import org.junit.Test
import org.junit.*
import java.nio.file.Paths
import java.security.PublicKey
import java.time.Clock

@ -60,7 +57,7 @@ class NodeSchedulerServiceTest : SingletonSerializeAsToken() {

@Rule
@JvmField
val testSerialization = SerializationEnvironmentRule()
val testSerialization = SerializationEnvironmentRule(true)
private val realClock: Clock = Clock.systemUTC()
private val stoppedClock: Clock = Clock.fixed(realClock.instant(), realClock.zone)
private val testClock = TestClock(stoppedClock)

@ -106,7 +103,7 @@ class NodeSchedulerServiceTest : SingletonSerializeAsToken() {
doReturn(configuration).whenever(it).configuration
doReturn(MonitoringService(MetricRegistry())).whenever(it).monitoringService
doReturn(validatedTransactions).whenever(it).validatedTransactions
doReturn(NetworkMapCacheImpl(MockNetworkMapCache(database, configuration), identityService)).whenever(it).networkMapCache
doReturn(NetworkMapCacheImpl(MockNetworkMapCache(database), identityService)).whenever(it).networkMapCache
doCallRealMethod().whenever(it).signInitialTransaction(any(), any<PublicKey>())
doReturn(myInfo).whenever(it).myInfo
doReturn(kms).whenever(it).keyManagementService

@ -131,6 +128,7 @@ class NodeSchedulerServiceTest : SingletonSerializeAsToken() {
}
}

private var allowedUnsuspendedFiberCount = 0
@After
fun tearDown() {
// We need to make sure the StateMachineManager is done before shutting down executors.

@ -140,6 +138,7 @@ class NodeSchedulerServiceTest : SingletonSerializeAsToken() {
smmExecutor.shutdown()
smmExecutor.awaitTermination(60, TimeUnit.SECONDS)
database.close()
mockSMM.stop(allowedUnsuspendedFiberCount)
}

// Ignore IntelliJ when it says these properties can be private, if they are we cannot serialise them

@ -223,6 +222,7 @@ class NodeSchedulerServiceTest : SingletonSerializeAsToken() {

@Test
fun `test activity due in the future and schedule another later`() {
allowedUnsuspendedFiberCount = 1
val time = stoppedClock.instant() + 1.days
scheduleTX(time)
@ -2,8 +2,12 @@ package net.corda.node.services.events

import co.paralleluniverse.fibers.Suspendable
import net.corda.core.concurrent.CordaFuture
import net.corda.core.context.Origin
import net.corda.core.contracts.*
import net.corda.core.flows.*
import net.corda.core.flows.FinalityFlow
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.FlowLogicRefFactory
import net.corda.core.flows.SchedulableFlow
import net.corda.core.identity.Party
import net.corda.core.node.services.VaultService
import net.corda.core.node.services.queryBy

@ -16,11 +20,10 @@ import net.corda.core.transactions.TransactionBuilder
import net.corda.core.utilities.getOrThrow
import net.corda.node.internal.StartedNode
import net.corda.node.services.statemachine.StateMachineManager
import net.corda.testing.chooseIdentity
import net.corda.testing.*
import net.corda.testing.contracts.DummyContract
import net.corda.testing.dummyCommand
import net.corda.testing.getDefaultNotary
import net.corda.testing.node.MockNetwork
import net.corda.testing.node.MockNodeParameters
import org.junit.After
import org.junit.Assert.*
import org.junit.Before

@ -34,9 +37,12 @@ class ScheduledFlowTests {
val SORTING = Sort(listOf(Sort.SortColumn(SortAttribute.Standard(Sort.CommonStateAttribute.STATE_REF_TXN_ID), Sort.Direction.DESC)))
}

lateinit var mockNet: MockNetwork
lateinit var nodeA: StartedNode<MockNetwork.MockNode>
lateinit var nodeB: StartedNode<MockNetwork.MockNode>
private lateinit var mockNet: MockNetwork
private lateinit var aliceNode: StartedNode<MockNetwork.MockNode>
private lateinit var bobNode: StartedNode<MockNetwork.MockNode>
private lateinit var notary: Party
private lateinit var alice: Party
private lateinit var bob: Party

data class ScheduledState(val creationTime: Instant,
val source: Party,

@ -55,11 +61,10 @@ class ScheduledFlowTests {
override val participants: List<Party> get() = listOf(source, destination)
}

class InsertInitialStateFlow(private val destination: Party) : FlowLogic<Unit>() {
class InsertInitialStateFlow(private val destination: Party, private val notary: Party) : FlowLogic<Unit>() {
@Suspendable
override fun call() {
val scheduledState = ScheduledState(serviceHub.clock.instant(), ourIdentity, destination)
val notary = serviceHub.getDefaultNotary()
val builder = TransactionBuilder(notary)
.addOutputState(scheduledState, DummyContract.PROGRAM_ID)
.addCommand(dummyCommand(ourIdentity.owningKey))

@ -93,12 +98,11 @@ class ScheduledFlowTests {
@Before
fun setup() {
mockNet = MockNetwork(threadPerNode = true, cordappPackages = listOf("net.corda.testing.contracts"))
val a = mockNet.createUnstartedNode()
val b = mockNet.createUnstartedNode()

mockNet.startNodes()
nodeA = a.started!!
nodeB = b.started!!
aliceNode = mockNet.createNode(MockNodeParameters(legalName = ALICE_NAME))
bobNode = mockNet.createNode(MockNodeParameters(legalName = BOB_NAME))
notary = mockNet.defaultNotaryIdentity
alice = aliceNode.info.singleIdentity()
bob = bobNode.info.singleIdentity()
}

@After

@ -109,20 +113,20 @@ class ScheduledFlowTests {
@Test
fun `create and run scheduled flow then wait for result`() {
var countScheduledFlows = 0
nodeA.smm.track().updates.subscribe {
aliceNode.smm.track().updates.subscribe {
if (it is StateMachineManager.Change.Add) {
val initiator = it.logic.stateMachine.flowInitiator
if (initiator is FlowInitiator.Scheduled)
val context = it.logic.stateMachine.context
if (context.origin is Origin.Scheduled)
countScheduledFlows++
}
}
nodeA.services.startFlow(InsertInitialStateFlow(nodeB.info.chooseIdentity()))
aliceNode.services.startFlow(InsertInitialStateFlow(bob, notary))
mockNet.waitQuiescent()
val stateFromA = nodeA.database.transaction {
nodeA.services.vaultService.queryBy<ScheduledState>().states.single()
val stateFromA = aliceNode.database.transaction {
aliceNode.services.vaultService.queryBy<ScheduledState>().states.single()
}
val stateFromB = nodeB.database.transaction {
nodeB.services.vaultService.queryBy<ScheduledState>().states.single()
val stateFromB = bobNode.database.transaction {
bobNode.services.vaultService.queryBy<ScheduledState>().states.single()
}
assertEquals(1, countScheduledFlows)
assertEquals("Must be same copy on both nodes", stateFromA, stateFromB)

@ -134,8 +138,8 @@ class ScheduledFlowTests {
val N = 100
val futures = mutableListOf<CordaFuture<*>>()
for (i in 0 until N) {
futures.add(nodeA.services.startFlow(InsertInitialStateFlow(nodeB.info.chooseIdentity())).resultFuture)
futures.add(nodeB.services.startFlow(InsertInitialStateFlow(nodeA.info.chooseIdentity())).resultFuture)
futures.add(aliceNode.services.startFlow(InsertInitialStateFlow(bob, notary)).resultFuture)
futures.add(bobNode.services.startFlow(InsertInitialStateFlow(alice, notary)).resultFuture)
}
mockNet.waitQuiescent()

@ -143,11 +147,11 @@ class ScheduledFlowTests {
futures.forEach { it.getOrThrow() }

// Convert the states into maps to make error reporting easier
val statesFromA: List<StateAndRef<ScheduledState>> = nodeA.database.transaction {
queryStatesWithPaging(nodeA.services.vaultService)
val statesFromA: List<StateAndRef<ScheduledState>> = aliceNode.database.transaction {
queryStatesWithPaging(aliceNode.services.vaultService)
}
val statesFromB: List<StateAndRef<ScheduledState>> = nodeB.database.transaction {
queryStatesWithPaging(nodeB.services.vaultService)
val statesFromB: List<StateAndRef<ScheduledState>> = bobNode.database.transaction {
queryStatesWithPaging(bobNode.services.vaultService)
}
assertEquals("Expect all states to be present", 2 * N, statesFromA.count())
statesFromA.forEach { ref ->
@ -9,7 +9,6 @@ import net.corda.core.messaging.RPCOps
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.node.services.RPCUserService
import net.corda.node.services.RPCUserServiceImpl
import net.corda.node.services.api.MonitoringService
import net.corda.node.services.config.NodeConfiguration
import net.corda.node.services.config.configureWithDevSSLCertificate
import net.corda.node.services.network.NetworkMapCacheImpl

@ -79,7 +78,7 @@ class ArtemisMessagingTests {
LogHelper.setLevel(PersistentUniquenessProvider::class)
database = configureDatabase(makeTestDataSourceProperties(), makeTestDatabaseProperties(), ::makeTestIdentityService)
networkMapRegistrationFuture = doneFuture(Unit)
networkMapCache = NetworkMapCacheImpl(PersistentNetworkMapCache(database, config, emptyList()), rigorousMock())
networkMapCache = NetworkMapCacheImpl(PersistentNetworkMapCache(database), rigorousMock())
}

@After

@ -212,7 +211,7 @@ class ArtemisMessagingTests {
identity.public,
ServiceAffinityExecutor("ArtemisMessagingTests", 1),
database,
MonitoringService(MetricRegistry())).apply {
MetricRegistry()).apply {
config.configureWithDevSSLCertificate()
messagingClient = this
}
@ -1,11 +1,12 @@
package net.corda.node.services.network

import net.corda.core.node.services.NetworkMapCache
import net.corda.testing.ALICE
import net.corda.testing.BOB
import net.corda.testing.chooseIdentity
import net.corda.testing.ALICE_NAME
import net.corda.testing.BOB_NAME
import net.corda.testing.DUMMY_NOTARY
import net.corda.testing.node.MockNetwork
import net.corda.testing.node.MockNodeParameters
import net.corda.testing.singleIdentity
import org.assertj.core.api.Assertions.assertThat
import org.junit.After
import org.junit.Test

@ -13,7 +14,7 @@ import java.math.BigInteger
import kotlin.test.assertEquals

class NetworkMapCacheTest {
val mockNet: MockNetwork = MockNetwork()
private val mockNet = MockNetwork()

@After
fun teardown() {

@ -23,28 +24,29 @@ class NetworkMapCacheTest {
@Test
fun `key collision`() {
val entropy = BigInteger.valueOf(24012017L)
val aliceNode = mockNet.createNode(MockNodeParameters(legalName = ALICE.name, entropyRoot = entropy))
mockNet.runNetwork()
val aliceNode = mockNet.createNode(MockNodeParameters(legalName = ALICE_NAME, entropyRoot = entropy))
val alice = aliceNode.info.singleIdentity()

// Node A currently knows only about itself, so this returns node A
assertEquals(aliceNode.services.networkMapCache.getNodesByLegalIdentityKey(aliceNode.info.chooseIdentity().owningKey).singleOrNull(), aliceNode.info)
val bobNode = mockNet.createNode(MockNodeParameters(legalName = BOB.name, entropyRoot = entropy))
assertEquals(aliceNode.info.chooseIdentity(), bobNode.info.chooseIdentity())
assertEquals(aliceNode.services.networkMapCache.getNodesByLegalIdentityKey(alice.owningKey).singleOrNull(), aliceNode.info)
val bobNode = mockNet.createNode(MockNodeParameters(legalName = BOB_NAME, entropyRoot = entropy))
val bob = bobNode.info.singleIdentity()
assertEquals(alice, bob)

aliceNode.services.networkMapCache.addNode(bobNode.info)
// The details of node B write over those for node A
assertEquals(aliceNode.services.networkMapCache.getNodesByLegalIdentityKey(aliceNode.info.chooseIdentity().owningKey).singleOrNull(), bobNode.info)
assertEquals(aliceNode.services.networkMapCache.getNodesByLegalIdentityKey(alice.owningKey).singleOrNull(), bobNode.info)
}

@Test
fun `getNodeByLegalIdentity`() {
val aliceNode = mockNet.createPartyNode(ALICE.name)
val bobNode = mockNet.createPartyNode(BOB.name)
val aliceNode = mockNet.createPartyNode(ALICE_NAME)
val alice = aliceNode.info.singleIdentity()
val bobNode = mockNet.createPartyNode(BOB_NAME)
val bobCache: NetworkMapCache = bobNode.services.networkMapCache
val expected = aliceNode.info

mockNet.runNetwork()
val actual = bobNode.database.transaction { bobCache.getNodeByLegalIdentity(aliceNode.info.chooseIdentity()) }
val actual = bobNode.database.transaction { bobCache.getNodeByLegalIdentity(alice) }
assertEquals(expected, actual)

// TODO: Should have a test case with anonymous lookup

@ -52,29 +54,27 @@ class NetworkMapCacheTest {

@Test
fun `getPeerByLegalName`() {
val aliceNode = mockNet.createPartyNode(ALICE.name)
val bobNode = mockNet.createPartyNode(BOB.name)
val aliceNode = mockNet.createPartyNode(ALICE_NAME)
val bobNode = mockNet.createPartyNode(BOB_NAME)
val bobCache: NetworkMapCache = bobNode.services.networkMapCache
val expected = aliceNode.info.legalIdentities.single()
val expected = aliceNode.info.singleIdentity()

mockNet.runNetwork()
val actual = bobNode.database.transaction { bobCache.getPeerByLegalName(ALICE.name) }
val actual = bobNode.database.transaction { bobCache.getPeerByLegalName(ALICE_NAME) }
assertEquals(expected, actual)
}

@Test
fun `remove node from cache`() {
val aliceNode = mockNet.createPartyNode(ALICE.name)
val bobNode = mockNet.createPartyNode(BOB.name)
val bobLegalIdentity = bobNode.info.chooseIdentity()
val alice = aliceNode.info.chooseIdentity()
val aliceNode = mockNet.createPartyNode(ALICE_NAME)
val bobNode = mockNet.createPartyNode(BOB_NAME)
val bob = bobNode.info.singleIdentity()
val alice = aliceNode.info.singleIdentity()
val bobCache = bobNode.services.networkMapCache
mockNet.runNetwork()
bobNode.database.transaction {
assertThat(bobCache.getNodeByLegalIdentity(alice) != null)
bobCache.removeNode(aliceNode.info)
assertThat(bobCache.getNodeByLegalIdentity(alice) == null)
assertThat(bobCache.getNodeByLegalIdentity(bobLegalIdentity) != null)
assertThat(bobCache.getNodeByLegalIdentity(bob) != null)
assertThat(bobCache.getNodeByLegalName(alice.name) == null)
}
}
@ -8,6 +8,8 @@ import net.corda.core.node.NodeInfo
import net.corda.core.serialization.deserialize
import net.corda.core.serialization.serialize
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.core.utilities.seconds
import net.corda.node.services.network.TestNodeInfoFactory.createNodeInfo
import net.corda.node.utilities.CertificateType
import net.corda.node.utilities.X509Utilities
import net.corda.testing.SerializationEnvironmentRule

@ -29,6 +31,7 @@ import org.junit.Test
import java.io.ByteArrayInputStream
import java.io.InputStream
import java.net.InetSocketAddress
import java.net.URL
import java.security.cert.CertPath
import java.security.cert.Certificate
import java.security.cert.CertificateFactory

@ -39,10 +42,10 @@ import javax.ws.rs.core.Response
import javax.ws.rs.core.Response.ok
import kotlin.test.assertEquals

class HTTPNetworkMapClientTest {
class NetworkMapClientTest {
@Rule
@JvmField
val testSerialization = SerializationEnvironmentRule()
val testSerialization = SerializationEnvironmentRule(true)
private lateinit var server: Server

private lateinit var networkMapClient: NetworkMapClient

@ -62,7 +65,7 @@ class HTTPNetworkMapClientTest {
register(MockNetworkMapServer())
}
val jerseyServlet = ServletHolder(ServletContainer(resourceConfig)).apply { initOrder = 0 }// Initialise at server start
addServlet(jerseyServlet, "/api/*")
addServlet(jerseyServlet, "/*")
})
}
}

@ -73,7 +76,7 @@ class HTTPNetworkMapClientTest {
}

val hostAndPort = server.connectors.mapNotNull { it as? ServerConnector }.first()
networkMapClient = HTTPNetworkMapClient("http://${hostAndPort.host}:${hostAndPort.localPort}/api/network-map")
networkMapClient = NetworkMapClient(URL("http://${hostAndPort.host}:${hostAndPort.localPort}"))
}

@After

@ -91,7 +94,7 @@ class HTTPNetworkMapClientTest {

val nodeInfoHash = nodeInfo.serialize().sha256()

assertThat(networkMapClient.getNetworkMap()).containsExactly(nodeInfoHash)
assertThat(networkMapClient.getNetworkMap().networkMap).containsExactly(nodeInfoHash)
assertEquals(nodeInfo, networkMapClient.getNodeInfo(nodeInfoHash))

val signedNodeInfo2 = createNodeInfo("Test2")

@ -99,27 +102,21 @@ class HTTPNetworkMapClientTest {
networkMapClient.publish(signedNodeInfo2)

val nodeInfoHash2 = nodeInfo2.serialize().sha256()
assertThat(networkMapClient.getNetworkMap()).containsExactly(nodeInfoHash, nodeInfoHash2)
assertThat(networkMapClient.getNetworkMap().networkMap).containsExactly(nodeInfoHash, nodeInfoHash2)
assertEquals(100000.seconds, networkMapClient.getNetworkMap().cacheMaxAge)
assertEquals(nodeInfo2, networkMapClient.getNodeInfo(nodeInfoHash2))
}

private fun createNodeInfo(organisation: String): SignedData<NodeInfo> {
val keyPair = Crypto.generateKeyPair(X509Utilities.DEFAULT_TLS_SIGNATURE_SCHEME)
val clientCert = X509Utilities.createCertificate(CertificateType.CLIENT_CA, intermediateCACert, intermediateCAKey, CordaX500Name(organisation = organisation, locality = "London", country = "GB"), keyPair.public)
val certPath = buildCertPath(clientCert.toX509Certificate(), intermediateCACert.toX509Certificate(), rootCACert.toX509Certificate())
val nodeInfo = NodeInfo(listOf(NetworkHostAndPort("my.$organisation.com", 1234)), listOf(PartyAndCertificate(certPath)), 1, serial = 1L)

// Create digital signature.
val digitalSignature = DigitalSignature.WithKey(keyPair.public, Crypto.doSign(keyPair.private, nodeInfo.serialize().bytes))

return SignedData(nodeInfo.serialize(), digitalSignature)
@Test
fun `get hostname string from http response correctly`() {
assertEquals("test.host.name", networkMapClient.myPublicHostname())
}
}

@Path("network-map")
// This is a stub implementation of the network map rest API.
internal class MockNetworkMapServer {
private val nodeInfos = mutableMapOf<SecureHash, NodeInfo>()
val nodeInfoMap = mutableMapOf<SecureHash, NodeInfo>()
@POST
@Path("publish")
@Consumes(MediaType.APPLICATION_OCTET_STREAM)

@ -127,33 +124,31 @@ internal class MockNetworkMapServer {
val registrationData = input.readBytes().deserialize<SignedData<NodeInfo>>()
val nodeInfo = registrationData.verified()
val nodeInfoHash = nodeInfo.serialize().sha256()
nodeInfos.put(nodeInfoHash, nodeInfo)
nodeInfoMap.put(nodeInfoHash, nodeInfo)
return ok().build()
}

@GET
@Produces(MediaType.APPLICATION_JSON)
fun getNetworkMap(): Response {
return Response.ok(ObjectMapper().writeValueAsString(nodeInfos.keys.map { it.toString() })).build()
return Response.ok(ObjectMapper().writeValueAsString(nodeInfoMap.keys.map { it.toString() })).header("Cache-Control", "max-age=100000").build()
}

@GET
@Path("{var}")
@Produces(MediaType.APPLICATION_OCTET_STREAM)
fun getNodeInfo(@PathParam("var") nodeInfoHash: String): Response {
val nodeInfo = nodeInfos[SecureHash.parse(nodeInfoHash)]
val nodeInfo = nodeInfoMap[SecureHash.parse(nodeInfoHash)]
return if (nodeInfo != null) {
Response.ok(nodeInfo.serialize().bytes)
} else {
Response.status(Response.Status.NOT_FOUND)
}.build()
}
}

private fun buildCertPath(vararg certificates: Certificate): CertPath {
return CertificateFactory.getInstance("X509").generateCertPath(certificates.asList())
@GET
@Path("my-hostname")
fun getHostName(): Response {
return Response.ok("test.host.name").build()
}
}

private fun X509CertificateHolder.toX509Certificate(): X509Certificate {
return CertificateFactory.getInstance("X509").generateCertificate(ByteArrayInputStream(encoded)) as X509Certificate
}
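Not part of the commit: the stub server above returns a "Cache-Control: max-age=100000" header and the test asserts getNetworkMap().cacheMaxAge == 100000.seconds. Below is a small hypothetical Kotlin helper showing one way such a header could be turned into a duration; the function name and its use inside NetworkMapClient are assumptions, not the real implementation.

import java.time.Duration

// Parse "max-age=<seconds>" out of a Cache-Control header, ignoring any other directives.
fun parseMaxAge(cacheControl: String?): Duration? {
    val seconds = cacheControl
            ?.split(",")
            ?.map { it.trim() }
            ?.firstOrNull { it.startsWith("max-age=") }
            ?.removePrefix("max-age=")
            ?.toLongOrNull()
    return seconds?.let { Duration.ofSeconds(it) }
}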
@ -0,0 +1,232 @@
package net.corda.node.services.network

import com.google.common.jimfs.Configuration
import com.google.common.jimfs.Jimfs
import com.nhaarman.mockito_kotlin.any
import com.nhaarman.mockito_kotlin.mock
import com.nhaarman.mockito_kotlin.times
import com.nhaarman.mockito_kotlin.verify
import net.corda.cordform.CordformNode
import net.corda.core.crypto.Crypto
import net.corda.core.crypto.SecureHash
import net.corda.core.crypto.SignedData
import net.corda.core.identity.Party
import net.corda.core.internal.div
import net.corda.core.internal.uncheckedCast
import net.corda.core.node.NodeInfo
import net.corda.core.serialization.serialize
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.core.utilities.millis
import net.corda.node.services.api.NetworkMapCacheInternal
import net.corda.testing.SerializationEnvironmentRule
import org.junit.Rule
import org.junit.Test
import rx.schedulers.TestScheduler
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.TimeUnit
import kotlin.test.assertEquals

class NetworkMapUpdaterTest {
@Rule
@JvmField
val testSerialization = SerializationEnvironmentRule(true)
private val jimFs = Jimfs.newFileSystem(Configuration.unix())
private val baseDir = jimFs.getPath("/node")

@Test
fun `publish node info`() {
val keyPair = Crypto.generateKeyPair()

val nodeInfo1 = TestNodeInfoFactory.createNodeInfo("Info 1").verified()
val signedNodeInfo = TestNodeInfoFactory.sign(keyPair, nodeInfo1)

val sameNodeInfoDifferentTime = nodeInfo1.copy(serial = System.currentTimeMillis())
val signedSameNodeInfoDifferentTime = TestNodeInfoFactory.sign(keyPair, sameNodeInfoDifferentTime)

val differentNodeInfo = nodeInfo1.copy(addresses = listOf(NetworkHostAndPort("my.new.host.com", 1000)))
val signedDifferentNodeInfo = TestNodeInfoFactory.sign(keyPair, differentNodeInfo)

val networkMapCache = getMockNetworkMapCache()

val networkMapClient = mock<NetworkMapClient>()

val scheduler = TestScheduler()
val fileWatcher = NodeInfoWatcher(baseDir, scheduler = scheduler)
val updater = NetworkMapUpdater(networkMapCache, fileWatcher, networkMapClient)

// Publish node info for the first time.
updater.updateNodeInfo(nodeInfo1) { signedNodeInfo }
// Sleep as publish is asynchronous.
// TODO: Remove sleep in unit test
Thread.sleep(200)
verify(networkMapClient, times(1)).publish(any())

networkMapCache.addNode(nodeInfo1)

// Publish the same node info, but with different serial.
updater.updateNodeInfo(sameNodeInfoDifferentTime) { signedSameNodeInfoDifferentTime }
// TODO: Remove sleep in unit test.
Thread.sleep(200)

// Same node info should not publish twice
verify(networkMapClient, times(0)).publish(signedSameNodeInfoDifferentTime)

// Publish different node info.
updater.updateNodeInfo(differentNodeInfo) { signedDifferentNodeInfo }
// TODO: Remove sleep in unit test.
Thread.sleep(200)
verify(networkMapClient, times(1)).publish(signedDifferentNodeInfo)

updater.close()
}

@Test
fun `process add node updates from network map, with additional node infos from dir`() {
val nodeInfo1 = TestNodeInfoFactory.createNodeInfo("Info 1")
val nodeInfo2 = TestNodeInfoFactory.createNodeInfo("Info 2")
val nodeInfo3 = TestNodeInfoFactory.createNodeInfo("Info 3")
val nodeInfo4 = TestNodeInfoFactory.createNodeInfo("Info 4")
val fileNodeInfo = TestNodeInfoFactory.createNodeInfo("Info from file")
val networkMapCache = getMockNetworkMapCache()
val nodeInfoMap = ConcurrentHashMap<SecureHash, SignedData<NodeInfo>>()
|
||||
val networkMapClient = mock<NetworkMapClient> {
|
||||
on { publish(any()) }.then {
|
||||
val signedNodeInfo: SignedData<NodeInfo> = uncheckedCast(it.arguments.first())
|
||||
nodeInfoMap.put(signedNodeInfo.verified().serialize().hash, signedNodeInfo)
|
||||
}
|
||||
on { getNetworkMap() }.then { NetworkMapResponse(nodeInfoMap.keys.toList(), 100.millis) }
|
||||
on { getNodeInfo(any()) }.then { nodeInfoMap[it.arguments.first()]?.verified() }
|
||||
}
|
||||
|
||||
val scheduler = TestScheduler()
|
||||
val fileWatcher = NodeInfoWatcher(baseDir, scheduler = scheduler)
|
||||
val updater = NetworkMapUpdater(networkMapCache, fileWatcher, networkMapClient)
|
||||
|
||||
// Test adding new node.
|
||||
networkMapClient.publish(nodeInfo1)
|
||||
// Not subscribed yet.
|
||||
verify(networkMapCache, times(0)).addNode(any())
|
||||
|
||||
updater.subscribeToNetworkMap()
|
||||
networkMapClient.publish(nodeInfo2)
|
||||
|
||||
// TODO: Remove sleep in unit test.
|
||||
Thread.sleep(200)
|
||||
verify(networkMapCache, times(2)).addNode(any())
|
||||
verify(networkMapCache, times(1)).addNode(nodeInfo1.verified())
|
||||
verify(networkMapCache, times(1)).addNode(nodeInfo2.verified())
|
||||
|
||||
NodeInfoWatcher.saveToFile(baseDir / CordformNode.NODE_INFO_DIRECTORY, fileNodeInfo)
|
||||
networkMapClient.publish(nodeInfo3)
|
||||
networkMapClient.publish(nodeInfo4)
|
||||
|
||||
scheduler.advanceTimeBy(10, TimeUnit.SECONDS)
|
||||
// TODO: Remove sleep in unit test.
|
||||
Thread.sleep(200)
|
||||
|
||||
// 4 node info from network map, and 1 from file.
|
||||
verify(networkMapCache, times(5)).addNode(any())
|
||||
verify(networkMapCache, times(1)).addNode(nodeInfo3.verified())
|
||||
verify(networkMapCache, times(1)).addNode(nodeInfo4.verified())
|
||||
verify(networkMapCache, times(1)).addNode(fileNodeInfo.verified())
|
||||
|
||||
updater.close()
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `process remove node updates from network map, with additional node infos from dir`() {
|
||||
val nodeInfo1 = TestNodeInfoFactory.createNodeInfo("Info 1")
|
||||
val nodeInfo2 = TestNodeInfoFactory.createNodeInfo("Info 2")
|
||||
val nodeInfo3 = TestNodeInfoFactory.createNodeInfo("Info 3")
|
||||
val nodeInfo4 = TestNodeInfoFactory.createNodeInfo("Info 4")
|
||||
val fileNodeInfo = TestNodeInfoFactory.createNodeInfo("Info from file")
|
||||
val networkMapCache = getMockNetworkMapCache()
|
||||
|
||||
val nodeInfoMap = ConcurrentHashMap<SecureHash, SignedData<NodeInfo>>()
|
||||
val networkMapClient = mock<NetworkMapClient> {
|
||||
on { publish(any()) }.then {
|
||||
val signedNodeInfo: SignedData<NodeInfo> = uncheckedCast(it.arguments.first())
|
||||
nodeInfoMap.put(signedNodeInfo.verified().serialize().hash, signedNodeInfo)
|
||||
}
|
||||
on { getNetworkMap() }.then { NetworkMapResponse(nodeInfoMap.keys.toList(), 100.millis) }
|
||||
on { getNodeInfo(any()) }.then { nodeInfoMap[it.arguments.first()]?.verified() }
|
||||
}
|
||||
|
||||
val scheduler = TestScheduler()
|
||||
val fileWatcher = NodeInfoWatcher(baseDir, scheduler = scheduler)
|
||||
val updater = NetworkMapUpdater(networkMapCache, fileWatcher, networkMapClient)
|
||||
|
||||
// Add all nodes.
|
||||
NodeInfoWatcher.saveToFile(baseDir / CordformNode.NODE_INFO_DIRECTORY, fileNodeInfo)
|
||||
networkMapClient.publish(nodeInfo1)
|
||||
networkMapClient.publish(nodeInfo2)
|
||||
networkMapClient.publish(nodeInfo3)
|
||||
networkMapClient.publish(nodeInfo4)
|
||||
|
||||
updater.subscribeToNetworkMap()
|
||||
scheduler.advanceTimeBy(10, TimeUnit.SECONDS)
|
||||
// TODO: Remove sleep in unit test.
|
||||
Thread.sleep(200)
|
||||
|
||||
// 4 node info from network map, and 1 from file.
|
||||
assertEquals(4, nodeInfoMap.size)
|
||||
verify(networkMapCache, times(5)).addNode(any())
|
||||
verify(networkMapCache, times(1)).addNode(fileNodeInfo.verified())
|
||||
|
||||
// Test remove node.
|
||||
nodeInfoMap.clear()
|
||||
// TODO: Remove sleep in unit test.
|
||||
Thread.sleep(200)
|
||||
verify(networkMapCache, times(4)).removeNode(any())
|
||||
verify(networkMapCache, times(1)).removeNode(nodeInfo1.verified())
|
||||
verify(networkMapCache, times(1)).removeNode(nodeInfo2.verified())
|
||||
verify(networkMapCache, times(1)).removeNode(nodeInfo3.verified())
|
||||
verify(networkMapCache, times(1)).removeNode(nodeInfo4.verified())
|
||||
|
||||
// Node info from file should not be deleted
|
||||
assertEquals(1, networkMapCache.allNodeHashes.size)
|
||||
assertEquals(fileNodeInfo.verified().serialize().hash, networkMapCache.allNodeHashes.first())
|
||||
|
||||
updater.close()
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `receive node infos from directory, without a network map`() {
|
||||
val fileNodeInfo = TestNodeInfoFactory.createNodeInfo("Info from file")
|
||||
|
||||
val networkMapCache = getMockNetworkMapCache()
|
||||
|
||||
val scheduler = TestScheduler()
|
||||
val fileWatcher = NodeInfoWatcher(baseDir, scheduler = scheduler)
|
||||
val updater = NetworkMapUpdater(networkMapCache, fileWatcher, null)
|
||||
|
||||
// Not subscribed yet.
|
||||
verify(networkMapCache, times(0)).addNode(any())
|
||||
|
||||
updater.subscribeToNetworkMap()
|
||||
|
||||
NodeInfoWatcher.saveToFile(baseDir / CordformNode.NODE_INFO_DIRECTORY, fileNodeInfo)
|
||||
scheduler.advanceTimeBy(10, TimeUnit.SECONDS)
|
||||
|
||||
verify(networkMapCache, times(1)).addNode(any())
|
||||
verify(networkMapCache, times(1)).addNode(fileNodeInfo.verified())
|
||||
|
||||
assertEquals(1, networkMapCache.allNodeHashes.size)
|
||||
assertEquals(fileNodeInfo.verified().serialize().hash, networkMapCache.allNodeHashes.first())
|
||||
|
||||
updater.close()
|
||||
}
|
||||
|
||||
private fun getMockNetworkMapCache() = mock<NetworkMapCacheInternal> {
|
||||
val data = ConcurrentHashMap<Party, NodeInfo>()
|
||||
on { addNode(any()) }.then {
|
||||
val nodeInfo = it.arguments.first() as NodeInfo
|
||||
data.put(nodeInfo.legalIdentities.first(), nodeInfo)
|
||||
}
|
||||
on { removeNode(any()) }.then { data.remove((it.arguments.first() as NodeInfo).legalIdentities.first()) }
|
||||
on { getNodeByLegalIdentity(any()) }.then { data[it.arguments.first()] }
|
||||
on { allNodeHashes }.then { data.values.map { it.serialize().hash } }
|
||||
on { getNodeByHash(any()) }.then { mock -> data.values.single { it.serialize().hash == mock.arguments.first() } }
|
||||
}
|
||||
}
|
@ -0,0 +1,50 @@
package net.corda.node.services.network

import net.corda.core.crypto.Crypto
import net.corda.core.crypto.DigitalSignature
import net.corda.core.crypto.SignedData
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.PartyAndCertificate
import net.corda.core.node.NodeInfo
import net.corda.core.serialization.serialize
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.node.utilities.CertificateType
import net.corda.node.utilities.X509Utilities
import org.bouncycastle.asn1.x500.X500Name
import org.bouncycastle.cert.X509CertificateHolder
import java.io.ByteArrayInputStream
import java.security.KeyPair
import java.security.cert.CertPath
import java.security.cert.Certificate
import java.security.cert.CertificateFactory
import java.security.cert.X509Certificate

object TestNodeInfoFactory {
private val rootCAKey = Crypto.generateKeyPair(X509Utilities.DEFAULT_TLS_SIGNATURE_SCHEME)
private val rootCACert = X509Utilities.createSelfSignedCACertificate(CordaX500Name(commonName = "Corda Node Root CA", organisation = "R3 LTD", locality = "London", country = "GB"), rootCAKey)
private val intermediateCAKey = Crypto.generateKeyPair(X509Utilities.DEFAULT_TLS_SIGNATURE_SCHEME)
private val intermediateCACert = X509Utilities.createCertificate(CertificateType.INTERMEDIATE_CA, rootCACert, rootCAKey, X500Name("CN=Corda Node Intermediate CA,L=London"), intermediateCAKey.public)

fun createNodeInfo(organisation: String): SignedData<NodeInfo> {
val keyPair = Crypto.generateKeyPair(X509Utilities.DEFAULT_TLS_SIGNATURE_SCHEME)
val clientCert = X509Utilities.createCertificate(CertificateType.CLIENT_CA, intermediateCACert, intermediateCAKey, CordaX500Name(organisation = organisation, locality = "London", country = "GB"), keyPair.public)
val certPath = buildCertPath(clientCert.toX509Certificate(), intermediateCACert.toX509Certificate(), rootCACert.toX509Certificate())
val nodeInfo = NodeInfo(listOf(NetworkHostAndPort("my.$organisation.com", 1234)), listOf(PartyAndCertificate(certPath)), 1, serial = 1L)
return sign(keyPair, nodeInfo)
}

fun <T : Any> sign(keyPair: KeyPair, t: T): SignedData<T> {
// Create digital signature.
val digitalSignature = DigitalSignature.WithKey(keyPair.public, Crypto.doSign(keyPair.private, t.serialize().bytes))
return SignedData(t.serialize(), digitalSignature)
}

private fun buildCertPath(vararg certificates: Certificate): CertPath {
return CertificateFactory.getInstance("X509").generateCertPath(certificates.asList())
}

private fun X509CertificateHolder.toX509Certificate(): X509Certificate {
return CertificateFactory.getInstance("X509").generateCertificate(ByteArrayInputStream(encoded)) as X509Certificate
}

}
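// --- Illustrative sketch, not part of this commit ---
// Intended use of the factory above: createNodeInfo returns a SignedData<NodeInfo> whose verified()
// call re-checks the signature over the serialised payload and throws on mismatch.
// "Example Corp" is a made-up organisation name for the example.
fun exampleTestNodeInfoUsage() {
    val signed = TestNodeInfoFactory.createNodeInfo("Example Corp")
    val nodeInfo = signed.verified()
    check(nodeInfo.legalIdentities.single().name.organisation == "Example Corp")
}
// --- end of illustrative sketch ---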
@ -18,10 +18,7 @@ import net.corda.core.transactions.SignedTransaction
import net.corda.finance.DOLLARS
import net.corda.finance.POUNDS
import net.corda.finance.SWISS_FRANCS
import net.corda.finance.contracts.asset.Cash
import net.corda.finance.contracts.asset.DUMMY_CASH_ISSUER
import net.corda.finance.contracts.asset.DUMMY_CASH_ISSUER_KEY
import net.corda.finance.contracts.asset.DummyFungibleContract
import net.corda.finance.contracts.asset.*
import net.corda.finance.schemas.CashSchemaV1
import net.corda.finance.schemas.SampleCashSchemaV2
import net.corda.finance.schemas.SampleCashSchemaV3
@ -74,13 +71,13 @@ class HibernateConfigurationTest {
@Before
fun setUp() {
val cordappPackages = listOf("net.corda.testing.contracts", "net.corda.finance.contracts.asset")
issuerServices = MockServices(cordappPackages, DUMMY_CASH_ISSUER_KEY, BOB_KEY, BOC_KEY)
issuerServices = MockServices(cordappPackages, DUMMY_CASH_ISSUER_NAME, DUMMY_CASH_ISSUER_KEY, BOB_KEY, BOC_KEY)
val dataSourceProps = makeTestDataSourceProperties()
val defaultDatabaseProperties = makeTestDatabaseProperties()
database = configureDatabase(dataSourceProps, defaultDatabaseProperties, ::makeTestIdentityService)
database.transaction {
hibernateConfig = database.hibernateConfig
services = object : MockServices(cordappPackages, BOB_KEY, BOC_KEY, DUMMY_NOTARY_KEY) {
services = object : MockServices(cordappPackages, BOB_NAME, BOB_KEY, BOC_KEY, DUMMY_NOTARY_KEY) {
override val vaultService = makeVaultService(database.hibernateConfig)
override fun recordTransactions(statesToRecord: StatesToRecord, txs: Iterable<SignedTransaction>) {
for (stx in txs) {
@ -139,7 +136,9 @@ class HibernateConfigurationTest {

// execute query
val queryResults = entityManager.createQuery(criteriaQuery).resultList
val coins = queryResults.map { it.contractState.deserialize<TransactionState<Cash.State>>(context = SerializationDefaults.STORAGE_CONTEXT).data }.sumCash()
val coins = queryResults.map {
(services.loadState(toStateRef(it.stateRef!!)) as TransactionState<Cash.State>).data
}.sumCash()
assertThat(coins.toDecimal() >= BigDecimal("50.00"))
}

@ -657,8 +656,7 @@ class HibernateConfigurationTest {
val queryResults = entityManager.createQuery(criteriaQuery).resultList

queryResults.forEach {
val contractState = it.contractState.deserialize<TransactionState<ContractState>>(context = SerializationDefaults.STORAGE_CONTEXT)
val cashState = contractState.data as Cash.State
val cashState = (services.loadState(toStateRef(it.stateRef!!)) as TransactionState<Cash.State>).data
println("${it.stateRef} with owner: ${cashState.owner.owningKey.toBase58String()}")
}

@ -742,8 +740,7 @@ class HibernateConfigurationTest {
// execute query
val queryResults = entityManager.createQuery(criteriaQuery).resultList
queryResults.forEach {
val contractState = it.contractState.deserialize<TransactionState<ContractState>>(context = SerializationDefaults.STORAGE_CONTEXT)
val cashState = contractState.data as Cash.State
val cashState = (services.loadState(toStateRef(it.stateRef!!)) as TransactionState<Cash.State>).data
println("${it.stateRef} with owner ${cashState.owner.owningKey.toBase58String()} and participants ${cashState.participants.map { it.owningKey.toBase58String() }}")
}

@ -880,4 +877,8 @@ class HibernateConfigurationTest {
Assert.assertEquals(cashStates.count(), count)
}
}

private fun toStateRef(pStateRef: PersistentStateRef): StateRef {
return StateRef(SecureHash.parse(pStateRef.txId!!), pStateRef.index!!)
}
}
@ -9,6 +9,10 @@ import net.corda.core.internal.read
import net.corda.core.internal.readAll
import net.corda.core.internal.write
import net.corda.core.internal.writeLines
import net.corda.core.node.services.vault.AttachmentQueryCriteria
import net.corda.core.node.services.vault.AttachmentSort
import net.corda.core.node.services.vault.Builder
import net.corda.core.node.services.vault.Sort
import net.corda.node.services.transactions.PersistentUniquenessProvider
import net.corda.node.utilities.CordaPersistence
import net.corda.node.utilities.configureDatabase
@ -51,8 +55,7 @@ class NodeAttachmentStorageTest {

@Test
fun `insert and retrieve`() {
val testJar = makeTestJar()
val expectedHash = testJar.readAll().sha256()
val (testJar,expectedHash) = makeTestJar()

database.transaction {
val storage = NodeAttachmentService(MetricRegistry())
@ -77,10 +80,87 @@ class NodeAttachmentStorageTest {
}
}

@Test
fun `metadata can be used to search`() {
val (jarA,hashA) = makeTestJar()
val (jarB,hashB) = makeTestJar(listOf(Pair("file","content")))
val (jarC,hashC) = makeTestJar(listOf(Pair("magic_file","magic_content_puff")))

database.transaction {
val storage = NodeAttachmentService(MetricRegistry())

jarA.read { storage.importAttachment(it) }
jarB.read { storage.importAttachment(it, "uploaderB", "fileB.zip") }
jarC.read { storage.importAttachment(it, "uploaderC", "fileC.zip") }

assertEquals(
listOf(hashB),
storage.queryAttachments( AttachmentQueryCriteria.AttachmentsQueryCriteria( Builder.equal("uploaderB")))
)

assertEquals (
listOf(hashB, hashC),
storage.queryAttachments( AttachmentQueryCriteria.AttachmentsQueryCriteria( Builder.like ("%uploader%")))
)
}
}

@Test
fun `sorting and compound conditions work`() {
val (jarA,hashA) = makeTestJar(listOf(Pair("a","a")))
val (jarB,hashB) = makeTestJar(listOf(Pair("b","b")))
val (jarC,hashC) = makeTestJar(listOf(Pair("c","c")))

fun uploaderCondition(s:String) = AttachmentQueryCriteria.AttachmentsQueryCriteria(uploaderCondition = Builder.equal(s))
fun filenamerCondition(s:String) = AttachmentQueryCriteria.AttachmentsQueryCriteria(filenameCondition = Builder.equal(s))

fun filenameSort(direction: Sort.Direction) = AttachmentSort(listOf(AttachmentSort.AttachmentSortColumn(AttachmentSort.AttachmentSortAttribute.FILENAME, direction)))

database.transaction {
val storage = NodeAttachmentService(MetricRegistry())

jarA.read { storage.importAttachment(it, "complexA", "archiveA.zip") }
jarB.read { storage.importAttachment(it, "complexB", "archiveB.zip") }
jarC.read { storage.importAttachment(it, "complexC", "archiveC.zip") }

// DOCSTART AttachmentQueryExample1

assertEquals(
emptyList(),
storage.queryAttachments(
AttachmentQueryCriteria.AttachmentsQueryCriteria(uploaderCondition = Builder.equal("complexA"))
.and(AttachmentQueryCriteria.AttachmentsQueryCriteria(uploaderCondition = Builder.equal("complexB"))))
)

assertEquals(
listOf(hashA, hashB),
storage.queryAttachments(

AttachmentQueryCriteria.AttachmentsQueryCriteria(uploaderCondition = Builder.equal("complexA"))
.or(AttachmentQueryCriteria.AttachmentsQueryCriteria(uploaderCondition = Builder.equal("complexB"))))
)

val complexCondition =
(uploaderCondition("complexB").and(filenamerCondition("archiveB.zip"))).or(filenamerCondition("archiveC.zip"))

// DOCEND AttachmentQueryExample1

assertEquals (
listOf(hashB, hashC),
storage.queryAttachments(complexCondition, sorting = filenameSort(Sort.Direction.ASC))
)
assertEquals (
listOf(hashC, hashB),
storage.queryAttachments(complexCondition, sorting = filenameSort(Sort.Direction.DESC))
)

}
}

@Ignore("We need to be able to restart nodes - make importing attachments idempotent?")
@Test
fun `duplicates not allowed`() {
val testJar = makeTestJar()
val (testJar,_) = makeTestJar()
database.transaction {
val storage = NodeAttachmentService(MetricRegistry())
testJar.read {
@ -96,7 +176,7 @@ class NodeAttachmentStorageTest {

@Test
fun `corrupt entry throws exception`() {
val testJar = makeTestJar()
val (testJar,_) = makeTestJar()
val id = database.transaction {
val storage = NodeAttachmentService(MetricRegistry())
val id = testJar.read { storage.importAttachment(it) }
@ -139,7 +219,7 @@ class NodeAttachmentStorageTest {
}

private var counter = 0
private fun makeTestJar(): Path {
private fun makeTestJar(extraEntries: List<Pair<String,String>> = emptyList()): Pair<Path, SecureHash> {
counter++
val file = fs.getPath("$counter.jar")
file.write {
@ -149,8 +229,12 @@ class NodeAttachmentStorageTest {
jar.closeEntry()
jar.putNextEntry(JarEntry("test2.txt"))
jar.write("Some more useful content".toByteArray())
extraEntries.forEach {
jar.putNextEntry(JarEntry(it.first))
jar.write(it.second.toByteArray())
}
jar.closeEntry()
}
return file
return Pair(file, file.readAll().sha256())
}
}
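// --- Illustrative note, not part of this commit ---
// The criteria built in the tests above compose with and()/or(); the same API can filter on the
// stored filename instead of the uploader, e.g. (values taken from the test data above):
//     storage.queryAttachments(AttachmentQueryCriteria.AttachmentsQueryCriteria(
//             filenameCondition = Builder.equal("fileB.zip")))
// --- end of illustrative note ---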
@ -28,7 +28,6 @@ class NodeSchemaServiceTest {
fun `registering custom schemas for testing with MockNode`() {
val mockNet = MockNetwork(cordappPackages = listOf(DummyLinearStateSchemaV1::class.packageName))
val mockNode = mockNet.createNode()
mockNet.runNetwork()
val schemaService = mockNode.services.schemaService
assertTrue(schemaService.schemaOptions.containsKey(DummyLinearStateSchemaV1))

@ -88,7 +87,7 @@ object TestSchema : MappedSchema(SchemaFamily::class.java, 1, setOf(Parent::clas
@Table(name = "Children")
class Child {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@GeneratedValue
@Column(name = "child_id", unique = true, nullable = false)
var childId: Int? = null

@ -9,8 +9,10 @@ import net.corda.core.contracts.StateAndRef
import net.corda.core.crypto.random63BitValue
import net.corda.core.flows.*
import net.corda.core.identity.Party
import net.corda.core.internal.FlowStateMachine
import net.corda.core.internal.concurrent.flatMap
import net.corda.core.internal.concurrent.map
import net.corda.core.internal.uncheckedCast
import net.corda.core.messaging.MessageRecipients
import net.corda.core.node.services.PartyInfo
import net.corda.core.node.services.queryBy
@ -84,7 +86,7 @@ class FlowFrameworkTests {
// Extract identities
alice = aliceNode.info.singleIdentity()
bob = bobNode.info.singleIdentity()
notaryIdentity = aliceNode.services.getDefaultNotary()
notaryIdentity = mockNet.defaultNotaryIdentity
}

@After
@ -209,7 +211,6 @@ class FlowFrameworkTests {
@Test
fun `sending to multiple parties`() {
val charlieNode = mockNet.createNode(MockNodeParameters(legalName = CHARLIE_NAME))
mockNet.runNetwork()
val charlie = charlieNode.info.singleIdentity()
bobNode.registerFlowFactory(SendFlow::class) { InitiatedReceiveFlow(it).nonTerminating() }
charlieNode.registerFlowFactory(SendFlow::class) { InitiatedReceiveFlow(it).nonTerminating() }
@ -242,7 +243,6 @@ class FlowFrameworkTests {
@Test
fun `receiving from multiple parties`() {
val charlieNode = mockNet.createNode(MockNodeParameters(legalName = CHARLIE_NAME))
mockNet.runNetwork()
val charlie = charlieNode.info.singleIdentity()
val bobPayload = "Test 1"
val charliePayload = "Test 2"
@ -396,7 +396,6 @@ class FlowFrameworkTests {
@Test
fun `FlowException propagated in invocation chain`() {
val charlieNode = mockNet.createNode(MockNodeParameters(legalName = CHARLIE_NAME))
mockNet.runNetwork()
val charlie = charlieNode.info.singleIdentity()

charlieNode.registerFlowFactory(ReceiveFlow::class) { ExceptionFlow { MyFlowException("Chain") } }
@ -411,7 +410,6 @@ class FlowFrameworkTests {
@Test
fun `FlowException thrown and there is a 3rd unrelated party flow`() {
val charlieNode = mockNet.createNode(MockNodeParameters(legalName = CHARLIE_NAME))
mockNet.runNetwork()
val charlie = charlieNode.info.singleIdentity()

// Bob will send its payload and then block waiting for the receive from Alice. Meanwhile Alice will move
@ -509,7 +507,7 @@ class FlowFrameworkTests {

val committerFiber = aliceNode.registerFlowFactory(WaitingFlows.Waiter::class) {
WaitingFlows.Committer(it)
}.map { it.stateMachine }
}.map { it.stateMachine }.map { uncheckedCast<FlowStateMachine<*>, FlowStateMachine<Any>>(it) }
val waiterStx = bobNode.services.startFlow(WaitingFlows.Waiter(stx, alice)).resultFuture
mockNet.runNetwork()
assertThat(waiterStx.getOrThrow()).isEqualTo(committerFiber.getOrThrow().resultFuture.getOrThrow())
@ -662,7 +660,7 @@ class FlowFrameworkTests {
val newNode = mockNet.createNode(MockNodeParameters(id))
newNode.internals.acceptableLiveFiberCountOnStop = 1
manuallyCloseDB()
mockNet.runNetwork() // allow NetworkMapService messages to stabilise and thus start the state machine
mockNet.runNetwork()
newNode.getSingleFlow<P>().first
}

@ -6,6 +6,8 @@ import io.atomix.copycat.client.CopycatClient
import io.atomix.copycat.server.CopycatServer
import io.atomix.copycat.server.storage.Storage
import io.atomix.copycat.server.storage.StorageLevel
import net.corda.core.internal.concurrent.asCordaFuture
import net.corda.core.internal.concurrent.transpose
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.core.utilities.getOrThrow
import net.corda.node.utilities.CordaPersistence
@ -17,10 +19,7 @@ import net.corda.testing.freeLocalHostAndPort
import net.corda.testing.node.MockServices.Companion.makeTestDataSourceProperties
import net.corda.testing.node.MockServices.Companion.makeTestDatabaseProperties
import net.corda.testing.node.MockServices.Companion.makeTestIdentityService
import org.junit.After
import org.junit.Before
import org.junit.Rule
import org.junit.Test
import org.junit.*
import java.util.concurrent.CompletableFuture
import kotlin.test.assertEquals
import kotlin.test.assertTrue
@ -30,7 +29,7 @@ class DistributedImmutableMapTests {

@Rule
@JvmField
val testSerialization = SerializationEnvironmentRule()
val testSerialization = SerializationEnvironmentRule(true)
lateinit var cluster: List<Member>
lateinit var transaction: DatabaseTransaction
private val databases: MutableList<CordaPersistence> = mutableListOf()
@ -44,10 +43,8 @@ class DistributedImmutableMapTests {
@After
fun tearDown() {
LogHelper.reset("org.apache.activemq")
cluster.forEach {
it.client.close()
it.server.shutdown()
}
cluster.map { it.client.close().asCordaFuture() }.transpose().getOrThrow()
cluster.map { it.server.shutdown().asCordaFuture() }.transpose().getOrThrow()
databases.forEach { it.close() }
}

@ -17,10 +17,10 @@ import net.corda.node.services.api.StartedNodeServices
import net.corda.testing.ALICE_NAME
import net.corda.testing.contracts.DummyContract
import net.corda.testing.dummyCommand
import net.corda.testing.getDefaultNotary
import net.corda.testing.node.MockNetwork
import net.corda.testing.node.MockNodeParameters
import net.corda.testing.singleIdentity
import net.corda.testing.startFlow
import org.assertj.core.api.Assertions.assertThat
import org.junit.After
import org.junit.Before
@ -41,9 +41,8 @@ class NotaryServiceTests {
fun setup() {
mockNet = MockNetwork(cordappPackages = listOf("net.corda.testing.contracts"))
aliceServices = mockNet.createNode(MockNodeParameters(legalName = ALICE_NAME)).services
mockNet.runNetwork() // Clear network map registration messages
notaryServices = mockNet.defaultNotaryNode.services //TODO get rid of that
notary = aliceServices.getDefaultNotary()
notary = mockNet.defaultNotaryIdentity
alice = aliceServices.myInfo.singleIdentity()
}

@ -15,13 +15,10 @@ import net.corda.core.transactions.TransactionBuilder
import net.corda.core.utilities.getOrThrow
import net.corda.node.services.api.StartedNodeServices
import net.corda.node.services.issueInvalidState
import net.corda.testing.ALICE_NAME
import net.corda.testing.MEGA_CORP_KEY
import net.corda.testing.*
import net.corda.testing.contracts.DummyContract
import net.corda.testing.dummyCommand
import net.corda.testing.node.MockNetwork
import net.corda.testing.node.MockNodeParameters
import net.corda.testing.singleIdentity
import org.assertj.core.api.Assertions.assertThat
import org.junit.After
import org.junit.Before
@ -41,7 +38,6 @@ class ValidatingNotaryServiceTests {
fun setup() {
mockNet = MockNetwork(cordappPackages = listOf("net.corda.testing.contracts"))
val aliceNode = mockNet.createNode(MockNodeParameters(legalName = ALICE_NAME))
mockNet.runNetwork() // Clear network map registration messages
notaryServices = mockNet.defaultNotaryNode.services
aliceServices = aliceNode.services
notary = mockNet.defaultNotaryIdentity
@ -1,14 +1,21 @@
|
||||
package net.corda.node.services.vault
|
||||
|
||||
import co.paralleluniverse.fibers.Suspendable
|
||||
import net.corda.core.contracts.*
|
||||
import net.corda.core.crypto.generateKeyPair
|
||||
import net.corda.core.contracts.Amount
|
||||
import net.corda.core.contracts.Issued
|
||||
import net.corda.core.contracts.StateAndRef
|
||||
import net.corda.core.contracts.StateRef
|
||||
import net.corda.core.crypto.*
|
||||
import net.corda.core.identity.AbstractParty
|
||||
import net.corda.core.identity.AnonymousParty
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.identity.PartyAndCertificate
|
||||
import net.corda.core.internal.packageName
|
||||
import net.corda.core.node.StatesToRecord
|
||||
import net.corda.core.node.services.*
|
||||
import net.corda.core.node.services.StatesNotAvailableException
|
||||
import net.corda.core.node.services.Vault
|
||||
import net.corda.core.node.services.VaultService
|
||||
import net.corda.core.node.services.queryBy
|
||||
import net.corda.core.node.services.vault.PageSpecification
|
||||
import net.corda.core.node.services.vault.QueryCriteria
|
||||
import net.corda.core.node.services.vault.QueryCriteria.*
|
||||
@ -22,6 +29,7 @@ import net.corda.finance.*
|
||||
import net.corda.finance.contracts.asset.Cash
|
||||
import net.corda.finance.contracts.asset.DUMMY_CASH_ISSUER
|
||||
import net.corda.finance.contracts.asset.DUMMY_CASH_ISSUER_KEY
|
||||
import net.corda.finance.contracts.asset.DUMMY_CASH_ISSUER_NAME
|
||||
import net.corda.finance.contracts.getCashBalance
|
||||
import net.corda.finance.schemas.CashSchemaV1
|
||||
import net.corda.finance.utils.sumCash
|
||||
@ -52,21 +60,24 @@ class NodeVaultServiceTest {
|
||||
@Rule
|
||||
@JvmField
|
||||
val testSerialization = SerializationEnvironmentRule()
|
||||
lateinit var services: MockServices
|
||||
private lateinit var services: MockServices
|
||||
private lateinit var identity: PartyAndCertificate
|
||||
private lateinit var issuerServices: MockServices
|
||||
val vaultService get() = services.vaultService as NodeVaultService
|
||||
lateinit var database: CordaPersistence
|
||||
private lateinit var bocServices: MockServices
|
||||
private val vaultService get() = services.vaultService as NodeVaultService
|
||||
private lateinit var database: CordaPersistence
|
||||
|
||||
@Before
|
||||
fun setUp() {
|
||||
LogHelper.setLevel(NodeVaultService::class)
|
||||
val databaseAndServices = MockServices.makeTestDatabaseAndMockServices(
|
||||
keys = listOf(BOC_KEY, DUMMY_CASH_ISSUER_KEY),
|
||||
cordappPackages = cordappPackages
|
||||
)
|
||||
val databaseAndServices = MockServices.makeTestDatabaseAndMockServices(cordappPackages = cordappPackages)
|
||||
database = databaseAndServices.first
|
||||
services = databaseAndServices.second
|
||||
issuerServices = MockServices(cordappPackages, DUMMY_CASH_ISSUER_KEY, BOC_KEY)
|
||||
// This is safe because MockServices only ever have a single identity
|
||||
identity = services.myInfo.singleIdentityAndCert()
|
||||
issuerServices = MockServices(cordappPackages, DUMMY_CASH_ISSUER_NAME, DUMMY_CASH_ISSUER_KEY)
|
||||
bocServices = MockServices(cordappPackages, BOC_NAME, BOC_KEY)
|
||||
services.identityService.verifyAndRegisterIdentity(DUMMY_CASH_ISSUER_IDENTITY)
|
||||
}
|
||||
|
||||
@After
|
||||
@ -339,8 +350,8 @@ class NodeVaultServiceTest {
|
||||
@Test
|
||||
fun `unconsumedStatesForSpending from two issuer parties`() {
|
||||
database.transaction {
|
||||
services.fillWithSomeTestCash(100.DOLLARS, issuerServices, DUMMY_NOTARY, 1, 1, Random(0L), issuedBy = (DUMMY_CASH_ISSUER))
|
||||
services.fillWithSomeTestCash(100.DOLLARS, issuerServices, DUMMY_NOTARY, 1, 1, Random(0L), issuedBy = (BOC.ref(1)))
|
||||
services.fillWithSomeTestCash(100.DOLLARS, issuerServices, DUMMY_NOTARY, 1, 1, Random(0L), issuedBy = DUMMY_CASH_ISSUER)
|
||||
services.fillWithSomeTestCash(100.DOLLARS, bocServices, DUMMY_NOTARY, 1, 1, Random(0L), issuedBy = BOC.ref(1))
|
||||
}
|
||||
database.transaction {
|
||||
val spendableStatesUSD = vaultService.unconsumedCashStatesForSpending(200.DOLLARS,
|
||||
@ -356,10 +367,10 @@ class NodeVaultServiceTest {
|
||||
@Test
|
||||
fun `unconsumedStatesForSpending from specific issuer party and refs`() {
|
||||
database.transaction {
|
||||
services.fillWithSomeTestCash(100.DOLLARS, issuerServices, DUMMY_NOTARY, 1, 1, Random(0L), issuedBy = (DUMMY_CASH_ISSUER))
|
||||
services.fillWithSomeTestCash(100.DOLLARS, issuerServices, DUMMY_NOTARY, 1, 1, Random(0L), issuedBy = (BOC.ref(1)), ref = OpaqueBytes.of(1))
|
||||
services.fillWithSomeTestCash(100.DOLLARS, issuerServices, DUMMY_NOTARY, 1, 1, Random(0L), issuedBy = (BOC.ref(2)), ref = OpaqueBytes.of(2))
|
||||
services.fillWithSomeTestCash(100.DOLLARS, issuerServices, DUMMY_NOTARY, 1, 1, Random(0L), issuedBy = (BOC.ref(3)), ref = OpaqueBytes.of(3))
|
||||
services.fillWithSomeTestCash(100.DOLLARS, issuerServices, DUMMY_NOTARY, 1, 1, Random(0L), issuedBy = DUMMY_CASH_ISSUER)
|
||||
services.fillWithSomeTestCash(100.DOLLARS, bocServices, DUMMY_NOTARY, 1, 1, Random(0L), issuedBy = BOC.ref(1))
|
||||
services.fillWithSomeTestCash(100.DOLLARS, bocServices, DUMMY_NOTARY, 1, 1, Random(0L), issuedBy = BOC.ref(2))
|
||||
services.fillWithSomeTestCash(100.DOLLARS, bocServices, DUMMY_NOTARY, 1, 1, Random(0L), issuedBy = BOC.ref(3))
|
||||
}
|
||||
database.transaction {
|
||||
val unconsumedStates = vaultService.queryBy<Cash.State>().states
|
||||
@ -447,9 +458,9 @@ class NodeVaultServiceTest {
|
||||
|
||||
@Test
|
||||
fun addNoteToTransaction() {
|
||||
val megaCorpServices = MockServices(cordappPackages, MEGA_CORP_KEY)
|
||||
val megaCorpServices = MockServices(cordappPackages, MEGA_CORP.name, MEGA_CORP_KEY)
|
||||
database.transaction {
|
||||
val freshKey = services.myInfo.chooseIdentity().owningKey
|
||||
val freshKey = identity.owningKey
|
||||
|
||||
// Issue a txn to Send us some Money
|
||||
val usefulBuilder = TransactionBuilder(null).apply {
|
||||
@ -481,11 +492,11 @@ class NodeVaultServiceTest {
|
||||
fun `is ownable state relevant`() {
|
||||
val service = vaultService
|
||||
val amount = Amount(1000, Issued(BOC.ref(1), GBP))
|
||||
val wellKnownCash = Cash.State(amount, services.myInfo.chooseIdentity())
|
||||
val wellKnownCash = Cash.State(amount, identity.party)
|
||||
val myKeys = services.keyManagementService.filterMyKeys(listOf(wellKnownCash.owner.owningKey))
|
||||
assertTrue { service.isRelevant(wellKnownCash, myKeys.toSet()) }
|
||||
|
||||
val anonymousIdentity = services.keyManagementService.freshKeyAndCert(services.myInfo.chooseIdentityAndCert(), false)
|
||||
val anonymousIdentity = services.keyManagementService.freshKeyAndCert(identity, false)
|
||||
val anonymousCash = Cash.State(amount, anonymousIdentity.party)
|
||||
val anonymousKeys = services.keyManagementService.filterMyKeys(listOf(anonymousCash.owner.owningKey))
|
||||
assertTrue { service.isRelevant(anonymousCash, anonymousKeys.toSet()) }
|
||||
@ -501,32 +512,43 @@ class NodeVaultServiceTest {
|
||||
@Test
|
||||
fun `correct updates are generated for general transactions`() {
|
||||
val service = vaultService
|
||||
val notary = identity.party
|
||||
val vaultSubscriber = TestSubscriber<Vault.Update<*>>().apply {
|
||||
service.updates.subscribe(this)
|
||||
}
|
||||
|
||||
val anonymousIdentity = services.keyManagementService.freshKeyAndCert(services.myInfo.chooseIdentityAndCert(), false)
|
||||
val identity = services.myInfo.singleIdentityAndCert()
|
||||
val anonymousIdentity = services.keyManagementService.freshKeyAndCert(identity, false)
|
||||
val thirdPartyIdentity = AnonymousParty(generateKeyPair().public)
|
||||
val amount = Amount(1000, Issued(BOC.ref(1), GBP))
|
||||
|
||||
// Issue then move some cash
|
||||
val issueTx = TransactionBuilder(services.myInfo.chooseIdentity()).apply {
|
||||
Cash().generateIssue(this,
|
||||
amount, anonymousIdentity.party, services.myInfo.chooseIdentity())
|
||||
}.toWireTransaction(services)
|
||||
val issueBuilder = TransactionBuilder(notary).apply {
|
||||
Cash().generateIssue(this, amount, anonymousIdentity.party.anonymise(), identity.party)
|
||||
}
|
||||
val issueTx = issueBuilder.toWireTransaction(bocServices)
|
||||
val cashState = StateAndRef(issueTx.outputs.single(), StateRef(issueTx.id, 0))
|
||||
|
||||
// ensure transaction contract state is persisted in DBStorage
|
||||
val signedIssuedTx = services.signInitialTransaction(issueBuilder)
|
||||
services.validatedTransactions.addTransaction(signedIssuedTx)
|
||||
|
||||
database.transaction { service.notify(StatesToRecord.ONLY_RELEVANT, issueTx) }
|
||||
val expectedIssueUpdate = Vault.Update(emptySet(), setOf(cashState), null)
|
||||
|
||||
database.transaction {
|
||||
val moveTx = TransactionBuilder(services.myInfo.chooseIdentity()).apply {
|
||||
val moveBuilder = TransactionBuilder(notary).apply {
|
||||
Cash.generateSpend(services, this, Amount(1000, GBP), thirdPartyIdentity)
|
||||
}.toWireTransaction(services)
|
||||
}
|
||||
val moveTx = moveBuilder.toWireTransaction(services)
|
||||
service.notify(StatesToRecord.ONLY_RELEVANT, moveTx)
|
||||
}
|
||||
val expectedMoveUpdate = Vault.Update(setOf(cashState), emptySet(), null)
|
||||
|
||||
// ensure transaction contract state is persisted in DBStorage
|
||||
val signedMoveTx = services.signInitialTransaction(issueBuilder)
|
||||
services.validatedTransactions.addTransaction(signedMoveTx)
|
||||
|
||||
val observedUpdates = vaultSubscriber.onNextEvents
|
||||
assertEquals(observedUpdates, listOf(expectedIssueUpdate, expectedMoveUpdate))
|
||||
}
|
||||
@ -534,21 +556,24 @@ class NodeVaultServiceTest {
|
||||
@Test
|
||||
fun `correct updates are generated when changing notaries`() {
|
||||
val service = vaultService
|
||||
val notary = services.myInfo.chooseIdentity()
|
||||
val notary = identity.party
|
||||
|
||||
val vaultSubscriber = TestSubscriber<Vault.Update<*>>().apply {
|
||||
service.updates.subscribe(this)
|
||||
}
|
||||
|
||||
val anonymousIdentity = services.keyManagementService.freshKeyAndCert(services.myInfo.chooseIdentityAndCert(), false)
|
||||
val thirdPartyIdentity = AnonymousParty(generateKeyPair().public)
|
||||
val identity = services.myInfo.singleIdentityAndCert()
|
||||
assertEquals(services.identityService.partyFromKey(identity.owningKey), identity.party)
|
||||
val anonymousIdentity = services.keyManagementService.freshKeyAndCert(identity, false)
|
||||
val thirdPartyServices = MockServices()
|
||||
val thirdPartyIdentity = thirdPartyServices.keyManagementService.freshKeyAndCert(thirdPartyServices.myInfo.singleIdentityAndCert(), false)
|
||||
val amount = Amount(1000, Issued(BOC.ref(1), GBP))
|
||||
|
||||
// Issue some cash
|
||||
val issueTxBuilder = TransactionBuilder(notary).apply {
|
||||
Cash().generateIssue(this, amount, anonymousIdentity.party, notary)
|
||||
}
|
||||
val issueStx = services.signInitialTransaction(issueTxBuilder)
|
||||
val issueStx = bocServices.signInitialTransaction(issueTxBuilder)
|
||||
// We need to record the issue transaction so inputs can be resolved for the notary change transaction
|
||||
services.validatedTransactions.addTransaction(issueStx)
|
||||
|
||||
@ -563,12 +588,20 @@ class NodeVaultServiceTest {
|
||||
service.notifyAll(StatesToRecord.ONLY_RELEVANT, listOf(issueStx.tx, changeNotaryTx))
|
||||
}
|
||||
|
||||
// ensure transaction contract state is persisted in DBStorage
|
||||
services.validatedTransactions.addTransaction(SignedTransaction(changeNotaryTx, listOf(NullKeys.NULL_SIGNATURE)))
|
||||
|
||||
// Move cash
|
||||
val moveTx = database.transaction {
|
||||
val moveTxBuilder = database.transaction {
|
||||
TransactionBuilder(newNotary).apply {
|
||||
Cash.generateSpend(services, this, Amount(1000, GBP), thirdPartyIdentity)
|
||||
}.toWireTransaction(services)
|
||||
Cash.generateSpend(services, this, Amount(amount.quantity, GBP), anonymousIdentity, thirdPartyIdentity.party.anonymise())
|
||||
}
|
||||
}
|
||||
val moveTx = moveTxBuilder.toWireTransaction(services)
|
||||
|
||||
// ensure transaction contract state is persisted in DBStorage
|
||||
val signedMoveTx = services.signInitialTransaction(moveTxBuilder)
|
||||
services.validatedTransactions.addTransaction(signedMoveTx)
|
||||
|
||||
database.transaction {
|
||||
service.notify(StatesToRecord.ONLY_RELEVANT, moveTx)
|
||||
@ -600,6 +633,10 @@ class NodeVaultServiceTest {
|
||||
vaultService.notify(StatesToRecord.ONLY_RELEVANT, wtx)
|
||||
}
|
||||
|
||||
// ensure transaction contract state is persisted in DBStorage
|
||||
val signedTxb = services.signInitialTransaction(txb)
|
||||
services.validatedTransactions.addTransaction(signedTxb)
|
||||
|
||||
// Check that it was ignored as irrelevant.
|
||||
assertEquals(currentCashStates, countCash())
|
||||
|
||||
|
File diff suppressed because it is too large
@ -25,10 +25,12 @@ import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.unwrap
import net.corda.node.internal.InitiatedFlowFactory
import net.corda.node.services.api.VaultServiceInternal
import net.corda.node.services.persistence.HibernateConfiguration
import net.corda.testing.chooseIdentity
import net.corda.testing.node.MockNetwork
import net.corda.testing.rigorousMock
import net.corda.testing.node.MockNodeParameters
import net.corda.testing.startFlow
import org.junit.After
import org.junit.Test
import java.util.*
@ -81,8 +83,8 @@ class VaultSoftLockManagerTest {
}
private val mockNet = MockNetwork(cordappPackages = listOf(ContractImpl::class.packageName), defaultFactory = { args ->
object : MockNetwork.MockNode(args) {
override fun makeVaultService(keyManagementService: KeyManagementService, stateLoader: StateLoader): VaultServiceInternal {
val realVault = super.makeVaultService(keyManagementService, stateLoader)
override fun makeVaultService(keyManagementService: KeyManagementService, stateLoader: StateLoader, hibernateConfig: HibernateConfiguration): VaultServiceInternal {
val realVault = super.makeVaultService(keyManagementService, stateLoader, hibernateConfig)
return object : VaultServiceInternal by realVault {
override fun softLockRelease(lockId: UUID, stateRefs: NonEmptySet<StateRef>?) {
mockVault.softLockRelease(lockId, stateRefs) // No need to also call the real one for these tests.
@ -1,9 +1,14 @@
|
||||
package net.corda.node.services.vault
|
||||
|
||||
import net.corda.core.contracts.ContractState
|
||||
import net.corda.core.contracts.InsufficientBalanceException
|
||||
import net.corda.core.contracts.LinearState
|
||||
import net.corda.core.contracts.UniqueIdentifier
|
||||
import net.corda.core.crypto.generateKeyPair
|
||||
import net.corda.core.identity.AnonymousParty
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.internal.concurrent.fork
|
||||
import net.corda.core.internal.concurrent.transpose
|
||||
import net.corda.core.internal.packageName
|
||||
import net.corda.core.node.services.Vault
|
||||
import net.corda.core.node.services.VaultService
|
||||
@ -11,10 +16,12 @@ import net.corda.core.node.services.queryBy
|
||||
import net.corda.core.node.services.vault.QueryCriteria
|
||||
import net.corda.core.node.services.vault.QueryCriteria.VaultQueryCriteria
|
||||
import net.corda.core.transactions.TransactionBuilder
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.finance.*
|
||||
import net.corda.finance.contracts.asset.Cash
|
||||
import net.corda.finance.contracts.asset.DUMMY_CASH_ISSUER
|
||||
import net.corda.finance.contracts.asset.DUMMY_CASH_ISSUER_KEY
|
||||
import net.corda.finance.contracts.asset.DUMMY_CASH_ISSUER_NAME
|
||||
import net.corda.finance.contracts.getCashBalance
|
||||
import net.corda.finance.schemas.CashSchemaV1
|
||||
import net.corda.node.utilities.CordaPersistence
|
||||
@ -29,9 +36,9 @@ import org.junit.Before
|
||||
import org.junit.Rule
|
||||
import org.junit.Test
|
||||
import java.util.*
|
||||
import java.util.concurrent.CountDownLatch
|
||||
import java.util.concurrent.Executors
|
||||
import kotlin.test.assertEquals
|
||||
import kotlin.test.fail
|
||||
|
||||
// TODO: Move this to the cash contract tests once mock services are further split up.
|
||||
|
||||
@ -42,22 +49,23 @@ class VaultWithCashTest {
|
||||
|
||||
@Rule
|
||||
@JvmField
|
||||
val testSerialization = SerializationEnvironmentRule()
|
||||
val testSerialization = SerializationEnvironmentRule(true)
|
||||
lateinit var services: MockServices
|
||||
lateinit var issuerServices: MockServices
|
||||
val vaultService: VaultService get() = services.vaultService
|
||||
lateinit var database: CordaPersistence
|
||||
lateinit var notaryServices: MockServices
|
||||
private lateinit var notaryServices: MockServices
|
||||
private lateinit var notary: Party
|
||||
|
||||
@Before
|
||||
fun setUp() {
|
||||
LogHelper.setLevel(VaultWithCashTest::class)
|
||||
val databaseAndServices = makeTestDatabaseAndMockServices(keys = listOf(DUMMY_CASH_ISSUER_KEY, DUMMY_NOTARY_KEY),
|
||||
cordappPackages = cordappPackages)
|
||||
val databaseAndServices = makeTestDatabaseAndMockServices(cordappPackages = cordappPackages, keys = listOf(generateKeyPair(), DUMMY_NOTARY_KEY))
|
||||
database = databaseAndServices.first
|
||||
services = databaseAndServices.second
|
||||
issuerServices = MockServices(cordappPackages, DUMMY_CASH_ISSUER_KEY, MEGA_CORP_KEY)
|
||||
notaryServices = MockServices(cordappPackages, DUMMY_NOTARY_KEY)
|
||||
issuerServices = MockServices(cordappPackages, DUMMY_CASH_ISSUER_NAME, DUMMY_CASH_ISSUER_KEY, MEGA_CORP_KEY)
|
||||
notaryServices = MockServices(cordappPackages, DUMMY_NOTARY.name, DUMMY_NOTARY_KEY)
|
||||
notary = notaryServices.myInfo.legalIdentitiesAndCerts.single().party
|
||||
}
|
||||
|
||||
@After
|
||||
@ -87,7 +95,7 @@ class VaultWithCashTest {
|
||||
|
||||
@Test
|
||||
fun `issue and spend total correctly and irrelevant ignored`() {
|
||||
val megaCorpServices = MockServices(cordappPackages, MEGA_CORP_KEY)
|
||||
val megaCorpServices = MockServices(cordappPackages, MEGA_CORP.name, MEGA_CORP_KEY)
|
||||
val freshKey = services.keyManagementService.freshKey()
|
||||
|
||||
val usefulTX =
|
||||
@ -150,82 +158,74 @@ class VaultWithCashTest {
|
||||
}
|
||||
|
||||
val backgroundExecutor = Executors.newFixedThreadPool(2)
|
||||
val countDown = CountDownLatch(2)
|
||||
|
||||
// 1st tx that spends our money.
|
||||
backgroundExecutor.submit {
|
||||
val first = backgroundExecutor.fork {
|
||||
database.transaction {
|
||||
try {
|
||||
val txn1Builder = TransactionBuilder(DUMMY_NOTARY)
|
||||
Cash.generateSpend(services, txn1Builder, 60.DOLLARS, BOB)
|
||||
val ptxn1 = notaryServices.signInitialTransaction(txn1Builder)
|
||||
val txn1 = services.addSignature(ptxn1, freshKey)
|
||||
println("txn1: ${txn1.id} spent ${((txn1.tx.outputs[0].data) as Cash.State).amount}")
|
||||
val unconsumedStates1 = vaultService.queryBy<Cash.State>()
|
||||
val consumedStates1 = vaultService.queryBy<Cash.State>(VaultQueryCriteria(status = Vault.StateStatus.CONSUMED))
|
||||
val lockedStates1 = vaultService.queryBy<Cash.State>(criteriaLocked).states
|
||||
println("""txn1 states:
|
||||
val txn1Builder = TransactionBuilder(DUMMY_NOTARY)
|
||||
Cash.generateSpend(services, txn1Builder, 60.DOLLARS, BOB)
|
||||
val ptxn1 = notaryServices.signInitialTransaction(txn1Builder)
|
||||
val txn1 = services.addSignature(ptxn1, freshKey)
|
||||
println("txn1: ${txn1.id} spent ${((txn1.tx.outputs[0].data) as Cash.State).amount}")
|
||||
val unconsumedStates1 = vaultService.queryBy<Cash.State>()
|
||||
val consumedStates1 = vaultService.queryBy<Cash.State>(VaultQueryCriteria(status = Vault.StateStatus.CONSUMED))
|
||||
val lockedStates1 = vaultService.queryBy<Cash.State>(criteriaLocked).states
|
||||
println("""txn1 states:
|
||||
UNCONSUMED: ${unconsumedStates1.totalStatesAvailable} : $unconsumedStates1,
|
||||
CONSUMED: ${consumedStates1.totalStatesAvailable} : $consumedStates1,
|
||||
LOCKED: ${lockedStates1.count()} : $lockedStates1
|
||||
""")
|
||||
services.recordTransactions(txn1)
|
||||
println("txn1: Cash balance: ${services.getCashBalance(USD)}")
|
||||
val unconsumedStates2 = vaultService.queryBy<Cash.State>()
|
||||
val consumedStates2 = vaultService.queryBy<Cash.State>(VaultQueryCriteria(status = Vault.StateStatus.CONSUMED))
|
||||
val lockedStates2 = vaultService.queryBy<Cash.State>(criteriaLocked).states
|
||||
println("""txn1 states:
|
||||
services.recordTransactions(txn1)
|
||||
println("txn1: Cash balance: ${services.getCashBalance(USD)}")
|
||||
val unconsumedStates2 = vaultService.queryBy<Cash.State>()
|
||||
val consumedStates2 = vaultService.queryBy<Cash.State>(VaultQueryCriteria(status = Vault.StateStatus.CONSUMED))
|
||||
val lockedStates2 = vaultService.queryBy<Cash.State>(criteriaLocked).states
|
||||
println("""txn1 states:
|
||||
UNCONSUMED: ${unconsumedStates2.totalStatesAvailable} : $unconsumedStates2,
|
||||
CONSUMED: ${consumedStates2.totalStatesAvailable} : $consumedStates2,
|
||||
LOCKED: ${lockedStates2.count()} : $lockedStates2
|
||||
""")
|
||||
txn1
|
||||
} catch (e: Exception) {
|
||||
println(e)
|
||||
}
|
||||
txn1
|
||||
}
|
||||
println("txn1 COMMITTED!")
|
||||
countDown.countDown()
|
||||
}
|
||||
|
||||
// 2nd tx that attempts to spend same money
|
||||
backgroundExecutor.submit {
|
||||
val second = backgroundExecutor.fork {
|
||||
database.transaction {
|
||||
try {
|
||||
val txn2Builder = TransactionBuilder(DUMMY_NOTARY)
|
||||
Cash.generateSpend(services, txn2Builder, 80.DOLLARS, BOB)
|
||||
val ptxn2 = notaryServices.signInitialTransaction(txn2Builder)
|
||||
val txn2 = services.addSignature(ptxn2, freshKey)
|
||||
println("txn2: ${txn2.id} spent ${((txn2.tx.outputs[0].data) as Cash.State).amount}")
|
||||
val unconsumedStates1 = vaultService.queryBy<Cash.State>()
|
||||
val consumedStates1 = vaultService.queryBy<Cash.State>(VaultQueryCriteria(status = Vault.StateStatus.CONSUMED))
|
||||
val lockedStates1 = vaultService.queryBy<Cash.State>(criteriaLocked).states
|
||||
println("""txn2 states:
|
||||
val txn2Builder = TransactionBuilder(DUMMY_NOTARY)
|
||||
Cash.generateSpend(services, txn2Builder, 80.DOLLARS, BOB)
|
||||
val ptxn2 = notaryServices.signInitialTransaction(txn2Builder)
|
||||
val txn2 = services.addSignature(ptxn2, freshKey)
|
||||
println("txn2: ${txn2.id} spent ${((txn2.tx.outputs[0].data) as Cash.State).amount}")
|
||||
val unconsumedStates1 = vaultService.queryBy<Cash.State>()
|
||||
val consumedStates1 = vaultService.queryBy<Cash.State>(VaultQueryCriteria(status = Vault.StateStatus.CONSUMED))
|
||||
val lockedStates1 = vaultService.queryBy<Cash.State>(criteriaLocked).states
|
||||
println("""txn2 states:
|
||||
UNCONSUMED: ${unconsumedStates1.totalStatesAvailable} : $unconsumedStates1,
|
||||
CONSUMED: ${consumedStates1.totalStatesAvailable} : $consumedStates1,
|
||||
LOCKED: ${lockedStates1.count()} : $lockedStates1
|
||||
""")
|
||||
services.recordTransactions(txn2)
|
||||
println("txn2: Cash balance: ${services.getCashBalance(USD)}")
|
||||
val unconsumedStates2 = vaultService.queryBy<Cash.State>()
|
||||
val consumedStates2 = vaultService.queryBy<Cash.State>()
|
||||
val lockedStates2 = vaultService.queryBy<Cash.State>(criteriaLocked).states
|
||||
println("""txn2 states:
|
||||
services.recordTransactions(txn2)
|
||||
println("txn2: Cash balance: ${services.getCashBalance(USD)}")
|
||||
val unconsumedStates2 = vaultService.queryBy<Cash.State>()
|
||||
val consumedStates2 = vaultService.queryBy<Cash.State>()
|
||||
val lockedStates2 = vaultService.queryBy<Cash.State>(criteriaLocked).states
|
||||
println("""txn2 states:
|
||||
UNCONSUMED: ${unconsumedStates2.totalStatesAvailable} : $unconsumedStates2,
|
||||
CONSUMED: ${consumedStates2.totalStatesAvailable} : $consumedStates2,
|
||||
LOCKED: ${lockedStates2.count()} : $lockedStates2
|
||||
""")
|
||||
txn2
|
||||
} catch (e: Exception) {
|
||||
println(e)
|
||||
}
|
||||
txn2
|
||||
}
|
||||
println("txn2 COMMITTED!")
|
||||
|
||||
countDown.countDown()
|
||||
}
|
||||
|
||||
countDown.await()
|
||||
val both = listOf(first, second).transpose()
|
||||
try {
|
||||
both.getOrThrow()
|
||||
fail("Expected insufficient balance.")
|
||||
} catch (e: InsufficientBalanceException) {
|
||||
assertEquals(0, e.suppressed.size) // One should succeed.
|
||||
}
|
||||
database.transaction {
|
||||
println("Cash balance: ${services.getCashBalance(USD)}")
|
||||
assertThat(services.getCashBalance(USD)).isIn(DOLLARS(20), DOLLARS(40))
|
||||
@ -240,10 +240,10 @@ class VaultWithCashTest {
|
||||
val linearId = UniqueIdentifier()
|
||||
|
||||
// Issue a linear state
|
||||
val dummyIssueBuilder = TransactionBuilder(notary = DUMMY_NOTARY).apply {
|
||||
val dummyIssueBuilder = TransactionBuilder(notary = notary).apply {
|
||||
addOutputState(DummyLinearContract.State(linearId = linearId, participants = listOf(freshIdentity)), DUMMY_LINEAR_CONTRACT_PROGRAM_ID)
|
||||
addOutputState(DummyLinearContract.State(linearId = linearId, participants = listOf(freshIdentity)), DUMMY_LINEAR_CONTRACT_PROGRAM_ID)
|
||||
addCommand(dummyCommand(notaryServices.myInfo.chooseIdentity().owningKey))
|
||||
addCommand(dummyCommand(notary!!.owningKey))
|
||||
}
|
||||
val dummyIssue = notaryServices.signInitialTransaction(dummyIssueBuilder)
|
||||
|
||||
@ -264,7 +264,7 @@ class VaultWithCashTest {
|
||||
// Issue a linear state
|
||||
val dummyIssueBuilder = TransactionBuilder(notary = DUMMY_NOTARY)
|
||||
.addOutputState(DummyLinearContract.State(linearId = linearId, participants = listOf(freshIdentity)), DUMMY_LINEAR_CONTRACT_PROGRAM_ID)
|
||||
.addCommand(dummyCommand(notaryServices.myInfo.chooseIdentity().owningKey))
|
||||
.addCommand(dummyCommand(notary.owningKey))
|
||||
val dummyIssuePtx = notaryServices.signInitialTransaction(dummyIssueBuilder)
|
||||
val dummyIssue = services.addSignature(dummyIssuePtx)
|
||||
|
||||
@ -280,7 +280,7 @@ class VaultWithCashTest {
|
||||
val dummyMoveBuilder = TransactionBuilder(notary = DUMMY_NOTARY)
|
||||
.addOutputState(DummyLinearContract.State(linearId = linearId, participants = listOf(freshIdentity)), DUMMY_LINEAR_CONTRACT_PROGRAM_ID)
|
||||
.addInputState(dummyIssue.tx.outRef<LinearState>(0))
|
||||
.addCommand(dummyCommand(notaryServices.myInfo.chooseIdentity().owningKey))
|
||||
.addCommand(dummyCommand(notary.owningKey))
|
||||
|
||||
val dummyMove = notaryServices.signInitialTransaction(dummyMoveBuilder)
|
||||
|
||||
@ -307,7 +307,7 @@ class VaultWithCashTest {
|
||||
cash.forEach { println(it.state.data.amount) }
|
||||
}
|
||||
database.transaction {
|
||||
services.fillWithSomeTestDeals(listOf("123", "456", "789"))
|
||||
services.fillWithSomeTestDeals(listOf("123", "456", "789"), issuerServices)
|
||||
}
|
||||
database.transaction {
|
||||
val deals = vaultService.queryBy<DummyDealContract.State>().states
|
||||
@ -337,7 +337,7 @@ class VaultWithCashTest {
|
||||
val freshKey = services.keyManagementService.freshKey()
|
||||
val freshIdentity = AnonymousParty(freshKey)
|
||||
database.transaction {
|
||||
services.fillWithSomeTestDeals(listOf("123", "456", "789"))
|
||||
services.fillWithSomeTestDeals(listOf("123", "456", "789"), issuerServices)
|
||||
}
|
||||
val deals =
|
||||
database.transaction {
|
||||
@ -351,12 +351,12 @@ class VaultWithCashTest {
|
||||
linearStates.forEach { println(it.state.data.linearId) }
|
||||
|
||||
// Create a txn consuming different contract types
|
||||
val dummyMoveBuilder = TransactionBuilder(notary = DUMMY_NOTARY).apply {
|
||||
val dummyMoveBuilder = TransactionBuilder(notary = notary).apply {
|
||||
addOutputState(DummyLinearContract.State(participants = listOf(freshIdentity)), DUMMY_LINEAR_CONTRACT_PROGRAM_ID)
|
||||
addOutputState(DummyDealContract.State(ref = "999", participants = listOf(freshIdentity)), DUMMY_DEAL_PROGRAM_ID)
|
||||
addInputState(linearStates.first())
|
||||
addInputState(deals.first())
|
||||
addCommand(dummyCommand(notaryServices.myInfo.chooseIdentity().owningKey))
|
||||
addCommand(dummyCommand(notary!!.owningKey))
|
||||
}
|
||||
|
||||
val dummyMove = notaryServices.signInitialTransaction(dummyMoveBuilder)
|
||||
|
@ -0,0 +1 @@
mock-maker-inline