Merge opensource into master

szymonsztuka
2017-11-21 18:53:14 +00:00
150 changed files with 1878 additions and 1077 deletions

View File

@ -0,0 +1,170 @@
package net.corda.node
import co.paralleluniverse.fibers.Suspendable
import com.jcraft.jsch.ChannelExec
import com.jcraft.jsch.JSch
import com.jcraft.jsch.JSchException
import net.corda.core.flows.*
import net.corda.core.identity.Party
import net.corda.core.utilities.ProgressTracker
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.unwrap
import net.corda.nodeapi.User
import net.corda.testing.ALICE
import net.corda.testing.driver.driver
import org.bouncycastle.util.io.Streams
import org.junit.Test
import net.corda.node.services.Permissions.Companion.startFlow
import java.net.ConnectException
import kotlin.test.assertTrue
import kotlin.test.fail
import org.assertj.core.api.Assertions.assertThat
import java.util.regex.Pattern
class SSHServerTest {
@Test
fun `ssh server does not start by default`() {
val user = User("u", "p", setOf())
// The driver will automatically pick up the annotated flows below
driver() {
val node = startNode(providedName = ALICE.name, rpcUsers = listOf(user))
node.getOrThrow()
val session = JSch().getSession("u", "localhost", 2222)
session.setConfig("StrictHostKeyChecking", "no")
session.setPassword("p")
try {
session.connect()
fail()
} catch (e:JSchException) {
assertTrue(e.cause is ConnectException)
}
}
}
@Test
fun `ssh server starts when configured`() {
val user = User("u", "p", setOf())
// The driver will automatically pick up the annotated flows below
driver {
val node = startNode(providedName = ALICE.name, rpcUsers = listOf(user),
customOverrides = mapOf("sshd" to mapOf("port" to 2222)))
node.getOrThrow()
val session = JSch().getSession("u", "localhost", 2222)
session.setConfig("StrictHostKeyChecking", "no")
session.setPassword("p")
session.connect()
assertTrue(session.isConnected)
}
}
@Test
fun `ssh server verifies credentials`() {
val user = User("u", "p", setOf())
// The driver will automatically pick up the annotated flows below
driver {
val node = startNode(providedName = ALICE.name, rpcUsers = listOf(user),
customOverrides = mapOf("sshd" to mapOf("port" to 2222)))
node.getOrThrow()
val session = JSch().getSession("u", "localhost", 2222)
session.setConfig("StrictHostKeyChecking", "no")
session.setPassword("p_is_bad_password")
try {
session.connect()
fail("Server should reject invalid credentials")
} catch (e: JSchException) {
//There is no specialized exception for this
assertTrue(e.message == "Auth fail")
}
}
}
@Test
fun `ssh respects permissions`() {
val user = User("u", "p", setOf(startFlow<FlowICanRun>()))
// The driver will automatically pick up the annotated flows below
driver(isDebug = true) {
val node = startNode(providedName = ALICE.name, rpcUsers = listOf(user),
customOverrides = mapOf("sshd" to mapOf("port" to 2222)))
node.getOrThrow()
val session = JSch().getSession("u", "localhost", 2222)
session.setConfig("StrictHostKeyChecking", "no")
session.setPassword("p")
session.connect()
assertTrue(session.isConnected)
val channel = session.openChannel("exec") as ChannelExec
channel.setCommand("start FlowICannotRun otherParty: \"O=Alice Corp,L=Madrid,C=ES\"")
channel.connect()
val response = String(Streams.readAll(channel.inputStream))
val flowNameEscaped = Pattern.quote("StartFlow.${SSHServerTest::class.qualifiedName}$${FlowICannotRun::class.simpleName}")
channel.disconnect()
session.disconnect()
assertThat(response).matches("(?s)User not permissioned with any of \\[[^]]*${flowNameEscaped}.*")
}
}
@Test
fun `ssh runs flows`() {
val user = User("u", "p", setOf(startFlow<FlowICanRun>()))
// The driver will automatically pick up the annotated flows below
driver(isDebug = true) {
val node = startNode(providedName = ALICE.name, rpcUsers = listOf(user),
customOverrides = mapOf("sshd" to mapOf("port" to 2222)))
node.getOrThrow()
val session = JSch().getSession("u", "localhost", 2222)
session.setConfig("StrictHostKeyChecking", "no")
session.setPassword("p")
session.connect()
assertTrue(session.isConnected)
val channel = session.openChannel("exec") as ChannelExec
channel.setCommand("start FlowICanRun")
channel.connect()
val response = String(Streams.readAll(channel.inputStream))
//There are ANSI control characters involved, so we want to avoid direct byte to byte matching
assertThat(response.lines()).filteredOn( { it.contains("") && it.contains("Done")}).hasSize(1)
}
}
@StartableByRPC
@InitiatingFlow
class FlowICanRun : FlowLogic<String>() {
private val HELLO_STEP = ProgressTracker.Step("Hello")
@Suspendable
override fun call(): String {
progressTracker?.currentStep = HELLO_STEP
return "bambam"
}
override val progressTracker: ProgressTracker? = ProgressTracker(HELLO_STEP)
}
@StartableByRPC
@InitiatingFlow
class FlowICannotRun(val otherParty: Party) : FlowLogic<String>() {
@Suspendable
override fun call(): String = initiateFlow(otherParty).receive<String>().unwrap { it }
override val progressTracker: ProgressTracker? = ProgressTracker()
}
}

View File

@ -13,8 +13,8 @@ import net.corda.core.internal.div
import net.corda.core.internal.toLedgerTransaction
import net.corda.core.serialization.SerializationFactory
import net.corda.core.transactions.TransactionBuilder
import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.loggerFor
import net.corda.node.internal.cordapp.CordappLoader
import net.corda.node.internal.cordapp.CordappProviderImpl
import net.corda.testing.*
@ -43,7 +43,7 @@ class AttachmentLoadingTests : IntegrationTest() {
}
private companion object {
val logger = loggerFor<AttachmentLoadingTests>()
private val logger = contextLogger()
val isolatedJAR = AttachmentLoadingTests::class.java.getResource("isolated.jar")!!
val ISOLATED_CONTRACT_ID = "net.corda.finance.contracts.isolated.AnotherDummyContract"
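Several files in this commit swap loggerFor<SomeClass>() for contextLogger() inside companion objects. A minimal sketch of how such a helper can be written, assuming it derives the logger name from the companion object's enclosing class (the actual utility in net.corda.core.utilities may differ in detail):
import org.slf4j.Logger
import org.slf4j.LoggerFactory
// Hedged sketch: when called from a companion object, javaClass.enclosingClass is the outer class,
// so `private val log = contextLogger()` logs under the outer class name rather than "Companion".
fun Any.contextLogger(): Logger = LoggerFactory.getLogger(javaClass.enclosingClass ?: javaClass)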

View File

@ -2,6 +2,9 @@ package net.corda.node.shell;
// See the comments at the top of run.java
import net.corda.core.messaging.CordaRPCOps;
import net.corda.node.utilities.ANSIProgressRenderer;
import net.corda.node.utilities.CRaSHNSIProgressRenderer;
import org.crsh.cli.*;
import org.crsh.command.*;
import org.crsh.text.*;
@ -9,6 +12,7 @@ import org.crsh.text.ui.TableElement;
import java.util.*;
import static net.corda.node.services.messaging.RPCServerKt.CURRENT_RPC_CONTEXT;
import static net.corda.node.shell.InteractiveShell.*;
@Man(
@ -25,25 +29,27 @@ public class FlowShellCommand extends InteractiveShellCommand {
@Usage("The class name of the flow to run, or an unambiguous substring") @Argument String name,
@Usage("The data to pass as input") @Argument(unquote = false) List<String> input
) {
startFlow(name, input, out);
startFlow(name, input, out, ops(), ansiProgressRenderer());
}
// TODO Limit number of flows shown option?
@Command
@Usage("watch information about state machines running on the node with result information")
public void watch(InvocationContext<TableElement> context) throws Exception {
runStateMachinesView(out);
runStateMachinesView(out, ops());
}
static void startFlow(@Usage("The class name of the flow to run, or an unambiguous substring") @Argument String name,
@Usage("The data to pass as input") @Argument(unquote = false) List<String> input,
RenderPrintWriter out) {
RenderPrintWriter out,
CordaRPCOps rpcOps,
ANSIProgressRenderer ansiProgressRenderer) {
if (name == null) {
out.println("You must pass a name for the flow, see 'man flow'", Color.red);
return;
}
String inp = input == null ? "" : String.join(" ", input).trim();
runFlowByNameFragment(name, inp, out);
runFlowByNameFragment(name, inp, out, rpcOps, ansiProgressRenderer != null ? ansiProgressRenderer : new CRaSHNSIProgressRenderer(out) );
}
@Command

View File

@ -2,6 +2,8 @@ package net.corda.node.shell;
// A simple forwarder to the "flow start" command, for easier typing.
import net.corda.node.utilities.ANSIProgressRenderer;
import net.corda.node.utilities.CRaSHNSIProgressRenderer;
import org.crsh.cli.*;
import java.util.*;
@ -11,6 +13,7 @@ public class StartShellCommand extends InteractiveShellCommand {
@Man("An alias for 'flow start'. Example: \"start Yo target: Some other company\"")
public void main(@Usage("The class name of the flow to run, or an unambiguous substring") @Argument String name,
@Usage("The data to pass as input") @Argument(unquote = false) List<String> input) {
FlowShellCommand.startFlow(name, input, out);
ANSIProgressRenderer ansiProgressRenderer = ansiProgressRenderer();
FlowShellCommand.startFlow(name, input, out, ops(), ansiProgressRenderer != null ? ansiProgressRenderer : new CRaSHNSIProgressRenderer(out));
}
}

View File

@ -36,6 +36,7 @@ import net.corda.node.internal.cordapp.CordappProviderInternal
import net.corda.node.services.ContractUpgradeHandler
import net.corda.node.services.FinalityHandler
import net.corda.node.services.NotaryChangeHandler
import net.corda.node.services.RPCUserService
import net.corda.node.services.api.*
import net.corda.node.services.config.BFTSMaRtConfiguration
import net.corda.node.services.config.NodeConfiguration
@ -55,11 +56,11 @@ import net.corda.node.services.transactions.*
import net.corda.node.services.upgrade.ContractUpgradeServiceImpl
import net.corda.node.services.vault.NodeVaultService
import net.corda.node.services.vault.VaultSoftLockManager
import net.corda.node.shell.InteractiveShell
import net.corda.node.utilities.*
import org.apache.activemq.artemis.utils.ReusableLatch
import org.slf4j.Logger
import rx.Observable
import rx.subjects.PublishSubject
import java.io.IOException
import java.lang.reflect.InvocationTargetException
import java.security.KeyPair
@ -119,7 +120,6 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
protected val services: ServiceHubInternal get() = _services
private lateinit var _services: ServiceHubInternalImpl
protected val nodeStateObservable: PublishSubject<NodeState> = PublishSubject.create<NodeState>()
protected var myNotaryIdentity: PartyAndCertificate? = null
protected lateinit var checkpointStorage: CheckpointStorage
protected lateinit var smm: StateMachineManager
@ -130,6 +130,8 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
protected val _nodeReadyFuture = openFuture<Unit>()
protected val networkMapClient: NetworkMapClient? by lazy { configuration.compatibilityZoneURL?.let(::NetworkMapClient) }
lateinit var userService: RPCUserService get
/** Completes once the node has successfully registered with the network map service
* or has loaded network map data from local database */
val nodeReadyFuture: CordaFuture<Unit>
@ -211,8 +213,7 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
registerCordappFlows()
_services.rpcFlows += cordappLoader.cordapps.flatMap { it.rpcFlows }
FlowLogicRefFactoryImpl.classloader = cordappLoader.appClassLoader
runOnStop += network::stop
startShell(rpcOps)
Pair(StartedNodeImpl(this, _services, info, checkpointStorage, smm, attachments, network, database, rpcOps, flowStarter, notaryService), schedulerService)
}
@ -243,6 +244,10 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
}
}
open fun startShell(rpcOps: CordaRPCOps) {
InteractiveShell.startShell(configuration, rpcOps, userService, _services.identityService, _services.database)
}
private fun initNodeInfo(): Pair<Set<KeyPair>, NodeInfo> {
val (identity, identityKeyPair) = obtainIdentity(notaryConfig = null)
val keyPairs = mutableSetOf(identityKeyPair)
@ -624,9 +629,6 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
// Meanwhile, we let the remote service send us updates until the acknowledgment buffer overflows and it
// unsubscribes us forcibly, rather than blocking the shutdown process.
// Notify observers that the node is shutting down
nodeStateObservable.onNext(NodeState.SHUTTING_DOWN)
// Run shutdown hooks in opposite order to starting
for (toRun in runOnStop.reversed()) {
toRun()
@ -727,7 +729,6 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
override val attachments: AttachmentStorage get() = this@AbstractNode.attachments
override val networkService: MessagingService get() = network
override val clock: Clock get() = platformClock
override val myNodeStateObservable: Observable<NodeState> get() = nodeStateObservable
override val configuration: NodeConfiguration get() = this@AbstractNode.configuration
override fun <T : SerializeAsToken> cordaService(type: Class<T>): T {
require(type.isAnnotationPresent(CordaService::class.java)) { "${type.name} is not a Corda service" }

View File

@ -20,8 +20,8 @@ import net.corda.core.node.services.NetworkMapCache
import net.corda.core.node.services.Vault
import net.corda.core.node.services.vault.*
import net.corda.core.transactions.SignedTransaction
import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.loggerFor
import net.corda.node.services.api.FlowStarter
import net.corda.node.services.api.ServiceHubInternal
import net.corda.node.services.messaging.context
@ -117,10 +117,6 @@ internal class CordaRPCOpsImpl(
return services.myInfo
}
override fun nodeStateObservable(): Observable<NodeState> {
return services.myNodeStateObservable
}
override fun notaryIdentities(): List<Party> {
return services.networkMapCache.notaryIdentities
}
@ -142,7 +138,9 @@ internal class CordaRPCOpsImpl(
return FlowProgressHandleImpl(
id = stateMachine.id,
returnValue = stateMachine.resultFuture,
progress = stateMachine.logic.track()?.updates ?: Observable.empty()
progress = stateMachine.logic.track()?.updates ?: Observable.empty(),
stepsTreeIndexFeed = stateMachine.logic.trackStepsTreeIndex(),
stepsTreeFeed = stateMachine.logic.trackStepsTree()
)
}
@ -296,6 +294,6 @@ internal class CordaRPCOpsImpl(
}
companion object {
private val log = loggerFor<CordaRPCOpsImpl>()
private val log = contextLogger()
}
}
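The handle built here is what RPC clients consume; the new stepsTreeIndexFeed and stepsTreeFeed follow the same Observable-based pattern as the existing progress feed. A minimal client-side sketch that tracks a flow's plain progress strings over RPC (the flow class and the connected CordaRPCOps proxy are assumed to be supplied by the caller):
import net.corda.core.flows.FlowLogic
import net.corda.core.messaging.CordaRPCOps
import net.corda.core.utilities.getOrThrow
// Hedged sketch: start a tracked flow over RPC and print each progress step as it arrives.
fun <T> runAndTrack(rpc: CordaRPCOps, flowClass: Class<out FlowLogic<T>>): T {
    val handle = rpc.startTrackedFlowDynamic(flowClass)
    handle.progress.subscribe { step -> println("Progress: $step") }
    return handle.returnValue.getOrThrow()
}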

View File

@ -8,20 +8,20 @@ import net.corda.core.internal.uncheckedCast
import net.corda.core.messaging.RPCOps
import net.corda.core.node.NodeInfo
import net.corda.core.node.ServiceHub
import net.corda.core.node.services.TransactionVerifierService
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.core.utilities.loggerFor
import net.corda.core.serialization.internal.SerializationEnvironmentImpl
import net.corda.core.serialization.internal.nodeSerializationEnv
import net.corda.core.utilities.contextLogger
import net.corda.node.VersionInfo
import net.corda.node.internal.cordapp.CordappLoader
import net.corda.node.serialization.KryoServerSerializationScheme
import net.corda.node.services.RPCUserService
import net.corda.node.services.RPCUserServiceImpl
import net.corda.node.services.api.SchemaService
import net.corda.node.services.config.NodeConfiguration
import net.corda.node.services.messaging.ArtemisMessagingServer
import net.corda.node.services.messaging.MessagingService
import net.corda.node.services.messaging.NodeMessagingClient
import net.corda.node.services.config.VerifierType
import net.corda.node.services.messaging.*
import net.corda.node.services.transactions.InMemoryTransactionVerifierService
import net.corda.node.utilities.AddressUtils
import net.corda.node.utilities.AffinityExecutor
import net.corda.node.utilities.CordaPersistence
@ -49,7 +49,7 @@ open class Node(configuration: NodeConfiguration,
cordappLoader: CordappLoader = makeCordappLoader(configuration)
) : AbstractNode(configuration, createClock(configuration), versionInfo, cordappLoader) {
companion object {
private val logger = loggerFor<Node>()
private val staticLog = contextLogger()
var renderBasicInfoToConsole = true
/** Used for useful info that we always want to show, even when not logging to the console */
@ -79,8 +79,11 @@ open class Node(configuration: NodeConfiguration,
}
}
override val log: Logger get() = logger
override fun makeTransactionVerifierService() = (network as NodeMessagingClient).verifierService
override val log: Logger get() = staticLog
override fun makeTransactionVerifierService(): TransactionVerifierService = when (configuration.verifierType) {
VerifierType.OutOfProcess -> verifierMessagingClient!!.verifierService
VerifierType.InMemory -> InMemoryTransactionVerifierService(numberOfWorkers = 4)
}
private val sameVmNodeNumber = sameVmNodeCounter.incrementAndGet() // Under normal (non-test execution) it will always be "1"
@ -127,7 +130,6 @@ open class Node(configuration: NodeConfiguration,
private var shutdownHook: ShutdownHook? = null
private lateinit var userService: RPCUserService
override fun makeMessagingService(database: CordaPersistence, info: NodeInfo): MessagingService {
userService = RPCUserServiceImpl(configuration.rpcUsers)
@ -135,15 +137,18 @@ open class Node(configuration: NodeConfiguration,
val advertisedAddress = info.addresses.single()
printBasicNodeInfo("Incoming connection address", advertisedAddress.toString())
return NodeMessagingClient(
rpcMessagingClient = RPCMessagingClient(configuration, serverAddress)
verifierMessagingClient = when (configuration.verifierType) {
VerifierType.OutOfProcess -> VerifierMessagingClient(configuration, serverAddress, services.monitoringService.metrics)
VerifierType.InMemory -> null
}
return P2PMessagingClient(
configuration,
versionInfo,
serverAddress,
info.legalIdentities[0].owningKey,
serverThread,
database,
services.monitoringService.metrics,
advertisedAddress)
}
@ -202,9 +207,19 @@ open class Node(configuration: NodeConfiguration,
runOnStop += this::stop
start()
}
// Start up the MQ client.
(network as NodeMessagingClient).start(rpcOps, userService)
// Start up the MQ clients.
rpcMessagingClient.run {
runOnStop += this::stop
start(rpcOps, userService)
}
verifierMessagingClient?.run {
runOnStop += this::stop
start()
}
(network as P2PMessagingClient).apply {
runOnStop += this::stop
start()
}
}
/**
@ -273,7 +288,7 @@ open class Node(configuration: NodeConfiguration,
_startupComplete.set(Unit)
}
},
{ th -> logger.error("Unexpected exception", th) }
{ th -> staticLog.error("Unexpected exception", th) } // XXX: Why not use log?
)
shutdownHook = addShutdownHook {
stop()
@ -294,9 +309,13 @@ open class Node(configuration: NodeConfiguration,
checkpointContext = KRYO_CHECKPOINT_CONTEXT.withClassLoader(classloader))
}
private lateinit var rpcMessagingClient: RPCMessagingClient
private var verifierMessagingClient: VerifierMessagingClient? = null
/** Starts a blocking event loop for message dispatch. */
fun run() {
(network as NodeMessagingClient).run(messageBroker!!.serverControl)
rpcMessagingClient.start2(messageBroker!!.serverControl)
verifierMessagingClient?.start2()
(network as P2PMessagingClient).run()
}
private var shutdown = false

View File

@ -23,14 +23,13 @@ import java.lang.management.ManagementFactory
import java.net.InetAddress
import java.nio.file.Path
import java.nio.file.Paths
import java.time.LocalDate
import java.util.*
import kotlin.system.exitProcess
/** This class is responsible for starting a Node from command line arguments. */
open class NodeStartup(val args: Array<String>) {
companion object {
private val logger by lazy { loggerFor<Node>() }
private val logger by lazy { loggerFor<Node>() } // I guess this is lazy to allow for logging init, but why Node?
val LOGS_DIRECTORY_NAME = "logs"
val LOGS_CAN_BE_FOUND_IN_STRING = "Logs can be found in"
}
@ -118,12 +117,13 @@ open class NodeStartup(val args: Array<String>) {
Node.printBasicNodeInfo("Node for \"$name\" started up and registered in $elapsed sec")
// Don't start the shell if there's no console attached.
val runShell = !cmdlineOptions.noLocalShell && System.console() != null
startedNode.internals.startupComplete.then {
try {
InteractiveShell.startShell(cmdlineOptions.baseDirectory, runShell, cmdlineOptions.sshdServer, startedNode)
} catch (e: Throwable) {
logger.error("Shell failed to start", e)
if (!cmdlineOptions.noLocalShell && System.console() != null && conf.devMode) {
startedNode.internals.startupComplete.then {
try {
InteractiveShell.runLocalShell(startedNode)
} catch (e: Throwable) {
logger.error("Shell failed to start", e)
}
}
}
},
@ -305,11 +305,6 @@ open class NodeStartup(val args: Array<String>) {
"Computers are useless. They can only\ngive you answers. -- Picasso"
)
// TODO: Delete this after CordaCon.
val cordaCon2017date = LocalDate.of(2017, 9, 12)
val cordaConBanner = if (LocalDate.now() < cordaCon2017date)
"${Emoji.soon} Register for our Free CordaCon event : see https://goo.gl/Z15S8W" else ""
if (Emoji.hasEmojiTerminal)
messages += "Kind of like a regular database but\nwith emojis, colours and ascii art. ${Emoji.coolGuy}"
val (msg1, msg2) = messages.randomOrNull()!!.split('\n')
@ -323,8 +318,6 @@ open class NodeStartup(val args: Array<String>) {
a("--- ${versionInfo.vendor} ${versionInfo.releaseVersion} (${versionInfo.revision.take(7)}) -----------------------------------------------").
newline().
newline().
a(cordaConBanner).
newline().
reset())
}
}

View File

@ -8,14 +8,12 @@ import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
import net.corda.core.messaging.CordaRPCOps
import net.corda.core.messaging.DataFeed
import net.corda.core.messaging.NodeState
import net.corda.core.node.NodeInfo
import net.corda.core.node.services.AttachmentId
import net.corda.core.node.services.NetworkMapCache
import net.corda.core.node.services.Vault
import net.corda.core.node.services.vault.*
import net.corda.node.services.messaging.RpcAuthContext
import rx.Observable
import java.io.InputStream
import java.security.PublicKey
@ -68,8 +66,6 @@ class RpcAuthorisationProxy(private val implementation: CordaRPCOps, private val
override fun nodeInfo(): NodeInfo = guard("nodeInfo", implementation::nodeInfo)
override fun nodeStateObservable(): Observable<NodeState> = guard("nodeStateObservable", implementation::nodeStateObservable)
override fun notaryIdentities(): List<Party> = guard("notaryIdentities", implementation::notaryIdentities)
override fun addVaultTransactionNote(txnId: SecureHash, txnNote: String) = guard("addVaultTransactionNote") {

View File

@ -12,7 +12,7 @@ import net.corda.core.node.services.CordaService
import net.corda.core.schemas.MappedSchema
import net.corda.core.serialization.SerializationWhitelist
import net.corda.core.serialization.SerializeAsToken
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.contextLogger
import net.corda.node.internal.classloading.requireAnnotation
import net.corda.node.services.config.NodeConfiguration
import net.corda.nodeapi.internal.serialization.DefaultWhitelist
@ -53,8 +53,7 @@ class CordappLoader private constructor(private val cordappJarPaths: List<Restri
}
companion object {
private val logger = loggerFor<CordappLoader>()
private val logger = contextLogger()
/**
* Default cordapp dir name
*/

View File

@ -2,18 +2,24 @@ package net.corda.node.internal.cordapp
import com.google.common.collect.HashBiMap
import net.corda.core.contracts.ContractClassName
import net.corda.core.crypto.SecureHash
import net.corda.core.node.services.AttachmentStorage
import net.corda.core.cordapp.Cordapp
import net.corda.core.cordapp.CordappContext
import net.corda.core.crypto.SecureHash
import net.corda.core.node.services.AttachmentId
import net.corda.core.node.services.AttachmentStorage
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.utilities.loggerFor
import java.net.URL
/**
* Cordapp provider and store. For querying CorDapps for their attachment and vice versa.
*/
open class CordappProviderImpl(private val cordappLoader: CordappLoader, attachmentStorage: AttachmentStorage) : SingletonSerializeAsToken(), CordappProviderInternal {
companion object {
private val log = loggerFor<CordappProviderImpl>()
}
override fun getAppContext(): CordappContext {
// TODO: Use better supported APIs in Java 9
Exception().stackTrace.forEach { stackFrame ->
@ -45,7 +51,7 @@ open class CordappProviderImpl(private val cordappLoader: CordappLoader, attachm
private fun loadContractsIntoAttachmentStore(attachmentStorage: AttachmentStorage): Map<SecureHash, URL> {
val cordappsWithAttachments = cordapps.filter { !it.contractClassNames.isEmpty() }.map { it.jarPath }
val attachmentIds = cordappsWithAttachments.map { it.openStream().use { attachmentStorage.importAttachment(it) } }
val attachmentIds = cordappsWithAttachments.map { it.openStream().use { attachmentStorage.importOrGetAttachment(it) }}
return attachmentIds.zip(cordappsWithAttachments).toMap()
}

View File

@ -16,7 +16,7 @@ import net.corda.core.node.services.NetworkMapCache
import net.corda.core.node.services.NetworkMapCacheBase
import net.corda.core.node.services.TransactionStorage
import net.corda.core.transactions.SignedTransaction
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.contextLogger
import net.corda.node.internal.InitiatedFlowFactory
import net.corda.node.internal.cordapp.CordappProviderInternal
import net.corda.node.services.config.NodeConfiguration
@ -43,7 +43,7 @@ interface NetworkMapCacheBaseInternal : NetworkMapCacheBase {
interface ServiceHubInternal : ServiceHub {
companion object {
private val log = loggerFor<ServiceHubInternal>()
private val log = contextLogger()
}
override val vaultService: VaultServiceInternal

View File

@ -5,13 +5,13 @@ import net.corda.core.crypto.Crypto
import net.corda.core.crypto.SignatureScheme
import net.corda.core.identity.CordaX500Name
import net.corda.core.internal.*
import net.corda.core.utilities.loggerFor
import net.corda.node.utilities.*
import net.corda.nodeapi.config.SSLConfiguration
import net.corda.nodeapi.config.toProperties
import org.bouncycastle.asn1.x509.GeneralName
import org.bouncycastle.asn1.x509.GeneralSubtree
import org.bouncycastle.asn1.x509.NameConstraints
import org.slf4j.LoggerFactory
import java.nio.file.Path
import java.security.KeyStore
@ -19,8 +19,7 @@ fun configOf(vararg pairs: Pair<String, Any?>): Config = ConfigFactory.parseMap(
operator fun Config.plus(overrides: Map<String, Any?>): Config = ConfigFactory.parseMap(overrides).withFallback(this)
object ConfigHelper {
private val log = loggerFor<ConfigHelper>()
private val log = LoggerFactory.getLogger(javaClass)
fun loadConfig(baseDirectory: Path,
configFile: Path = baseDirectory / "node.conf",
allowMissingConfig: Boolean = false,

View File

@ -39,6 +39,7 @@ interface NodeConfiguration : NodeSSLConfiguration {
// TODO Move into DevModeOptions
val useTestClock: Boolean get() = false
val detectPublicIp: Boolean get() = true
val sshd: SSHDConfiguration?
val relay: RelayConfiguration?
}
@ -111,7 +112,9 @@ data class NodeConfigurationImpl(
override val detectPublicIp: Boolean = true,
override val activeMQServer: ActiveMqServerConfiguration,
// TODO See TODO above. Rename this to nodeInfoPollingFrequency and make it of type Duration
override val additionalNodeInfoPollingFrequencyMsec: Long = 5.seconds.toMillis()
override val additionalNodeInfoPollingFrequencyMsec: Long = 5.seconds.toMillis(),
override val sshd: SSHDConfiguration? = null
) : NodeConfiguration {
override val exportJMXto: String get() = "http"
@ -147,6 +150,7 @@ data class CertChainPolicyConfig(val role: String, private val policy: CertChain
}
}
data class SSHDConfiguration(val port: Int)
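The optional sshd block declared above is what the SSH server tests earlier in this commit enable via customOverrides; when the block is absent the field stays null and no SSH daemon is started. A minimal sketch of the mapping, assuming the same Typesafe Config (HOCON) parsing used for the rest of the node configuration:
import com.typesafe.config.ConfigFactory
// Hedged sketch: an `sshd { port = 2222 }` block in node.conf maps onto SSHDConfiguration;
// omitting the block leaves sshd null and the SSH server disabled.
val config = ConfigFactory.parseString("sshd { port = 2222 }")
val sshd: SSHDConfiguration? =
        if (config.hasPath("sshd")) SSHDConfiguration(config.getInt("sshd.port")) else null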
data class RelayConfiguration(val relayHost: String,
val remoteInboundPort: Int,
val username: String,

View File

@ -17,7 +17,7 @@ import net.corda.core.internal.until
import net.corda.core.node.StateLoader
import net.corda.core.schemas.PersistentStateRef
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.trace
import net.corda.node.internal.MutableClock
import net.corda.node.services.api.FlowStarter
@ -65,8 +65,7 @@ class NodeSchedulerService(private val clock: Clock,
: SchedulerService, SingletonSerializeAsToken() {
companion object {
private val log = loggerFor<NodeSchedulerService>()
private val log = contextLogger()
/**
* Wait until the given [Future] is complete or the deadline is reached, with support for [MutableClock] implementations
* used in demos or testing. This will substitute a Fiber compatible Future so the current

View File

@ -8,7 +8,7 @@ import net.corda.core.internal.toX509CertHolder
import net.corda.core.node.services.IdentityService
import net.corda.core.node.services.UnknownAnonymousPartyException
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.trace
import org.bouncycastle.cert.X509CertificateHolder
import java.security.InvalidAlgorithmParameterException
@ -16,7 +16,6 @@ import java.security.PublicKey
import java.security.cert.*
import java.util.concurrent.ConcurrentHashMap
import javax.annotation.concurrent.ThreadSafe
import javax.security.auth.x500.X500Principal
/**
* Simple identity service which caches parties and provides functionality for efficient lookup.
@ -33,7 +32,7 @@ class InMemoryIdentityService(identities: Iterable<PartyAndCertificate> = emptyS
trustRoot: X509CertificateHolder) : this(wellKnownIdentities, confidentialIdentities, trustRoot.cert)
companion object {
private val log = loggerFor<InMemoryIdentityService>()
private val log = contextLogger()
}
/**

View File

@ -10,11 +10,10 @@ import net.corda.core.node.services.IdentityService
import net.corda.core.node.services.UnknownAnonymousPartyException
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.utilities.debug
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.MAX_HASH_HEX_SIZE
import net.corda.core.utilities.contextLogger
import net.corda.node.utilities.AppendOnlyPersistentMap
import net.corda.node.utilities.NODE_DATABASE_PREFIX
import net.corda.node.utilities.X509Utilities
import org.bouncycastle.cert.X509CertificateHolder
import java.io.ByteArrayInputStream
import java.security.InvalidAlgorithmParameterException
@ -36,7 +35,7 @@ class PersistentIdentityService(identities: Iterable<PartyAndCertificate> = empt
trustRoot: X509CertificateHolder) : this(wellKnownIdentities, confidentialIdentities, trustRoot.cert)
companion object {
private val log = loggerFor<PersistentIdentityService>()
private val log = contextLogger()
private val certFactory: CertificateFactory = CertificateFactory.getInstance("X.509")
fun createPKMap(): AppendOnlyPersistentMap<SecureHash, PartyAndCertificate, PersistentIdentity, String> {

View File

@ -0,0 +1,58 @@
package net.corda.node.services.messaging
import net.corda.core.serialization.internal.nodeSerializationEnv
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.core.utilities.loggerFor
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.NODE_USER
import net.corda.nodeapi.ArtemisTcpTransport
import net.corda.nodeapi.ConnectionDirection
import net.corda.nodeapi.config.SSLConfiguration
import org.apache.activemq.artemis.api.core.client.*
import org.apache.activemq.artemis.api.core.client.ActiveMQClient.DEFAULT_ACK_BATCH_SIZE
class ArtemisMessagingClient(private val config: SSLConfiguration, private val serverAddress: NetworkHostAndPort) {
companion object {
private val log = loggerFor<ArtemisMessagingClient>()
}
class Started(val sessionFactory: ClientSessionFactory, val session: ClientSession, val producer: ClientProducer)
var started: Started? = null
private set
fun start(): Started = synchronized(this) {
check(started == null) { "start can't be called twice" }
log.info("Connecting to message broker: $serverAddress")
// TODO Add broker CN to config for host verification in case the embedded broker isn't used
val tcpTransport = ArtemisTcpTransport.tcpTransport(ConnectionDirection.Outbound(), serverAddress, config)
val locator = ActiveMQClient.createServerLocatorWithoutHA(tcpTransport).apply {
// Never time out on our loopback Artemis connections. If we switch back to using the InVM transport this
// would be the default and the two lines below can be deleted.
connectionTTL = -1
clientFailureCheckPeriod = -1
minLargeMessageSize = ArtemisMessagingServer.MAX_FILE_SIZE
isUseGlobalPools = nodeSerializationEnv != null
}
val sessionFactory = locator.createSessionFactory()
// Login using the node username. The broker will authenticate us as its node (as opposed to another peer)
// using our TLS certificate.
// Note that the acknowledgement of messages is not flushed to the Artemis journal until the default buffer
// size of 1MB is acknowledged.
val session = sessionFactory!!.createSession(NODE_USER, NODE_USER, false, true, true, locator.isPreAcknowledge, DEFAULT_ACK_BATCH_SIZE)
session.start()
// Create a general purpose producer.
val producer = session.createProducer()
return Started(sessionFactory, session, producer).also { started = it }
}
fun stop() = synchronized(this) {
started!!.run {
producer.close()
// Ensure any trailing messages are committed to the journal
session.commit()
// Closing the factory closes all the sessions it produced as well.
sessionFactory.close()
}
started = null
}
}
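A short usage sketch for the client above, assuming the caller lives alongside it and is handed the node's SSL configuration and broker address; it connects, publishes one durable message and shuts down:
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.nodeapi.config.SSLConfiguration
// Hypothetical caller; the queue name and payload are illustrative only.
fun publishOnce(config: SSLConfiguration, serverAddress: NetworkHostAndPort, queue: String, payload: ByteArray) {
    val client = ArtemisMessagingClient(config, serverAddress)
    val started = client.start()
    try {
        val message = started.session.createMessage(true).apply { bodyBuffer.writeBytes(payload) }
        started.producer.send(queue, message)
    } finally {
        client.stop()   // commits the session and closes the factory, as shown above
    }
}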

View File

@ -12,10 +12,7 @@ import net.corda.core.node.NodeInfo
import net.corda.core.node.services.NetworkMapCache
import net.corda.core.node.services.NetworkMapCache.MapChange
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.core.utilities.debug
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.parsePublicKeyBase58
import net.corda.core.utilities.*
import net.corda.node.internal.Node
import net.corda.node.services.RPCUserService
import net.corda.node.services.config.NodeConfiguration
@ -99,7 +96,7 @@ class ArtemisMessagingServer(private val config: NodeConfiguration,
val networkMapCache: NetworkMapCache,
val userService: RPCUserService) : SingletonSerializeAsToken() {
companion object {
private val log = loggerFor<ArtemisMessagingServer>()
private val log = contextLogger()
/** 10 MiB maximum allowed file size for attachments, including message headers. TODO: acquire this value from Network Map when supported. */
@JvmStatic
val MAX_FILE_SIZE = 10485760
@ -235,8 +232,10 @@ class ArtemisMessagingServer(private val config: NodeConfiguration,
securityRoles["$INTERNAL_PREFIX#"] = setOf(nodeInternalRole) // Do not add any other roles here as it's only for the node
securityRoles[P2P_QUEUE] = setOf(nodeInternalRole, restrictedRole(PEER_ROLE, send = true))
securityRoles[RPCApi.RPC_SERVER_QUEUE_NAME] = setOf(nodeInternalRole, restrictedRole(RPC_ROLE, send = true))
// TODO remove the NODE_USER role once the webserver doesn't need it
// TODO: remove the NODE_USER role below once the webserver doesn't need it anymore.
securityRoles["${RPCApi.RPC_CLIENT_QUEUE_NAME_PREFIX}.$NODE_USER.#"] = setOf(nodeInternalRole)
// Each RPC user must have its own role and its own queue. This prevents users accessing each other's queues
// and stealing RPC responses.
for ((username) in userService.users) {
securityRoles["${RPCApi.RPC_CLIENT_QUEUE_NAME_PREFIX}.$username.#"] = setOf(
nodeInternalRole,
@ -417,7 +416,7 @@ private class VerifyingNettyConnector(configuration: MutableMap<String, Any>,
protocolManager: ClientProtocolManager?) :
NettyConnector(configuration, handler, listener, closeExecutor, threadPool, scheduledThreadPool, protocolManager) {
companion object {
private val log = loggerFor<VerifyingNettyConnector>()
private val log = contextLogger()
}
private val sslEnabled = ConfigurationHelper.getBooleanProperty(TransportConstants.SSL_ENABLED_PROP_NAME, TransportConstants.DEFAULT_SSL_ENABLED, configuration)
@ -540,8 +539,7 @@ class NodeLoginModule : LoginModule {
const val VERIFIER_ROLE = "SystemRoles/Verifier"
const val CERT_CHAIN_CHECKS_OPTION_NAME = "CertChainChecks"
val log = loggerFor<NodeLoginModule>()
private val log = contextLogger()
}
private var loginSucceeded: Boolean = false

View File

@ -1,6 +1,5 @@
package net.corda.node.services.messaging
import com.google.common.util.concurrent.ListenableFuture
import net.corda.core.concurrent.CordaFuture
import net.corda.core.identity.CordaX500Name
import net.corda.core.internal.concurrent.openFuture
@ -133,14 +132,6 @@ interface MessagingService {
/** Returns an address that refers to this node. */
val myAddress: SingleMessageRecipient
/**
* Initiates shutdown: if called from a thread that isn't controlled by the executor passed to the constructor
* then this will block until all in-flight messages have finished being handled and acknowledged. If called
* from a thread that's a part of the [net.corda.node.utilities.AffinityExecutor] given to the constructor,
* it returns immediately and shutdown is asynchronous.
*/
fun stop()
}
/**

View File

@ -1,50 +1,32 @@
package net.corda.node.services.messaging
import com.codahale.metrics.MetricRegistry
import net.corda.core.crypto.random63BitValue
import net.corda.core.identity.CordaX500Name
import net.corda.core.internal.ThreadBox
import net.corda.core.messaging.CordaRPCOps
import net.corda.core.messaging.MessageRecipients
import net.corda.core.messaging.RPCOps
import net.corda.core.messaging.SingleMessageRecipient
import net.corda.core.node.services.PartyInfo
import net.corda.core.node.services.TransactionVerifierService
import net.corda.core.serialization.SerializationDefaults
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.serialization.deserialize
import net.corda.core.serialization.internal.nodeSerializationEnv
import net.corda.core.serialization.serialize
import net.corda.core.transactions.LedgerTransaction
import net.corda.core.utilities.*
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.sequence
import net.corda.core.utilities.trace
import net.corda.node.VersionInfo
import net.corda.node.services.RPCUserService
import net.corda.node.services.config.NodeConfiguration
import net.corda.node.services.config.VerifierType
import net.corda.node.services.statemachine.StateMachineManagerImpl
import net.corda.node.services.transactions.InMemoryTransactionVerifierService
import net.corda.node.services.transactions.OutOfProcessTransactionVerifierService
import net.corda.node.utilities.*
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.NODE_USER
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.P2P_QUEUE
import net.corda.nodeapi.internal.ArtemisMessagingComponent.ArtemisAddress
import net.corda.nodeapi.internal.ArtemisMessagingComponent.NodeAddress
import net.corda.nodeapi.internal.ArtemisMessagingComponent.ServiceAddress
import net.corda.nodeapi.ArtemisTcpTransport
import net.corda.nodeapi.ConnectionDirection
import net.corda.nodeapi.VerifierApi
import net.corda.nodeapi.VerifierApi.VERIFICATION_REQUESTS_QUEUE_NAME
import net.corda.nodeapi.VerifierApi.VERIFICATION_RESPONSES_QUEUE_NAME_PREFIX
import org.apache.activemq.artemis.api.core.ActiveMQObjectClosedException
import org.apache.activemq.artemis.api.core.Message.*
import org.apache.activemq.artemis.api.core.RoutingType
import org.apache.activemq.artemis.api.core.SimpleString
import org.apache.activemq.artemis.api.core.client.*
import org.apache.activemq.artemis.api.core.client.ActiveMQClient.DEFAULT_ACK_BATCH_SIZE
import org.apache.activemq.artemis.api.core.management.ActiveMQServerControl
import java.security.PublicKey
import java.time.Instant
import java.util.*
@ -77,18 +59,16 @@ import javax.persistence.Lob
* If not provided, will default to [serverAddress].
*/
@ThreadSafe
class NodeMessagingClient(private val config: NodeConfiguration,
private val versionInfo: VersionInfo,
private val serverAddress: NetworkHostAndPort,
private val myIdentity: PublicKey,
private val nodeExecutor: AffinityExecutor.ServiceAffinityExecutor,
private val database: CordaPersistence,
private val metrics: MetricRegistry,
advertisedAddress: NetworkHostAndPort = serverAddress
class P2PMessagingClient(config: NodeConfiguration,
private val versionInfo: VersionInfo,
serverAddress: NetworkHostAndPort,
private val myIdentity: PublicKey,
private val nodeExecutor: AffinityExecutor.ServiceAffinityExecutor,
private val database: CordaPersistence,
advertisedAddress: NetworkHostAndPort = serverAddress
) : SingletonSerializeAsToken(), MessagingService {
companion object {
private val log = loggerFor<NodeMessagingClient>()
private val log = contextLogger()
// This is a "property" attached to an Artemis MQ message object, which contains our own notion of "topic".
// We should probably try to unify our notion of "topic" (really, just a string that identifies an endpoint
// that will handle messages, like a URL) with the terminology used by underlying MQ libraries, to avoid
@ -99,8 +79,6 @@ class NodeMessagingClient(private val config: NodeConfiguration,
private val releaseVersionProperty = SimpleString("release-version")
private val platformVersionProperty = SimpleString("platform-version")
private val amqDelayMillis = System.getProperty("amq.delivery.delay.ms", "0").toInt()
private val verifierResponseAddress = "$VERIFICATION_RESPONSES_QUEUE_NAME_PREFIX.${random63BitValue()}"
private val messageMaxRetryCount: Int = 3
fun createProcessedMessage(): AppendOnlyPersistentMap<UUID, Instant, ProcessedMessage, String> {
@ -144,15 +122,8 @@ class NodeMessagingClient(private val config: NodeConfiguration,
}
private class InnerState {
var started = false
var running = false
var producer: ClientProducer? = null
var p2pConsumer: ClientConsumer? = null
var session: ClientSession? = null
var sessionFactory: ClientSessionFactory? = null
var rpcServer: RPCServer? = null
// Consumer for inbound client RPC messages.
var verificationResponseConsumer: ClientConsumer? = null
}
private val messagesToRedeliver = database.transaction {
@ -161,11 +132,6 @@ class NodeMessagingClient(private val config: NodeConfiguration,
private val scheduledMessageRedeliveries = ConcurrentHashMap<Long, ScheduledFuture<*>>()
val verifierService = when (config.verifierType) {
VerifierType.InMemory -> InMemoryTransactionVerifierService(numberOfWorkers = 4)
VerifierType.OutOfProcess -> createOutOfProcessVerifierService()
}
/** A registration to handle messages of different types */
data class Handler(val topicSession: TopicSession,
val callback: (ReceivedMessage, MessageHandlerRegistration) -> Unit) : MessageHandlerRegistration
@ -176,7 +142,8 @@ class NodeMessagingClient(private val config: NodeConfiguration,
private val messagingExecutor = AffinityExecutor.ServiceAffinityExecutor("Messaging", 1)
override val myAddress: SingleMessageRecipient = NodeAddress(myIdentity, advertisedAddress)
private val messageRedeliveryDelaySeconds = config.messageRedeliveryDelaySeconds.toLong()
private val artemis = ArtemisMessagingClient(config, serverAddress)
private val state = ThreadBox(InnerState())
private val handlers = CopyOnWriteArrayList<Handler>()
@ -209,54 +176,11 @@ class NodeMessagingClient(private val config: NodeConfiguration,
var recipients: ByteArray = ByteArray(0)
)
fun start(rpcOps: RPCOps, userService: RPCUserService) {
fun start() {
state.locked {
check(!started) { "start can't be called twice" }
started = true
log.info("Connecting to message broker: $serverAddress")
// TODO Add broker CN to config for host verification in case the embedded broker isn't used
val tcpTransport = ArtemisTcpTransport.tcpTransport(ConnectionDirection.Outbound(), serverAddress, config)
val locator = ActiveMQClient.createServerLocatorWithoutHA(tcpTransport).apply {
// Never time out on our loopback Artemis connections. If we switch back to using the InVM transport this
// would be the default and the two lines below can be deleted.
connectionTTL = -1
clientFailureCheckPeriod = -1
minLargeMessageSize = ArtemisMessagingServer.MAX_FILE_SIZE
isUseGlobalPools = nodeSerializationEnv != null
}
sessionFactory = locator.createSessionFactory()
// Login using the node username. The broker will authenticate us as its node (as opposed to another peer)
// using our TLS certificate.
// Note that the acknowledgement of messages is not flushed to the Artemis journal until the default buffer
// size of 1MB is acknowledged.
val session = sessionFactory!!.createSession(NODE_USER, NODE_USER, false, true, true, locator.isPreAcknowledge, DEFAULT_ACK_BATCH_SIZE)
this.session = session
session.start()
// Create a general purpose producer.
val producer = session.createProducer()
this.producer = producer
val session = artemis.start().session
// Create a queue, consumer and producer for handling P2P network messages.
p2pConsumer = session.createConsumer(P2P_QUEUE)
val myCert = loadKeyStore(config.sslKeystore, config.keyStorePassword).getX509Certificate(X509Utilities.CORDA_CLIENT_TLS)
rpcServer = RPCServer(rpcOps, NODE_USER, NODE_USER, locator, userService, CordaX500Name.build(myCert.subjectX500Principal))
fun checkVerifierCount() {
if (session.queueQuery(SimpleString(VERIFICATION_REQUESTS_QUEUE_NAME)).consumerCount == 0) {
log.warn("No connected verifier listening on $VERIFICATION_REQUESTS_QUEUE_NAME!")
}
}
if (config.verifierType == VerifierType.OutOfProcess) {
createQueueIfAbsent(VerifierApi.VERIFICATION_REQUESTS_QUEUE_NAME)
createQueueIfAbsent(verifierResponseAddress)
verificationResponseConsumer = session.createConsumer(verifierResponseAddress)
messagingExecutor.scheduleAtFixedRate(::checkVerifierCount, 0, 10, TimeUnit.SECONDS)
}
}
resumeMessageRedelivery()
@ -309,14 +233,12 @@ class NodeMessagingClient(private val config: NodeConfiguration,
/**
* Starts the p2p event loop: this method only returns once [stop] has been called.
*/
fun run(serverControl: ActiveMQServerControl) {
fun run() {
try {
val consumer = state.locked {
check(started) { "start must be called first" }
check(artemis.started != null) { "start must be called first" }
check(!running) { "run can't be called twice" }
running = true
rpcServer!!.start(serverControl)
(verifierService as? OutOfProcessTransactionVerifierService)?.start(verificationResponseConsumer!!)
// If it's null, it means we already called stop, so return immediately.
p2pConsumer ?: return
}
@ -401,10 +323,16 @@ class NodeMessagingClient(private val config: NodeConfiguration,
}
}
override fun stop() {
/**
* Initiates shutdown: if called from a thread that isn't controlled by the executor passed to the constructor
* then this will block until all in-flight messages have finished being handled and acknowledged. If called
* from a thread that's a part of the [net.corda.node.utilities.AffinityExecutor] given to the constructor,
* it returns immediately and shutdown is asynchronous.
*/
fun stop() {
val running = state.locked {
// We allow stop() to be called without a run() in between, but it must have at least been started.
check(started)
check(artemis.started != null)
val prevRunning = running
running = false
val c = p2pConsumer ?: throw IllegalStateException("stop can't be called twice")
@ -423,13 +351,7 @@ class NodeMessagingClient(private val config: NodeConfiguration,
// Only the first caller gets running == true, to protect against double stop, which seems to happen in some integration tests.
if (running) {
state.locked {
producer?.close()
producer = null
// Ensure any trailing messages are committed to the journal
session!!.commit()
// Closing the factory closes all the sessions it produced as well.
sessionFactory!!.close()
sessionFactory = null
artemis.stop()
}
}
}
@ -440,7 +362,8 @@ class NodeMessagingClient(private val config: NodeConfiguration,
messagingExecutor.fetchFrom {
state.locked {
val mqAddress = getMQAddress(target)
val artemisMessage = session!!.createMessage(true).apply {
val artemis = artemis.started!!
val artemisMessage = artemis.session.createMessage(true).apply {
putStringProperty(cordaVendorProperty, cordaVendor)
putStringProperty(releaseVersionProperty, releaseVersion)
putIntProperty(platformVersionProperty, versionInfo.platformVersion)
@ -459,15 +382,14 @@ class NodeMessagingClient(private val config: NodeConfiguration,
"Send to: $mqAddress topic: ${message.topicSession.topic} " +
"sessionID: ${message.topicSession.sessionID} uuid: ${message.uniqueMessageId}"
}
producer!!.send(mqAddress, artemisMessage)
artemis.producer.send(mqAddress, artemisMessage)
retryId?.let {
database.transaction {
messagesToRedeliver.computeIfAbsent(it, { Pair(message, target) })
}
scheduledMessageRedeliveries[it] = messagingExecutor.schedule({
sendWithRetry(0, mqAddress, artemisMessage, it)
}, config.messageRedeliveryDelaySeconds.toLong(), TimeUnit.SECONDS)
}, messageRedeliveryDelaySeconds, TimeUnit.SECONDS)
}
}
@ -498,12 +420,12 @@ class NodeMessagingClient(private val config: NodeConfiguration,
state.locked {
log.trace { "Retry #$retryCount sending message $message to $address for $retryId" }
producer!!.send(address, message)
artemis.started!!.producer.send(address, message)
}
scheduledMessageRedeliveries[retryId] = messagingExecutor.schedule({
sendWithRetry(retryCount + 1, address, message, retryId)
}, config.messageRedeliveryDelaySeconds.toLong(), TimeUnit.SECONDS)
}, messageRedeliveryDelaySeconds, TimeUnit.SECONDS)
}
override fun cancelRedelivery(retryId: Long) {
@ -533,10 +455,11 @@ class NodeMessagingClient(private val config: NodeConfiguration,
/** Attempts to create a durable queue on the broker which is bound to an address of the same name. */
private fun createQueueIfAbsent(queueName: String) {
state.alreadyLocked {
val queueQuery = session!!.queueQuery(SimpleString(queueName))
val session = artemis.started!!.session
val queueQuery = session.queueQuery(SimpleString(queueName))
if (!queueQuery.isExists) {
log.info("Create fresh queue $queueName bound on same address")
session!!.createQueue(queueName, RoutingType.MULTICAST, queueName, true)
session.createQueue(queueName, RoutingType.MULTICAST, queueName, true)
}
}
}
@ -564,22 +487,6 @@ class NodeMessagingClient(private val config: NodeConfiguration,
return NodeClientMessage(topicSession, data, uuid)
}
private fun createOutOfProcessVerifierService(): TransactionVerifierService {
return object : OutOfProcessTransactionVerifierService(metrics) {
override fun sendRequest(nonce: Long, transaction: LedgerTransaction) {
messagingExecutor.fetchFrom {
state.locked {
val message = session!!.createMessage(false)
val request = VerifierApi.VerificationRequest(nonce, transaction, SimpleString(verifierResponseAddress))
request.writeToClientMessage(message)
producer!!.send(VERIFICATION_REQUESTS_QUEUE_NAME, message)
}
}
}
}
}
// TODO Rethink PartyInfo idea and merging PeerAddress/ServiceAddress (the only difference is that Service address doesn't hold host and port)
override fun getAddressOfParty(partyInfo: PartyInfo): MessageRecipients {
return when (partyInfo) {

View File

@ -0,0 +1,29 @@
package net.corda.node.services.messaging
import net.corda.core.identity.CordaX500Name
import net.corda.core.messaging.RPCOps
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.node.services.RPCUserService
import net.corda.node.utilities.*
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.NODE_USER
import net.corda.nodeapi.config.SSLConfiguration
import org.apache.activemq.artemis.api.core.management.ActiveMQServerControl
class RPCMessagingClient(private val config: SSLConfiguration, serverAddress: NetworkHostAndPort) : SingletonSerializeAsToken() {
private val artemis = ArtemisMessagingClient(config, serverAddress)
private var rpcServer: RPCServer? = null
fun start(rpcOps: RPCOps, userService: RPCUserService) = synchronized(this) {
val locator = artemis.start().sessionFactory.serverLocator
val myCert = loadKeyStore(config.sslKeystore, config.keyStorePassword).getX509Certificate(X509Utilities.CORDA_CLIENT_TLS)
rpcServer = RPCServer(rpcOps, NODE_USER, NODE_USER, locator, userService, CordaX500Name.build(myCert.subjectX500Principal))
}
fun start2(serverControl: ActiveMQServerControl) = synchronized(this) {
rpcServer!!.start(serverControl)
}
fun stop() = synchronized(this) {
artemis.stop()
}
}
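The RPC machinery comes up in two phases, mirroring Node.start() and Node.run() earlier in this commit: start() connects to Artemis and builds the RPCServer, and start2() activates it once the embedded broker's ActiveMQServerControl is available. A wiring sketch with all inputs assumed to be supplied by the node:
import net.corda.core.messaging.RPCOps
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.node.services.RPCUserService
import net.corda.nodeapi.config.SSLConfiguration
import org.apache.activemq.artemis.api.core.management.ActiveMQServerControl
// Hypothetical wiring sketch, not the node's actual start-up code.
fun startRpcMessaging(config: SSLConfiguration, address: NetworkHostAndPort,
                      rpcOps: RPCOps, userService: RPCUserService,
                      serverControl: ActiveMQServerControl): RPCMessagingClient {
    val rpc = RPCMessagingClient(config, address)
    rpc.start(rpcOps, userService) // connect to the broker and create the RPCServer
    rpc.start2(serverControl)      // begin dispatching RPC requests
    return rpc
}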

View File

@ -24,10 +24,7 @@ import net.corda.core.messaging.RPCOps
import net.corda.core.serialization.SerializationContext
import net.corda.core.serialization.SerializationDefaults.RPC_SERVER_CONTEXT
import net.corda.core.serialization.deserialize
import net.corda.core.utilities.Try
import net.corda.core.utilities.debug
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.seconds
import net.corda.core.utilities.*
import net.corda.node.services.RPCUserService
import net.corda.node.services.logging.pushToLoggingContext
import net.corda.nodeapi.*
@ -42,6 +39,7 @@ import org.apache.activemq.artemis.api.core.client.ServerLocator
import org.apache.activemq.artemis.api.core.management.ActiveMQServerControl
import org.apache.activemq.artemis.api.core.management.CoreNotificationType
import org.apache.activemq.artemis.api.core.management.ManagementHelper
import org.slf4j.LoggerFactory
import org.slf4j.MDC
import rx.Notification
import rx.Observable
@ -90,7 +88,7 @@ class RPCServer(
private val rpcConfiguration: RPCServerConfiguration = RPCServerConfiguration.default
) {
private companion object {
val log = loggerFor<RPCServer>()
private val log = contextLogger()
}
private enum class State {
@ -442,7 +440,7 @@ class ObservableContext(
val observationSendExecutor: ExecutorService
) {
private companion object {
val log = loggerFor<ObservableContext>()
private val log = contextLogger()
}
private val serializationContextWithObservableContext = RpcServerObservableSerializer.createContext(this)
@ -465,8 +463,7 @@ class ObservableContext(
object RpcServerObservableSerializer : Serializer<Observable<*>>() {
private object RpcObservableContextKey
private val log = loggerFor<RpcServerObservableSerializer>()
private val log = LoggerFactory.getLogger(javaClass)
fun createContext(observableContext: ObservableContext): SerializationContext {
return RPC_SERVER_CONTEXT.withProperty(RpcServerObservableSerializer.RpcObservableContextKey, observableContext)
}

View File

@ -23,6 +23,7 @@ data class RpcPermissions(private val values: Set<String> = emptySet()) {
companion object {
val NONE = RpcPermissions()
val ALL = RpcPermissions(setOf("ALL"))
}
fun coverAny(permissions: Set<String>) = !values.intersect(permissions + Permissions.all()).isEmpty()

View File

@ -0,0 +1,73 @@
package net.corda.node.services.messaging
import com.codahale.metrics.MetricRegistry
import net.corda.core.crypto.random63BitValue
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.transactions.LedgerTransaction
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.core.utilities.loggerFor
import net.corda.node.services.transactions.OutOfProcessTransactionVerifierService
import net.corda.node.utilities.*
import net.corda.nodeapi.VerifierApi
import net.corda.nodeapi.VerifierApi.VERIFICATION_REQUESTS_QUEUE_NAME
import net.corda.nodeapi.VerifierApi.VERIFICATION_RESPONSES_QUEUE_NAME_PREFIX
import net.corda.nodeapi.config.SSLConfiguration
import org.apache.activemq.artemis.api.core.RoutingType
import org.apache.activemq.artemis.api.core.SimpleString
import org.apache.activemq.artemis.api.core.client.*
import java.util.concurrent.*
class VerifierMessagingClient(config: SSLConfiguration, serverAddress: NetworkHostAndPort, metrics: MetricRegistry) : SingletonSerializeAsToken() {
companion object {
private val log = loggerFor<VerifierMessagingClient>()
private val verifierResponseAddress = "$VERIFICATION_RESPONSES_QUEUE_NAME_PREFIX.${random63BitValue()}"
}
private val artemis = ArtemisMessagingClient(config, serverAddress)
/** An executor for sending messages */
private val messagingExecutor = AffinityExecutor.ServiceAffinityExecutor("Messaging", 1)
private var verificationResponseConsumer: ClientConsumer? = null
fun start(): Unit = synchronized(this) {
val session = artemis.start().session
fun checkVerifierCount() {
if (session.queueQuery(SimpleString(VERIFICATION_REQUESTS_QUEUE_NAME)).consumerCount == 0) {
log.warn("No connected verifier listening on $VERIFICATION_REQUESTS_QUEUE_NAME!")
}
}
// Attempts to create a durable queue on the broker which is bound to an address of the same name.
fun createQueueIfAbsent(queueName: String) {
val queueQuery = session.queueQuery(SimpleString(queueName))
if (!queueQuery.isExists) {
log.info("Create fresh queue $queueName bound on same address")
session.createQueue(queueName, RoutingType.MULTICAST, queueName, true)
}
}
createQueueIfAbsent(VERIFICATION_REQUESTS_QUEUE_NAME)
createQueueIfAbsent(verifierResponseAddress)
verificationResponseConsumer = session.createConsumer(verifierResponseAddress)
messagingExecutor.scheduleAtFixedRate(::checkVerifierCount, 0, 10, TimeUnit.SECONDS)
}
fun start2() = synchronized(this) {
verifierService.start(verificationResponseConsumer!!)
}
fun stop() = synchronized(this) {
artemis.stop()
}
internal val verifierService = OutOfProcessTransactionVerifierService(metrics) { nonce, transaction ->
messagingExecutor.fetchFrom {
sendRequest(nonce, transaction)
}
}
private fun sendRequest(nonce: Long, transaction: LedgerTransaction) = synchronized(this) {
val started = artemis.started!!
val message = started.session.createMessage(false)
val request = VerifierApi.VerificationRequest(nonce, transaction, SimpleString(verifierResponseAddress))
request.writeToClientMessage(message)
started.producer.send(VERIFICATION_REQUESTS_QUEUE_NAME, message)
}
}
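A rough sketch of the intended lifecycle of this new client, assuming sslConfig, brokerAddress and metrics already exist and an Artemis broker is reachable:

// Sketch only: start() creates/binds the request and response queues and schedules the
// verifier-count warning, start2() hands the response consumer to the verifier service,
// and stop() closes the Artemis connection.
val client = VerifierMessagingClient(sslConfig, brokerAddress, metrics)
client.start()
client.start2()
// client.verifierService.verify(transaction) then returns a future completed by a remote verifier.
client.stop()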

View File

@ -8,7 +8,7 @@ import net.corda.core.internal.openHttpConnection
import net.corda.core.node.NodeInfo
import net.corda.core.serialization.deserialize
import net.corda.core.serialization.serialize
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.minutes
import net.corda.core.utilities.seconds
import net.corda.node.services.api.NetworkMapCacheInternal
@ -25,10 +25,6 @@ import java.util.concurrent.Executors
import java.util.concurrent.TimeUnit
class NetworkMapClient(compatibilityZoneURL: URL) {
companion object {
val logger = loggerFor<NetworkMapClient>()
}
private val networkMapUrl = URL("$compatibilityZoneURL/network-map")
fun publish(signedNodeInfo: SignedData<NodeInfo>) {
@ -73,7 +69,7 @@ class NetworkMapUpdater(private val networkMapCache: NetworkMapCacheInternal,
private val fileWatcher: NodeInfoWatcher,
private val networkMapClient: NetworkMapClient?) : Closeable {
companion object {
private val logger = loggerFor<NetworkMapUpdater>()
private val logger = contextLogger()
private val retryInterval = 1.minutes
}

View File

@ -7,7 +7,7 @@ import net.corda.core.internal.*
import net.corda.core.node.NodeInfo
import net.corda.core.serialization.deserialize
import net.corda.core.serialization.serialize
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.seconds
import net.corda.nodeapi.NodeInfoFilesCopier
import rx.Observable
@ -40,8 +40,7 @@ class NodeInfoWatcher(private val nodePath: Path,
val processedNodeInfoHashes: Set<SecureHash> get() = _processedNodeInfoHashes.toSet()
companion object {
private val logger = loggerFor<NodeInfoWatcher>()
private val logger = contextLogger()
/**
* Saves the given [NodeInfo] to a path.
* The node is 'encoded' as a SignedData<NodeInfo>, signed with the owning key of its first identity.

View File

@ -19,7 +19,7 @@ import net.corda.core.node.services.PartyInfo
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.serialization.serialize
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.contextLogger
import net.corda.node.services.api.NetworkMapCacheBaseInternal
import net.corda.node.services.api.NetworkMapCacheInternal
import net.corda.node.utilities.CordaPersistence
@ -63,7 +63,7 @@ class NetworkMapCacheImpl(
@ThreadSafe
open class PersistentNetworkMapCache(private val database: CordaPersistence) : SingletonSerializeAsToken(), NetworkMapCacheBaseInternal {
companion object {
val logger = loggerFor<PersistentNetworkMapCache>()
private val logger = contextLogger()
}
// TODO Small explanation, partyNodes and registeredNodes is left in memory as it was before, because it will be removed in

View File

@ -3,7 +3,7 @@ package net.corda.node.services.persistence
import net.corda.core.identity.AbstractParty
import net.corda.core.identity.CordaX500Name
import net.corda.core.node.services.IdentityService
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.contextLogger
import org.hibernate.type.descriptor.WrapperOptions
import org.hibernate.type.descriptor.java.AbstractTypeDescriptor
import org.hibernate.type.descriptor.java.ImmutableMutabilityPlan
@ -11,7 +11,7 @@ import org.hibernate.type.descriptor.java.MutabilityPlan
class AbstractPartyDescriptor(identitySvc: () -> IdentityService) : AbstractTypeDescriptor<AbstractParty>(AbstractParty::class.java) {
companion object {
private val log = loggerFor<AbstractPartyDescriptor>()
private val log = contextLogger()
}
private val identityService: IdentityService by lazy(identitySvc)

View File

@ -3,7 +3,7 @@ package net.corda.node.services.persistence
import net.corda.core.identity.AbstractParty
import net.corda.core.identity.CordaX500Name
import net.corda.core.node.services.IdentityService
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.contextLogger
import javax.persistence.AttributeConverter
import javax.persistence.Converter
@ -14,7 +14,7 @@ import javax.persistence.Converter
@Converter(autoApply = true)
class AbstractPartyToX500NameAsStringConverter(identitySvc: () -> IdentityService) : AttributeConverter<AbstractParty, String> {
companion object {
private val log = loggerFor<AbstractPartyToX500NameAsStringConverter>()
private val log = contextLogger()
}
private val identityService: IdentityService by lazy(identitySvc)

View File

@ -3,8 +3,7 @@ package net.corda.node.services.persistence
import net.corda.core.internal.castIfPossible
import net.corda.core.node.services.IdentityService
import net.corda.core.schemas.MappedSchema
import net.corda.core.utilities.OpaqueBytes
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.toHexString
import net.corda.node.services.api.SchemaService
import net.corda.node.utilities.DatabaseTransactionManager
@ -29,7 +28,7 @@ import java.util.concurrent.ConcurrentHashMap
class HibernateConfiguration(val schemaService: SchemaService, private val databaseProperties: Properties, private val createIdentityService: () -> IdentityService) {
companion object {
val logger = loggerFor<HibernateConfiguration>()
private val logger = contextLogger()
}
// TODO: make this a guava cache or similar to limit ability for this to grow forever.

View File

@ -1,32 +1,31 @@
package net.corda.node.services.persistence
import com.codahale.metrics.MetricRegistry
import net.corda.core.internal.VisibleForTesting
import com.google.common.hash.HashCode
import com.google.common.hash.Hashing
import com.google.common.hash.HashingInputStream
import com.google.common.io.CountingInputStream
import net.corda.core.CordaRuntimeException
import net.corda.core.contracts.*
import net.corda.core.internal.AbstractAttachment
import net.corda.core.contracts.Attachment
import net.corda.core.crypto.SecureHash
import net.corda.core.internal.AbstractAttachment
import net.corda.core.internal.VisibleForTesting
import net.corda.core.node.services.AttachmentId
import net.corda.core.node.services.AttachmentStorage
import net.corda.core.node.services.vault.*
import net.corda.core.node.services.vault.AttachmentQueryCriteria
import net.corda.core.node.services.vault.AttachmentSort
import net.corda.core.serialization.*
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.contextLogger
import net.corda.node.services.vault.HibernateAttachmentQueryCriteriaParser
import net.corda.node.utilities.DatabaseTransactionManager
import net.corda.node.utilities.NODE_DATABASE_PREFIX
import net.corda.node.utilities.currentDBSession
import java.io.*
import java.lang.Exception
import java.nio.file.Paths
import java.time.Instant
import java.util.jar.JarInputStream
import javax.annotation.concurrent.ThreadSafe
import javax.persistence.*
import javax.persistence.Column
/**
* Stores attachments using Hibernate to database.
@ -34,6 +33,29 @@ import javax.persistence.Column
@ThreadSafe
class NodeAttachmentService(metrics: MetricRegistry) : AttachmentStorage, SingletonSerializeAsToken() {
companion object {
private val log = contextLogger()
// Just iterate over the entries with verification enabled: should be good enough to catch mistakes.
// Note that JarInputStream won't throw any kind of error at all if the file stream is in fact not
// a ZIP! It'll just pretend it's an empty archive, which is kind of stupid but that's how it works.
// So we have to check to ensure we found at least one item.
private fun checkIsAValidJAR(stream: InputStream) {
val jar = JarInputStream(stream, true)
var count = 0
while (true) {
val cursor = jar.nextJarEntry ?: break
val entryPath = Paths.get(cursor.name)
// Security check to stop zips trying to escape their rightful place.
require(!entryPath.isAbsolute) { "Path $entryPath is absolute" }
require(entryPath.normalize() == entryPath) { "Path $entryPath is not normalised" }
require(!('\\' in cursor.name || cursor.name == "." || cursor.name == "..")) { "Bad character in $entryPath" }
count++
}
require(count > 0) { "Stream is either empty or not a JAR/ZIP" }
}
}
@Entity
@Table(name = "${NODE_DATABASE_PREFIX}attachments",
indexes = arrayOf(Index(name = "att_id_idx", columnList = "att_id")))
@ -56,10 +78,6 @@ class NodeAttachmentService(metrics: MetricRegistry) : AttachmentStorage, Single
var filename: String? = null
) : Serializable
companion object {
private val log = loggerFor<NodeAttachmentService>()
}
@VisibleForTesting
var checkAttachmentsOnLoad = true
@ -171,6 +189,24 @@ class NodeAttachmentService(metrics: MetricRegistry) : AttachmentStorage, Single
return import(jar, uploader, filename)
}
fun getAttachmentIdAndBytes(jar: InputStream): Pair<AttachmentId, ByteArray> {
val hs = HashingInputStream(Hashing.sha256(), jar)
val bytes = hs.readBytes()
checkIsAValidJAR(ByteArrayInputStream(bytes))
val id = SecureHash.SHA256(hs.hash().asBytes())
return Pair(id, bytes)
}
override fun hasAttachment(attachmentId: AttachmentId): Boolean {
val session = currentDBSession()
val criteriaBuilder = session.criteriaBuilder
val criteriaQuery = criteriaBuilder.createQuery(Long::class.java)
val attachments = criteriaQuery.from(NodeAttachmentService.DBAttachment::class.java)
criteriaQuery.select(criteriaBuilder.count(criteriaQuery.from(NodeAttachmentService.DBAttachment::class.java)))
criteriaQuery.where(criteriaBuilder.equal(attachments.get<String>(DBAttachment::attId.name), attachmentId.toString()))
return (session.createQuery(criteriaQuery).singleResult > 0)
}
// TODO: PLT-147: The attachment should be randomised to prevent brute force guessing and thus privacy leaks.
private fun import(jar: InputStream, uploader: String?, filename: String?): AttachmentId {
require(jar !is JarInputStream)
@ -180,27 +216,28 @@ class NodeAttachmentService(metrics: MetricRegistry) : AttachmentStorage, Single
// To do this we must pipe stream into the database without knowing its hash, which we will learn only once
// the insert/upload is complete. We can then query to see if it's a duplicate and if so, erase, and if not
// set the hash field of the new attachment record.
val hs = HashingInputStream(Hashing.sha256(), jar)
val bytes = hs.readBytes()
checkIsAValidJAR(ByteArrayInputStream(bytes))
val id = SecureHash.SHA256(hs.hash().asBytes())
val session = currentDBSession()
val criteriaBuilder = session.criteriaBuilder
val criteriaQuery = criteriaBuilder.createQuery(Long::class.java)
val attachments = criteriaQuery.from(NodeAttachmentService.DBAttachment::class.java)
criteriaQuery.select(criteriaBuilder.count(criteriaQuery.from(NodeAttachmentService.DBAttachment::class.java)))
criteriaQuery.where(criteriaBuilder.equal(attachments.get<String>(DBAttachment::attId.name), id.toString()))
val count = session.createQuery(criteriaQuery).singleResult
if (count == 0L) {
val (id, bytes) = getAttachmentIdAndBytes(jar)
if (!hasAttachment(id)) {
checkIsAValidJAR(ByteArrayInputStream(bytes))
val session = currentDBSession()
val attachment = NodeAttachmentService.DBAttachment(attId = id.toString(), content = bytes, uploader = uploader, filename = filename)
session.save(attachment)
attachmentCount.inc()
log.info("Stored new attachment $id")
return id
} else {
throw java.nio.file.FileAlreadyExistsException(id.toString())
}
}
return id
override fun importOrGetAttachment(jar: InputStream): AttachmentId {
try {
return importAttachment(jar)
}
catch (faee: java.nio.file.FileAlreadyExistsException) {
return AttachmentId.parse(faee.message!!)
}
}
override fun queryAttachments(criteria: AttachmentQueryCriteria, sorting: AttachmentSort?): List<AttachmentId> {
@ -227,22 +264,4 @@ class NodeAttachmentService(metrics: MetricRegistry) : AttachmentStorage, Single
}
private fun checkIsAValidJAR(stream: InputStream) {
// Just iterate over the entries with verification enabled: should be good enough to catch mistakes.
// Note that JarInputStream won't throw any kind of error at all if the file stream is in fact not
// a ZIP! It'll just pretend it's an empty archive, which is kind of stupid but that's how it works.
// So we have to check to ensure we found at least one item.
val jar = JarInputStream(stream, true)
var count = 0
while (true) {
val cursor = jar.nextJarEntry ?: break
val entryPath = Paths.get(cursor.name)
// Security check to stop zips trying to escape their rightful place.
require(!entryPath.isAbsolute) { "Path $entryPath is absolute" }
require(entryPath.normalize() == entryPath) { "Path $entryPath is not normalised" }
require(!('\\' in cursor.name || cursor.name == "." || cursor.name == "..")) { "Bad character in $entryPath" }
count++
}
require(count > 0) { "Stream is either empty or not a JAR/ZIP" }
}
}
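With the refactoring above, an import now hashes and validates the stream via getAttachmentIdAndBytes and checks hasAttachment before inserting, throwing FileAlreadyExistsException on duplicates. A rough usage sketch, assuming an AttachmentStorage instance called attachmentStorage, a jarBytes byte array and an open database transaction:

// Illustrative only; names are placeholders.
val id = try {
    java.io.ByteArrayInputStream(jarBytes).use { attachmentStorage.importAttachment(it) }
} catch (e: java.nio.file.FileAlreadyExistsException) {
    AttachmentId.parse(e.message!!)   // the same fallback importOrGetAttachment applies internally
}
check(attachmentStorage.hasAttachment(id))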

View File

@ -7,8 +7,8 @@ import net.corda.core.internal.VisibleForTesting
import net.corda.core.node.services.Vault
import net.corda.core.schemas.MappedSchema
import net.corda.core.schemas.PersistentStateRef
import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.debug
import net.corda.core.utilities.loggerFor
import net.corda.node.services.persistence.HibernateConfiguration
import net.corda.node.utilities.DatabaseTransactionManager
import org.hibernate.FlushMode
@ -20,7 +20,7 @@ import rx.Observable
// TODO: Manage version evolution of the schemas via additional tooling.
class HibernateObserver private constructor(private val config: HibernateConfiguration) {
companion object {
private val log = loggerFor<HibernateObserver>()
private val log = contextLogger()
@JvmStatic
fun install(vaultUpdates: Observable<Vault.Update<ContractState>>, config: HibernateConfiguration): HibernateObserver {
val observer = HibernateObserver(config)

View File

@ -14,7 +14,7 @@ import net.corda.node.services.api.SchemaService
import net.corda.node.services.events.NodeSchedulerService
import net.corda.node.services.identity.PersistentIdentityService
import net.corda.node.services.keys.PersistentKeyManagementService
import net.corda.node.services.messaging.NodeMessagingClient
import net.corda.node.services.messaging.P2PMessagingClient
import net.corda.node.services.persistence.DBCheckpointStorage
import net.corda.node.services.persistence.DBTransactionMappingStorage
import net.corda.node.services.persistence.DBTransactionStorage
@ -47,8 +47,8 @@ class NodeSchemaService(cordappLoader: CordappLoader?) : SchemaService, Singleto
PersistentUniquenessProvider.PersistentNotaryCommit::class.java,
NodeSchedulerService.PersistentScheduledState::class.java,
NodeAttachmentService.DBAttachment::class.java,
NodeMessagingClient.ProcessedMessage::class.java,
NodeMessagingClient.RetryMessage::class.java,
P2PMessagingClient.ProcessedMessage::class.java,
P2PMessagingClient.RetryMessage::class.java,
NodeAttachmentService.DBAttachment::class.java,
RaftUniquenessProvider.RaftState::class.java,
BFTNonValidatingNotaryService.PersistedCommittedState::class.java,

View File

@ -3,7 +3,7 @@ package net.corda.node.services.statemachine
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.FlowStackSnapshot
import net.corda.core.flows.StateMachineRunId
import net.corda.core.utilities.loggerFor
import org.slf4j.LoggerFactory
import java.nio.file.Path
import java.util.*
@ -26,8 +26,7 @@ interface FlowStackSnapshotFactory {
fun persistAsJsonFile(flowClass: Class<out FlowLogic<*>>, baseDir: Path, flowId: StateMachineRunId)
private object DefaultFlowStackSnapshotFactory : FlowStackSnapshotFactory {
private val log = loggerFor<DefaultFlowStackSnapshotFactory>()
private val log = LoggerFactory.getLogger(javaClass)
override fun getFlowStackSnapshot(flowClass: Class<out FlowLogic<*>>): FlowStackSnapshot? {
log.warn("Flow stack snapshot are not supposed to be used in a production deployment")
return null

View File

@ -27,10 +27,7 @@ import net.corda.core.serialization.SerializationDefaults.SERIALIZATION_FACTORY
import net.corda.core.serialization.SerializedBytes
import net.corda.core.serialization.deserialize
import net.corda.core.serialization.serialize
import net.corda.core.utilities.Try
import net.corda.core.utilities.debug
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.trace
import net.corda.core.utilities.*
import net.corda.node.internal.InitiatedFlowFactory
import net.corda.node.services.api.Checkpoint
import net.corda.node.services.api.CheckpointStorage
@ -67,7 +64,7 @@ class StateMachineManagerImpl(
inner class FiberScheduler : FiberExecutorScheduler("Same thread scheduler", executor)
companion object {
private val logger = loggerFor<StateMachineManagerImpl>()
private val logger = contextLogger()
internal val sessionTopic = TopicSession("platform.session")
init {

View File

@ -16,7 +16,6 @@ import net.corda.core.node.services.NotaryService
import net.corda.core.node.services.TimeWindowChecker
import net.corda.core.node.services.UniquenessProvider
import net.corda.core.schemas.PersistentStateRef
import net.corda.core.serialization.SerializationDefaults
import net.corda.core.serialization.deserialize
import net.corda.core.serialization.serialize
import net.corda.core.transactions.FilteredTransaction
@ -41,7 +40,7 @@ class BFTNonValidatingNotaryService(override val services: ServiceHubInternal,
cluster: BFTSMaRt.Cluster) : NotaryService() {
companion object {
val id = constructId(validating = false, bft = true)
private val log = loggerFor<BFTNonValidatingNotaryService>()
private val log = contextLogger()
}
private val client: BFTSMaRt.Client

View File

@ -29,8 +29,8 @@ import net.corda.core.serialization.deserialize
import net.corda.core.serialization.serialize
import net.corda.core.transactions.FilteredTransaction
import net.corda.core.transactions.SignedTransaction
import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.debug
import net.corda.core.utilities.loggerFor
import net.corda.node.services.api.ServiceHubInternal
import net.corda.node.services.transactions.BFTSMaRt.Client
import net.corda.node.services.transactions.BFTSMaRt.Replica
@ -77,7 +77,7 @@ object BFTSMaRt {
class Client(config: BFTSMaRtConfig, private val clientId: Int, private val cluster: Cluster, private val notaryService: BFTNonValidatingNotaryService) : SingletonSerializeAsToken() {
companion object {
private val log = loggerFor<Client>()
private val log = contextLogger()
}
/** A proxy for communicating with the BFT cluster */
@ -181,7 +181,7 @@ object BFTSMaRt {
protected val notaryIdentityKey: PublicKey,
private val timeWindowChecker: TimeWindowChecker) : DefaultRecoverable() {
companion object {
private val log = loggerFor<Replica>()
private val log = contextLogger()
}
private val stateManagerOverride = run {

View File

@ -2,8 +2,8 @@ package net.corda.node.services.transactions
import net.corda.core.internal.div
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.debug
import net.corda.core.utilities.loggerFor
import java.io.FileWriter
import java.io.PrintWriter
import java.net.InetAddress
@ -19,7 +19,7 @@ import java.util.concurrent.TimeUnit.MILLISECONDS
*/
class BFTSMaRtConfig(private val replicaAddresses: List<NetworkHostAndPort>, debug: Boolean, val exposeRaces: Boolean) : PathManager<BFTSMaRtConfig>(Files.createTempDirectory("bft-smart-config")) {
companion object {
private val log = loggerFor<BFTSMaRtConfig>()
private val log = contextLogger()
internal val portIsClaimedFormat = "Port %s is claimed by another replica: %s"
}

View File

@ -4,7 +4,7 @@ import io.atomix.copycat.Command
import io.atomix.copycat.Query
import io.atomix.copycat.server.Commit
import io.atomix.copycat.server.StateMachine
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.contextLogger
import net.corda.node.utilities.*
import java.util.LinkedHashMap
@ -17,7 +17,7 @@ import java.util.LinkedHashMap
*/
class DistributedImmutableMap<K : Any, V : Any, E, EK>(val db: CordaPersistence, createMap: () -> AppendOnlyPersistentMap<K, Pair<Long, V>, E, EK>) : StateMachine() {
companion object {
private val log = loggerFor<DistributedImmutableMap<*, *, *, *>>()
private val log = contextLogger()
}
object Commands {

View File

@ -11,16 +11,17 @@ import net.corda.core.internal.concurrent.OpenFuture
import net.corda.core.internal.concurrent.openFuture
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.transactions.LedgerTransaction
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.contextLogger
import net.corda.nodeapi.VerifierApi
import org.apache.activemq.artemis.api.core.client.ClientConsumer
import java.util.concurrent.ConcurrentHashMap
abstract class OutOfProcessTransactionVerifierService(
private val metrics: MetricRegistry
class OutOfProcessTransactionVerifierService(
private val metrics: MetricRegistry,
private val sendRequest: (Long, LedgerTransaction) -> Unit
) : SingletonSerializeAsToken(), TransactionVerifierService {
companion object {
val log = loggerFor<OutOfProcessTransactionVerifierService>()
private val log = contextLogger()
}
private data class VerificationHandle(
@ -60,8 +61,6 @@ abstract class OutOfProcessTransactionVerifierService(
}
}
abstract fun sendRequest(nonce: Long, transaction: LedgerTransaction)
override fun verify(transaction: LedgerTransaction): CordaFuture<*> {
log.info("Verifying ${transaction.id}")
val future = openFuture<Unit>()

View File

@ -9,11 +9,8 @@ import net.corda.core.internal.ThreadBox
import net.corda.core.node.services.UniquenessException
import net.corda.core.node.services.UniquenessProvider
import net.corda.core.schemas.PersistentStateRef
import net.corda.core.serialization.SerializationDefaults
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.serialization.deserialize
import net.corda.core.serialization.serialize
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.contextLogger
import net.corda.node.utilities.AppendOnlyPersistentMap
import net.corda.node.utilities.NODE_DATABASE_PREFIX
import java.io.Serializable
@ -62,8 +59,7 @@ class PersistentUniquenessProvider : UniquenessProvider, SingletonSerializeAsTok
private val mutex = ThreadBox(InnerState())
companion object {
private val log = loggerFor<PersistentUniquenessProvider>()
private val log = contextLogger()
fun createMap(): AppendOnlyPersistentMap<StateRef, UniquenessProvider.ConsumingTx, PersistentNotaryCommit, PersistentStateRef> =
AppendOnlyPersistentMap(
toPersistentEntityKey = { PersistentStateRef(it.txhash.toString(), it.index) },

View File

@ -26,7 +26,7 @@ import net.corda.core.serialization.SerializationDefaults
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.serialization.deserialize
import net.corda.core.serialization.serialize
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.contextLogger
import net.corda.node.services.config.RaftConfig
import net.corda.node.utilities.AppendOnlyPersistentMap
import net.corda.node.utilities.CordaPersistence
@ -49,8 +49,7 @@ import javax.persistence.*
@ThreadSafe
class RaftUniquenessProvider(private val transportConfiguration: NodeSSLConfiguration, private val db: CordaPersistence, private val metrics: MetricRegistry, private val raftConfig: RaftConfig) : UniquenessProvider, SingletonSerializeAsToken() {
companion object {
private val log = loggerFor<RaftUniquenessProvider>()
private val log = contextLogger()
fun createMap(): AppendOnlyPersistentMap<String, Pair<Long, Any>, RaftState, String> =
AppendOnlyPersistentMap(
toPersistentEntityKey = { it },
@ -76,10 +75,10 @@ class RaftUniquenessProvider(private val transportConfiguration: NodeSSLConfigur
var key: String = "",
@Lob
@Column
@Column(name = "state_value")
var value: ByteArray = ByteArray(0),
@Column
@Column(name = "state_index")
var index: Long = 0
)

View File

@ -11,7 +11,7 @@ import net.corda.core.node.services.vault.QueryCriteria.CommonQueryCriteria
import net.corda.core.schemas.PersistentState
import net.corda.core.schemas.PersistentStateRef
import net.corda.core.utilities.OpaqueBytes
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.trace
import net.corda.node.services.persistence.NodeAttachmentService
import org.hibernate.query.criteria.internal.expression.LiteralExpression
@ -107,7 +107,7 @@ class HibernateAttachmentQueryCriteriaParser(override val criteriaBuilder: Crite
AbstractQueryCriteriaParser<AttachmentQueryCriteria, AttachmentsQueryCriteriaParser, AttachmentSort>(), AttachmentsQueryCriteriaParser {
private companion object {
val log = loggerFor<HibernateAttachmentQueryCriteriaParser>()
private val log = contextLogger()
}
init {
@ -170,7 +170,7 @@ class HibernateQueryCriteriaParser(val contractStateType: Class<out ContractStat
val criteriaQuery: CriteriaQuery<Tuple>,
val vaultStates: Root<VaultSchemaV1.VaultStates>) : AbstractQueryCriteriaParser<QueryCriteria, IQueryCriteriaParser, Sort>(), IQueryCriteriaParser {
private companion object {
val log = loggerFor<HibernateQueryCriteriaParser>()
private val log = contextLogger()
}
// incrementally build list of join predicates

View File

@ -57,7 +57,7 @@ class NodeVaultService(
hibernateConfig: HibernateConfiguration
) : SingletonSerializeAsToken(), VaultServiceInternal {
private companion object {
val log = loggerFor<NodeVaultService>()
private val log = contextLogger()
}
private class InnerState {

View File

@ -29,7 +29,8 @@ object VaultSchemaV1 : MappedSchema(schemaFamily = VaultSchema.javaClass, versio
mappedTypes = listOf(VaultStates::class.java, VaultLinearStates::class.java, VaultFungibleStates::class.java, VaultTxnNote::class.java)) {
@Entity
@Table(name = "vault_states",
indexes = arrayOf(Index(name = "state_status_idx", columnList = "state_status")))
indexes = arrayOf(Index(name = "state_status_idx", columnList = "state_status"),
Index(name = "lock_id_idx", columnList = "lock_id, state_status")))
class VaultStates(
/** NOTE: serialized transaction state (including contract state) is now resolved from transaction store */
// TODO: create a distinct table to hold serialized state data (once DBTransactionStore is encrypted)

View File

@ -4,17 +4,14 @@ import net.corda.core.contracts.FungibleAsset
import net.corda.core.contracts.StateRef
import net.corda.core.flows.FlowLogic
import net.corda.core.node.services.VaultService
import net.corda.core.utilities.NonEmptySet
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.toNonEmptySet
import net.corda.core.utilities.trace
import net.corda.core.utilities.*
import net.corda.node.services.statemachine.FlowStateMachineImpl
import net.corda.node.services.statemachine.StateMachineManager
import java.util.*
class VaultSoftLockManager private constructor(private val vault: VaultService) {
companion object {
private val log = loggerFor<VaultSoftLockManager>()
private val log = contextLogger()
@JvmStatic
fun install(vault: VaultService, smm: StateMachineManager) {
val manager = VaultSoftLockManager(vault)

View File

@ -0,0 +1,35 @@
package net.corda.node.shell
import net.corda.core.context.Actor
import net.corda.core.context.InvocationContext
import net.corda.core.identity.CordaX500Name
import net.corda.core.messaging.CordaRPCOps
import net.corda.node.services.RPCUserService
import net.corda.node.services.messaging.RpcPermissions
import org.crsh.auth.AuthInfo
import org.crsh.auth.AuthenticationPlugin
import org.crsh.plugin.CRaSHPlugin
class CordaAuthenticationPlugin(val rpcOps:CordaRPCOps, val userService:RPCUserService, val nodeLegalName:CordaX500Name) : CRaSHPlugin<AuthenticationPlugin<String>>(), AuthenticationPlugin<String> {
override fun getImplementation(): AuthenticationPlugin<String> = this
override fun getName(): String = "corda"
override fun authenticate(username: String?, credential: String?): AuthInfo {
if (username == null || credential == null) {
return AuthInfo.UNSUCCESSFUL
}
val user = userService.getUser(username)
if (user != null && user.password == credential) {
val actor = Actor(Actor.Id(username), userService.id, nodeLegalName)
return CordaSSHAuthInfo(true, RPCOpsWithContext(rpcOps, InvocationContext.rpc(actor), RpcPermissions(user.permissions)))
}
return AuthInfo.UNSUCCESSFUL
}
override fun getCredentialType(): Class<String> = String::class.java
}

View File

@ -0,0 +1,9 @@
package net.corda.node.shell
import net.corda.core.messaging.CordaRPCOps
import net.corda.node.utilities.ANSIProgressRenderer
import org.crsh.auth.AuthInfo
class CordaSSHAuthInfo(val successful: Boolean, val rpcOps: CordaRPCOps, val ansiProgressRenderer: ANSIProgressRenderer? = null) : AuthInfo {
override fun isSuccessful(): Boolean = successful
}

View File

@ -9,26 +9,32 @@ import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
import com.google.common.io.Closeables
import net.corda.client.jackson.JacksonSupport
import net.corda.client.jackson.StringToMethodCallParser
import net.corda.client.rpc.PermissionException
import net.corda.core.CordaException
import net.corda.core.concurrent.CordaFuture
import net.corda.core.contracts.UniqueIdentifier
import net.corda.core.flows.FlowLogic
import net.corda.core.identity.CordaX500Name
import net.corda.core.internal.*
import net.corda.core.internal.concurrent.OpenFuture
import net.corda.core.internal.concurrent.doneFuture
import net.corda.core.internal.concurrent.openFuture
import net.corda.core.messaging.CordaRPCOps
import net.corda.core.messaging.DataFeed
import net.corda.core.messaging.FlowProgressHandle
import net.corda.core.messaging.StateMachineUpdate
import net.corda.core.utilities.getOrThrow
import net.corda.core.node.services.IdentityService
import net.corda.core.utilities.loggerFor
import net.corda.node.internal.Node
import net.corda.node.internal.StartedNode
import net.corda.node.services.RPCUserService
import net.corda.node.services.config.NodeConfiguration
import net.corda.node.services.messaging.CURRENT_RPC_CONTEXT
import net.corda.node.services.messaging.RpcAuthContext
import net.corda.node.services.messaging.RpcPermissions
import net.corda.node.services.statemachine.FlowStateMachineImpl
import net.corda.node.utilities.ANSIProgressRenderer
import net.corda.node.utilities.CordaPersistence
import net.corda.node.utilities.StdoutANSIProgressRenderer
import org.crsh.command.InvocationContext
import org.crsh.console.jline.JLineProcessor
import org.crsh.console.jline.TerminalFactory
@ -48,6 +54,7 @@ import org.crsh.util.Utils
import org.crsh.vfs.FS
import org.crsh.vfs.spi.file.FileMountFactory
import org.crsh.vfs.spi.url.ClassPathMountFactory
import org.slf4j.LoggerFactory
import rx.Observable
import rx.Subscriber
import java.io.*
@ -73,63 +80,59 @@ import kotlin.concurrent.thread
// TODO: Make it notice new shell commands added after the node started.
object InteractiveShell {
private val log = loggerFor<InteractiveShell>()
private val log = LoggerFactory.getLogger(javaClass)
private lateinit var node: StartedNode<Node>
@VisibleForTesting
internal lateinit var database: CordaPersistence
private lateinit var rpcOps:CordaRPCOps
private lateinit var userService:RPCUserService
private lateinit var identityService:IdentityService
private var shell:Shell? = null
private lateinit var nodeLegalName: CordaX500Name
/**
* Starts an interactive shell connected to the local terminal. This shell gives administrator access to the node
* internals.
*/
fun startShell(dir: Path, runLocalShell: Boolean, runSSHServer: Boolean, node: StartedNode<Node>) {
this.node = node
this.database = node.database
var runSSH = runSSHServer
fun startShell(configuration:NodeConfiguration, cordaRPCOps: CordaRPCOps, userService: RPCUserService, identityService: IdentityService, database:CordaPersistence) {
this.rpcOps = cordaRPCOps
this.userService = userService
this.identityService = identityService
this.nodeLegalName = configuration.myLegalName
this.database = database
val dir = configuration.baseDirectory
val runSshDaemon = configuration.sshd != null
val config = Properties()
if (runSSH) {
// TODO: Finish and enable SSH access.
// This means bringing the CRaSH SSH plugin into the Corda tree and applying Marek's patches
// found in https://github.com/marekdapps/crash/commit/8a37ce1c7ef4d32ca18f6396a1a9d9841f7ff643
// to that local copy, as CRaSH is no longer well maintained by the upstream and the SSH plugin
// that it comes with is based on a very old version of Apache SSHD which can't handle connections
// from newer SSH clients. It also means hooking things up to the authentication system.
Node.printBasicNodeInfo("SSH server access is not fully implemented, sorry.")
runSSH = false
}
if (runSshDaemon) {
val sshKeysDir = dir / "sshkey"
sshKeysDir.toFile().mkdirs()
if (runSSH) {
// Enable SSH access. Note: these have to be strings, even though raw object assignments also work.
config["crash.ssh.keypath"] = (dir / "sshkey").toString()
config["crash.ssh.keypath"] = (sshKeysDir / "hostkey.pem").toString()
config["crash.ssh.keygen"] = "true"
// config["crash.ssh.port"] = node.configuration.sshdAddress.port.toString()
config["crash.auth"] = "simple"
config["crash.auth.simple.username"] = "admin"
config["crash.auth.simple.password"] = "admin"
config["crash.ssh.port"] = configuration.sshd?.port.toString()
config["crash.auth"] = "corda"
}
ExternalResolver.INSTANCE.addCommand("run", "Runs a method from the CordaRPCOps interface on the node.", RunShellCommand::class.java)
ExternalResolver.INSTANCE.addCommand("flow", "Commands to work with flows. Flows are how you can change the ledger.", FlowShellCommand::class.java)
ExternalResolver.INSTANCE.addCommand("start", "An alias for 'flow start'", StartShellCommand::class.java)
val shell = ShellLifecycle(dir).start(config)
shell = ShellLifecycle(dir).start(config)
if (runSSH) {
// printBasicNodeInfo("SSH server listening on address", node.configuration.sshdAddress.toString())
if (runSshDaemon) {
Node.printBasicNodeInfo("SSH server listening on port", configuration.sshd!!.port.toString())
}
}
// Possibly bring up a local shell in the launching terminal window, unless it's disabled.
if (!runLocalShell)
return
// TODO: Automatically set up the JDBC sub-command with a connection to the database.
fun runLocalShell(node:StartedNode<Node>) {
val terminal = TerminalFactory.create()
val consoleReader = ConsoleReader("Corda", FileInputStream(FileDescriptor.`in`), System.out, terminal)
val jlineProcessor = JLineProcessor(terminal.isAnsiSupported, shell, consoleReader, System.out)
InterruptHandler { jlineProcessor.interrupt() }.install()
thread(name = "Command line shell processor", isDaemon = true) {
// Give whoever has local shell access administrator access to the node.
// TODO remove this after Shell switches to RPC
val context = RpcAuthContext(net.corda.core.context.InvocationContext.shell(), RpcPermissions.NONE)
val context = RpcAuthContext(net.corda.core.context.InvocationContext.shell(), RpcPermissions.ALL)
CURRENT_RPC_CONTEXT.set(context)
Emoji.renderIfSupported {
jlineProcessor.run()
@ -168,27 +171,25 @@ object InteractiveShell {
// Don't use the Java language plugin (we may not have tools.jar available at runtime), this
// will cause any commands using JIT Java compilation to be suppressed. In CRaSH upstream that
// is only the 'jmx' command.
return super.getPlugins().filterNot { it is JavaLanguage }
return super.getPlugins().filterNot { it is JavaLanguage } + CordaAuthenticationPlugin(rpcOps, userService, nodeLegalName)
}
}
val attributes = mapOf(
"node" to node.internals,
"services" to node.services,
"ops" to node.rpcOps,
"ops" to rpcOps,
"mapper" to yamlInputMapper
)
val context = PluginContext(discovery, attributes, commandsFS, confFS, classLoader)
context.refresh()
this.config = config
start(context)
return context.getPlugin(ShellFactory::class.java).create(null)
return context.getPlugin(ShellFactory::class.java).create(null, CordaSSHAuthInfo(false, RPCOpsWithContext(rpcOps, net.corda.core.context.InvocationContext.shell(), RpcPermissions.ALL), StdoutANSIProgressRenderer))
}
}
private val yamlInputMapper: ObjectMapper by lazy {
// Return a standard Corda Jackson object mapper, configured to use YAML by default and with extra
// serializers.
JacksonSupport.createInMemoryMapper(node.services.identityService, YAMLFactory(), true).apply {
JacksonSupport.createInMemoryMapper(identityService, YAMLFactory(), true).apply {
val rpcModule = SimpleModule()
rpcModule.addDeserializer(InputStream::class.java, InputStreamDeserializer)
rpcModule.addDeserializer(UniqueIdentifier::class.java, UniqueIdentifierDeserializer)
@ -217,42 +218,41 @@ object InteractiveShell {
/**
* Called from the 'flow' shell command. Takes a name fragment and finds a matching flow, or prints out
* the list of options if the request is ambiguous. Then parses [inputData] as constructor arguments using
* the [runFlowFromString] method and starts the requested flow using the [ANSIProgressRenderer] to draw
* the progress tracker. Ctrl-C can be used to cancel.
* the [runFlowFromString] method and starts the requested flow. Ctrl-C can be used to cancel.
*/
@JvmStatic
fun runFlowByNameFragment(nameFragment: String, inputData: String, output: RenderPrintWriter) {
val matches = node.services.rpcFlows.filter { nameFragment in it.name }
fun runFlowByNameFragment(nameFragment: String, inputData: String, output: RenderPrintWriter, rpcOps: CordaRPCOps, ansiProgressRenderer: ANSIProgressRenderer) {
val matches = rpcOps.registeredFlows().filter { nameFragment in it }
if (matches.isEmpty()) {
output.println("No matching flow found, run 'flow list' to see your options.", Color.red)
return
} else if (matches.size > 1) {
output.println("Ambigous name provided, please be more specific. Your options are:")
output.println("Ambiguous name provided, please be more specific. Your options are:")
matches.forEachIndexed { i, s -> output.println("${i + 1}. $s", Color.yellow) }
return
}
val clazz: Class<FlowLogic<*>> = uncheckedCast(matches.single())
val clazz: Class<FlowLogic<*>> = uncheckedCast(Class.forName(matches.single()))
try {
// TODO Flow invocation should use startFlowDynamic.
val context = net.corda.core.context.InvocationContext.shell()
val fsm = runFlowFromString({ node.services.startFlow(it, context).getOrThrow() }, inputData, clazz)
// Show the progress tracker on the console until the flow completes or is interrupted with a
// Ctrl-C keypress.
val stateObservable = runFlowFromString({ clazz, args -> rpcOps.startTrackedFlowDynamic(clazz, *args) }, inputData, clazz)
val latch = CountDownLatch(1)
ANSIProgressRenderer.onDone = { latch.countDown() }
ANSIProgressRenderer.progressTracker = (fsm as FlowStateMachineImpl).logic.progressTracker
ansiProgressRenderer.render(stateObservable, { latch.countDown() })
try {
// Wait for the flow to end and the progress tracker to notice. By the time the latch is released
// the tracker is done with the screen.
latch.await()
} catch (e: InterruptedException) {
ANSIProgressRenderer.progressTracker = null
// TODO: When the flow framework allows us to kill flows mid-flight, do so here.
}
} catch (e: NoApplicableConstructor) {
output.println("No matching constructor found:", Color.red)
e.errors.forEach { output.println("- $it", Color.red) }
} catch (e:PermissionException) {
output.println(e.message ?: "Access denied", Color.red)
} finally {
InputStreamDeserializer.closeAll()
}
@ -273,10 +273,10 @@ object InteractiveShell {
* @throws NoApplicableConstructor if no constructor could be found for the given set of types.
*/
@Throws(NoApplicableConstructor::class)
fun runFlowFromString(invoke: (FlowLogic<*>) -> FlowStateMachine<*>,
fun <T> runFlowFromString(invoke: (Class<out FlowLogic<T>>, Array<out Any?>) -> FlowProgressHandle<T>,
inputData: String,
clazz: Class<out FlowLogic<*>>,
om: ObjectMapper = yamlInputMapper): FlowStateMachine<*> {
clazz: Class<out FlowLogic<T>>,
om: ObjectMapper = yamlInputMapper): FlowProgressHandle<T> {
// For each constructor, attempt to parse the input data as a method call. Use the first that succeeds,
// and keep track of the reasons we failed so we can print them out if no constructors are usable.
val parser = StringToMethodCallParser(clazz, om)
@ -303,7 +303,7 @@ object InteractiveShell {
errors.add("A flow must override the progress tracker in order to be run from the shell")
continue
}
return invoke(flow)
return invoke(clazz, args)
} catch (e: StringToMethodCallParser.UnparseableCallException.MissingParameter) {
errors.add("${getPrototype()}: missing parameter ${e.paramName}")
} catch (e: StringToMethodCallParser.UnparseableCallException.TooManyParameters) {
@ -321,8 +321,8 @@ object InteractiveShell {
// TODO Filtering on error/success when we will have some sort of flow auditing, for now it doesn't make much sense.
@JvmStatic
fun runStateMachinesView(out: RenderPrintWriter): Any? {
val proxy = node.rpcOps
fun runStateMachinesView(out: RenderPrintWriter, rpcOps: CordaRPCOps): Any? {
val proxy = rpcOps
val (stateMachines, stateMachineUpdates) = proxy.stateMachinesFeed()
val currentStateMachines = stateMachines.map { StateMachineUpdate.Added(it) }
val subscriber = FlowWatchPrintingSubscriber(out)
@ -395,7 +395,7 @@ object InteractiveShell {
return result
}
private fun printAndFollowRPCResponse(response: Any?, toStream: PrintWriter): CordaFuture<Unit>? {
private fun printAndFollowRPCResponse(response: Any?, toStream: PrintWriter): CordaFuture<Unit> {
val printerFun = yamlMapper::writeValueAsString
toStream.println(printerFun(response))
toStream.flush()
@ -422,28 +422,31 @@ object InteractiveShell {
override fun onNext(t: Any?) {
count++
toStream.println("Observation $count: " + printerFun(t))
toStream.flush()
}
@Synchronized
override fun onError(e: Throwable) {
toStream.println("Observable completed with an error")
e.printStackTrace()
e.printStackTrace(toStream)
future.setException(e)
}
}
private fun maybeFollow(response: Any?, printerFun: (Any?) -> String, toStream: PrintWriter): OpenFuture<Unit>? {
private fun maybeFollow(response: Any?, printerFun: (Any?) -> String, toStream: PrintWriter): CordaFuture<Unit> {
// Match on a couple of common patterns for "important" observables. It's tough to do this in a generic
// way because observables can be embedded anywhere in the object graph, and can emit other arbitrary
// object graphs that contain yet more observables. So we just look for top level responses that follow
// the standard "track" pattern, and print them until the user presses Ctrl-C
if (response == null) return null
if (response == null) return doneFuture(Unit)
val observable: Observable<*> = when (response) {
is Observable<*> -> response
is DataFeed<*, *> -> response.updates
else -> return null
is DataFeed<*, *> -> {
toStream.println("Snapshot")
toStream.println(response.snapshot)
response.updates
}
else -> return doneFuture(Unit)
}
val subscriber = PrintingSubscriber(printerFun, toStream)

View File

@ -4,12 +4,14 @@ import com.fasterxml.jackson.databind.ObjectMapper
import net.corda.core.messaging.CordaRPCOps
import net.corda.node.services.api.ServiceHubInternal
import org.crsh.command.BaseCommand
import org.crsh.shell.impl.command.CRaSHSession
/**
* Simply extends CRaSH BaseCommand to add easy access to the RPC ops class.
*/
open class InteractiveShellCommand : BaseCommand() {
fun ops() = context.attributes["ops"] as CordaRPCOps
fun ops() = ((context.session as CRaSHSession).authInfo as CordaSSHAuthInfo).rpcOps
fun ansiProgressRenderer() = ((context.session as CRaSHSession).authInfo as CordaSSHAuthInfo).ansiProgressRenderer
fun services() = context.attributes["services"] as ServiceHubInternal
fun objectMapper() = context.attributes["mapper"] as ObjectMapper
}
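Commands therefore stop reading a shared 'ops' attribute and instead pull the per-session, permission-scoped proxy from the SSH auth info. A hypothetical command using the new accessor (the command name and annotation usage are illustrative):

import org.crsh.cli.Command

// Hypothetical command, shown only to illustrate the per-session ops() accessor.
class FlowCountCommand : InteractiveShellCommand() {
    @Command
    fun flowCount(): String = "Registered flows: ${ops().registeredFlows().size}"
}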

View File

@ -0,0 +1,205 @@
package net.corda.node.shell
import net.corda.core.concurrent.CordaFuture
import net.corda.core.context.InvocationContext
import net.corda.core.contracts.ContractState
import net.corda.core.crypto.SecureHash
import net.corda.core.flows.FlowLogic
import net.corda.core.identity.AbstractParty
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
import net.corda.core.messaging.*
import net.corda.core.node.NodeInfo
import net.corda.core.node.services.AttachmentId
import net.corda.core.node.services.NetworkMapCache
import net.corda.core.node.services.Vault
import net.corda.core.node.services.vault.*
import net.corda.core.transactions.SignedTransaction
import net.corda.core.utilities.getOrThrow
import net.corda.node.services.messaging.CURRENT_RPC_CONTEXT
import net.corda.node.services.messaging.RpcAuthContext
import net.corda.node.services.messaging.RpcPermissions
import java.io.InputStream
import java.security.PublicKey
import java.time.Instant
import java.util.concurrent.CompletableFuture
import java.util.concurrent.Future
class RPCOpsWithContext(val cordaRPCOps: CordaRPCOps, val invocationContext:InvocationContext, val rpcPermissions: RpcPermissions) : CordaRPCOps {
class RPCContextRunner<T>(val invocationContext:InvocationContext, val permissions:RpcPermissions, val block:() -> T) : Thread() {
private var result: CompletableFuture<T> = CompletableFuture()
override fun run() {
CURRENT_RPC_CONTEXT.set(RpcAuthContext(invocationContext, permissions))
try {
result.complete(block())
} catch (e:Throwable) {
result.completeExceptionally(e)
}
CURRENT_RPC_CONTEXT.remove()
}
fun get(): Future<T> {
start()
join()
return result
}
}
override fun uploadAttachmentWithMetadata(jar: InputStream, uploader: String, filename: String): SecureHash {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.uploadAttachmentWithMetadata(jar, uploader, filename) }.get().getOrThrow()
}
override fun queryAttachments(query: AttachmentQueryCriteria, sorting: AttachmentSort?): List<AttachmentId> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.queryAttachments(query, sorting) }.get().getOrThrow()
}
override fun <T : ContractState> vaultTrackByWithSorting(contractStateType: Class<out T>, criteria: QueryCriteria, sorting: Sort): DataFeed<Vault.Page<T>, Vault.Update<T>> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.vaultTrackByWithSorting(contractStateType, criteria, sorting) }.get().getOrThrow()
}
override fun <T : ContractState> vaultTrackByWithPagingSpec(contractStateType: Class<out T>, criteria: QueryCriteria, paging: PageSpecification): DataFeed<Vault.Page<T>, Vault.Update<T>> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.vaultTrackByWithPagingSpec(contractStateType, criteria, paging) }.get().getOrThrow()
}
override fun <T : ContractState> vaultTrackByCriteria(contractStateType: Class<out T>, criteria: QueryCriteria): DataFeed<Vault.Page<T>, Vault.Update<T>> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.vaultTrackByCriteria(contractStateType, criteria) }.get().getOrThrow()
}
override fun <T : ContractState> vaultTrack(contractStateType: Class<out T>): DataFeed<Vault.Page<T>, Vault.Update<T>> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.vaultTrack(contractStateType) }.get().getOrThrow()
}
override fun <T : ContractState> vaultQueryByWithSorting(contractStateType: Class<out T>, criteria: QueryCriteria, sorting: Sort): Vault.Page<T> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.vaultQueryByWithSorting(contractStateType, criteria, sorting) }.get().getOrThrow()
}
override fun <T : ContractState> vaultQueryByWithPagingSpec(contractStateType: Class<out T>, criteria: QueryCriteria, paging: PageSpecification): Vault.Page<T> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.vaultQueryByWithPagingSpec(contractStateType, criteria, paging) }.get().getOrThrow()
}
override fun <T : ContractState> vaultQueryByCriteria(criteria: QueryCriteria, contractStateType: Class<out T>): Vault.Page<T> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.vaultQueryByCriteria(criteria, contractStateType) }.get().getOrThrow()
}
override fun <T : ContractState> vaultQuery(contractStateType: Class<out T>): Vault.Page<T> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.vaultQuery(contractStateType) }.get().getOrThrow()
}
override fun stateMachinesSnapshot(): List<StateMachineInfo> {
return RPCContextRunner(invocationContext, rpcPermissions, cordaRPCOps::stateMachinesSnapshot).get().getOrThrow()
}
override fun stateMachinesFeed(): DataFeed<List<StateMachineInfo>, StateMachineUpdate> {
return RPCContextRunner(invocationContext, rpcPermissions, cordaRPCOps::stateMachinesFeed).get().getOrThrow()
}
override fun <T : ContractState> vaultQueryBy(criteria: QueryCriteria, paging: PageSpecification, sorting: Sort, contractStateType: Class<out T>): Vault.Page<T> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.vaultQueryBy(criteria, paging, sorting, contractStateType) }.get().getOrThrow()
}
override fun <T : ContractState> vaultTrackBy(criteria: QueryCriteria, paging: PageSpecification, sorting: Sort, contractStateType: Class<out T>): DataFeed<Vault.Page<T>, Vault.Update<T>> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.vaultTrackBy(criteria, paging, sorting, contractStateType) }.get().getOrThrow()
}
override fun internalVerifiedTransactionsSnapshot(): List<SignedTransaction> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.internalVerifiedTransactionsSnapshot() }.get().getOrThrow()
}
override fun internalVerifiedTransactionsFeed(): DataFeed<List<SignedTransaction>, SignedTransaction> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.internalVerifiedTransactionsFeed() }.get().getOrThrow()
}
override fun stateMachineRecordedTransactionMappingSnapshot(): List<StateMachineTransactionMapping> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.stateMachineRecordedTransactionMappingSnapshot() }.get().getOrThrow()
}
override fun stateMachineRecordedTransactionMappingFeed(): DataFeed<List<StateMachineTransactionMapping>, StateMachineTransactionMapping> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.stateMachineRecordedTransactionMappingFeed() }.get().getOrThrow()
}
override fun networkMapSnapshot(): List<NodeInfo> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.networkMapSnapshot() }.get().getOrThrow()
}
override fun networkMapFeed(): DataFeed<List<NodeInfo>, NetworkMapCache.MapChange> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.networkMapFeed() }.get().getOrThrow()
}
override fun <T> startFlowDynamic(logicType: Class<out FlowLogic<T>>, vararg args: Any?): FlowHandle<T> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.startFlowDynamic(logicType, *args) }.get().getOrThrow()
}
override fun <T> startTrackedFlowDynamic(logicType: Class<out FlowLogic<T>>, vararg args: Any?): FlowProgressHandle<T> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.startTrackedFlowDynamic(logicType, *args) }.get().getOrThrow()
}
override fun nodeInfo(): NodeInfo {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.nodeInfo() }.get().getOrThrow()
}
override fun notaryIdentities(): List<Party> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.notaryIdentities() }.get().getOrThrow()
}
override fun addVaultTransactionNote(txnId: SecureHash, txnNote: String) {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.addVaultTransactionNote(txnId, txnNote) }.get().getOrThrow()
}
override fun getVaultTransactionNotes(txnId: SecureHash): Iterable<String> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.getVaultTransactionNotes(txnId) }.get().getOrThrow()
}
override fun attachmentExists(id: SecureHash): Boolean {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.attachmentExists(id) }.get().getOrThrow()
}
override fun openAttachment(id: SecureHash): InputStream {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.openAttachment(id) }.get().getOrThrow()
}
override fun uploadAttachment(jar: InputStream): SecureHash {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.uploadAttachment(jar) }.get().getOrThrow()
}
override fun currentNodeTime(): Instant {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.currentNodeTime() }.get().getOrThrow()
}
override fun waitUntilNetworkReady(): CordaFuture<Void?> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.waitUntilNetworkReady() }.get().getOrThrow()
}
override fun wellKnownPartyFromAnonymous(party: AbstractParty): Party? {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.wellKnownPartyFromAnonymous(party) }.get().getOrThrow()
}
override fun partyFromKey(key: PublicKey): Party? {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.partyFromKey(key) }.get().getOrThrow()
}
override fun wellKnownPartyFromX500Name(x500Name: CordaX500Name): Party? {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.wellKnownPartyFromX500Name(x500Name) }.get().getOrThrow()
}
override fun notaryPartyFromX500Name(x500Name: CordaX500Name): Party? {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.notaryPartyFromX500Name(x500Name) }.get().getOrThrow()
}
override fun partiesFromName(query: String, exactMatch: Boolean): Set<Party> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.partiesFromName(query, exactMatch) }.get().getOrThrow()
}
override fun registeredFlows(): List<String> {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.registeredFlows() }.get().getOrThrow()
}
override fun nodeInfoFromParty(party: AbstractParty): NodeInfo? {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.nodeInfoFromParty(party) }.get().getOrThrow()
}
override fun clearNetworkMapCache() {
return RPCContextRunner(invocationContext, rpcPermissions) { cordaRPCOps.clearNetworkMapCache() }.get().getOrThrow()
}
}
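Every override above funnels through RPCContextRunner, which runs the call on a short-lived thread with CURRENT_RPC_CONTEXT set so the usual RPC permission checks see the caller's permissions, then clears the context. A sketch of building such a proxy, assuming rpcOps and an RPC user record are in scope:

// Illustrative only: wraps the node's CordaRPCOps so each call carries the user's permissions.
val proxy: CordaRPCOps = RPCOpsWithContext(
        cordaRPCOps = rpcOps,
        invocationContext = InvocationContext.shell(),
        rpcPermissions = RpcPermissions(user.permissions))
val flows = proxy.registeredFlows()   // surfaces a failure via getOrThrow() if the underlying checks reject it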

View File

@ -1,138 +1,113 @@
package net.corda.node.utilities
import net.corda.core.internal.Emoji
import net.corda.core.utilities.ProgressTracker
import net.corda.node.utilities.ANSIProgressRenderer.progressTracker
import net.corda.core.messaging.FlowProgressHandle
import org.apache.logging.log4j.LogManager
import org.apache.logging.log4j.core.LogEvent
import org.apache.logging.log4j.core.LoggerContext
import org.apache.logging.log4j.core.appender.AbstractOutputStreamAppender
import org.apache.logging.log4j.core.appender.ConsoleAppender
import org.apache.logging.log4j.core.appender.OutputStreamManager
import org.crsh.text.RenderPrintWriter
import org.fusesource.jansi.Ansi
import org.fusesource.jansi.AnsiConsole
import org.fusesource.jansi.AnsiOutputStream
import rx.Subscription
/**
* Knows how to render a [ProgressTracker] to the terminal using coloured, emoji-fied output. Useful when writing small
* command line tools, demos, tests etc. Just set the [progressTracker] field and it will go ahead and start drawing
* if the terminal supports it. Otherwise it just prints out the name of the step whenever it changes.
*
* When a progress tracker is on the screen, it takes over the bottom part and reconfigures logging so that, assuming
* 1 log event == 1 line, the progress tracker is always glued to the bottom and logging scrolls above it.
*
* TODO: More thread safety
*/
object ANSIProgressRenderer {
abstract class ANSIProgressRenderer {
private var subscriptionIndex: Subscription? = null
private var subscriptionTree: Subscription? = null
protected var usingANSI = false
protected var checkEmoji = false
protected var treeIndex: Int = 0
protected var tree: List<Pair<Int,String>> = listOf()
private var installedYet = false
private var subscription: Subscription? = null
private var usingANSI = false
var progressTracker: ProgressTracker? = null
set(value) {
subscription?.unsubscribe()
field = value
if (!installedYet) {
setup()
}
// Reset the state when a new tracker is wired up.
if (value != null) {
prevMessagePrinted = null
prevLinesDrawn = 0
draw(true)
subscription = value.changes.subscribe({ draw(true) }, { done(it) }, { done(null) })
}
}
var onDone: () -> Unit = {}
private fun done(error: Throwable?) {
if (error == null) progressTracker = null
draw(true, error)
onDone()
}
private fun setup() {
AnsiConsole.systemInstall()
// This line looks weird as hell because the magic code to decide if we really have a TTY or not isn't
// actually exposed anywhere as a function (weak sauce). So we have to rely on our knowledge of jansi
// implementation details.
usingANSI = AnsiConsole.wrapOutputStream(System.out) !is AnsiOutputStream
if (usingANSI) {
// This super ugly code hacks into log4j and swaps out its console appender for our own. It's a bit simpler
// than doing things the official way with a dedicated plugin, etc, as it avoids mucking around with all
// the config XML and lifecycle goop.
val manager = LogManager.getContext(false) as LoggerContext
val consoleAppender = manager.configuration.appenders.values.filterIsInstance<ConsoleAppender>().single { it.name == "Console-Appender" }
val scrollingAppender = object : AbstractOutputStreamAppender<OutputStreamManager>(
consoleAppender.name, consoleAppender.layout, consoleAppender.filter,
consoleAppender.ignoreExceptions(), true, consoleAppender.manager) {
override fun append(event: LogEvent) {
// We lock on the renderer to avoid threads that are logging to the screen simultaneously messing
// things up. Of course this slows stuff down a bit, but only whilst this little utility is in use.
// Eventually it will be replaced with a real GUI and we can delete all this.
synchronized(ANSIProgressRenderer) {
if (progressTracker != null) {
val ansi = Ansi.ansi()
repeat(prevLinesDrawn) { ansi.eraseLine().cursorUp(1).eraseLine() }
System.out.print(ansi)
System.out.flush()
}
super.append(event)
if (progressTracker != null)
draw(false)
}
}
}
scrollingAppender.start()
manager.configuration.appenders[consoleAppender.name] = scrollingAppender
val loggerConfigs = manager.configuration.loggers.values
for (config in loggerConfigs) {
val appenderRefs = config.appenderRefs
val consoleAppenders = config.appenders.filter { it.value is ConsoleAppender }.keys
consoleAppenders.forEach { config.removeAppender(it) }
appenderRefs.forEach { config.addAppender(manager.configuration.appenders[it.ref], it.level, it.filter) }
}
manager.updateLoggers()
}
installedYet = true
}
private var onDone: () -> Unit = {}
// prevMessagePrinted is just for non-ANSI mode.
private var prevMessagePrinted: String? = null
// prevLinesDrawn is just for ANSI mode.
private var prevLinesDrawn = 0
protected var prevLinesDrawn = 0
@Synchronized private fun draw(moveUp: Boolean, error: Throwable? = null) {
private fun done(error: Throwable?) {
if (error == null) _render(null)
draw(true, error)
onDone()
}
fun render(flowProgressHandle: FlowProgressHandle<*>, onDone: () -> Unit = {}) {
this.onDone = onDone
_render(flowProgressHandle)
}
protected abstract fun printLine(line:String)
protected abstract fun printAnsi(ansi:Ansi)
protected abstract fun setup()
private fun _render(flowProgressHandle: FlowProgressHandle<*>?) {
subscriptionIndex?.unsubscribe()
subscriptionTree?.unsubscribe()
treeIndex = 0
tree = listOf()
if (!installedYet) {
setup()
installedYet = true
}
prevMessagePrinted = null
prevLinesDrawn = 0
draw(true)
flowProgressHandle?.apply {
stepsTreeIndexFeed?.apply {
treeIndex = snapshot
subscriptionIndex = updates.subscribe({
treeIndex = it
draw(true)
}, { done(it) }, { done(null) })
}
stepsTreeFeed?.apply {
tree = snapshot
subscriptionTree = updates.subscribe({
tree = it
draw(true)
}, { done(it) }, { done(null) })
}
}
}
@Synchronized protected fun draw(moveUp: Boolean, error: Throwable? = null) {
if (!usingANSI) {
val currentMessage = progressTracker?.currentStepRecursive?.label
val currentMessage = tree.getOrNull(treeIndex)?.second
if (currentMessage != null && currentMessage != prevMessagePrinted) {
println(currentMessage)
printLine(currentMessage)
prevMessagePrinted = currentMessage
}
return
}
Emoji.renderIfSupported {
fun printingBody() {
// Handle the case where the number of steps in a progress tracker is changed during execution.
val ansi = Ansi.ansi()
val ansi = Ansi()
if (prevLinesDrawn > 0 && moveUp)
ansi.cursorUp(prevLinesDrawn)
// Put a blank line between any logging and us.
ansi.eraseLine()
ansi.newline()
val pt = progressTracker ?: return
var newLinesDrawn = 1 + pt.renderLevel(ansi, 0, error != null)
if (tree.isEmpty()) return
var newLinesDrawn = 1 + renderLevel(ansi, error != null)
if (error != null) {
ansi.a("${Emoji.skullAndCrossbones} ${error.message}")
@ -152,46 +127,137 @@ object ANSIProgressRenderer {
}
prevLinesDrawn = newLinesDrawn
// Need to force a flush here in order to ensure stderr/stdout sync up properly.
System.out.print(ansi)
System.out.flush()
printAnsi(ansi)
}
if (checkEmoji) {
Emoji.renderIfSupported(::printingBody)
} else {
printingBody()
}
}
// Returns number of lines rendered.
private fun ProgressTracker.renderLevel(ansi: Ansi, indent: Int, error: Boolean): Int {
private fun renderLevel(ansi: Ansi, error: Boolean): Int {
with(ansi) {
var lines = 0
for ((index, step) in steps.withIndex()) {
// Don't bother rendering these special steps in some cases.
if (step == ProgressTracker.UNSTARTED) continue
if (indent > 0 && step == ProgressTracker.DONE) continue
for ((index, step) in tree.withIndex()) {
val marker = when {
index < stepIndex -> "${Emoji.greenTick} "
index == stepIndex && step == ProgressTracker.DONE -> "${Emoji.greenTick} "
index == stepIndex -> "${Emoji.rightArrow} "
index < treeIndex -> "${Emoji.greenTick} "
treeIndex == tree.lastIndex -> "${Emoji.greenTick} "
index == treeIndex -> "${Emoji.rightArrow} "
error -> "${Emoji.noEntry} "
else -> " " // Not reached yet.
}
a(" ".repeat(indent))
a(" ".repeat(step.first))
a(marker)
val active = index == stepIndex && step != ProgressTracker.DONE
val active = index == treeIndex
if (active) bold()
a(step.label)
a(step.second)
if (active) boldOff()
eraseLine(Ansi.Erase.FORWARD)
newline()
lines++
val child = getChildProgressTracker(step)
if (child != null)
lines += child.renderLevel(ansi, indent + 1, error)
}
return lines
}
}
}
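// For reference, the tree that renderLevel() walks is a flat list of (indent, label) pairs plus the
// index of the current step. A simplified, self-contained illustration of the marker selection
// above, using plain-text markers in place of the emoji (the function and sample data are made up):
fun markersFor(tree: List<Pair<Int, String>>, treeIndex: Int): List<String> =
        tree.mapIndexed { index, (indent, label) ->
            val marker = when {
                index < treeIndex -> "[done] "
                index == treeIndex -> "[>] "
                else -> "    "
            }
            " ".repeat(indent) + marker + label
        }
// markersFor(listOf(0 to "Verifying", 0 to "Signing", 1 to "Gathering sigs"), treeIndex = 1) yields
// ["[done] Verifying", "[>] Signing", "     Gathering sigs"]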
class CRaSHNSIProgressRenderer(val renderPrintWriter:RenderPrintWriter) : ANSIProgressRenderer() {
override fun printLine(line: String) {
renderPrintWriter.println(line)
}
override fun printAnsi(ansi: Ansi) {
renderPrintWriter.print(ansi)
renderPrintWriter.flush()
}
override fun setup() {
// We assume SSH sessions always use ANSI.
usingANSI = true
}
}
/**
* Knows how to render a [FlowProgressHandle] to the terminal using coloured, emoji-fied output. Useful when writing small
* command line tools, demos, tests etc. Just call the [draw] method and it will go ahead and start drawing
* if the terminal supports it. Otherwise it just prints out the name of the step whenever it changes.
*
* When a progress tracker is on the screen, it takes over the bottom part and reconfigures logging so that, assuming
* 1 log event == 1 line, the progress tracker is always glued to the bottom and logging scrolls above it.
*
* TODO: More thread safety
*/
object StdoutANSIProgressRenderer : ANSIProgressRenderer() {
override fun setup() {
AnsiConsole.systemInstall()
checkEmoji = true
// This line looks weird as hell because the magic code to decide if we really have a TTY or not isn't
// actually exposed anywhere as a function (weak sauce). So we have to rely on our knowledge of jansi
// implementation details.
usingANSI = AnsiConsole.wrapOutputStream(System.out) !is AnsiOutputStream
if (usingANSI) {
// This super ugly code hacks into log4j and swaps out its console appender for our own. It's a bit simpler
// than doing things the official way with a dedicated plugin, etc, as it avoids mucking around with all
// the config XML and lifecycle goop.
val manager = LogManager.getContext(false) as LoggerContext
val consoleAppender = manager.configuration.appenders.values.filterIsInstance<ConsoleAppender>().single { it.name == "Console-Appender" }
val scrollingAppender = object : AbstractOutputStreamAppender<OutputStreamManager>(
consoleAppender.name, consoleAppender.layout, consoleAppender.filter,
consoleAppender.ignoreExceptions(), true, consoleAppender.manager) {
override fun append(event: LogEvent) {
// We lock on the renderer to avoid threads that are logging to the screen simultaneously messing
// things up. Of course this slows stuff down a bit, but only whilst this little utility is in use.
// Eventually it will be replaced with a real GUI and we can delete all this.
synchronized(StdoutANSIProgressRenderer) {
if (tree.isNotEmpty()) {
val ansi = Ansi.ansi()
repeat(prevLinesDrawn) { ansi.eraseLine().cursorUp(1).eraseLine() }
System.out.print(ansi)
System.out.flush()
}
super.append(event)
if (tree.isNotEmpty())
draw(false)
}
}
}
scrollingAppender.start()
manager.configuration.appenders[consoleAppender.name] = scrollingAppender
val loggerConfigs = manager.configuration.loggers.values
for (config in loggerConfigs) {
val appenderRefs = config.appenderRefs
val consoleAppenders = config.appenders.filter { it.value is ConsoleAppender }.keys
consoleAppenders.forEach { config.removeAppender(it) }
appenderRefs.forEach { config.addAppender(manager.configuration.appenders[it.ref], it.level, it.filter) }
}
manager.updateLoggers()
}
}
override fun printLine(line:String) {
System.out.println(line)
}
override fun printAnsi(ansi: Ansi) {
// Need to force a flush here in order to ensure stderr/stdout sync up properly.
System.out.print(ansi)
System.out.flush()
}
}
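// The KDoc above sums up the refactor: ANSIProgressRenderer is now an abstract base that follows a
// flow's step tree, with StdoutANSIProgressRenderer writing to the local terminal and
// CRaSHNSIProgressRenderer writing to a shell session's RenderPrintWriter. A hedged usage sketch,
// assuming a FlowProgressHandle obtained elsewhere (the function name and parameter are made up):
fun renderProgressLocally(handle: FlowProgressHandle<*>) {
    // render() subscribes to the handle's stepsTreeFeed and stepsTreeIndexFeed and keeps redrawing
    // until the flow completes, at which point the onDone callback runs.
    StdoutANSIProgressRenderer.render(handle) {
        println("Flow finished; progress rendering stopped")
    }
}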

View File

@ -2,7 +2,6 @@ package net.corda.node.utilities
import com.google.common.util.concurrent.SettableFuture
import com.google.common.util.concurrent.Uninterruptibles
import net.corda.core.utilities.loggerFor
import java.util.*
import java.util.concurrent.CompletableFuture
import java.util.concurrent.Executor
@ -55,10 +54,6 @@ interface AffinityExecutor : Executor {
*/
open class ServiceAffinityExecutor(threadName: String, numThreads: Int) : AffinityExecutor,
ScheduledThreadPoolExecutor(numThreads) {
companion object {
val logger = loggerFor<ServiceAffinityExecutor>()
}
private val threads = Collections.synchronizedSet(HashSet<Thread>())
init {

View File

@ -1,6 +1,6 @@
package net.corda.node.utilities
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.contextLogger
import java.util.*
@ -18,7 +18,7 @@ class AppendOnlyPersistentMap<K, V, E, out EK>(
) { //TODO determine cacheBound based on entity class later or with node config allowing tuning, or using some heuristic based on heap size
private companion object {
val log = loggerFor<AppendOnlyPersistentMap<*, *, *, *>>()
private val log = contextLogger()
}
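// The change above swaps the reified loggerFor<AppendOnlyPersistentMap<*, *, *, *>>() call for
// contextLogger() (imported above from net.corda.core.utilities), which is meant to be called from
// a companion object and resolves a logger for the enclosing class. A minimal sketch of the
// pattern with a made-up Example class:
class Example {
    private companion object {
        // Resolves to a logger named after Example without repeating the class name.
        private val log = contextLogger()
    }

    fun doWork() = log.info("working")
}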
private val cache = NonInvalidatingCache<K, Optional<V>>(

View File

@ -4,7 +4,7 @@ package net.corda.node.utilities
import com.google.common.cache.RemovalCause
import com.google.common.cache.RemovalListener
import com.google.common.cache.RemovalNotification
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.contextLogger
import java.util.*
@ -19,7 +19,7 @@ class PersistentMap<K, V, E, out EK>(
) : MutableMap<K, V>, AbstractMap<K, V>() {
private companion object {
val log = loggerFor<PersistentMap<*, *, *, *>>()
private val log = contextLogger()
}
private val cache = NonInvalidatingUnboundCache(

View File

@ -7,13 +7,12 @@ import net.corda.core.identity.Party
import net.corda.core.internal.cert
import net.corda.core.internal.createDirectories
import net.corda.core.internal.div
import net.corda.core.utilities.loggerFor
import net.corda.core.utilities.trace
import org.slf4j.LoggerFactory
import java.nio.file.Path
object ServiceIdentityGenerator {
private val log = loggerFor<ServiceIdentityGenerator>()
private val log = LoggerFactory.getLogger(javaClass)
/**
* Generates signing key pairs and a common distributed service identity for a set of nodes.
* The key pairs and the group identity get serialized to disk in the corresponding node directories.

View File

@ -241,6 +241,18 @@ class CordaRPCOpsImplTest {
}
}
@Test
fun `can't upload the same attachment`() {
withPermissions(invokeRpc(CordaRPCOps::uploadAttachment), invokeRpc(CordaRPCOps::attachmentExists)) {
assertThatExceptionOfType(java.nio.file.FileAlreadyExistsException::class.java).isThrownBy {
val inputJar1 = Thread.currentThread().contextClassLoader.getResourceAsStream(testJar)
val inputJar2 = Thread.currentThread().contextClassLoader.getResourceAsStream(testJar)
val secureHash1 = rpc.uploadAttachment(inputJar1)
val secureHash2 = rpc.uploadAttachment(inputJar2)
}
}
}
@Test
fun `can download an uploaded attachment`() {
withPermissions(invokeRpc(CordaRPCOps::uploadAttachment), invokeRpc(CordaRPCOps::openAttachment)) {

View File

@ -2,11 +2,17 @@ package net.corda.node
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
import net.corda.client.jackson.JacksonSupport
import net.corda.core.concurrent.CordaFuture
import net.corda.core.contracts.Amount
import net.corda.core.crypto.SecureHash
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.StateMachineRunId
import net.corda.core.identity.Party
import net.corda.core.internal.FlowStateMachine
import net.corda.core.internal.concurrent.openFuture
import net.corda.core.internal.objectOrNewInstance
import net.corda.core.messaging.FlowProgressHandle
import net.corda.core.messaging.FlowProgressHandleImpl
import net.corda.core.utilities.ProgressTracker
import net.corda.node.services.identity.InMemoryIdentityService
import net.corda.node.shell.InteractiveShell
@ -20,6 +26,7 @@ import net.corda.testing.rigorousMock
import org.junit.After
import org.junit.Before
import org.junit.Test
import rx.Observable
import java.util.*
import kotlin.test.assertEquals
@ -36,8 +43,8 @@ class InteractiveShellTest {
@Suppress("UNUSED")
class FlowA(val a: String) : FlowLogic<String>() {
constructor(b: Int) : this(b.toString())
constructor(b: Int, c: String) : this(b.toString() + c)
constructor(b: Int?) : this(b.toString())
constructor(b: Int?, c: String) : this(b.toString() + c)
constructor(amount: Amount<Currency>) : this(amount.toString())
constructor(pair: Pair<Amount<Currency>, SecureHash.SHA256>) : this(pair.toString())
constructor(party: Party) : this(party.name.toString())
@ -50,9 +57,16 @@ class InteractiveShellTest {
private val om = JacksonSupport.createInMemoryMapper(ids, YAMLFactory())
private fun check(input: String, expected: String) {
var output: DummyFSM? = null
InteractiveShell.runFlowFromString({ DummyFSM(it as FlowA).apply { output = this } }, input, FlowA::class.java, om)
assertEquals(expected, output!!.logic.a, input)
var output: String? = null
InteractiveShell.runFlowFromString( { clazz, args ->
val instance = clazz.getConstructor(*args.map { it!!::class.java }.toTypedArray()).newInstance(*args) as FlowA
output = instance.a
val future = openFuture<String>()
future.set("ABC")
FlowProgressHandleImpl(StateMachineRunId.createRandom(), future, Observable.just("Some string"))
}, input, FlowA::class.java, om)
assertEquals(expected, output!!, input)
}
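// check() above no longer drives a DummyFSM; the lambda it hands to runFlowFromString builds the
// flow itself by reflection, matching a constructor against the runtime classes of the parsed
// arguments. A stripped-down illustration of that lookup (SampleTarget and constructByArgTypes are
// made-up names, not part of this commit):
class SampleTarget(val a: String) {
    constructor(b: Int) : this(b.toString())
}

private fun constructByArgTypes(clazz: Class<*>, args: Array<Any?>): Any {
    // Map each argument to its runtime class and ask for the matching public constructor,
    // exactly as the lambda passed to runFlowFromString does above.
    val paramTypes = args.map { it!!::class.java }.toTypedArray()
    return clazz.getConstructor(*paramTypes).newInstance(*args)
}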
@Test

View File

@ -1,11 +1,9 @@
package net.corda.node.services.messaging
import com.codahale.metrics.MetricRegistry
import net.corda.core.concurrent.CordaFuture
import net.corda.core.crypto.generateKeyPair
import net.corda.core.internal.concurrent.doneFuture
import net.corda.core.internal.concurrent.openFuture
import net.corda.core.messaging.RPCOps
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.node.services.RPCUserService
import net.corda.node.services.RPCUserServiceImpl
@ -58,16 +56,10 @@ class ArtemisMessagingTests {
private lateinit var database: CordaPersistence
private lateinit var userService: RPCUserService
private lateinit var networkMapRegistrationFuture: CordaFuture<Unit>
private var messagingClient: NodeMessagingClient? = null
private var messagingClient: P2PMessagingClient? = null
private var messagingServer: ArtemisMessagingServer? = null
private lateinit var networkMapCache: NetworkMapCacheImpl
private val rpcOps = object : RPCOps {
override val protocolVersion: Int get() = throw UnsupportedOperationException()
}
@Before
fun setUp() {
val baseDirectory = temporaryFolder.root.toPath()
@ -186,10 +178,10 @@ class ArtemisMessagingTests {
}
private fun startNodeMessagingClient() {
messagingClient!!.start(rpcOps, userService)
messagingClient!!.start()
}
private fun createAndStartClientAndServer(receivedMessages: LinkedBlockingQueue<Message>): NodeMessagingClient {
private fun createAndStartClientAndServer(receivedMessages: LinkedBlockingQueue<Message>): P2PMessagingClient {
createMessagingServer().start()
val messagingClient = createMessagingClient()
@ -198,20 +190,19 @@ class ArtemisMessagingTests {
receivedMessages.add(message)
}
// Run after the handlers are added, otherwise (some of) the messages get delivered and discarded / dead-lettered.
thread { messagingClient.run(messagingServer!!.serverControl) }
thread { messagingClient.run() }
return messagingClient
}
private fun createMessagingClient(server: NetworkHostAndPort = NetworkHostAndPort("localhost", serverPort)): NodeMessagingClient {
private fun createMessagingClient(server: NetworkHostAndPort = NetworkHostAndPort("localhost", serverPort)): P2PMessagingClient {
return database.transaction {
NodeMessagingClient(
P2PMessagingClient(
config,
MOCK_VERSION_INFO,
server,
identity.public,
ServiceAffinityExecutor("ArtemisMessagingTests", 1),
database,
MetricRegistry()).apply {
database).apply {
config.configureWithDevSSLCertificate()
messagingClient = this
}