Merge remote-tracking branch 'remotes/open/master' into parkri-os-merge-20180924-1

# Conflicts:
#	core/src/main/kotlin/net/corda/core/internal/NamedCache.kt
#	docs/source/index.rst
#	node/src/integration-test/kotlin/net/corda/node/services/messaging/ArtemisMessagingTest.kt
#	node/src/integration-test/kotlin/net/corda/node/services/network/PersistentNetworkMapCacheTest.kt
#	node/src/main/kotlin/net/corda/node/internal/AbstractNode.kt
#	node/src/main/kotlin/net/corda/node/services/messaging/P2PMessageDeduplicator.kt
#	node/src/main/kotlin/net/corda/node/services/messaging/P2PMessagingClient.kt
#	node/src/main/kotlin/net/corda/node/services/persistence/DBTransactionStorage.kt
rick.parker 2018-09-24 10:13:01 +01:00
commit 5469e41458
35 changed files with 665 additions and 114 deletions


@@ -1,5 +1,6 @@
 package net.corda.client.jfx.model
+import com.github.benmanes.caffeine.cache.CacheLoader
 import com.github.benmanes.caffeine.cache.Caffeine
 import javafx.beans.value.ObservableValue
 import javafx.collections.FXCollections
@@ -31,7 +32,7 @@ class NetworkIdentityModel {
 private val rpcProxy by observableValue(NodeMonitorModel::proxyObservable)
 private val identityCache = Caffeine.newBuilder()
-.buildNamed<PublicKey, ObservableValue<NodeInfo?>>("NetworkIdentityModel_identity", { publicKey ->
+.buildNamed<PublicKey, ObservableValue<NodeInfo?>>("NetworkIdentityModel_identity", CacheLoader { publicKey: PublicKey ->
 publicKey.let { rpcProxy.map { it?.cordaRPCOps?.nodeInfoFromParty(AnonymousParty(publicKey)) } }
 })
 val notaries = ChosenList(rpcProxy.map { FXCollections.observableList(it?.cordaRPCOps?.notaryIdentities() ?: emptyList()) }, "notaries")


@@ -28,12 +28,6 @@ fun <K, V> Caffeine<in K, in V>.buildNamed(name: String): Cache<K, V> {
 return wrap(this.build<K, V>(), name)
 }
-fun <K, V> Caffeine<in K, in V>.buildNamed(name: String, loadFunc: (K) -> V): LoadingCache<K, V> {
-checkCacheName(name)
-return wrap(this.build<K, V>(loadFunc), name)
-}
 fun <K, V> Caffeine<in K, in V>.buildNamed(name: String, loader: CacheLoader<K, V>): LoadingCache<K, V> {
 checkCacheName(name)
 return wrap(this.build<K, V>(loader), name)
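For orientation, a minimal sketch of how the surviving CacheLoader-based overload is called after this change, in the style of the NetworkIdentityModel call site above. The cache name, key and value types are invented for illustration, and the import path for `buildNamed` is assumed from the conflicted NamedCache.kt listed in this merge.

```kotlin
import com.github.benmanes.caffeine.cache.CacheLoader
import com.github.benmanes.caffeine.cache.Caffeine
import com.github.benmanes.caffeine.cache.LoadingCache
import net.corda.core.internal.buildNamed // assumed location, per the conflicted NamedCache.kt above

// Hypothetical value type; real call sites supply their own.
data class PartyRecord(val name: String)

// The lambda overload buildNamed(name, loadFunc) is removed, so callers now wrap
// their loading function in an explicit Caffeine CacheLoader.
val partyCache: LoadingCache<String, PartyRecord> = Caffeine.newBuilder()
        .maximumSize(1024)
        .buildNamed<String, PartyRecord>("Example_partyByName", CacheLoader { key: String -> PartyRecord(key) })
```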


@@ -0,0 +1,429 @@
<style>.wy-table-responsive table td, .wy-table-responsive table th { white-space: normal;}</style>
Corda Threat Model
==================
This document describes the security threat model of the Corda Platform. The Corda Threat Model is the result of architectural and threat modelling sessions,
and is designed to provide a high-level overview of the security objectives for the Corda Network, and the controls and mitigations used to deliver on those
objectives. It is intended to support subsequent analysis and architecture of systems connecting with the network and the applications which interact with data
across it.
It is incumbent on all ledger network participants to review and assess the security measures described in this document against their specific organisational
requirements and policies, and to implement any additional measures needed.
Scope
-----
Built on the [Corda](http://www.corda.net/) distributed ledger platform designed by R3, the ledger network enables the origination and management of agreements
between business partners. Participants to the network create and maintain Corda *nodes*, each hosting one or more pluggable applications (*CorDapps*) which
define the data to be exchanged and its workflow. See the [Corda Technical White Paper](https://docs.corda.net/_static/corda-technical-whitepaper.pdf) for a
detailed description of Corda's design and functionality.
R3 provide and maintain a number of essential services underpinning the ledger network. In the future these services are intended to be operated by a separate
Corda Foundation. The network services currently include:
- Network Identity service ('Doorman'): Issues signed digital certificates that uniquely identify parties on the network.
- Network Map service: Provides a way for nodes to advertise their identity, and identify other nodes on the network, their network address and advertised
services.
Participants to the ledger network include major institutions, financial organisations and regulated bodies, across various global jurisdictions. In a majority
of cases, there are stringent requirements in place for participants to demonstrate that their handling of all data is performed in an appropriately secure
manner, including the exchange of data over the ledger network. This document identifies measures within the Corda platform and supporting infrastructure to
mitigate key security risks in support of these requirements.
The Corda Network
-----------------
The diagram below illustrates the network architecture, protocols and high level data flows that comprise the Corda Network. The threat model has been developed
based upon this architecture.
![](./images/threat-model.png)
Threat Model
------------
Threat Modelling is an iterative process that works to identify, describe and mitigate threats to a system. One of the most common models for identifying
threats is the [STRIDE](https://en.wikipedia.org/wiki/STRIDE_(security)) framework. It provides a set of security threats in six categories:
- Spoofing
- Tampering
- Information Disclosure
- Repudiation
- Denial of Service
- Elevation of Privilege
The Corda threat model uses the STRIDE framework to present the threats to the Corda Network in a structured way. It should be stressed that threat modelling is
an iterative process that is never complete. The model described below is part of an on-going process intended to continually refine the security architecture
of the Corda platform.
### Spoofing
Spoofing is pretending to be something or someone other than yourself. It covers the actions taken by an attacker to impersonate another party, typically for the
purposes of gaining unauthorised access to privileged data, or perpetrating fraudulent transactions. Spoofing can occur on multiple levels. Machines can be
impersonated at the network level by a variety of methods such as ARP & IP spoofing or DNS compromise.
Spoofing can also occur at an application or user-level. Attacks at this level typically target authentication logic, using compromised passwords and
cryptographic keys, or by subverting cryptography systems.
Corda employs a Public Key Infrastructure (PKI) to validate the identity of nodes, both at the point of registration with the network map service and
subsequently through the cryptographic signing of transactions. An imposter would need to acquire an organisation's private keys in order to meaningfully
impersonate that organisation. R3 provides guidance to all ledger network participants to ensure adequate security is maintained around cryptographic keys.
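The following is a small illustration of this point, not part of the threat model itself: with Corda's `Crypto` utilities, only the holder of the private key can produce a signature that peers will accept against the advertised public key. The key pair and message below are invented.

```kotlin
import net.corda.core.crypto.Crypto

fun main() {
    // Hypothetical identity key pair; a real node's keys live in its key store or HSM.
    val keyPair = Crypto.generateKeyPair(Crypto.EDDSA_ED25519_SHA512)
    val message = "NodeInfo for O=Example Corp, L=London, C=GB".toByteArray()

    // Only the private-key holder can produce this signature...
    val signature = Crypto.doSign(keyPair.private, message)

    // ...and any peer can verify it against the corresponding public key.
    check(Crypto.doVerify(keyPair.public, signature, message))
}
```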
+-------------+------------------------------------------------------------------------------+----------------------------------------------------------------+
| Element | Attacks | Mitigations |
+=============+==============================================================================+================================================================+
| RPC Client | An external attacker impersonates an RPC client and is able to initiate | The RPC Client is authenticated by the node and must supply |
| | flows on their behalf. | valid credentials (username & password). |
| | | |
| | A malicious RPC client connects to the node and impersonates another, | RPC Client permissions are configured by the node |
| | higher-privileged client on the same system, and initiates flows on their | administrator and can be used to restrict the actions and |
| | behalf. | flows available to the client. |
| | | |
| | **Impacts** | RPC credentials and permissions can be managed by an Apache |
| | | Shiro service. The RPC service restricts which actions are |
| | If successful, the attacker would be able to perform actions that they are | available to a client based on what permissions they have been |
| | not authorised to perform, such as initiating flows. The impact of these | assigned. |
| | actions could have financial consequences depending on what flows were | |
| | available to the attacker. | |
+-------------+------------------------------------------------------------------------------+----------------------------------------------------------------+
| Node | An attacker attempts to impersonate a node and issue a transaction using | Nodes must connect to each other using |
| | their identity. | mutually-authenticated TLS connections. Node identity is |
| | | authenticated using the certificates exchanged as part of the |
| | An attacker attempts to impersonate another node on the network by | TLS protocol. Only the node that owns the corresponding |
| | submitting NodeInfo updates with falsified address and/or identity | private key can assert their true identity. |
| | information. | |
| | | NodeInfo updates contain the node's public identity |
| | **Impacts** | certificate and must be signed by the corresponding private |
| | | key. Only the node in possession of this private key can sign |
| | If successful, a node able to assume the identity of another party could | the NodeInfo. |
| | conduct fraudulent transactions (e.g. pay cash to its own identity), giving | |
| | a direct financial impact to the compromised identity. Demonstrating that | Corda employs a Public Key Infrastructure (PKI) to validate |
| | the actions were undertaken fraudulently could prove technically challenging | the identity of nodes. An imposter would need to acquire an |
| | to any subsequent dispute resolution process. | organisation's private keys in order to meaningfully |
| | | impersonate that organisation. Corda will soon support a range |
| | In addition, an impersonating node may be able to obtain privileged | of HSMs (Hardware Security Modules) for storing a node's |
| | information from other nodes, including receipt of messages intended for the | private keys, which mitigates this risk. |
| | original party containing information on new and historic transactions. | |
+-------------+------------------------------------------------------------------------------+----------------------------------------------------------------+
| Network Map | An attacker with appropriate network access performs a DNS compromise, | Connections to the Network Map service are secured using the |
| | resulting in network traffic to the Doorman & Network Map being routed to | HTTPS protocol. The connecting node authenticates the |
| | their attack server, which attempts to impersonate these machines. | NetworkMap servers using their public certificates, to ensure |
| | | the identity of these servers is correct. |
| | **Impact** | |
| | | All data received from the NetworkMap is digitally signed (in |
| | Impersonation of the Network Map would enable an attacker to issue | addition to being protected by TLS) - an attacker attempting |
| | unauthorised updates to the map. | to spoof the Network Map would need to acquire both private |
| | | TLS keys, and the private NetworkMap signing keys. |
| | | |
| | | The Doorman and NetworkMap signing keys are stored inside a |
| | | Hardware Security Module (HSM) with strict security controls |
| | | (network separation and physical access controls). |
+-------------+------------------------------------------------------------------------------+----------------------------------------------------------------+
| Doorman | A malicious actor attempts to join the Corda Network by | R3 operate strict validation procedures to ensure that |
| | impersonating an existing organisation and issues a fraudulent registration | requests to join the Corda Network have legitimately |
| | request. | originated from the organisation in question. |
| | | |
| | **Impact** | |
| | | |
| | The attacker would be able to join and impersonate an organisation. | |
| | | |
| | The operator could issue an identity cert for any organisation, publish a | |
| | valid NodeInfo and redirect all traffic to themselves in the clear. | |
+-------------+------------------------------------------------------------------------------+----------------------------------------------------------------+
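As a concrete, purely illustrative companion to the RPC client row above, the sketch below shows an RPC client authenticating before it can invoke anything. The host, port and credentials are placeholders, and the permission set is whatever the node administrator has granted to that user.

```kotlin
import net.corda.client.rpc.CordaRPCClient
import net.corda.core.utilities.NetworkHostAndPort

fun main() {
    // The node refuses the connection unless these credentials match a configured RPC user
    // (optionally managed via an Apache Shiro realm, as described above).
    val client = CordaRPCClient(NetworkHostAndPort("localhost", 10006))
    val connection = client.start("rpcUser", "example-password")
    try {
        val proxy = connection.proxy
        // Each call is then checked against the permissions granted to "rpcUser",
        // for example which flows this user may start.
        println(proxy.nodeInfo().legalIdentities)
    } finally {
        connection.close()
    }
}
```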
### Tampering
Tampering refers to the modification of data with malicious intent. This typically involves modification of data at rest (such as a file on disk, or fields in a
database), or modification of data in transit.
To be successful, an attacker would require privileged access to some part of the network infrastructure (either public or internal private networks). They
might also have access to a node's file-system, database or even direct memory access.
+------------+-----------------------------------------------------------------------------+------------------------------------------------------------------+
| Element | Attacks | Mitigations |
+============+=============================================================================+==================================================================+
| Node | Unintended, adverse behaviour of a CorDapp running on one or more nodes - | By design, Corda's notary-based consensus model and contract |
| (CorDapp) | either its core code or any supporting third party libraries. A coding bug | validation mechanisms provide protection against attempts to |
| | is assumed to be the default cause, although malicious modification of a | alter shared data or perform invalid operations. The primary |
| | CorDapp could result in similar effects. | risk is therefore to local systems. |
| | | |
| | | Future versions of Corda will require CorDapps to be executed |
| | | inside a sandboxed JVM environment, modified to restrict |
| | | unauthorised access to the local file system and network. This |
| | | is intended to minimise the potential of a compromised CorDapp |
| | | to affect systems local to the node. |
+------------+-----------------------------------------------------------------------------+------------------------------------------------------------------+
| P2P & RPC | An attacker performs a Man-in-the-Middle (MITM) attack against a node's | Mutually authenticated TLS connections between nodes ensure |
| connections | peer-to-peer (P2P) connection. | that Man-In-The-Middle (MITM) attacks cannot take place. Corda |
| | | Nodes restrict their connections to TLS v1.2 and also restrict |
| | **Impact** | which cipher suites are accepted. |
| | | |
| | An attacker would be able to modify transactions between participating | |
| | nodes. | |
+------------+-----------------------------------------------------------------------------+------------------------------------------------------------------+
| Node Vault | An attacker gains access to the node's vault and modifies tables in the | There are not currently any direct controls to mitigate this |
| | database. | kind of attack. A node's vault is assumed to be within the same |
| | | trust boundary of the node JVM. Access to the vault must be |
| | **Impact** | restricted such that only the node can access it. Both |
| | | network-level controls (fire-walling) and database permissions |
| | Transaction history would become compromised. The impact could range from | must be employed. |
| | deletion of data to malicious tampering of financial detail. | |
| | | Note that the tampering of a node's vault only affects that |
| | | specific node's transaction history. No other node in the |
| | | network is affected and any tampering attempts are easily |
| | | detected. |
| | | |
| | | |
+------------+-----------------------------------------------------------------------------+------------------------------------------------------------------+
| Network | An attacker compromises the Network Map service and publishes an | Individual Node entries in the NetworkMap must be signed by the |
| Map | illegitimate update. | associated node's private key. The signatures are validated by |
| | | the NetworkMap service, and all other Nodes in the network, to |
| | **Impact** | ensure they have not been tampered with. An attacker would need |
| | | to acquire a node's private identity signing key to be able to |
| | NodeInfo entries (name & address information) could potentially become | make modifications to a NodeInfo. This is only possible if the |
| | altered if this attack was possible | attacker has control of the node in question. |
| | | |
| | The NetworkMap could be deleted and/or unauthorized nodes could be added | It is not possible for the NetworkMap service (or R3) to modify |
| | to, or removed from the map. | entries in the network map (because the node's private keys are |
| | | not accessible). If the NetworkMap service were compromised, the |
| | | only impact the attacker could have would be to add or remove |
| | | individual entries in the map. |
+------------+-----------------------------------------------------------------------------+------------------------------------------------------------------+
### Repudiation
Repudiation refers to the ability to claim a malicious action did not take place. Repudiation becomes relevant when it is not possible to verify the identity of
an attacker, or there is a lack of evidence to link their malicious actions with events in a system.
Preventing repudiation does not prevent other forms of attack. Rather, the goal is to ensure that the attacker is identifiable, their actions can be traced, and
there is no way for the attacker to deny having committed those actions.
+-------------+------------------------------------------------------------------------------+-----------------------------------------------------------------+
| Element | Attacks | Mitigations |
+=============+==============================================================================+=================================================================+
| RPC Client | Attacker attempts to initiate a flow that they are not entitled to perform | RPC clients must authenticate to the Node using credentials |
| | | passed over TLS. It is therefore not possible for an RPC client |
| | **Impact** | to perform actions without first proving their identity. |
| | | |
| | Flows could be initiated without knowing the identity of the client. | All interactions with an RPC user are also logged by the node. |
| | | An attacker's identity and actions will be recorded and cannot |
| | | be repudiated. |
+-------------+------------------------------------------------------------------------------+-----------------------------------------------------------------+
| Node | A malicious CorDapp attempts to spend a state that does not belong to them. | Corda transactions must be signed with a node's private |
| | The node operator then claims that it was not their node that initiated the | identity key in order to be accepted by the rest of the |
| | transaction. | network. The signature directly identifies the signing party |
| | | and cannot be made by any other node - therefore the act of |
| | **Impact** | signing a transaction cannot be repudiated. |
| | | |
| | Financial transactions could be initiated by anonymous parties, leading to | Corda transactions between nodes utilize the P2P protocol, |
| | financial loss, and loss of confidence in the network. | which requires a mutually authenticated TLS connection. It is |
| | | not possible for a node to issue transactions without having |
| | | its identity authenticated by other nodes in the network. Node |
| | | identity and TLS certificates are issued via Corda Network |
| | | services, and use the Corda PKI (Public Key Infrastructure) for |
| | | authentication. |
| | | |
| | | All P2P transactions are logged by the node, meaning that any |
| | | interactions are recorded |
+-------------+------------------------------------------------------------------------------+-----------------------------------------------------------------+
| Node | A node attempts to perform a denial-of-state attack. | Non-validating Notaries require a signature over every request, |
| | | therefore nobody can deny performing a denial-of-state attack |
| | | because every transaction clearly identifies the node that |
| | | initiated it. |
+-------------+------------------------------------------------------------------------------+-----------------------------------------------------------------+
### Information Disclosure
Information disclosure is about the unauthorised access of data. Attacks of this kind have an impact when confidential data is accessed. Typical examples of
attack include extracting secrets from a running process, and accessing confidential files on a file-system which have not been appropriately secured.
Interception of network communications between trusted parties can also lead to information disclosure.
An attacker capable of intercepting network traffic from a Corda node would, at a minimum, be able to identify which other parties that node was interacting
with, along with relative frequency and volume of data being shared; this could be used to infer additional privileged information without the parties'
consent. All network communication of a Corda node is encrypted using the TLS protocol (v1.2), using modern cryptographic algorithms.
+------------+------------------------------------------------------------------------------+------------------------------------------------------------------+
| Element | Attack | Mitigations |
+============+==============================================================================+==================================================================+
| Node | An attacker attempts to retrieve transaction history from a peer node in the | By design, Corda nodes do not globally broadcast transaction |
| | network, for which they have no legitimate right of access. | information to all participants in the network. |
| | | |
| | Corda nodes will, upon receipt of a request referencing a valid transaction | A node will not divulge arbitrary transactions to a peer unless |
| | hash, respond with the dependency graph of that transaction. One theoretical | that peer has been included in the transaction flow. A node only |
| | scenario is therefore that a participant is able to guess (or otherwise | divulges transaction history if the transaction being requested |
| | acquire by illicit means) the hash of a valid transaction, thereby being | is a descendant of a transaction that the node itself has |
| | able to acquire its content from another node. | previously shared as part of the current flow session. |
| | | |
| | **Impact** | The SGX integration feature currently envisaged for Corda will |
| | | implement CPU peer-to-peer encryption under which transaction |
| | If successful, an exploit of the form above could result in information | graphs are transmitted in an encrypted state and only decrypted |
| | private to specific participants being shared with one or more | within a secure enclave. Knowledge of a transaction hash will |
| | non-privileged parties. This may include market-sensitive information used | then be further rendered insufficient for a non-privileged party |
| | to derive competitive advantage. | to view the content of a transaction. |
+------------+------------------------------------------------------------------------------+------------------------------------------------------------------+
| Node Vault | An unauthorised user attempts to access the node's vault | Access to the Vault uses standard JDBC authentication mechanism. |
| (database) | | Any user connecting to the vault must have permission to do so. |
| | **Impact** | |
| | | |
| | Access to the vault would reveal the full transaction history that the node | |
| | has taken part in. This may include financial information. | |
+------------+------------------------------------------------------------------------------+------------------------------------------------------------------+
| Node | An attacker who gains access to the machine running the Node attempts to | Corda Nodes are designed to be executed using a designated |
| Process | read memory from the JVM process. | 'corda' system process, which other users and processes on the |
| (JVM) | | system do not have permission to access. |
| | An attacker with access to the file-system attempts to read the node's | |
| | cryptographic key-store, containing the private identity keys. | The node's Java Key Store is encrypted using PKCS\#12 |
| | | encryption. Corda will eventually store its keys |
| | **Impact** | in an HSM (Hardware Security Module). |
| | | |
| | An attacker would be able to read sensitive data such as private identity keys. | |
| | The worst impact would be the ability to extract private keys from the JVM | |
| | process. | |
| | | |
| | | |
+------------+------------------------------------------------------------------------------+------------------------------------------------------------------+
| RPC Client | Interception of RPC traffic between a client system and the node. | RPC communications are protected by the TLS protocol. |
| | | |
| | A malicious RPC client authenticates to a Node and attempts to query the | Permission to query a node's vault must be explicitly granted on |
| | transaction vault. | a per-user basis. It is recommended that RPC credentials and |
| | | permissions are managed in an Apache Shiro database. |
| | **Impact** | |
| | | |
| | An attacker would be able to see details of transactions shared between the | |
| | connected business systems and any transacting party. | |
+------------+------------------------------------------------------------------------------+------------------------------------------------------------------+
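As a brief aside on the key-store mitigation above, the sketch below uses standard JDK APIs (not Corda-specific; the path and password are placeholders) to show that reading a PKCS#12 key store requires its password, so file-system access alone does not expose the private keys.

```kotlin
import java.io.FileInputStream
import java.security.KeyStore

fun loadNodeKeyStore(path: String, password: CharArray): KeyStore {
    // The load call fails without the correct password, even if an attacker can read the file.
    val keyStore = KeyStore.getInstance("PKCS12")
    FileInputStream(path).use { stream -> keyStore.load(stream, password) }
    return keyStore
}

// Example usage (placeholder values):
// val ks = loadNodeKeyStore("certificates/nodekeystore.jks", "example-password".toCharArray())
```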
### Denial of Service
Denial-of-service (DoS) attacks target the availability of a resource from its intended users. There are two anticipated targets of a DoS attack - network
participants (Corda Nodes) and network services (Doorman and the Network Map). DoS attacks occur by targeting the node or network services with a high
volume/frequency of requests, or by sending malformed requests. Typical DoS attacks leverage a botnet or other distributed group of systems (Distributed Denial
of Service, DDoS). A successful DoS attack may result in non-availability of targeted ledger network node(s)/service(s), both during the attack and thereafter
until normal service can be resumed.
Communication over the ledger network is primarily peer-to-peer. Therefore the network as a whole is relatively resilient to DoS attacks. Notaries and oracles
will only communicate with peers in the network, so are protected from non-member-on-member application-level attack.
Corda Network Services are protected by enterprise-grade DDoS detection and mitigation services.
+------------+------------------------------------------------------------------------------+------------------------------------------------------------------+
| Element | Attack | Mitigations |
+============+==============================================================================+==================================================================+
| Node | An attacker sends a high volume of malformed transactions to a node. | P2P communication is authenticated as part of the TLS protocol, |
| | | meaning that attackers must be part of the Corda network to |
| | **Impact** | launch an attack. |
| | | |
| | Nodes targeted by this attack could exhaust their processing & memory | Communication over the ledger network is primarily peer-to-peer, |
| | resources, or potentially cease responding to transactions. | so the network as a whole is relatively resilient to DoS attacks; |
| | | the primary threat is to specific nodes or services. |
| | | |
| | | Note that there is no specific mitigation against DoS attacks at |
| | | the per-node level. DoS attacks by participants on other |
| | | participants will be expressly forbidden under the terms of the |
| | | ledger network's network agreement. Measures will be taken |
| | | against any ledger network participant found to have perpetrated |
| | | a DoS attack, including exclusion from the ledger network |
| | | a DoS attack, including exclusion from the ledger |
| | | risk of a member-on-member attack is low and technical measures |
| | | are not considered under this threat model, although they may be |
| | | included in future iterations. |
+------------+------------------------------------------------------------------------------+------------------------------------------------------------------+
| CorDapp | Unintended termination or other logical sequence (e.g. due to a coding bug | The network agreement will stipulate a default maximum allowable |
| | in either Corda or a CorDapp) by which a party is rendered unable to resolve | period of time - the 'event horizon' - within which a party is |
| | a flow. This most likely results from another party failing to respond when | required to provide a valid response to any message sent to it |
| | required to do so under the terms of the agreed transaction protocol. | in the course of a flow. If that period is exceeded, the flow |
| | | will be considered to be cancelled and may be discontinued |
| | **Impact** | without prejudice by all parties. The event horizon may be |
| | | superseded by agreements between parties specifying other |
| | Depending on the nature of the flow, a party could be financially impacted | timeout periods, which may be encoded into flows under the Corda |
| | by failure to resolve a flow on an indefinite basis. For example, a party | flow framework. |
| | may be left in possession of a digital asset without the means to transfer | |
| | it to another party. | Additional measures may be taken under the agreement against |
| | | parties who repeatedly fail to meet their response obligations |
| | | under the network agreement. |
+------------+------------------------------------------------------------------------------+------------------------------------------------------------------+
| Doorman | Attacker submits excessive registration requests to the Doorman service | Doorman is deployed behind a rate-limiting firewall. |
| | | |
| | | Doorman requests are validated and filtered to ensure malformed |
| | | requests are rejected. |
+------------+------------------------------------------------------------------------------+------------------------------------------------------------------+
| Network | Attacker causes the network map service to become unavailable | Updates to the network map must be signed by participant nodes |
| Map | | and are authenticated before being processed. |
| | | |
| | | The network map is designed to be distributed by a CDN (Content |
| | | Delivery Network). This design leverages the architecture and |
| | | security controls of the CDN and is expected to be resilient to |
| | | DDoS (Distributed Denial of Service) attack. |
| | | |
| | | The Network Map is also cached locally by nodes on the network. |
| | | If the network map online service were temporarily unavailable, |
| | | the Corda network would not be affected. |
| | | |
| | | There is no requirement for the network map services to be |
| | | highly available in order for the ledger network to be |
| | | operational. Temporary non-availability of the network map |
| | | service may delay certification of new entrants to the network, |
| | | but will have no impact on existing participants. Similarly, the |
| | | network map will be cached by individual nodes once downloaded |
| | | from the network map service; unplanned downtime would prevent |
| | | broadcast of updates relating to new nodes connecting to / |
| | | disconnecting from the network, but not affect communication |
| | | between nodes whose connection state remains unchanged |
| | | throughout the incident. |
+------------+------------------------------------------------------------------------------+------------------------------------------------------------------+
### Elevation of Privilege
Elevation of Privilege is enabling somebody to perform actions they are not permitted to do. Attacks range from a normal user executing actions as a more
privileged administrator, to a remote (external) attacker with no privileges executing arbitrary code.
+------------+------------------------------------------------------------------------------+-----------------------------------------------------------------+
| Element | Attack | Mitigations |
+============+==============================================================================+=================================================================+
| Node | Malicious contract attempts to instantiate classes in the JVM that it is not | The AMQP serialiser uses a combination of white and black-lists |
| | authorised to access. | to mitigate against de-serialisation vulnerabilities. |
| | | |
| | Malicious CorDapp sends malformed serialised data to a peer. | Corda does not currently provide specific security controls to |
| | | mitigate all classes of privilege escalation vulnerabilities. |
| | **Impact** | The design of Corda requires that CorDapps are inherently |
| | | trusted by the node administrator. |
| | Unauthorised remote code execution would lead to complete system compromise. | |
| | | Future security research will introduce stronger controls that |
| | | can mitigate this class of threat. The Deterministic JVM will |
| | | provide a sandbox that prevents execution of code & classes |
| | | outside of the security boundary that contract code is |
| | | restricted to. |
| | | |
| | | |
+------------+------------------------------------------------------------------------------+-----------------------------------------------------------------+
| RPC Client | A malicious RPC client connects to the node and impersonates another, | Nodes implement an access-control model that restricts what |
| | higher-privileged client on the same system, and initiates flows on their | actions RPC users can perform. |
| | behalf. | |
| | | Session replay is mitigated by virtue of the TLS protocol used |
| | | to protect RPC communications. |
+------------+------------------------------------------------------------------------------+-----------------------------------------------------------------+
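To illustrate the whitelisting approach mentioned above (the class and its fields are invented), a CorDapp opts a type into Corda's serialisation whitelist with an annotation; types that are neither annotated nor otherwise whitelisted are rejected during deserialisation.

```kotlin
import net.corda.core.serialization.CordaSerializable

// Only whitelisted types such as this one may cross the P2P/RPC serialisation boundary;
// arbitrary classes embedded in a malformed payload are refused by the serialiser.
@CordaSerializable
data class TradeSummary(val reference: String, val amountPennies: Long)
```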
Conclusion
----------
The threat model presented here describes the main threats to the Corda Network, and the controls that are included to mitigate these threats. It was necessary
to restrict this model to a high-level perspective of the Corda Network. It is hoped that enough information is provided to allow network participants to
understand the security model of Corda.
Threat modelling is an on-going process. There is active research at R3 to continue evolving the Corda Threat Model. In particular, models are being developed
that focus more closely on individual components - such as the Node, Network Map and Doorman.

Binary image file added (23 KiB); not shown.


@@ -16,15 +16,16 @@ import net.corda.node.services.config.configureWithDevSSLCertificate
 import net.corda.node.services.network.PersistentNetworkMapCache
 import net.corda.node.services.transactions.PersistentUniquenessProvider
 import net.corda.node.utilities.AffinityExecutor.ServiceAffinityExecutor
+import net.corda.node.utilities.TestingNamedCacheFactory
 import net.corda.nodeapi.internal.persistence.CordaPersistence
 import net.corda.nodeapi.internal.persistence.DatabaseConfig
 import net.corda.testing.core.ALICE_NAME
 import net.corda.testing.core.MAX_MESSAGE_SIZE
 import net.corda.testing.core.SerializationEnvironmentRule
 import net.corda.testing.driver.PortAllocation
-import net.corda.testing.internal.stubs.CertificateStoreStubs
 import net.corda.testing.internal.LogHelper
 import net.corda.testing.internal.rigorousMock
+import net.corda.testing.internal.stubs.CertificateStoreStubs
 import net.corda.testing.node.internal.MOCK_VERSION_INFO
 import net.corda.testing.node.internal.makeInternalTestDataSourceProperties
 import org.apache.activemq.artemis.api.core.Message.HDR_VALIDATED_USER
@@ -93,7 +94,7 @@ class ArtemisMessagingTest {
 }
 LogHelper.setLevel(PersistentUniquenessProvider::class)
 database = configureDatabase(makeInternalTestDataSourceProperties(configSupplier = { ConfigFactory.empty() }), DatabaseConfig(runMigration = true), { null }, { null })
-networkMapCache = PersistentNetworkMapCache(database, rigorousMock()).apply { start(emptyList()) }
+networkMapCache = PersistentNetworkMapCache(TestingNamedCacheFactory(), database, rigorousMock()).apply { start(emptyList()) }
 }
 @After
@@ -397,6 +398,7 @@ class ArtemisMessagingTest {
 database,
 networkMapCache,
 MetricRegistry(),
+TestingNamedCacheFactory(),
 isDrainingModeOn = { false },
 drainingModeWasChangedEvents = PublishSubject.create()).apply {
 config.configureWithDevSSLCertificate()


@@ -5,6 +5,7 @@ import net.corda.core.utilities.NetworkHostAndPort
 import net.corda.node.internal.configureDatabase
 import net.corda.node.internal.schemas.NodeInfoSchemaV1
 import net.corda.node.services.identity.InMemoryIdentityService
+import net.corda.node.utilities.TestingNamedCacheFactory
 import net.corda.nodeapi.internal.DEV_ROOT_CA
 import net.corda.nodeapi.internal.persistence.CordaPersistence
 import net.corda.nodeapi.internal.persistence.DatabaseConfig
@@ -36,14 +37,14 @@ class PersistentNetworkMapCacheTest : IntegrationTest() {
 //Enterprise only - objects created in the setup method, below initialized with dummy values to avoid need for nullable type declaration
 private var database = CordaPersistence(DatabaseConfig(), emptySet())
-private var charlieNetMapCache = PersistentNetworkMapCache(database, InMemoryIdentityService(trustRoot = DEV_ROOT_CA.certificate))
+private var charlieNetMapCache = PersistentNetworkMapCache(TestingNamedCacheFactory(), database, InMemoryIdentityService(trustRoot = DEV_ROOT_CA.certificate))
 @Before()
 fun setup() {
 //Enterprise only - for test in database mode ensure the remote database is setup before creating CordaPersistence
 super.setUp()
 database = configureDatabase(makeTestDataSourceProperties(CHARLIE_NAME.toDatabaseSchemaName()), makeTestDatabaseProperties(CHARLIE_NAME.toDatabaseSchemaName()), { null }, { null })
-charlieNetMapCache = PersistentNetworkMapCache(database, InMemoryIdentityService(trustRoot = DEV_ROOT_CA.certificate))
+charlieNetMapCache = PersistentNetworkMapCache(TestingNamedCacheFactory(), database, InMemoryIdentityService(trustRoot = DEV_ROOT_CA.certificate))
 }
 @After


@@ -17,6 +17,7 @@ import net.corda.core.utilities.NetworkHostAndPort
 import net.corda.core.utilities.getOrThrow
 import net.corda.node.internal.configureDatabase
 import net.corda.node.services.schema.NodeSchemaService
+import net.corda.node.utilities.TestingNamedCacheFactory
 import net.corda.nodeapi.internal.persistence.CordaPersistence
 import net.corda.nodeapi.internal.persistence.DatabaseConfig
 import net.corda.testing.core.ALICE_NAME
@@ -157,7 +158,7 @@ class RaftTransactionCommitLogTests {
 // Enterprise - OS difference: below configureDatabase parameters differs with OS intentionally to be able run test in remote database
 val database = configureDatabase(makeInternalTestDataSourceProperties( configSupplier = { ConfigFactory.empty() }), DatabaseConfig(runMigration = true), { null }, { null }, NodeSchemaService(includeNotarySchemas = true))
 databases.add(database)
-val stateMachineFactory = { RaftTransactionCommitLog(database, Clock.systemUTC(), RaftUniquenessProvider.Companion::createMap) }
+val stateMachineFactory = { RaftTransactionCommitLog(database, Clock.systemUTC(), { RaftUniquenessProvider.createMap(TestingNamedCacheFactory()) }) }
 val server = CopycatServer.builder(address)
 .withStateMachine(stateMachineFactory)


@@ -65,10 +65,7 @@ import net.corda.node.services.statemachine.*
 import net.corda.node.services.transactions.*
 import net.corda.node.services.upgrade.ContractUpgradeServiceImpl
 import net.corda.node.services.vault.NodeVaultService
-import net.corda.node.utilities.AffinityExecutor
-import net.corda.node.utilities.JVMAgentRegistry
-import net.corda.node.utilities.NamedThreadFactory
-import net.corda.node.utilities.NodeBuildProperties
+import net.corda.node.utilities.*
 import net.corda.nodeapi.internal.NodeInfoAndSigned
 import net.corda.nodeapi.internal.SignedNodeInfo
 import net.corda.nodeapi.internal.config.CertificateStore
@@ -119,6 +116,7 @@ import net.corda.core.crypto.generateKeyPair as cryptoGenerateKeyPair
 // TODO Log warning if this node is a notary but not one of the ones specified in the network parameters, both for core and custom
 abstract class AbstractNode<S>(val configuration: NodeConfiguration,
 val platformClock: CordaClock,
+cacheFactoryPrototype: NamedCacheFactory,
 protected val versionInfo: VersionInfo,
 protected val cordappLoader: CordappLoader,
 protected val serverThread: AffinityExecutor.ServiceAffinityExecutor,
@@ -128,6 +126,11 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
 @Suppress("LeakingThis")
 private var tokenizableServices: MutableList<Any>? = mutableListOf(platformClock, this)
+protected val metricRegistry = MetricRegistry()
+protected val cacheFactory = cacheFactoryPrototype.bindWithConfig(configuration).bindWithMetrics(metricRegistry).tokenize()
+val monitoringService = MonitoringService(metricRegistry).tokenize()
 protected val runOnStop = ArrayList<() -> Any?>()
 init {
@@ -142,7 +145,7 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
 }
 val schemaService = NodeSchemaService(cordappLoader.cordappSchemas, configuration.notary != null).tokenize()
-val identityService = PersistentIdentityService().tokenize()
+val identityService = PersistentIdentityService(cacheFactory).tokenize()
 val database: CordaPersistence = createCordaPersistence(
 configuration.database,
 identityService::wellKnownPartyFromX500Name,
@@ -153,13 +156,13 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
 // TODO Break cyclic dependency
 identityService.database = database
 }
-val networkMapCache = PersistentNetworkMapCache(database, identityService).tokenize()
+val networkMapCache = PersistentNetworkMapCache(cacheFactory, database, identityService).tokenize()
 val checkpointStorage = DBCheckpointStorage()
 @Suppress("LeakingThis")
 val transactionStorage = makeTransactionStorage(configuration.transactionCacheSizeBytes).tokenize()
 val networkMapClient: NetworkMapClient? = configuration.networkServices?.let { NetworkMapClient(it.networkMapURL, versionInfo) }
-val metricRegistry = MetricRegistry()
-val attachments = NodeAttachmentService(metricRegistry, database, configuration.attachmentContentCacheSizeBytes, configuration.attachmentCacheBound).tokenize()
+val attachments = NodeAttachmentService(metricRegistry, cacheFactory, database).tokenize()
 val cordappProvider = CordappProviderImpl(cordappLoader, CordappConfigFileProvider(), attachments).tokenize()
 @Suppress("LeakingThis")
 val keyManagementService = makeKeyManagementService(identityService).tokenize()
@@ -168,7 +171,6 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
 val vaultService = makeVaultService(keyManagementService, servicesForResolution, database).tokenize()
 val nodeProperties = NodePropertiesPersistentStore(StubbedNodeUniqueIdProvider::value, database)
 val flowLogicRefFactory = FlowLogicRefFactoryImpl(cordappLoader.appClassLoader)
-val monitoringService = MonitoringService(metricRegistry).tokenize()
 val networkMapUpdater = NetworkMapUpdater(
 networkMapCache,
 NodeInfoWatcher(
@@ -316,7 +318,7 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
 servicesForResolution.start(netParams)
 networkMapCache.start(netParams.notaries)
-startDatabase(metricRegistry)
+startDatabase()
 val (identity, identityKeyPair) = obtainIdentity(notaryConfig = null)
 identityService.start(trustRoot, listOf(identity.certificate, nodeCa))
@@ -725,7 +727,7 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
 }
 protected open fun makeTransactionStorage(transactionCacheSizeBytes: Long): WritableTransactionStorage {
-return DBTransactionStorage(transactionCacheSizeBytes, database)
+return DBTransactionStorage(database, cacheFactory)
 }
 @VisibleForTesting
@@ -792,7 +794,7 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
 // Specific class so that MockNode can catch it.
 class DatabaseConfigurationException(message: String) : CordaException(message)
-protected open fun startDatabase(metricRegistry: MetricRegistry? = null) {
+protected open fun startDatabase() {
 log.debug {
 val driverClasses = DriverManager.getDrivers().asSequence().map { it.javaClass.name }
 "Available JDBC drivers: $driverClasses"
@@ -822,7 +824,7 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
 // Place the long term identity key in the KMS. Eventually, this is likely going to be separated again because
 // the KMS is meant for derived temporary keys used in transactions, and we're not supposed to sign things with
 // the identity key. But the infrastructure to make that easy isn't here yet.
-return PersistentKeyManagementService(identityService, database)
+return PersistentKeyManagementService(cacheFactory, identityService, database)
 }
 private fun makeCoreNotaryService(notaryConfig: NotaryConfig, myNotaryIdentity: PartyAndCertificate?): NotaryService {
@@ -831,7 +833,7 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
 return notaryConfig.run {
 when {
 raft != null -> {
-val uniquenessProvider = RaftUniquenessProvider(configuration.baseDirectory, configuration.p2pSslOptions, database, platformClock, monitoringService.metrics, raft)
+val uniquenessProvider = RaftUniquenessProvider(configuration.baseDirectory, configuration.p2pSslOptions, database, platformClock, monitoringService.metrics, cacheFactory, raft)
 (if (validating) ::RaftValidatingNotaryService else ::RaftNonValidatingNotaryService)(services, notaryKey, uniquenessProvider)
 }
 bftSMaRt != null -> {
@@ -982,6 +984,7 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
 override val clock: Clock get() = platformClock
 override val configuration: NodeConfiguration get() = this@AbstractNode.configuration
 override val networkMapUpdater: NetworkMapUpdater get() = this@AbstractNode.networkMapUpdater
+override val cacheFactory: NamedCacheFactory get() = this@AbstractNode.cacheFactory
 private lateinit var _myInfo: NodeInfo
 override val myInfo: NodeInfo get() = _myInfo


@ -48,6 +48,7 @@ import net.corda.node.services.messaging.*
import net.corda.node.services.rpc.ArtemisRpcBroker import net.corda.node.services.rpc.ArtemisRpcBroker
import net.corda.node.utilities.AddressUtils import net.corda.node.utilities.AddressUtils
import net.corda.node.utilities.AffinityExecutor import net.corda.node.utilities.AffinityExecutor
import net.corda.node.utilities.DefaultNamedCacheFactory
import net.corda.node.utilities.DemoClock import net.corda.node.utilities.DemoClock
import net.corda.nodeapi.internal.ArtemisMessagingClient import net.corda.nodeapi.internal.ArtemisMessagingClient
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.INTERNAL_SHELL_USER import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.INTERNAL_SHELL_USER
@ -94,6 +95,7 @@ open class Node(configuration: NodeConfiguration,
) : AbstractNode<NodeInfo>( ) : AbstractNode<NodeInfo>(
configuration, configuration,
createClock(configuration), createClock(configuration),
DefaultNamedCacheFactory(),
versionInfo, versionInfo,
cordappLoader, cordappLoader,
// Under normal (non-test execution) it will always be "1" // Under normal (non-test execution) it will always be "1"
@ -197,7 +199,9 @@ open class Node(configuration: NodeConfiguration,
networkMap = networkMapCache, networkMap = networkMapCache,
metricRegistry = metricRegistry, metricRegistry = metricRegistry,
isDrainingModeOn = nodeProperties.flowsDrainingMode::isEnabled, isDrainingModeOn = nodeProperties.flowsDrainingMode::isEnabled,
drainingModeWasChangedEvents = nodeProperties.flowsDrainingMode.values drainingModeWasChangedEvents = nodeProperties.flowsDrainingMode.values,
metricRegistry = metricRegistry,
cacheFactory = cacheFactory
) )
} }
@ -356,7 +360,7 @@ open class Node(configuration: NodeConfiguration,
* This is not using the H2 "automatic mixed mode" directly but leans on many of the underpinnings. For more details * This is not using the H2 "automatic mixed mode" directly but leans on many of the underpinnings. For more details
* on H2 URLs and configuration see: http://www.h2database.com/html/features.html#database_url * on H2 URLs and configuration see: http://www.h2database.com/html/features.html#database_url
*/ */
override fun startDatabase(metricRegistry: MetricRegistry?) { override fun startDatabase() {
val databaseUrl = configuration.dataSourceProperties.getProperty("dataSource.url") val databaseUrl = configuration.dataSourceProperties.getProperty("dataSource.url")
val h2Prefix = "jdbc:h2:file:" val h2Prefix = "jdbc:h2:file:"
@ -395,7 +399,7 @@ open class Node(configuration: NodeConfiguration,
printBasicNodeInfo("Database connection url is", databaseUrl) printBasicNodeInfo("Database connection url is", databaseUrl)
} }
super.startDatabase(metricRegistry) super.startDatabase()
database.closeOnStop() database.closeOnStop()
} }
@ -444,12 +448,13 @@ open class Node(configuration: NodeConfiguration,
// https://jolokia.org/agent/jvm.html // https://jolokia.org/agent/jvm.html
JmxReporter.forRegistry(registry).inDomain("net.corda").createsObjectNamesWith { _, domain, name -> JmxReporter.forRegistry(registry).inDomain("net.corda").createsObjectNamesWith { _, domain, name ->
// Make the JMX hierarchy a bit better organised. // Make the JMX hierarchy a bit better organised.
val category = name.substringBefore('.') val category = name.substringBefore('.').substringBeforeLast('/')
val component = name.substringBefore('.').substringAfterLast('/', "")
val subName = name.substringAfter('.', "") val subName = name.substringAfter('.', "")
if (subName == "") (if (subName == "")
ObjectName("$domain:name=$category") ObjectName("$domain:name=$category${if (component.isNotEmpty()) ",component=$component," else ""}")
else else
ObjectName("$domain:type=$category,name=$subName") ObjectName("$domain:type=$category,${if (component.isNotEmpty()) "component=$component," else ""}name=$subName"))
}.build().start() }.build().start()
} }
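
The revised naming above splits a metric name into a category (the text before the first '.', minus any trailing '/component' segment), an optional component (the text after the last '/'), and a sub-name (the text after the first '.'). A self-contained sketch of that mapping, using a made-up metric name purely for illustration:

import javax.management.ObjectName

// Mirrors the revised naming logic above. "Caches/MyCache_hits.count" is a made-up
// metric name used only to show how the pieces are extracted.
fun toObjectName(domain: String, name: String): ObjectName {
    val category = name.substringBefore('.').substringBeforeLast('/')
    val component = name.substringBefore('.').substringAfterLast('/', "")
    val subName = name.substringAfter('.', "")
    return if (subName == "")
        ObjectName("$domain:name=$category${if (component.isNotEmpty()) ",component=$component," else ""}")
    else
        ObjectName("$domain:type=$category,${if (component.isNotEmpty()) "component=$component," else ""}name=$subName")
}

fun main() {
    // For the name above this yields an ObjectName equivalent to
    // net.corda:type=Caches,component=MyCache_hits,name=count
    println(toObjectName("net.corda", "Caches/MyCache_hits.count"))
}
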

View File

@ -25,6 +25,7 @@ import net.corda.node.services.network.NetworkMapUpdater
import net.corda.node.services.persistence.AttachmentStorageInternal import net.corda.node.services.persistence.AttachmentStorageInternal
import net.corda.node.services.statemachine.ExternalEvent import net.corda.node.services.statemachine.ExternalEvent
import net.corda.node.services.statemachine.FlowStateMachineImpl import net.corda.node.services.statemachine.FlowStateMachineImpl
import net.corda.node.utilities.NamedCacheFactory
import net.corda.nodeapi.internal.persistence.CordaPersistence import net.corda.nodeapi.internal.persistence.CordaPersistence
import java.security.PublicKey import java.security.PublicKey
@ -132,6 +133,7 @@ interface ServiceHubInternal : ServiceHub {
} }
fun getFlowFactory(initiatingFlowClass: Class<out FlowLogic<*>>): InitiatedFlowFactory<*>? fun getFlowFactory(initiatingFlowClass: Class<out FlowLogic<*>>): InitiatedFlowFactory<*>?
val cacheFactory: NamedCacheFactory
} }
interface FlowStarter { interface FlowStarter {

View File

@ -10,6 +10,7 @@ import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.debug import net.corda.core.utilities.debug
import net.corda.node.services.api.IdentityServiceInternal import net.corda.node.services.api.IdentityServiceInternal
import net.corda.node.utilities.AppendOnlyPersistentMap import net.corda.node.utilities.AppendOnlyPersistentMap
import net.corda.node.utilities.NamedCacheFactory
import net.corda.nodeapi.internal.crypto.X509CertificateFactory import net.corda.nodeapi.internal.crypto.X509CertificateFactory
import net.corda.nodeapi.internal.crypto.x509Certificates import net.corda.nodeapi.internal.crypto.x509Certificates
import net.corda.nodeapi.internal.persistence.CordaPersistence import net.corda.nodeapi.internal.persistence.CordaPersistence
@ -29,13 +30,14 @@ import javax.persistence.Lob
* cached for efficient lookup. * cached for efficient lookup.
*/ */
@ThreadSafe @ThreadSafe
class PersistentIdentityService : SingletonSerializeAsToken(), IdentityServiceInternal { class PersistentIdentityService(cacheFactory: NamedCacheFactory) : SingletonSerializeAsToken(), IdentityServiceInternal {
companion object { companion object {
private val log = contextLogger() private val log = contextLogger()
fun createPKMap(): AppendOnlyPersistentMap<SecureHash, PartyAndCertificate, PersistentIdentity, String> { fun createPKMap(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<SecureHash, PartyAndCertificate, PersistentIdentity, String> {
return AppendOnlyPersistentMap( return AppendOnlyPersistentMap(
"PersistentIdentityService_partyByKey", cacheFactory = cacheFactory,
name = "PersistentIdentityService_partyByKey",
toPersistentEntityKey = { it.toString() }, toPersistentEntityKey = { it.toString() },
fromPersistentEntity = { fromPersistentEntity = {
Pair( Pair(
@ -50,9 +52,10 @@ class PersistentIdentityService : SingletonSerializeAsToken(), IdentityServiceIn
) )
} }
fun createX500Map(): AppendOnlyPersistentMap<CordaX500Name, SecureHash, PersistentIdentityNames, String> { fun createX500Map(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<CordaX500Name, SecureHash, PersistentIdentityNames, String> {
return AppendOnlyPersistentMap( return AppendOnlyPersistentMap(
"PersistentIdentityService_partyByName", cacheFactory = cacheFactory,
name = "PersistentIdentityService_partyByName",
toPersistentEntityKey = { it.toString() }, toPersistentEntityKey = { it.toString() },
fromPersistentEntity = { Pair(CordaX500Name.parse(it.name), SecureHash.parse(it.publicKeyHash)) }, fromPersistentEntity = { Pair(CordaX500Name.parse(it.name), SecureHash.parse(it.publicKeyHash)) },
toPersistentEntity = { key: CordaX500Name, value: SecureHash -> toPersistentEntity = { key: CordaX500Name, value: SecureHash ->
@ -101,8 +104,8 @@ class PersistentIdentityService : SingletonSerializeAsToken(), IdentityServiceIn
// CordaPersistence is not a c'tor parameter to work around the cyclic dependency // CordaPersistence is not a c'tor parameter to work around the cyclic dependency
lateinit var database: CordaPersistence lateinit var database: CordaPersistence
private val keyToParties = createPKMap() private val keyToParties = createPKMap(cacheFactory)
private val principalToParties = createX500Map() private val principalToParties = createX500Map(cacheFactory)
fun start(trustRoot: X509Certificate, caCertificates: List<X509Certificate> = emptyList()) { fun start(trustRoot: X509Certificate, caCertificates: List<X509Certificate> = emptyList()) {
_trustRoot = trustRoot _trustRoot = trustRoot

View File

@ -6,6 +6,7 @@ import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.utilities.MAX_HASH_HEX_SIZE import net.corda.core.utilities.MAX_HASH_HEX_SIZE
import net.corda.node.services.identity.PersistentIdentityService import net.corda.node.services.identity.PersistentIdentityService
import net.corda.node.utilities.AppendOnlyPersistentMap import net.corda.node.utilities.AppendOnlyPersistentMap
import net.corda.node.utilities.NamedCacheFactory
import net.corda.nodeapi.internal.persistence.CordaPersistence import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX
import org.apache.commons.lang.ArrayUtils.EMPTY_BYTE_ARRAY import org.apache.commons.lang.ArrayUtils.EMPTY_BYTE_ARRAY
@ -25,7 +26,7 @@ import javax.persistence.Lob
* *
* This class needs database transactions to be in-flight during method calls and init. * This class needs database transactions to be in-flight during method calls and init.
*/ */
class PersistentKeyManagementService(val identityService: PersistentIdentityService, class PersistentKeyManagementService(cacheFactory: NamedCacheFactory, val identityService: PersistentIdentityService,
private val database: CordaPersistence) : SingletonSerializeAsToken(), KeyManagementServiceInternal { private val database: CordaPersistence) : SingletonSerializeAsToken(), KeyManagementServiceInternal {
@Entity @Entity
@javax.persistence.Table(name = "${NODE_DATABASE_PREFIX}our_key_pairs") @javax.persistence.Table(name = "${NODE_DATABASE_PREFIX}our_key_pairs")
@ -46,9 +47,10 @@ class PersistentKeyManagementService(val identityService: PersistentIdentityServ
} }
private companion object { private companion object {
fun createKeyMap(): AppendOnlyPersistentMap<PublicKey, PrivateKey, PersistentKey, String> { fun createKeyMap(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<PublicKey, PrivateKey, PersistentKey, String> {
return AppendOnlyPersistentMap( return AppendOnlyPersistentMap(
"PersistentKeyManagementService_keys", cacheFactory = cacheFactory,
name = "PersistentKeyManagementService_keys",
toPersistentEntityKey = { it.toStringShort() }, toPersistentEntityKey = { it.toStringShort() },
fromPersistentEntity = { fromPersistentEntity = {
Pair(Crypto.decodePublicKey(it.publicKey), Pair(Crypto.decodePublicKey(it.publicKey),
@ -62,7 +64,7 @@ class PersistentKeyManagementService(val identityService: PersistentIdentityServ
} }
} }
private val keysMap = createKeyMap() private val keysMap = createKeyMap(cacheFactory)
override fun start(initialKeyPairs: Set<KeyPair>) { override fun start(initialKeyPairs: Set<KeyPair>) {
initialKeyPairs.forEach { keysMap.addWithDuplicatesAllowed(it.public, it.private) } initialKeyPairs.forEach { keysMap.addWithDuplicatesAllowed(it.public, it.private) }

View File

@ -5,6 +5,7 @@ import net.corda.core.crypto.SecureHash
import net.corda.core.identity.CordaX500Name import net.corda.core.identity.CordaX500Name
import net.corda.node.services.statemachine.DeduplicationId import net.corda.node.services.statemachine.DeduplicationId
import net.corda.node.utilities.AppendOnlyPersistentMap import net.corda.node.utilities.AppendOnlyPersistentMap
import net.corda.node.utilities.NamedCacheFactory
import net.corda.nodeapi.internal.persistence.CordaPersistence import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX
import java.time.Instant import java.time.Instant
@ -19,20 +20,21 @@ typealias SenderHashToSeqNo = Pair<String, Long?>
/** /**
* Encapsulate the de-duplication logic. * Encapsulate the de-duplication logic.
*/ */
class P2PMessageDeduplicator(private val database: CordaPersistence) { class P2PMessageDeduplicator(cacheFactory: NamedCacheFactory, private val database: CordaPersistence) {
// A temporary in-memory set of deduplication IDs and associated high water mark details. // A temporary in-memory set of deduplication IDs and associated high water mark details.
// When we receive a message we don't persist the ID immediately, // When we receive a message we don't persist the ID immediately,
// so we store the ID here in the meantime (until the persisting db tx has committed). This is because Artemis may // so we store the ID here in the meantime (until the persisting db tx has committed). This is because Artemis may
// redeliver messages to the same consumer if they weren't ACKed. // redeliver messages to the same consumer if they weren't ACKed.
private val beingProcessedMessages = ConcurrentHashMap<DeduplicationId, MessageMeta>() private val beingProcessedMessages = ConcurrentHashMap<DeduplicationId, MessageMeta>()
private val processedMessages = createProcessedMessages() private val processedMessages = createProcessedMessages(cacheFactory)
// We add the peer to the key, so other peers cannot attempt malicious meddling with sequence numbers. // We add the peer to the key, so other peers cannot attempt malicious meddling with sequence numbers.
// Expire after 7 days since we last touched an entry, to avoid infinite growth. // Expire after 7 days since we last touched an entry, to avoid infinite growth.
private val senderUUIDSeqNoHWM: MutableMap<SenderKey, SenderHashToSeqNo> = Caffeine.newBuilder().expireAfterAccess(7, TimeUnit.DAYS).build<SenderKey, SenderHashToSeqNo>().asMap() private val senderUUIDSeqNoHWM: MutableMap<SenderKey, SenderHashToSeqNo> = Caffeine.newBuilder().expireAfterAccess(7, TimeUnit.DAYS).build<SenderKey, SenderHashToSeqNo>().asMap()
private fun createProcessedMessages(): AppendOnlyPersistentMap<DeduplicationId, MessageMeta, ProcessedMessage, String> { private fun createProcessedMessages(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<DeduplicationId, MessageMeta, ProcessedMessage, String> {
return AppendOnlyPersistentMap( return AppendOnlyPersistentMap(
"P2PMessageDeduplicator_processedMessages", cacheFactory = cacheFactory,
name = "P2PMessageDeduplicator_processedMessages",
toPersistentEntityKey = { it.toString }, toPersistentEntityKey = { it.toString },
fromPersistentEntity = { Pair(DeduplicationId(it.id), MessageMeta(it.insertionTime, it.hash, it.seqNo)) }, fromPersistentEntity = { Pair(DeduplicationId(it.id), MessageMeta(it.insertionTime, it.hash, it.seqNo)) },
toPersistentEntity = { key: DeduplicationId, value: MessageMeta -> toPersistentEntity = { key: DeduplicationId, value: MessageMeta ->
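
The senderUUIDSeqNoHWM declaration above relies on Caffeine's access expiry plus the asMap() view to obtain a concurrent map that trims itself. A minimal sketch of just that pattern, with plain String/Long stand-ins for the real sender key and sequence-number types:

import com.github.benmanes.caffeine.cache.Caffeine
import java.util.concurrent.TimeUnit

// Entries not read or written for 7 days are evicted, so the map cannot grow without bound.
// String and Long are simplified stand-ins, not the node's real SenderKey/SenderHashToSeqNo types.
val exampleHighWaterMarks: MutableMap<String, Long> =
        Caffeine.newBuilder()
                .expireAfterAccess(7, TimeUnit.DAYS)
                .build<String, Long>()
                .asMap()
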

View File

@ -27,6 +27,7 @@ import net.corda.node.services.statemachine.DeduplicationId
import net.corda.node.services.statemachine.ExternalEvent import net.corda.node.services.statemachine.ExternalEvent
import net.corda.node.services.statemachine.SenderDeduplicationId import net.corda.node.services.statemachine.SenderDeduplicationId
import net.corda.node.utilities.AffinityExecutor import net.corda.node.utilities.AffinityExecutor
import net.corda.node.utilities.NamedCacheFactory
import net.corda.nodeapi.internal.ArtemisMessagingComponent import net.corda.nodeapi.internal.ArtemisMessagingComponent
import net.corda.nodeapi.internal.ArtemisMessagingComponent.* import net.corda.nodeapi.internal.ArtemisMessagingComponent.*
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.BRIDGE_CONTROL import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.BRIDGE_CONTROL
@ -82,7 +83,9 @@ class P2PMessagingClient(val config: NodeConfiguration,
private val nodeExecutor: AffinityExecutor.ServiceAffinityExecutor, private val nodeExecutor: AffinityExecutor.ServiceAffinityExecutor,
private val database: CordaPersistence, private val database: CordaPersistence,
private val networkMap: NetworkMapCacheInternal, private val networkMap: NetworkMapCacheInternal,
@Suppress("UNUSED")
private val metricRegistry: MetricRegistry, private val metricRegistry: MetricRegistry,
cacheFactory: NamedCacheFactory,
private val isDrainingModeOn: () -> Boolean, private val isDrainingModeOn: () -> Boolean,
private val drainingModeWasChangedEvents: Observable<Pair<Boolean, Boolean>> private val drainingModeWasChangedEvents: Observable<Pair<Boolean, Boolean>>
) : SingletonSerializeAsToken(), MessagingService, AddressToArtemisQueueResolver { ) : SingletonSerializeAsToken(), MessagingService, AddressToArtemisQueueResolver {
@ -133,7 +136,7 @@ class P2PMessagingClient(val config: NodeConfiguration,
private val handlers = ConcurrentHashMap<String, MessageHandler>() private val handlers = ConcurrentHashMap<String, MessageHandler>()
private val deduplicator = P2PMessageDeduplicator(database) private val deduplicator = P2PMessageDeduplicator(cacheFactory, database)
// Note: Public visibility for testing // Note: Public visibility for testing
var messagingExecutor: MessagingExecutor? = null var messagingExecutor: MessagingExecutor? = null

View File

@ -23,6 +23,7 @@ import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.debug import net.corda.core.utilities.debug
import net.corda.node.internal.schemas.NodeInfoSchemaV1 import net.corda.node.internal.schemas.NodeInfoSchemaV1
import net.corda.node.services.api.NetworkMapCacheInternal import net.corda.node.services.api.NetworkMapCacheInternal
import net.corda.node.utilities.NamedCacheFactory
import net.corda.node.utilities.NonInvalidatingCache import net.corda.node.utilities.NonInvalidatingCache
import net.corda.nodeapi.internal.persistence.CordaPersistence import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.bufferUntilDatabaseCommit import net.corda.nodeapi.internal.persistence.bufferUntilDatabaseCommit
@ -36,7 +37,8 @@ import javax.annotation.concurrent.ThreadSafe
/** Database-based network map cache. */ /** Database-based network map cache. */
@ThreadSafe @ThreadSafe
open class PersistentNetworkMapCache(private val database: CordaPersistence, open class PersistentNetworkMapCache(cacheFactory: NamedCacheFactory,
private val database: CordaPersistence,
private val identityService: IdentityService) : NetworkMapCacheInternal, SingletonSerializeAsToken() { private val identityService: IdentityService) : NetworkMapCacheInternal, SingletonSerializeAsToken() {
companion object { companion object {
private val logger = contextLogger() private val logger = contextLogger()
@ -124,8 +126,8 @@ open class PersistentNetworkMapCache(private val database: CordaPersistence,
override fun getNodesByLegalIdentityKey(identityKey: PublicKey): List<NodeInfo> = nodesByKeyCache[identityKey]!! override fun getNodesByLegalIdentityKey(identityKey: PublicKey): List<NodeInfo> = nodesByKeyCache[identityKey]!!
private val nodesByKeyCache = NonInvalidatingCache<PublicKey, List<NodeInfo>>( private val nodesByKeyCache = NonInvalidatingCache<PublicKey, List<NodeInfo>>(
"PersistentNetworkMap_nodesByKey", cacheFactory = cacheFactory,
1024) { key -> name = "PersistentNetworkMap_nodesByKey") { key ->
database.transaction { queryByIdentityKey(session, key) } database.transaction { queryByIdentityKey(session, key) }
} }
@ -144,8 +146,8 @@ open class PersistentNetworkMapCache(private val database: CordaPersistence,
} }
private val identityByLegalNameCache = NonInvalidatingCache<CordaX500Name, Optional<PartyAndCertificate>>( private val identityByLegalNameCache = NonInvalidatingCache<CordaX500Name, Optional<PartyAndCertificate>>(
"PersistentNetworkMap_idByLegalName", cacheFactory = cacheFactory,
1024) { name -> name = "PersistentNetworkMap_idByLegalName") { name ->
Optional.ofNullable(database.transaction { queryIdentityByLegalName(session, name) }) Optional.ofNullable(database.transaction { queryIdentityByLegalName(session, name) })
} }

View File

@ -15,6 +15,7 @@ import net.corda.core.transactions.SignedTransaction
import net.corda.node.services.api.WritableTransactionStorage import net.corda.node.services.api.WritableTransactionStorage
import net.corda.node.services.statemachine.FlowStateMachineImpl import net.corda.node.services.statemachine.FlowStateMachineImpl
import net.corda.node.utilities.AppendOnlyPersistentMapBase import net.corda.node.utilities.AppendOnlyPersistentMapBase
import net.corda.node.utilities.NamedCacheFactory
import net.corda.node.utilities.WeightBasedAppendOnlyPersistentMap import net.corda.node.utilities.WeightBasedAppendOnlyPersistentMap
import net.corda.nodeapi.internal.persistence.CordaPersistence import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX
@ -32,7 +33,7 @@ typealias TxCacheValue = Pair<SerializedBytes<CoreTransaction>, List<Transaction
fun TxCacheValue.toSignedTx() = SignedTransaction(this.first, this.second) fun TxCacheValue.toSignedTx() = SignedTransaction(this.first, this.second)
fun SignedTransaction.toTxCacheValue() = TxCacheValue(this.txBits, this.sigs) fun SignedTransaction.toTxCacheValue() = TxCacheValue(this.txBits, this.sigs)
class DBTransactionStorage(cacheSizeBytes: Long, private val database: CordaPersistence) : WritableTransactionStorage, SingletonSerializeAsToken() { class DBTransactionStorage(private val database: CordaPersistence, cacheFactory: NamedCacheFactory) : WritableTransactionStorage, SingletonSerializeAsToken() {
@Entity @Entity
@Table(name = "${NODE_DATABASE_PREFIX}transactions") @Table(name = "${NODE_DATABASE_PREFIX}transactions")
@ -50,9 +51,10 @@ class DBTransactionStorage(cacheSizeBytes: Long, private val database: CordaPers
) )
private companion object { private companion object {
fun createTransactionsMap(maxSizeInBytes: Long) fun createTransactionsMap(cacheFactory: NamedCacheFactory)
: AppendOnlyPersistentMapBase<SecureHash, TxCacheValue, DBTransaction, String> { : AppendOnlyPersistentMapBase<SecureHash, TxCacheValue, DBTransaction, String> {
return WeightBasedAppendOnlyPersistentMap<SecureHash, TxCacheValue, DBTransaction, String>( return WeightBasedAppendOnlyPersistentMap<SecureHash, TxCacheValue, DBTransaction, String>(
cacheFactory = cacheFactory,
name = "DBTransactionStorage_transactions", name = "DBTransactionStorage_transactions",
toPersistentEntityKey = { it.toString() }, toPersistentEntityKey = { it.toString() },
fromPersistentEntity = { fromPersistentEntity = {
@ -69,7 +71,6 @@ class DBTransactionStorage(cacheSizeBytes: Long, private val database: CordaPers
} }
}, },
persistentEntityClass = DBTransaction::class.java, persistentEntityClass = DBTransaction::class.java,
maxWeight = maxSizeInBytes,
weighingFunc = { hash, tx -> hash.size + weighTx(tx) } weighingFunc = { hash, tx -> hash.size + weighTx(tx) }
) )
} }
@ -88,7 +89,7 @@ class DBTransactionStorage(cacheSizeBytes: Long, private val database: CordaPers
} }
} }
private val txStorage = ConcurrentBox(createTransactionsMap(cacheSizeBytes)) private val txStorage = ConcurrentBox(createTransactionsMap(cacheFactory))
override fun addTransaction(transaction: SignedTransaction): Boolean = database.transaction { override fun addTransaction(transaction: SignedTransaction): Boolean = database.transaction {
txStorage.concurrent { txStorage.concurrent {

View File

@ -18,8 +18,8 @@ import net.corda.core.node.services.vault.AttachmentQueryCriteria
import net.corda.core.node.services.vault.AttachmentSort import net.corda.core.node.services.vault.AttachmentSort
import net.corda.core.serialization.* import net.corda.core.serialization.*
import net.corda.core.utilities.contextLogger import net.corda.core.utilities.contextLogger
import net.corda.node.services.config.NodeConfiguration
import net.corda.node.services.vault.HibernateAttachmentQueryCriteriaParser import net.corda.node.services.vault.HibernateAttachmentQueryCriteriaParser
import net.corda.node.utilities.NamedCacheFactory
import net.corda.node.utilities.NonInvalidatingCache import net.corda.node.utilities.NonInvalidatingCache
import net.corda.node.utilities.NonInvalidatingWeightBasedCache import net.corda.node.utilities.NonInvalidatingWeightBasedCache
import net.corda.nodeapi.exceptions.DuplicateAttachmentException import net.corda.nodeapi.exceptions.DuplicateAttachmentException
@ -43,9 +43,8 @@ import javax.persistence.*
@ThreadSafe @ThreadSafe
class NodeAttachmentService( class NodeAttachmentService(
metrics: MetricRegistry, metrics: MetricRegistry,
private val database: CordaPersistence, cacheFactory: NamedCacheFactory,
attachmentContentCacheSize: Long = NodeConfiguration.defaultAttachmentContentCacheSize, private val database: CordaPersistence
attachmentCacheBound: Long = NodeConfiguration.defaultAttachmentCacheBound
) : AttachmentStorageInternal, SingletonSerializeAsToken() { ) : AttachmentStorageInternal, SingletonSerializeAsToken() {
companion object { companion object {
private val log = contextLogger() private val log = contextLogger()
@ -206,8 +205,8 @@ class NodeAttachmentService(
// a problem somewhere else or this needs to be revisited. // a problem somewhere else or this needs to be revisited.
private val attachmentContentCache = NonInvalidatingWeightBasedCache( private val attachmentContentCache = NonInvalidatingWeightBasedCache(
cacheFactory = cacheFactory,
name = "NodeAttachmentService_attachmentContent", name = "NodeAttachmentService_attachmentContent",
maxWeight = attachmentContentCacheSize,
weigher = Weigher<SecureHash, Optional<Pair<Attachment, ByteArray>>> { key, value -> key.size + if (value.isPresent) value.get().second.size else 0 }, weigher = Weigher<SecureHash, Optional<Pair<Attachment, ByteArray>>> { key, value -> key.size + if (value.isPresent) value.get().second.size else 0 },
loadFunction = { Optional.ofNullable(loadAttachmentContent(it)) } loadFunction = { Optional.ofNullable(loadAttachmentContent(it)) }
) )
@ -228,10 +227,9 @@ class NodeAttachmentService(
} }
private val attachmentCache = NonInvalidatingCache<SecureHash, Optional<Attachment>>( private val attachmentCache = NonInvalidatingCache<SecureHash, Optional<Attachment>>(
"NodeAttachmentService_attachemnt", cacheFactory = cacheFactory,
attachmentCacheBound) { key -> name = "NodeAttachmentService_attachmentPresence",
Optional.ofNullable(createAttachment(key)) loadFunction = { key -> Optional.ofNullable(createAttachment(key)) })
}
private fun createAttachment(key: SecureHash): Attachment? { private fun createAttachment(key: SecureHash): Attachment? {
val content = attachmentContentCache.get(key)!! val content = attachmentContentCache.get(key)!!

View File

@ -102,7 +102,8 @@ class BFTNonValidatingNotaryService(
private fun createMap(): AppendOnlyPersistentMap<StateRef, SecureHash, CommittedState, PersistentStateRef> { private fun createMap(): AppendOnlyPersistentMap<StateRef, SecureHash, CommittedState, PersistentStateRef> {
return AppendOnlyPersistentMap( return AppendOnlyPersistentMap(
"BFTNonValidatingNotaryService_transactions", cacheFactory = services.cacheFactory,
name = "BFTNonValidatingNotaryService_transactions",
toPersistentEntityKey = { PersistentStateRef(it.txhash.toString(), it.index) }, toPersistentEntityKey = { PersistentStateRef(it.txhash.toString(), it.index) },
fromPersistentEntity = { fromPersistentEntity = {
//TODO null check will become obsolete after making DB/JPA columns not nullable //TODO null check will become obsolete after making DB/JPA columns not nullable

View File

@ -11,7 +11,10 @@ import net.corda.core.flows.StateConsumptionDetails
import net.corda.core.identity.Party import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.OpenFuture import net.corda.core.internal.concurrent.OpenFuture
import net.corda.core.internal.concurrent.openFuture import net.corda.core.internal.concurrent.openFuture
import net.corda.core.internal.notary.* import net.corda.core.internal.notary.AsyncUniquenessProvider
import net.corda.core.internal.notary.NotaryInternalException
import net.corda.core.internal.notary.isConsumedByTheSameTx
import net.corda.core.internal.notary.validateTimeWindow
import net.corda.core.schemas.PersistentStateRef import net.corda.core.schemas.PersistentStateRef
import net.corda.core.serialization.CordaSerializable import net.corda.core.serialization.CordaSerializable
import net.corda.core.serialization.SingletonSerializeAsToken import net.corda.core.serialization.SingletonSerializeAsToken
@ -19,10 +22,10 @@ import net.corda.core.serialization.serialize
import net.corda.core.utilities.contextLogger import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.debug import net.corda.core.utilities.debug
import net.corda.node.utilities.AppendOnlyPersistentMap import net.corda.node.utilities.AppendOnlyPersistentMap
import net.corda.node.utilities.NamedCacheFactory
import net.corda.nodeapi.internal.persistence.CordaPersistence import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX
import net.corda.nodeapi.internal.persistence.currentDBSession import net.corda.nodeapi.internal.persistence.currentDBSession
import net.corda.serialization.internal.CordaSerializationEncoding
import java.time.Clock import java.time.Clock
import java.time.Instant import java.time.Instant
import java.util.* import java.util.*
@ -33,7 +36,7 @@ import kotlin.concurrent.thread
/** A RDBMS backed Uniqueness provider */ /** A RDBMS backed Uniqueness provider */
@ThreadSafe @ThreadSafe
class PersistentUniquenessProvider(val clock: Clock, val database: CordaPersistence) : AsyncUniquenessProvider, SingletonSerializeAsToken() { class PersistentUniquenessProvider(val clock: Clock, val database: CordaPersistence, cacheFactory: NamedCacheFactory) : AsyncUniquenessProvider, SingletonSerializeAsToken() {
@MappedSuperclass @MappedSuperclass
class BaseComittedState( class BaseComittedState(
@ -80,7 +83,7 @@ class PersistentUniquenessProvider(val clock: Clock, val database: CordaPersiste
@javax.persistence.Table(name = "${NODE_DATABASE_PREFIX}notary_committed_states") @javax.persistence.Table(name = "${NODE_DATABASE_PREFIX}notary_committed_states")
class CommittedState(id: PersistentStateRef, consumingTxHash: String) : BaseComittedState(id, consumingTxHash) class CommittedState(id: PersistentStateRef, consumingTxHash: String) : BaseComittedState(id, consumingTxHash)
private val commitLog = createMap() private val commitLog = createMap(cacheFactory)
private val requestQueue = LinkedBlockingQueue<CommitRequest>(requestQueueSize) private val requestQueue = LinkedBlockingQueue<CommitRequest>(requestQueueSize)
@ -98,9 +101,10 @@ class PersistentUniquenessProvider(val clock: Clock, val database: CordaPersiste
companion object { companion object {
private const val requestQueueSize = 100_000 private const val requestQueueSize = 100_000
private val log = contextLogger() private val log = contextLogger()
fun createMap(): AppendOnlyPersistentMap<StateRef, SecureHash, CommittedState, PersistentStateRef> = fun createMap(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<StateRef, SecureHash, CommittedState, PersistentStateRef> =
AppendOnlyPersistentMap( AppendOnlyPersistentMap(
"PersistentUniquenessProvider_transactions", cacheFactory = cacheFactory,
name = "PersistentUniquenessProvider_transactions",
toPersistentEntityKey = { PersistentStateRef(it.txhash.toString(), it.index) }, toPersistentEntityKey = { PersistentStateRef(it.txhash.toString(), it.index) },
fromPersistentEntity = { fromPersistentEntity = {
//TODO null check will become obsolete after making DB/JPA columns not nullable //TODO null check will become obsolete after making DB/JPA columns not nullable

View File

@ -28,6 +28,7 @@ import net.corda.core.utilities.debug
import net.corda.node.services.config.RaftConfig import net.corda.node.services.config.RaftConfig
import net.corda.node.services.transactions.RaftTransactionCommitLog.Commands.CommitTransaction import net.corda.node.services.transactions.RaftTransactionCommitLog.Commands.CommitTransaction
import net.corda.node.utilities.AppendOnlyPersistentMap import net.corda.node.utilities.AppendOnlyPersistentMap
import net.corda.node.utilities.NamedCacheFactory
import net.corda.nodeapi.internal.config.MutualSslConfiguration import net.corda.nodeapi.internal.config.MutualSslConfiguration
import net.corda.nodeapi.internal.persistence.CordaPersistence import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX
@ -55,13 +56,15 @@ class RaftUniquenessProvider(
private val db: CordaPersistence, private val db: CordaPersistence,
private val clock: Clock, private val clock: Clock,
private val metrics: MetricRegistry, private val metrics: MetricRegistry,
private val cacheFactory: NamedCacheFactory,
private val raftConfig: RaftConfig private val raftConfig: RaftConfig
) : UniquenessProvider, SingletonSerializeAsToken() { ) : UniquenessProvider, SingletonSerializeAsToken() {
companion object { companion object {
private val log = contextLogger() private val log = contextLogger()
fun createMap(): AppendOnlyPersistentMap<StateRef, Pair<Long, SecureHash>, CommittedState, PersistentStateRef> = fun createMap(cacheFactory: NamedCacheFactory): AppendOnlyPersistentMap<StateRef, Pair<Long, SecureHash>, CommittedState, PersistentStateRef> =
AppendOnlyPersistentMap( AppendOnlyPersistentMap(
"RaftUniquenessProvider_transactions", cacheFactory = cacheFactory,
name = "RaftUniquenessProvider_transactions",
toPersistentEntityKey = { PersistentStateRef(it) }, toPersistentEntityKey = { PersistentStateRef(it) },
fromPersistentEntity = { fromPersistentEntity = {
val txId = it.id.txId val txId = it.id.txId
@ -109,7 +112,7 @@ class RaftUniquenessProvider(
fun start() { fun start() {
log.info("Creating Copycat server, log stored in: ${storagePath.toAbsolutePath()}") log.info("Creating Copycat server, log stored in: ${storagePath.toAbsolutePath()}")
val stateMachineFactory = { val stateMachineFactory = {
RaftTransactionCommitLog(db, clock, RaftUniquenessProvider.Companion::createMap) RaftTransactionCommitLog(db, clock, { createMap(cacheFactory) })
} }
val address = raftConfig.nodeAddress.let { Address(it.host, it.port) } val address = raftConfig.nodeAddress.let { Address(it.host, it.port) }
val storage = buildStorage(storagePath) val storage = buildStorage(storagePath)

View File

@ -8,7 +8,7 @@ import java.security.PublicKey
/** A simple Notary service that does not perform transaction validation */ /** A simple Notary service that does not perform transaction validation */
class SimpleNotaryService(override val services: ServiceHubInternal, override val notaryIdentityKey: PublicKey) : TrustedAuthorityNotaryService() { class SimpleNotaryService(override val services: ServiceHubInternal, override val notaryIdentityKey: PublicKey) : TrustedAuthorityNotaryService() {
override val uniquenessProvider = PersistentUniquenessProvider(services.clock, services.database) override val uniquenessProvider = PersistentUniquenessProvider(services.clock, services.database, services.cacheFactory)
override fun createServiceFlow(otherPartySession: FlowSession): NotaryServiceFlow = NonValidatingNotaryFlow(otherPartySession, this) override fun createServiceFlow(otherPartySession: FlowSession): NotaryServiceFlow = NonValidatingNotaryFlow(otherPartySession, this)

View File

@ -8,7 +8,7 @@ import java.security.PublicKey
/** A Notary service that validates the transaction chain of the submitted transaction before committing it */ /** A Notary service that validates the transaction chain of the submitted transaction before committing it */
class ValidatingNotaryService(override val services: ServiceHubInternal, override val notaryIdentityKey: PublicKey) : TrustedAuthorityNotaryService() { class ValidatingNotaryService(override val services: ServiceHubInternal, override val notaryIdentityKey: PublicKey) : TrustedAuthorityNotaryService() {
override val uniquenessProvider = PersistentUniquenessProvider(services.clock, services.database) override val uniquenessProvider = PersistentUniquenessProvider(services.clock, services.database, services.cacheFactory)
override fun createServiceFlow(otherPartySession: FlowSession): NotaryServiceFlow = ValidatingNotaryFlow(otherPartySession, this) override fun createServiceFlow(otherPartySession: FlowSession): NotaryServiceFlow = ValidatingNotaryFlow(otherPartySession, this)

View File

@ -309,21 +309,20 @@ abstract class AppendOnlyPersistentMapBase<K, V, E, out EK>(
// Open for tests to override // Open for tests to override
open class AppendOnlyPersistentMap<K, V, E, out EK>( open class AppendOnlyPersistentMap<K, V, E, out EK>(
cacheFactory: NamedCacheFactory,
name: String, name: String,
toPersistentEntityKey: (K) -> EK, toPersistentEntityKey: (K) -> EK,
fromPersistentEntity: (E) -> Pair<K, V>, fromPersistentEntity: (E) -> Pair<K, V>,
toPersistentEntity: (key: K, value: V) -> E, toPersistentEntity: (key: K, value: V) -> E,
persistentEntityClass: Class<E>, persistentEntityClass: Class<E>
cacheBound: Long = 1024
) : AppendOnlyPersistentMapBase<K, V, E, EK>( ) : AppendOnlyPersistentMapBase<K, V, E, EK>(
toPersistentEntityKey, toPersistentEntityKey,
fromPersistentEntity, fromPersistentEntity,
toPersistentEntity, toPersistentEntity,
persistentEntityClass) { persistentEntityClass) {
//TODO determine cacheBound based on entity class later or with node config allowing tuning, or using some heuristic based on heap size
override val cache = NonInvalidatingCache( override val cache = NonInvalidatingCache(
cacheFactory = cacheFactory,
name = name, name = name,
bound = cacheBound,
loadFunction = { key: K -> loadFunction = { key: K ->
// This gets called if a value is read and the cache has no Transactional for this key yet. // This gets called if a value is read and the cache has no Transactional for this key yet.
val value: V? = loadValue(key) val value: V? = loadValue(key)
@ -355,12 +354,12 @@ open class AppendOnlyPersistentMap<K, V, E, out EK>(
// Same as above, but with weighted values (e.g. memory footprint sensitive). // Same as above, but with weighted values (e.g. memory footprint sensitive).
class WeightBasedAppendOnlyPersistentMap<K, V, E, out EK>( class WeightBasedAppendOnlyPersistentMap<K, V, E, out EK>(
cacheFactory: NamedCacheFactory,
name: String, name: String,
toPersistentEntityKey: (K) -> EK, toPersistentEntityKey: (K) -> EK,
fromPersistentEntity: (E) -> Pair<K, V>, fromPersistentEntity: (E) -> Pair<K, V>,
toPersistentEntity: (key: K, value: V) -> E, toPersistentEntity: (key: K, value: V) -> E,
persistentEntityClass: Class<E>, persistentEntityClass: Class<E>,
maxWeight: Long,
weighingFunc: (K, Transactional<V>) -> Int weighingFunc: (K, Transactional<V>) -> Int
) : AppendOnlyPersistentMapBase<K, V, E, EK>( ) : AppendOnlyPersistentMapBase<K, V, E, EK>(
toPersistentEntityKey, toPersistentEntityKey,
@ -368,8 +367,8 @@ class WeightBasedAppendOnlyPersistentMap<K, V, E, out EK>(
toPersistentEntity, toPersistentEntity,
persistentEntityClass) { persistentEntityClass) {
override val cache = NonInvalidatingWeightBasedCache( override val cache = NonInvalidatingWeightBasedCache(
name, cacheFactory = cacheFactory,
maxWeight = maxWeight, name = name,
weigher = Weigher { key, value -> weighingFunc(key, value) }, weigher = Weigher { key, value -> weighingFunc(key, value) },
loadFunction = { key: K -> loadFunction = { key: K ->
val value: V? = loadValue(key) val value: V? = loadValue(key)

View File

@ -0,0 +1,54 @@
package net.corda.node.utilities
import com.codahale.metrics.MetricRegistry
import com.github.benmanes.caffeine.cache.Cache
import com.github.benmanes.caffeine.cache.CacheLoader
import com.github.benmanes.caffeine.cache.Caffeine
import com.github.benmanes.caffeine.cache.LoadingCache
import net.corda.core.internal.buildNamed
import net.corda.core.serialization.SerializeAsToken
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.node.services.config.NodeConfiguration
/**
* Allow passing metrics and config to caching implementations.
*/
interface NamedCacheFactory : SerializeAsToken {
/**
* Build a new cache factory of the same type that incorporates metrics.
*/
fun bindWithMetrics(metricRegistry: MetricRegistry): NamedCacheFactory
/**
* Build a new cache factory of the same type that incorporates the associated configuration.
*/
fun bindWithConfig(nodeConfiguration: NodeConfiguration): NamedCacheFactory
fun <K, V> buildNamed(caffeine: Caffeine<in K, in V>, name: String): Cache<K, V>
fun <K, V> buildNamed(caffeine: Caffeine<in K, in V>, name: String, loader: CacheLoader<K, V>): LoadingCache<K, V>
}
class DefaultNamedCacheFactory private constructor(private val metricRegistry: MetricRegistry?, private val nodeConfiguration: NodeConfiguration?) : NamedCacheFactory, SingletonSerializeAsToken() {
constructor() : this(null, null)
override fun bindWithMetrics(metricRegistry: MetricRegistry): NamedCacheFactory = DefaultNamedCacheFactory(metricRegistry, this.nodeConfiguration)
override fun bindWithConfig(nodeConfiguration: NodeConfiguration): NamedCacheFactory = DefaultNamedCacheFactory(this.metricRegistry, nodeConfiguration)
override fun <K, V> buildNamed(caffeine: Caffeine<in K, in V>, name: String): Cache<K, V> {
checkNotNull(metricRegistry)
checkNotNull(nodeConfiguration)
return caffeine.maximumSize(1024).buildNamed<K, V>(name)
}
override fun <K, V> buildNamed(caffeine: Caffeine<in K, in V>, name: String, loader: CacheLoader<K, V>): LoadingCache<K, V> {
checkNotNull(metricRegistry)
checkNotNull(nodeConfiguration)
val configuredCaffeine = when (name) {
"DBTransactionStorage_transactions" -> caffeine.maximumWeight(nodeConfiguration!!.transactionCacheSizeBytes)
"NodeAttachmentService_attachmentContent" -> caffeine.maximumWeight(nodeConfiguration!!.attachmentContentCacheSizeBytes)
"NodeAttachmentService_attachmentPresence" -> caffeine.maximumSize(nodeConfiguration!!.attachmentCacheBound)
else -> caffeine.maximumSize(1024)
}
return configuredCaffeine.buildNamed<K, V>(name, loader)
}
}
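
A minimal sketch of how a component is intended to consume this factory: it supplies only a builder and a cache name, and the factory applies the bound (1024 entries by default, or the configured weight/size for the special-cased names). The service and cache name below are hypothetical and not part of this change:

import com.github.benmanes.caffeine.cache.Cache
import com.github.benmanes.caffeine.cache.Caffeine
import net.corda.node.utilities.NamedCacheFactory

// Hypothetical component; "ExampleService_lookups" is an illustrative cache name.
class ExampleService(cacheFactory: NamedCacheFactory) {
    private val lookupCache: Cache<String, String> =
            cacheFactory.buildNamed<String, String>(Caffeine.newBuilder(), "ExampleService_lookups")
}
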

View File

@ -4,19 +4,18 @@ import com.github.benmanes.caffeine.cache.CacheLoader
import com.github.benmanes.caffeine.cache.Caffeine import com.github.benmanes.caffeine.cache.Caffeine
import com.github.benmanes.caffeine.cache.LoadingCache import com.github.benmanes.caffeine.cache.LoadingCache
import com.github.benmanes.caffeine.cache.Weigher import com.github.benmanes.caffeine.cache.Weigher
import net.corda.core.internal.buildNamed
class NonInvalidatingCache<K, V> private constructor( class NonInvalidatingCache<K, V> private constructor(
val cache: LoadingCache<K, V> val cache: LoadingCache<K, V>
) : LoadingCache<K, V> by cache { ) : LoadingCache<K, V> by cache {
constructor(name: String, bound: Long, loadFunction: (K) -> V) : constructor(cacheFactory: NamedCacheFactory, name: String, loadFunction: (K) -> V) :
this(buildCache(name, bound, loadFunction)) this(buildCache(cacheFactory, name, loadFunction))
private companion object { private companion object {
private fun <K, V> buildCache(name: String, bound: Long, loadFunction: (K) -> V): LoadingCache<K, V> { private fun <K, V> buildCache(cacheFactory: NamedCacheFactory, name: String, loadFunction: (K) -> V): LoadingCache<K, V> {
val builder = Caffeine.newBuilder().maximumSize(bound) val builder = Caffeine.newBuilder()
return builder.buildNamed(name, NonInvalidatingCacheLoader(loadFunction)) return cacheFactory.buildNamed(builder, name, NonInvalidatingCacheLoader(loadFunction))
} }
} }
@ -33,13 +32,13 @@ class NonInvalidatingCache<K, V> private constructor(
class NonInvalidatingWeightBasedCache<K, V> private constructor( class NonInvalidatingWeightBasedCache<K, V> private constructor(
val cache: LoadingCache<K, V> val cache: LoadingCache<K, V>
) : LoadingCache<K, V> by cache { ) : LoadingCache<K, V> by cache {
constructor (name: String, maxWeight: Long, weigher: Weigher<K, V>, loadFunction: (K) -> V) : constructor (cacheFactory: NamedCacheFactory, name: String, weigher: Weigher<K, V>, loadFunction: (K) -> V) :
this(buildCache(name, maxWeight, weigher, loadFunction)) this(buildCache(cacheFactory, name, weigher, loadFunction))
private companion object { private companion object {
private fun <K, V> buildCache(name: String, maxWeight: Long, weigher: Weigher<K, V>, loadFunction: (K) -> V): LoadingCache<K, V> { private fun <K, V> buildCache(cacheFactory: NamedCacheFactory, name: String, weigher: Weigher<K, V>, loadFunction: (K) -> V): LoadingCache<K, V> {
val builder = Caffeine.newBuilder().maximumWeight(maxWeight).weigher(weigher) val builder = Caffeine.newBuilder().weigher(weigher)
return builder.buildNamed(name, NonInvalidatingCache.NonInvalidatingCacheLoader(loadFunction)) return cacheFactory.buildNamed(builder, name, NonInvalidatingCache.NonInvalidatingCacheLoader(loadFunction))
} }
} }
} }
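
With the size bound now owned by the factory, constructing one of these caches takes only a factory, a name and a load function. A short sketch under that assumption; the cache name and load function are illustrative:

import net.corda.node.utilities.NamedCacheFactory
import net.corda.node.utilities.NonInvalidatingCache

// The factory decides the maximum size for this name; the load function runs on a miss
// and its result is cached without ever being invalidated.
fun exampleSquaresCache(cacheFactory: NamedCacheFactory): NonInvalidatingCache<Int, Long> =
        NonInvalidatingCache(
                cacheFactory = cacheFactory,
                name = "ExampleService_squares",
                loadFunction = { key: Int -> key.toLong() * key })
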

View File

@ -8,6 +8,7 @@ import net.corda.core.identity.Party
import net.corda.core.identity.PartyAndCertificate import net.corda.core.identity.PartyAndCertificate
import net.corda.core.node.services.UnknownAnonymousPartyException import net.corda.core.node.services.UnknownAnonymousPartyException
import net.corda.node.internal.configureDatabase import net.corda.node.internal.configureDatabase
import net.corda.node.utilities.TestingNamedCacheFactory
import net.corda.nodeapi.internal.crypto.CertificateType import net.corda.nodeapi.internal.crypto.CertificateType
import net.corda.nodeapi.internal.crypto.X509Utilities import net.corda.nodeapi.internal.crypto.X509Utilities
import net.corda.nodeapi.internal.crypto.x509Certificates import net.corda.nodeapi.internal.crypto.x509Certificates
@ -46,7 +47,7 @@ class PersistentIdentityServiceTests {
@Before @Before
fun setup() { fun setup() {
identityService = PersistentIdentityService() identityService = PersistentIdentityService(TestingNamedCacheFactory())
database = configureDatabase( database = configureDatabase(
makeTestDataSourceProperties(), makeTestDataSourceProperties(),
DatabaseConfig(runMigration = true), DatabaseConfig(runMigration = true),
@ -218,7 +219,7 @@ class PersistentIdentityServiceTests {
identityService.verifyAndRegisterIdentity(anonymousBob) identityService.verifyAndRegisterIdentity(anonymousBob)
// Create new identity service mounted onto same DB // Create new identity service mounted onto same DB
val newPersistentIdentityService = PersistentIdentityService().also { val newPersistentIdentityService = PersistentIdentityService(TestingNamedCacheFactory()).also {
it.database = database it.database = database
it.start(DEV_ROOT_CA.certificate) it.start(DEV_ROOT_CA.certificate)
} }

View File

@ -5,6 +5,7 @@ import net.corda.core.utilities.loggerFor
import net.corda.node.internal.configureDatabase import net.corda.node.internal.configureDatabase
import net.corda.node.services.schema.NodeSchemaService import net.corda.node.services.schema.NodeSchemaService
import net.corda.node.utilities.AppendOnlyPersistentMap import net.corda.node.utilities.AppendOnlyPersistentMap
import net.corda.node.utilities.TestingNamedCacheFactory
import net.corda.nodeapi.internal.persistence.DatabaseConfig import net.corda.nodeapi.internal.persistence.DatabaseConfig
import net.corda.testing.node.MockServices.Companion.makeTestDataSourceProperties import net.corda.testing.node.MockServices.Companion.makeTestDataSourceProperties
import org.junit.After import org.junit.After
@ -271,7 +272,8 @@ class AppendOnlyPersistentMapTest(var scenario: Scenario) {
) )
class TestMap : AppendOnlyPersistentMap<Long, String, PersistentMapEntry, Long>( class TestMap : AppendOnlyPersistentMap<Long, String, PersistentMapEntry, Long>(
"ApoendOnlyPersistentMap_test", cacheFactory = TestingNamedCacheFactory(),
name = "ApoendOnlyPersistentMap_test",
toPersistentEntityKey = { it }, toPersistentEntityKey = { it },
fromPersistentEntity = { Pair(it.key, it.value) }, fromPersistentEntity = { Pair(it.key, it.value) },
toPersistentEntity = { key: Long, value: String -> toPersistentEntity = { key: Long, value: String ->

View File

@ -8,8 +8,8 @@ import net.corda.core.crypto.TransactionSignature
import net.corda.core.toFuture import net.corda.core.toFuture
import net.corda.core.transactions.SignedTransaction import net.corda.core.transactions.SignedTransaction
import net.corda.node.internal.configureDatabase import net.corda.node.internal.configureDatabase
import net.corda.node.services.config.NodeConfiguration
import net.corda.node.services.transactions.PersistentUniquenessProvider import net.corda.node.services.transactions.PersistentUniquenessProvider
import net.corda.node.utilities.TestingNamedCacheFactory
import net.corda.nodeapi.internal.persistence.CordaPersistence import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.DatabaseConfig import net.corda.nodeapi.internal.persistence.DatabaseConfig
import net.corda.testing.core.* import net.corda.testing.core.*
@ -154,7 +154,7 @@ class DBTransactionStorageTests {
} }
private fun newTransactionStorage(cacheSizeBytesOverride: Long? = null) { private fun newTransactionStorage(cacheSizeBytesOverride: Long? = null) {
transactionStorage = DBTransactionStorage(cacheSizeBytesOverride ?: NodeConfiguration.defaultTransactionCacheSize, database) transactionStorage = DBTransactionStorage(database, TestingNamedCacheFactory(cacheSizeBytesOverride ?: 1024))
} }
private fun assertTransactionIsRetrievable(transaction: SignedTransaction) { private fun assertTransactionIsRetrievable(transaction: SignedTransaction) {

View File

@ -9,6 +9,7 @@ import net.corda.finance.contracts.asset.Cash
import net.corda.finance.flows.CashIssueFlow import net.corda.finance.flows.CashIssueFlow
import net.corda.node.services.identity.PersistentIdentityService import net.corda.node.services.identity.PersistentIdentityService
import net.corda.node.services.keys.E2ETestKeyManagementService import net.corda.node.services.keys.E2ETestKeyManagementService
import net.corda.node.utilities.TestingNamedCacheFactory
import net.corda.testing.core.BOC_NAME import net.corda.testing.core.BOC_NAME
import net.corda.testing.node.InMemoryMessagingNetwork import net.corda.testing.node.InMemoryMessagingNetwork
import net.corda.testing.node.MockNetwork import net.corda.testing.node.MockNetwork
@ -47,7 +48,7 @@ class HibernateColumnConverterTests {
val ref = OpaqueBytes.of(0x01) val ref = OpaqueBytes.of(0x01)
// Create parallel set of key and identity services so that the values are not cached, forcing the node caches to do a lookup. // Create parallel set of key and identity services so that the values are not cached, forcing the node caches to do a lookup.
val identityService = PersistentIdentityService() val identityService = PersistentIdentityService(TestingNamedCacheFactory())
val originalIdentityService: PersistentIdentityService = bankOfCordaNode.services.identityService as PersistentIdentityService val originalIdentityService: PersistentIdentityService = bankOfCordaNode.services.identityService as PersistentIdentityService
identityService.database = originalIdentityService.database identityService.database = originalIdentityService.database
identityService.start(originalIdentityService.trustRoot) identityService.start(originalIdentityService.trustRoot)

View File

@ -15,6 +15,7 @@ import net.corda.core.node.services.vault.Sort
import net.corda.core.utilities.getOrThrow import net.corda.core.utilities.getOrThrow
import net.corda.node.internal.configureDatabase import net.corda.node.internal.configureDatabase
import net.corda.node.services.transactions.PersistentUniquenessProvider import net.corda.node.services.transactions.PersistentUniquenessProvider
import net.corda.node.utilities.TestingNamedCacheFactory
import net.corda.nodeapi.internal.persistence.CordaPersistence import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.DatabaseConfig import net.corda.nodeapi.internal.persistence.DatabaseConfig
import net.corda.testing.internal.LogHelper import net.corda.testing.internal.LogHelper
@ -51,7 +52,7 @@ class NodeAttachmentServiceTest {
val dataSourceProperties = makeTestDataSourceProperties() val dataSourceProperties = makeTestDataSourceProperties()
database = configureDatabase(dataSourceProperties, DatabaseConfig(runMigration = true), { null }, { null }) database = configureDatabase(dataSourceProperties, DatabaseConfig(runMigration = true), { null }, { null })
fs = Jimfs.newFileSystem(Configuration.unix()) fs = Jimfs.newFileSystem(Configuration.unix())
storage = NodeAttachmentService(MetricRegistry(), database).also { storage = NodeAttachmentService(MetricRegistry(), TestingNamedCacheFactory(), database).also {
database.transaction { database.transaction {
it.start() it.start()
} }

View File

@ -10,6 +10,7 @@ import net.corda.core.identity.CordaX500Name
import net.corda.core.internal.notary.NotaryInternalException import net.corda.core.internal.notary.NotaryInternalException
import net.corda.node.internal.configureDatabase import net.corda.node.internal.configureDatabase
import net.corda.node.services.schema.NodeSchemaService import net.corda.node.services.schema.NodeSchemaService
import net.corda.node.utilities.TestingNamedCacheFactory
import net.corda.nodeapi.internal.persistence.CordaPersistence import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.DatabaseConfig import net.corda.nodeapi.internal.persistence.DatabaseConfig
import net.corda.testing.core.SerializationEnvironmentRule import net.corda.testing.core.SerializationEnvironmentRule
@ -49,7 +50,7 @@ class PersistentUniquenessProviderTests {
@Test @Test
fun `should commit a transaction with unused inputs without exception`() { fun `should commit a transaction with unused inputs without exception`() {
val provider = PersistentUniquenessProvider(Clock.systemUTC(), database) val provider = PersistentUniquenessProvider(Clock.systemUTC(), database, TestingNamedCacheFactory())
val inputState = generateStateRef() val inputState = generateStateRef()
provider.commit(listOf(inputState), txID, identity, requestSignature) provider.commit(listOf(inputState), txID, identity, requestSignature)
@ -57,7 +58,7 @@ class PersistentUniquenessProviderTests {
@Test @Test
fun `should report a conflict for a transaction with previously used inputs`() { fun `should report a conflict for a transaction with previously used inputs`() {
val provider = PersistentUniquenessProvider(Clock.systemUTC(), database) val provider = PersistentUniquenessProvider(Clock.systemUTC(), database, TestingNamedCacheFactory())
val inputState = generateStateRef() val inputState = generateStateRef()
val inputs = listOf(inputState) val inputs = listOf(inputState)

View File

@ -0,0 +1,33 @@
package net.corda.node.utilities
import com.codahale.metrics.MetricRegistry
import com.github.benmanes.caffeine.cache.Cache
import com.github.benmanes.caffeine.cache.CacheLoader
import com.github.benmanes.caffeine.cache.Caffeine
import com.github.benmanes.caffeine.cache.LoadingCache
import net.corda.core.internal.buildNamed
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.node.services.config.MB
import net.corda.node.services.config.NodeConfiguration
class TestingNamedCacheFactory private constructor(private val sizeOverride: Long, private val metricRegistry: MetricRegistry?, private val nodeConfiguration: NodeConfiguration?) : NamedCacheFactory, SingletonSerializeAsToken() {
constructor(sizeOverride: Long = 1024) : this(sizeOverride, null, null)
override fun bindWithMetrics(metricRegistry: MetricRegistry): NamedCacheFactory = TestingNamedCacheFactory(sizeOverride, metricRegistry, this.nodeConfiguration)
override fun bindWithConfig(nodeConfiguration: NodeConfiguration): NamedCacheFactory = TestingNamedCacheFactory(sizeOverride, this.metricRegistry, nodeConfiguration)
override fun <K, V> buildNamed(caffeine: Caffeine<in K, in V>, name: String): Cache<K, V> {
// Does not check metricRegistry or nodeConfiguration, because for tests we don't care.
return caffeine.maximumSize(sizeOverride).buildNamed<K, V>(name)
}
override fun <K, V> buildNamed(caffeine: Caffeine<in K, in V>, name: String, loader: CacheLoader<K, V>): LoadingCache<K, V> {
// Does not check metricRegistry or nodeConfiguration, because for tests we don't care.
val configuredCaffeine = when (name) {
"DBTransactionStorage_transactions" -> caffeine.maximumWeight(1.MB)
"NodeAttachmentService_attachmentContent" -> caffeine.maximumWeight(1.MB)
else -> caffeine.maximumSize(sizeOverride)
}
return configuredCaffeine.buildNamed<K, V>(name, loader)
}
}
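
The tests in this change pass this factory wherever a NamedCacheFactory is now required; a minimal sketch of that pattern, mirroring the PersistentIdentityService tests above (the 16-entry override is arbitrary):

import net.corda.node.services.identity.PersistentIdentityService
import net.corda.node.utilities.TestingNamedCacheFactory

// A small, deterministic cache bound is usually enough for unit tests.
val identityService = PersistentIdentityService(TestingNamedCacheFactory(sizeOverride = 16))
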

View File

@ -27,7 +27,7 @@ import java.security.SignatureException
// START 1 // START 1
@CordaService @CordaService
class MyCustomValidatingNotaryService(override val services: ServiceHubInternal, override val notaryIdentityKey: PublicKey) : TrustedAuthorityNotaryService() { class MyCustomValidatingNotaryService(override val services: ServiceHubInternal, override val notaryIdentityKey: PublicKey) : TrustedAuthorityNotaryService() {
override val uniquenessProvider = PersistentUniquenessProvider(services.clock, services.database) override val uniquenessProvider = PersistentUniquenessProvider(services.clock, services.database, services.cacheFactory)
override fun createServiceFlow(otherPartySession: FlowSession): FlowLogic<Void?> = MyValidatingNotaryFlow(otherPartySession, this) override fun createServiceFlow(otherPartySession: FlowSession): FlowLogic<Void?> = MyValidatingNotaryFlow(otherPartySession, this)

View File

@ -1,6 +1,5 @@
package net.corda.testing.node.internal package net.corda.testing.node.internal
import com.codahale.metrics.MetricRegistry
import com.google.common.jimfs.Configuration.unix import com.google.common.jimfs.Configuration.unix
import com.google.common.jimfs.Jimfs import com.google.common.jimfs.Jimfs
import com.nhaarman.mockito_kotlin.doReturn import com.nhaarman.mockito_kotlin.doReturn
@ -48,6 +47,7 @@ import net.corda.node.services.transactions.BFTNonValidatingNotaryService
import net.corda.node.services.transactions.BFTSMaRt import net.corda.node.services.transactions.BFTSMaRt
import net.corda.node.services.transactions.InMemoryTransactionVerifierService import net.corda.node.services.transactions.InMemoryTransactionVerifierService
import net.corda.node.utilities.AffinityExecutor.ServiceAffinityExecutor import net.corda.node.utilities.AffinityExecutor.ServiceAffinityExecutor
import net.corda.node.utilities.DefaultNamedCacheFactory
import net.corda.nodeapi.internal.DevIdentityGenerator import net.corda.nodeapi.internal.DevIdentityGenerator
import net.corda.nodeapi.internal.config.User import net.corda.nodeapi.internal.config.User
import net.corda.nodeapi.internal.network.NetworkParametersCopier import net.corda.nodeapi.internal.network.NetworkParametersCopier
@ -55,9 +55,9 @@ import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.DatabaseConfig import net.corda.nodeapi.internal.persistence.DatabaseConfig
import net.corda.testing.common.internal.testNetworkParameters import net.corda.testing.common.internal.testNetworkParameters
import net.corda.testing.driver.TestCorDapp import net.corda.testing.driver.TestCorDapp
import net.corda.testing.internal.stubs.CertificateStoreStubs
import net.corda.testing.internal.rigorousMock import net.corda.testing.internal.rigorousMock
import net.corda.testing.internal.setGlobalSerialization import net.corda.testing.internal.setGlobalSerialization
import net.corda.testing.internal.stubs.CertificateStoreStubs
import net.corda.testing.internal.testThreadFactory import net.corda.testing.internal.testThreadFactory
import net.corda.testing.node.* import net.corda.testing.node.*
import org.apache.activemq.artemis.utils.ReusableLatch import org.apache.activemq.artemis.utils.ReusableLatch
@ -279,6 +279,7 @@ open class InternalMockNetwork(defaultParameters: MockNetworkParameters = MockNe
open class MockNode(args: MockNodeArgs, cordappLoader: CordappLoader = JarScanningCordappLoader.fromDirectories(args.config.cordappDirectories)) : AbstractNode<TestStartedNode>( open class MockNode(args: MockNodeArgs, cordappLoader: CordappLoader = JarScanningCordappLoader.fromDirectories(args.config.cordappDirectories)) : AbstractNode<TestStartedNode>(
args.config, args.config,
TestClock(Clock.systemUTC()), TestClock(Clock.systemUTC()),
DefaultNamedCacheFactory(),
args.version, args.version,
cordappLoader, cordappLoader,
args.network.getServerThread(args.id), args.network.getServerThread(args.id),
@ -405,8 +406,8 @@ open class InternalMockNetwork(defaultParameters: MockNetworkParameters = MockNe
get() = _serializationWhitelists get() = _serializationWhitelists
private var dbCloser: (() -> Any?)? = null private var dbCloser: (() -> Any?)? = null
override fun startDatabase(metricRegistry: MetricRegistry?) { override fun startDatabase() {
super.startDatabase(metricRegistry) super.startDatabase()
dbCloser = database::close dbCloser = database::close
runOnStop += dbCloser!! runOnStop += dbCloser!!
} }

View File

@ -12,12 +12,14 @@ fun startReporter(shutdownManager: ShutdownManager, metricRegistry: MetricRegist
val jmxReporter = thread { val jmxReporter = thread {
JmxReporter.forRegistry(metricRegistry).inDomain("net.corda").createsObjectNamesWith { _, domain, name -> JmxReporter.forRegistry(metricRegistry).inDomain("net.corda").createsObjectNamesWith { _, domain, name ->
// Make the JMX hierarchy a bit better organised. // Make the JMX hierarchy a bit better organised.
val category = name.substringBefore('.') val category = name.substringBefore('.').substringBeforeLast('/')
val component = name.substringBefore('.').substringAfterLast('/', "")
val subName = name.substringAfter('.', "") val subName = name.substringAfter('.', "")
if (subName == "") if (subName == "")
ObjectName("$domain:name=$category") ObjectName("$domain:name=$category${if (component.isNotEmpty()) ",component=$component," else ""}")
else else
ObjectName("$domain:type=$category,name=$subName") ObjectName("$domain:type=$category,${if (component.isNotEmpty()) "component=$component," else ""}name=$subName")
}.build().start() }.build().start()
} }
shutdownManager.registerShutdown { jmxReporter.interrupt() } shutdownManager.registerShutdown { jmxReporter.interrupt() }