Mirror of https://github.com/corda/corda.git, synced 2025-06-01 23:20:54 +00:00
Merge commit '02348a584d56cdb562581d99be5b56348f7d1001' into chrisr3-os-merge
commit 8b3a5432cb
@ -497,6 +497,6 @@ if(file('corda-docs-only-build').exists() || (System.getenv('CORDA_DOCS_ONLY_BUI
|
||||
}
|
||||
|
||||
wrapper {
|
||||
gradleVersion = "4.8"
|
||||
gradleVersion = "4.8.1"
|
||||
distributionType = Wrapper.DistributionType.ALL
|
||||
}
|
||||
|
@ -43,7 +43,16 @@ fun Path.exists(vararg options: LinkOption): Boolean = Files.exists(this, *optio
|
||||
/** Copy the file into the target directory using [Files.copy]. */
|
||||
fun Path.copyToDirectory(targetDir: Path, vararg options: CopyOption): Path {
|
||||
require(targetDir.isDirectory()) { "$targetDir is not a directory" }
|
||||
val targetFile = targetDir.resolve(fileName)
|
||||
/*
|
||||
* We must use fileName.toString() here because resolve(Path)
|
||||
* will throw ProviderMismatchException if the Path parameter
|
||||
* and targetDir have different Path providers, e.g. a file
|
||||
* on the filesystem vs an entry in a ZIP file.
|
||||
*
|
||||
* Path.toString() is assumed safe because fileName should
|
||||
* not include any path separator characters.
|
||||
*/
|
||||
val targetFile = targetDir.resolve(fileName.toString())
|
||||
Files.copy(this, targetFile, *options)
|
||||
return targetFile
|
||||
}
|
||||
|
@ -4,6 +4,9 @@ import org.assertj.core.api.Assertions.assertThat
|
||||
import org.junit.Rule
|
||||
import org.junit.Test
|
||||
import org.junit.rules.TemporaryFolder
|
||||
import java.net.URI
|
||||
import java.nio.file.FileSystems
|
||||
import java.nio.file.Path
|
||||
|
||||
class PathUtilsTest {
|
||||
@Rule
|
||||
@ -59,4 +62,21 @@ class PathUtilsTest {
|
||||
dir.deleteRecursively()
|
||||
assertThat(dir).doesNotExist()
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `copyToDirectory - copy into zip directory`() {
|
||||
val source: Path = tempFolder.newFile("source.txt").let {
|
||||
it.writeText("Example Text")
|
||||
it.toPath()
|
||||
}
|
||||
val target = tempFolder.root.toPath() / "target.zip"
|
||||
FileSystems.newFileSystem(URI.create("jar:${target.toUri()}"), mapOf("create" to "true")).use { fs ->
|
||||
val dir = fs.getPath("dir").createDirectories()
|
||||
val result = source.copyToDirectory(dir)
|
||||
assertThat(result)
|
||||
.isRegularFile()
|
||||
.hasParent(dir)
|
||||
.hasSameContentAs(source)
|
||||
}
|
||||
}
|
||||
}
|
@ -81,7 +81,7 @@ command to accept the new parameters file and then restarting the node. Node own
|
||||
time effectively stop being a part of the network.
|
||||
|
||||
**Signature constraints.** These are not yet supported, but once implemented they will allow a state to require a JAR
|
||||
signed by a specified identity, via the regular Java jarsigner tool. This will be the most flexible type
|
||||
signed by a specified identity, via the regular Java ``jarsigner`` tool. This will be the most flexible type
|
||||
and the smoothest to deploy: no restarts or contract upgrade transactions are needed.
|
||||
|
||||
**Defaults.** The default constraint type is either a zone constraint, if the network parameters in effect when the
|
||||
@ -135,18 +135,18 @@ constraint placeholder is useful.
|
||||
FinalityFlow
|
||||
------------
|
||||
|
||||
It's possible to encounter contract contraint issues when notarising transactions with the ``FinalityFlow`` on a network
|
||||
containing multiple versions of the same CorDapp. This will happen when using hash contraints or with zone contraints
|
||||
It's possible to encounter contract constraint issues when notarising transactions with the ``FinalityFlow`` on a network
|
||||
containing multiple versions of the same CorDapp. This will happen when using hash constraints or with zone constraints
|
||||
if the zone whitelist has missing CorDapp versions. If a participating party fails to validate the **notarised** transaction
|
||||
then we have a scenerio where the members of the network do not have a consistent view of the ledger.
|
||||
then we have a scenario where the members of the network do not have a consistent view of the ledger.
|
||||
|
||||
Therfore, if the finality handler flow (which is run on the counterparty) errors for any reason it will always be sent to
|
||||
Therefore, if the finality handler flow (which is run on the counter-party) errors for any reason it will always be sent to
|
||||
the flow hospital. From there it's suspended waiting to be retried on node restart. This gives the node operator the opportunity
|
||||
to recover from those errors, which in the case of contract constraint voilations means either updating the CorDapp or
|
||||
to recover from those errors, which in the case of contract constraint violations means either updating the CorDapp or
|
||||
adding its hash to the zone whitelist.
|
||||
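For orientation, here is a minimal sketch of the happy path being described. It is not part of this change and the flow and builder names are illustrative; the point is that ``FinalityFlow`` is run by the initiator, while it is the counterparty's automatic finality handler that the flow hospital protects:

.. sourcecode:: kotlin

    import co.paralleluniverse.fibers.Suspendable
    import net.corda.core.flows.FinalityFlow
    import net.corda.core.flows.FlowLogic
    import net.corda.core.transactions.SignedTransaction
    import net.corda.core.transactions.TransactionBuilder

    // Illustrative initiator: sign, then notarise and broadcast with FinalityFlow.
    class RecordExampleFlow(private val builder: TransactionBuilder) : FlowLogic<SignedTransaction>() {
        @Suspendable
        override fun call(): SignedTransaction {
            val stx = serviceHub.signInitialTransaction(builder)
            // If a counterparty's finality handler later fails to verify the notarised
            // transaction, that handler is parked in the flow hospital and retried when
            // the counterparty's node restarts, as described above.
            return subFlow(FinalityFlow(stx))
        }
    }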
|
||||
.. note:: This is a temporary issue in the current version of Corda, until we implement some missing features which will
|
||||
enable a seemless handling of differences in CorDapp versions.
|
||||
enable a seamless handling of differences in CorDapp versions.
|
||||
|
||||
CorDapps as attachments
|
||||
-----------------------
|
||||
|
@ -521,7 +521,7 @@ Only one party has to call ``FinalityFlow`` for a given transaction to be record
|
||||
|
||||
Because the transaction has already been notarised and the input states consumed, if the participants when receiving the
|
||||
transaction fail to verify it, or the receiving flow (the finality handler) fails due to some other error, we then have
|
||||
the scenerio where not all parties have the correct up to date view of the ledger. To recover from this the finality handler
|
||||
the scenario where not all parties have the correct up to date view of the ledger. To recover from this the finality handler
|
||||
is automatically sent to the flow hospital where it's suspended and retried from its last checkpoint on node restart.
|
||||
This gives the node operator the opportunity to recover from the error. Until the issue is resolved the node will continue
|
||||
to retry the flow on each startup.
|
||||
|
@ -25,7 +25,7 @@ There are three kinds of breaking change:
|
||||
* Removal or modification of existing API, i.e. an existing class, method or field has been either deleted or renamed, or
|
||||
its signature somehow altered.
|
||||
* Addition of a new method to an interface or abstract class. Types that have been annotated as ``@DoNotImplement`` are
|
||||
excluded from this check. (This annotation is also inherited across subclasses and subinterfaces.)
|
||||
excluded from this check. (This annotation is also inherited across subclasses and sub-interfaces.)
|
||||
* Exposure of an internal type via a public API. Internal types are considered to be anything in a ``*.internal.`` package
|
||||
or anything in a module that isn't in the stable modules list :ref:`here <internal-apis-and-stability-guarantees>`.
|
||||
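As a hedged illustration of the second rule (the interface name is invented, and the annotation is assumed to live in ``net.corda.core``), a type annotated with ``@DoNotImplement`` can gain new members without that counting as a breaking change, because CorDapps are expected to call it rather than implement it:

.. sourcecode:: kotlin

    import net.corda.core.DoNotImplement

    // Hypothetical example: adding a member to this interface would normally be flagged,
    // but @DoNotImplement (which is inherited by sub-interfaces) exempts it from the check.
    @DoNotImplement
    interface ExampleQueryService {
        fun existingOperation(): String
    }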
|
||||
@ -49,7 +49,7 @@ Updating the API
|
||||
As a rule, ``api-current.txt`` should only be updated by the release manager for each Corda release.
|
||||
|
||||
We do not expect modifications to ``api-current.txt`` as part of normal development. However, we may sometimes need to adjust
|
||||
the public API in ways that would not break developers' CorDapps but which would be blocked by the API Stabilty check.
|
||||
the public API in ways that would not break developers' CorDapps but which would be blocked by the API Stability check.
|
||||
For example, migrating a method from an interface into a superinterface. Any changes to the API summary file should be
|
||||
included in the PR, which would then need explicit approval from either `Mike Hearn <https://github.com/mikehearn>`_, `Rick Parker <https://github.com/rick-r3>`_ or `Matthew Nesbit <https://github.com/mnesbit>`_.
|
||||
|
||||
|
@ -52,7 +52,7 @@ Configuring the ``MockNetwork``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The ``MockNetwork`` is configured automatically. You can tweak its configuration using a ``MockNetworkParameters``
|
||||
object, or by using named paramters in Kotlin:
|
||||
object, or by using named parameters in Kotlin:
|
||||
|
||||
.. container:: codeset
|
||||
|
||||
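A minimal Kotlin sketch of that configuration step follows (the CorDapp package name is a placeholder and the parameter names are assumed from the test API; see the codeset above for the full tabbed example):

.. sourcecode:: kotlin

    import net.corda.testing.node.MockNetwork
    import net.corda.testing.node.MockNetworkParameters

    // Tweak the defaults via MockNetworkParameters, or pass named parameters directly.
    val mockNet = MockNetwork(
            cordappPackages = listOf("com.example.cordapp"),   // placeholder package
            defaultParameters = MockNetworkParameters(threadPerNode = true)
    )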
|
@ -118,7 +118,7 @@ All ``QueryCriteria`` implementations are composable using ``and`` and ``or`` op
|
||||
All ``QueryCriteria`` implementations provide an explicitly specifiable set of common attributes:
|
||||
|
||||
1. State status attribute (``Vault.StateStatus``), which defaults to filtering on UNCONSUMED states.
|
||||
When chaining several criterias using AND / OR, the last value of this attribute will override any previous
|
||||
When chaining several criteria using AND / OR, the last value of this attribute will override any previous value.
|
||||
2. Contract state types (``Set<Class<out ContractState>>``), which will contain at minimum one type (by default this
|
||||
will be ``ContractState`` which resolves to all state types). When chaining several criteria using ``and`` and
|
||||
``or`` operators, all specified contract state types are combined into a single set
|
||||
|
@ -81,7 +81,7 @@ The deployment process will start and typically takes 8-10 minutes to complete.
|
||||
|
||||
Once deployed, click 'Resource Groups', select the resource group you defined in Step 1 above and click 'Overview' to see the virtual machine details. The names of your VMs will be prefixed with the resource prefix value you defined in Step 1 above.
|
||||
|
||||
The Newtork Map Service node is suffixed nm0. The Notary node is suffixed not0. Your Corda participant nodes are suffixed node0, node1, node2 etc. Note down the **Public IP address** for your Corda nodes. You will need these to connect to UI screens via your web browser:
|
||||
The Network Map Service node is suffixed nm0. The Notary node is suffixed not0. Your Corda participant nodes are suffixed node0, node1, node2 etc. Note down the **Public IP address** for your Corda nodes. You will need these to connect to UI screens via your web browser:
|
||||
|
||||
.. image:: resources/azure_ip.png
|
||||
:width: 300px
|
||||
|
@ -11,8 +11,10 @@ CorDapps
|
||||
cordapp-build-systems
|
||||
building-against-master
|
||||
corda-api
|
||||
serialization
|
||||
serialization-index
|
||||
secure-coding-guidelines
|
||||
flow-cookbook
|
||||
vault
|
||||
soft-locking
|
||||
cheat-sheet
|
||||
building-a-cordapp-samples
|
@ -5,7 +5,8 @@ Here's a summary of what's changed in each Corda release. For guidance on how to
|
||||
release, see :doc:`upgrade-notes`.
|
||||
|
||||
Unreleased
|
||||
==========
|
||||
----------
|
||||
|
||||
* Introduced a hierarchy of ``DatabaseMigrationException``s, allowing ``NodeStartup`` to gracefully inform users of problems related to database migrations before exiting with a non-zero code.
|
||||
|
||||
* Introduced a grace period before the initial node registration fails if the node cannot connect to the Doorman.
|
||||
@ -16,7 +17,7 @@ Unreleased
|
||||
* H2 database changes:
|
||||
* The node's H2 database now listens on ``localhost`` by default.
|
||||
* The database server address must also be enabled in the node configuration.
|
||||
* A new ``h2Settings`` configuration block supercedes the ``h2Port`` option.
|
||||
* A new ``h2Settings`` configuration block supersedes the ``h2Port`` option.
|
||||
|
||||
* Improved documentation PDF quality. Building the documentation now requires ``LaTeX`` to be installed on the OS.
|
||||
|
||||
@ -27,13 +28,13 @@ Unreleased
|
||||
|
||||
* Introducing the flow hospital - a component of the node that manages flows that have errored and whether they should
|
||||
be retried from their previous checkpoints or have their errors propagate. Currently it will respond to any error that
|
||||
occurs during the resolution of a received transaction as part of ``FinalityFlow``. In such a scenerio the receiving
|
||||
occurs during the resolution of a received transaction as part of ``FinalityFlow``. In such a scenario the receiving
|
||||
flow will be parked and retried on node restart. This is to allow the node operator to rectify the situation as otherwise
|
||||
the node will have an incomplete view of the ledger.
|
||||
|
||||
* Fixed an issue preventing out of process nodes started by the ``Driver`` from logging to file.
|
||||
|
||||
* Fixed an issue with ``CashException`` not being able to deserialise after the introduction of AMQP for RPC.
|
||||
* Fixed an issue with ``CashException`` not being able to deserialize after the introduction of AMQP for RPC.
|
||||
|
||||
* Removed -Xmx VM argument from Explorer's Capsule setup. This helps avoid out-of-memory errors.
|
||||
|
||||
@ -89,13 +90,13 @@ Unreleased
|
||||
* ``SerializedBytes`` is serialised by materialising the bytes into the object it represents, and then serialising that
|
||||
object into YAML/JSON
|
||||
* ``X509Certificate`` is serialised as an object with key fields such as ``issuer``, ``publicKey``, ``serialNumber``, etc.
|
||||
The encoded bytes are also serialised into the ``encoded`` field. This can be used to deserialise an ``X509Certificate``
|
||||
The encoded bytes are also serialised into the ``encoded`` field. This can be used to deserialize an ``X509Certificate``
|
||||
back.
|
||||
* ``CertPath`` objects are serialised as a list of ``X509Certificate`` objects.
|
||||
* ``WireTransaction`` now nicely outputs into its components: ``id``, ``notary``, ``inputs``, ``attachments``, ``outputs``,
|
||||
``commands``, ``timeWindow`` and ``privacySalt``. This can be deserialised back.
|
||||
``commands``, ``timeWindow`` and ``privacySalt``. This can be deserialized back.
|
||||
* ``SignedTransaction`` is serialised into ``wire`` (i.e. currently only ``WireTransaction`` tested) and ``signatures``,
|
||||
and can be deserialised back.
|
||||
and can be deserialized back.
|
||||
|
||||
* ``fullParties`` boolean parameter added to ``JacksonSupport.createDefaultMapper`` and ``createNonRpcMapper``. If ``true``
|
||||
then ``Party`` objects are serialised as JSON objects with the ``name`` and ``owningKey`` fields. For ``PartyAndCertificate``
|
||||
@ -279,7 +280,7 @@ Corda Enterprise 3.0 Developer Preview
|
||||
:doc:`corda-configuration-file` for more details.
|
||||
|
||||
* Introducing the concept of network parameters which are a set of constants which all nodes on a network must agree on
|
||||
to correctly interop. These can be retrieved from ``ServiceHub.networkParameters``.
|
||||
to correctly interoperate. These can be retrieved from ``ServiceHub.networkParameters``.
|
||||
|
||||
* One of these parameters, ``maxTransactionSize``, limits the size of a transaction, including its attachments, so that
|
||||
all nodes have sufficient memory to validate transactions.
|
||||
@ -374,7 +375,7 @@ Corda Enterprise 3.0 Developer Preview
|
||||
* A new function ``checkCommandVisibility(publicKey: PublicKey)`` has been added to ``FilteredTransaction`` to check
|
||||
if every command that a signer should receive (e.g. an Oracle) is indeed visible.
|
||||
|
||||
* Changed the AMQP serialiser to use the officially assigned R3 identifier rather than a placeholder.
|
||||
* Changed the AMQP serializer to use the officially assigned R3 identifier rather than a placeholder.
|
||||
|
||||
* The ``ReceiveTransactionFlow`` can now be told to record the transaction at the same time as receiving it. Using this
|
||||
feature, better support for observer/regulator nodes has been added. See :doc:`tutorial-observer-nodes`.
|
||||
@ -519,7 +520,7 @@ Corda 1.0
|
||||
the only functionality left. You also need to rename your services resource file to the new class name.
|
||||
An associated property on ``MockNode`` was renamed from ``testPluginRegistries`` to ``testSerializationWhitelists``.
|
||||
|
||||
* Contract Upgrades: deprecated RPC authorisation / deauthorisation API calls in favour of equivalent flows in ContractUpgradeFlow.
|
||||
* Contract Upgrades: deprecated RPC authorization / deauthorization API calls in favour of equivalent flows in ContractUpgradeFlow.
|
||||
Implemented contract upgrade persistence using JDBC backed persistent map.
|
||||
|
||||
* Vault query common attributes (state status and contract state types) are now handled correctly when using composite
|
||||
@ -527,7 +528,7 @@ Corda 1.0
|
||||
|
||||
* Cash selection algorithm is now pluggable (with H2 being the default implementation)
|
||||
|
||||
* Removed usage of Requery ORM library (repalced with JPA/Hibernate)
|
||||
* Removed usage of Requery ORM library (replaced with JPA/Hibernate)
|
||||
|
||||
* Vault Query performance improvement (replaced expensive per query SQL statement to obtain concrete state types
|
||||
with a single query on start-up followed by dynamic updates using the vault state observable)
|
||||
|
@ -387,7 +387,7 @@ the error handler upon subscription to an ``Observable``. The call to this ``onE
|
||||
happens then the code will terminate the existing subscription, close the RPC connection and recursively call ``performRpcReconnect``
|
||||
which will re-subscribe once RPC connection comes back online.
|
||||
|
||||
Client code if fed with instances of ``StateMachineInfo`` using call ``clientCode(it)``. Upon re-connec, this code receives
|
||||
Client code is fed with instances of ``StateMachineInfo`` using the call ``clientCode(it)``. Upon re-connecting, this code receives
|
||||
all the items. Some of these items might have already been delivered to the client code before the failover occurred.
|
||||
It is down to the client code in this case to handle those duplicate items as appropriate.
|
||||
|
||||
|
@ -34,7 +34,7 @@ that doesn't mean it's always better. In particular:
|
||||
bugs, but over-used it can make code that has to adjust fields of an immutable object (in a clone) hard to read and
|
||||
stress the garbage collector. When such code becomes a widespread pattern it can lead to code that is just generically
|
||||
slow but without hotspots.
|
||||
* The tradeoffs between various thread safety techniques are complex, subtle, and no technique is always superior to
|
||||
* The trade-offs between various thread safety techniques are complex, subtle, and no technique is always superior to
|
||||
the others. Our code uses a mix of locks, worker threads and messaging depending on the situation.
|
||||
|
||||
1.1 Line Length and Spacing
|
||||
@ -68,7 +68,7 @@ told by the code are best deleted. Comments should:
|
||||
|
||||
* Explain what the code is doing at a higher level than is obtainable from just examining the statement and
|
||||
surrounding code.
|
||||
* Explain why certain choices were made and the tradeoffs considered.
|
||||
* Explain why certain choices were made and the trade-offs considered.
|
||||
* Explain how things can go wrong, which is a detail often not easily seen just by reading the code.
|
||||
* Use good grammar with capital letters and full stops. This gets us in the right frame of mind for writing real
|
||||
explanations of things.
|
||||
|
@ -108,5 +108,5 @@ as ``@DoNotImplement``. While we undertake not to remove or modify any of these
|
||||
functionality, the annotation is a warning that we may need to extend them in future versions of Corda.
|
||||
Cordapp developers should therefore just use these classes "as is", and *not* attempt to extend or implement any of them themselves.
|
||||
|
||||
This annotation is inherited by subclasses and subinterfaces.
|
||||
This annotation is inherited by subclasses and sub-interfaces.
|
||||
|
||||
|
@ -19,13 +19,13 @@ If you specify both command line arguments at the same time, the node will fail
|
||||
|
||||
Format
|
||||
------
|
||||
The Corda configuration file uses the HOCON format which is superset of JSON. Please visit
|
||||
The Corda configuration file uses the HOCON format which is a superset of JSON. Please visit
|
||||
`<https://github.com/typesafehub/config/blob/master/HOCON.md>`_ for further details.
|
||||
|
||||
Please do NOT use double quotes (``"``) in configuration keys.
|
||||
|
||||
Node setup will log `Config files should not contain \" in property names. Please fix: [key]` as error
|
||||
when it founds double quotes around keys.
|
||||
Node setup will log `Config files should not contain \" in property names. Please fix: [key]` as an error
|
||||
when it finds double quotes around keys.
|
||||
This prevents configuration errors when mixing keys containing ``.`` wrapped with double quotes and without them
|
||||
e.g.:
|
||||
The property `"dataSourceProperties.dataSourceClassName" = "val"` in ``reference.conf``
|
||||
|
@ -74,7 +74,7 @@ Each of the above certificates will specify a CRL allowing the certificate to be
|
||||
(primarily R3) will be required to maintain this CRL for the lifetime of the process.
|
||||
|
||||
TLS certificates will remain issued under Node CA certificates (see [decision: TLS trust
|
||||
root](./decisions/tls-trust-root.html)).
|
||||
root](./decisions/tls-trust-root.md)).
|
||||
|
||||
Nodes will be able to specify CRL(s) for TLS certificates they issue; in general, they will be required to maintain such CRLs for
|
||||
the lifecycle of the TLS certificates.
|
||||
|
@ -37,7 +37,7 @@ MN presented a high level summary of the options:
|
||||
|
||||
- Zookeeper (recommended option): industry standard widely used and trusted. May be able to leverage clients' incumbent Zookeeper infrastructure
|
||||
- Positive: has flexibility for storage and a potential for future proofing; good permissioning capabilities; standalone cluster of Zookeeper servers allows a 2-node solution rather than 3
|
||||
- Negative: adds deployment complexity due to need for Zookeeper cluster split across datacentres
|
||||
- Negative: adds deployment complexity due to need for Zookeeper cluster split across data centers
|
||||
Wrapper library choice for Zookeeper requires some analysis
|
||||
|
||||
|
||||
@ -87,7 +87,7 @@ MH: how does failover work with HSMs?
|
||||
MN: can replicate realm so failover is trivial
|
||||
|
||||
JC: how do we document Enterprise features? Publish design docs? Enterprise fact sheets? R3 Corda marketing material?
|
||||
Clear seperation of documentation is required. GT: this is already achieved by having docs.corda.net for open source
|
||||
Clear separation of documentation is required. GT: this is already achieved by having docs.corda.net for open source
|
||||
Corda and docs.corda.r3.com for enterprise R3 Corda
|
||||
|
||||
|
||||
|
@ -56,7 +56,7 @@ MH contented that the TLS implementation was specific to Corda in several ways w
|
||||
|
||||
RGB proposed messaging to clients that the option existed to terminate on the firewall if it supported the relevant requirements.
|
||||
|
||||
MN re-raised the question of key management. RGB asked about the risk implied from the threat of a compromised float. MN said an attacker who compromised a float could establish TLS connections in the name of the compromised party, and couldinspect and alter packets including readable busness data (assuming AMQP serialisation). MH gave an example of a MITM attack where an attacker could swap in their own single-use key allowing them to gain control of (e.g.) a cash asset; the TLS layer is the only current protection against that.
|
||||
MN re-raised the question of key management. RGB asked about the risk implied from the threat of a compromised float. MN said an attacker who compromised a float could establish TLS connections in the name of the compromised party, and could inspect and alter packets including readable business data (assuming AMQP serialisation). MH gave an example of a MITM attack where an attacker could swap in their own single-use key allowing them to gain control of (e.g.) a cash asset; the TLS layer is the only current protection against that.
|
||||
|
||||
RGB queried whether messages could be signed by senders. MN raised potential threat of traffic analysis, and stated E2E encryption was definitely possible but not for March-April.
|
||||
|
||||
|
@ -38,7 +38,7 @@ pre-existence of an applicable message queue for that peer.
|
||||
## Scope
|
||||
|
||||
* Goals:
|
||||
* Allow connection to a Corda node wihout requiring direct incoming connections from external participants.
|
||||
* Allow connection to a Corda node without requiring direct incoming connections from external participants.
|
||||
* Allow connections to a Corda node without requiring the node itself to have a public IP address. Separate TLS connection handling from the MQ broker.
|
||||
* Non-goals (out of scope):
|
||||
* Support for MQ brokers other than Apache Artemis
|
||||
@ -50,7 +50,7 @@ For delivery by end Q1 2018.
|
||||
Allow connectivity in compliance with DMZ constraints commonly imposed by modern financial institutions; namely:
|
||||
1. Firewalls required between the internet and any device in the DMZ, and between the DMZ and the internal network
|
||||
2. Data passing between the internet and the internal network via the DMZ should pass through a clear protocol break in the DMZ.
|
||||
3. Only identified IPs and ports are permitted to access devices in the DMZ; this include communications between devices colocated in the DMZ.
|
||||
3. Only identified IPs and ports are permitted to access devices in the DMZ; this includes communications between devices co-located in the DMZ.
|
||||
4. Only a limited number of ports are opened in the firewall (<5) to make firewall operation manageable. These ports must change slowly.
|
||||
5. Any DMZ machine is typically multi-homed, with separate network cards handling traffic through the institutional
|
||||
firewall vs. to the Internet. (There is usually a further hidden management interface card accessed via a jump box for
|
||||
|
@ -16,7 +16,7 @@ The potential use of a crash shell is relevant to high availability capabilities
|
||||
#### Disadvantages
|
||||
|
||||
1. Won’t reliably work if the node is in an unstable state
|
||||
2. Not practical for running hundreds of nodes as our customers arealready trying to do.
|
||||
2. Not practical for running hundreds of nodes, as our customers are already trying to do.
|
||||
3. Doesn’t mesh with the user access controls of the organisation.
|
||||
4. Doesn’t interface to the existing monitoring and control systems i.e. Nagios, Geneos ITRS, Docker Swarm, etc.
|
||||
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
## Background / Context
|
||||
|
||||
End-to-end encryption is a desirable potential design feature for the [high availability support](design).
|
||||
End-to-end encryption is a desirable potential design feature for the [high availability support](../design.md).
|
||||
|
||||
## Options Analysis
|
||||
|
||||
|
@ -97,7 +97,7 @@ state index.
|
||||
|
||||
| Heading | Recommendation |
|
||||
| ---------------------------------------- | -------------- |
|
||||
| [Replication framework](decisions/replicated_storage.md) | Option C |
|
||||
| [Replication framework](decisions/replicated-storage.md) | Option C |
|
||||
| [Index storage engine](decisions/index-storage.md) | Option A |
|
||||
|
||||
TECHNICAL DESIGN
|
||||
|
@ -34,7 +34,7 @@ Corda currently exposes several forms of monitorable content:
|
||||
|
||||
* Industry standard exposed JMX-based metrics, both standard JVM and custom application metrics are exposed directly
|
||||
using the [Dropwizard.io](http://metrics.dropwizard.io/3.2.3/) *JmxReporter* facility. In addition Corda also uses the
|
||||
[Jolokia](https://jolokia.org/) framework to make these accesible over an HTTP endpoint. Typically, these metrics are
|
||||
[Jolokia](https://jolokia.org/) framework to make these accessible over an HTTP endpoint. Typically, these metrics are
|
||||
also collated by 3rd party tools to provide pro-active monitoring, visualisation and re-active management.
|
||||
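For context, a hedged sketch of the *JmxReporter* wiring referenced above (the Dropwizard 3.x package layout is assumed; Corda performs this internally, so this is not code a node operator writes):

```kotlin
import com.codahale.metrics.JmxReporter
import com.codahale.metrics.MetricRegistry

// Register metrics and publish them as JMX MBeans; Jolokia can then expose the same
// MBeans over an HTTP endpoint for external tooling.
val metrics = MetricRegistry()
val reporter = JmxReporter.forRegistry(metrics).build()
reporter.start()
```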
|
||||
A full list of currently exposed metrics can be found in Appendix A.
|
||||
@ -48,7 +48,7 @@ The `ProgressTracker` component is used to report the progress of a flow through
|
||||
typically configured to report the start of a specific business workflow step (often before and after message send and
|
||||
receipt where other participants form part of a multi-staged business workflow). The progress tracking framework was
|
||||
designed to become a vital part of how exceptions, errors, and other faults are surfaced to human operators for
|
||||
investigation and resolution. It provides a means of exporting progress as a hierachy of steps in a way that’s both
|
||||
investigation and resolution. It provides a means of exporting progress as a hierarchy of steps in a way that’s both
|
||||
human readable and machine readable.
|
||||
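As a hedged illustration of that hierarchy (the step names are hypothetical), a flow typically declares its steps up front and advances `currentStep` as the business workflow progresses:

```kotlin
import net.corda.core.utilities.ProgressTracker

// Hypothetical step hierarchy for a two-stage business workflow.
object TradeSteps {
    object REQUESTING : ProgressTracker.Step("Requesting trade details from counterparty")
    object VERIFYING : ProgressTracker.Step("Verifying the counterparty's response")

    fun tracker() = ProgressTracker(REQUESTING, VERIFYING)
}

// Inside a flow: progressTracker.currentStep = TradeSteps.REQUESTING
```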
|
||||
In addition, in-house Corda networks at R3 use the following tools:
|
||||
@ -129,7 +129,7 @@ design, either directly or through an integrated enterprise-wide systems managem
|
||||
The following design decisions are to be confirmed:
|
||||
|
||||
1. JMX for metric eventing and SLF4J for logging
|
||||
Both above are widely adopted mechanisms that enable pluggability and seamless inteoperability with other 3rd party
|
||||
Both above are widely adopted mechanisms that enable pluggability and seamless interoperability with other 3rd party
|
||||
enterprise-wide system management solutions.
|
||||
2. Continue or discontinue usage of Jolokia? (TBC - most likely yes, subject to read-only security lock-down)
|
||||
3. Separation of Corda Node and CorDapp log outputs (TBC)
|
||||
@ -140,7 +140,7 @@ There are a number of activities and parts to the solution proposal:
|
||||
|
||||
1. Extend JMX metric reporting (through the Corda Monitoring Service and associated Jolokia conversion to REST/JSON)
|
||||
coverage (see implementation details) to include all Corda services (vault, key management, transaction storage,
|
||||
network map, attachment storage, identity, cordapp provision) & subsytems components (state machine)
|
||||
network map, attachment storage, identity, cordapp provision) & sub-system components (state machine)
|
||||
|
||||
2. Review and extend Corda log4j2 coverage (see implementation details) to ensure
|
||||
|
||||
@ -264,16 +264,16 @@ The Health checker is a CorDapp which verifies the health and liveliness of the
|
||||
Auto-triggering of the above flow using RPC to exercise the following:
|
||||
|
||||
- messaging subsystem verification (RPC queuing)
|
||||
- authenticaton and permissing checking (against underlying configuration)
|
||||
- authentication and permissions checking (against underlying configuration)
|
||||
|
||||
|
||||
The Health checker may be deployed as part of a Corda distribution and automatically invoked upoin start-up and/or manually triggered via JMX or the nodes associated Crash shell (using the startFlow command)
|
||||
The Health checker may be deployed as part of a Corda distribution and automatically invoked upon start-up and/or manually triggered via JMX or the node's associated Crash shell (using the startFlow command)
|
||||
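A hedged sketch of triggering such a flow over RPC (the flow body, the host/port and the credentials are hypothetical; only the RPC plumbing reflects the public API):

```kotlin
import co.paralleluniverse.fibers.Suspendable
import net.corda.client.rpc.CordaRPCClient
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.StartableByRPC
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.core.utilities.getOrThrow

// Hypothetical health-check flow; a real one would exercise messaging, vault access, etc.
@StartableByRPC
class HealthCheckFlow : FlowLogic<String>() {
    @Suspendable
    override fun call(): String = "OK"
}

fun triggerHealthCheck(): String {
    val client = CordaRPCClient(NetworkHostAndPort("localhost", 10006))
    return client.start("monitoring-user", "password").use { connection ->
        connection.proxy.startFlow(::HealthCheckFlow).returnValue.getOrThrow()
    }
}
```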
|
||||
Please note that the Health checker application is not responsible for determining the healthiness of a Corda Network. This is the responsibility of the network operator, and may include verification checks such as:
|
||||
|
||||
- correct functioning of Network Map Service (registration, discovery)
|
||||
- correct functioning of configured Notary
|
||||
- remote messaging subsytem (including bridge creation)
|
||||
- remote messaging sub-system (including bridge creation)
|
||||
|
||||
#### Metrics augmentation within Corda Subsystems and Components
|
||||
|
||||
@ -281,7 +281,7 @@ Please note that the Health checker application is not responsible for determini
|
||||
|
||||
- Gauge: is an instantaneous measurement of a value.
|
||||
- Counter: is a gauge for a numeric value (specifically of type `AtomicLong`) which can be incremented or decremented.
|
||||
- Meter: measures mean throughtput (eg. the rate of events over time, e.g., “requests per second”). Also measures one-, five-, and fifteen-minute exponentially-weighted moving average throughputs.
|
||||
- Meter: measures mean throughput (e.g. the rate of events over time, such as “requests per second”). Also measures one-, five-, and fifteen-minute exponentially-weighted moving average throughputs.
|
||||
- Histogram: measures the statistical distribution of values in a stream of data (minimum, maximum, mean, median, 75th, 90th, 95th, 98th, 99th, and 99.9th percentiles).
|
||||
- Timer: measures both the rate that a particular piece of code is called and the distribution of its duration (e.g. rate of requests in requests per second).
|
||||
- Health checks: provides a means of centralizing service health checks (database, message broker).
|
||||
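A brief sketch of how those types look with the Dropwizard `MetricRegistry` API (the metric names here are invented):

```kotlin
import com.codahale.metrics.MetricRegistry

val registry = MetricRegistry()
val flowErrors = registry.counter("FlowErrors")        // Counter: incremented on each failure
val txMeter = registry.meter("TxNotarised")            // Meter: mean + moving-average rates
val verifyTimer = registry.timer("TxVerifyDuration")   // Timer: rate + duration distribution

flowErrors.inc()
txMeter.mark()
verifyTimer.time().use {
    // timed work goes here
}
```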
@ -299,11 +299,11 @@ The following table identifies additional metrics to report for a Corda node:
|
||||
| State Machine | Fiber thread pool queue size (counter), Live fibers (counter) , Fibers waiting for ledger commit (counter)<br />Flow Session Messages (counters): init, confirm, received, reject, normal end, error end, total received messages (for a given flow session, Id and state)<br />(in addition to existing metrics captured)<br />Flow error (count) |
|
||||
| Flow State Machine | Initiated flows (counter)<br />For a given flow session (counters): initiated flows, send, sendAndReceive, receive, receiveAll, retries upon send<br />For flow messaging (timers) to determine round trip latencies between send/receive interactions with counterparties.<br />Flow suspension metrics (count, age, wait reason, cordapp) |
|
||||
| RPC | For each RPC operation we should export metrics to report: calling user, round trip latency (timer), calling frequency (meter). Metric reporting should include the Corda RPC protocol version (should be the same as the node's Platform Version) in play. <br />Failed requests would be of particular interest for alerting. |
|
||||
| Vault | Roundtrip latency of Vault Queries (timer)<br />Soft locking counters for reserve, release (counter), elapsed times soft locks are held for per flow id (timer, histogram), list of soft locked flow ids and associated stateRefs.<br />attempt to soft lock fungible states for spending (timer) |
|
||||
| Vault | round trip latency of Vault Queries (timer)<br />Soft locking counters for reserve, release (counter), elapsed times soft locks are held for per flow id (timer, histogram), list of soft locked flow ids and associated stateRefs.<br />attempt to soft lock fungible states for spending (timer) |
|
||||
| Transaction Verification<br />(InMemoryTransactionVerifierService) | worker pool size (counter), verify duration (timer), verify throughput (meter), success (counter), failure (counter), in flight (counter) |
|
||||
| Notarisation | Notary details (type, members in cluster)<br />Counters for success, failures, failure types (conflict, invalid time window, invalid transaction, wrong notary), elapsed time (timer)<br />Ideally provide breakdown of latency across notarisation steps: state ref notary validation, signature checking, from sending to remote notary to receiving response |
|
||||
| RAFT Notary Service<br />(awaiting choice of new RAFT implementation) | should include similar metrics to previous RAFT (see appendix). |
|
||||
| SimpleNotaryService | success/failure uniqueness checking<br />success/failure timewindow checking |
|
||||
| SimpleNotaryService | success/failure uniqueness checking<br />success/failure time-window checking |
|
||||
| ValidatingNotaryService | as above plus success/failure of transaction validation |
|
||||
| RaftNonValidatingNotaryService | as `SimpleNotaryService`, plus timer for algorithmic execution latency |
|
||||
| RaftValidatingNotaryService | as `ValidatingNotaryService`, plus timer for algorithmic execution latency |
|
||||
@ -423,7 +423,7 @@ Additionally, JMX metrics are also generated within the Corda *node-driver* perf
|
||||
|
||||
## Appendix B - Corda Logging and Reporting coverage
|
||||
|
||||
Primary node services exposed publically via ServiceHub (SH) or internally by ServiceHubInternal (SHI):
|
||||
Primary node services exposed publicly via ServiceHub (SH) or internally by ServiceHubInternal (SHI):
|
||||
|
||||
| Service | Type | Implementation | Logging summary |
|
||||
| ---------------------------------------- | ---- | ---------------------------------- | ---------------------------------------- |
|
||||
|
@ -19,7 +19,7 @@ This design question concerns the way we can manage a certification key. A more
|
||||
|
||||
### A. Use Intel's recommended protocol
|
||||
|
||||
This involves using aesmd and the Intel SDK to establish an opaque attestation key that transparently signs quotes.
|
||||
This involves using ``aesmd`` and the Intel SDK to establish an opaque attestation key that transparently signs quotes.
|
||||
Then for each enclave we need to do several round trips to IAS to get a revocation list (which we don't need) and request
|
||||
a direct Intel signature over the quote (which we shouldn't need as the trust has been established already during EPID
|
||||
join)
|
||||
|
@ -19,7 +19,7 @@ This is a simple choice of technology.
|
||||
|
||||
1. Clunky API
|
||||
2. No HTTP API
|
||||
3. Handrolled protocol
|
||||
3. Hand-rolled protocol
|
||||
|
||||
### B. etcd
|
||||
|
||||
|
@ -75,13 +75,13 @@ This may be done in the following steps:
|
||||
4. Find an alive host that has the channel in its active set for the measurement
|
||||
|
||||
1 may be done by maintaining a channel -> measurements map in etcd. This mapping would effectively define the enclave
|
||||
deployment and would be the central place to control incremental rollout or rollbacks.
|
||||
deployment and would be the central place to control incremental roll-out or rollbacks.
|
||||
|
||||
2 requires storing additional metadata per advertised channel, namely a data structure describing the enclave's trust
|
||||
predicate. A similar data structure is provided by the discovering entity - these two predicates can then be used to
|
||||
filter measurements based on trust.
|
||||
|
||||
3 is where we may want to introduce more control if we want to support incremental rollout/canary deployments.
|
||||
3 is where we may want to introduce more control if we want to support incremental roll-out/canary deployments.
|
||||
|
||||
4 is where various (non-MVP) optimisation considerations come to mind. We could add a loadbalancer, do autoscaling based
|
||||
on load (although Kubernetes already provides support for this), could have a preference for looping back to the same
|
||||
|
@ -5,7 +5,7 @@ layer which hides the infrastructure details. Users provide "lambdas", which are
|
||||
other lambdas, access other AWS services etc. Because Lambdas are inherently stateless (any state they need must be
|
||||
accessed through a service) they may be loaded and executed on demand. This is in contrast with microservices, which
|
||||
are inherently stateful. Internally AWS caches the lambda images and even caches JIT compiled/warmed up code in order
|
||||
to reduce latency. Furthermore the lambda invokation interface provides a convenient way to scale these lambdas: as the
|
||||
to reduce latency. Furthermore the lambda invocation interface provides a convenient way to scale these lambdas: as the
|
||||
functions are stateless, AWS can spin up new VMs to push lambda functions to. The user simply pays for CPU usage, all
|
||||
the infrastructure pain is hidden by Amazon.
|
||||
|
||||
|
@ -51,7 +51,7 @@ as part of a separate design effort. Figuring out what you will *not* do is freq
|
||||
|
||||
List of design decisions identified in defining the target solution.
|
||||
|
||||
For each item, please complete the attached [Design Decision template](decisions/decision.html)
|
||||
For each item, please complete the attached [Design Decision template](decisions/decision.md)
|
||||
|
||||
Use the ``.. toctree::`` feature to list out the design decision docs here (see the source of this file for an example).
|
||||
|
||||
|
@ -37,7 +37,7 @@ Corda Modules
|
||||
``core-deterministic`` and ``serialization-deterministic`` are generated from Corda's ``core`` and ``serialization``
|
||||
modules respectively using both `ProGuard <https://www.guardsquare.com/en/proguard>`_ and Corda's ``JarFilter`` Gradle
|
||||
plugin. Corda developers configure these tools by applying Corda's ``@KeepForDJVM`` and ``@DeleteForDJVM``
|
||||
annotations to elements of ``core`` and ``serialization`` as described `here <deterministic_annotations_>`_.
|
||||
annotations to elements of ``core`` and ``serialization`` as described :ref:`here <deterministic_annotations>`.
|
||||
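By way of a hedged example (the class is invented, and the annotations are assumed to live in ``net.corda.core``), the annotations mark what survives in the deterministic JARs:

.. sourcecode:: kotlin

    import net.corda.core.DeleteForDJVM
    import net.corda.core.KeepForDJVM

    // The class is kept in core-deterministic, while the clock-dependent helper is
    // stripped out by the JarFilter plugin because it cannot be deterministic.
    @KeepForDJVM
    data class ExamplePayload(val value: Int) {
        @DeleteForDJVM
        fun stampedNow(): String = "$value@${System.currentTimeMillis()}"
    }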
|
||||
The build generates each of Corda's deterministic JARs in six steps:
|
||||
|
||||
|
@ -125,7 +125,7 @@ object TopupIssuerFlow {
|
||||
val txns: List<SignedTransaction> = reserveLimits.map { amount ->
|
||||
// request asset issue
|
||||
logger.info("Requesting currency issue $amount")
|
||||
val txn = issueCashTo(amount, topupRequest.issueToParty, topupRequest.issuerPartyRef)
|
||||
val txn = issueCashTo(amount, topupRequest.issueToParty, topupRequest.issuerPartyRef, topupRequest.notaryParty)
|
||||
progressTracker.currentStep = SENDING_TOP_UP_ISSUE_REQUEST
|
||||
return@map txn.stx
|
||||
}
|
||||
@ -138,10 +138,8 @@ object TopupIssuerFlow {
|
||||
@Suspendable
|
||||
private fun issueCashTo(amount: Amount<Currency>,
|
||||
issueTo: Party,
|
||||
issuerPartyRef: OpaqueBytes): AbstractCashFlow.Result {
|
||||
// TODO: pass notary in as request parameter
|
||||
val notaryParty = serviceHub.networkMapCache.notaryIdentities.firstOrNull()
|
||||
?: throw IllegalArgumentException("Couldn't find any notary in NetworkMapCache")
|
||||
issuerPartyRef: OpaqueBytes,
|
||||
notaryParty: Party): AbstractCashFlow.Result {
|
||||
// invoke Cash subflow to issue Asset
|
||||
progressTracker.currentStep = ISSUING
|
||||
val issueCashFlow = CashIssueFlow(amount, issuerPartyRef, notaryParty)
|
||||
|
@ -79,15 +79,15 @@ private fun prepareOurInputsAndOutputs(serviceHub: ServiceHub, lockId: UUID, req
|
||||
val (inputs, residual) = gatherOurInputs(serviceHub, lockId, sellAmount, request.notary)
|
||||
|
||||
// Build an output state for the counterparty
|
||||
val transferedFundsOutput = Cash.State(sellAmount, request.counterparty)
|
||||
val transferredFundsOutput = Cash.State(sellAmount, request.counterparty)
|
||||
|
||||
val outputs = if (residual > 0L) {
|
||||
// Build an output state for the residual change back to us
|
||||
val residualAmount = Amount(residual, sellAmount.token)
|
||||
val residualOutput = Cash.State(residualAmount, serviceHub.myInfo.singleIdentity())
|
||||
listOf(transferedFundsOutput, residualOutput)
|
||||
listOf(transferredFundsOutput, residualOutput)
|
||||
} else {
|
||||
listOf(transferedFundsOutput)
|
||||
listOf(transferredFundsOutput)
|
||||
}
|
||||
return Pair(inputs, outputs)
|
||||
// DOCEND 2
|
||||
|
@ -1,106 +0,0 @@
|
||||
/*
|
||||
* R3 Proprietary and Confidential
|
||||
*
|
||||
* Copyright (c) 2018 R3 Limited. All rights reserved.
|
||||
*
|
||||
* The intellectual and technical concepts contained herein are proprietary to R3 and its suppliers and are protected by trade secret law.
|
||||
*
|
||||
* Distribution of this file or any portion thereof via any medium without the express permission of R3 is strictly prohibited.
|
||||
*/
|
||||
|
||||
package net.corda.docs.tutorial.mocknetwork
|
||||
|
||||
import co.paralleluniverse.fibers.Suspendable
|
||||
import com.google.common.collect.ImmutableList
|
||||
import net.corda.core.contracts.requireThat
|
||||
import net.corda.core.flows.FlowLogic
|
||||
import net.corda.core.flows.FlowSession
|
||||
import net.corda.core.flows.InitiatedBy
|
||||
import net.corda.core.flows.InitiatingFlow
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.utilities.unwrap
|
||||
import net.corda.testing.node.MockNetwork
|
||||
import net.corda.testing.node.StartedMockNode
|
||||
import org.junit.After
|
||||
import org.junit.Before
|
||||
import org.junit.Rule
|
||||
import org.junit.rules.ExpectedException
|
||||
|
||||
class TutorialMockNetwork {
|
||||
|
||||
@InitiatingFlow
|
||||
class FlowA(private val otherParty: Party) : FlowLogic<Unit>() {
|
||||
|
||||
@Suspendable
|
||||
override fun call() {
|
||||
val session = initiateFlow(otherParty)
|
||||
|
||||
session.receive<Int>().unwrap {
|
||||
requireThat { "Expected to receive 1" using (it == 1) }
|
||||
}
|
||||
|
||||
session.receive<Int>().unwrap {
|
||||
requireThat { "Expected to receive 2" using (it == 2) }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@InitiatedBy(FlowA::class)
|
||||
class FlowB(private val session: FlowSession) : FlowLogic<Unit>() {
|
||||
|
||||
@Suspendable
|
||||
override fun call() {
|
||||
session.send(1)
|
||||
session.send(2)
|
||||
}
|
||||
}
|
||||
|
||||
private lateinit var mockNet: MockNetwork
|
||||
private lateinit var nodeA: StartedMockNode
|
||||
private lateinit var nodeB: StartedMockNode
|
||||
|
||||
@Rule
|
||||
@JvmField
|
||||
val expectedEx: ExpectedException = ExpectedException.none()
|
||||
|
||||
@Before
|
||||
fun setUp() {
|
||||
mockNet = MockNetwork(ImmutableList.of("net.corda.docs.tutorial.mocknetwork"))
|
||||
nodeA = mockNet.createPartyNode()
|
||||
nodeB = mockNet.createPartyNode()
|
||||
}
|
||||
|
||||
@After
|
||||
fun tearDown() {
|
||||
mockNet.stopNodes()
|
||||
}
|
||||
|
||||
// @Test
|
||||
// fun `fail if initiated doesn't send back 1 on first result`() {
|
||||
|
||||
// DOCSTART 1
|
||||
// TODO: Fix this test - accessing the MessagingService directly exposes internal interfaces
|
||||
// nodeB.setMessagingServiceSpy(object : MessagingServiceSpy(nodeB.network) {
|
||||
// override fun send(message: Message, target: MessageRecipients, retryId: Long?, sequenceKey: Any, additionalHeaders: Map<String, String>) {
|
||||
// val messageData = message.data.deserialize<Any>() as? ExistingSessionMessage
|
||||
// val payload = messageData?.payload
|
||||
//
|
||||
// if (payload is DataSessionMessage && payload.payload.deserialize() == 1) {
|
||||
// val alteredMessageData = messageData.copy(payload = payload.copy(99.serialize())).serialize().bytes
|
||||
// messagingService.send(InMemoryMessagingNetwork.InMemoryMessage(message.topic, OpaqueBytes(alteredMessageData), message.uniqueMessageId), target, retryId)
|
||||
// } else {
|
||||
// messagingService.send(message, target, retryId)
|
||||
// }
|
||||
// }
|
||||
// })
|
||||
// DOCEND 1
|
||||
|
||||
// val initiatingReceiveFlow = nodeA.startFlow(FlowA(nodeB.info.legalIdentities.first()))
|
||||
//
|
||||
// mockNet.runNetwork()
|
||||
//
|
||||
// expectedEx.expect(IllegalArgumentException::class.java)
|
||||
// expectedEx.expectMessage("Expected to receive 1")
|
||||
// initiatingReceiveFlow.getOrThrow()
|
||||
// }
|
||||
}
|
@ -60,16 +60,3 @@ directly to the ``megaCorpNode.services.recordTransaction`` method (note that th
|
||||
transactions are valid) inside a ``database.transaction``. All node flows run within a database transaction in the
|
||||
nodes themselves, but any time we need to use the database directly from a unit test, you need to provide a database
|
||||
transaction as shown here.
|
||||
|
||||
.. MockNetwork message manipulation
|
||||
.. --------------------------------
|
||||
.. The MockNetwork has the ability to manipulate message streams. You can use this to test your flows' behaviour on corrupted,
|
||||
or malicious data received.
|
||||
|
||||
.. Message modification example in ``TutorialMockNetwork.kt``:
|
||||
|
||||
.. .. literalinclude:: ../../docs/source/example-code/src/main/kotlin/net/corda/docs/tutorial/mocknetwork/TutorialMockNetwork.kt
|
||||
:language: kotlin
|
||||
:start-after: DOCSTART 1
|
||||
:end-before: DOCEND 1
|
||||
:dedent: 8
|
||||
|
@ -5,7 +5,7 @@ Software requirements
|
||||
---------------------
|
||||
Corda uses industry-standard tools:
|
||||
|
||||
* **Oracle JDK 8 JVM** - minimum supported version **8u131**
|
||||
* **Oracle JDK 8 JVM** - minimum supported version **8u171**
|
||||
* **IntelliJ IDEA** - supported versions **2017.x** and **2018.x**
|
||||
* **Git**
|
||||
|
||||
|
@ -183,8 +183,8 @@ Next steps
|
||||
----------
|
||||
There are a number of improvements we could make to this CorDapp:
|
||||
|
||||
* We chould add unit tests, using the contract-test and flow-test frameworks
|
||||
* We chould change ``IOUState.value`` from an integer to a proper amount of a given currency
|
||||
* We could add unit tests, using the contract-test and flow-test frameworks
|
||||
* We could change ``IOUState.value`` from an integer to a proper amount of a given currency
|
||||
* We could add an API, to make it easier to interact with the CorDapp
|
||||
|
||||
But for now, the biggest priority is to add an ``IOUContract`` imposing constraints on the evolution of each
|
||||
|
@ -43,8 +43,9 @@ application development please continue to refer to `the main project documentat
|
||||
tools-index.rst
|
||||
node-internals-index.rst
|
||||
component-library-index.rst
|
||||
troubleshooting.rst
|
||||
serialization-index.rst
|
||||
json.rst
|
||||
troubleshooting.rst
|
||||
|
||||
.. toctree::
|
||||
:caption: Operations
|
||||
@ -57,6 +58,16 @@ application development please continue to refer to `the main project documentat
|
||||
loadtesting.rst
|
||||
certificate-revocation
|
||||
|
||||
.. Documentation is not included in the pdf unless it is included in a toctree somewhere
|
||||
.. only:: pdfmode
|
||||
|
||||
.. toctree::
|
||||
:caption: Other documentation
|
||||
|
||||
deterministic-modules.rst
|
||||
release-notes.rst
|
||||
changelog.rst
|
||||
|
||||
.. only:: htmlmode
|
||||
|
||||
.. toctree::
|
||||
|
@ -119,7 +119,7 @@ These include:
|
||||
* When a node would prefer to use a different notary cluster for a given transaction due to privacy or efficiency
|
||||
concerns
|
||||
|
||||
Before these transactions can be created, the states must first all be repointed to the same notary cluster. This is
|
||||
Before these transactions can be created, the states must first all be re-pointed to the same notary cluster. This is
|
||||
achieved using a special notary-change transaction that takes:
|
||||
|
||||
* A single input state
|
||||
|
@ -1,5 +1,5 @@
|
||||
Tradeoffs
|
||||
=========
|
||||
Trade-offs
|
||||
==========
|
||||
|
||||
.. topic:: Summary
|
||||
|
||||
|
@ -124,7 +124,7 @@ The current set of network parameters:
|
||||
|
||||
More parameters will be added in future releases to regulate things like allowed port numbers, how long a node can be
|
||||
offline before it is evicted from the zone, whether or not IPv6 connectivity is required for zone members, required
|
||||
cryptographic algorithms and rollout schedules (e.g. for moving to post quantum cryptography), parameters related to
|
||||
cryptographic algorithms and roll-out schedules (e.g. for moving to post quantum cryptography), parameters related to
|
||||
SGX and so on.
|
||||
|
||||
Network parameters update process
|
||||
@ -134,7 +134,7 @@ In case of the need to change network parameters Corda zone operator will start
|
||||
that may lead to this decision: adding a notary, setting new fields that were added to enable smooth network interoperability,
|
||||
or a change to the existing compatibility constants, for example.
|
||||
|
||||
.. note:: A future release may support the notion of phased rollout of network parameter changes.
|
||||
.. note:: A future release may support the notion of phased roll-out of network parameter changes.
|
||||
|
||||
To synchronize all nodes in the compatibility zone to use the new set of network parameters, two RPC methods are
|
||||
provided. The process requires human interaction and approval of the change, so node operators can review the
|
||||
|
@ -230,7 +230,7 @@ It can be thought of as a DNS equivalent. If you want to de-list a user, you wou
|
||||
It is very likely that your map server won't be entirely standalone, but rather, integrated with whatever your master
|
||||
user database is.
|
||||
|
||||
The network map server also distributes signed network parameter files and controls the rollout schedule for when they
|
||||
The network map server also distributes signed network parameter files and controls the roll-out schedule for when they
|
||||
become available for download and opt-in, and when they become enforced. This is again a policy decision you will
|
||||
probably choose to place some simple UI or workflow tooling around, in particular to enforce restrictions on who can
|
||||
edit the map or the parameters.
|
||||
@ -322,7 +322,7 @@ Selecting parameter values
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
How to choose the parameters? This is the most complex question facing you as a new zone operator. Some settings may seem
|
||||
straightforward and others may involve cost/benefit tradeoffs specific to your business. For example, you could choose
|
||||
straightforward and others may involve cost/benefit trade-offs specific to your business. For example, you could choose
|
||||
to run a validating notary yourself, in which case you would (in the absence of SGX) see all the users' data. Or you could
|
||||
run a non-validating notary, with BFT fault tolerance, which implies recruiting others to take part in the cluster.
|
||||
|
||||
|
@ -1,6 +1,15 @@
|
||||
Quickstart
|
||||
==========
|
||||
|
||||
.. only:: pdfmode
|
||||
|
||||
.. toctree::
|
||||
:caption: Other docs
|
||||
:maxdepth: 1
|
||||
|
||||
getting-set-up.rst
|
||||
tutorial-cordapp.rst
|
||||
|
||||
* :doc:`Set up your machine for CorDapp development <getting-set-up>`
|
||||
* :doc:`Run the Example CorDapp <tutorial-cordapp>`
|
||||
* `View CorDapps in Corda Explore <http://explore.corda.zone/>`_
|
||||
|
12
docs/source/serialization-index.rst
Normal file
@ -0,0 +1,12 @@
|
||||
Serialization
|
||||
=============
|
||||
|
||||
.. toctree::
|
||||
|
||||
:caption: Other docs
|
||||
:maxdepth: 1
|
||||
|
||||
serialization.rst
|
||||
cordapp-custom-serializers
|
||||
serialization-default-evolution.rst
|
||||
serialization-enum-evolution.rst
|
@ -17,7 +17,7 @@ the `CRaSH`_ shell and supports many of the same features. These features includ
|
||||
* Uploading and downloading attachments
|
||||
* Issuing SQL queries to the underlying database
|
||||
* Viewing JMX metrics and monitoring exports
|
||||
* UNIX style pipes for both text and objects, an ``egrep`` command and a command for working with columnular data
|
||||
* UNIX style pipes for both text and objects, an ``egrep`` command and a command for working with columnar data
|
||||
* Shutting the node down.
|
||||
|
||||
Permissions
|
||||
|
@ -1,12 +1,6 @@
|
||||
Hello, World! Pt.2 - Contract constraints
|
||||
=========================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
tut-two-party-contract
|
||||
tut-two-party-flow
|
||||
|
||||
.. note:: This tutorial extends the CorDapp built during the :doc:`Hello, World tutorial <hello-world-introduction>`.
|
||||
|
||||
In the Hello, World tutorial, we built a CorDapp allowing us to model IOUs on ledger. Our CorDapp was made up of two
|
||||
@ -22,3 +16,9 @@ In this tutorial, we'll write a contract to imposes rules on how an ``IOUState``
|
||||
will require some small changes to the flow we defined in the previous tutorial.
|
||||
|
||||
We'll start by writing the contract.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
tut-two-party-contract
|
||||
tut-two-party-flow
|
@ -48,7 +48,7 @@ Observables are described in further detail in :doc:`clientrpc`
|
||||
The graph will be defined as follows:
|
||||
|
||||
* Each transaction is a vertex, represented by printing ``NODE <txhash>``
|
||||
* Each input-output relationship is an edge, represented by prining ``EDGE <txhash> <txhash>``
|
||||
* Each input-output relationship is an edge, represented by printing ``EDGE <txhash> <txhash>``
|
||||
|
||||
.. literalinclude:: example-code/src/main/kotlin/net/corda/docs/ClientRpcTutorial.kt
|
||||
:language: kotlin
|
||||

@ -303,7 +303,7 @@ The first line simply gets the time-window out of the transaction. Setting a tim
may be missing here. We check for it being null later.

.. warning:: In the Kotlin version as long as we write a comparison with the transaction time first the compiler will
   verify we didn't forget to check if it's missing. Unfortunately due to the need for smooth Java interop, this
   verify we didn't forget to check if it's missing. Unfortunately due to the need for smooth interoperability with Java, this
   check won't happen if we write e.g. ``someDate > time``, it has to be ``time < someDate``. So it's good practice to
   always write the transaction time-window first.
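
As a rough illustration of the pattern being described (names are assumed, not the tutorial's exact code), the nullable time-window is fetched first and the null check made explicit before the maturity comparison::

    // Sketch: the time-window may be absent, so treat it as nullable and check it.
    val time = tx.timeWindow?.untilTime
    requireThat {
        "the transaction has a time-window" using (time != null)
        "the paper has matured" using (time!! >= paper.maturityDate)
    }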

@ -317,7 +317,7 @@ this group. We do not allow multiple units of CP to be split or merged even if t
exception if the list size is not 1, otherwise it returns the single item in that list. In Java, this appears as a
regular static method of the type familiar from many FooUtils type singleton classes and we have statically imported it
here. In Kotlin, it appears as a method that can be called on any JDK list. The syntax is slightly different but
behind the scenes, the code compiles to the same bytecodes.
behind the scenes, the code compiles to the same bytecode.
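
For illustration only (``groups`` stands in for whatever JDK list is being reduced), the two spellings of the same helper are::

    // Kotlin: an extension callable on any List; throws unless the list has exactly one element.
    val group = groups.single()

    // Java: the same function surfaces as a static method generated by the Kotlin compiler,
    // which can be statically imported, e.g.
    //     import static kotlin.collections.CollectionsKt.single;
    //     Object group = single(groups);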

Next, we check that the transaction was signed by the public key that's marked as the current owner of the commercial
paper. Because the platform has already verified all the digital signatures before the contract begins execution,

@ -287,7 +287,7 @@ Testing
^^^^^^^

Test Framework API stabilisation changes (introduced in Corda V3.0)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* MockNetwork API usage has been greatly simplified.
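
By way of illustration (package and class names are assumptions), the simplified ``MockNetwork`` usage referred to above reduces to roughly this shape::

    import net.corda.testing.node.MockNetwork
    import org.junit.After
    import org.junit.Test

    class ExampleMockNetworkTest {
        // Construct the network from the CorDapp packages under test.
        private val network = MockNetwork(listOf("com.example.contracts", "com.example.flows"))
        private val nodeA = network.createNode()
        private val nodeB = network.createNode()

        @After
        fun tearDown() = network.stopNodes()

        @Test
        fun `nodes can transact`() {
            // ... start a flow on nodeA against nodeB, then pump the messages through ...
            network.runNetwork()
        }
    }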

@ -71,7 +71,7 @@ The ``InitiatedBy`` flow does the opposite:
* Receives a ``String``
* Sends a ``CustomType``

As long as both the ``IntiatingFlow`` and the ``InitiatedBy`` flows conform to the sequence of actions, the flows can
As long as both the ``InitiatingFlow`` and the ``InitiatedBy`` flows conform to the sequence of actions, the flows can
be implemented in any way you see fit (including adding proprietary business logic that is not shared with other
parties).
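
For illustration, a matching pair along those lines (class names and the ``CustomType`` payload are assumptions, not the documentation's exact example) could be written as::

    import co.paralleluniverse.fibers.Suspendable
    import net.corda.core.flows.FlowLogic
    import net.corda.core.flows.FlowSession
    import net.corda.core.flows.InitiatedBy
    import net.corda.core.flows.InitiatingFlow
    import net.corda.core.identity.Party
    import net.corda.core.serialization.CordaSerializable
    import net.corda.core.utilities.unwrap

    @CordaSerializable
    data class CustomType(val length: Int)

    @InitiatingFlow
    class SendGreetingFlow(private val counterparty: Party) : FlowLogic<CustomType>() {
        @Suspendable
        override fun call(): CustomType {
            val session = initiateFlow(counterparty)
            // Sends a String...
            session.send("Hello")
            // ...and receives a CustomType back.
            return session.receive<CustomType>().unwrap { it }
        }
    }

    @InitiatedBy(SendGreetingFlow::class)
    class SendGreetingResponder(private val otherSideSession: FlowSession) : FlowLogic<Unit>() {
        @Suspendable
        override fun call() {
            // Receives a String...
            val greeting = otherSideSession.receive<String>().unwrap { it }
            // ...and sends a CustomType, mirroring the initiator's sequence of actions.
            otherSideSession.send(CustomType(greeting.length))
        }
    }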

@ -81,7 +81,7 @@ A flow can become backwards-incompatible in two main ways:

* The sequence of ``send`` and ``receive`` calls changes:

  * A ``send`` or ``receive`` is added or removed from either the ``InitatingFlow`` or ``InitiatedBy`` flow
  * A ``send`` or ``receive`` is added or removed from either the ``InitiatingFlow`` or ``InitiatedBy`` flow
  * The sequence of ``send`` and ``receive`` calls changes

* The types of the ``send`` and ``receive`` calls changes

@ -112,7 +112,7 @@ If you shut down all nodes and upgrade them all at the same time, any incompatib

In situations where some nodes may still be using previous versions of a flow and thus new versions of your flow may
talk to old versions, the updated flows need to be backwards-compatible. This will be the case for almost any real
deployment in which you cannot easily coordinate the rollout of new code across the network.
deployment in which you cannot easily coordinate the roll-out of new code across the network.

How do I ensure flow backwards-compatibility?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@ -50,7 +50,7 @@ Note the following:
* a vault update API is internally used by transaction recording flows.
* the vault database schemas are directly accessible via JDBC for customer joins and queries

Section 8 of the `Technical white paper`_ describes features of the vault yet to be implemented including private key managament, state splitting and merging, asset re-issuance and node event scheduling.
Section 8 of the `Technical white paper`_ describes features of the vault yet to be implemented including private key management, state splitting and merging, asset re-issuance and node event scheduling.

.. _`Technical white paper`: _static/corda-technical-whitepaper.pdf
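
Since the vault tables are reachable over JDBC, a rough sketch of a direct query from code that holds a ``ServiceHub`` might look like the following (the table and column names are assumptions based on the default vault schema; adapt the SQL to your own joins)::

    // Sketch: list unconsumed states straight from the vault tables via JDBC.
    serviceHub.jdbcSession().prepareStatement(
            "SELECT transaction_id, output_index FROM VAULT_STATES WHERE state_status = ?"
    ).use { statement ->
        statement.setInt(1, 0) // assumed encoding: 0 = unconsumed
        statement.executeQuery().use { results ->
            while (results.next()) {
                println("${results.getString("transaction_id")}(${results.getInt("output_index")})")
            }
        }
    }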

2 gradle/wrapper/gradle-wrapper.properties vendored
@ -10,6 +10,6 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-4.8-all.zip
distributionUrl=https\://services.gradle.org/distributions/gradle-4.8.1-all.zip
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists

@ -70,6 +70,7 @@ class NodeArgsParser : AbstractArgsParser<CmdLineOptions>() {
private val clearNetworkMapCache = optionParser.accepts("clear-network-map-cache", "Clears local copy of network map, on node startup it will be restored from server or file system.")

override fun doParse(optionSet: OptionSet): CmdLineOptions {
require(optionSet.nonOptionArguments().isEmpty()) { "Unrecognized argument(s): ${optionSet.nonOptionArguments().joinToString(separator = ", ")}"}
require(!optionSet.has(baseDirectoryArg) || !optionSet.has(configFileArg)) {
"${baseDirectoryArg.options()[0]} and ${configFileArg.options()[0]} cannot be specified together"
}

@ -14,18 +14,23 @@ abstract class AbstractArgsParser<out T : Any> {
* If the help option is specified then the process is also shutdown after printing the help output to stdout.
*/
fun parseOrExit(vararg args: String): T {
val optionSet = try {
optionParser.parse(*args)
} catch (e: OptionException) {
System.err.println(e.message ?: "Unable to parse arguments.")
optionParser.printHelpOn(System.err)
exitProcess(1)
}
try {
val optionSet = optionParser.parse(*args)
if (optionSet.has(helpOption)) {
optionParser.printHelpOn(System.out)
exitProcess(0)
}
return doParse(optionSet)
} catch (e: Exception) {
when (e) {
is OptionException, is IllegalArgumentException -> {
System.err.println(e.message ?: "Unable to parse arguments.")
optionParser.printHelpOn(System.err)
exitProcess(1)
}
else -> throw e
}
}
}

fun parse(vararg args: String): T = doParse(optionParser.parse(*args))
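
As a hypothetical usage sketch (not code from this commit), the new behaviour surfaces like this when the parser is driven from an entry point::

    // Illustrative only: an unrecognised positional argument such as "foo" now makes
    // doParse() fail with IllegalArgumentException, which parseOrExit() converts into
    // the error message plus help text on stderr, followed by exitProcess(1).
    fun main(args: Array<String>) {
        val cmdLineOptions = NodeArgsParser().parseOrExit(*args)
        println("Starting node from ${cmdLineOptions.baseDirectory}")
    }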

@ -189,4 +189,18 @@ class NodeArgsParserTest {
assertThat(cmdLineOptions.unknownConfigKeysPolicy).isEqualTo(onUnknownConfigKeyPolicy)
}
}

@Test
fun `invalid argument`() {
assertThatExceptionOfType(IllegalArgumentException::class.java).isThrownBy {
parser.parse("foo")
}.withMessageContaining("Unrecognized argument(s): foo")
}

@Test
fun `invalid arguments`() {
assertThatExceptionOfType(IllegalArgumentException::class.java).isThrownBy {
parser.parse("foo", "bar")
}.withMessageContaining("Unrecognized argument(s): foo, bar")
}
}

@ -746,6 +746,7 @@ class DriverDSLImpl(
Permissions.invokeRpc(CordaRPCOps::stateMachineRecordedTransactionMappingFeed),
Permissions.invokeRpc(CordaRPCOps::nodeInfoFromParty),
Permissions.invokeRpc(CordaRPCOps::internalVerifiedTransactionsFeed),
Permissions.invokeRpc(CordaRPCOps::internalFindVerifiedTransaction),
Permissions.invokeRpc("vaultQueryBy"),
Permissions.invokeRpc("vaultTrackBy"),
Permissions.invokeRpc(CordaRPCOps::registeredFlows)