Mirror of https://github.com/corda/corda.git (synced 2025-03-17 17:45:17 +00:00)

Merge pull request #984 from corda/parkri-os-merge-20180613-1

OS -> ENT merge up to and including d5d46c674dda89332520b019c3fbf8ef7371fa48

This commit is contained in commit ab2b27915a
7  .idea/compiler.xml  (generated)
@@ -152,11 +152,8 @@
        <module name="loadtest_test" target="1.8" />
        <module name="mock_main" target="1.8" />
        <module name="mock_test" target="1.8" />
        <module name="network-management-capsule_main" target="1.8" />
        <module name="network-management-capsule_test" target="1.8" />
        <module name="network-management_integrationTest" target="1.8" />
        <module name="network-management_main" target="1.8" />
        <module name="network-management_test" target="1.8" />
        <module name="network-bootstrapper_main" target="1.8" />
        <module name="network-bootstrapper_test" target="1.8" />
        <module name="network-visualiser_main" target="1.8" />
        <module name="network-visualiser_test" target="1.8" />
        <module name="node-api_main" target="1.8" />
12  build.gradle
@@ -25,9 +25,9 @@ buildscript {
    //
    // TODO: Sort this alphabetically.
    ext.kotlin_version = constants.getProperty("kotlinVersion")
    // use our fork of quasar
    ext.quasar_group = 'com.github.corda.quasar'
    ext.quasar_version = '7629695563deae6cc95adcfbebcbc8322fd0241a'
    ext.quasar_group = 'co.paralleluniverse'
    ext.quasar_version = '0.7.10'
    // gradle-capsule-plugin:1.0.2 contains capsule:1.0.1
    // TODO: Upgrade gradle-capsule-plugin to a version with capsule:1.0.3
@@ -405,12 +405,12 @@ bintrayConfig {
            'corda-shell',
            'corda-serialization',
            'corda-serialization-deterministic',
            'corda-tools-blob-inspector',
            'corda-tools-network-bootstrapper',
            'corda-bridgeserver',
            'corda-ptflows',
            'jmeter-corda',
            'tools-database-manager',
            'tools-blob-inspector',
            'tools-network-bootstrapper'
            'tools-database-manager'
    ]
    license {
        name = 'Apache-2.0'
@@ -8,6 +8,8 @@ Unreleased
==========

* Introduced a hierarchy of ``DatabaseMigrationException``s, allowing ``NodeStartup`` to gracefully inform users of problems related to database migrations before exiting with a non-zero code.

* Add ``devModeOptions.allowCompatibilityZone`` to re-enable the use of a compatibility zone and ``devMode``

* Fixed an issue where ``trackBy`` was returning ``ContractStates`` from a transaction that were not being tracked. The
  unrelated ``ContractStates`` will now be filtered out from the returned ``Vault.Update``.

@@ -151,7 +153,7 @@ Version 3.1

* Update the fast-classpath-scanner dependent library version from 2.0.21 to 2.12.3

* Added `database.hibernateDialect` node configuration option
* Added `database.hibernateDialect` node configuration option

.. _changelog_r3_v3:
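The ``trackBy`` entry above describes a behavioural fix rather than an API change. A minimal Kotlin sketch of the affected usage, assuming a node's ``services`` (and the finance module's ``Cash.State``) are in scope, for example inside a flow or a mock-network test:

    val feed = services.vaultService.trackBy(Cash.State::class.java)
    feed.updates.subscribe { update ->
        // With the fix, update.produced/consumed only contain Cash.State entries;
        // unrelated ContractStates from the same transaction are filtered out of the Vault.Update.
        update.produced.forEach { stateAndRef -> println(stateAndRef.state.data) }
    }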
@@ -205,7 +205,13 @@ absolute path to the node's base directory.
:doormanURL: Root address of the network registration service.
:networkMapURL: Root address of the network map service.

.. note:: Only one of ``compatibilityZoneURL`` or ``networkServices`` should be used.
.. note:: Only one of ``compatibilityZoneURL`` or ``networkServices`` should be used.

:devModeOptions: Allows modification of certain ``devMode`` features

    :allowCompatibilityZone: Allows a node configured to operate in development mode to connect to a compatibility zone.

    .. note:: This is an unsupported configuration.

:jvmArgs: An optional list of JVM args, as strings, which replace those inherited from the command line when launching via ``corda.jar``
    only. e.g. ``jvmArgs = [ "-Xmx220m", "-Xms220m", "-XX:+UseG1GC" ]``
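The documentation above describes node.conf fields in RST. As a hedged Kotlin sketch, a matching configuration fragment can be parsed with Typesafe Config; the values are illustrative only and https://example.com stands in for a real compatibility zone URL:

    import com.typesafe.config.ConfigFactory

    val devModeConfig = ConfigFactory.parseString("""
        devMode = true
        compatibilityZoneURL = "https://example.com"   // placeholder URL
        devModeOptions { allowCompatibilityZone = true }
        jvmArgs = [ "-Xmx220m", "-Xms220m", "-XX:+UseG1GC" ]
    """)
    check(devModeConfig.getBoolean("devModeOptions.allowCompatibilityZone"))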
@@ -170,25 +170,25 @@ Below you can find the example task from the `IRS Demo <https://github.com/corda
    node {
        name "O=Notary Service,L=Zurich,C=CH"
        notary = [validating : true]
        cordapps = ["${project(":finance").group}:finance:$corda_release_version"]
        cordapps = ["$corda_release_group:corda-finance:$corda_release_version"]
        rpcUsers = rpcUsersList
        useTestClock true
    }
    node {
        name "O=Bank A,L=London,C=GB"
        cordapps = ["${project(":finance").group}:finance:$corda_release_version"]
        cordapps = ["$corda_release_group:corda-finance:$corda_release_version"]
        rpcUsers = rpcUsersList
        useTestClock true
    }
    node {
        name "O=Bank B,L=New York,C=US"
        cordapps = ["${project(":finance").group}:finance:$corda_release_version"]
        cordapps = ["$corda_release_group:corda-finance:$corda_release_version"]
        rpcUsers = rpcUsersList
        useTestClock true
    }
    node {
        name "O=Regulator,L=Moscow,C=RU"
        cordapps = ["${project.group}:finance:$corda_release_version"]
        cordapps = ["$corda_release_group:corda-finance:$corda_release_version"]
        rpcUsers = rpcUsersList
        useTestClock true
    }
BIN  lib/quasar.jar  (binary file not shown)
@@ -13,7 +13,6 @@ package net.corda.nodeapi.internal.network
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import net.corda.cordform.CordformNode
import net.corda.core.contracts.ContractClassName
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
import net.corda.core.internal.*
@@ -65,7 +64,6 @@ class NetworkBootstrapper {
        )

        private const val LOGS_DIR_NAME = "logs"
        private const val EXCLUDE_WHITELIST_FILE_NAME = "exclude_whitelist.txt"

        @JvmStatic
        fun main(args: Array<String>) {
@@ -301,35 +299,6 @@ class NetworkBootstrapper {
        return networkParameters
    }

    @VisibleForTesting
    internal fun generateWhitelist(networkParameters: NetworkParameters?,
                                   excludeContracts: List<ContractClassName>,
                                   cordappJars: List<ContractsJar>): Map<ContractClassName, List<AttachmentId>> {
        val existingWhitelist = networkParameters?.whitelistedContractImplementations ?: emptyMap()

        if (excludeContracts.isNotEmpty()) {
            println("Exclude contracts from whitelist: ${excludeContracts.joinToString()}")
            existingWhitelist.keys.forEach {
                require(it !in excludeContracts) { "$it is already part of the existing whitelist and cannot be excluded." }
            }
        }

        val newWhiteList = cordappJars
                .flatMap { jar -> (jar.scan() - excludeContracts).map { it to jar.hash } }
                .toMultiMap()

        return (newWhiteList.keys + existingWhitelist.keys).associateBy({ it }) {
            val existingHashes = existingWhitelist[it] ?: emptyList()
            val newHashes = newWhiteList[it] ?: emptyList()
            (existingHashes + newHashes).distinct()
        }
    }

    private fun readExcludeWhitelist(directory: Path): List<String> {
        val file = directory / EXCLUDE_WHITELIST_FILE_NAME
        return if (file.exists()) file.readAllLines().map(String::trim) else emptyList()
    }

    private fun NodeInfo.notaryIdentity(): Party {
        return when (legalIdentities.size) {
            // Single node notaries have just one identity like all other nodes. This identity is the notary identity
@@ -0,0 +1,43 @@
package net.corda.nodeapi.internal.network

import net.corda.core.contracts.ContractClassName
import net.corda.core.internal.div
import net.corda.core.internal.exists
import net.corda.core.internal.readAllLines
import net.corda.core.internal.toMultiMap
import net.corda.core.node.NetworkParameters
import net.corda.core.node.services.AttachmentId
import net.corda.nodeapi.internal.ContractsJar
import org.slf4j.LoggerFactory
import java.nio.file.Path

private const val EXCLUDE_WHITELIST_FILE_NAME = "exclude_whitelist.txt"
private val logger = LoggerFactory.getLogger("net.corda.nodeapi.internal.network.WhitelistGenerator")

fun generateWhitelist(networkParameters: NetworkParameters?,
                      excludeContracts: List<ContractClassName>,
                      cordappJars: List<ContractsJar>): Map<ContractClassName, List<AttachmentId>> {
    val existingWhitelist = networkParameters?.whitelistedContractImplementations ?: emptyMap()

    if (excludeContracts.isNotEmpty()) {
        logger.info("Exclude contracts from whitelist: ${excludeContracts.joinToString()}")
        existingWhitelist.keys.forEach {
            require(it !in excludeContracts) { "$it is already part of the existing whitelist and cannot be excluded." }
        }
    }

    val newWhiteList = cordappJars
            .flatMap { jar -> (jar.scan() - excludeContracts).map { it to jar.hash } }
            .toMultiMap()

    return (newWhiteList.keys + existingWhitelist.keys).associateBy({ it }) {
        val existingHashes = existingWhitelist[it] ?: emptyList()
        val newHashes = newWhiteList[it] ?: emptyList()
        (existingHashes + newHashes).distinct()
    }
}

fun readExcludeWhitelist(directory: Path): List<String> {
    val file = directory / EXCLUDE_WHITELIST_FILE_NAME
    return if (file.exists()) file.readAllLines().map(String::trim) else emptyList()
}
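A hedged usage sketch of the two new top-level functions above; existingNetworkParameters, nodeDirectory and cordappJars are assumed to be provided by the surrounding bootstrapper code:

    val excluded = readExcludeWhitelist(nodeDirectory)            // contents of exclude_whitelist.txt, if present
    val whitelist = generateWhitelist(existingNetworkParameters, excluded, cordappJars)
    whitelist.forEach { (contractClass, attachmentIds) ->
        println("$contractClass -> ${attachmentIds.joinToString()}")
    }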
@@ -127,7 +127,7 @@ class NetworkBootstrapperTest {
    private fun generateWhitelist(existingWhitelist: Map<String, List<AttachmentId>>,
                                  excludeContracts: List<ContractClassName>,
                                  contractJars: List<TestContractsJar>): Map<String, List<AttachmentId>> {
        return NetworkBootstrapper().generateWhitelist(
        return generateWhitelist(
                testNetworkParameters(whitelistedContractImplementations = existingWhitelist),
                excludeContracts,
                contractJars
@@ -0,0 +1,41 @@
package net.corda.nodeapi.internal.network

import com.nhaarman.mockito_kotlin.mock
import com.nhaarman.mockito_kotlin.verify
import net.corda.core.crypto.SecureHash
import net.corda.nodeapi.internal.ContractsJar
import org.junit.Test
import kotlin.test.assertEquals
import kotlin.test.assertTrue

class WhitelistGeneratorTest {

    @Test
    fun `whitelist generator builds the correct whitelist map`() {
        // given
        val jars = (0..9).map {
            val index = it
            mock<ContractsJar> {
                val secureHash = SecureHash.randomSHA256()
                on { scan() }.then {
                    listOf(index.toString())
                }
                on { hash }.then {
                    secureHash
                }
            }
        }

        // when
        val result = generateWhitelist(null, emptyList(), jars)

        // then
        jars.forEachIndexed { index, item ->
            verify(item).scan()
            val attachmentIds = requireNotNull(result[index.toString()])
            assertEquals(1, attachmentIds.size)
            assertTrue { attachmentIds.contains(item.hash) }
        }
    }

}
@@ -93,7 +93,7 @@ interface NodeConfiguration : NodeSSLConfiguration {
    }
}

data class DevModeOptions(val disableCheckpointChecker: Boolean = false)
data class DevModeOptions(val disableCheckpointChecker: Boolean = false, val allowCompatibilityZone: Boolean = false)

data class GraphiteOptions(
        val server: String,
@@ -314,16 +314,19 @@ data class NodeConfigurationImpl(
    private fun validateDevModeOptions(): List<String> {
        if (devMode) {
            compatibilityZoneURL?.let {
                return listOf("'compatibilityZoneURL': present. Property cannot be set when 'devMode' is true.")
                if (devModeOptions?.allowCompatibilityZone != true) {
                    return listOf("'compatibilityZoneURL': present. Property cannot be set when 'devMode' is true unless devModeOptions.allowCompatibilityZone is also true")
                }
            }

            // if compatibiliZoneURL is set then it will be copied into the networkServices field and thus skipping
            // this check by returning above is fine.
            networkServices?.let {
                return listOf("'networkServices': present. Property cannot be set when 'devMode' is true.")
                if (devModeOptions?.allowCompatibilityZone != true) {
                    return listOf("'networkServices': present. Property cannot be set when 'devMode' is true unless devModeOptions.allowCompatibilityZone is also true")
                }
            }
        }

        return emptyList()
    }
@@ -37,6 +37,7 @@ import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.DatabaseTransaction
import net.corda.nodeapi.internal.persistence.contextTransaction
import net.corda.nodeapi.internal.persistence.contextTransactionOrNull
import org.apache.activemq.artemis.utils.ReusableLatch
import org.slf4j.Logger
import org.slf4j.LoggerFactory
import org.slf4j.MDC
@@ -78,7 +79,8 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
            val actionExecutor: ActionExecutor,
            val stateMachine: StateMachine,
            val serviceHub: ServiceHubInternal,
            val checkpointSerializationContext: SerializationContext
            val checkpointSerializationContext: SerializationContext,
            val unfinishedFibers: ReusableLatch
    )

    internal var transientValues: TransientReference<TransientValues>? = null
@@ -249,6 +251,7 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
        }

        recordDuration(startTime)
        getTransientField(TransientValues::unfinishedFibers).countDown()
    }

    @Suspendable
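A hedged sketch of the latch hand-off introduced above: the state machine manager owns a ReusableLatch, injects it into each fiber's TransientValues as unfinishedFibers, and the fiber counts it down when its flow finishes, so callers (typically tests) can wait for all flows to drain. Method names are from Artemis' ReusableLatch; the wiring is simplified.

    import org.apache.activemq.artemis.utils.ReusableLatch

    val unfinishedFibers = ReusableLatch()
    unfinishedFibers.countUp()      // done by the state machine manager when a flow starts
    // ... the flow runs; FlowStateMachineImpl calls countDown() in the block shown above ...
    unfinishedFibers.countDown()
    unfinishedFibers.await()        // e.g. in tests: block until every live flow has finished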
@@ -654,7 +654,8 @@ class MultiThreadedStateMachineManager(
                actionExecutor = actionExecutor!!,
                stateMachine = StateMachine(id, secureRandom),
                serviceHub = serviceHub,
                checkpointSerializationContext = checkpointSerializationContext!!
                checkpointSerializationContext = checkpointSerializationContext!!,
                unfinishedFibers = unfinishedFibers
        )
    }
@@ -23,20 +23,12 @@ import net.corda.core.flows.FlowInfo
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.StateMachineRunId
import net.corda.core.identity.Party
import net.corda.core.internal.FlowStateMachine
import net.corda.core.internal.ThreadBox
import net.corda.core.internal.TimedFlow
import net.corda.core.internal.bufferUntilSubscribed
import net.corda.core.internal.castIfPossible
import net.corda.core.internal.*
import net.corda.core.internal.concurrent.OpenFuture
import net.corda.core.internal.concurrent.map
import net.corda.core.internal.concurrent.openFuture
import net.corda.core.messaging.DataFeed
import net.corda.core.serialization.SerializationContext
import net.corda.core.serialization.SerializationDefaults
import net.corda.core.serialization.SerializedBytes
import net.corda.core.serialization.deserialize
import net.corda.core.serialization.serialize
import net.corda.core.serialization.*
import net.corda.core.utilities.ProgressTracker
import net.corda.core.utilities.Try
import net.corda.core.utilities.contextLogger
@@ -48,11 +40,7 @@ import net.corda.node.services.config.shouldCheckCheckpoints
import net.corda.node.services.messaging.DeduplicationHandler
import net.corda.node.services.messaging.ReceivedMessage
import net.corda.node.services.statemachine.FlowStateMachineImpl.Companion.createSubFlowVersion
import net.corda.node.services.statemachine.interceptors.DumpHistoryOnErrorInterceptor
import net.corda.node.services.statemachine.interceptors.FiberDeserializationChecker
import net.corda.node.services.statemachine.interceptors.FiberDeserializationCheckingInterceptor
import net.corda.node.services.statemachine.interceptors.HospitalisingInterceptor
import net.corda.node.services.statemachine.interceptors.PrintingInterceptor
import net.corda.node.services.statemachine.interceptors.*
import net.corda.node.services.statemachine.transitions.StateMachine
import net.corda.node.utilities.AffinityExecutor
import net.corda.nodeapi.internal.persistence.CordaPersistence
@@ -64,16 +52,10 @@ import rx.Observable
import rx.subjects.PublishSubject
import java.security.SecureRandom
import java.util.*
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.concurrent.ScheduledFuture
import java.util.concurrent.TimeUnit
import java.util.concurrent.locks.ReentrantLock
import java.util.concurrent.*
import javax.annotation.concurrent.ThreadSafe
import kotlin.collections.ArrayList
import kotlin.collections.HashMap
import kotlin.concurrent.withLock
import kotlin.streams.toList

/**
@@ -238,7 +220,6 @@ class SingleThreadedStateMachineManager(
                logger.debug("Killing flow known to physical node.")
                decrementLiveFibers()
                totalFinishedFlows.inc()
                unfinishedFibers.countDown()
                try {
                    flow.fiber.interrupt()
                    true
@@ -247,6 +228,7 @@ class SingleThreadedStateMachineManager(
                        checkpointStorage.removeCheckpoint(id)
                    }
                    transitionExecutor.forceRemoveFlow(id)
                    unfinishedFibers.countDown()
                }
            } else {
                // TODO replace with a clustered delete after we'll support clustered nodes
@@ -290,7 +272,6 @@ class SingleThreadedStateMachineManager(
        if (flow != null) {
            decrementLiveFibers()
            totalFinishedFlows.inc()
            unfinishedFibers.countDown()
            return when (removalReason) {
                is FlowRemovalReason.OrderlyFinish -> removeFlowOrderly(flow, removalReason, lastState)
                is FlowRemovalReason.ErrorFinish -> removeFlowError(flow, removalReason, lastState)
@@ -374,8 +355,7 @@ class SingleThreadedStateMachineManager(
                    checkpoint = checkpoint,
                    initialDeduplicationHandler = null,
                    isAnyCheckpointPersisted = true,
                    isStartIdempotent = false,
                    senderUUID = null
                    isStartIdempotent = false
            )
        } else {
            // Just flow initiation message
@@ -671,7 +651,8 @@ class SingleThreadedStateMachineManager(
                actionExecutor = actionExecutor!!,
                stateMachine = StateMachine(id, secureRandom),
                serviceHub = serviceHub,
                checkpointSerializationContext = checkpointSerializationContext!!
                checkpointSerializationContext = checkpointSerializationContext!!,
                unfinishedFibers = unfinishedFibers
        )
    }

@@ -680,8 +661,7 @@ class SingleThreadedStateMachineManager(
            checkpoint: Checkpoint,
            isAnyCheckpointPersisted: Boolean,
            isStartIdempotent: Boolean,
            initialDeduplicationHandler: DeduplicationHandler?,
            senderUUID: String? = ourSenderUUID
            initialDeduplicationHandler: DeduplicationHandler?
    ): Flow {
        val flowState = checkpoint.flowState
        val resultFuture = openFuture<Any?>()
@@ -697,7 +677,7 @@ class SingleThreadedStateMachineManager(
                        isStartIdempotent = isStartIdempotent,
                        isRemoved = false,
                        flowLogic = logic,
                        senderUUID = senderUUID
                        senderUUID = null
                )
                val fiber = FlowStateMachineImpl(id, logic, scheduler)
                fiber.transientValues = TransientReference(createTransientValues(id, resultFuture))
@@ -716,7 +696,7 @@ class SingleThreadedStateMachineManager(
                        isStartIdempotent = isStartIdempotent,
                        isRemoved = false,
                        flowLogic = fiber.logic,
                        senderUUID = senderUUID
                        senderUUID = null
                )
                fiber.transientValues = TransientReference(createTransientValues(id, resultFuture))
                fiber.transientState = TransientReference(state)
@@ -206,7 +206,11 @@ class StaffedFlowHospital {
            if (history.notDischargedForTheSameThingMoreThan(newError.maxRetries, this)) {
                return Diagnosis.DISCHARGE
            } else {
                log.warn("\"Maximum number of retries reached for timed flow ${flowFiber.javaClass}")
                val errorMsg = "Maximum number of retries reached for flow ${flowFiber.snapshot().flowLogic.javaClass}." +
                        "If the flow involves notarising a transaction, this usually means that the notary is being overloaded and" +
                        "unable to service requests fast enough. Please try again later."
                newError.setMessage(errorMsg)
                log.warn(errorMsg)
            }
        }
        return Diagnosis.NOT_MY_SPECIALTY
@@ -10,12 +10,8 @@

package net.corda.node.services.config

import com.typesafe.config.Config
import com.typesafe.config.ConfigException
import com.typesafe.config.ConfigFactory
import com.typesafe.config.*
import com.zaxxer.hikari.HikariConfig
import com.typesafe.config.ConfigParseOptions
import com.typesafe.config.ConfigValueFactory
import net.corda.core.internal.toPath
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.nodeapi.internal.persistence.CordaPersistence.DataSourceConfigTag
@@ -24,11 +20,9 @@ import net.corda.nodeapi.internal.config.UnknownConfigKeysPolicy
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.node.MockServices.Companion.makeTestDataSourceProperties
import net.corda.tools.shell.SSHDConfiguration
import org.assertj.core.api.Assertions.assertThat
import org.assertj.core.api.Assertions.assertThatCode
import org.assertj.core.api.Assertions.assertThatThrownBy
import org.junit.Assert.assertNotNull
import org.assertj.core.api.Assertions.*
import org.junit.Assert.assertEquals
import org.junit.Assert.assertNotNull
import org.junit.Test
import java.net.InetAddress
import java.net.URL
@@ -179,6 +173,17 @@ class NodeConfigurationImplTest {
        assertThat(errors).hasOnlyOneElementSatisfying { error -> error.contains("compatibilityZoneURL") && error.contains("devMode") }
    }

    @Test
    fun `validation succeeds when compatibilityZoneURL is present and devMode is true and allowCompatibilityZoneURL is set`() {
        val configuration = testConfiguration.copy(
                devMode = true,
                compatibilityZoneURL = URL("https://r3.com"),
                devModeOptions = DevModeOptions(allowCompatibilityZone = true))

        val errors = configuration.validate()
        assertThat(errors).isEmpty()
    }

    @Test
    fun `mutual exclusion machineName set to default if not explicitly set`() {
        val config = getConfig("test-config-mutualExclusion-noMachineName.conf").parseAsNodeConfiguration(UnknownConfigKeysPolicy.IGNORE::handle)
@@ -29,6 +29,7 @@ import org.junit.Before
import org.junit.Test
import java.sql.SQLException
import java.time.Duration
import java.util.*
import kotlin.test.assertEquals
import kotlin.test.assertNotNull
import kotlin.test.assertNull
@@ -47,6 +48,7 @@ class RetryFlowMockTest {
        RetryFlow.count = 0
        SendAndRetryFlow.count = 0
        RetryInsertFlow.count = 0
        KeepSendingFlow.count = 0
    }

    private fun <T> StartedNode<MockNode>.startFlow(logic: FlowLogic<T>): CordaFuture<T> {
@@ -74,7 +76,7 @@ class RetryFlowMockTest {

    @Test
    fun `Retry does not set senderUUID`() {
        val messagesSent = mutableListOf<Message>()
        val messagesSent = Collections.synchronizedList(mutableListOf<Message>())
        val partyB = nodeB.info.legalIdentities.first()
        nodeA.setMessagingServiceSpy(object : MessagingServiceSpy(nodeA.network) {
            override fun send(message: Message, target: MessageRecipients, sequenceKey: Any) {
@@ -88,6 +90,38 @@ class RetryFlowMockTest {
        assertEquals(2, SendAndRetryFlow.count)
    }

    @Test
    fun `Restart does not set senderUUID`() {
        val messagesSent = Collections.synchronizedList(mutableListOf<Message>())
        val partyB = nodeB.info.legalIdentities.first()
        nodeA.setMessagingServiceSpy(object : MessagingServiceSpy(nodeA.network) {
            override fun send(message: Message, target: MessageRecipients, sequenceKey: Any) {
                messagesSent.add(message)
                messagingService.send(message, target)
            }
        })
        val count = 10000 // Lots of iterations so the flow keeps going long enough
        nodeA.startFlow(KeepSendingFlow(count, partyB))
        while (messagesSent.size < 1) {
            Thread.sleep(10)
        }
        assertNotNull(messagesSent.first().senderUUID)
        nodeA = mockNet.restartNode(nodeA)
        // This is a bit racy because restarting the node actually starts it, so we need to make sure there's enough iterations we get here with flow still going.
        nodeA.setMessagingServiceSpy(object : MessagingServiceSpy(nodeA.network) {
            override fun send(message: Message, target: MessageRecipients, sequenceKey: Any) {
                messagesSent.add(message)
                messagingService.send(message, target)
            }
        })
        // Now short circuit the iterations so the flow finishes soon.
        KeepSendingFlow.count = count - 2
        while (nodeA.smm.allStateMachines.size > 0) {
            Thread.sleep(10)
        }
        assertNull(messagesSent.last().senderUUID)
    }

    @Test
    fun `Retry duplicate insert`() {
        assertEquals(Unit, nodeA.startFlow(RetryInsertFlow(1)).get())
@@ -105,6 +139,8 @@ class RetryFlowMockTest {

    @Test
    fun `Patient records do not leak in hospital when using killFlow`() {
        // Make sure we have seen an update from the hospital, and thus the flow went there.
        val records = nodeA.smm.flowHospital.track().updates.toBlocking().toIterable().iterator()
        val flow: FlowStateMachine<Unit> = nodeA.services.startFlow(FinalityHandler(object : FlowSession() {
            override val counterparty: Party
                get() = TODO("not implemented")
@@ -141,8 +177,9 @@ class RetryFlowMockTest {
                TODO("not implemented")
            }
        }), nodeA.services.newContext()).get()
        // Make sure we have seen an update from the hospital, and thus the flow went there.
        nodeA.smm.flowHospital.track().updates.toBlocking().first()
        // Should be 2 records, one for admission and one for keep in.
        records.next()
        records.next()
        // Killing it should remove it.
        nodeA.smm.killFlow(flow.id)
        assertThat(nodeA.smm.flowHospital.track().snapshot).isEmpty()
@@ -155,6 +192,7 @@ class RetryCausingError : SQLException("deadlock")

class RetryFlow(private val i: Int) : FlowLogic<Unit>() {
    companion object {
        @Volatile
        var count = 0
    }

@@ -171,29 +209,10 @@ class RetryFlow(private val i: Int) : FlowLogic<Unit>() {
    }
}

class RetryAndSleepFlow(private val i: Int) : FlowLogic<Unit>() {
    companion object {
        var count = 0
    }

    @Suspendable
    override fun call() {
        logger.info("Hello $count")
        if (count++ < i) {
            if (i == Int.MAX_VALUE) {
                throw LimitedRetryCausingError()
            } else {
                throw RetryCausingError()
            }
        } else {
            sleep(Duration.ofDays(1))
        }
    }
}

@InitiatingFlow
class SendAndRetryFlow(private val i: Int, private val other: Party) : FlowLogic<Unit>() {
    companion object {
        @Volatile
        var count = 0
    }

@@ -218,8 +237,40 @@ class ReceiveFlow2(private val other: FlowSession) : FlowLogic<Unit>() {
    }
}

@InitiatingFlow
class KeepSendingFlow(private val i: Int, private val other: Party) : FlowLogic<Unit>() {
    companion object {
        @Volatile
        var count = 0
    }

    @Suspendable
    override fun call() {
        val session = initiateFlow(other)
        session.send(i.toString())
        do {
            logger.info("Sending... $count")
            session.send("Boo")
        } while (count++ < i)
    }
}

@Suppress("unused")
@InitiatedBy(KeepSendingFlow::class)
class ReceiveFlow3(private val other: FlowSession) : FlowLogic<Unit>() {
    @Suspendable
    override fun call() {
        var count = other.receive<String>().unwrap { it.toInt() }
        while (count-- > 0) {
            val received = other.receive<String>().unwrap { it }
            logger.info("Received... $received $count")
        }
    }
}

class RetryInsertFlow(private val i: Int) : FlowLogic<Unit>() {
    companion object {
        @Volatile
        var count = 0
    }

@@ -63,6 +63,7 @@ include 'tools:bootstrapper'
include 'tools:blobinspector'
include 'tools:dbmigration'
include 'tools:shell'
include 'tools:network-bootstrapper'
include 'example-code'
project(':example-code').projectDir = file("$settingsDir/docs/source/example-code")
include 'samples:attachment-demo'
@@ -20,7 +20,7 @@ jar {
    exclude "META-INF/*.DSA"
    exclude "META-INF/*.RSA"
}
archiveName = "blob-inspector-${version}.jar"
archiveName = "blob-inspector-${corda_release_version}.jar"
manifest {
    attributes(
            'Automatic-Module-Name': 'net.corda.blobinspector',
@@ -30,5 +30,5 @@ jar {
}

publish {
    name 'tools-blob-inspector'
    name 'corda-tools-blob-inspector'
}
@@ -18,13 +18,17 @@ configurations {
    runtimeArtifacts
}

jar {
    baseName "corda-tools-network-bootstrapper"
}

dependencies {
    compile "org.slf4j:slf4j-nop:$slf4j_version"
}

task buildBootstrapperJar(type: FatCapsule, dependsOn: project(':node-api').compileJava) {
    applicationClass 'net.corda.nodeapi.internal.network.NetworkBootstrapper'
    archiveName = "network-bootstrapper-${version}.jar"
    archiveName "tools-network-bootstrapper-${corda_release_version}.jar"
    capsuleManifest {
        applicationVersion = corda_release_version
        systemProperties['visualvm.display.name'] = 'Network Bootstrapper'
@@ -48,6 +52,6 @@ artifacts {
}

publish {
    name 'tools-network-bootstrapper'
    disableDefaultJar = true
    name 'corda-tools-network-bootstrapper'
}
71  tools/network-bootstrapper/build.gradle  (new file)
@@ -0,0 +1,71 @@
buildscript {

    ext.tornadofx_version = '1.7.15'
    ext.controlsfx_version = '8.40.12'

    repositories {
        mavenLocal()
        mavenCentral()
        jcenter()
    }

    dependencies {
        classpath "org.jetbrains.kotlin:kotlin-noarg:$kotlin_version"
        classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
        classpath 'com.github.jengelman.gradle.plugins:shadow:2.0.1'
    }
}

repositories {
    mavenLocal()
    mavenCentral()
    jcenter()
}

apply plugin: 'kotlin'
apply plugin: 'idea'
apply plugin: 'java'
apply plugin: 'application'
apply plugin: 'com.github.johnrengelman.shadow'

dependencies {

    compile "com.microsoft.azure:azure:1.8.0"
    compile "com.github.docker-java:docker-java:3.0.6"

    testCompile "org.jetbrains.kotlin:kotlin-test"
    testCompile "org.jetbrains.kotlin:kotlin-test-junit"

    compile project(':node-api')
    compile project(':node')

    compile group: "com.typesafe", name: "config", version: typesafe_config_version
    compile group: "com.fasterxml.jackson.dataformat", name: "jackson-dataformat-yaml", version: "2.9.0"
    compile group: "com.fasterxml.jackson.core", name: "jackson-databind", version: "2.9.0"
    compile "com.fasterxml.jackson.module:jackson-module-kotlin:2.9.+"
    compile group: 'info.picocli', name: 'picocli', version: '3.0.1'

    // TornadoFX: A lightweight Kotlin framework for working with JavaFX UI's.
    compile "no.tornado:tornadofx:$tornadofx_version"

    compile "org.controlsfx:controlsfx:$controlsfx_version"

}

shadowJar {
    baseName = 'network-bootstrapper'
    classifier = null
    version = null
    zip64 true
    mainClassName = 'net.corda.bootstrapper.Main'
}

task buildNetworkBootstrapper(dependsOn: shadowJar) {
}

configurations {
    compile.exclude group: "log4j", module: "log4j"
    compile.exclude group: "org.apache.logging.log4j"
}
@@ -0,0 +1,48 @@
package net.corda.bootstrapper;

import javafx.scene.control.Alert;
import javafx.scene.control.Label;
import javafx.scene.control.TextArea;
import javafx.scene.layout.GridPane;
import javafx.scene.layout.Priority;
import javafx.stage.StageStyle;

import java.io.PrintWriter;
import java.io.StringWriter;

public class GuiUtils {

    public static void showException(String title, String message, Throwable exception) {
        Alert alert = new Alert(Alert.AlertType.ERROR);
        alert.initStyle(StageStyle.UTILITY);
        alert.setTitle("Exception");
        alert.setHeaderText(title);
        alert.setContentText(message);

        StringWriter sw = new StringWriter();
        PrintWriter pw = new PrintWriter(sw);
        exception.printStackTrace(pw);
        String exceptionText = sw.toString();

        Label label = new Label("Details:");

        TextArea textArea = new TextArea(exceptionText);
        textArea.setEditable(false);
        textArea.setWrapText(true);

        textArea.setMaxWidth(Double.MAX_VALUE);
        textArea.setMaxHeight(Double.MAX_VALUE);
        GridPane.setVgrow(textArea, Priority.ALWAYS);
        GridPane.setHgrow(textArea, Priority.ALWAYS);

        GridPane expContent = new GridPane();
        expContent.setMaxWidth(Double.MAX_VALUE);
        expContent.add(label, 0, 0);
        expContent.add(textArea, 0, 1);

        alert.getDialogPane().setExpandableContent(expContent);

        alert.showAndWait();
    }

}
@@ -0,0 +1,50 @@
package net.corda.bootstrapper

import com.fasterxml.jackson.core.JsonGenerator
import com.fasterxml.jackson.core.JsonParser
import com.fasterxml.jackson.databind.*
import com.fasterxml.jackson.databind.module.SimpleModule
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
import com.fasterxml.jackson.module.kotlin.registerKotlinModule
import com.microsoft.azure.management.resources.ResourceGroup
import com.microsoft.azure.management.resources.fluentcore.arm.Region

class Constants {

    companion object {
        val NODE_P2P_PORT = 10020
        val NODE_SSHD_PORT = 12222
        val NODE_RPC_PORT = 10003
        val NODE_RPC_ADMIN_PORT = 10005

        val BOOTSTRAPPER_DIR_NAME = ".bootstrapper"

        fun getContextMapper(): ObjectMapper {
            val objectMapper = ObjectMapper(YAMLFactory()).registerKotlinModule()
            objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
            objectMapper.registerModule(object : SimpleModule() {}.let {
                it.addSerializer(Region::class.java, object : JsonSerializer<Region>() {
                    override fun serialize(value: Region, gen: JsonGenerator, serializers: SerializerProvider?) {
                        gen.writeString(value.name())
                    }
                })
                it.addDeserializer(Region::class.java, object : JsonDeserializer<Region>() {
                    override fun deserialize(p: JsonParser, ctxt: DeserializationContext): Region {
                        return Region.findByLabelOrName(p.valueAsString)
                    }
                })
            })
            return objectMapper
        }

        val ALPHA_NUMERIC_ONLY_REGEX = "[^\\p{IsAlphabetic}\\p{IsDigit}]".toRegex()
        val ALPHA_NUMERIC_DOT_AND_UNDERSCORE_ONLY_REGEX = "[^\\p{IsAlphabetic}\\p{IsDigit}._]".toRegex()
        val REGION_ARG_NAME = "REGION"

        fun ResourceGroup.restFriendlyName(): String {
            return this.name().replace(ALPHA_NUMERIC_ONLY_REGEX, "").toLowerCase()
        }
    }

}
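A hedged sketch of what the custom context mapper above provides: Azure Region values are written as their plain name and read back via findByLabelOrName, so the network context can round-trip through YAML.

    val mapper = Constants.getContextMapper()
    val yaml = mapper.writeValueAsString(Region.EUROPE_WEST)      // serialised as the region's name, e.g. something like "westeurope"
    val region = mapper.readValue(yaml, Region::class.java)       // deserialised back via findByLabelOrName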
@@ -0,0 +1,44 @@
@file:JvmName("Main")

package net.corda.bootstrapper

import net.corda.bootstrapper.backends.Backend
import net.corda.bootstrapper.backends.Backend.BackendType.AZURE
import net.corda.bootstrapper.cli.AzureParser
import net.corda.bootstrapper.cli.CliParser
import net.corda.bootstrapper.cli.CommandLineInterface
import net.corda.bootstrapper.cli.GuiSwitch
import net.corda.bootstrapper.gui.Gui
import net.corda.bootstrapper.serialization.SerializationEngine
import picocli.CommandLine

fun main(args: Array<String>) {
    SerializationEngine.init()

    val entryPointArgs = GuiSwitch()
    CommandLine(entryPointArgs).parse(*args)

    if (entryPointArgs.usageHelpRequested) {
        CommandLine.usage(AzureParser(), System.out)
        return
    }

    if (entryPointArgs.gui) {
        Gui.main(args)
    } else {
        val baseArgs = CliParser()
        CommandLine(baseArgs).parse(*args)
        val argParser: CliParser = when (baseArgs.backendType) {
            AZURE -> {
                val azureArgs = AzureParser()
                CommandLine(azureArgs).parse(*args)
                azureArgs
            }
            Backend.BackendType.LOCAL_DOCKER -> baseArgs
        }
        CommandLineInterface().run(argParser)
    }
}
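A hedged example of driving the entry point above programmatically; the option names come from GuiSwitch/CliParser further down in this change, and "example-net" is a placeholder network name:

    fun demoLaunch() {
        // Headless run against the local Docker backend; add "-g" to open the GUI instead.
        main(arrayOf("--network-name", "example-net", "--backend", "LOCAL_DOCKER"))
    }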
@@ -0,0 +1,202 @@
package net.corda.bootstrapper

import net.corda.bootstrapper.backends.Backend
import net.corda.bootstrapper.context.Context
import net.corda.bootstrapper.nodes.*
import net.corda.bootstrapper.notaries.NotaryCopier
import net.corda.bootstrapper.notaries.NotaryFinder
import java.io.File
import java.util.concurrent.CompletableFuture
import java.util.concurrent.ConcurrentHashMap

interface NetworkBuilder {

    companion object {
        fun instance(): NetworkBuilder {
            return NetworkBuilderImpl()
        }
    }

    fun onNodeLocated(callback: (FoundNode) -> Unit): NetworkBuilder
    fun onNodeCopied(callback: (CopiedNode) -> Unit): NetworkBuilder
    fun onNodeBuild(callback: (BuiltNode) -> Unit): NetworkBuilder
    fun onNodePushed(callback: (PushedNode) -> Unit): NetworkBuilder
    fun onNodeInstance(callback: (NodeInstance) -> Unit): NetworkBuilder

    fun withNodeCounts(map: Map<String, Int>): NetworkBuilder
    fun withNetworkName(networtName: String): NetworkBuilder
    fun withBasedir(baseDir: File): NetworkBuilder
    fun withBackend(backendType: Backend.BackendType): NetworkBuilder
    fun withBackendOptions(options: Map<String, String>): NetworkBuilder

    fun build(): CompletableFuture<Pair<List<NodeInstance>, Context>>
}

private class NetworkBuilderImpl : NetworkBuilder {

    @Volatile
    private var onNodeLocatedCallback: ((FoundNode) -> Unit) = {}
    @Volatile
    private var onNodeCopiedCallback: ((CopiedNode) -> Unit) = {}
    @Volatile
    private var onNodeBuiltCallback: ((BuiltNode) -> Unit) = {}
    @Volatile
    private var onNodePushedCallback: ((PushedNode) -> Unit) = {}
    @Volatile
    private var onNodeInstanceCallback: ((NodeInstance) -> Unit) = {}
    @Volatile
    private var nodeCounts = mapOf<String, Int>()
    @Volatile
    private lateinit var networkName: String
    @Volatile
    private var workingDir: File? = null
    private val cacheDirName = Constants.BOOTSTRAPPER_DIR_NAME
    @Volatile
    private var backendType = Backend.BackendType.LOCAL_DOCKER
    @Volatile
    private var backendOptions: Map<String, String> = mapOf()

    override fun onNodeLocated(callback: (FoundNode) -> Unit): NetworkBuilder {
        this.onNodeLocatedCallback = callback
        return this
    }

    override fun onNodeCopied(callback: (CopiedNode) -> Unit): NetworkBuilder {
        this.onNodeCopiedCallback = callback
        return this
    }

    override fun onNodeBuild(callback: (BuiltNode) -> Unit): NetworkBuilder {
        this.onNodeBuiltCallback = callback
        return this
    }

    override fun onNodePushed(callback: (PushedNode) -> Unit): NetworkBuilder {
        this.onNodePushedCallback = callback
        return this
    }

    override fun onNodeInstance(callback: (NodeInstance) -> Unit): NetworkBuilder {
        this.onNodeInstanceCallback = callback
        return this
    }

    override fun withNodeCounts(map: Map<String, Int>): NetworkBuilder {
        nodeCounts = ConcurrentHashMap(map.entries.map { it.key.toLowerCase() to it.value }.toMap())
        return this
    }

    override fun withNetworkName(networtName: String): NetworkBuilder {
        this.networkName = networtName
        return this
    }

    override fun withBasedir(baseDir: File): NetworkBuilder {
        this.workingDir = baseDir
        return this
    }

    override fun withBackend(backendType: Backend.BackendType): NetworkBuilder {
        this.backendType = backendType
        return this
    }

    override fun withBackendOptions(options: Map<String, String>): NetworkBuilder {
        this.backendOptions = HashMap(options)
        return this
    }

    override fun build(): CompletableFuture<Pair<List<NodeInstance>, Context>> {
        val cacheDir = File(workingDir, cacheDirName)
        val baseDir = workingDir!!
        val context = Context(networkName, backendType, backendOptions)
        if (cacheDir.exists()) cacheDir.deleteRecursively()
        val (containerPusher, instantiator, volume) = Backend.fromContext(context, cacheDir)
        val nodeFinder = NodeFinder(baseDir)
        val notaryFinder = NotaryFinder(baseDir)
        val notaryCopier = NotaryCopier(cacheDir)

        val nodeInstantiator = NodeInstantiator(instantiator, context)
        val nodeBuilder = NodeBuilder()
        val nodeCopier = NodeCopier(cacheDir)
        val nodePusher = NodePusher(containerPusher, context)

        val nodeDiscoveryFuture = CompletableFuture.supplyAsync {
            val foundNodes = nodeFinder.findNodes()
                    .map { it to nodeCounts.getOrDefault(it.name.toLowerCase(), 1) }
                    .toMap()
            foundNodes
        }

        val notaryDiscoveryFuture = CompletableFuture.supplyAsync {
            val copiedNotaries = notaryFinder.findNotaries()
                    .map { foundNode: FoundNode ->
                        notaryCopier.copyNotary(foundNode)
                    }
            volume.notariesForNetworkParams(copiedNotaries)
            copiedNotaries
        }

        val notariesFuture = notaryDiscoveryFuture.thenCompose { copiedNotaries ->
            copiedNotaries
                    .map { copiedNotary ->
                        nodeBuilder.buildNode(copiedNotary)
                    }.map { builtNotary ->
                        nodePusher.pushNode(builtNotary)
                    }.map { pushedNotary ->
                        pushedNotary.thenApplyAsync { nodeInstantiator.createInstanceRequest(it) }
                    }.map { instanceRequest ->
                        instanceRequest.thenComposeAsync { request ->
                            nodeInstantiator.instantiateNotaryInstance(request)
                        }
                    }.toSingleFuture()
        }

        val nodesFuture = notaryDiscoveryFuture.thenCombineAsync(nodeDiscoveryFuture) { _, nodeCount ->
            nodeCount.keys
                    .map { foundNode ->
                        nodeCopier.copyNode(foundNode).let {
                            onNodeCopiedCallback.invoke(it)
                            it
                        }
                    }.map { copiedNode: CopiedNode ->
                        nodeBuilder.buildNode(copiedNode).let {
                            onNodeBuiltCallback.invoke(it)
                            it
                        }
                    }.map { builtNode ->
                        nodePusher.pushNode(builtNode).thenApplyAsync {
                            onNodePushedCallback.invoke(it)
                            it
                        }
                    }.map { pushedNode ->
                        pushedNode.thenApplyAsync {
                            nodeInstantiator.createInstanceRequests(it, nodeCount)
                        }
                    }.map { instanceRequests ->
                        instanceRequests.thenComposeAsync { requests ->
                            requests.map { request ->
                                nodeInstantiator.instantiateNodeInstance(request)
                                        .thenApplyAsync { nodeInstance ->
                                            context.registerNode(nodeInstance)
                                            onNodeInstanceCallback.invoke(nodeInstance)
                                            nodeInstance
                                        }
                            }.toSingleFuture()
                        }
                    }.toSingleFuture()
        }.thenCompose { it }.thenApplyAsync { it.flatten() }

        return notariesFuture.thenCombineAsync(nodesFuture, { _, nodeInstances ->
            context.networkInitiated = true
            nodeInstances to context
        })
    }
}

fun <T> List<CompletableFuture<T>>.toSingleFuture(): CompletableFuture<List<T>> {
    return CompletableFuture.allOf(*this.toTypedArray()).thenApplyAsync {
        this.map { it.getNow(null) }
    }
}
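A hedged usage sketch of the builder above, mirroring how CommandLineInterface (later in this change) drives it; the directory, network name and node count are placeholders:

    val (instances, context) = NetworkBuilder.instance()
            .withBasedir(File("/path/to/nodes"))                   // directory containing node folders (placeholder)
            .withNetworkName("example-net")                        // placeholder name
            .withNodeCounts(mapOf("nodea" to 2))                   // hypothetical node name
            .withBackend(Backend.BackendType.LOCAL_DOCKER)
            .onNodePushed { pushed -> println("Pushed ${pushed.name}") }
            .build()
            .getOrThrow()                                          // net.corda.core.utilities.getOrThrow, as used by the CLI
    println("Created ${instances.size} node instances for network ${context.networkName}")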
@@ -0,0 +1,63 @@
package net.corda.bootstrapper.backends

import com.microsoft.azure.CloudException
import com.microsoft.azure.credentials.AzureCliCredentials
import com.microsoft.azure.management.Azure
import com.microsoft.rest.LogLevel
import net.corda.bootstrapper.Constants
import net.corda.bootstrapper.containers.instance.azure.AzureInstantiator
import net.corda.bootstrapper.containers.push.azure.AzureContainerPusher
import net.corda.bootstrapper.containers.push.azure.RegistryLocator
import net.corda.bootstrapper.context.Context
import net.corda.bootstrapper.volumes.azure.AzureSmbVolume
import org.slf4j.LoggerFactory
import java.util.concurrent.CompletableFuture

data class AzureBackend(override val containerPusher: AzureContainerPusher,
                        override val instantiator: AzureInstantiator,
                        override val volume: AzureSmbVolume) : Backend {

    companion object {

        val LOG = LoggerFactory.getLogger(AzureBackend::class.java)

        private val azure: Azure = kotlin.run {
            Azure.configure()
                    .withLogLevel(LogLevel.NONE)
                    .authenticate(AzureCliCredentials.create())
                    .withDefaultSubscription()
        }

        fun fromContext(context: Context): AzureBackend {
            val resourceGroupName = context.networkName.replace(Constants.ALPHA_NUMERIC_DOT_AND_UNDERSCORE_ONLY_REGEX, "")
            val resourceGroup = try {
                LOG.info("Attempting to find existing resourceGroup with name: $resourceGroupName")
                val foundResourceGroup = azure.resourceGroups().getByName(resourceGroupName)

                if (foundResourceGroup == null) {
                    LOG.info("No existing resourceGroup found creating new resourceGroup with name: $resourceGroupName")
                    azure.resourceGroups().define(resourceGroupName).withRegion(context.extraParams[Constants.REGION_ARG_NAME]).create()
                } else {
                    LOG.info("Found existing resourceGroup, reusing")
                    foundResourceGroup
                }
            } catch (e: CloudException) {
                throw RuntimeException(e)
            }

            val registryLocatorFuture = CompletableFuture.supplyAsync {
                RegistryLocator(azure, resourceGroup)
            }
            val containerPusherFuture = registryLocatorFuture.thenApplyAsync {
                AzureContainerPusher(azure, it.registry)
            }
            val azureNetworkStore = CompletableFuture.supplyAsync { AzureSmbVolume(azure, resourceGroup) }
            val azureInstantiatorFuture = azureNetworkStore.thenCombine(registryLocatorFuture,
                    { azureVolume, registryLocator ->
                        AzureInstantiator(azure, registryLocator.registry, azureVolume, resourceGroup)
                    }
            )
            return AzureBackend(containerPusherFuture.get(), azureInstantiatorFuture.get(), azureNetworkStore.get())
        }
    }
}
@@ -0,0 +1,40 @@
package net.corda.bootstrapper.backends

import net.corda.bootstrapper.backends.Backend.BackendType.AZURE
import net.corda.bootstrapper.backends.Backend.BackendType.LOCAL_DOCKER
import net.corda.bootstrapper.containers.instance.Instantiator
import net.corda.bootstrapper.containers.push.ContainerPusher
import net.corda.bootstrapper.context.Context
import net.corda.bootstrapper.volumes.Volume
import java.io.File

interface Backend {
    companion object {
        fun fromContext(context: Context, baseDir: File): Backend {
            return when (context.backendType) {
                AZURE -> AzureBackend.fromContext(context)
                LOCAL_DOCKER -> DockerBackend.fromContext(context, baseDir)
            }
        }
    }

    val containerPusher: ContainerPusher
    val instantiator: Instantiator
    val volume: Volume

    enum class BackendType {
        AZURE, LOCAL_DOCKER
    }

    operator fun component1(): ContainerPusher {
        return containerPusher
    }

    operator fun component2(): Instantiator {
        return instantiator
    }

    operator fun component3(): Volume {
        return volume
    }
}
@@ -0,0 +1,25 @@
package net.corda.bootstrapper.backends

import net.corda.bootstrapper.containers.instance.docker.DockerInstantiator
import net.corda.bootstrapper.containers.push.docker.DockerContainerPusher
import net.corda.bootstrapper.context.Context
import net.corda.bootstrapper.volumes.docker.LocalVolume
import java.io.File

class DockerBackend(override val containerPusher: DockerContainerPusher,
                    override val instantiator: DockerInstantiator,
                    override val volume: LocalVolume) : Backend {

    companion object {
        fun fromContext(context: Context, baseDir: File): DockerBackend {
            val dockerContainerPusher = DockerContainerPusher()
            val localVolume = LocalVolume(baseDir, context)
            val dockerInstantiator = DockerInstantiator(localVolume, context)
            return DockerBackend(dockerContainerPusher, dockerInstantiator, localVolume)
        }
    }

}
@@ -0,0 +1,69 @@
package net.corda.bootstrapper.cli

import com.fasterxml.jackson.databind.ObjectMapper
import net.corda.bootstrapper.Constants
import net.corda.bootstrapper.NetworkBuilder
import net.corda.bootstrapper.backends.Backend
import net.corda.bootstrapper.context.Context
import net.corda.bootstrapper.nodes.NodeAdder
import net.corda.bootstrapper.nodes.NodeInstantiator
import net.corda.bootstrapper.toSingleFuture
import net.corda.core.utilities.getOrThrow
import java.io.File

class CommandLineInterface {

    fun run(parsedArgs: CliParser) {
        val baseDir = parsedArgs.baseDirectory
        val cacheDir = File(baseDir, Constants.BOOTSTRAPPER_DIR_NAME)
        val networkName = parsedArgs.name
        val objectMapper = Constants.getContextMapper()
        val contextFile = File(cacheDir, "$networkName.yaml")
        if (parsedArgs.isNew()) {
            val (_, context) = NetworkBuilder.instance()
                    .withBasedir(baseDir)
                    .withNetworkName(networkName)
                    .withNodeCounts(parsedArgs.nodes)
                    .onNodeBuild { builtNode -> println("Built node: ${builtNode.name} to image: ${builtNode.localImageId}") }
                    .onNodePushed { pushedNode -> println("Pushed node: ${pushedNode.name} to: ${pushedNode.remoteImageName}") }
                    .onNodeInstance { instance ->
                        println("Instance of ${instance.name} with id: ${instance.nodeInstanceName} on address: " +
                                "${instance.reachableAddress} {ssh:${instance.portMapping[Constants.NODE_SSHD_PORT]}, " +
                                "p2p:${instance.portMapping[Constants.NODE_P2P_PORT]}}")
                    }
                    .withBackend(parsedArgs.backendType)
                    .withBackendOptions(parsedArgs.backendOptions())
                    .build().getOrThrow()
            persistContext(contextFile, objectMapper, context)
        } else {
            val context = setupContextFromExisting(contextFile, objectMapper, networkName)
            val (_, instantiator, _) = Backend.fromContext(context, cacheDir)
            val nodeAdder = NodeAdder(context, NodeInstantiator(instantiator, context))
            parsedArgs.nodesToAdd.map {
                nodeAdder.addNode(context, Constants.ALPHA_NUMERIC_ONLY_REGEX.replace(it.toLowerCase(), ""))
            }.toSingleFuture().getOrThrow()
            persistContext(contextFile, objectMapper, context)
        }

    }

    private fun setupContextFromExisting(contextFile: File, objectMapper: ObjectMapper, networkName: String): Context {
        return contextFile.let {
            if (it.exists()) {
                it.inputStream().use {
                    objectMapper.readValue(it, Context::class.java)
                }
            } else {
                throw IllegalStateException("No existing network context found")
            }
        }
    }

    private fun persistContext(contextFile: File, objectMapper: ObjectMapper, context: Context?) {
        contextFile.outputStream().use {
            objectMapper.writeValue(it, context)
        }
    }
}
@@ -0,0 +1,68 @@
package net.corda.bootstrapper.cli

import com.microsoft.azure.management.resources.fluentcore.arm.Region
import net.corda.bootstrapper.Constants
import net.corda.bootstrapper.backends.Backend
import picocli.CommandLine
import picocli.CommandLine.Option
import java.io.File

open class GuiSwitch {

    @Option(names = ["-h", "--help"], usageHelp = true, description = ["display this help message"])
    var usageHelpRequested: Boolean = false

    @Option(names = ["-g", "--gui"], description = ["Run in Gui Mode"])
    var gui = false

    @CommandLine.Unmatched
    var unmatched = arrayListOf<String>()
}

open class CliParser : GuiSwitch() {

    @Option(names = ["-n", "--network-name"], description = ["The resource grouping to use"], required = true)
    lateinit var name: String

    @Option(names = ["-d", "--nodes-directory"], description = ["The directory to search for nodes in"])
    var baseDirectory = File(System.getProperty("user.dir"))

    @Option(names = ["-b", "--backend"], description = ["The backend to use when instantiating nodes"])
    var backendType: Backend.BackendType = Backend.BackendType.LOCAL_DOCKER

    @Option(names = ["-nodes"], split = ":", description = ["The number of each node to create NodeX:2 will create two instances of NodeX"])
    var nodes: MutableMap<String, Int> = hashMapOf()

    @Option(names = ["--add", "-a"])
    var nodesToAdd: MutableList<String> = arrayListOf()

    fun isNew(): Boolean {
        return nodesToAdd.isEmpty()
    }

    open fun backendOptions(): Map<String, String> {
        return emptyMap()
    }

}

class AzureParser : CliParser() {

    companion object {
        val regions = Region.values().map { it.name() to it }.toMap()
    }

    @Option(names = ["-r", "--region"], description = ["The azure region to use"], converter = [RegionConverter::class])
    var region: Region = Region.EUROPE_WEST

    class RegionConverter : CommandLine.ITypeConverter<Region> {
        override fun convert(value: String): Region {
            return regions[value] ?: throw Error("Unknown azure region: $value")
        }
    }

    override fun backendOptions(): Map<String, String> {
        return mapOf(Constants.REGION_ARG_NAME to region.name())
    }

}
@@ -0,0 +1,7 @@
package net.corda.bootstrapper.containers.instance

data class InstanceInfo(val groupId: String,
                        val instanceName: String,
                        val instanceAddress: String,
                        val reachableAddress: String,
                        val portMapping: Map<Int, Int> = emptyMap())
@ -0,0 +1,18 @@
package net.corda.bootstrapper.containers.instance

import java.util.concurrent.CompletableFuture

interface Instantiator {

    fun instantiateContainer(imageId: String,
                             portsToOpen: List<Int>,
                             instanceName: String,
                             env: Map<String, String>? = null): CompletableFuture<Pair<String, Map<Int, Int>>>

    companion object {
        val ADDITIONAL_NODE_INFOS_PATH = "/opt/corda/additional-node-infos"
    }

    fun getExpectedFQDN(instanceName: String): String
}
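A hedged sketch of how a caller might consume the future returned by instantiateContainer; the instantiator instance, image id and ports are placeholders, not part of this commit.

// Illustrative only: reacts to the (reachable address, port mapping) pair when the container is up.
fun startExampleNode(instantiator: Instantiator) {
    instantiator.instantiateContainer(
            imageId = "corda-node:latest",      // hypothetical image id
            portsToOpen = listOf(10200, 10201), // hypothetical ports
            instanceName = "example-node0")
            .thenAccept { (address, portMapping) ->
                println("example-node0 reachable at $address with ports $portMapping")
            }
}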
@ -0,0 +1,85 @@
package net.corda.bootstrapper.containers.instance.azure

import com.microsoft.azure.management.Azure
import com.microsoft.azure.management.containerinstance.ContainerGroup
import com.microsoft.azure.management.containerinstance.ContainerGroupRestartPolicy
import com.microsoft.azure.management.containerregistry.Registry
import com.microsoft.azure.management.resources.ResourceGroup
import com.microsoft.rest.ServiceCallback
import net.corda.bootstrapper.Constants.Companion.restFriendlyName
import net.corda.bootstrapper.containers.instance.Instantiator
import net.corda.bootstrapper.containers.instance.Instantiator.Companion.ADDITIONAL_NODE_INFOS_PATH
import net.corda.bootstrapper.containers.push.azure.RegistryLocator.Companion.parseCredentials
import net.corda.bootstrapper.volumes.azure.AzureSmbVolume
import org.slf4j.LoggerFactory
import java.util.concurrent.CompletableFuture

class AzureInstantiator(private val azure: Azure,
                        private val registry: Registry,
                        private val azureSmbVolume: AzureSmbVolume,
                        private val resourceGroup: ResourceGroup) : Instantiator {

    override fun instantiateContainer(imageId: String,
                                      portsToOpen: List<Int>,
                                      instanceName: String,
                                      env: Map<String, String>?): CompletableFuture<Pair<String, Map<Int, Int>>> {

        findAndKillExistingContainerGroup(resourceGroup, buildIdent(instanceName))

        LOG.info("Starting instantiation of container: $instanceName using $imageId")
        val registryAddress = registry.loginServerUrl()
        val (username, password) = registry.parseCredentials()
        val mountName = "node-setup"
        val future = CompletableFuture<Pair<String, Map<Int, Int>>>().also {
            azure.containerGroups().define(buildIdent(instanceName))
                    .withRegion(resourceGroup.region())
                    .withExistingResourceGroup(resourceGroup)
                    .withLinux()
                    .withPrivateImageRegistry(registryAddress, username, password)
                    .defineVolume(mountName)
                    .withExistingReadWriteAzureFileShare(azureSmbVolume.shareName)
                    .withStorageAccountName(azureSmbVolume.storageAccountName)
                    .withStorageAccountKey(azureSmbVolume.storageAccountKey)
                    .attach()
                    .defineContainerInstance(instanceName)
                    .withImage(imageId)
                    .withExternalTcpPorts(*portsToOpen.toIntArray())
                    .withVolumeMountSetting(mountName, ADDITIONAL_NODE_INFOS_PATH)
                    .withEnvironmentVariables(env ?: emptyMap())
                    .attach().withRestartPolicy(ContainerGroupRestartPolicy.ON_FAILURE)
                    .withDnsPrefix(buildIdent(instanceName))
                    .createAsync(object : ServiceCallback<ContainerGroup> {
                        override fun failure(t: Throwable?) {
                            it.completeExceptionally(t)
                        }

                        override fun success(result: ContainerGroup) {
                            val fqdn = result.fqdn()
                            LOG.info("Completed instantiation: $instanceName is running at $fqdn with port(s) $portsToOpen exposed")
                            it.complete(fqdn to portsToOpen.map { port -> port to port }.toMap())
                        }
                    })
        }
        return future
    }

    private fun buildIdent(instanceName: String) = "$instanceName-${resourceGroup.restFriendlyName()}"

    override fun getExpectedFQDN(instanceName: String): String {
        return "${buildIdent(instanceName)}.${resourceGroup.region().name()}.azurecontainer.io"
    }

    fun findAndKillExistingContainerGroup(resourceGroup: ResourceGroup, containerName: String): ContainerGroup? {
        val existingContainer = azure.containerGroups().getByResourceGroup(resourceGroup.name(), containerName)
        if (existingContainer != null) {
            LOG.info("Found an existing instance of: $containerName, destroying ContainerGroup")
            azure.containerGroups().deleteByResourceGroup(resourceGroup.name(), containerName)
        }
        return existingContainer
    }

    companion object {
        val LOG = LoggerFactory.getLogger(AzureInstantiator::class.java)
    }
}
@ -0,0 +1,99 @@
package net.corda.bootstrapper.containers.instance.docker

import com.github.dockerjava.api.model.*
import net.corda.bootstrapper.Constants
import net.corda.bootstrapper.containers.instance.Instantiator
import net.corda.bootstrapper.context.Context
import net.corda.bootstrapper.docker.DockerUtils
import net.corda.bootstrapper.volumes.docker.LocalVolume
import org.slf4j.LoggerFactory
import java.util.concurrent.CompletableFuture

class DockerInstantiator(private val volume: LocalVolume,
                         private val context: Context) : Instantiator {

    val networkId = setupNetwork()

    override fun instantiateContainer(imageId: String,
                                      portsToOpen: List<Int>,
                                      instanceName: String,
                                      env: Map<String, String>?): CompletableFuture<Pair<String, Map<Int, Int>>> {

        val localClient = DockerUtils.createLocalDockerClient()
        val convertedEnv = buildDockerEnv(env)
        val nodeInfosVolume = Volume(Instantiator.ADDITIONAL_NODE_INFOS_PATH)
        val existingContainers = localClient.listContainersCmd().withShowAll(true).exec()
                .map { it.names.first() to it }
                .filter { it.first.endsWith(instanceName) }
        existingContainers.forEach { (_, container) ->
            try {
                localClient.killContainerCmd(container.id).exec()
                LOG.info("Found running container: $instanceName, killed")
            } catch (e: Throwable) {
                // container not running
            }
            try {
                localClient.removeContainerCmd(container.id).exec()
                LOG.info("Found existing container: $instanceName, removed")
            } catch (e: Throwable) {
                // this *only* occurs if the container had been previously scheduled for removal
                // but removal did not complete before this attempt was begun.
            }
        }
        LOG.info("starting local docker instance of: $imageId with name $instanceName and env: $env")
        val ports = (portsToOpen + Constants.NODE_RPC_ADMIN_PORT).map { ExposedPort.tcp(it) }.map { PortBinding(null, it) }.let { Ports(*it.toTypedArray()) }
        val createCmd = localClient.createContainerCmd(imageId)
                .withName(instanceName)
                .withVolumes(nodeInfosVolume)
                .withBinds(Bind(volume.getPath(), nodeInfosVolume))
                .withPortBindings(ports)
                .withExposedPorts(ports.bindings.map { it.key })
                .withPublishAllPorts(true)
                .withNetworkMode(networkId)
                .withEnv(convertedEnv).exec()

        localClient.startContainerCmd(createCmd.id).exec()
        val foundContainer = localClient.listContainersCmd().exec()
                .firstOrNull { it.id == createCmd.id }

        val portMappings = foundContainer?.ports?.map {
            (it.privatePort ?: 0) to (it.publicPort ?: 0)
        }?.toMap()
                ?: portsToOpen.map { it to it }.toMap()

        return CompletableFuture.completedFuture("localhost" to portMappings)
    }

    private fun buildDockerEnv(env: Map<String, String>?) =
            (env ?: emptyMap()).entries.map { (key, value) -> "$key=$value" }.toList()

    override fun getExpectedFQDN(instanceName: String): String {
        return instanceName
    }

    private fun setupNetwork(): String {
        val createLocalDockerClient = DockerUtils.createLocalDockerClient()
        val existingNetworks = createLocalDockerClient.listNetworksCmd().withNameFilter(context.safeNetworkName).exec()
        return if (existingNetworks.isNotEmpty()) {
            if (existingNetworks.size > 1) {
                throw IllegalStateException("Multiple local docker networks found with name ${context.safeNetworkName}")
            } else {
                LOG.info("Found existing network with name: ${context.safeNetworkName}, reusing")
                existingNetworks.first().id
            }
        } else {
            val result = createLocalDockerClient.createNetworkCmd().withName(context.safeNetworkName).exec()
            LOG.info("Created local docker network: ${result.id} with name: ${context.safeNetworkName}")
            result.id
        }
    }

    companion object {
        val LOG = LoggerFactory.getLogger(DockerInstantiator::class.java)
    }
}
@ -0,0 +1,9 @@
package net.corda.bootstrapper.containers.push

import java.util.concurrent.CompletableFuture

interface ContainerPusher {
    fun pushContainerToImageRepository(localImageId: String,
                                       remoteImageName: String,
                                       networkName: String): CompletableFuture<String>
}
@ -0,0 +1,62 @@
package net.corda.bootstrapper.containers.push.azure

import com.github.dockerjava.api.async.ResultCallback
import com.github.dockerjava.api.model.PushResponseItem
import com.microsoft.azure.management.Azure
import com.microsoft.azure.management.containerregistry.Registry
import net.corda.bootstrapper.containers.push.ContainerPusher
import net.corda.bootstrapper.containers.push.azure.RegistryLocator.Companion.parseCredentials
import net.corda.bootstrapper.docker.DockerUtils
import org.slf4j.LoggerFactory
import java.io.Closeable
import java.util.concurrent.CompletableFuture

class AzureContainerPusher(private val azure: Azure, private val azureRegistry: Registry) : ContainerPusher {

    override fun pushContainerToImageRepository(localImageId: String,
                                                remoteImageName: String,
                                                networkName: String): CompletableFuture<String> {

        val (registryUser, registryPassword) = azureRegistry.parseCredentials()
        val dockerClient = DockerUtils.createDockerClient(
                azureRegistry.loginServerUrl(),
                registryUser,
                registryPassword)

        val privateRepoUrl = "${azureRegistry.loginServerUrl()}/$remoteImageName".toLowerCase()
        dockerClient.tagImageCmd(localImageId, privateRepoUrl, networkName).exec()
        val result = CompletableFuture<String>()
        dockerClient.pushImageCmd("$privateRepoUrl:$networkName")
                .withAuthConfig(dockerClient.authConfig())
                .exec(object : ResultCallback<PushResponseItem> {
                    override fun onComplete() {
                        LOG.info("completed PUSH image: $localImageId to registryURL: $privateRepoUrl:$networkName")
                        result.complete("$privateRepoUrl:$networkName")
                    }

                    override fun close() {
                    }

                    override fun onNext(`object`: PushResponseItem) {
                    }

                    override fun onError(throwable: Throwable?) {
                        result.completeExceptionally(throwable)
                    }

                    override fun onStart(closeable: Closeable?) {
                        LOG.info("starting PUSH image: $localImageId to registryURL: $privateRepoUrl:$networkName")
                    }
                })
        return result
    }

    companion object {
        val LOG = LoggerFactory.getLogger(AzureContainerPusher::class.java)
    }
}
@ -0,0 +1,55 @@
package net.corda.bootstrapper.containers.push.azure

import com.microsoft.azure.management.Azure
import com.microsoft.azure.management.containerregistry.AccessKeyType
import com.microsoft.azure.management.containerregistry.Registry
import com.microsoft.azure.management.resources.ResourceGroup
import net.corda.bootstrapper.Constants.Companion.restFriendlyName
import org.slf4j.LoggerFactory

class RegistryLocator(private val azure: Azure,
                      private val resourceGroup: ResourceGroup) {

    val registry: Registry = locateRegistry()

    private fun locateRegistry(): Registry {
        LOG.info("Attempting to find existing registry with name: ${resourceGroup.restFriendlyName()}")
        val found = azure.containerRegistries().getByResourceGroup(resourceGroup.name(), resourceGroup.restFriendlyName())

        if (found == null) {
            LOG.info("Did not find existing container registry - creating new registry with name ${resourceGroup.restFriendlyName()}")
            return azure.containerRegistries()
                    .define(resourceGroup.restFriendlyName())
                    .withRegion(resourceGroup.region().name())
                    .withExistingResourceGroup(resourceGroup)
                    .withBasicSku()
                    .withRegistryNameAsAdminUser()
                    .create()
        } else {
            LOG.info("Found existing registry with name: ${resourceGroup.restFriendlyName()}, reusing")
            return found
        }
    }

    companion object {
        fun Registry.parseCredentials(): Pair<String, String> {
            val credentials = this.credentials
            return credentials.username() to
                    (credentials.accessKeys()[AccessKeyType.PRIMARY]
                            ?: throw IllegalStateException("no registry password found"))
        }

        val LOG = LoggerFactory.getLogger(RegistryLocator::class.java)
    }
}
@ -0,0 +1,15 @@
package net.corda.bootstrapper.containers.push.docker

import net.corda.bootstrapper.containers.push.ContainerPusher
import net.corda.bootstrapper.docker.DockerUtils
import java.util.concurrent.CompletableFuture

class DockerContainerPusher : ContainerPusher {

    override fun pushContainerToImageRepository(localImageId: String, remoteImageName: String, networkName: String): CompletableFuture<String> {
        val dockerClient = DockerUtils.createLocalDockerClient()
        dockerClient.tagImageCmd(localImageId, remoteImageName, networkName).withForce().exec()
        return CompletableFuture.completedFuture("$remoteImageName:$networkName")
    }
}
@ -0,0 +1,69 @@
package net.corda.bootstrapper.context

import net.corda.bootstrapper.Constants
import net.corda.bootstrapper.backends.Backend
import net.corda.bootstrapper.nodes.NodeInstanceRequest
import net.corda.core.identity.CordaX500Name
import org.apache.activemq.artemis.utils.collections.ConcurrentHashSet
import java.util.concurrent.ConcurrentHashMap

class Context(val networkName: String, val backendType: Backend.BackendType, backendOptions: Map<String, String> = emptyMap()) {

    @Volatile
    var safeNetworkName: String = networkName.replace(Constants.ALPHA_NUMERIC_ONLY_REGEX, "").toLowerCase()

    @Volatile
    var nodes: MutableMap<String, ConcurrentHashSet<PersistableNodeInstance>> = ConcurrentHashMap()

    @Volatile
    var networkInitiated: Boolean = false

    @Volatile
    var extraParams = ConcurrentHashMap<String, String>(backendOptions)

    private fun registerNode(name: String, nodeInstanceRequest: NodeInstanceRequest) {
        nodes.computeIfAbsent(name) { ConcurrentHashSet() }.add(nodeInstanceRequest.toPersistable())
    }

    fun registerNode(request: NodeInstanceRequest) {
        registerNode(request.name, request)
    }

    data class PersistableNodeInstance(
            val groupName: String,
            val groupX500: CordaX500Name?,
            val instanceName: String,
            val instanceX500: String,
            val localImageId: String?,
            val remoteImageName: String,
            val rpcPort: Int?,
            val fqdn: String,
            val rpcUser: String,
            val rpcPassword: String)

    companion object {
        fun fromInstanceRequest(nodeInstanceRequest: NodeInstanceRequest): PersistableNodeInstance {
            return PersistableNodeInstance(
                    nodeInstanceRequest.name,
                    nodeInstanceRequest.nodeConfig.myLegalName,
                    nodeInstanceRequest.nodeInstanceName,
                    nodeInstanceRequest.actualX500,
                    nodeInstanceRequest.localImageId,
                    nodeInstanceRequest.remoteImageName,
                    nodeInstanceRequest.nodeConfig.rpcOptions.address!!.port,
                    nodeInstanceRequest.expectedFqName,
                    "",
                    ""
            )
        }
    }

    fun NodeInstanceRequest.toPersistable(): PersistableNodeInstance {
        return fromInstanceRequest(this)
    }
}
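A hedged sketch of round-tripping this Context to disk, mirroring the persistContext / setupContextFromExisting helpers earlier in this diff; the mapper configuration and file name are assumptions (the bootstrapper itself obtains its mapper from Constants.getContextMapper()).

// Illustrative only: writes a Context out and reads it back in with an assumed YAML mapper.
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
import com.fasterxml.jackson.module.kotlin.registerKotlinModule
import java.io.File

fun saveAndReload(contextDir: File): Context {
    val mapper = ObjectMapper(YAMLFactory()).registerKotlinModule() // assumed mapper configuration
    val contextFile = File(contextDir, "network-context.yaml")      // hypothetical file name
    val context = Context("my-network", Backend.BackendType.LOCAL_DOCKER)
    contextFile.outputStream().use { mapper.writeValue(it, context) }
    return contextFile.inputStream().use { mapper.readValue(it, Context::class.java) }
}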
@ -0,0 +1,35 @@
package net.corda.bootstrapper.docker

import com.github.dockerjava.api.DockerClient
import com.github.dockerjava.core.DefaultDockerClientConfig
import com.github.dockerjava.core.DockerClientBuilder
import com.github.dockerjava.core.DockerClientConfig
import org.apache.commons.lang3.SystemUtils

object DockerUtils {

    @Throws(Exception::class)
    fun createDockerClient(registryServerUrl: String, username: String, password: String): DockerClient {
        return DockerClientBuilder.getInstance(createDockerClientConfig(registryServerUrl, username, password))
                .build()
    }

    fun createLocalDockerClient(): DockerClient {
        return if (SystemUtils.IS_OS_WINDOWS) {
            DockerClientBuilder.getInstance("tcp://127.0.0.1:2375").build()
        } else {
            DockerClientBuilder.getInstance().build()
        }
    }

    private fun createDockerClientConfig(registryServerUrl: String, username: String, password: String): DockerClientConfig {
        return DefaultDockerClientConfig.createDefaultConfigBuilder()
                .withDockerTlsVerify(false)
                .withRegistryUrl(registryServerUrl)
                .withRegistryUsername(username)
                .withRegistryPassword(password)
                .build()
    }
}
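A hedged usage sketch for DockerUtils, assuming a local Docker daemon is reachable; the printed fields are just an example of inspecting the client.

// Illustrative only: pings the daemon and lists locally available images.
fun listLocalImages() {
    val client = DockerUtils.createLocalDockerClient()
    client.pingCmd().exec()                       // fails fast if the daemon is unreachable
    client.listImagesCmd().exec().forEach { image ->
        println("${image.id} -> ${image.repoTags?.joinToString()}")
    }
}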
@ -0,0 +1,323 @@
package net.corda.bootstrapper.gui

import com.microsoft.azure.management.resources.fluentcore.arm.Region
import javafx.beans.property.SimpleObjectProperty
import javafx.collections.transformation.SortedList
import javafx.event.EventHandler
import javafx.scene.control.ChoiceDialog
import javafx.scene.control.TableView.CONSTRAINED_RESIZE_POLICY
import javafx.scene.control.TextInputDialog
import javafx.scene.input.MouseEvent
import javafx.scene.layout.Priority
import javafx.stage.DirectoryChooser
import net.corda.bootstrapper.Constants
import net.corda.bootstrapper.GuiUtils
import net.corda.bootstrapper.NetworkBuilder
import net.corda.bootstrapper.backends.Backend
import net.corda.bootstrapper.context.Context
import net.corda.bootstrapper.nodes.*
import net.corda.bootstrapper.notaries.NotaryFinder
import org.apache.commons.lang3.RandomStringUtils
import tornadofx.*
import java.io.File
import java.util.*
import java.util.concurrent.CompletableFuture
import kotlin.collections.ArrayList

class BootstrapperView : View("Network Bootstrapper") {

    val YAML_MAPPER = Constants.getContextMapper()

    val controller: State by inject()

    val textarea = textarea {
        maxWidth = Double.MAX_VALUE
        maxHeight = Double.MAX_VALUE
    }

    override val root = vbox {

        menubar {
            menu("File") {
                item("Open") {
                    action {
                        selectNodeDirectory().thenAcceptAsync { (notaries: List<FoundNode>, nodes: List<FoundNode>) ->
                            controller.nodes(nodes)
                            controller.notaries(notaries)
                        }
                    }
                }

                item("Build") {
                    enableWhen(controller.baseDir.isNotNull)
                    action {
                        controller.clear()
                        val availableBackends = getAvailableBackends()
                        val backend = ChoiceDialog<Backend.BackendType>(availableBackends.first(), availableBackends).showAndWait()
                        var networkName = "gui-network"
                        backend.ifPresent { selectedBackEnd ->

                            val backendParams = when (selectedBackEnd) {
                                Backend.BackendType.LOCAL_DOCKER -> {
                                    emptyMap<String, String>()
                                }
                                Backend.BackendType.AZURE -> {
                                    val defaultName = RandomStringUtils.randomAlphabetic(4) + "-network"
                                    val textInputDialog = TextInputDialog(defaultName)
                                    textInputDialog.title = "Choose Network Name"
                                    networkName = textInputDialog.showAndWait().orElseGet { defaultName }
                                    mapOf(Constants.REGION_ARG_NAME to ChoiceDialog<Region>(Region.EUROPE_WEST, Region.values().toList().sortedBy { it.name() }).showAndWait().get().name())
                                }
                            }

                            val nodeCount = controller.foundNodes.map { it.id to it.count }.toMap()
                            val result = NetworkBuilder.instance()
                                    .withBasedir(controller.baseDir.get())
                                    .withNetworkName(networkName)
                                    .onNodeBuild(controller::addBuiltNode)
                                    .onNodePushed(controller::addPushedNode)
                                    .onNodeInstance(controller::addInstance)
                                    .withBackend(selectedBackEnd)
                                    .withNodeCounts(nodeCount)
                                    .withBackendOptions(backendParams)
                                    .build()
                            result.handle { v, t ->
                                runLater {
                                    if (t != null) {
                                        GuiUtils.showException("Failed to build network", "Failure due to", t)
                                    } else {
                                        controller.networkContext.set(v.second)
                                    }
                                }
                            }
                        }
                    }
                }

                item("Add Node") {
                    enableWhen(controller.networkContext.isNotNull)
                    action {
                        val foundNodes = controller.foundNodes.map { it.id }
                        val nodeToAdd = ChoiceDialog<String>(foundNodes.first(), *foundNodes.toTypedArray()).showAndWait()
                        val context = controller.networkContext.value
                        nodeToAdd.ifPresent { node ->
                            runLater {
                                val (_, instantiator, _) = Backend.fromContext(
                                        context,
                                        File(controller.baseDir.get(), Constants.BOOTSTRAPPER_DIR_NAME))
                                val nodeAdder = NodeAdder(context, NodeInstantiator(instantiator, context))
                                nodeAdder.addNode(context, node).handleAsync { instanceInfo, t ->
                                    t?.let {
                                        GuiUtils.showException("Failed", "Failed to add node", it)
                                    }
                                    instanceInfo?.let {
                                        runLater {
                                            controller.addInstance(NodeInstanceTableEntry(
                                                    it.groupId,
                                                    it.instanceName,
                                                    it.instanceAddress,
                                                    it.reachableAddress,
                                                    it.portMapping[Constants.NODE_P2P_PORT] ?: Constants.NODE_P2P_PORT,
                                                    it.portMapping[Constants.NODE_SSHD_PORT]
                                                            ?: Constants.NODE_SSHD_PORT))
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        hbox {
            vbox {
                label("Nodes to build")
                val foundNodesTable = tableview(controller.foundNodes) {
                    readonlyColumn("ID", FoundNodeTableEntry::id)
                    column("Count", FoundNodeTableEntry::count).makeEditable()
                    vgrow = Priority.ALWAYS
                    hgrow = Priority.ALWAYS
                }
                foundNodesTable.columnResizePolicy = CONSTRAINED_RESIZE_POLICY
                label("Notaries to build")
                val notaryListView = listview(controller.foundNotaries) {
                    vgrow = Priority.ALWAYS
                    hgrow = Priority.ALWAYS
                }
                notaryListView.cellFormat { text = it.name }
                vgrow = Priority.ALWAYS
                hgrow = Priority.ALWAYS
            }

            vbox {
                label("Built Nodes")
                tableview(controller.builtNodes) {
                    readonlyColumn("ID", BuiltNodeTableEntry::id)
                    readonlyColumn("LocalImageId", BuiltNodeTableEntry::localImageId)
                    columnResizePolicy = CONSTRAINED_RESIZE_POLICY
                    vgrow = Priority.ALWAYS
                    hgrow = Priority.ALWAYS
                }

                label("Pushed Nodes")
                tableview(controller.pushedNodes) {
                    readonlyColumn("ID", PushedNode::name)
                    readonlyColumn("RemoteImageId", PushedNode::remoteImageName)
                    columnResizePolicy = CONSTRAINED_RESIZE_POLICY
                    vgrow = Priority.ALWAYS
                    hgrow = Priority.ALWAYS
                }
                vgrow = Priority.ALWAYS
                hgrow = Priority.ALWAYS
            }

            borderpane {
                top = vbox {
                    label("Instances")
                    tableview(controller.nodeInstances) {
                        onMouseClicked = EventHandler<MouseEvent> { _ ->
                            textarea.text = YAML_MAPPER.writeValueAsString(selectionModel.selectedItem)
                        }
                        readonlyColumn("ID", NodeInstanceTableEntry::id)
                        readonlyColumn("InstanceId", NodeInstanceTableEntry::nodeInstanceName)
                        readonlyColumn("Address", NodeInstanceTableEntry::address)
                        columnResizePolicy = CONSTRAINED_RESIZE_POLICY
                    }
                }
                center = textarea
                vgrow = Priority.ALWAYS
                hgrow = Priority.ALWAYS
            }

            vgrow = Priority.ALWAYS
            hgrow = Priority.ALWAYS
        }

    }

    private fun getAvailableBackends(): List<Backend.BackendType> {
        return Backend.BackendType.values().toMutableList()
    }

    fun selectNodeDirectory(): CompletableFuture<Pair<List<FoundNode>, List<FoundNode>>> {
        val fileChooser = DirectoryChooser()
        fileChooser.initialDirectory = File(System.getProperty("user.home"))
        val file = fileChooser.showDialog(null)
        controller.baseDir.set(file)
        return processSelectedDirectory(file)
    }

    fun processSelectedDirectory(dir: File): CompletableFuture<Pair<List<FoundNode>, List<FoundNode>>> {
        val foundNodes = CompletableFuture.supplyAsync {
            val nodeFinder = NodeFinder(dir)
            nodeFinder.findNodes()
        }
        val foundNotaries = CompletableFuture.supplyAsync {
            val notaryFinder = NotaryFinder(dir)
            notaryFinder.findNotaries()
        }
        return foundNodes.thenCombine(foundNotaries) { nodes, notaries ->
            notaries to nodes
        }
    }
}

class State : Controller() {

    val foundNodes = Collections.synchronizedList(ArrayList<FoundNodeTableEntry>()).observable()
    val builtNodes = Collections.synchronizedList(ArrayList<BuiltNodeTableEntry>()).observable()
    val pushedNodes = Collections.synchronizedList(ArrayList<PushedNode>()).observable()

    private val backingUnsortedInstances = Collections.synchronizedList(ArrayList<NodeInstanceTableEntry>()).observable()
    val nodeInstances = SortedList(backingUnsortedInstances, COMPARATOR)

    val foundNotaries = Collections.synchronizedList(ArrayList<FoundNode>()).observable()
    val networkContext = SimpleObjectProperty<Context>(null)

    fun clear() {
        builtNodes.clear()
        pushedNodes.clear()
        backingUnsortedInstances.clear()
        networkContext.set(null)
    }

    fun nodes(nodes: List<FoundNode>) {
        foundNodes.clear()
        nodes.forEach { addFoundNode(it) }
    }

    fun notaries(notaries: List<FoundNode>) {
        foundNotaries.clear()
        notaries.forEach { runLater { foundNotaries.add(it) } }
    }

    var baseDir = SimpleObjectProperty<File>(null)

    fun addFoundNode(foundNode: FoundNode) {
        runLater {
            foundNodes.add(FoundNodeTableEntry(foundNode.name))
        }
    }

    fun addBuiltNode(builtNode: BuiltNode) {
        runLater {
            builtNodes.add(BuiltNodeTableEntry(builtNode.name, builtNode.localImageId))
        }
    }

    fun addPushedNode(pushedNode: PushedNode) {
        runLater {
            pushedNodes.add(pushedNode)
        }
    }

    fun addInstance(nodeInstance: NodeInstance) {
        runLater {
            backingUnsortedInstances.add(NodeInstanceTableEntry(
                    nodeInstance.name,
                    nodeInstance.nodeInstanceName,
                    nodeInstance.expectedFqName,
                    nodeInstance.reachableAddress,
                    nodeInstance.portMapping[Constants.NODE_P2P_PORT] ?: Constants.NODE_P2P_PORT,
                    nodeInstance.portMapping[Constants.NODE_SSHD_PORT] ?: Constants.NODE_SSHD_PORT))
        }
    }

    fun addInstance(nodeInstance: NodeInstanceTableEntry) {
        runLater {
            backingUnsortedInstances.add(nodeInstance)
        }
    }

    companion object {
        val COMPARATOR: (NodeInstanceTableEntry, NodeInstanceTableEntry) -> Int = { o1, o2 ->
            if (o1.id == o2.id) {
                o1.nodeInstanceName.compareTo(o2.nodeInstanceName)
            } else {
                o1.id.compareTo(o2.id)
            }
        }
    }
}

data class FoundNodeTableEntry(val id: String,
                               @Volatile var count: Int = 1)

data class BuiltNodeTableEntry(val id: String, val localImageId: String)

data class NodeInstanceTableEntry(val id: String,
                                  val nodeInstanceName: String,
                                  val address: String,
                                  val locallyReachableAddress: String,
                                  val rpcPort: Int,
                                  val sshPort: Int)
@ -0,0 +1,11 @@
package net.corda.bootstrapper.gui

import javafx.application.Application
import tornadofx.App

class Gui : App(BootstrapperView::class) {
    companion object {
        @JvmStatic
        fun main(args: Array<String>) = Application.launch(Gui::class.java, *args)
    }
}
@ -0,0 +1,22 @@
package net.corda.bootstrapper.nodes

import net.corda.node.services.config.NodeConfiguration
import java.io.File

open class BuiltNode(configFile: File, baseDirectory: File,
                     copiedNodeConfig: File, copiedNodeDir: File,
                     val nodeConfig: NodeConfiguration, val localImageId: String) : CopiedNode(configFile, baseDirectory, copiedNodeConfig, copiedNodeDir) {

    override fun toString(): String {
        return "BuiltNode(" +
                "nodeConfig=$nodeConfig," +
                "localImageId='$localImageId'" +
                ")" +
                " ${super.toString()}"
    }

    fun toPushedNode(remoteImageName: String): PushedNode {
        return PushedNode(configFile, baseDirectory, copiedNodeConfig, copiedNodeDir, nodeConfig, localImageId, remoteImageName)
    }
}
@ -0,0 +1,40 @@
package net.corda.bootstrapper.nodes

import net.corda.node.services.config.NodeConfiguration
import java.io.File

open class CopiedNode(configFile: File, baseDirectory: File,
                      open val copiedNodeConfig: File, open val copiedNodeDir: File) :
        FoundNode(configFile, baseDirectory) {

    constructor(foundNode: FoundNode, copiedNodeConfig: File, copiedNodeDir: File) : this(
            foundNode.configFile, foundNode.baseDirectory, copiedNodeConfig, copiedNodeDir
    )

    operator fun component4(): File {
        return copiedNodeDir
    }

    operator fun component5(): File {
        return copiedNodeConfig
    }

    fun builtNode(nodeConfig: NodeConfiguration, imageId: String): BuiltNode {
        return BuiltNode(configFile, baseDirectory, copiedNodeConfig, copiedNodeDir, nodeConfig, imageId)
    }

    override fun toString(): String {
        return "CopiedNode(" +
                "copiedNodeConfig=$copiedNodeConfig," +
                "copiedNodeDir=$copiedNodeDir" +
                ")" +
                " ${super.toString()}"
    }

    fun toBuiltNode(nodeConfig: NodeConfiguration, localImageId: String): BuiltNode {
        return BuiltNode(this.configFile, this.baseDirectory, this.copiedNodeConfig, this.copiedNodeDir, nodeConfig, localImageId)
    }
}
@ -0,0 +1,53 @@
package net.corda.bootstrapper.nodes

import java.io.File

open class FoundNode(open val configFile: File,
                     open val baseDirectory: File = configFile.parentFile,
                     val name: String = configFile.parentFile.name.toLowerCase().replace(net.corda.bootstrapper.Constants.ALPHA_NUMERIC_ONLY_REGEX, "")) {

    operator fun component1(): File {
        return baseDirectory
    }

    operator fun component2(): File {
        return configFile
    }

    operator fun component3(): String {
        return name
    }

    override fun equals(other: Any?): Boolean {
        if (this === other) return true
        if (javaClass != other?.javaClass) return false

        other as FoundNode

        if (configFile != other.configFile) return false
        if (baseDirectory != other.baseDirectory) return false
        if (name != other.name) return false

        return true
    }

    override fun hashCode(): Int {
        var result = configFile.hashCode()
        result = 31 * result + baseDirectory.hashCode()
        result = 31 * result + name.hashCode()
        return result
    }

    override fun toString(): String {
        return "FoundNode(name='$name', configFile=$configFile, baseDirectory=$baseDirectory)"
    }

    fun toCopiedNode(copiedNodeConfig: File, copiedNodeDir: File): CopiedNode {
        return CopiedNode(this.configFile, this.baseDirectory, copiedNodeConfig, copiedNodeDir)
    }
}
@ -0,0 +1,26 @@
package net.corda.bootstrapper.nodes

import net.corda.bootstrapper.containers.instance.InstanceInfo
import net.corda.bootstrapper.context.Context
import java.util.concurrent.CompletableFuture

class NodeAdder(val context: Context,
                val nodeInstantiator: NodeInstantiator) {

    fun addNode(context: Context, nodeGroupName: String): CompletableFuture<InstanceInfo> {
        return synchronized(context) {
            val nodeGroup = context.nodes[nodeGroupName]!!
            val nodeInfo = nodeGroup.iterator().next()
            val currentNodeSize = nodeGroup.size
            val newInstanceX500 = nodeInfo.groupX500!!.copy(commonName = nodeInfo.groupX500.commonName + currentNodeSize).toString()
            val newInstanceName = nodeGroupName + currentNodeSize
            val nextNodeInfo = nodeInfo.copy(
                    instanceX500 = newInstanceX500,
                    instanceName = newInstanceName,
                    fqdn = nodeInstantiator.expectedFqdn(newInstanceName)
            )
            nodeGroup.add(nextNodeInfo)
            nodeInstantiator.instantiateNodeInstance(nextNodeInfo)
        }
    }
}
@ -0,0 +1,48 @@
package net.corda.bootstrapper.nodes

import com.github.dockerjava.core.command.BuildImageResultCallback
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import com.typesafe.config.ConfigValueFactory
import net.corda.bootstrapper.docker.DockerUtils
import net.corda.node.services.config.NodeConfiguration
import net.corda.node.services.config.parseAsNodeConfiguration
import org.slf4j.LoggerFactory
import java.io.File

open class NodeBuilder {

    companion object {
        val LOG = LoggerFactory.getLogger(NodeBuilder::class.java)
    }

    fun buildNode(copiedNode: CopiedNode): BuiltNode {
        val localDockerClient = DockerUtils.createLocalDockerClient()
        val copiedNodeConfig = copiedNode.copiedNodeConfig
        val nodeDir = copiedNodeConfig.parentFile
        if (!copiedNodeConfig.exists()) {
            throw IllegalStateException("There is no nodeConfig for dir: $copiedNodeConfig")
        }
        val nodeConfig = ConfigFactory.parseFile(copiedNodeConfig)
        LOG.info("starting to build docker image for: $nodeDir")
        val nodeImageId = localDockerClient.buildImageCmd()
                .withDockerfile(File(nodeDir, "Dockerfile"))
                .withBaseDirectory(nodeDir)
                .exec(BuildImageResultCallback()).awaitImageId()
        LOG.info("finished building docker image for: $nodeDir with id: $nodeImageId")
        val config = nodeConfig.parseAsNodeConfigWithFallback(ConfigFactory.parseFile(copiedNode.configFile))
        return copiedNode.builtNode(config, nodeImageId)
    }
}

fun Config.parseAsNodeConfigWithFallback(preCopyConfig: Config): NodeConfiguration {
    val nodeConfig = this
            .withValue("baseDirectory", ConfigValueFactory.fromAnyRef(""))
            .withFallback(ConfigFactory.parseResources("reference.conf"))
            .withFallback(preCopyConfig)
            .resolve()
    return nodeConfig.parseAsNodeConfiguration()
}
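A hedged sketch of the fallback chain that parseAsNodeConfigWithFallback relies on: values missing from the copied config are taken from reference.conf and then from the pre-copy config. The literal key/value pairs below are illustrative only.

// Illustrative only: Typesafe Config fallback resolution with assumed example values.
import com.typesafe.config.ConfigFactory
import com.typesafe.config.ConfigValueFactory

fun fallbackExample() {
    val copied = ConfigFactory.parseMap(mapOf("myLegalName" to "O=BankA, L=London, C=GB"))  // hypothetical
    val preCopy = ConfigFactory.parseMap(mapOf("p2pAddress" to "banka.example.com:10200")) // hypothetical
    val merged = copied
            .withValue("baseDirectory", ConfigValueFactory.fromAnyRef(""))
            .withFallback(preCopy)
            .resolve()
    println(merged.getString("p2pAddress")) // resolved from the pre-copy config
}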
@ -0,0 +1,101 @@
package net.corda.bootstrapper.nodes

import com.typesafe.config.ConfigFactory
import com.typesafe.config.ConfigRenderOptions
import com.typesafe.config.ConfigValue
import org.slf4j.LoggerFactory
import java.io.File

open class NodeCopier(private val cacheDir: File) {

    fun copyNode(foundNode: FoundNode): CopiedNode {
        val nodeCacheDir = File(cacheDir, foundNode.baseDirectory.name)
        nodeCacheDir.deleteRecursively()
        LOG.info("copying: ${foundNode.baseDirectory} to $nodeCacheDir")
        foundNode.baseDirectory.copyRecursively(nodeCacheDir, overwrite = true)
        copyBootstrapperFiles(nodeCacheDir)
        val configInCacheDir = File(nodeCacheDir, "node.conf")
        LOG.info("Applying precanned config $configInCacheDir")
        val rpcSettings = getDefaultRpcSettings()
        val sshSettings = getDefaultSshSettings()
        mergeConfigs(configInCacheDir, rpcSettings, sshSettings)
        return CopiedNode(foundNode, configInCacheDir, nodeCacheDir)
    }

    fun copyBootstrapperFiles(nodeCacheDir: File) {
        this.javaClass.classLoader.getResourceAsStream("node-Dockerfile").use { nodeDockerFileInStream ->
            val nodeDockerFile = File(nodeCacheDir, "Dockerfile")
            nodeDockerFile.outputStream().use { nodeDockerFileOutStream ->
                nodeDockerFileInStream.copyTo(nodeDockerFileOutStream)
            }
        }

        this.javaClass.classLoader.getResourceAsStream("run-corda-node.sh").use { nodeRunScriptInStream ->
            val nodeRunScriptFile = File(nodeCacheDir, "run-corda.sh")
            nodeRunScriptFile.outputStream().use { nodeRunScriptOutStream ->
                nodeRunScriptInStream.copyTo(nodeRunScriptOutStream)
            }
        }

        this.javaClass.classLoader.getResourceAsStream("node_info_watcher.sh").use { nodeInfoWatcherInStream ->
            val nodeInfoWatcherFile = File(nodeCacheDir, "node_info_watcher.sh")
            nodeInfoWatcherFile.outputStream().use { nodeInfoWatcherOutStream ->
                nodeInfoWatcherInStream.copyTo(nodeInfoWatcherOutStream)
            }
        }
    }

    internal fun getDefaultRpcSettings(): ConfigValue {
        return javaClass
                .classLoader
                .getResourceAsStream("rpc-settings.conf")
                .reader().use {
                    ConfigFactory.parseReader(it)
                }.getValue("rpcSettings")
    }

    internal fun getDefaultSshSettings(): ConfigValue {
        return javaClass
                .classLoader
                .getResourceAsStream("ssh.conf")
                .reader().use {
                    ConfigFactory.parseReader(it)
                }.getValue("sshd")
    }

    internal fun mergeConfigs(configInCacheDir: File,
                              rpcSettings: ConfigValue,
                              sshSettings: ConfigValue,
                              mergeMode: Mode = Mode.NODE) {
        var trimmedConfig = ConfigFactory.parseFile(configInCacheDir)
                .withoutPath("compatibilityZoneURL")
                .withValue("rpcSettings", rpcSettings)
                .withValue("sshd", sshSettings)

        if (mergeMode == Mode.NODE) {
            trimmedConfig = trimmedConfig.withoutPath("p2pAddress")
        }

        configInCacheDir.outputStream().use {
            trimmedConfig.root().render(ConfigRenderOptions
                    .defaults()
                    .setOriginComments(false)
                    .setComments(false)
                    .setFormatted(true)
                    .setJson(false)).byteInputStream().copyTo(it)
        }
    }

    internal enum class Mode {
        NOTARY, NODE
    }

    companion object {
        val LOG = LoggerFactory.getLogger(NodeCopier::class.java)
    }
}
@ -0,0 +1,32 @@
package net.corda.bootstrapper.nodes

import com.typesafe.config.ConfigFactory
import net.corda.bootstrapper.Constants
import org.slf4j.LoggerFactory
import java.io.File

class NodeFinder(private val scratchDir: File) {

    fun findNodes(): List<FoundNode> {
        return scratchDir.walkBottomUp().filter { it.name == "node.conf" && !it.absolutePath.contains(Constants.BOOTSTRAPPER_DIR_NAME) }.map {
            try {
                ConfigFactory.parseFile(it) to it
            } catch (t: Throwable) {
                null
            }
        }.filterNotNull()
                .filter { !it.first.hasPath("notary") }
                .map { (_, nodeConfigFile) ->
                    LOG.info("We've found a node with name: ${nodeConfigFile.parentFile.name}")
                    FoundNode(nodeConfigFile, nodeConfigFile.parentFile)
                }.toList()
    }

    companion object {
        val LOG = LoggerFactory.getLogger(NodeFinder::class.java)
    }
}
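A hedged usage sketch: scanning a directory tree for node folders with the NodeFinder above. The directory name is illustrative only.

// Illustrative only: lists the non-notary nodes discovered under a hypothetical directory.
fun scanExample() {
    val baseDir = File(System.getProperty("user.dir"), "my-network") // hypothetical directory
    val nodes = NodeFinder(baseDir).findNodes()
    nodes.forEach { println("found node '${it.name}' at ${it.baseDirectory}") }
}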
@ -0,0 +1,31 @@
package net.corda.bootstrapper.nodes

import net.corda.node.services.config.NodeConfiguration
import java.io.File

class NodeInstance(configFile: File,
                   baseDirectory: File,
                   copiedNodeConfig: File,
                   copiedNodeDir: File,
                   nodeConfig: NodeConfiguration,
                   localImageId: String,
                   remoteImageName: String,
                   nodeInstanceName: String,
                   actualX500: String,
                   expectedFqName: String,
                   val reachableAddress: String,
                   val portMapping: Map<Int, Int>) :
        NodeInstanceRequest(
                configFile,
                baseDirectory,
                copiedNodeConfig,
                copiedNodeDir,
                nodeConfig,
                localImageId,
                remoteImageName,
                nodeInstanceName,
                actualX500,
                expectedFqName
        )
@ -0,0 +1,24 @@
package net.corda.bootstrapper.nodes

import net.corda.node.services.config.NodeConfiguration
import java.io.File

open class NodeInstanceRequest(configFile: File, baseDirectory: File,
                               copiedNodeConfig: File, copiedNodeDir: File,
                               nodeConfig: NodeConfiguration, localImageId: String, remoteImageName: String,
                               internal val nodeInstanceName: String,
                               internal val actualX500: String,
                               internal val expectedFqName: String) : PushedNode(
        configFile, baseDirectory, copiedNodeConfig, copiedNodeDir, nodeConfig, localImageId, remoteImageName
) {

    override fun toString(): String {
        return "NodeInstanceRequest(nodeInstanceName='$nodeInstanceName', actualX500='$actualX500', expectedFqName='$expectedFqName') ${super.toString()}"
    }

    fun toNodeInstance(reachableAddress: String, portMapping: Map<Int, Int>): NodeInstance {
        return NodeInstance(configFile, baseDirectory, copiedNodeConfig, copiedNodeDir, nodeConfig, localImageId, remoteImageName, nodeInstanceName, actualX500, expectedFqName, reachableAddress, portMapping)
    }
}
@ -0,0 +1,95 @@
package net.corda.bootstrapper.nodes

import net.corda.bootstrapper.Constants
import net.corda.bootstrapper.containers.instance.InstanceInfo
import net.corda.bootstrapper.containers.instance.Instantiator
import net.corda.bootstrapper.context.Context
import net.corda.core.identity.CordaX500Name
import java.util.concurrent.CompletableFuture

class NodeInstantiator(val instantiator: Instantiator,
                       val context: Context) {

    fun createInstanceRequests(pushedNode: PushedNode, nodeCount: Map<FoundNode, Int>): List<NodeInstanceRequest> {

        val namedMap = nodeCount.map { it.key.name.toLowerCase() to it.value }.toMap()

        return (0 until (namedMap[pushedNode.name.toLowerCase()] ?: 1)).map { i ->
            createInstanceRequest(pushedNode, i)
        }
    }

    private fun createInstanceRequest(node: PushedNode, i: Int): NodeInstanceRequest {
        val nodeInstanceName = node.name + i
        val expectedName = instantiator.getExpectedFQDN(nodeInstanceName)
        return node.toNodeInstanceRequest(nodeInstanceName, buildX500(node.nodeConfig.myLegalName, i), expectedName)
    }

    fun createInstanceRequest(node: PushedNode): NodeInstanceRequest {
        return createInstanceRequest(node, 0)
    }

    private fun buildX500(baseX500: CordaX500Name, i: Int): String {
        if (i == 0) {
            return baseX500.toString()
        }
        return baseX500.copy(commonName = (baseX500.commonName ?: "") + i).toString()
    }

    fun instantiateNodeInstance(request: Context.PersistableNodeInstance): CompletableFuture<InstanceInfo> {
        return instantiateNodeInstance(request.remoteImageName, request.rpcPort!!, request.instanceName, request.fqdn, request.instanceX500).thenApplyAsync {
            InstanceInfo(request.groupName, request.instanceName, request.fqdn, it.first, it.second)
        }
    }

    fun instantiateNodeInstance(request: NodeInstanceRequest): CompletableFuture<NodeInstance> {
        return instantiateNodeInstance(request.remoteImageName, request.nodeConfig.rpcOptions.address!!.port, request.nodeInstanceName, request.expectedFqName, request.actualX500)
                .thenApplyAsync { (reachableName, portMapping) ->
                    request.toNodeInstance(reachableName, portMapping)
                }
    }

    fun instantiateNotaryInstance(request: NodeInstanceRequest): CompletableFuture<NodeInstance> {
        return instantiateNotaryInstance(request.remoteImageName, request.nodeConfig.rpcOptions.address!!.port, request.nodeInstanceName, request.expectedFqName)
                .thenApplyAsync { (reachableName, portMapping) ->
                    request.toNodeInstance(reachableName, portMapping)
                }
    }

    private fun instantiateNotaryInstance(remoteImageName: String,
                                          rpcPort: Int,
                                          nodeInstanceName: String,
                                          expectedFqName: String): CompletableFuture<Pair<String, Map<Int, Int>>> {
        return instantiator.instantiateContainer(
                remoteImageName,
                listOf(Constants.NODE_P2P_PORT, Constants.NODE_RPC_PORT, Constants.NODE_SSHD_PORT),
                nodeInstanceName,
                mapOf("OUR_NAME" to expectedFqName,
                        "OUR_PORT" to Constants.NODE_P2P_PORT.toString())
        )
    }

    private fun instantiateNodeInstance(remoteImageName: String,
                                        rpcPort: Int,
                                        nodeInstanceName: String,
                                        expectedFqName: String,
                                        actualX500: String): CompletableFuture<Pair<String, Map<Int, Int>>> {

        return instantiator.instantiateContainer(
                remoteImageName,
                listOf(Constants.NODE_P2P_PORT, Constants.NODE_RPC_PORT, Constants.NODE_SSHD_PORT),
                nodeInstanceName,
                mapOf("OUR_NAME" to expectedFqName,
                        "OUR_PORT" to Constants.NODE_P2P_PORT.toString(),
                        "X500" to actualX500)
        )
    }

    fun expectedFqdn(newInstanceName: String): String {
        return instantiator.getExpectedFQDN(newInstanceName)
    }
}
@ -0,0 +1,19 @@
package net.corda.bootstrapper.nodes

import net.corda.bootstrapper.containers.push.ContainerPusher
import net.corda.bootstrapper.context.Context
import java.util.concurrent.CompletableFuture

class NodePusher(private val containerPusher: ContainerPusher,
                 private val context: Context) {

    fun pushNode(builtNode: BuiltNode): CompletableFuture<PushedNode> {
        val localImageId = builtNode.localImageId
        val nodeImageIdentifier = "node-${builtNode.name}"
        val nodeImageNameFuture = containerPusher.pushContainerToImageRepository(localImageId,
                nodeImageIdentifier, context.networkName)
        return nodeImageNameFuture.thenApplyAsync { imageName -> builtNode.toPushedNode(imageName) }
    }
}
@ -0,0 +1,25 @@
package net.corda.bootstrapper.nodes

import net.corda.node.services.config.NodeConfiguration
import java.io.File

open class PushedNode(configFile: File, baseDirectory: File,
                      copiedNodeConfig: File, copiedNodeDir: File,
                      nodeConfig: NodeConfiguration, localImageId: String, val remoteImageName: String) : BuiltNode(
        configFile,
        baseDirectory,
        copiedNodeConfig,
        copiedNodeDir,
        nodeConfig,
        localImageId
) {
    fun toNodeInstanceRequest(nodeInstanceName: String, actualX500: String, expectedFqName: String): NodeInstanceRequest {
        return NodeInstanceRequest(configFile, baseDirectory, copiedNodeConfig, copiedNodeDir, nodeConfig, localImageId, remoteImageName, nodeInstanceName, actualX500, expectedFqName)
    }

    override fun toString(): String {
        return "PushedNode(remoteImageName='$remoteImageName') ${super.toString()}"
    }
}
@ -0,0 +1,14 @@
package net.corda.bootstrapper.notaries

import net.corda.bootstrapper.nodes.CopiedNode
import java.io.File

class CopiedNotary(configFile: File, baseDirectory: File,
                   copiedNodeConfig: File, copiedNodeDir: File, val nodeInfoFile: File) :
        CopiedNode(configFile, baseDirectory, copiedNodeConfig, copiedNodeDir)

fun CopiedNode.toNotary(nodeInfoFile: File): CopiedNotary {
    return CopiedNotary(this.configFile, this.baseDirectory, this.copiedNodeConfig, this.copiedNodeDir, nodeInfoFile)
}
@ -0,0 +1,69 @@
package net.corda.bootstrapper.notaries

import net.corda.bootstrapper.nodes.CopiedNode
import net.corda.bootstrapper.nodes.FoundNode
import net.corda.bootstrapper.nodes.NodeCopier
import org.slf4j.LoggerFactory
import java.io.File

class NotaryCopier(val cacheDir: File) : NodeCopier(cacheDir) {

    fun copyNotary(foundNotary: FoundNode): CopiedNotary {
        val nodeCacheDir = File(cacheDir, foundNotary.baseDirectory.name)
        nodeCacheDir.deleteRecursively()
        LOG.info("copying: ${foundNotary.baseDirectory} to $nodeCacheDir")
        foundNotary.baseDirectory.copyRecursively(nodeCacheDir, overwrite = true)
        copyNotaryBootstrapperFiles(nodeCacheDir)
        val configInCacheDir = File(nodeCacheDir, "node.conf")
        LOG.info("Applying precanned config $configInCacheDir")
        val rpcSettings = getDefaultRpcSettings()
        val sshSettings = getDefaultSshSettings()
        mergeConfigs(configInCacheDir, rpcSettings, sshSettings, Mode.NOTARY)
        val generatedNodeInfo = generateNodeInfo(nodeCacheDir)
        return CopiedNode(foundNotary, configInCacheDir, nodeCacheDir).toNotary(generatedNodeInfo)
    }

    fun generateNodeInfo(dirToGenerateFrom: File): File {
        val nodeInfoGeneratorProcess = ProcessBuilder()
                .command(listOf("java", "-jar", "corda.jar", "--just-generate-node-info"))
                .directory(dirToGenerateFrom)
                .inheritIO()
                .start()

        val exitCode = nodeInfoGeneratorProcess.waitFor()
        if (exitCode != 0) {
            throw IllegalStateException("Failed to generate nodeInfo for notary: $dirToGenerateFrom")
        }
        val nodeInfoFile = dirToGenerateFrom.listFiles().filter { it.name.startsWith("nodeInfo") }.single()
        return nodeInfoFile
    }

    private fun copyNotaryBootstrapperFiles(nodeCacheDir: File) {
        this.javaClass.classLoader.getResourceAsStream("notary-Dockerfile").use { nodeDockerFileInStream ->
            val nodeDockerFile = File(nodeCacheDir, "Dockerfile")
            nodeDockerFile.outputStream().use { nodeDockerFileOutStream ->
                nodeDockerFileInStream.copyTo(nodeDockerFileOutStream)
            }
        }

        this.javaClass.classLoader.getResourceAsStream("run-corda-notary.sh").use { nodeRunScriptInStream ->
            val nodeRunScriptFile = File(nodeCacheDir, "run-corda.sh")
            nodeRunScriptFile.outputStream().use { nodeRunScriptOutStream ->
                nodeRunScriptInStream.copyTo(nodeRunScriptOutStream)
            }
        }

        this.javaClass.classLoader.getResourceAsStream("node_info_watcher.sh").use { nodeInfoWatcherInStream ->
            val nodeInfoWatcherFile = File(nodeCacheDir, "node_info_watcher.sh")
            nodeInfoWatcherFile.outputStream().use { nodeInfoWatcherOutStream ->
                nodeInfoWatcherInStream.copyTo(nodeInfoWatcherOutStream)
            }
        }
    }

    companion object {
        val LOG = LoggerFactory.getLogger(NotaryCopier::class.java)
    }
}
@ -0,0 +1,25 @@
package net.corda.bootstrapper.notaries

import com.typesafe.config.ConfigFactory
import net.corda.bootstrapper.Constants
import net.corda.bootstrapper.nodes.FoundNode
import java.io.File

class NotaryFinder(private val dirToSearch: File) {

    fun findNotaries(): List<FoundNode> {
        return dirToSearch.walkBottomUp().filter { it.name == "node.conf" && !it.absolutePath.contains(Constants.BOOTSTRAPPER_DIR_NAME) }
                .map {
                    try {
                        ConfigFactory.parseFile(it) to it
                    } catch (t: Throwable) {
                        null
                    }
                }.filterNotNull()
                .filter { it.first.hasPath("notary") }
                .map { (_, nodeConfigFile) ->
                    FoundNode(nodeConfigFile)
                }.toList()
    }
}
@ -0,0 +1,30 @@
package net.corda.bootstrapper.serialization

import net.corda.core.serialization.internal.SerializationEnvironmentImpl
import net.corda.core.serialization.internal.nodeSerializationEnv
import net.corda.node.serialization.amqp.AMQPServerSerializationScheme
import net.corda.serialization.internal.AMQP_P2P_CONTEXT
import net.corda.serialization.internal.AMQP_STORAGE_CONTEXT
import net.corda.serialization.internal.SerializationFactoryImpl

class SerializationEngine {
    companion object {
        fun init() {
            synchronized(this) {
                if (nodeSerializationEnv == null) {
                    val classloader = this.javaClass.classLoader
                    nodeSerializationEnv = SerializationEnvironmentImpl(
                            SerializationFactoryImpl().apply {
                                registerScheme(AMQPServerSerializationScheme(emptyList()))
                            },
                            p2pContext = AMQP_P2P_CONTEXT.withClassLoader(classloader),
                            rpcServerContext = AMQP_P2P_CONTEXT.withClassLoader(classloader),
                            storageContext = AMQP_STORAGE_CONTEXT.withClassLoader(classloader),
                            checkpointContext = AMQP_P2P_CONTEXT.withClassLoader(classloader)
                    )
                }
            }
        }
    }
}
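A hedged note on usage: nodeSerializationEnv must be initialised before any Corda (de)serialization is attempted, which is why the Volume companion object below calls SerializationEngine.init() before touching SignedNodeInfo files. The call site shown here is illustrative only.

// Illustrative only: initialise serialization once, early in the tool's startup.
fun prepareSerialization() {
    SerializationEngine.init()
    // from this point it is safe to call deserialize<SignedNodeInfo>() and friends
}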
@ -0,0 +1,56 @@
package net.corda.bootstrapper.volumes

import com.microsoft.azure.storage.file.CloudFile
import com.typesafe.config.ConfigFactory
import net.corda.bootstrapper.notaries.CopiedNotary
import net.corda.bootstrapper.serialization.SerializationEngine
import net.corda.core.node.NetworkParameters
import net.corda.core.node.NotaryInfo
import net.corda.core.serialization.deserialize
import net.corda.nodeapi.internal.DEV_ROOT_CA
import net.corda.nodeapi.internal.SignedNodeInfo
import net.corda.nodeapi.internal.createDevNetworkMapCa
import java.io.File
import java.security.cert.X509Certificate
import java.time.Instant

interface Volume {
    fun notariesForNetworkParams(notaries: List<CopiedNotary>)

    companion object {
        init {
            SerializationEngine.init()
        }

        internal val networkMapCa = createDevNetworkMapCa(DEV_ROOT_CA)
        internal val networkMapCert: X509Certificate = networkMapCa.certificate
        internal val keyPair = networkMapCa.keyPair
    }

    fun CloudFile.uploadFromByteArray(array: ByteArray) {
        this.uploadFromByteArray(array, 0, array.size)
    }

    fun convertNodeIntoToNetworkParams(notaryFiles: List<Pair<File, File>>): NetworkParameters {
        val notaryInfos = notaryFiles.map { (configFile, nodeInfoFile) ->
            val validating = ConfigFactory.parseFile(configFile).getConfig("notary").getBoolean("validating")
            nodeInfoFile.readBytes().deserialize<SignedNodeInfo>().verified().let { NotaryInfo(it.legalIdentities.first(), validating) }
        }

        return notaryInfos.let {
            NetworkParameters(
                    minimumPlatformVersion = 1,
                    notaries = it,
                    maxMessageSize = 10485760,
                    maxTransactionSize = Int.MAX_VALUE,
                    modifiedTime = Instant.now(),
                    epoch = 10,
                    whitelistedContractImplementations = emptyMap())
        }
    }
}
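A hedged sketch of the contract every Volume implementation follows, using a hypothetical in-memory implementation (not in the diff) purely to show the convert-then-sign flow that the Azure and local variants below both repeat.

    import net.corda.bootstrapper.notaries.CopiedNotary
    import net.corda.core.internal.signWithCert
    import net.corda.core.serialization.serialize

    // Hypothetical implementation for illustration - it keeps the signed bytes in memory.
    class InMemoryVolume : Volume {
        var signedNetworkParams: ByteArray? = null

        override fun notariesForNetworkParams(notaries: List<CopiedNotary>) {
            val params = convertNodeIntoToNetworkParams(notaries.map { it.configFile to it.nodeInfoFile })
            // Sign with the dev network-map CA exposed by Volume.Companion.
            signedNetworkParams = params.signWithCert(Volume.keyPair.private, Volume.networkMapCert).serialize().bytes
        }
    }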
@ -0,0 +1,81 @@
package net.corda.bootstrapper.volumes.azure

import com.microsoft.azure.management.Azure
import com.microsoft.azure.management.resources.ResourceGroup
import com.microsoft.azure.management.storage.StorageAccount
import com.microsoft.azure.storage.CloudStorageAccount
import net.corda.bootstrapper.Constants.Companion.restFriendlyName
import net.corda.bootstrapper.notaries.CopiedNotary
import net.corda.bootstrapper.volumes.Volume
import net.corda.bootstrapper.volumes.Volume.Companion.keyPair
import net.corda.bootstrapper.volumes.Volume.Companion.networkMapCert
import net.corda.core.internal.signWithCert
import net.corda.core.serialization.serialize
import net.corda.nodeapi.internal.network.NETWORK_PARAMS_FILE_NAME
import org.slf4j.LoggerFactory

class AzureSmbVolume(private val azure: Azure, private val resourceGroup: ResourceGroup) : Volume {

    private val storageAccount = getStorageAccount()
    private val accKeys = storageAccount.keys[0]

    private val cloudFileShare = CloudStorageAccount.parse(
            "DefaultEndpointsProtocol=https;" +
                    "AccountName=${storageAccount.name()};" +
                    "AccountKey=${accKeys.value()};" +
                    "EndpointSuffix=core.windows.net"
    )
            .createCloudFileClient()
            .getShareReference("nodeinfos")

    val networkParamsFolder = cloudFileShare.rootDirectoryReference.getDirectoryReference("network-params")
    val shareName: String = cloudFileShare.name
    val storageAccountName: String
        get() = resourceGroup.restFriendlyName()
    val storageAccountKey: String
        get() = accKeys.value()

    init {
        while (true) {
            try {
                cloudFileShare.createIfNotExists()
                networkParamsFolder.createIfNotExists()
                break
            } catch (e: Throwable) {
                LOG.debug("storage account not ready, waiting")
                Thread.sleep(5000)
            }
        }
    }

    private fun getStorageAccount(): StorageAccount {
        return azure.storageAccounts().getByResourceGroup(resourceGroup.name(), resourceGroup.restFriendlyName())
                ?: azure.storageAccounts().define(resourceGroup.restFriendlyName())
                        .withRegion(resourceGroup.region())
                        .withExistingResourceGroup(resourceGroup)
                        .withAccessFromAllNetworks()
                        .create()
    }

    override fun notariesForNetworkParams(notaries: List<CopiedNotary>) {
        val networkParamsFile = networkParamsFolder.getFileReference(NETWORK_PARAMS_FILE_NAME)
        networkParamsFile.deleteIfExists()
        LOG.info("Storing network-params in AzureFile location: " + networkParamsFile.uri)
        val networkParameters = convertNodeIntoToNetworkParams(notaries.map { it.configFile to it.nodeInfoFile })
        networkParamsFile.uploadFromByteArray(networkParameters.signWithCert(keyPair.private, networkMapCert).serialize().bytes)
    }

    companion object {
        val LOG = LoggerFactory.getLogger(AzureSmbVolume::class.java)
    }
}
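A hedged construction sketch (not from the diff): the credentials file and resource-group name are placeholders, and it assumes the caller authenticates against Azure with the management SDK in the usual way rather than via any bootstrapper-specific helper.

    import com.microsoft.azure.management.Azure
    import java.io.File

    fun createAzureVolume(): AzureSmbVolume {
        // Placeholder auth file and resource group - substitute real values.
        val azure = Azure.authenticate(File("azure-auth.json")).withDefaultSubscription()
        val resourceGroup = azure.resourceGroups().getByName("my-bootstrapper-group")
        // The constructor retries every 5s until the "nodeinfos" share can be created.
        return AzureSmbVolume(azure, resourceGroup)
    }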
@ -0,0 +1,35 @@
package net.corda.bootstrapper.volumes.docker

import net.corda.bootstrapper.context.Context
import net.corda.bootstrapper.notaries.CopiedNotary
import net.corda.bootstrapper.volumes.Volume
import net.corda.core.internal.signWithCert
import net.corda.core.serialization.serialize
import net.corda.nodeapi.internal.network.NETWORK_PARAMS_FILE_NAME
import org.slf4j.LoggerFactory
import java.io.File

class LocalVolume(scratchDir: File, context: Context) : Volume {

    private val networkDir = File(scratchDir, context.safeNetworkName)
    private val volumeDir = File(networkDir, "nodeinfos")
    private val networkParamsDir = File(volumeDir, "network-params")

    override fun notariesForNetworkParams(notaries: List<CopiedNotary>) {
        volumeDir.deleteRecursively()
        networkParamsDir.mkdirs()
        val networkParameters = convertNodeIntoToNetworkParams(notaries.map { it.configFile to it.nodeInfoFile })
        val networkParamsFile = File(networkParamsDir, NETWORK_PARAMS_FILE_NAME)
        networkParamsFile.outputStream().use { networkParameters.signWithCert(Volume.keyPair.private, Volume.networkMapCert).serialize().writeTo(it) }
        LOG.info("wrote network params to local file: ${networkParamsFile.absolutePath}")
    }

    fun getPath(): String {
        return volumeDir.absolutePath
    }

    companion object {
        val LOG = LoggerFactory.getLogger(LocalVolume::class.java)
    }
}
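A short usage sketch with a placeholder scratch directory; it assumes the Context and the list of CopiedNotary objects come from the bootstrapper's existing discovery steps, and that the directory returned by getPath() is what gets mounted into containers built from the Dockerfile that follows.

    import net.corda.bootstrapper.context.Context
    import net.corda.bootstrapper.notaries.CopiedNotary
    import java.io.File

    fun writeLocalNetworkParams(context: Context, notaries: List<CopiedNotary>): String {
        // Placeholder scratch directory - the real bootstrapper supplies its own.
        val volume = LocalVolume(File(System.getProperty("java.io.tmpdir"), "bootstrapper-scratch"), context)
        volume.notariesForNetworkParams(notaries)
        return volume.getPath()   // directory to mount for the node containers
    }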
@ -0,0 +1,37 @@
# Base image from (http://phusion.github.io/baseimage-docker)
FROM openjdk:8u151-jre-alpine

RUN apk upgrade --update && \
    apk add --update --no-cache bash iputils && \
    rm -rf /var/cache/apk/* && \
    # Add user to run the app && \
    addgroup corda && \
    adduser -G corda -D -s /bin/bash corda && \
    # Create /opt/corda directory && \
    mkdir -p /opt/corda/plugins && \
    mkdir -p /opt/corda/logs && \
    mkdir -p /opt/corda/additional-node-infos && \
    mkdir -p /opt/node-setup

# Copy corda files
ADD --chown=corda:corda corda.jar /opt/corda/corda.jar
ADD --chown=corda:corda node.conf /opt/corda/node.conf
ADD --chown=corda:corda cordapps/ /opt/corda/cordapps

# Copy node info watcher script
ADD --chown=corda:corda node_info_watcher.sh /opt/corda/

COPY run-corda.sh /run-corda.sh

RUN chmod +x /run-corda.sh && \
    chmod +x /opt/corda/node_info_watcher.sh && \
    sync && \
    chown -R corda:corda /opt/corda

# Working directory for Corda
WORKDIR /opt/corda
ENV HOME=/opt/corda
USER corda

# Start it
CMD ["/run-corda.sh"]
12
tools/network-bootstrapper/src/main/resources/node_info_watcher.sh
Executable file
@ -0,0 +1,12 @@
#!/bin/bash
while [ 1 -lt 2 ]; do
    NODE_INFO=$(ls | grep nodeInfo)
    if [ ${#NODE_INFO} -ge 5 ]; then
        echo "found nodeInfo copying to additional node node info folder"
        cp ${NODE_INFO} additional-node-infos/
        exit 0
    else
        echo "no node info found waiting"
    fi
    sleep 5
done
3
tools/network-bootstrapper/src/main/resources/node_killer.sh
Executable file
@ -0,0 +1,3 @@
#!/bin/bash

sleep $1
@ -0,0 +1,39 @@
# Base image from (http://phusion.github.io/baseimage-docker)
FROM openjdk:8u151-jre-alpine

RUN apk upgrade --update && \
    apk add --update --no-cache bash iputils && \
    rm -rf /var/cache/apk/* && \
    # Add user to run the app && \
    addgroup corda && \
    adduser -G corda -D -s /bin/bash corda && \
    # Create /opt/corda directory && \
    mkdir -p /opt/corda/plugins && \
    mkdir -p /opt/corda/logs && \
    mkdir -p /opt/corda/additional-node-infos && \
    mkdir -p /opt/node-setup

# Copy corda files
ADD --chown=corda:corda corda.jar /opt/corda/corda.jar
ADD --chown=corda:corda node.conf /opt/corda/node.conf
ADD --chown=corda:corda cordapps/ /opt/corda/cordapps
ADD --chown=corda:corda certificates/ /opt/corda/certificates
#ADD --chown=corda:corda nodeInfo-* /opt/corda/

# Copy node info watcher script
ADD --chown=corda:corda node_info_watcher.sh /opt/corda/

COPY run-corda.sh /run-corda.sh

RUN chmod +x /run-corda.sh && \
    chmod +x /opt/corda/node_info_watcher.sh && \
    sync && \
    chown -R corda:corda /opt/corda

# Working directory for Corda
WORKDIR /opt/corda
ENV HOME=/opt/corda
USER corda

# Start it
CMD ["/run-corda.sh"]
@ -0,0 +1,4 @@
rpcSettings {
    address="0.0.0.0:10003"
    adminAddress="127.0.0.1:10005"
}
26
tools/network-bootstrapper/src/main/resources/run-corda-node.sh
Executable file
@ -0,0 +1,26 @@
#!/bin/bash

: ${CORDA_HOME:=/opt/corda}
: ${JAVA_OPTIONS:=-Xmx512m}
: ${X500? Need a value for the x500 name of this node}
: ${OUR_NAME? Need a value for the expected FQDN of this node}
: ${OUR_PORT? Need a value for the port to bind to}

export CORDA_HOME JAVA_OPTIONS

sed -i "/myLegalName/d" node.conf
echo "myLegalName=\"${X500}\"" >> node.conf
echo "p2pAddress=\"${OUR_NAME}:${OUR_PORT}\"" >> node.conf

cp additional-node-infos/network-params/network-parameters .

bash node_info_watcher.sh &

cd ${CORDA_HOME}

if java ${JAVA_OPTIONS} -jar ${CORDA_HOME}/corda.jar 2>&1 ; then
    echo "Corda exited with zero exit code"
else
    echo "Corda exited with nonzero exit code, sleeping to allow log collection"
    sleep 10000
fi
25
tools/network-bootstrapper/src/main/resources/run-corda-notary.sh
Executable file
@ -0,0 +1,25 @@
#!/bin/bash

: ${CORDA_HOME:=/opt/corda}
: ${JAVA_OPTIONS:=-Xmx512m}
: ${OUR_NAME? Need a value for the expected FQDN of this node}
: ${OUR_PORT? Need a value for the port to bind to}

export CORDA_HOME JAVA_OPTIONS
echo "p2pAddress=\"${OUR_NAME}:${OUR_PORT}\"" >> node.conf
cp additional-node-infos/network-params/network-parameters .

bash node_info_watcher.sh &

cd ${CORDA_HOME}

if java ${JAVA_OPTIONS} -jar ${CORDA_HOME}/corda.jar 2>&1 ; then
    echo "Corda exited with zero exit code"
else
    echo "Corda exited with nonzero exit code, sleeping to allow log collection"
    sleep 10000
fi
3
tools/network-bootstrapper/src/main/resources/ssh.conf
Normal file
@ -0,0 +1,3 @@
sshd {
    port = 12222
}