mirror of https://github.com/corda/corda.git
synced 2025-05-09 03:52:54 +00:00

Fixes after merge remote-tracking branch 'remotes/open/master' into szymonsztuka/os-merge-20180824

This commit is contained in:
parent 598e3a327a
commit c7f666102f
.idea/compiler.xml (generated): 8 changes

@@ -43,14 +43,16 @@
<module name="capsule-hsm-crl-generator_test" target="1.8" />
<module name="capsule-hsm_main" target="1.8" />
<module name="capsule-hsm_test" target="1.8" />
<module name="cli_main" target="1.8" />
<module name="cli_test" target="1.8" />
<module name="client_main" target="1.8" />
<module name="client_test" target="1.8" />
<module name="cliutils_main" target="1.8" />
<module name="cliutils_test" target="1.8" />
<module name="com.r3.corda_buildSrc_main" target="1.8" />
<module name="com.r3.corda_buildSrc_test" target="1.8" />
<module name="com.r3.corda_canonicalizer_main" target="1.8" />
<module name="com.r3.corda_canonicalizer_test" target="1.8" />
<module name="cliutils_main" target="1.8" />
<module name="cliutils_test" target="1.8" />
<module name="common_main" target="1.8" />
<module name="common_test" target="1.8" />
<module name="confidential-identities_main" target="1.8" />

@@ -326,4 +328,4 @@
<component name="JavacSettings">
    <option name="ADDITIONAL_OPTIONS_STRING" value="-parameters" />
</component>
</project>
</project>

@@ -515,8 +515,3 @@ fun <T : Any> SerializedBytes<Any>.checkPayloadIs(type: Class<T>): Untrustworthy
    return type.castIfPossible(payloadData)?.let { UntrustworthyData(it) }
            ?: throw IllegalArgumentException("We were expecting a ${type.name} but we instead got a ${payloadData.javaClass.name} ($payloadData)")
}

/**
 * Extension method to make this method visible to nodeapi module.
 */
fun MappedSchema.getMigrationResource(): String? = this.internalGetMigrationResource()

@@ -54,6 +54,7 @@ import net.corda.node.utilities.AffinityExecutor
import net.corda.nodeapi.internal.DEV_ROOT_CA
import net.corda.nodeapi.internal.crypto.X509Utilities
import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.isH2Database
import net.corda.serialization.internal.*
import org.apache.activemq.artemis.utils.ReusableLatch
import rx.schedulers.Schedulers

@@ -242,7 +243,10 @@ class FlowWorkerServiceHub(override val configuration: NodeConfiguration, overri
servicesForResolution.start(networkParameters)
persistentNetworkMapCache.start(networkParameters.notaries)

database.startHikariPool(configuration.dataSourceProperties, configuration.database, schemaService)
val isH2Database = isH2Database(configuration.dataSourceProperties.getProperty("dataSource.url", ""))
val schemas = if (isH2Database) schemaService.internalSchemas() else schemaService.schemaOptions.keys

database.startHikariPool(configuration.dataSourceProperties, configuration.database, schemas)
identityService.start(trustRoot, listOf(myInfo.legalIdentitiesAndCerts.first().certificate, nodeCa))

database.transaction {

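FlowWorkerServiceHub now decides which schema set to hand to the Hikari pool based on whether the underlying database is H2. A minimal sketch of that selection step, assuming isH2Database() simply inspects the JDBC URL and using stand-in types for MappedSchema and SchemaService:

    import java.util.Properties

    class MappedSchema(val name: String)

    interface SchemaService {
        val schemaOptions: Map<MappedSchema, Unit>
        fun internalSchemas(): Set<MappedSchema>
    }

    // Assumption for the sketch: H2 is recognised by its JDBC URL prefix.
    fun isH2Database(jdbcUrl: String): Boolean = jdbcUrl.startsWith("jdbc:h2:")

    fun selectSchemas(dataSourceProperties: Properties, schemaService: SchemaService): Set<MappedSchema> {
        val isH2 = isH2Database(dataSourceProperties.getProperty("dataSource.url", ""))
        // Mirrors the change above: H2 gets the internal schema set, any other
        // database gets every schema registered with the schema service.
        return if (isH2) schemaService.internalSchemas() else schemaService.schemaOptions.keys
    }
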
@@ -63,11 +63,6 @@ dependencies {
// For caches rather than guava
compile "com.github.ben-manes.caffeine:caffeine:$caffeine_version"

// For db migration
compile "org.liquibase:liquibase-core:$liquibase_version"
compile "com.fasterxml.jackson.core:jackson-databind:$jackson_version"
runtime 'com.mattbertolini:liquibase-slf4j:2.0.0'

// Unit testing helpers.
testCompile "junit:junit:$junit_version"
testCompile "org.assertj:assertj-core:$assertj_version"

@@ -34,7 +34,7 @@ object MigrationHelpers {
}

// SchemaName will be transformed from camel case to lower_hyphen then add ".changelog-master"
private fun migrationResourceNameForSchema(schema: MappedSchema): String {
fun migrationResourceNameForSchema(schema: MappedSchema): String {
    val name: String = schema::class.simpleName!!
    val fileName = CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_HYPHEN, name)
    return "$MIGRATION_PREFIX/$fileName.$CHANGELOG_NAME"

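The helper maps a schema class's simple name from UpperCamelCase to lower-hyphen form to locate its Liquibase changelog resource. A small self-contained sketch of that mapping; the MIGRATION_PREFIX and CHANGELOG_NAME values below are illustrative assumptions, not the real constants:

    import com.google.common.base.CaseFormat

    private const val MIGRATION_PREFIX = "migration"       // assumed value
    private const val CHANGELOG_NAME = "changelog-master"  // assumed value

    fun changelogResourceFor(schemaSimpleName: String): String {
        val fileName = CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_HYPHEN, schemaSimpleName)
        return "$MIGRATION_PREFIX/$fileName.$CHANGELOG_NAME"
    }

    fun main() {
        // "VaultSchemaV1" -> "migration/vault-schema-v1.changelog-master"
        println(changelogResourceFor("VaultSchemaV1"))
    }

Dropping the private modifier, as the change does, lets callers outside MigrationHelpers resolve changelog names the same way.
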
@@ -42,13 +42,13 @@ class SchemaMigration(
 * Main entry point to the schema migration.
 * Called during node startup.
 */
fun nodeStartup(existingCheckpoints: Boolean) {
fun nodeStartup(existingCheckpoints: Boolean, isH2Database: Boolean) {
    when {
        databaseConfig.initialiseSchema -> {
        //TODO if it's h2 only
        databaseConfig.initialiseSchema && isH2Database -> {
            migrateOlderDatabaseToUseLiquibase(existingCheckpoints)
            runMigration(existingCheckpoints)
        }
        databaseConfig.initialiseSchema -> runMigration(existingCheckpoints)
        else -> checkState()
    }
}

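nodeStartup() now takes an isH2Database flag and only performs the older-database-to-Liquibase conversion for H2; other databases go straight to the Liquibase migration, and with schema initialisation disabled only a consistency check runs. A simplified, standalone model of that branching with the migration steps stubbed out:

    class SchemaMigrationSketch(private val initialiseSchema: Boolean) {

        fun nodeStartup(existingCheckpoints: Boolean, isH2Database: Boolean) {
            when {
                // Only H2 goes through the legacy "pre-Liquibase" conversion first.
                initialiseSchema && isH2Database -> {
                    migrateOlderDatabaseToUseLiquibase(existingCheckpoints)
                    runMigration(existingCheckpoints)
                }
                // Any other database just runs the Liquibase migration.
                initialiseSchema -> runMigration(existingCheckpoints)
                // With initialiseSchema disabled, only verify the schema is up to date.
                else -> checkState()
            }
        }

        private fun migrateOlderDatabaseToUseLiquibase(existingCheckpoints: Boolean) =
                println("converting pre-Liquibase schema (checkpoints: $existingCheckpoints)")

        private fun runMigration(existingCheckpoints: Boolean) =
                println("running Liquibase migration (checkpoints: $existingCheckpoints)")

        private fun checkState() = println("checking for outstanding database changes")
    }
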
@@ -66,7 +66,7 @@ class SchemaMigration(
/**
 * Ensures that the database is up to date with the latest migration changes.
 */
private fun checkState() = doRunMigration(run = false, outputWriter = null, check = true)
fun checkState() = doRunMigration(run = false, outputWriter = null, check = true)

/**
 * Can be used from an external tool to release the lock in case something went terribly wrong.

@@ -138,7 +138,6 @@ class SchemaMigration(
    check && !run && unRunChanges.isNotEmpty() -> throw OutstandingDatabaseChangesException(unRunChanges.size)
    check && !run -> {} // Do nothing will be interpreted as "check succeeded"
    (outputWriter != null) && !check && !run -> liquibase.update(Contexts(), outputWriter)
    (outputWriter != null) && !check && !run -> liquibase.update(Contexts(), outputWriter)
    else -> throw IllegalStateException("Invalid usage.")
}
}

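Removing the duplicated branch above is safe because a Kotlin when evaluates its branches in order and takes the first one that matches, so an identical second condition can never be reached. A tiny illustration:

    fun main() {
        val check = false
        val run = false
        val picked = when {
            !check && !run -> "first matching branch"
            !check && !run -> "identical branch, never reached"
            else -> "else"
        }
        println(picked) // prints "first matching branch"
    }
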
@@ -149,8 +149,7 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
        configuration.database,
        identityService::wellKnownPartyFromX500Name,
        identityService::wellKnownPartyFromAnonymous,
        schemaService,
        configuration.dataSourceProperties
        schemaService
)
init {
    // TODO Break cyclic dependency

@@ -1049,11 +1048,10 @@ fun configureDatabase(hikariProperties: Properties,
                      databaseConfig: DatabaseConfig,
                      wellKnownPartyFromX500Name: (CordaX500Name) -> Party?,
                      wellKnownPartyFromAnonymous: (AbstractParty) -> Party?,
                      schemaService: NodeSchemaService = NodeSchemaService()): CordaPersistence {

                      schemaService: SchemaService = NodeSchemaService()): CordaPersistence {
    val isH2Database = isH2Database(hikariProperties.getProperty("dataSource.url", ""))
    val schemas = if (isH2Database) schemaService.internalSchemas() else schemaService.schemaOptions.keys
    createCordaPersistence(databaseConfig, wellKnownPartyFromX500Name, wellKnownPartyFromAnonymous, schemaService)
    val schemas = if (isH2Database) NodeSchemaService().internalSchemas() else NodeSchemaService().schemaOptions.keys
    return createCordaPersistence(databaseConfig, wellKnownPartyFromX500Name, wellKnownPartyFromAnonymous, schemaService)
            .apply { startHikariPool(hikariProperties, databaseConfig, schemas) }

}

@@ -1061,16 +1059,14 @@ fun configureDatabase(hikariProperties: Properties,
fun createCordaPersistence(databaseConfig: DatabaseConfig,
                           wellKnownPartyFromX500Name: (CordaX500Name) -> Party?,
                           wellKnownPartyFromAnonymous: (AbstractParty) -> Party?,
                           schemaService: SchemaService,
                           hikariProperties: Properties): CordaPersistence {
                           schemaService: SchemaService): CordaPersistence {
    // Register the AbstractPartyDescriptor so Hibernate doesn't warn when encountering AbstractParty. Unfortunately
    // Hibernate warns about not being able to find a descriptor if we don't provide one, but won't use it by default
    // so we end up providing both descriptor and converter. We should re-examine this in later versions to see if
    // either Hibernate can be convinced to stop warning, use the descriptor by default, or something else.
    JavaTypeDescriptorRegistry.INSTANCE.addDescriptor(AbstractPartyDescriptor(wellKnownPartyFromX500Name, wellKnownPartyFromAnonymous))
    val attributeConverters = listOf(AbstractPartyToX500NameAsStringConverter(wellKnownPartyFromX500Name, wellKnownPartyFromAnonymous))
    val jdbcUrl = hikariProperties.getProperty("dataSource.url", "")
    return CordaPersistence(databaseConfig, schemaService.schemaOptions.keys, jdbcUrl, attributeConverters)
    return CordaPersistence(databaseConfig, schemaService.schemaOptions.keys, attributeConverters)
}

fun CordaPersistence.startHikariPool(hikariProperties: Properties, databaseConfig: DatabaseConfig, schemas: Set<MappedSchema>) {

@@ -1078,7 +1074,7 @@ fun CordaPersistence.startHikariPool(hikariProperties: Properties, databaseConfi
    val dataSource = DataSourceFactory.createDataSource(hikariProperties)
    val jdbcUrl = hikariProperties.getProperty("dataSource.url", "")
    val schemaMigration = SchemaMigration(schemas, dataSource, databaseConfig)
    schemaMigration.nodeStartup(dataSource.connection.use { DBCheckpointStorage().getCheckpointCount(it) != 0L })
    schemaMigration.nodeStartup(dataSource.connection.use { DBCheckpointStorage().getCheckpointCount(it) != 0L }, isH2Database(jdbcUrl))
    start(dataSource, jdbcUrl)
} catch (ex: Exception) {
    when {

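startHikariPool() now derives the H2 flag from the configured JDBC URL and passes it to nodeStartup() together with the existing-checkpoints flag. A rough sketch of that wiring; the checkpoint count and the migration object are placeholders rather than Corda APIs:

    import java.util.Properties

    class SchemaMigrationStub {
        fun nodeStartup(existingCheckpoints: Boolean, isH2Database: Boolean) =
                println("nodeStartup(existingCheckpoints=$existingCheckpoints, isH2Database=$isH2Database)")
    }

    // Assumption for the sketch: H2 is recognised by its JDBC URL prefix.
    fun isH2Database(jdbcUrl: String): Boolean = jdbcUrl.startsWith("jdbc:h2:")

    fun startHikariPoolSketch(hikariProperties: Properties, checkpointCount: Long) {
        val jdbcUrl = hikariProperties.getProperty("dataSource.url", "")
        // The two facts nodeStartup() needs: are there checkpoints, and is this H2?
        SchemaMigrationStub().nodeStartup(checkpointCount != 0L, isH2Database(jdbcUrl))
    }

    fun main() {
        val props = Properties().apply { setProperty("dataSource.url", "jdbc:h2:mem:test") }
        startHikariPoolSketch(props, checkpointCount = 0L)
    }
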
@@ -75,10 +75,13 @@ class NodeSchemaService(private val extraSchemas: Set<MappedSchema> = emptySet()
// Required schemas are those used by internal Corda services
private val requiredSchemas: Map<MappedSchema, SchemaService.SchemaOptions> =
        mapOf(Pair(CommonSchemaV1, SchemaOptions()),
              Pair(VaultSchemaV1, SchemaOptions()),
              Pair(NodeInfoSchemaV1, SchemaOptions()),
              Pair(NodeCoreV1, SchemaOptions())) +
        if (includeNotarySchemas) mapOf(Pair(NodeNotaryV1, SchemaOptions())) else emptyMap()
              Pair(VaultSchemaV1, SchemaOptions()),
              Pair(NodeInfoSchemaV1, SchemaOptions()),
              Pair(NodeCoreV1, SchemaOptions())) +
        if (includeNotarySchemas) mapOf(Pair(NodeNotaryV1, SchemaOptions())) else emptyMap()

fun internalSchemas() = requiredSchemas.keys + extraSchemas.filter { schema -> // when mapped schemas from the finance module are present, they are considered as internal ones
    schema::class.simpleName == "net.corda.finance.schemas.CashSchemaV1" || schema::class.simpleName == "net.corda.finance.schemas.CommercialPaperSchemaV1" }

override val schemaOptions: Map<MappedSchema, SchemaService.SchemaOptions> = requiredSchemas + extraSchemas.associateBy({ it }, { SchemaOptions() })

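internalSchemas() combines the required core schemas with those extra schemas that come from the finance module, which are treated as internal. A minimal sketch of that combination, here keying the finance check on the schema class's qualified name and using stand-in schema objects:

    open class MappedSchemaSketch

    object NodeCoreV1 : MappedSchemaSketch()
    object VaultSchemaV1 : MappedSchemaSketch()

    private val financeSchemaNames = setOf(
            "net.corda.finance.schemas.CashSchemaV1",
            "net.corda.finance.schemas.CommercialPaperSchemaV1")

    fun internalSchemasSketch(required: Set<MappedSchemaSketch>, extras: Set<MappedSchemaSketch>): Set<MappedSchemaSketch> =
            // Extra schemas packaged by the finance module count as internal ones.
            required + extras.filter { it::class.qualifiedName.orEmpty() in financeSchemaNames }

    fun main() {
        println(internalSchemasSketch(setOf(NodeCoreV1, VaultSchemaV1), emptySet()).size) // 2
    }
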
@@ -60,7 +60,7 @@ class SchemaMigrationTest {
    val dataSourceProps = MockServices.makeTestDataSourceProperties()

    //run the migration on the database
    val migration = SchemaMigration(schemaService.schemaOptions.keys, HikariDataSource(HikariConfig(dataSourceProps)), true, DatabaseConfig())
    val migration = SchemaMigration(schemaService.schemaOptions.keys, HikariDataSource(HikariConfig(dataSourceProps)), DatabaseConfig())
    migration.runMigration(false)

    //start the node with "runMigration = false" and check that it started correctly

@@ -114,7 +114,6 @@ open class MockServices private constructor(
    val cordappLoader = cordappLoaderForPackages(cordappPackages)
    val dataSourceProps = makeInternalTestDataSourceProperties(initialIdentity.name.organisation, SecureHash.randomSHA256().toString())
    val schemaService = NodeSchemaService(cordappLoader.cordappSchemas)
    //TODO different schemas based on h2 or not
    val database = configureDatabase(dataSourceProps, makeTestDatabaseProperties(initialIdentity.name.organisation), identityService::wellKnownPartyFromX500Name, identityService::wellKnownPartyFromAnonymous, schemaService)
    val mockService = database.transaction {
        object : MockServices(cordappLoader, identityService, networkParameters, initialIdentity, moreKeys) {

@@ -169,12 +169,12 @@ private fun handleCommand(options: OptionSet, baseDirectory: Path, configFile: P
val config = parsedConfig.parseAs(Configuration::class, UnknownConfigKeysPolicy.IGNORE::handle)

fun runMigrationCommand(withMigration: (SchemaMigration, DataSource) -> Unit): Unit = runWithDataSource(config, baseDirectory, classLoader) { dataSource ->
    withMigration(SchemaMigration(schemas, dataSource, true, config.database, classLoader), dataSource)
    withMigration(SchemaMigration(schemas, dataSource, config.database, classLoader), dataSource)
}

when {
    options.has(RELEASE_LOCK) -> runWithDataSource(ConfigFactory.parseFile(configFile.toFile()).resolve().parseAs(Configuration::class), baseDirectory, classLoader) {
        SchemaMigration(emptySet(), it, true, config.database, Thread.currentThread().contextClassLoader).forceReleaseMigrationLock()
        SchemaMigration(emptySet(), it, config.database, Thread.currentThread().contextClassLoader).forceReleaseMigrationLock()
    }
    options.has(DRY_RUN) -> {
        val writer = getMigrationOutput(baseDirectory, options)