Merge pull request #1356 from corda/szymonsztuka/os-merge-20180824

OS merge  -  In OS, for H2, Liquibase is used to migrate only internal (required) schemas. This behaviour is preserved in ENT, so enterprise needs to distinguish whether it's running against H2 or another database (to use Liquibase for all MappedSchema, not only internal ones). This preserves existing compatibility so that migration for H2 behaves exactly the same between OS and ENT; however, in ENT, H2 behaves differently than other database vendors.
This commit is contained in:
szymonsztuka 2018-08-24 16:30:18 +01:00 committed by GitHub
commit 00a80c3c65
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
22 changed files with 167 additions and 178 deletions

6
.idea/compiler.xml generated
View File

@ -43,14 +43,16 @@
<module name="capsule-hsm-crl-generator_test" target="1.8" /> <module name="capsule-hsm-crl-generator_test" target="1.8" />
<module name="capsule-hsm_main" target="1.8" /> <module name="capsule-hsm_main" target="1.8" />
<module name="capsule-hsm_test" target="1.8" /> <module name="capsule-hsm_test" target="1.8" />
<module name="cli_main" target="1.8" />
<module name="cli_test" target="1.8" />
<module name="client_main" target="1.8" /> <module name="client_main" target="1.8" />
<module name="client_test" target="1.8" /> <module name="client_test" target="1.8" />
<module name="cliutils_main" target="1.8" />
<module name="cliutils_test" target="1.8" />
<module name="com.r3.corda_buildSrc_main" target="1.8" /> <module name="com.r3.corda_buildSrc_main" target="1.8" />
<module name="com.r3.corda_buildSrc_test" target="1.8" /> <module name="com.r3.corda_buildSrc_test" target="1.8" />
<module name="com.r3.corda_canonicalizer_main" target="1.8" /> <module name="com.r3.corda_canonicalizer_main" target="1.8" />
<module name="com.r3.corda_canonicalizer_test" target="1.8" /> <module name="com.r3.corda_canonicalizer_test" target="1.8" />
<module name="cliutils_main" target="1.8" />
<module name="cliutils_test" target="1.8" />
<module name="common_main" target="1.8" /> <module name="common_main" target="1.8" />
<module name="common_test" target="1.8" /> <module name="common_test" target="1.8" />
<module name="confidential-identities_main" target="1.8" /> <module name="confidential-identities_main" target="1.8" />

View File

@ -70,8 +70,8 @@ buildscript {
ext.shiro_version = '1.4.0' ext.shiro_version = '1.4.0'
ext.shadow_version = '2.0.4' ext.shadow_version = '2.0.4'
ext.artifactory_plugin_version = constants.getProperty('artifactoryPluginVersion') ext.artifactory_plugin_version = constants.getProperty('artifactoryPluginVersion')
ext.liquibase_version = '3.5.3'
ext.hikari_version = '2.5.1' ext.hikari_version = '2.5.1'
ext.liquibase_version = '3.6.2'
ext.artifactory_contextUrl = 'https://ci-artifactory.corda.r3cev.com/artifactory' ext.artifactory_contextUrl = 'https://ci-artifactory.corda.r3cev.com/artifactory'
ext.snake_yaml_version = constants.getProperty('snakeYamlVersion') ext.snake_yaml_version = constants.getProperty('snakeYamlVersion')
ext.docker_compose_rule_version = '0.33.0' ext.docker_compose_rule_version = '0.33.0'

View File

@ -515,8 +515,3 @@ fun <T : Any> SerializedBytes<Any>.checkPayloadIs(type: Class<T>): Untrustworthy
return type.castIfPossible(payloadData)?.let { UntrustworthyData(it) } return type.castIfPossible(payloadData)?.let { UntrustworthyData(it) }
?: throw IllegalArgumentException("We were expecting a ${type.name} but we instead got a ${payloadData.javaClass.name} ($payloadData)") ?: throw IllegalArgumentException("We were expecting a ${type.name} but we instead got a ${payloadData.javaClass.name} ($payloadData)")
} }
/**
* Extension method to make this method visible to nodeapi module.
*/
fun MappedSchema.getMigrationResource(): String? = this.internalGetMigrationResource()

View File

@ -59,9 +59,7 @@ open class MappedSchema(schemaFamily: Class<*>,
/** /**
* Points to a classpath resource containing the database changes for the [mappedTypes] * Points to a classpath resource containing the database changes for the [mappedTypes]
*/ */
protected open val migrationResource: String? = null open val migrationResource: String? = null
internal fun internalGetMigrationResource(): String? = migrationResource
override fun toString(): String = "${this.javaClass.simpleName}(name=$name, version=$version)" override fun toString(): String = "${this.javaClass.simpleName}(name=$name, version=$version)"

View File

@ -54,6 +54,7 @@ import net.corda.node.utilities.AffinityExecutor
import net.corda.nodeapi.internal.DEV_ROOT_CA import net.corda.nodeapi.internal.DEV_ROOT_CA
import net.corda.nodeapi.internal.crypto.X509Utilities import net.corda.nodeapi.internal.crypto.X509Utilities
import net.corda.nodeapi.internal.persistence.CordaPersistence import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.isH2Database
import net.corda.serialization.internal.* import net.corda.serialization.internal.*
import org.apache.activemq.artemis.utils.ReusableLatch import org.apache.activemq.artemis.utils.ReusableLatch
import rx.schedulers.Schedulers import rx.schedulers.Schedulers
@ -242,7 +243,10 @@ class FlowWorkerServiceHub(override val configuration: NodeConfiguration, overri
servicesForResolution.start(networkParameters) servicesForResolution.start(networkParameters)
persistentNetworkMapCache.start(networkParameters.notaries) persistentNetworkMapCache.start(networkParameters.notaries)
database.startHikariPool(configuration.dataSourceProperties, configuration.database, schemaService) val isH2Database = isH2Database(configuration.dataSourceProperties.getProperty("dataSource.url", ""))
val schemas = if (isH2Database) schemaService.internalSchemas() else schemaService.schemaOptions.keys
database.startHikariPool(configuration.dataSourceProperties, configuration.database, schemas)
identityService.start(trustRoot, listOf(myInfo.legalIdentitiesAndCerts.first().certificate, nodeCa)) identityService.start(trustRoot, listOf(myInfo.legalIdentitiesAndCerts.first().certificate, nodeCa))
database.transaction { database.transaction {

View File

@ -11,7 +11,6 @@
package net.corda.nodeapi.internal package net.corda.nodeapi.internal
import com.google.common.base.CaseFormat import com.google.common.base.CaseFormat
import net.corda.core.internal.getMigrationResource
import net.corda.core.schemas.MappedSchema import net.corda.core.schemas.MappedSchema
object MigrationHelpers { object MigrationHelpers {
@ -21,7 +20,7 @@ object MigrationHelpers {
private val possibleMigrationExtensions = listOf(".xml", ".sql", ".yml", ".json") private val possibleMigrationExtensions = listOf(".xml", ".sql", ".yml", ".json")
fun getMigrationResource(schema: MappedSchema, classLoader: ClassLoader): String? { fun getMigrationResource(schema: MappedSchema, classLoader: ClassLoader): String? {
val declaredMigration = schema.getMigrationResource() val declaredMigration = schema.migrationResource
if (declaredMigration == null) { if (declaredMigration == null) {
// try to apply the naming convention and find the migration file in the classpath // try to apply the naming convention and find the migration file in the classpath
@ -34,8 +33,7 @@ object MigrationHelpers {
return "$MIGRATION_PREFIX/$declaredMigration.$DEFAULT_MIGRATION_EXTENSION" return "$MIGRATION_PREFIX/$declaredMigration.$DEFAULT_MIGRATION_EXTENSION"
} }
// SchemaName will be transformed from camel case to lower_hyphen // SchemaName will be transformed from camel case to lower_hyphen then add ".changelog-master"
// then add ".changelog-master"
fun migrationResourceNameForSchema(schema: MappedSchema): String { fun migrationResourceNameForSchema(schema: MappedSchema): String {
val name: String = schema::class.simpleName!! val name: String = schema::class.simpleName!!
val fileName = CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_HYPHEN, name) val fileName = CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_HYPHEN, name)

View File

@ -100,8 +100,6 @@ class CordaPersistence(
// Check not in read-only mode. // Check not in read-only mode.
transaction { transaction {
check(!connection.metaData.isReadOnly) { "Database should not be readonly." } check(!connection.metaData.isReadOnly) { "Database should not be readonly." }
checkCorrectAttachmentsContractsTableName(connection)
checkCorrectCheckpointTypeOnPostgres(connection)
} }
} }
object DataSourceConfigTag { object DataSourceConfigTag {
@ -306,33 +304,3 @@ private fun Throwable.hasSQLExceptionCause(): Boolean =
} }
class CouldNotCreateDataSourceException(override val message: String?, override val cause: Throwable? = null) : Exception() class CouldNotCreateDataSourceException(override val message: String?, override val cause: Throwable? = null) : Exception()
class DatabaseIncompatibleException(override val message: String?, override val cause: Throwable? = null) : Exception()
private fun checkCorrectAttachmentsContractsTableName(connection: Connection) {
val correctName = "NODE_ATTACHMENTS_CONTRACTS"
val incorrectV30Name = "NODE_ATTACHMENTS_CONTRACT_CLASS_NAME"
val incorrectV31Name = "NODE_ATTCHMENTS_CONTRACTS"
fun warning(incorrectName: String, version: String) = "The database contains the older table name $incorrectName instead of $correctName, see upgrade notes to migrate from Corda database version $version https://docs.corda.net/head/upgrade-notes.html."
if (!connection.metaData.getTables(null, null, correctName, null).next()) {
if (connection.metaData.getTables(null, null, incorrectV30Name, null).next()) { throw DatabaseIncompatibleException(warning(incorrectV30Name, "3.0")) }
if (connection.metaData.getTables(null, null, incorrectV31Name, null).next()) { throw DatabaseIncompatibleException(warning(incorrectV31Name, "3.1")) }
}
}
private fun checkCorrectCheckpointTypeOnPostgres(connection: Connection) {
val metaData = connection.metaData
if (metaData.getDatabaseProductName() != "PostgreSQL") {
return
}
val result = metaData.getColumns(null, null, "node_checkpoints", "checkpoint_value")
if (result.next()) {
val type = result.getString("TYPE_NAME")
if (type != "bytea") {
throw DatabaseIncompatibleException("The type of the 'checkpoint_value' table must be 'bytea', but 'oid' was found. See upgrade notes to migrate from Corda database version 3.1 https://docs.corda.net/head/upgrade-notes.html.")
}
}
}

View File

@ -31,7 +31,6 @@ import javax.sql.DataSource
class SchemaMigration( class SchemaMigration(
val schemas: Set<MappedSchema>, val schemas: Set<MappedSchema>,
val dataSource: DataSource, val dataSource: DataSource,
val failOnMigrationMissing: Boolean,
private val databaseConfig: DatabaseConfig, private val databaseConfig: DatabaseConfig,
private val classLoader: ClassLoader = Thread.currentThread().contextClassLoader) { private val classLoader: ClassLoader = Thread.currentThread().contextClassLoader) {
@ -43,10 +42,14 @@ class SchemaMigration(
* Main entry point to the schema migration. * Main entry point to the schema migration.
* Called during node startup. * Called during node startup.
*/ */
fun nodeStartup(existingCheckpoints: Boolean) { fun nodeStartup(existingCheckpoints: Boolean, isH2Database: Boolean) {
when { when {
databaseConfig.runMigration -> runMigration(existingCheckpoints) databaseConfig.initialiseSchema && isH2Database -> {
failOnMigrationMissing -> checkState() migrateOlderDatabaseToUseLiquibase(existingCheckpoints)
runMigration(existingCheckpoints)
}
databaseConfig.initialiseSchema -> runMigration(existingCheckpoints)
else -> checkState()
} }
} }
@ -74,31 +77,9 @@ class SchemaMigration(
} }
} }
private fun doRunMigration(run: Boolean, outputWriter: Writer?, check: Boolean, existingCheckpoints: Boolean? = null) { /** Create a resource accessor that aggregates the changelogs included in the schemas into one dynamic stream. */
private class CustomResourceAccessor(val dynamicInclude: String, val changelogList: List<String?>, classLoader: ClassLoader) : ClassLoaderResourceAccessor(classLoader) {
// Virtual file name of the changelog that includes all schemas.
val dynamicInclude = "master.changelog.json"
dataSource.connection.use { connection ->
// Collect all changelog file referenced in the included schemas.
// For backward compatibility reasons, when failOnMigrationMissing=false, we don't manage CorDapps via Liquibase but use the hibernate hbm2ddl=update.
val changelogList = schemas.map { mappedSchema ->
val resource = getMigrationResource(mappedSchema, classLoader)
when {
resource != null -> resource
failOnMigrationMissing -> throw MissingMigrationException(mappedSchema)
else -> {
logger.warn(MissingMigrationException.errorMessageFor(mappedSchema))
null
}
}
}
// Create a resource accessor that aggregates the changelogs included in the schemas into one dynamic stream.
val customResourceAccessor = object : ClassLoaderResourceAccessor(classLoader) {
override fun getResourcesAsStream(path: String): Set<InputStream> { override fun getResourcesAsStream(path: String): Set<InputStream> {
if (path == dynamicInclude) { if (path == dynamicInclude) {
// Create a map in Liquibase format including all migration files. // Create a map in Liquibase format including all migration files.
val includeAllFiles = mapOf("databaseChangeLog" to changelogList.filter { it != null }.map { file -> mapOf("include" to mapOf("file" to file)) }) val includeAllFiles = mapOf("databaseChangeLog" to changelogList.filter { it != null }.map { file -> mapOf("include" to mapOf("file" to file)) })
@ -113,6 +94,25 @@ class SchemaMigration(
} }
} }
private fun doRunMigration(run: Boolean, outputWriter: Writer?, check: Boolean, existingCheckpoints: Boolean? = null) {
// Virtual file name of the changelog that includes all schemas.
val dynamicInclude = "master.changelog.json"
dataSource.connection.use { connection ->
// Collect all changelog file referenced in the included schemas.
// For backward compatibility reasons, when failOnMigrationMissing=false, we don't manage CorDapps via Liquibase but use the hibernate hbm2ddl=update.
val changelogList = schemas.map { mappedSchema ->
val resource = getMigrationResource(mappedSchema, classLoader)
when {
resource != null -> resource
else -> throw MissingMigrationException(mappedSchema)
}
}
val customResourceAccessor = CustomResourceAccessor(dynamicInclude, changelogList, classLoader)
val liquibase = Liquibase(dynamicInclude, customResourceAccessor, getLiquibaseDatabase(JdbcConnection(connection))) val liquibase = Liquibase(dynamicInclude, customResourceAccessor, getLiquibaseDatabase(JdbcConnection(connection)))
val schemaName: String? = databaseConfig.schema val schemaName: String? = databaseConfig.schema
@ -161,6 +161,54 @@ class SchemaMigration(
return if (liquibaseDbImplementation is MSSQLDatabase) AzureDatabase(conn) else liquibaseDbImplementation return if (liquibaseDbImplementation is MSSQLDatabase) AzureDatabase(conn) else liquibaseDbImplementation
} }
/** For an existing database created before version 4.0, add Liquibase support - creates DATABASECHANGELOG and DATABASECHANGELOGLOCK tables and marks changesets as executed. */
private fun migrateOlderDatabaseToUseLiquibase(existingCheckpoints: Boolean): Boolean {
val isExistingDBWithoutLiquibase = dataSource.connection.use {
it.metaData.getTables(null, null, "NODE%", null).next() &&
!it.metaData.getTables(null, null, "DATABASECHANGELOG", null).next() &&
!it.metaData.getTables(null, null, "DATABASECHANGELOGLOCK", null).next()
}
when {
isExistingDBWithoutLiquibase && existingCheckpoints -> throw CheckpointsException()
isExistingDBWithoutLiquibase -> {
// Virtual file name of the changelog that includes all schemas.
val dynamicInclude = "master.changelog.json"
dataSource.connection.use { connection ->
// Schema migrations pre release 4.0
val preV4Baseline =
listOf("migration/common.changelog-init.xml",
"migration/node-info.changelog-init.xml",
"migration/node-info.changelog-v1.xml",
"migration/node-info.changelog-v2.xml",
"migration/node-core.changelog-init.xml",
"migration/node-core.changelog-v3.xml",
"migration/node-core.changelog-v4.xml",
"migration/node-core.changelog-v5.xml",
"migration/node-core.changelog-pkey.xml",
"migration/vault-schema.changelog-init.xml",
"migration/vault-schema.changelog-v3.xml",
"migration/vault-schema.changelog-v4.xml",
"migration/vault-schema.changelog-pkey.xml",
"migration/cash.changelog-init.xml",
"migration/cash.changelog-v1.xml",
"migration/commercial-paper.changelog-init.xml",
"migration/commercial-paper.changelog-v1.xml") +
if (schemas.any { schema -> schema.migrationResource == "node-notary.changelog-master" })
listOf("migration/node-notary.changelog-init.xml",
"migration/node-notary.changelog-v1.xml",
"migration/vault-schema.changelog-pkey.xml")
else emptyList()
val customResourceAccessor = CustomResourceAccessor(dynamicInclude, preV4Baseline, classLoader)
val liquibase = Liquibase(dynamicInclude, customResourceAccessor, getLiquibaseDatabase(JdbcConnection(connection)))
liquibase.changeLogSync(Contexts(), LabelExpression())
}
}
}
return isExistingDBWithoutLiquibase
}
} }
open class DatabaseMigrationException(message: String) : IllegalArgumentException(message) { open class DatabaseMigrationException(message: String) : IllegalArgumentException(message) {
@ -183,3 +231,9 @@ class CheckpointsException : DatabaseMigrationException("Attempting to update th
"This is dangerous because the node might not be able to restore the flows correctly and could consequently fail. " + "This is dangerous because the node might not be able to restore the flows correctly and could consequently fail. " +
"Updating the database would make reverting to the previous version more difficult. " + "Updating the database would make reverting to the previous version more difficult. " +
"Please drain your node first. See: https://docs.corda.net/upgrading-cordapps.html#flow-drains") "Please drain your node first. See: https://docs.corda.net/upgrading-cordapps.html#flow-drains")
class DatabaseIncompatibleException(@Suppress("MemberVisibilityCanBePrivate") private val reason: String) : DatabaseMigrationException(errorMessageFor(reason)) {
internal companion object {
fun errorMessageFor(reason: String): String = "Incompatible database schema version detected, please run the node with configuration option database.initialiseSchema=true. Reason: $reason"
}
}

View File

@ -1,70 +0,0 @@
package net.corda.node.persistence
import net.corda.client.rpc.CordaRPCClient
import net.corda.core.internal.packageName
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.getOrThrow
import net.corda.node.services.Permissions
import net.corda.testMessage.Message
import net.corda.testMessage.MessageState
import net.corda.testing.core.singleIdentity
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import net.corda.testing.node.User
import org.junit.Test
import java.nio.file.Path
import java.sql.DriverManager
import kotlin.test.assertFailsWith
import kotlin.test.assertFalse
import kotlin.test.assertTrue
class FailNodeOnNotMigratedAttachmentContractsTableNameTests {
@Test
fun `node fails when detecting table name not migrated from version 3 dot 0`() {
`node fails when not detecting compatible table name`("NODE_ATTACHMENTS_CONTRACTS", "NODE_ATTACHMENTS_CONTRACT_CLASS_NAME")
}
@Test
fun `node fails when detecting table name not migrated from version 3 dot 1`() {
`node fails when not detecting compatible table name`("NODE_ATTACHMENTS_CONTRACTS", "NODE_ATTCHMENTS_CONTRACTS")
}
private fun `node fails when not detecting compatible table name`(tableNameFromMapping: String, tableNameInDB: String) {
val user = User("mark", "dadada", setOf(Permissions.startFlow<SendMessageFlow>(), Permissions.invokeRpc("vaultQuery")))
val message = Message("Hello world!")
val baseDir: Path = driver(DriverParameters(
inMemoryDB = false,
startNodesInProcess = isQuasarAgentSpecified(),
extraCordappPackagesToScan = listOf(MessageState::class.packageName)
)) {
val (nodeName, baseDir) = {
val nodeHandle = startNode(rpcUsers = listOf(user)).getOrThrow()
val nodeName = nodeHandle.nodeInfo.singleIdentity().name
CordaRPCClient(nodeHandle.rpcAddress).start(user.username, user.password).use {
it.proxy.startFlow(::SendMessageFlow, message, defaultNotaryIdentity).returnValue.getOrThrow()
}
nodeHandle.stop()
Pair(nodeName, nodeHandle.baseDirectory)
}()
// replace the correct table name with one from the former release
DriverManager.getConnection("jdbc:h2:file://$baseDir/persistence", "sa", "").use {
it.createStatement().execute("ALTER TABLE $tableNameFromMapping RENAME TO $tableNameInDB")
it.commit()
}
assertFailsWith(net.corda.nodeapi.internal.persistence.DatabaseIncompatibleException::class) {
val nodeHandle = startNode(providedName = nodeName, rpcUsers = listOf(user)).getOrThrow()
nodeHandle.stop()
}
baseDir
}
// check that the node didn't recreate the correct table matching its entity mapping
val (hasTableFromMapping, hasTableFromDB) = DriverManager.getConnection("jdbc:h2:file://$baseDir/persistence", "sa", "").use {
Pair(it.metaData.getTables(null, null, tableNameFromMapping, null).next(),
it.metaData.getTables(null, null, tableNameInDB, null).next())
}
assertFalse(hasTableFromMapping)
assertTrue(hasTableFromDB)
}
}

View File

@ -36,6 +36,7 @@ import net.corda.core.internal.uncheckedCast
import net.corda.core.messaging.* import net.corda.core.messaging.*
import net.corda.core.node.* import net.corda.core.node.*
import net.corda.core.node.services.* import net.corda.core.node.services.*
import net.corda.core.schemas.MappedSchema
import net.corda.core.serialization.SerializationWhitelist import net.corda.core.serialization.SerializationWhitelist
import net.corda.core.serialization.SerializeAsToken import net.corda.core.serialization.SerializeAsToken
import net.corda.core.serialization.SingletonSerializeAsToken import net.corda.core.serialization.SingletonSerializeAsToken
@ -774,7 +775,9 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
} }
val props = configuration.dataSourceProperties val props = configuration.dataSourceProperties
if (props.isEmpty) throw DatabaseConfigurationException("There must be a database configured.") if (props.isEmpty) throw DatabaseConfigurationException("There must be a database configured.")
database.startHikariPool(props, configuration.database, schemaService) val isH2Database = isH2Database(props.getProperty("dataSource.url", ""))
val schemas = if (isH2Database) schemaService.internalSchemas() else schemaService.schemaOptions.keys
database.startHikariPool(props, configuration.database, schemas)
// Now log the vendor string as this will also cause a connection to be tested eagerly. // Now log the vendor string as this will also cause a connection to be tested eagerly.
logVendorString(database, log) logVendorString(database, log)
} }
@ -1045,9 +1048,13 @@ fun configureDatabase(hikariProperties: Properties,
databaseConfig: DatabaseConfig, databaseConfig: DatabaseConfig,
wellKnownPartyFromX500Name: (CordaX500Name) -> Party?, wellKnownPartyFromX500Name: (CordaX500Name) -> Party?,
wellKnownPartyFromAnonymous: (AbstractParty) -> Party?, wellKnownPartyFromAnonymous: (AbstractParty) -> Party?,
schemaService: SchemaService = NodeSchemaService()): CordaPersistence = schemaService: SchemaService = NodeSchemaService()): CordaPersistence {
createCordaPersistence(databaseConfig, wellKnownPartyFromX500Name, wellKnownPartyFromAnonymous, schemaService) val isH2Database = isH2Database(hikariProperties.getProperty("dataSource.url", ""))
.apply { startHikariPool(hikariProperties, databaseConfig, schemaService) } val schemas = if (isH2Database) NodeSchemaService().internalSchemas() else schemaService.schemaOptions.keys
return createCordaPersistence(databaseConfig, wellKnownPartyFromX500Name, wellKnownPartyFromAnonymous, schemaService)
.apply { startHikariPool(hikariProperties, databaseConfig, schemas) }
}
fun createCordaPersistence(databaseConfig: DatabaseConfig, fun createCordaPersistence(databaseConfig: DatabaseConfig,
wellKnownPartyFromX500Name: (CordaX500Name) -> Party?, wellKnownPartyFromX500Name: (CordaX500Name) -> Party?,
@ -1062,22 +1069,18 @@ fun createCordaPersistence(databaseConfig: DatabaseConfig,
return CordaPersistence(databaseConfig, schemaService.schemaOptions.keys, attributeConverters) return CordaPersistence(databaseConfig, schemaService.schemaOptions.keys, attributeConverters)
} }
fun CordaPersistence.startHikariPool(hikariProperties: Properties, databaseConfig: DatabaseConfig, schemaService: SchemaService) { fun CordaPersistence.startHikariPool(hikariProperties: Properties, databaseConfig: DatabaseConfig, schemas: Set<MappedSchema>) {
try { try {
val dataSource = DataSourceFactory.createDataSource(hikariProperties) val dataSource = DataSourceFactory.createDataSource(hikariProperties)
val jdbcUrl = hikariProperties.getProperty("dataSource.url", "") val jdbcUrl = hikariProperties.getProperty("dataSource.url", "")
val schemaMigration = SchemaMigration( val schemaMigration = SchemaMigration(schemas, dataSource, databaseConfig)
schemaService.schemaOptions.keys, schemaMigration.nodeStartup(dataSource.connection.use { DBCheckpointStorage().getCheckpointCount(it) != 0L }, isH2Database(jdbcUrl))
dataSource,
!isH2Database(jdbcUrl),
databaseConfig
)
schemaMigration.nodeStartup(dataSource.connection.use { DBCheckpointStorage().getCheckpointCount(it) != 0L })
start(dataSource, jdbcUrl) start(dataSource, jdbcUrl)
} catch (ex: Exception) { } catch (ex: Exception) {
when { when {
ex is HikariPool.PoolInitializationException -> throw CouldNotCreateDataSourceException("Could not connect to the database. Please check your JDBC connection URL, or the connectivity to the database.", ex) ex is HikariPool.PoolInitializationException -> throw CouldNotCreateDataSourceException("Could not connect to the database. Please check your JDBC connection URL, or the connectivity to the database.", ex)
ex.cause is ClassNotFoundException -> throw CouldNotCreateDataSourceException("Could not find the database driver class. Please add it to the 'drivers' folder. See: https://docs.corda.net/corda-configuration-file.html") ex.cause is ClassNotFoundException -> throw CouldNotCreateDataSourceException("Could not find the database driver class. Please add it to the 'drivers' folder. See: https://docs.corda.net/corda-configuration-file.html")
ex is OutstandingDatabaseChangesException -> throw (DatabaseIncompatibleException(ex.message))
ex is DatabaseIncompatibleException -> throw ex ex is DatabaseIncompatibleException -> throw ex
else -> throw CouldNotCreateDataSourceException("Could not create the DataSource: ${ex.message}", ex) else -> throw CouldNotCreateDataSourceException("Could not create the DataSource: ${ex.message}", ex)
} }

View File

@ -29,7 +29,6 @@ object NodeInfoSchemaV1 : MappedSchema(
version = 1, version = 1,
mappedTypes = listOf(PersistentNodeInfo::class.java, DBPartyAndCertificate::class.java, DBHostAndPort::class.java, NodePropertiesPersistentStore.DBNodeProperty::class.java) mappedTypes = listOf(PersistentNodeInfo::class.java, DBPartyAndCertificate::class.java, DBHostAndPort::class.java, NodePropertiesPersistentStore.DBNodeProperty::class.java)
) { ) {
override val migrationResource = "node-info.changelog-master" override val migrationResource = "node-info.changelog-master"
@Entity @Entity
@ -83,6 +82,7 @@ object NodeInfoSchemaV1 : MappedSchema(
@GeneratedValue @GeneratedValue
@Column(name = "hosts_id", nullable = false) @Column(name = "hosts_id", nullable = false)
var id: Int, var id: Int,
@Column(name = "host_name")
val host: String? = null, val host: String? = null,
val port: Int? = null val port: Int? = null
) { ) {

View File

@ -30,7 +30,6 @@ interface CheckpointStorage {
*/ */
fun updateCheckpoint(id: StateMachineRunId, checkpoint: SerializedBytes<Checkpoint>) fun updateCheckpoint(id: StateMachineRunId, checkpoint: SerializedBytes<Checkpoint>)
/** /**
* Remove existing checkpoint from the store. * Remove existing checkpoint from the store.
* @return whether the id matched a checkpoint that was removed. * @return whether the id matched a checkpoint that was removed.

View File

@ -18,16 +18,16 @@ import net.corda.node.services.statemachine.Checkpoint
import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX
import net.corda.nodeapi.internal.persistence.currentDBSession import net.corda.nodeapi.internal.persistence.currentDBSession
import org.apache.commons.lang.ArrayUtils.EMPTY_BYTE_ARRAY import org.apache.commons.lang.ArrayUtils.EMPTY_BYTE_ARRAY
import org.hibernate.annotations.Type
import org.slf4j.Logger import org.slf4j.Logger
import org.slf4j.LoggerFactory import org.slf4j.LoggerFactory
import java.sql.Connection
import java.sql.SQLException
import java.util.* import java.util.*
import java.util.stream.Stream import java.util.stream.Stream
import javax.persistence.Column import javax.persistence.Column
import javax.persistence.Entity import javax.persistence.Entity
import javax.persistence.Id import javax.persistence.Id
import org.hibernate.annotations.Type
import java.sql.Connection
import java.sql.SQLException
/** /**
* Simple checkpoint key value storage in DB. * Simple checkpoint key value storage in DB.

View File

@ -40,7 +40,7 @@ import net.corda.node.services.vault.VaultSchemaV1
* TODO: support plugins for schema version upgrading or custom mapping not supported by original [QueryableState]. * TODO: support plugins for schema version upgrading or custom mapping not supported by original [QueryableState].
* TODO: create whitelisted tables when a CorDapp is first installed * TODO: create whitelisted tables when a CorDapp is first installed
*/ */
class NodeSchemaService(extraSchemas: Set<MappedSchema> = emptySet(), includeNotarySchemas: Boolean = false) : SchemaService, SingletonSerializeAsToken() { class NodeSchemaService(private val extraSchemas: Set<MappedSchema> = emptySet(), includeNotarySchemas: Boolean = false) : SchemaService, SingletonSerializeAsToken() {
// Core Entities used by a Node // Core Entities used by a Node
object NodeCore object NodeCore
@ -77,10 +77,13 @@ class NodeSchemaService(extraSchemas: Set<MappedSchema> = emptySet(), includeNot
mapOf(Pair(CommonSchemaV1, SchemaOptions()), mapOf(Pair(CommonSchemaV1, SchemaOptions()),
Pair(VaultSchemaV1, SchemaOptions()), Pair(VaultSchemaV1, SchemaOptions()),
Pair(NodeInfoSchemaV1, SchemaOptions()), Pair(NodeInfoSchemaV1, SchemaOptions()),
Pair(NodeCoreV1, SchemaOptions())) Pair(NodeCoreV1, SchemaOptions())) +
private val notarySchemas = if (includeNotarySchemas) mapOf(Pair(NodeNotaryV1, SchemaOptions())) else emptyMap<MappedSchema, SchemaService.SchemaOptions>() if (includeNotarySchemas) mapOf(Pair(NodeNotaryV1, SchemaOptions())) else emptyMap()
override val schemaOptions: Map<MappedSchema, SchemaService.SchemaOptions> = requiredSchemas + notarySchemas + extraSchemas.associateBy({ it }, { SchemaOptions() }) fun internalSchemas() = requiredSchemas.keys + extraSchemas.filter { schema -> // when mapped schemas from the finance module are present, they are considered as internal ones
schema::class.simpleName == "net.corda.finance.schemas.CashSchemaV1" || schema::class.simpleName == "net.corda.finance.schemas.CommercialPaperSchemaV1" }
override val schemaOptions: Map<MappedSchema, SchemaService.SchemaOptions> = requiredSchemas + extraSchemas.associateBy({ it }, { SchemaOptions() })
// Currently returns all schemas supported by the state, with no filtering or enrichment. // Currently returns all schemas supported by the state, with no filtering or enrichment.
override fun selectSchemas(state: ContractState): Iterable<MappedSchema> { override fun selectSchemas(state: ContractState): Iterable<MappedSchema> {

View File

@ -19,6 +19,7 @@
<include file="migration/node-core.changelog-v5.xml"/> <include file="migration/node-core.changelog-v5.xml"/>
<include file="migration/node-core.changelog-pkey.xml"/> <include file="migration/node-core.changelog-pkey.xml"/>
<include file="migration/node-core.changelog-postgres-blob.xml"/> <include file="migration/node-core.changelog-postgres-blob.xml"/>
<include file="migration/node-core.changelog-v8.xml"/>
<include file="migration/node-core.changelog-tx-mapping.xml"/> <include file="migration/node-core.changelog-tx-mapping.xml"/>
</databaseChangeLog> </databaseChangeLog>

View File

@ -0,0 +1,17 @@
<?xml version="1.1" encoding="UTF-8" standalone="no"?>
<databaseChangeLog xmlns="http://www.liquibase.org/xml/ns/dbchangelog"
xmlns:ext="http://www.liquibase.org/xml/ns/dbchangelog-ext"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog-ext http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-ext.xsd http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.5.xsd">
<changeSet author="R3.Corda" id="conditional_attchments_rename">
<preConditions onFail="MARK_RAN"><tableExists tableName="NODE_ATTCHMENTS_CONTRACTS"/></preConditions>
<renameTable oldTableName="NODE_ATTCHMENTS_CONTRACTS" newTableName="NODE_ATTACHMENTS_CONTRACTS" />
</changeSet>
<changeSet author="R3.Corda" id="conditional_attchments_contracts">
<preConditions onFail="MARK_RAN"><tableExists tableName="NODE_ATTACHMENTS_CONTRACT_CLASS_NAME"/></preConditions>
<renameTable oldTableName="NODE_ATTACHMENTS_CONTRACT_CLASS_NAME" newTableName="NODE_ATTACHMENTS_CONTRACTS" />
</changeSet>
</databaseChangeLog>

View File

@ -17,5 +17,6 @@
<include file="migration/node-info.changelog-init.xml"/> <include file="migration/node-info.changelog-init.xml"/>
<include file="migration/node-info.changelog-v1.xml"/> <include file="migration/node-info.changelog-v1.xml"/>
<include file="migration/node-info.changelog-v2.xml"/> <include file="migration/node-info.changelog-v2.xml"/>
<include file="migration/node-info.changelog-v3.xml"/>
</databaseChangeLog> </databaseChangeLog>

View File

@ -0,0 +1,9 @@
<?xml version="1.1" encoding="UTF-8" standalone="no"?>
<databaseChangeLog xmlns="http://www.liquibase.org/xml/ns/dbchangelog"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.5.xsd">
<changeSet author="R3.Corda" id="column_host_name">
<renameColumn newColumnName="host_name" oldColumnName="host" tableName="node_info_hosts"/>
</changeSet>
</databaseChangeLog>

View File

@ -21,7 +21,6 @@ import net.corda.core.identity.Party;
import net.corda.core.messaging.DataFeed; import net.corda.core.messaging.DataFeed;
import net.corda.core.node.services.IdentityService; import net.corda.core.node.services.IdentityService;
import net.corda.core.node.services.Vault; import net.corda.core.node.services.Vault;
import net.corda.core.node.services.VaultQueryException;
import net.corda.core.node.services.VaultService; import net.corda.core.node.services.VaultService;
import net.corda.core.node.services.vault.*; import net.corda.core.node.services.vault.*;
import net.corda.core.node.services.vault.QueryCriteria.LinearStateQueryCriteria; import net.corda.core.node.services.vault.QueryCriteria.LinearStateQueryCriteria;
@ -44,7 +43,6 @@ import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.junit.Rule; import org.junit.Rule;
import org.junit.Test; import org.junit.Test;
import rx.Observable;
import java.util.*; import java.util.*;
import java.util.stream.Collectors; import java.util.stream.Collectors;

View File

@ -16,7 +16,8 @@ import net.corda.core.contracts.UniqueIdentifier
import net.corda.core.identity.AbstractParty import net.corda.core.identity.AbstractParty
import net.corda.core.schemas.CommonSchemaV1 import net.corda.core.schemas.CommonSchemaV1
import net.corda.core.schemas.MappedSchema import net.corda.core.schemas.MappedSchema
import net.corda.node.internal.configureDatabase import net.corda.node.internal.createCordaPersistence
import net.corda.node.internal.startHikariPool
import net.corda.node.services.schema.NodeSchemaService import net.corda.node.services.schema.NodeSchemaService
import net.corda.nodeapi.internal.persistence.CordaPersistence import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.DatabaseConfig import net.corda.nodeapi.internal.persistence.DatabaseConfig
@ -32,9 +33,16 @@ import javax.persistence.*
import java.net.URLClassLoader import java.net.URLClassLoader
import java.nio.file.Files import java.nio.file.Files
import java.nio.file.Path import java.nio.file.Path
import java.util.*
class SchemaMigrationTest { class SchemaMigrationTest {
private fun configureDatabase(hikariProperties: Properties,
databaseConfig: DatabaseConfig,
schemaService: NodeSchemaService = NodeSchemaService()): CordaPersistence =
createCordaPersistence(databaseConfig, { null }, { null }, schemaService)
.apply { startHikariPool(hikariProperties, databaseConfig, schemaService.schemaOptions.keys) }
@Test @Test
fun `Ensure that runMigration is disabled by default`() { fun `Ensure that runMigration is disabled by default`() {
assertThat(DatabaseConfig().runMigration).isFalse() assertThat(DatabaseConfig().runMigration).isFalse()
@ -43,14 +51,14 @@ class SchemaMigrationTest {
@Test @Test
fun `Migration is run when runMigration is disabled, and database is H2`() { fun `Migration is run when runMigration is disabled, and database is H2`() {
val dataSourceProps = MockServices.makeTestDataSourceProperties() val dataSourceProps = MockServices.makeTestDataSourceProperties()
val db = configureDatabase(dataSourceProps, DatabaseConfig(runMigration = false), { null }, { null }) val db = configureDatabase(dataSourceProps, DatabaseConfig(runMigration = false))
checkMigrationRun(db) checkMigrationRun(db)
} }
@Test @Test
fun `Migration is run when runMigration is enabled`() { fun `Migration is run when runMigration is enabled`() {
val dataSourceProps = MockServices.makeTestDataSourceProperties() val dataSourceProps = MockServices.makeTestDataSourceProperties()
val db = configureDatabase(dataSourceProps, DatabaseConfig(runMigration = true), { null }, { null }) val db = configureDatabase(dataSourceProps, DatabaseConfig(runMigration = true))
checkMigrationRun(db) checkMigrationRun(db)
} }
@ -60,11 +68,11 @@ class SchemaMigrationTest {
val dataSourceProps = MockServices.makeTestDataSourceProperties() val dataSourceProps = MockServices.makeTestDataSourceProperties()
//run the migration on the database //run the migration on the database
val migration = SchemaMigration(schemaService.schemaOptions.keys, HikariDataSource(HikariConfig(dataSourceProps)), true, DatabaseConfig()) val migration = SchemaMigration(schemaService.schemaOptions.keys, HikariDataSource(HikariConfig(dataSourceProps)), DatabaseConfig())
migration.runMigration(false) migration.runMigration(false)
//start the node with "runMigration = false" and check that it started correctly //start the node with "runMigration = false" and check that it started correctly
val db = configureDatabase(dataSourceProps, DatabaseConfig(runMigration = false), { null }, { null }, schemaService) val db = configureDatabase(dataSourceProps, DatabaseConfig(runMigration = false), schemaService)
checkMigrationRun(db) checkMigrationRun(db)
} }
@ -78,7 +86,7 @@ class SchemaMigrationTest {
addToClassPath(tmpFolder) addToClassPath(tmpFolder)
// run the migrations for DummyTestSchemaV1, which should pick up the migration file // run the migrations for DummyTestSchemaV1, which should pick up the migration file
val db = configureDatabase(dataSourceProps, DatabaseConfig(runMigration = true), { null }, { null }, NodeSchemaService(extraSchemas = setOf(DummyTestSchemaV1))) val db = configureDatabase(dataSourceProps, DatabaseConfig(runMigration = true), NodeSchemaService(extraSchemas = setOf(DummyTestSchemaV1)))
// check that the file was picked up // check that the file was picked up
val nrOfChangesOnDiscoveredFile = db.dataSource.connection.use { val nrOfChangesOnDiscoveredFile = db.dataSource.connection.use {

View File

@ -110,6 +110,7 @@ open class MockServices private constructor(
initialIdentity: TestIdentity, initialIdentity: TestIdentity,
networkParameters: NetworkParameters = testNetworkParameters(), networkParameters: NetworkParameters = testNetworkParameters(),
vararg moreKeys: KeyPair): Pair<CordaPersistence, MockServices> { vararg moreKeys: KeyPair): Pair<CordaPersistence, MockServices> {
val cordappLoader = cordappLoaderForPackages(cordappPackages) val cordappLoader = cordappLoaderForPackages(cordappPackages)
val dataSourceProps = makeInternalTestDataSourceProperties(initialIdentity.name.organisation, SecureHash.randomSHA256().toString()) val dataSourceProps = makeInternalTestDataSourceProperties(initialIdentity.name.organisation, SecureHash.randomSHA256().toString())
val schemaService = NodeSchemaService(cordappLoader.cordappSchemas) val schemaService = NodeSchemaService(cordappLoader.cordappSchemas)

View File

@ -169,12 +169,12 @@ private fun handleCommand(options: OptionSet, baseDirectory: Path, configFile: P
val config = parsedConfig.parseAs(Configuration::class, UnknownConfigKeysPolicy.IGNORE::handle) val config = parsedConfig.parseAs(Configuration::class, UnknownConfigKeysPolicy.IGNORE::handle)
fun runMigrationCommand(withMigration: (SchemaMigration, DataSource) -> Unit): Unit = runWithDataSource(config, baseDirectory, classLoader) { dataSource -> fun runMigrationCommand(withMigration: (SchemaMigration, DataSource) -> Unit): Unit = runWithDataSource(config, baseDirectory, classLoader) { dataSource ->
withMigration(SchemaMigration(schemas, dataSource, true, config.database, classLoader), dataSource) withMigration(SchemaMigration(schemas, dataSource, config.database, classLoader), dataSource)
} }
when { when {
options.has(RELEASE_LOCK) -> runWithDataSource(ConfigFactory.parseFile(configFile.toFile()).resolve().parseAs(Configuration::class), baseDirectory, classLoader) { options.has(RELEASE_LOCK) -> runWithDataSource(ConfigFactory.parseFile(configFile.toFile()).resolve().parseAs(Configuration::class), baseDirectory, classLoader) {
SchemaMigration(emptySet(), it, true, config.database, Thread.currentThread().contextClassLoader).forceReleaseMigrationLock() SchemaMigration(emptySet(), it, config.database, Thread.currentThread().contextClassLoader).forceReleaseMigrationLock()
} }
options.has(DRY_RUN) -> { options.has(DRY_RUN) -> {
val writer = getMigrationOutput(baseDirectory, options) val writer = getMigrationOutput(baseDirectory, options)