Integrate db migration tool - liquibase (#150)
[ENT-996]: integrate Liquibase for data migration
@@ -18,8 +18,7 @@ const val NODE_DATABASE_PREFIX = "node_"

// This class forms part of the node config and so any changes to it must be handled with care
data class DatabaseConfig(
        val initialiseSchema: Boolean = true,
        val serverNameTablePrefix: String = "",
        val runMigration: Boolean = true,
        val transactionIsolationLevel: TransactionIsolationLevel = TransactionIsolationLevel.REPEATABLE_READ,
        val schema: String? = null,
        val exportHibernateJMXStatistics: Boolean = false
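For illustration only, a minimal Kotlin sketch of setting the new flag, using just the constructor shown in this hunk (the node-configuration plumbing that normally builds this object is not part of the diff):

    // Hypothetical example: disable the automatic Liquibase run so a generated script
    // can be reviewed and applied separately; other fields keep the defaults shown above.
    val dbConfig = DatabaseConfig(
            initialiseSchema = true,
            runMigration = false,
            schema = "corda_node"
    )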
@@ -47,6 +46,7 @@ class CordaPersistence(
) : Closeable {
    val defaultIsolationLevel = databaseConfig.transactionIsolationLevel
    val hibernateConfig: HibernateConfiguration by lazy {
        transaction {
            HibernateConfiguration(schemas, databaseConfig, attributeConverters)
        }
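This hunk only shows the lazily built HibernateConfiguration; where the new runMigration flag is consumed is not visible here. A hypothetical wiring sketch (not code from this commit), assuming access to the schemas, the DataSource and the SchemaMigration class added later in this commit:

    // Hypothetical sketch: run the Liquibase migration before Hibernate's schema validation.
    fun prepareDatabase(databaseConfig: DatabaseConfig, schemas: Set<MappedSchema>, dataSource: javax.sql.DataSource, db: CordaPersistence) {
        if (databaseConfig.runMigration) {
            SchemaMigration(schemas, dataSource).runMigration()
        }
        // Only afterwards force hibernateConfig, whose hbm2ddl "validate" setting expects the tables to exist.
        db.hibernateConfig
    }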
@@ -47,25 +47,17 @@ class HibernateConfiguration(
        logger.info("Creating session factory for schemas: $schemas")
        val serviceRegistry = BootstrapServiceRegistryBuilder().build()
        val metadataSources = MetadataSources(serviceRegistry)
        // We set a connection provider as the auto schema generation requires it. The auto schema generation will not
        // necessarily remain and would likely be replaced by something like Liquibase. For now it is very convenient though.
        // TODO: replace auto schema generation as it isn't intended for production use, according to Hibernate docs.
        val config = Configuration(metadataSources).setProperty("hibernate.connection.provider_class", NodeDatabaseConnectionProvider::class.java.name)
                .setProperty("hibernate.hbm2ddl.auto", if (databaseConfig.initialiseSchema) "update" else "validate")
                .setProperty("hibernate.format_sql", "true")
                .setProperty("hibernate.connection.isolation", databaseConfig.transactionIsolationLevel.jdbcValue.toString())

        if (databaseConfig.schema != null) {
            // This property helps 'hibernate.hbm2ddl.auto' to work properly when many schemas have similar table names.
            config.setProperty("hibernate.default_schema", databaseConfig.schema)
        }
        val config = Configuration(metadataSources).setProperty("hibernate.connection.provider_class", NodeDatabaseConnectionProvider::class.java.name)
                .setProperty("hibernate.hbm2ddl.auto", "validate")
                .setProperty("hibernate.connection.isolation", databaseConfig.transactionIsolationLevel.jdbcValue.toString())

        schemas.forEach { schema ->
            // TODO: require mechanism to set schemaOptions (databaseSchema, tablePrefix) which are not global to session
            schema.mappedTypes.forEach { config.addAnnotatedClass(it) }
        }

        val sessionFactory = buildSessionFactory(config, metadataSources, databaseConfig.serverNameTablePrefix)
        val sessionFactory = buildSessionFactory(config, metadataSources)
        logger.info("Created session factory for schemas: $schemas")

        // export Hibernate JMX statistics
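The behavioural change in this hunk is that hbm2ddl.auto is now fixed to "validate": Hibernate no longer creates or alters tables, it only checks that the mapped tables and columns already exist, so the Liquibase migration must have run first. A hypothetical helper, not part of the commit, illustrating how that failure mode could be surfaced:

    import org.hibernate.HibernateException
    import org.hibernate.SessionFactory

    // Hypothetical sketch: with "validate", a missing table fails fast when the session
    // factory is built instead of being silently created by hbm2ddl "update".
    fun buildOrExplain(build: () -> SessionFactory): SessionFactory =
            try {
                build()
            } catch (e: HibernateException) {
                throw IllegalStateException("Schema validation failed - run the database migration first", e)
            }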
@@ -92,15 +84,9 @@ class HibernateConfiguration(
        }
    }

    private fun buildSessionFactory(config: Configuration, metadataSources: MetadataSources, tablePrefix: String): SessionFactory {
    private fun buildSessionFactory(config: Configuration, metadataSources: MetadataSources): SessionFactory {
        config.standardServiceRegistryBuilder.applySettings(config.properties)
        val metadata = metadataSources.getMetadataBuilder(config.standardServiceRegistryBuilder.build()).run {
            applyPhysicalNamingStrategy(object : PhysicalNamingStrategyStandardImpl() {
                override fun toPhysicalTableName(name: Identifier?, context: JdbcEnvironment?): Identifier {
                    val default = super.toPhysicalTableName(name, context)
                    return Identifier.toIdentifier(tablePrefix + default.text, default.isQuoted)
                }
            })
            // register custom converters
            attributeConverters.forEach { applyAttributeConverter(it) }
            // Register a tweaked version of `org.hibernate.type.MaterializedBlobType` that truncates logged messages.
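The retained applyAttributeConverter call registers any JPA converters supplied to HibernateConfiguration. A purely illustrative converter of the kind that could be passed in attributeConverters (this class is hypothetical and not part of the commit):

    import javax.persistence.AttributeConverter
    import javax.persistence.Converter

    // Hypothetical example converter: stores a Boolean as "Y"/"N" in the database.
    @Converter(autoApply = true)
    class YesNoConverter : AttributeConverter<Boolean, String> {
        override fun convertToDatabaseColumn(attribute: Boolean?): String = if (attribute == true) "Y" else "N"
        override fun convertToEntityAttribute(dbData: String?): Boolean = dbData == "Y"
    }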
@@ -0,0 +1,84 @@
package net.corda.nodeapi.internal.persistence

import com.fasterxml.jackson.databind.ObjectMapper
import liquibase.Contexts
import liquibase.Liquibase
import liquibase.database.Database
import liquibase.database.DatabaseFactory
import liquibase.database.core.MSSQLDatabase
import liquibase.database.jvm.JdbcConnection
import liquibase.resource.ClassLoaderResourceAccessor
import net.corda.core.schemas.MappedSchema
import net.corda.core.schemas.getMigrationResource
import java.io.*
import javax.sql.DataSource

private const val MIGRATION_PREFIX = "migration"

class SchemaMigration(val schemas: Set<MappedSchema>, val dataSource: DataSource) {

    fun generateMigrationScript(outputFile: File) = doRunMigration(PrintWriter(outputFile))

    fun runMigration() = doRunMigration()

    private fun doRunMigration(outputWriter: Writer? = null) {

        // virtual file name of the changelog that includes all schemas
        val dynamicInclude = "master.changelog.json"

        dataSource.connection.use { connection ->

            // create a resource accessor that aggregates the changelogs included in the schemas into one dynamic stream
            val customResourceAccessor = object : ClassLoaderResourceAccessor() {
                override fun getResourcesAsStream(path: String): Set<InputStream> {

                    if (path == dynamicInclude) {
                        // collect all changelog files referenced in the included schemas
                        val changelogList = schemas.map { mappedSchema ->
                            getMigrationResource(mappedSchema).let {
                                "${MIGRATION_PREFIX}/${it}.xml"
                            }
                        }

                        // create a map in Liquibase format including all migration files
                        val includeAllFiles = mapOf("databaseChangeLog" to changelogList.map { file -> mapOf("include" to mapOf("file" to file)) })

                        // transform it to JSON
                        val includeAllFilesJson = ObjectMapper().writeValueAsBytes(includeAllFiles)

                        // return the JSON as a stream
                        return setOf(ByteArrayInputStream(includeAllFilesJson))
                    }
                    return super.getResourcesAsStream(path)?.take(1)?.toSet() ?: emptySet()
                }
            }

            val liquibase = Liquibase(dynamicInclude, customResourceAccessor, getLiquibaseDatabase(JdbcConnection(connection)))

            if (outputWriter != null) {
                liquibase.update(Contexts(), outputWriter)
            } else {
                liquibase.update(Contexts())
            }
        }
    }

    private fun getLiquibaseDatabase(conn: JdbcConnection): Database {

        // the standard MSSQLDatabase in Liquibase does not support sequences for MS Azure;
        // this class just overrides that behaviour
        class AzureDatabase(conn: JdbcConnection) : MSSQLDatabase() {
            init {
                this.connection = conn
            }

            override fun getShortName(): String = "azure"

            override fun supportsSequences(): Boolean = true
        }

        val liquibaseDbImplementation = DatabaseFactory.getInstance().findCorrectDatabaseImplementation(conn)

        return if (liquibaseDbImplementation is MSSQLDatabase) AzureDatabase(conn) else liquibaseDbImplementation
    }
}
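As a usage note, the public surface of the new class can be exercised as in the sketch below; the wiring that supplies the schemas and the DataSource sits outside this diff:

    // Hypothetical usage; `schemas` and `dataSource` come from the node configuration.
    // Apply all pending changesets directly against the database:
    SchemaMigration(schemas, dataSource).runMigration()
    // Or write the SQL that would be executed to a file for review instead of running it:
    SchemaMigration(schemas, dataSource).generateMigrationScript(java.io.File("migration.sql"))

    // The synthetic master.changelog.json served by the custom resource accessor is ordinary
    // Liquibase JSON; for two schemas it would look roughly like (file names illustrative,
    // since they depend on what getMigrationResource returns for each MappedSchema):
    // {"databaseChangeLog":[{"include":{"file":"migration/example-schema-a.xml"}},
    //                       {"include":{"file":"migration/example-schema-b.xml"}}]}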