Mirror of https://github.com/corda/corda.git
ENT-1282: create standalone db migration tool (#356)
* ENT-1282: add standalone db migration tool that works with both the node and the doorman
* ENT-1282: remove cmd line args during node startup
* ENT-1282: more clear message
* ENT-1282: fix integration test
* ENT-1282: more fixes and cleanup code
* ENT-1282: address PR comments
* ENT-1282: more fixes and refactoring
* ENT-1282: more classloader fixes
* ENT-1282: changes after demo feedback
* ENT-1282: update API
* ENT-1282: update documentation
* ENT-1282: formatting
* ENT-1282: added CONSOLE option for dry-run and logging
* ENT-1282: documentation changes
* ENT-1282: remove getMigrationResource from the public API
* ENT-1282: removed dependency on network-manager, added release-lock
* ENT-1282: Update documentation
This commit is contained in: parent a564c9bfd3, commit cef1f9885c
@@ -2702,9 +2702,6 @@ public class net.corda.core.schemas.MappedSchema extends java.lang.Object
 public final void setTxId(String)
 public String toString()
 ##
-public final class net.corda.core.schemas.PersistentTypesKt extends java.lang.Object
-@org.jetbrains.annotations.Nullable public static final String getMigrationResource(net.corda.core.schemas.MappedSchema)
-##
 @net.corda.core.serialization.CordaSerializable public interface net.corda.core.schemas.QueryableState extends net.corda.core.contracts.ContractState
 @org.jetbrains.annotations.NotNull public abstract net.corda.core.schemas.PersistentState generateMappedObject(net.corda.core.schemas.MappedSchema)
 @org.jetbrains.annotations.NotNull public abstract Iterable supportedSchemas()
.idea/compiler.xml (generated, 2 additions)
@@ -42,6 +42,8 @@
 <module name="core_main" target="1.8" />
 <module name="core_smokeTest" target="1.8" />
 <module name="core_test" target="1.8" />
+<module name="dbmigration_main" target="1.8" />
+<module name="dbmigration_test" target="1.8" />
 <module name="demobench_main" target="1.8" />
 <module name="demobench_test" target="1.8" />
 <module name="docs_main" target="1.8" />
@@ -69,6 +69,8 @@ buildscript {
     ext.shiro_version = '1.4.0'
     ext.artifactory_plugin_version = constants.getProperty('artifactoryPluginVersion')
+    ext.liquibase_version = '3.5.3'
     ext.shadow_version = '2.0.2'
+    ext.hikari_version = '2.5.1'

     // Update 121 is required for ObjectInputFilter and at time of writing 131 was latest:
     ext.java8_minUpdateVersion = '131'
@@ -0,0 +1,31 @@
import com.google.common.base.CaseFormat
import net.corda.core.schemas.MappedSchema

object MigrationHelpers {
    private const val MIGRATION_PREFIX = "migration"
    private const val DEFAULT_MIGRATION_EXTENSION = "xml"
    private const val CHANGELOG_NAME = "changelog-master"
    private val possibleMigrationExtensions = listOf(".xml", ".sql", ".yml", ".json")

    fun getMigrationResource(schema: MappedSchema, classLoader: ClassLoader): String? {
        val declaredMigration = schema.getMigrationResource()

        if (declaredMigration == null) {
            // try to apply the naming convention and find the migration file in the classpath
            val resource = migrationResourceNameForSchema(schema)
            return possibleMigrationExtensions.map { "${resource}${it}" }.firstOrNull {
                classLoader.getResource(it) != null
            }
        }

        return "${MIGRATION_PREFIX}/${declaredMigration}.${DEFAULT_MIGRATION_EXTENSION}"
    }

    // the schema name is transformed from camel case to lower-hyphen,
    // then ".changelog-master" is appended
    fun migrationResourceNameForSchema(schema: MappedSchema): String {
        val name: String = schema::class.simpleName!!
        val fileName = CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_HYPHEN, name)
        return "${MIGRATION_PREFIX}/${fileName}.${CHANGELOG_NAME}"
    }
}
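As a quick check of the convention encoded above, the helper can be exercised directly. A minimal sketch (the schema object is hypothetical, and it assumes MigrationHelpers sits in the default package as in this file):

```kotlin
import net.corda.core.schemas.MappedSchema

// Hypothetical schema class, for illustration only.
object CommercialPaperSchemaV1 : MappedSchema(
        schemaFamily = CommercialPaperSchemaV1::class.java,
        version = 1,
        mappedTypes = emptyList())

fun main() {
    // UPPER_CAMEL -> LOWER_HYPHEN: "CommercialPaperSchemaV1" becomes "commercial-paper-schema-v1"
    println(MigrationHelpers.migrationResourceNameForSchema(CommercialPaperSchemaV1))
    // expected output: migration/commercial-paper-schema-v1.changelog-master
}
```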
@@ -78,31 +78,4 @@ data class PersistentStateRef(
 /**
  * Marker interface to denote a persistable Corda state entity that will always have a transaction id and index
  */
 interface StatePersistable
-
-private const val MIGRATION_PREFIX = "migration"
-private const val DEFAULT_MIGRATION_EXTENSION = "xml"
-private const val CHANGELOG_NAME = "changelog-master"
-private val possibleMigrationExtensions = listOf(".xml", ".sql", ".yml", ".json")
-
-fun getMigrationResource(schema: MappedSchema): String? {
-    val declaredMigration = schema.getMigrationResource()
-
-    if (declaredMigration == null) {
-        // try to apply the naming convention and find the migration file in the classpath
-        val resource = migrationResourceNameForSchema(schema)
-        return possibleMigrationExtensions.map { "${resource}${it}" }.firstOrNull {
-            Thread.currentThread().contextClassLoader.getResource(it) != null
-        }
-    }
-
-    return "${MIGRATION_PREFIX}/${declaredMigration}.${DEFAULT_MIGRATION_EXTENSION}"
-}
-
-// SchemaName will be transformed from camel case to lower_hyphen
-// then add ".changelog-master"
-fun migrationResourceNameForSchema(schema: MappedSchema): String {
-    val name: String = schema::class.simpleName!!
-    val fileName = CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_HYPHEN, name)
-    return "${MIGRATION_PREFIX}/${fileName}.${CHANGELOG_NAME}"
-}
@@ -150,19 +150,31 @@ which is then referenced within a custom flow:
 Database Migration
 ==================

 As a database migration tool, we use the open source library liquibase <http://www.liquibase.org/>.

-If migration is enabled (using the `database.runMigration` node configuration), the database state is checked (and updated) during node startup.
-For example, after deploying a new version of the code that contains database migrations (see example below for a possible scenario), they are executed at that point (during startup).
-Possible database changes range from schema changes to data changes (organized on changesets)
+Migration is enabled by specifying true in the ``database.runMigration`` node configuration setting (default behaviour is false).
+When enabled, the database state is checked, and updated during node startup.
+
+The default behaviour (``database.runMigration=false``) is to just check the database state, and fail if it is not up to date. To bring the database to the correct state we provide an advanced migration tool. See below for details.
+
+For example, if migration is enabled, after deploying a new version of the code that contains database migrations (see example below for a possible scenario), they are executed at that point (during startup).
+
+Possible database changes range from schema changes to data changes. (The database changes are grouped together in `changesets`. See the example below.)
+
+About Liquibase
+---------------

 Liquibase will create a table called ``DATABASECHANGELOG``, that will store information about each executed change (like timestamp, description, user, md5 hash so it can't be changed, etc.).
-This table will be used every time a migration is run to determine what changesets need to be applied.
+This table will be used every time a migration command is run to determine what changesets need to be applied.
 Changesets should never be modified once they have been executed. Any correction should be applied in a new changeset.
 We can also "tag" the database at each release to make rollback easier.

-Database changes are maintained in several xml files per ``MappedSchema``, so that only migrations corresponding to the node's configured schemas are run.
-By following our convention, and using the node-info schema as an example, if there are any database changes for release 12, the changes will be added to a new file called: ``node-info.changelog-v12.xml`` which has to be included in ``node-info.changelog-master.xml``.
+The migration file(s) for all ``MappedSchemas`` are dynamically included in the global changelog, as long as they are present on the classpath and are either explicitly declared in the ``MappedSchema`` implementation, or follow a naming convention based on the ``MappedSchema`` name.
+(The migration tool that we provide can generate liquibase files with the correct name for a schema.)
+
+Our convention is to maintain a "master" changelog file per ``MappedSchema`` which will include "version" changelogs.
+By following our versioning convention, and using the node-info schema as an example, if there are any database changes for release 12, the changes will be added to a new file called: ``node-info.changelog-v12.xml`` which has to be included in ``node-info.changelog-master.xml``.

Example:
@@ -234,14 +246,35 @@ Usage:

 Configurations:

 - To enable migration at startup, set:
-    - ``database.runMigration = true`` // false by default,
+    - ``database.runMigration = true`` // false by default.

-Command line arguments:
+Migration tool:
+---------------

-- To export the migration to a file use `--just-generate-db-migration outputSqlFile`. This will generate the delta from the last release, and will output the resulting sql into the outputSqlFile. It will not write to the db. It will not start the node! (default value for `outputSqlFile` is a `.sql` file with the current date)
-
-- To run the migration without starting the node: `--just-run-db-migration`
+The migration tool will be distributed as a standalone jar file, with the following options:
+
+.. table::
+
+   ==================================== =======================================================================
+   Option                               Description
+   ==================================== =======================================================================
+   --help                               Print help message
+   --mode                               Either 'NODE' or 'DOORMAN'. By default 'NODE'
+   --base-directory                     The node or doorman directory
+   --config-file                        The name of the config file. By default 'node.conf' for a simple node and 'network-management.conf' for a doorman.
+   --doorman-jar-path                   The path to the doorman fat jar
+   --create-migration-sql-for-cordapp   Create migration files for a CorDapp. You can specify the fully qualified name of the ``MappedSchema`` class. If not specified, it will generate files for all schemas that don't have migrations. The output directory is the base-directory, where a ``migration`` folder is created.
+   --dry-run                            Output the database migration to the specified output file. The output directory is the base-directory. You can specify a file name or 'CONSOLE' if you want to send the output to the console.
+   --execute-migration                  This option will run the db migration on the configured database
+   --release-lock                       Releases whatever locks are on the database change log table, in case shutdown failed.
+   ==================================== =======================================================================
+
+It is intended to be used by R3 Corda node administrators.
+Currently it has these features:
+
+- it allows running the migration on the database (``--execute-migration``)
+- offers the option to inspect the actual sql statements that will be run as part of the current migration (``--dry-run``)
+- can be used to release the migration lock (``--release-lock``)
+- when a CorDapp released by the open source community is ready to be deployed on a production node, using this tool it can be "upgraded" (``--create-migration-sql-for-cordapp``). See below for details.
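For example, a dry run that sends the pending SQL to the console might look like this (paths illustrative): ``java -jar migration-tool.jar --base-directory /opt/corda --dry-run CONSOLE``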
CorDapps:
---------

@@ -250,7 +283,7 @@ CorDapp developers who decide to store contract state in custom entities can cre

 There are 2 ways of associating a migration file with a schema:

 1) By overriding ``val migrationResource: String`` and pointing to a file that needs to be in the classpath
-2) By putting a file on the classpath in a `migration` package whose name is the hyphenated name of the schema. ( All supported file extensions will be appended to the name)
+2) By putting a file on the classpath in a `migration` package whose name is the hyphenated name of the schema. (All supported file extensions will be appended to the name)
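As an illustration of option 1, a minimal sketch of a schema declaring its migration resource explicitly (all names here are hypothetical):

```kotlin
import net.corda.core.schemas.MappedSchema

// Hypothetical CorDapp schema; "my-app.changelog-master" resolves to
// "migration/my-app.changelog-master.xml" (default prefix and extension),
// which must be present on the classpath.
object MyAppSchemaV1 : MappedSchema(
        schemaFamily = MyAppSchemaV1::class.java,
        version = 1,
        mappedTypes = emptyList()) {
    override val migrationResource = "my-app.changelog-master"
}
```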
 CorDapp developers can use any of the supported formats (xml, sql, json, yaml) for the migration files they create.
@@ -261,12 +294,14 @@ In case CorDapp developers don't distribute a CorDapp with migration files, then

 The following options are available:

 1) In case the organisation is running a demo or trial node on the default H2 database, then the CorDapp will just work when deployed by relying on the migration tool provided by hibernate, which is not intended for production.
-2) In case the organisation is running a production node with live data on an enterprise database, then they will have to manage the database migration for the CorDapp.
+2) In case the organisation is running a production node (with live data) on an enterprise database, then they will have to manage the database migration for the CorDapp.

 These are the steps to do this:
-- find out the name of the MappedSchema containing the new contract state entities and hyphenate it. For example:
-    - `CommercialPaperSchemaV1` -> `commercial-paper-schema-v1.changelog-master.xml`.
-    - `IOUSchemaV1` -> `i-o-u-schema-v1.changelog-master.xml`.
-- create a file with the preferred extension and add the migration in it (see <http://www.liquibase.org/documentation/index.html>). For DBAs, the Sql format might be very familiar (basically T-SQL with metadata). The first changeset should be just creating the tables, indexes, etc. The author name should not be `R3.Corda`, because this is reserved.
-- add the files to a folder named `migration` and create a jar (by convention it could be named: originalCorDappName-migration.jar), and deploy this jar together with the CorDapp
-- by using the `--just-generate-db-migration` flag, the migration can be tested by inspecting the generated sql.
+- deploy the CorDapp on your node (copy the jar into the `cordapps` folder)
+- find out the name of the MappedSchema containing the new contract state entities. For example: ``net.corda.finance.schemas.CommercialPaperSchemaV1``
+- call the migration tool: ``java -jar migration-tool.jar --base-directory path_to_node --create-migration-sql-for-cordapp net.corda.finance.schemas.CommercialPaperSchemaV1``
+- this will generate a file called ``commercial-paper-schema-v1.changelog-master.sql`` in a folder called ``migration`` in the `base-directory`
+- in case you don't specify the actual MappedSchema name, the tool will generate one sql file for each schema defined in the CorDapp
+- inspect the file(s) to make sure they are correct
+- create a jar with the `migration` folder (by convention it could be named: originalCorDappName-migration.jar), and deploy this jar together with the CorDapp
+- to make sure that the new migration will be used, run the migration tool in `dry-run` mode and inspect the output file
@@ -96,7 +96,7 @@ dependencies {
     compile "com.atlassian.fugue:fugue:2.6.1"

     // SQL connection pooling library
-    compile "com.zaxxer:HikariCP:2.5.1"
+    compile "com.zaxxer:HikariCP:${hikari_version}"

     // For H2 database support in persistence
     compile "com.h2database:h2:$h2_version"
@@ -34,6 +34,9 @@ dependencies {
     // For AMQP serialisation.
     compile "org.apache.qpid:proton-j:0.21.0"

     // SQL connection pooling library
     compile "com.zaxxer:HikariCP:$hikari_version"

+    // For db migration
+    compile "org.liquibase:liquibase-core:$liquibase_version"
+    runtime 'com.mattbertolini:liquibase-slf4j:2.0.0'
@@ -1,44 +1,51 @@
 package net.corda.node.services.persistence

+import MigrationHelpers.migrationResourceNameForSchema
 import net.corda.core.identity.AbstractParty
+import net.corda.core.internal.objectOrNewInstance
 import net.corda.core.schemas.MappedSchema
-import net.corda.core.schemas.migrationResourceNameForSchema
 import net.corda.nodeapi.internal.persistence.HibernateConfiguration
+import net.corda.nodeapi.internal.persistence.CordaPersistence
+import net.corda.nodeapi.internal.persistence.HibernateConfiguration.Companion.buildHibernateMetadata
 import org.hibernate.boot.Metadata
 import org.hibernate.boot.MetadataSources
 import org.hibernate.boot.registry.BootstrapServiceRegistryBuilder
+import org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl
+import org.hibernate.cfg.AvailableSettings.CONNECTION_PROVIDER
 import org.hibernate.cfg.Configuration
-import org.hibernate.dialect.Dialect
+import org.hibernate.cfg.Environment
+import org.hibernate.engine.jdbc.connections.internal.DatasourceConnectionProviderImpl
 import org.hibernate.tool.hbm2ddl.SchemaExport
 import org.hibernate.tool.schema.TargetType
 import java.io.File
 import java.nio.file.Path
 import java.sql.Types
 import java.util.*
 import javax.persistence.AttributeConverter
 import javax.persistence.Converter
+import javax.sql.DataSource

 /**
  * This is useful for CorDapp developers who want to enable migrations for
  * standard "Open Source" Corda CorDapps
  */
-object MigrationExporter {
+class MigrationExporter(val parent: Path, val datasourceProperties: Properties, val cordappClassLoader: ClassLoader, val dataSource: DataSource) {

-    const val LIQUIBASE_HEADER = "--liquibase formatted sql"
-    const val CORDA_USER = "R3.Corda.Generated"
-
-    fun generateMigrationForCorDapp(schemaName: String, parent: Path = File(".").toPath()): Path {
-        val schemaClass = Class.forName(schemaName)
-        val schemaObject = schemaClass.kotlin.objectInstance as MappedSchema
-        return generateMigrationForCorDapp(schemaObject, parent)
+    companion object {
+        const val LIQUIBASE_HEADER = "--liquibase formatted sql"
+        const val CORDA_USER = "R3.Corda.Generated"
     }

-    fun generateMigrationForCorDapp(mappedSchema: MappedSchema, parent: Path): Path {
+    fun generateMigrationForCorDapp(schemaName: String): Path {
+        val schemaClass = cordappClassLoader.loadClass(schemaName)
+        val schemaObject = schemaClass.kotlin.objectOrNewInstance() as MappedSchema
+        return generateMigrationForCorDapp(schemaObject)
+    }
+
+    fun generateMigrationForCorDapp(mappedSchema: MappedSchema): Path {

         //create hibernate metadata for MappedSchema
         val metadata = createHibernateMetadataForSchema(mappedSchema)

-        //create output file and add metadata
+        //create output file and add liquibase headers
         val outputFile = File(parent.toFile(), "${migrationResourceNameForSchema(mappedSchema)}.sql")
         outputFile.apply {
             parentFile.mkdirs()
@@ -63,12 +70,18 @@ object MigrationExporter {
     private fun createHibernateMetadataForSchema(mappedSchema: MappedSchema): Metadata {
         val metadataSources = MetadataSources(BootstrapServiceRegistryBuilder().build())
         val config = Configuration(metadataSources)
-        mappedSchema.mappedTypes.forEach { config.addAnnotatedClass(it) }
-        val regBuilder = config.standardServiceRegistryBuilder
-                .applySetting("hibernate.dialect", HibernateGenericDialect::class.java.name)
-        val metadataBuilder = metadataSources.getMetadataBuilder(regBuilder.build())
-
-        return HibernateConfiguration.buildHibernateMetadata(metadataBuilder, "",
+                .setProperty(CONNECTION_PROVIDER, DatasourceConnectionProviderImpl::class.java.name)
+
+        mappedSchema.mappedTypes.forEach { config.addAnnotatedClass(it) }
+
+        val registryBuilder = config.standardServiceRegistryBuilder
+                .addService(org.hibernate.boot.registry.classloading.spi.ClassLoaderService::class.java, ClassLoaderServiceImpl(cordappClassLoader))
+                .applySettings(config.properties)
+                .applySetting(Environment.DATASOURCE, dataSource)
+
+        val metadataBuilder = metadataSources.getMetadataBuilder(registryBuilder.build())
+
+        return buildHibernateMetadata(metadataBuilder, datasourceProperties.getProperty(CordaPersistence.DataSourceConfigTag.DATA_SOURCE_URL),
                 listOf(DummyAbstractPartyToX500NameAsStringConverter()))
     }

@@ -77,38 +90,7 @@ object MigrationExporter {
      */
     @Converter(autoApply = true)
     class DummyAbstractPartyToX500NameAsStringConverter : AttributeConverter<AbstractParty, String> {

         override fun convertToDatabaseColumn(party: AbstractParty?) = null

         override fun convertToEntityAttribute(dbData: String?) = null
     }
-
-    /**
-     * Simplified hibernate dialect used for generating liquibase migration files
-     */
-    class HibernateGenericDialect : Dialect() {
-        init {
-            registerColumnType(Types.BIGINT, "bigint")
-            registerColumnType(Types.BOOLEAN, "boolean")
-            registerColumnType(Types.BLOB, "blob")
-            registerColumnType(Types.CLOB, "clob")
-            registerColumnType(Types.DATE, "date")
-            registerColumnType(Types.FLOAT, "float")
-            registerColumnType(Types.TIME, "time")
-            registerColumnType(Types.TIMESTAMP, "timestamp")
-            registerColumnType(Types.VARCHAR, "varchar(\$l)")
-            registerColumnType(Types.BINARY, "binary")
-            registerColumnType(Types.BIT, "boolean")
-            registerColumnType(Types.CHAR, "char(\$l)")
-            registerColumnType(Types.DECIMAL, "decimal(\$p,\$s)")
-            registerColumnType(Types.NUMERIC, "decimal(\$p,\$s)")
-            registerColumnType(Types.DOUBLE, "double")
-            registerColumnType(Types.INTEGER, "integer")
-            registerColumnType(Types.LONGVARBINARY, "longvarbinary")
-            registerColumnType(Types.LONGVARCHAR, "longvarchar")
-            registerColumnType(Types.REAL, "real")
-            registerColumnType(Types.SMALLINT, "smallint")
-            registerColumnType(Types.TINYINT, "tinyint")
-        }
-    }
 }
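A sketch of how the refactored exporter might be driven standalone; the datasource properties and schema class name below are illustrative assumptions, not values from this commit:

```kotlin
import com.zaxxer.hikari.HikariConfig
import com.zaxxer.hikari.HikariDataSource
import net.corda.node.services.persistence.MigrationExporter
import java.nio.file.Paths
import java.util.Properties

fun main() {
    // Illustrative H2 datasource properties; a real run would use the node's own.
    val dataSourceProps = Properties().apply {
        setProperty("dataSourceClassName", "org.h2.jdbcx.JdbcDataSource")
        setProperty("dataSource.url", "jdbc:h2:mem:migration_example")
    }
    HikariDataSource(HikariConfig(dataSourceProps)).use { dataSource ->
        val exporter = MigrationExporter(
                parent = Paths.get("."),  // output root; a migration/ folder is created under it
                datasourceProperties = dataSourceProps,
                cordappClassLoader = Thread.currentThread().contextClassLoader,
                dataSource = dataSource)
        // Loads the named schema via the CorDapp classloader and writes
        // migration/<hyphenated-name>.changelog-master.sql (schema name hypothetical).
        exporter.generateMigrationForCorDapp("com.example.schemas.MySchemaV1")
    }
}
```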
@@ -1,5 +1,6 @@
 package net.corda.nodeapi.internal.persistence

+import MigrationHelpers.getMigrationResource
 import com.fasterxml.jackson.databind.ObjectMapper
 import liquibase.Contexts
 import liquibase.LabelExpression
@@ -8,14 +9,19 @@ import liquibase.database.Database
 import liquibase.database.DatabaseFactory
 import liquibase.database.core.MSSQLDatabase
 import liquibase.database.jvm.JdbcConnection
+import liquibase.lockservice.LockServiceFactory
 import liquibase.resource.ClassLoaderResourceAccessor
 import net.corda.core.schemas.MappedSchema
-import net.corda.core.schemas.getMigrationResource
 import net.corda.core.utilities.contextLogger
 import java.io.*
 import javax.sql.DataSource

-class SchemaMigration(val schemas: Set<MappedSchema>, val dataSource: DataSource, val failOnMigrationMissing: Boolean, private val databaseConfig: DatabaseConfig) {
+class SchemaMigration(
+        val schemas: Set<MappedSchema>,
+        val dataSource: DataSource,
+        val failOnMigrationMissing: Boolean,
+        private val databaseConfig: DatabaseConfig,
+        private val classLoader: ClassLoader = Thread.currentThread().contextClassLoader) {

     companion object {
         private val logger = contextLogger()
@@ -25,7 +31,12 @@ class SchemaMigration(val schemas: Set<MappedSchema>, val dataSource: DataSource
     /**
      * Main entry point to the schema migration.
      * Called during node startup.
      */
-    fun nodeStartup() = if (databaseConfig.runMigration) runMigration() else checkState()
+    fun nodeStartup() {
+        when {
+            databaseConfig.runMigration -> runMigration()
+            failOnMigrationMissing -> checkState()
+        }
+    }

    /**
     * will run the liquibase migration on the actual database
@@ -33,15 +44,24 @@ class SchemaMigration(val schemas: Set<MappedSchema>, val dataSource: DataSource
     fun runMigration() = doRunMigration(run = true, outputWriter = null, check = false)

     /**
-     * will write the migration to the outputFile
+     * will write the migration to a Writer
      */
-    fun generateMigrationScript(outputFile: File) = doRunMigration(run = false, outputWriter = PrintWriter(outputFile), check = false)
+    fun generateMigrationScript(writer: Writer) = doRunMigration(run = false, outputWriter = writer, check = false)

     /**
      * ensures that the database is up to date with the latest migration changes
      */
     fun checkState() = doRunMigration(run = false, outputWriter = null, check = true)

+    /**
+     * can be used from an external tool to release the lock in case something went terribly wrong
+     */
+    fun forceReleaseMigrationLock() {
+        dataSource.connection.use { connection ->
+            LockServiceFactory.getInstance().getLockService(getLiquibaseDatabase(JdbcConnection(connection))).forceReleaseLock()
+        }
+    }
+
     private fun doRunMigration(run: Boolean, outputWriter: Writer?, check: Boolean) {

         // virtual file name of the changelog that includes all schemas
@@ -52,7 +72,7 @@ class SchemaMigration(val schemas: Set<MappedSchema>, val dataSource: DataSource
         // collect all changelog files referenced in the included schemas
         // for backward compatibility reasons, when failOnMigrationMissing=false, we don't manage CorDapps via Liquibase but use the hibernate hbm2ddl=update
         val changelogList = schemas.map { mappedSchema ->
-            val resource = getMigrationResource(mappedSchema)
+            val resource = getMigrationResource(mappedSchema, classLoader)
             when {
                 resource != null -> resource
                 failOnMigrationMissing -> throw IllegalStateException("No migration defined for schema: ${mappedSchema.name} v${mappedSchema.version}")
@@ -64,7 +84,7 @@ class SchemaMigration(val schemas: Set<MappedSchema>, val dataSource: DataSource
         }

         // create a resource accessor that aggregates the changelogs included in the schemas into one dynamic stream
-        val customResourceAccessor = object : ClassLoaderResourceAccessor() {
+        val customResourceAccessor = object : ClassLoaderResourceAccessor(classLoader) {
             override fun getResourcesAsStream(path: String): Set<InputStream> {

                 if (path == dynamicInclude) {
@@ -103,7 +123,7 @@ class SchemaMigration(val schemas: Set<MappedSchema>, val dataSource: DataSource
             check && !run -> {
                 val unRunChanges = liquibase.listUnrunChangeSets(Contexts(), LabelExpression())
                 if (unRunChanges.isNotEmpty()) {
-                    throw Exception("There are ${unRunChanges.size} outstanding database changes that need to be run. Please use the provided tools to update the database.")
+                    throw IllegalStateException("There are ${unRunChanges.size} outstanding database changes that need to be run. Please use the provided tools to update the database.")
                 }
             }
             (outputWriter != null) && !check && !run -> liquibase.update(Contexts(), outputWriter)
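The reworked entry points make it possible to drive migrations from outside the node. A minimal sketch of a dry run, assuming a configured datasource and database config are at hand (all names illustrative, not the tool's actual wiring):

```kotlin
import com.zaxxer.hikari.HikariConfig
import com.zaxxer.hikari.HikariDataSource
import net.corda.core.schemas.MappedSchema
import net.corda.nodeapi.internal.persistence.DatabaseConfig
import net.corda.nodeapi.internal.persistence.SchemaMigration
import java.io.StringWriter
import java.util.Properties

fun dryRun(schemas: Set<MappedSchema>, dataSourceProps: Properties, databaseConfig: DatabaseConfig) {
    HikariDataSource(HikariConfig(dataSourceProps)).use { dataSource ->
        val migration = SchemaMigration(schemas, dataSource, failOnMigrationMissing = true, databaseConfig = databaseConfig)
        val writer = StringWriter()
        migration.generateMigrationScript(writer)  // writes the pending SQL; does not touch the database
        print(writer)
    }
}
```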
@@ -65,7 +65,7 @@ class SchemaMigrationTest {

         // create a migration file for the DummyTestSchemaV1 and add it to the classpath
         val tmpFolder = Files.createTempDirectory("test")
-        val fileName = MigrationExporter.generateMigrationForCorDapp(DummyTestSchemaV1, tmpFolder).fileName
+        val fileName = MigrationExporter(tmpFolder, dataSourceProps, Thread.currentThread().contextClassLoader, HikariDataSource(HikariConfig(dataSourceProps))).generateMigrationForCorDapp(DummyTestSchemaV1).fileName
         addToClassPath(tmpFolder)

         // run the migrations for DummyTestSchemaV1, which should pick up the migration file
@@ -132,7 +132,7 @@ dependencies {
     compile group: 'mysql', name: 'mysql-connector-java', version: '6.0.6'

     // SQL connection pooling library
-    compile "com.zaxxer:HikariCP:2.5.1"
+    compile "com.zaxxer:HikariCP:${hikari_version}"

     // Hibernate: an object relational mapper for writing state objects to the database automatically.
     compile "org.hibernate:hibernate-core:$hibernate_version"
@@ -37,16 +37,8 @@ class ArgsParser {
     private val noLocalShellArg = optionParser.accepts("no-local-shell", "Do not start the embedded shell locally.")
     private val isRegistrationArg = optionParser.accepts("initial-registration", "Start initial node registration with Corda network to obtain certificate from the permissioning server.")
     private val isVersionArg = optionParser.accepts("version", "Print the version and exit")
-    private val justRunDbMigrationArg = optionParser.accepts("just-run-db-migration",
-            "This will only run the db migration. It will not start the node!")
     private val justGenerateNodeInfoArg = optionParser.accepts("just-generate-node-info",
             "Perform the node start-up task necessary to generate its nodeInfo, save it to disk, then quit")
-    private val justGenerateDatabaseMigrationArg = optionParser
-            .accepts("just-generate-db-migration", "Generate the database migration in the specified output file, and then quit.")
-            .withOptionalArg()
-    private val justCreateMigrationForCorDappArg = optionParser
-            .accepts("just-create-migration-cordapp", "Create migration files for a CorDapp")
-            .withRequiredArg()
     private val bootstrapRaftClusterArg = optionParser.accepts("bootstrap-raft-cluster", "Bootstraps Raft cluster. The node forms a single node cluster (ignoring otherwise configured peer addresses), acting as a seed for other nodes to join the cluster.")
     private val helpArg = optionParser.accepts("help").forHelp()

@@ -65,15 +57,9 @@ class ArgsParser {
         val noLocalShell = optionSet.has(noLocalShellArg)
         val sshdServer = optionSet.has(sshdServerArg)
         val justGenerateNodeInfo = optionSet.has(justGenerateNodeInfoArg)
-        val justRunDbMigration = optionSet.has(justRunDbMigrationArg)
-        val generateDatabaseMigrationToFile = if (optionSet.has(justGenerateDatabaseMigrationArg))
-            Pair(true, optionSet.valueOf(justGenerateDatabaseMigrationArg) ?: "migration${SimpleDateFormat("yyyyMMddHHmmss").format(Date())}.sql")
-        else
-            Pair(false, null)
-        val createMigrationForCorDapp: String? = optionSet.valueOf(justCreateMigrationForCorDappArg)
         val bootstrapRaftCluster = optionSet.has(bootstrapRaftClusterArg)
         return CmdLineOptions(baseDirectory, configFile, help, loggingLevel, logToConsole, isRegistration, isVersion,
-                noLocalShell, sshdServer, justGenerateNodeInfo, justRunDbMigration, generateDatabaseMigrationToFile, bootstrapRaftCluster, createMigrationForCorDapp)
+                noLocalShell, sshdServer, justGenerateNodeInfo, bootstrapRaftCluster)
     }

     fun printHelp(sink: PrintStream) = optionParser.printHelpOn(sink)
@@ -89,10 +75,7 @@ data class CmdLineOptions(val baseDirectory: Path,
                           val noLocalShell: Boolean,
                           val sshdServer: Boolean,
                           val justGenerateNodeInfo: Boolean,
-                          val justRunDbMigration: Boolean,
-                          val generateDatabaseMigrationToFile: Pair<Boolean, String?>,
-                          val bootstrapRaftCluster: Boolean,
-                          val justCreateMigrationForCorDapp: String?) {
+                          val bootstrapRaftCluster: Boolean) {
     fun loadConfig(): NodeConfiguration {
         val config = ConfigHelper.loadConfig(baseDirectory, configFile).parseAsNodeConfiguration()
         if (isRegistration) {
@@ -204,20 +204,6 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
         // TODO: Migrate classes and factories from start method.
     }

-    fun generateDatabaseSchema(outputFile: String) {
-        HikariDataSource(HikariConfig(configuration.dataSourceProperties)).use { dataSource ->
-            val jdbcUrl = configuration.dataSourceProperties.getProperty("url", "")
-            SchemaMigration(cordappLoader.cordappSchemas, dataSource, !isH2Database(jdbcUrl), configuration.database).generateMigrationScript(File(outputFile))
-        }
-    }
-
-    fun runDbMigration() {
-        HikariDataSource(HikariConfig(configuration.dataSourceProperties)).use { dataSource ->
-            val jdbcUrl = configuration.dataSourceProperties.getProperty("url", "")
-            SchemaMigration(cordappLoader.cordappSchemas, dataSource, !isH2Database(jdbcUrl), configuration.database).runMigration()
-        }
-    }
-
     open fun start(): StartedNode<AbstractNode> {
         check(started == null) { "Node has already been started" }
         log.info("Node starting up ...")
@@ -886,9 +872,7 @@ fun configureDatabase(hikariProperties: Properties,

     val jdbcUrl = hikariProperties.getProperty("dataSource.url", "")

-    if (databaseConfig.runMigration) {
-        SchemaMigration(schemaService.schemaOptions.keys, dataSource, !isH2Database(jdbcUrl), databaseConfig).runMigration()
-    }
+    SchemaMigration(schemaService.schemaOptions.keys, dataSource, !isH2Database(jdbcUrl), databaseConfig).nodeStartup()

     return CordaPersistence(dataSource, databaseConfig, schemaService.schemaOptions.keys, jdbcUrl, attributeConverters)
 }
@@ -11,7 +11,6 @@ import net.corda.core.utilities.loggerFor
 import net.corda.node.*
 import net.corda.node.services.config.NodeConfiguration
 import net.corda.node.services.config.NodeConfigurationImpl
-import net.corda.node.services.persistence.MigrationExporter
 import net.corda.node.services.transactions.bftSMaRtSerialFilter
 import net.corda.node.shell.InteractiveShell
 import net.corda.node.utilities.registration.HTTPNetworkRegistrationService
@@ -133,22 +132,6 @@ open class NodeStartup(val args: Array<String>) {
             node.generateAndSaveNodeInfo()
             return
         }
-        if (cmdlineOptions.justRunDbMigration) {
-            node.runDbMigration()
-            return
-        }
-        if (cmdlineOptions.generateDatabaseMigrationToFile.first) {
-            node.generateDatabaseSchema(cmdlineOptions.generateDatabaseMigrationToFile.second!!)
-            return
-        }
-        if (cmdlineOptions.justCreateMigrationForCorDapp != null) {
-            try {
-                MigrationExporter.generateMigrationForCorDapp(cmdlineOptions.justCreateMigrationForCorDapp)
-            } catch (e: Exception) {
-                logger.error("Could not generate migration for ${cmdlineOptions.justCreateMigrationForCorDapp}", e)
-            }
-            return
-        }

         val startedNode = node.start()
         Node.printBasicNodeInfo("Loaded CorDapps", startedNode.services.cordappProvider.cordapps.joinToString { it.name })
@@ -43,7 +43,7 @@ import kotlin.streams.toList
 class CordappLoader private constructor(private val cordappJarPaths: List<RestrictedURL>) {
     val cordapps: List<Cordapp> by lazy { loadCordapps() + coreCordapp }

-    internal val appClassLoader: ClassLoader = URLClassLoader(cordappJarPaths.stream().map { it.url }.toTypedArray(), javaClass.classLoader)
+    val appClassLoader: ClassLoader = URLClassLoader(cordappJarPaths.stream().map { it.url }.toTypedArray(), javaClass.classLoader)

     init {
         if (cordappJarPaths.isEmpty()) {
@@ -25,10 +25,7 @@ class ArgsParserTest {
                 noLocalShell = false,
                 sshdServer = false,
                 justGenerateNodeInfo = false,
-                justRunDbMigration = false,
-                bootstrapRaftCluster = false,
-                generateDatabaseMigrationToFile = Pair(false, null),
-                justCreateMigrationForCorDapp = null
+                bootstrapRaftCluster = false
         ))
     }
@@ -42,6 +42,7 @@ include 'tools:demobench'
 include 'tools:loadtest'
 include 'tools:graphs'
 include 'tools:bootstrapper'
+include 'tools:dbmigration'
 include 'example-code'
 project(':example-code').projectDir = file("$settingsDir/docs/source/example-code")
 include 'samples:attachment-demo'
tools/dbmigration/build.gradle (new file, 32 lines)
@@ -0,0 +1,32 @@
description 'Database Migration Tool'

buildscript {
    repositories {
        maven {
            url "https://plugins.gradle.org/m2/"
        }
    }
    dependencies {
        classpath "com.github.jengelman.gradle.plugins:shadow:$shadow_version"
        classpath 'de.sebastianboegl.gradle.plugins:shadow-log4j-transformer:2.1.1'
    }
}

apply plugin: 'java'
apply plugin: 'kotlin'
apply plugin: 'application'
apply plugin: 'com.github.johnrengelman.shadow'

mainClassName = 'com.r3.corda.dbmigration.DBMigration'

dependencies {
    compile project(':node')
    // JOpt: for command line flags.
    compile "net.sf.jopt-simple:jopt-simple:$jopt_simple_version"
}

shadowJar {
    transform(de.sebastianboegl.gradle.plugins.shadow.transformers.Log4j2PluginsFileTransformer)
}
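With the shadow plugin applied, building the standalone jar would presumably go through the plugin's ``shadowJar`` task, e.g. ``./gradlew tools:dbmigration:shadowJar`` (invocation assumed from the plugin's conventions, not stated in this commit).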
@@ -0,0 +1,215 @@
@file:JvmName("DBMigration")

package com.r3.corda.dbmigration

import MigrationHelpers
import com.typesafe.config.ConfigFactory
import com.zaxxer.hikari.HikariConfig
import com.zaxxer.hikari.HikariDataSource
import joptsimple.OptionException
import joptsimple.OptionParser
import joptsimple.OptionSet
import joptsimple.util.EnumConverter
import net.corda.core.internal.copyTo
import net.corda.core.internal.div
import net.corda.core.schemas.MappedSchema
import net.corda.node.internal.cordapp.CordappLoader
import net.corda.node.services.config.ConfigHelper
import net.corda.node.services.config.parseAsNodeConfiguration
import net.corda.node.services.persistence.MigrationExporter
import net.corda.node.services.schema.NodeSchemaService
import net.corda.nodeapi.internal.config.parseAs
import net.corda.nodeapi.internal.persistence.DatabaseConfig
import net.corda.nodeapi.internal.persistence.SchemaMigration
import org.slf4j.LoggerFactory
import java.io.File
import java.io.FileWriter
import java.io.PrintWriter
import java.io.Writer
import java.net.URLClassLoader
import java.nio.file.Path
import java.nio.file.Paths
import java.text.SimpleDateFormat
import java.util.*
import java.util.jar.JarFile
import javax.sql.DataSource

// command line arguments
const val HELP = "help"
const val MODE = "mode"
const val BASE_DIRECTORY = "base-directory"
const val CONFIG = "config-file"
const val DOORMAN_JAR_PATH = "doorman-jar-path"
const val RUN_MIGRATION = "execute-migration"
const val DRY_RUN = "dry-run"
const val CREATE_MIGRATION_CORDAPP = "create-migration-sql-for-cordapp"
const val RELEASE_LOCK = "release-lock"

// output type
const val CONSOLE = "CONSOLE"

private val logger = LoggerFactory.getLogger("migration.tool")

private enum class Mode {
    NODE, DOORMAN
}

private fun initOptionParser(): OptionParser = OptionParser().apply {
    accepts(MODE, "Either 'NODE' or 'DOORMAN'. By default 'NODE'")
            .withOptionalArg()
            .withValuesConvertedBy(object : EnumConverter<Mode>(Mode::class.java) {})
            .defaultsTo(Mode.NODE)

    accepts(BASE_DIRECTORY, "The node or doorman directory")
            .withRequiredArg()

    accepts(CONFIG, "The name of the config file. By default 'node.conf' for a simple node and 'network-management.conf' for a doorman.")
            .withOptionalArg()

    accepts(DOORMAN_JAR_PATH, "The path to the doorman fat jar")
            .withOptionalArg()

    accepts(RUN_MIGRATION,
            "This option will run the db migration on the configured database")

    accepts(DRY_RUN, """Output the database migration to the specified output file.
        |The output directory is the base-directory.
        |You can specify a file name or 'CONSOLE' if you want to send the output to the console.""".trimMargin())
            .withOptionalArg()

    accepts(CREATE_MIGRATION_CORDAPP, """Create migration files for a CorDapp.
        |You can specify the fully qualified name of the `MappedSchema` class. If not specified, it will generate files for all schemas that don't have migrations.
        |The output directory is the base-directory, where a `migration` folder is created.""".trimMargin())
            .withOptionalArg()

    accepts(RELEASE_LOCK, "Releases whatever locks are on the database change log table, in case shutdown failed.")

    accepts(HELP).forHelp()
}

fun main(args: Array<String>) {
    val parser = initOptionParser()
    try {
        val options = parser.parse(*args)
        runCommand(options, parser)
    } catch (e: OptionException) {
        errorAndExit(e.message)
    }
}

data class Configuration(val dataSourceProperties: Properties, val database: DatabaseConfig)

private fun runCommand(options: OptionSet, parser: OptionParser) {

    fun baseDirectory() = Paths.get(options.valueOf(BASE_DIRECTORY) as String).normalize()
    val mode = options.valueOf(MODE) as Mode
    fun configFile(defaultCfgName: String) = baseDirectory() / ((options.valueOf(CONFIG) as String?) ?: defaultCfgName)

    when {
        options.has(HELP) -> parser.printHelpOn(System.out)
        mode == Mode.NODE -> {
            val baseDirectory = baseDirectory()
            val config = configFile("node.conf")
            val nodeConfig = ConfigHelper.loadConfig(baseDirectory, config).parseAsNodeConfiguration()
            val cordappLoader = CordappLoader.createDefault(baseDirectory)

            val schemaService = NodeSchemaService(extraSchemas = cordappLoader.cordappSchemas, includeNotarySchemas = nodeConfig.notary != null)

            handleCommand(options, baseDirectory, config, mode, cordappLoader.appClassLoader, schemaService.schemaOptions.keys)
        }
        mode == Mode.DOORMAN -> {
            val fatJarPath = Paths.get(options.valueOf(DOORMAN_JAR_PATH) as String)
            val doormanClassloader = classLoaderFromCapsuleFatJar(fatJarPath)
            val doormanSchema = "com.r3.corda.networkmanage.common.persistence.NetworkManagementSchemaServices\$SchemaV1"
            val schema = loadMappedSchema(doormanSchema, doormanClassloader)
            handleCommand(options, baseDirectory(), configFile("network-management.conf"), mode, doormanClassloader, setOf(schema))
        }
    }
    logger.info("Done")
}

private fun handleCommand(options: OptionSet, baseDirectory: Path, configFile: Path, mode: Mode, classLoader: ClassLoader, schemas: Set<MappedSchema>) {
    val config = ConfigFactory.parseFile(configFile.toFile()).resolve().parseAs(Configuration::class)

    fun runMigrationCommand(withMigration: (SchemaMigration) -> Unit) = runWithDataSource(config) { dataSource ->
        withMigration(SchemaMigration(schemas, dataSource, true, config.database, classLoader))
    }

    when {
        options.has(RELEASE_LOCK) -> runWithDataSource(config) {
            SchemaMigration(emptySet(), it, true, config.database, Thread.currentThread().contextClassLoader).forceReleaseMigrationLock()
        }
        options.has(DRY_RUN) -> {
            val writer = getMigrationOutput(baseDirectory, options)
            logger.info("Exporting the current db migrations ...")
            runMigrationCommand {
                it.generateMigrationScript(writer)
            }
        }
        options.has(RUN_MIGRATION) -> {
            logger.info("Running the database migration on ${baseDirectory}")
            runMigrationCommand { it.runMigration() }
        }
        options.has(CREATE_MIGRATION_CORDAPP) && (mode == Mode.NODE) -> {

            fun generateMigrationFileForSchema(schemaClass: String) {
                logger.info("Creating database migration files for schema: ${schemaClass} into ${baseDirectory / "migration"}")
                try {
                    runWithDataSource(config) {
                        MigrationExporter(baseDirectory, config.dataSourceProperties, classLoader, it).generateMigrationForCorDapp(schemaClass)
                    }
                } catch (e: Exception) {
                    e.printStackTrace()
                    errorAndExit("Could not generate migration for ${schemaClass}: ${e.message}")
                }
            }

            if (options.hasArgument(CREATE_MIGRATION_CORDAPP)) {
                val schemaClass = options.valueOf(CREATE_MIGRATION_CORDAPP) as String
                generateMigrationFileForSchema(schemaClass)
            } else {
                schemas.filter { MigrationHelpers.getMigrationResource(it, classLoader) == null }.forEach {
                    generateMigrationFileForSchema(it.javaClass.name)
                }
            }
        }
        else -> errorAndExit("Please specify a correct command")
    }
}

// only used for capsule
private fun classLoaderFromCapsuleFatJar(fatJarPath: Path): ClassLoader {
    val dir = createTempDir()
    dir.deleteOnExit()
    val jarFile = JarFile(fatJarPath.toFile())
    val jars = jarFile.entries().toList().filter { !it.isDirectory && it.name.endsWith("jar", ignoreCase = true) }.map { entry ->
        val dest = File(dir, entry.name).toPath()
        jarFile.getInputStream(entry).copyTo(dest)
        dest
    }
    return URLClassLoader(jars.map { it.toUri().toURL() }.toTypedArray())
}

private fun loadMappedSchema(schemaName: String, classLoader: ClassLoader) = classLoader.loadClass(schemaName).kotlin.objectInstance as MappedSchema

private fun getMigrationOutput(baseDirectory: Path, options: OptionSet): Writer {
    val option = options.valueOf(DRY_RUN) as String?
    return when (option) {
        null -> FileWriter(File(baseDirectory.toFile(), "migration${SimpleDateFormat("yyyyMMddHHmmss").format(Date())}.sql"))
        CONSOLE -> PrintWriter(System.out)
        else -> FileWriter(File(baseDirectory.toFile(), option))
    }
}

private fun runWithDataSource(config: Configuration, withDatasource: (DataSource) -> Unit) {
    val cfg = HikariConfig(config.dataSourceProperties)
    cfg.maximumPoolSize = 1
    HikariDataSource(cfg).use { dataSource ->
        withDatasource(dataSource)
    }
}

private fun errorAndExit(message: String?) {
    System.err.println(message)
    System.exit(1)
}
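Taken together, invocations of the packaged tool might look like the following (jar and path names illustrative):

    java -jar migration-tool.jar --base-directory /opt/corda --dry-run CONSOLE
    java -jar migration-tool.jar --base-directory /opt/corda --execute-migration
    java -jar migration-tool.jar --mode DOORMAN --base-directory /opt/doorman --doorman-jar-path /opt/doorman/doorman.jar --execute-migration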
tools/dbmigration/src/main/resources/log4j2.xml (new file, 30 lines)
@@ -0,0 +1,30 @@
<?xml version="1.0" encoding="UTF-8"?>
<Configuration status="info">

    <Properties>
        <Property name="consoleLogLevel">info</Property>
        <Property name="defaultLogLevel">debug</Property>
    </Properties>

    <ThresholdFilter level="trace"/>

    <Appenders>
        <Console name="Console-Appender" target="SYSTEM_OUT">
            <PatternLayout pattern="-- %date{ISO8601}{UTC}Z %c{2}.%method - %msg %X%n"/>
        </Console>

        <File name="File-Appender" fileName="logs/migration.log">
            <PatternLayout pattern="[%-5level] %date{ISO8601}{UTC}Z [%t] %c{2}.%method - %msg %X%n"/>
        </File>
    </Appenders>

    <Loggers>
        <Root level="${sys:defaultLogLevel}">
            <AppenderRef ref="File-Appender"/>
        </Root>
        <Logger name="migration.tool">
            <AppenderRef ref="Console-Appender"/>
            <AppenderRef ref="File-Appender"/>
        </Logger>
    </Loggers>
</Configuration>