Merge branch 'master' into shams-os-merge-120118

This commit is contained in:
Shams Asari
2018-01-15 15:23:27 +00:00
8 changed files with 299 additions and 92 deletions

View File

@ -0,0 +1,114 @@
package net.corda.node.services.persistence
import net.corda.core.identity.AbstractParty
import net.corda.core.schemas.MappedSchema
import net.corda.core.schemas.migrationResourceNameForSchema
import net.corda.nodeapi.internal.persistence.HibernateConfiguration
import org.hibernate.boot.Metadata
import org.hibernate.boot.MetadataSources
import org.hibernate.boot.registry.BootstrapServiceRegistryBuilder
import org.hibernate.cfg.Configuration
import org.hibernate.dialect.Dialect
import org.hibernate.tool.hbm2ddl.SchemaExport
import org.hibernate.tool.schema.TargetType
import java.io.File
import java.nio.file.Path
import java.sql.Types
import java.util.*
import javax.persistence.AttributeConverter
import javax.persistence.Converter
/**
* This is useful for CorDapp developers who want to enable migrations for
* standard "Open Source" Corda CorDapps
*/
/**
 * Generates Liquibase-formatted SQL migration scripts from a [MappedSchema].
 *
 * This is useful for CorDapp developers who want to enable migrations for
 * standard "Open Source" Corda CorDapps.
 */
object MigrationExporter {
    /** First line every Liquibase formatted-SQL changelog must begin with. */
    const val LIQUIBASE_HEADER = "--liquibase formatted sql"
    /** Author recorded in the generated changeset. */
    const val CORDA_USER = "R3.Corda.Generated"

    /**
     * Generates a migration script for the schema identified by its fully-qualified class name.
     *
     * @param schemaName fully-qualified class name of a Kotlin `object` implementing [MappedSchema].
     * @param parent directory the script is written into; defaults to the current working directory.
     * @return path of the generated `.sql` file.
     * @throws IllegalArgumentException if the class is not an object singleton or not a [MappedSchema].
     */
    fun generateMigrationForCorDapp(schemaName: String, parent: Path = File(".").toPath()): Path {
        val schemaClass = Class.forName(schemaName)
        // MappedSchema implementations are expected to be Kotlin `object` singletons;
        // fail with a clear message rather than an opaque NPE/ClassCastException.
        val instance = requireNotNull(schemaClass.kotlin.objectInstance) {
            "$schemaName is not a Kotlin object singleton"
        }
        val schemaObject = instance as? MappedSchema
                ?: throw IllegalArgumentException("$schemaName is not a MappedSchema")
        return generateMigrationForCorDapp(schemaObject, parent)
    }

    /**
     * Generates a migration script for [mappedSchema], writing it under [parent].
     *
     * The file is named via [migrationResourceNameForSchema] and contains the Liquibase
     * header, a changeset preamble, and the CREATE DDL exported by Hibernate.
     *
     * @return path of the generated `.sql` file.
     */
    fun generateMigrationForCorDapp(mappedSchema: MappedSchema, parent: Path): Path {
        // Create hibernate metadata for the MappedSchema so its DDL can be exported.
        val metadata = createHibernateMetadataForSchema(mappedSchema)

        val schemaSimpleName = checkNotNull(mappedSchema::class.simpleName) {
            "MappedSchema implementations must not be anonymous classes"
        }
        // (Re)create the output file with the Liquibase header and changeset preamble.
        // writeText truncates any previous file, replacing the old delete/create/append dance.
        val outputFile = File(parent.toFile(), "${migrationResourceNameForSchema(mappedSchema)}.sql")
        outputFile.parentFile.mkdirs()
        outputFile.writeText("$LIQUIBASE_HEADER\n\n--changeset $CORDA_USER:initial_schema_for_$schemaSimpleName\n")

        // Export the schema's CREATE statements, appending them to that file.
        SchemaExport().apply {
            setDelimiter(";")
            setFormat(true)
            setOutputFile(outputFile.absolutePath)
            execute(EnumSet.of(TargetType.SCRIPT), SchemaExport.Action.CREATE, metadata)
        }
        return outputFile.toPath()
    }

    /** Builds in-memory Hibernate [Metadata] for [mappedSchema] using [HibernateGenericDialect]. */
    private fun createHibernateMetadataForSchema(mappedSchema: MappedSchema): Metadata {
        val metadataSources = MetadataSources(BootstrapServiceRegistryBuilder().build())
        val config = Configuration(metadataSources)
        mappedSchema.mappedTypes.forEach { config.addAnnotatedClass(it) }
        val regBuilder = config.standardServiceRegistryBuilder
                .applySetting("hibernate.dialect", HibernateGenericDialect::class.java.name)
        val metadataBuilder = metadataSources.getMetadataBuilder(regBuilder.build())
        // Empty jdbcUrl: column generation does not need (or connect to) a live database.
        return HibernateConfiguration.buildHibernateMetadata(metadataBuilder, "",
                listOf(DummyAbstractPartyToX500NameAsStringConverter()))
    }

    /**
     * Used just for generating columns; deliberately maps to null in both directions.
     */
    @Converter(autoApply = true)
    class DummyAbstractPartyToX500NameAsStringConverter : AttributeConverter<AbstractParty, String> {
        override fun convertToDatabaseColumn(party: AbstractParty?) = null
        override fun convertToEntityAttribute(dbData: String?) = null
    }

    /**
     * Simplified hibernate dialect used for generating liquibase migration files.
     */
    class HibernateGenericDialect : Dialect() {
        init {
            // Generic, database-agnostic SQL type name for each JDBC type we emit.
            mapOf(
                    Types.BIGINT to "bigint",
                    Types.BOOLEAN to "boolean",
                    Types.BLOB to "blob",
                    Types.CLOB to "clob",
                    Types.DATE to "date",
                    Types.FLOAT to "float",
                    Types.TIME to "time",
                    Types.TIMESTAMP to "timestamp",
                    Types.VARCHAR to "varchar(\$l)",
                    Types.BINARY to "binary",
                    Types.BIT to "boolean",
                    Types.CHAR to "char(\$l)",
                    Types.DECIMAL to "decimal(\$p,\$s)",
                    Types.NUMERIC to "decimal(\$p,\$s)",
                    Types.DOUBLE to "double",
                    Types.INTEGER to "integer",
                    Types.LONGVARBINARY to "longvarbinary",
                    Types.LONGVARCHAR to "longvarchar",
                    Types.REAL to "real",
                    Types.SMALLINT to "smallint",
                    Types.TINYINT to "tinyint"
            ).forEach { (jdbcType, sqlType) -> registerColumnType(jdbcType, sqlType) }
        }
    }
}

View File

@ -5,6 +5,8 @@ import net.corda.core.schemas.MappedSchema
import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.toHexString
import org.hibernate.SessionFactory
import org.hibernate.boot.Metadata
import org.hibernate.boot.MetadataBuilder
import org.hibernate.boot.MetadataSources
import org.hibernate.boot.registry.BootstrapServiceRegistryBuilder
import org.hibernate.cfg.Configuration
@ -28,6 +30,22 @@ class HibernateConfiguration(
) {
companion object {
private val logger = contextLogger()
// register custom converters
/**
 * Applies Corda's standard customisations to [metadataBuilder] and builds the [Metadata]:
 * registers the supplied attribute converters, the log-truncating blob/binary basic types,
 * and (for SqlServer connections only) global nationalised character support.
 */
fun buildHibernateMetadata(metadataBuilder: MetadataBuilder, jdbcUrl: String, attributeConverters: Collection<AttributeConverter<*, *>>): Metadata {
    // Register each supplied custom JPA converter.
    for (converter in attributeConverters) {
        metadataBuilder.applyAttributeConverter(converter)
    }
    // Tweaked versions of `org.hibernate.type.MaterializedBlobType` / WrapperBinaryType that
    // truncate logged messages, to avoid OOM when large blobs might get logged.
    metadataBuilder.applyBasicType(CordaMaterializedBlobType, CordaMaterializedBlobType.name)
    metadataBuilder.applyBasicType(CordaWrapperBinaryType, CordaWrapperBinaryType.name)
    // When connecting to SqlServer (and only then) do we need to tell hibernate to use
    // nationalised (i.e. Unicode) strings by default.
    val forceUnicodeForSqlServer = jdbcUrl.contains(":sqlserver:", ignoreCase = true)
    metadataBuilder.enableGlobalNationalizedCharacterDataSupport(forceUnicodeForSqlServer)
    return metadataBuilder.build()
}
}
// TODO: make this a guava cache or similar to limit ability for this to grow forever.
@ -54,7 +72,7 @@ class HibernateConfiguration(
//preserving case-sensitive schema name for PostgreSQL by wrapping in double quotes, schema without double quotes would be treated as case-insensitive (lower cases)
val schemaName = if (jdbcUrl.contains(":postgresql:", ignoreCase = true) && !databaseConfig.schema.startsWith("\"")) {
"\"" + databaseConfig.schema + "\""
} else {
} else {
databaseConfig.schema
}
config.setProperty("hibernate.default_schema", schemaName)
@ -93,19 +111,8 @@ class HibernateConfiguration(
private fun buildSessionFactory(config: Configuration, metadataSources: MetadataSources): SessionFactory {
config.standardServiceRegistryBuilder.applySettings(config.properties)
val metadata = metadataSources.getMetadataBuilder(config.standardServiceRegistryBuilder.build()).run {
// register custom converters
attributeConverters.forEach { applyAttributeConverter(it) }
// Register a tweaked version of `org.hibernate.type.MaterializedBlobType` that truncates logged messages.
// to avoid OOM when large blobs might get logged.
applyBasicType(CordaMaterializedBlobType, CordaMaterializedBlobType.name)
applyBasicType(CordaWrapperBinaryType, CordaWrapperBinaryType.name)
// When connecting to SqlServer (and only then) do we need to tell hibernate to use
// nationalised (i.e. Unicode) strings by default
val forceUnicodeForSqlServer = jdbcUrl.contains(":sqlserver:", ignoreCase = true)
enableGlobalNationalizedCharacterDataSupport(forceUnicodeForSqlServer)
build()
}
val metadataBuilder = metadataSources.getMetadataBuilder(config.standardServiceRegistryBuilder.build())
val metadata = buildHibernateMetadata(metadataBuilder, jdbcUrl, attributeConverters)
return metadata.sessionFactoryBuilder.run {
allowOutOfTransactionUpdateOperations(true)
@ -140,14 +147,14 @@ class HibernateConfiguration(
}
// A tweaked version of `org.hibernate.type.MaterializedBlobType` that truncates logged messages. Also logs in hex.
private object CordaMaterializedBlobType : AbstractSingleColumnStandardBasicType<ByteArray>(BlobTypeDescriptor.DEFAULT, CordaPrimitiveByteArrayTypeDescriptor) {
object CordaMaterializedBlobType : AbstractSingleColumnStandardBasicType<ByteArray>(BlobTypeDescriptor.DEFAULT, CordaPrimitiveByteArrayTypeDescriptor) {
override fun getName(): String {
return "materialized_blob"
}
}
// A tweaked version of `org.hibernate.type.descriptor.java.PrimitiveByteArrayTypeDescriptor` that truncates logged messages.
private object CordaPrimitiveByteArrayTypeDescriptor : PrimitiveByteArrayTypeDescriptor() {
object CordaPrimitiveByteArrayTypeDescriptor : PrimitiveByteArrayTypeDescriptor() {
private val LOG_SIZE_LIMIT = 1024
override fun extractLoggableRepresentation(value: ByteArray?): String {
@ -164,7 +171,7 @@ class HibernateConfiguration(
}
// A tweaked version of `org.hibernate.type.WrapperBinaryType` that deals with ByteArray (java primitive byte[] type).
private object CordaWrapperBinaryType : AbstractSingleColumnStandardBasicType<ByteArray>(VarbinaryTypeDescriptor.INSTANCE, PrimitiveByteArrayTypeDescriptor.INSTANCE) {
object CordaWrapperBinaryType : AbstractSingleColumnStandardBasicType<ByteArray>(VarbinaryTypeDescriptor.INSTANCE, PrimitiveByteArrayTypeDescriptor.INSTANCE) {
override fun getRegistrationKeys(): Array<String> {
return arrayOf(name, "ByteArray", ByteArray::class.java.name)
}
@ -176,4 +183,4 @@ class HibernateConfiguration(
}
/** Allow Oracle database drivers ojdbc7.jar and ojdbc8.jar to deserialize classes from oracle.sql.converter package. */
fun oracleJdbcDriverSerialFilter(clazz: Class<*>) : Boolean = clazz.name.startsWith("oracle.sql.converter.")
/** Allow Oracle database drivers ojdbc7.jar and ojdbc8.jar to deserialize classes from oracle.sql.converter package. */
fun oracleJdbcDriverSerialFilter(clazz: Class<*>): Boolean {
    // Only classes under this exact package prefix are whitelisted for deserialization.
    val allowedPackagePrefix = "oracle.sql.converter."
    return clazz.name.startsWith(allowedPackagePrefix)
}

View File

@ -0,0 +1,121 @@
package net.corda.node.services.persistence
import com.zaxxer.hikari.HikariConfig
import com.zaxxer.hikari.HikariDataSource
import net.corda.core.contracts.UniqueIdentifier
import net.corda.core.identity.AbstractParty
import net.corda.core.schemas.CommonSchemaV1
import net.corda.core.schemas.MappedSchema
import net.corda.node.internal.configureDatabase
import net.corda.node.services.schema.NodeSchemaService
import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.DatabaseConfig
import net.corda.nodeapi.internal.persistence.SchemaMigration
import net.corda.testing.internal.rigorousMock
import net.corda.testing.node.MockServices
import org.apache.commons.io.FileUtils
import org.assertj.core.api.Assertions.assertThat
import org.junit.Test
import java.math.BigInteger
import java.net.URL
import javax.persistence.*
import java.net.URLClassLoader
import java.nio.file.Files
import java.nio.file.Path
/**
 * Tests for the Liquibase schema-migration machinery: default config, H2 behaviour,
 * explicit migration runs, and discovery of convention-named migration files on the classpath.
 */
class SchemaMigrationTest {

    /** Migrations must be opt-in: a default [DatabaseConfig] should not run them. */
    @Test
    fun `Ensure that runMigration is disabled by default`() {
        assertThat(DatabaseConfig().runMigration).isFalse()
    }

    @Test
    fun `Migration is run when runMigration is disabled, and database is H2`() {
        // NOTE(review): for H2 the migration appears to run even with runMigration = false — confirm intended.
        val dataSourceProps = MockServices.makeTestDataSourceProperties()
        val db = configureDatabase(dataSourceProps, DatabaseConfig(runMigration = false), rigorousMock())
        checkMigrationRun(db)
    }

    @Test
    fun `Migration is run when runMigration is enabled`() {
        val dataSourceProps = MockServices.makeTestDataSourceProperties()
        val db = configureDatabase(dataSourceProps, DatabaseConfig(runMigration = true), rigorousMock())
        checkMigrationRun(db)
    }

    @Test
    fun `Verification passes when migration is run as a separate step`() {
        val schemaService = NodeSchemaService()
        val dataSourceProps = MockServices.makeTestDataSourceProperties()

        // run the migration on the database as a stand-alone step
        val migration = SchemaMigration(schemaService.schemaOptions.keys, HikariDataSource(HikariConfig(dataSourceProps)), true, DatabaseConfig())
        migration.runMigration()

        // start the node with "runMigration = false" and check that it started correctly
        val db = configureDatabase(dataSourceProps, DatabaseConfig(runMigration = false), rigorousMock(), schemaService)
        checkMigrationRun(db)
    }

    @Test
    fun `The migration picks up migration files on the classpath if they follow the convention`() {
        val dataSourceProps = MockServices.makeTestDataSourceProperties()

        // create a migration file for the DummyTestSchemaV1 and add it to the classpath
        val tmpFolder = Files.createTempDirectory("test")
        try {
            val fileName = MigrationExporter.generateMigrationForCorDapp(DummyTestSchemaV1, tmpFolder).fileName
            addToClassPath(tmpFolder)

            // run the migrations for DummyTestSchemaV1, which should pick up the migration file
            val db = configureDatabase(dataSourceProps, DatabaseConfig(runMigration = true), rigorousMock(), NodeSchemaService(extraSchemas = setOf(DummyTestSchemaV1)))

            // check that the file was picked up
            val nrOfChangesOnDiscoveredFile = db.dataSource.connection.use {
                it.createStatement().executeQuery("select count(*) from DATABASECHANGELOG where filename ='migration/${fileName}'").use { rs ->
                    rs.next()
                    rs.getInt(1)
                }
            }
            assertThat(nrOfChangesOnDiscoveredFile).isGreaterThan(0)
        } finally {
            // clean up the temp folder even if an assertion above fails
            FileUtils.deleteDirectory(tmpFolder.toFile())
        }
    }

    /** Asserts hibernate_sequence exists, which proves the migration actually ran. */
    private fun checkMigrationRun(db: CordaPersistence) {
        db.transaction {
            val value = this.session.createNativeQuery("SELECT NEXT VALUE FOR hibernate_sequence").uniqueResult() as BigInteger
            assertThat(value).isGreaterThan(BigInteger.ZERO)
        }
    }

    // Hacky way to add a folder to the system classpath at runtime via reflection.
    fun addToClassPath(file: Path) = URLClassLoader::class.java.getDeclaredMethod("addURL", URL::class.java).apply {
        isAccessible = true
        // toURI().toURL() rather than the deprecated File.toURL(), which mis-encodes special characters.
        invoke(ClassLoader.getSystemClassLoader(), file.toFile().toURI().toURL())
    }

    object DummyTestSchema

    /** Minimal MappedSchema used solely to exercise migration-file discovery. */
    object DummyTestSchemaV1 : MappedSchema(schemaFamily = DummyTestSchema.javaClass, version = 1, mappedTypes = listOf(PersistentDummyTestState::class.java)) {
        @Entity
        @Table(name = "dummy_test_states")
        class PersistentDummyTestState(
                @ElementCollection
                @Column(name = "participants")
                @CollectionTable(name = "dummy_deal_states_participants", joinColumns = arrayOf(
                        JoinColumn(name = "output_index", referencedColumnName = "output_index"),
                        JoinColumn(name = "transaction_id", referencedColumnName = "transaction_id")))
                override var participants: MutableSet<AbstractParty>? = null,

                @Transient
                val uid: UniqueIdentifier
        ) : CommonSchemaV1.LinearState(uuid = uid.id, externalId = uid.externalId, participants = participants)
    }
}