diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index b8baf0a528..5fed738c7b 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -8,6 +8,10 @@ Unreleased
 ----------
 * Introduced ``TestCorDapp`` and utilities to support asymmetric setups for nodes through ``DriverDSL``, ``MockNetwork`` and ``MockServices``.
+* Changed the type of the ``checkpoint_value`` column. Please check the upgrade notes on how to update your database.
+
+* Removed the buggy ``serverNameTablePrefix`` database configuration option.
+
 * ``freeLocalHostAndPort``, ``freePort``, and ``getFreeLocalPorts`` from ``TestUtils`` have been deprecated as they don't provide any guarantee the returned port will be available, which can result in flaky tests. Use ``PortAllocation.Incremental`` instead.
diff --git a/docs/source/corda-configuration-file.rst b/docs/source/corda-configuration-file.rst
index 0b8e0061bf..a8f680774c 100644
--- a/docs/source/corda-configuration-file.rst
+++ b/docs/source/corda-configuration-file.rst
@@ -76,7 +76,6 @@ absolute path to the node's base directory.
 :database: Database configuration:
-    :serverNameTablePrefix: Prefix string to apply to all the database tables. The default is no prefix.
     :transactionIsolationLevel: Transaction isolation level as defined by the ``TRANSACTION_`` constants in ``java.sql.Connection``, but without the ``TRANSACTION_`` prefix. Defaults to REPEATABLE_READ.
     :exportHibernateJMXStatistics: Whether to export Hibernate JMX statistics (caution: expensive run-time overhead)
diff --git a/docs/source/upgrade-notes.rst b/docs/source/upgrade-notes.rst
index 482f3d812d..8f5775d301 100644
--- a/docs/source/upgrade-notes.rst
+++ b/docs/source/upgrade-notes.rst
@@ -33,6 +33,22 @@ UNRELEASED
 
 <<< Fill this in >>>
 
+* Database upgrade - the type of the ``checkpoint_value`` column has changed.
+  This addresses an issue where the PostgreSQL ``vacuum`` function is unable to clean up deleted checkpoints because they are still referenced from the ``pg_shdepend`` table.
+
+  For PostgreSQL:
+
+  .. sourcecode:: sql
+
+     ALTER TABLE node_checkpoints ALTER COLUMN checkpoint_value SET DATA TYPE bytea;
+
+  For H2:
+
+  .. sourcecode:: sql
+
+     ALTER TABLE node_checkpoints ALTER COLUMN checkpoint_value SET DATA TYPE VARBINARY(33554432);
+
+
 * API change: ``net.corda.core.schemas.PersistentStateRef`` fields (``index`` and ``txId``) incorrectly marked as nullable are now non-nullable, :doc:`changelog` contains the explanation.
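
After running the relevant ``ALTER TABLE`` statement above, the new column type can be confirmed over JDBC - the same metadata lookup the node now performs at start-up. A minimal sketch, assuming placeholder connection details (the URL, user and password are illustrative only):

.. sourcecode:: kotlin

    import java.sql.DriverManager

    // Placeholder connection details - substitute the node database's JDBC URL and credentials.
    fun main() {
        val jdbcUrl = "jdbc:postgresql://localhost:5432/corda"
        DriverManager.getConnection(jdbcUrl, "corda_user", "corda_password").use { connection ->
            val columns = connection.metaData.getColumns(null, null, "node_checkpoints", "checkpoint_value")
            if (columns.next()) {
                // Expect "bytea" on PostgreSQL after the migration; "oid" means the old type is still in place.
                println("checkpoint_value type: ${columns.getString("TYPE_NAME")}")
            } else {
                println("node_checkpoints.checkpoint_value not found - check the schema and table name case.")
            }
        }
    }
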
diff --git a/node-api/src/main/kotlin/net/corda/nodeapi/internal/persistence/CordaPersistence.kt b/node-api/src/main/kotlin/net/corda/nodeapi/internal/persistence/CordaPersistence.kt
index f5c8c39ae8..10347e8de1 100644
--- a/node-api/src/main/kotlin/net/corda/nodeapi/internal/persistence/CordaPersistence.kt
+++ b/node-api/src/main/kotlin/net/corda/nodeapi/internal/persistence/CordaPersistence.kt
@@ -22,7 +22,6 @@ const val NODE_DATABASE_PREFIX = "node_"
 // This class forms part of the node config and so any changes to it must be handled with care
 data class DatabaseConfig(
         val initialiseSchema: Boolean = true,
-        val serverNameTablePrefix: String = "",
         val transactionIsolationLevel: TransactionIsolationLevel = TransactionIsolationLevel.REPEATABLE_READ,
         val exportHibernateJMXStatistics: Boolean = false,
         val mappedSchemaCacheSize: Long = 100
@@ -51,6 +50,7 @@ val contextDatabaseOrNull: CordaPersistence?
     get() = _contextDatabase.get()
 
 class CordaPersistence(
         databaseConfig: DatabaseConfig,
         schemas: Set<MappedSchema>,
+        val jdbcUrl: String,
         attributeConverters: Collection<AttributeConverter<*, *>> = emptySet()
 ) : Closeable {
     companion object {
@@ -60,7 +60,7 @@ class CordaPersistence(
     private val defaultIsolationLevel = databaseConfig.transactionIsolationLevel
     val hibernateConfig: HibernateConfiguration by lazy {
         transaction {
-            HibernateConfiguration(schemas, databaseConfig, attributeConverters)
+            HibernateConfiguration(schemas, databaseConfig, attributeConverters, jdbcUrl)
         }
     }
     val entityManagerFactory get() = hibernateConfig.sessionFactoryForRegisteredSchemas
@@ -84,6 +84,7 @@ class CordaPersistence(
         transaction {
             check(!connection.metaData.isReadOnly) { "Database should not be readonly." }
             checkCorrectAttachmentsContractsTableName(connection)
+            checkCorrectCheckpointTypeOnPostgres(connection)
         }
     }
@@ -272,7 +273,7 @@ private fun Throwable.hasSQLExceptionCause(): Boolean =
 
 class CouldNotCreateDataSourceException(override val message: String?, override val cause: Throwable? = null) : Exception()
 
-class IncompatibleAttachmentsContractsTableName(override val message: String?, override val cause: Throwable? = null) : Exception()
+class DatabaseIncompatibleException(override val message: String?, override val cause: Throwable? = null) : Exception()
 
 private fun checkCorrectAttachmentsContractsTableName(connection: Connection) {
     val correctName = "NODE_ATTACHMENTS_CONTRACTS"
@@ -282,7 +283,22 @@ private fun checkCorrectAttachmentsContractsTableName(connection: Connection) {
     fun warning(incorrectName: String, version: String) = "The database contains the older table name $incorrectName instead of $correctName, see upgrade notes to migrate from Corda database version $version https://docs.corda.net/head/upgrade-notes.html."
 
     if (!connection.metaData.getTables(null, null, correctName, null).next()) {
-        if (connection.metaData.getTables(null, null, incorrectV30Name, null).next()) { throw IncompatibleAttachmentsContractsTableName(warning(incorrectV30Name, "3.0")) }
-        if (connection.metaData.getTables(null, null, incorrectV31Name, null).next()) { throw IncompatibleAttachmentsContractsTableName(warning(incorrectV31Name, "3.1")) }
+        if (connection.metaData.getTables(null, null, incorrectV30Name, null).next()) { throw DatabaseIncompatibleException(warning(incorrectV30Name, "3.0")) }
+        if (connection.metaData.getTables(null, null, incorrectV31Name, null).next()) { throw DatabaseIncompatibleException(warning(incorrectV31Name, "3.1")) }
+    }
+}
+
+private fun checkCorrectCheckpointTypeOnPostgres(connection: Connection) {
+    val metaData = connection.metaData
+    if (metaData.getDatabaseProductName() != "PostgreSQL") {
+        return
+    }
+
+    val result = metaData.getColumns(null, null, "node_checkpoints", "checkpoint_value")
+    if (result.next()) {
+        val type = result.getString("TYPE_NAME")
+        if (type != "bytea") {
+            throw DatabaseIncompatibleException("The type of the 'checkpoint_value' column must be 'bytea', but '$type' was found. See upgrade notes to migrate from Corda database version 3.1 https://docs.corda.net/head/upgrade-notes.html.")
+        }
     }
 }
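
The new ``jdbcUrl`` constructor parameter is simply the raw JDBC URL taken from the Hikari data-source properties; ``createCordaPersistence`` (further down in this diff) reads it from ``dataSource.url``. A minimal sketch of that lookup, assuming an illustrative H2 URL:

.. sourcecode:: kotlin

    import java.util.Properties

    // Illustrative only: the H2 URL below stands in for whatever dataSource.url the node is configured with.
    fun main() {
        val hikariProperties = Properties().apply {
            setProperty("dataSource.url", "jdbc:h2:mem:example;DB_CLOSE_DELAY=-1")
        }
        // Mirrors createCordaPersistence(): a missing URL falls back to "" and therefore to the
        // non-PostgreSQL blob mapping registered by HibernateConfiguration.
        val jdbcUrl = hikariProperties.getProperty("dataSource.url", "")
        println("CordaPersistence would be constructed with jdbcUrl = $jdbcUrl")
    }
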
diff --git a/node-api/src/main/kotlin/net/corda/nodeapi/internal/persistence/HibernateConfiguration.kt b/node-api/src/main/kotlin/net/corda/nodeapi/internal/persistence/HibernateConfiguration.kt
index 365fe63bcc..2b809e1ce9 100644
--- a/node-api/src/main/kotlin/net/corda/nodeapi/internal/persistence/HibernateConfiguration.kt
+++ b/node-api/src/main/kotlin/net/corda/nodeapi/internal/persistence/HibernateConfiguration.kt
@@ -6,17 +6,17 @@ import net.corda.core.schemas.MappedSchema
 import net.corda.core.utilities.contextLogger
 import net.corda.core.utilities.toHexString
 import org.hibernate.SessionFactory
+import org.hibernate.boot.Metadata
+import org.hibernate.boot.MetadataBuilder
 import org.hibernate.boot.MetadataSources
-import org.hibernate.boot.model.naming.Identifier
-import org.hibernate.boot.model.naming.PhysicalNamingStrategyStandardImpl
 import org.hibernate.boot.registry.BootstrapServiceRegistryBuilder
 import org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl
 import org.hibernate.boot.registry.classloading.spi.ClassLoaderService
 import org.hibernate.cfg.Configuration
 import org.hibernate.engine.jdbc.connections.spi.ConnectionProvider
-import org.hibernate.engine.jdbc.env.spi.JdbcEnvironment
 import org.hibernate.service.UnknownUnwrapTypeException
 import org.hibernate.type.AbstractSingleColumnStandardBasicType
+import org.hibernate.type.MaterializedBlobType
 import org.hibernate.type.descriptor.java.PrimitiveByteArrayTypeDescriptor
 import org.hibernate.type.descriptor.sql.BlobTypeDescriptor
 import org.hibernate.type.descriptor.sql.VarbinaryTypeDescriptor
@@ -29,10 +29,32 @@ class HibernateConfiguration(
         schemas: Set<MappedSchema>,
         private val databaseConfig: DatabaseConfig,
         private val attributeConverters: Collection<AttributeConverter<*, *>>,
+        private val jdbcUrl: String,
         val cordappClassLoader: ClassLoader? = null
 ) {
     companion object {
         private val logger = contextLogger()
+
+        // Builds the Hibernate Metadata, registering the custom attribute converters and basic types.
+        fun buildHibernateMetadata(metadataBuilder: MetadataBuilder, jdbcUrl: String, attributeConverters: Collection<AttributeConverter<*, *>>): Metadata {
+            metadataBuilder.run {
+                attributeConverters.forEach { applyAttributeConverter(it) }
+                // Register a tweaked version of `org.hibernate.type.MaterializedBlobType` that truncates logged messages
+                // to avoid OOM when large blobs might get logged.
+                applyBasicType(CordaMaterializedBlobType, CordaMaterializedBlobType.name)
+                applyBasicType(CordaWrapperBinaryType, CordaWrapperBinaryType.name)
+
+                // Register a custom type that maps a blob to bytea in PostgreSQL and to a normal blob for all other DBMSs.
+                // This is required for checkpoints as a workaround for an issue PostgreSQL has on Azure.
+                if (jdbcUrl.contains(":postgresql:", ignoreCase = true)) {
+                    applyBasicType(MapBlobToPostgresByteA, MapBlobToPostgresByteA.name)
+                } else {
+                    applyBasicType(MapBlobToNormalBlob, MapBlobToNormalBlob.name)
+                }
+
+                return build()
+            }
+        }
     }
 
     private val sessionFactories = Caffeine.newBuilder().maximumSize(databaseConfig.mappedSchemaCacheSize).build<Set<MappedSchema>, SessionFactory>()
@@ -62,7 +84,7 @@ class HibernateConfiguration(
             schema.mappedTypes.forEach { config.addAnnotatedClass(it) }
         }
 
-        val sessionFactory = buildSessionFactory(config, metadataSources, databaseConfig.serverNameTablePrefix, cordappClassLoader)
+        val sessionFactory = buildSessionFactory(config, metadataSources, cordappClassLoader)
         logger.info("Created session factory for schemas: $schemas")
 
         // export Hibernate JMX statistics
@@ -83,13 +105,12 @@ class HibernateConfiguration(
 
         try {
             mbeanServer.registerMBean(statisticsMBean, statsName)
-        }
-        catch (e: Exception) {
+        } catch (e: Exception) {
             logger.warn(e.message)
         }
     }
 
-    private fun buildSessionFactory(config: Configuration, metadataSources: MetadataSources, tablePrefix: String, cordappClassLoader: ClassLoader?): SessionFactory {
+    private fun buildSessionFactory(config: Configuration, metadataSources: MetadataSources, cordappClassLoader: ClassLoader?): SessionFactory {
         config.standardServiceRegistryBuilder.applySettings(config.properties)
 
         if (cordappClassLoader != null) {
@@ -98,22 +119,8 @@ class HibernateConfiguration(
                     ClassLoaderServiceImpl(cordappClassLoader))
         }
 
-        val metadata = metadataSources.getMetadataBuilder(config.standardServiceRegistryBuilder.build()).run {
-            applyPhysicalNamingStrategy(object : PhysicalNamingStrategyStandardImpl() {
-                override fun toPhysicalTableName(name: Identifier?, context: JdbcEnvironment?): Identifier {
-                    val default = super.toPhysicalTableName(name, context)
-                    return Identifier.toIdentifier(tablePrefix + default.text, default.isQuoted)
-                }
-            })
-            // register custom converters
-            attributeConverters.forEach { applyAttributeConverter(it) }
-            // Register a tweaked version of `org.hibernate.type.MaterializedBlobType` that truncates logged messages.
-            // to avoid OOM when large blobs might get logged.
-            applyBasicType(CordaMaterializedBlobType, CordaMaterializedBlobType.name)
-            applyBasicType(CordaWrapperBinaryType, CordaWrapperBinaryType.name)
-            build()
-        }
-
+        val metadataBuilder = metadataSources.getMetadataBuilder(config.standardServiceRegistryBuilder.build())
+        val metadata = buildHibernateMetadata(metadataBuilder, jdbcUrl, attributeConverters)
         return metadata.sessionFactoryBuilder.run {
             allowOutOfTransactionUpdateOperations(true)
             applySecondLevelCacheSupport(false)
@@ -148,7 +155,7 @@ class HibernateConfiguration(
     }
 
     // A tweaked version of `org.hibernate.type.MaterializedBlobType` that truncates logged messages. Also logs in hex.
-    private object CordaMaterializedBlobType : AbstractSingleColumnStandardBasicType<ByteArray>(BlobTypeDescriptor.DEFAULT, CordaPrimitiveByteArrayTypeDescriptor) {
+    object CordaMaterializedBlobType : AbstractSingleColumnStandardBasicType<ByteArray>(BlobTypeDescriptor.DEFAULT, CordaPrimitiveByteArrayTypeDescriptor) {
         override fun getName(): String {
             return "materialized_blob"
         }
@@ -172,7 +179,7 @@ class HibernateConfiguration(
     }
 
     // A tweaked version of `org.hibernate.type.WrapperBinaryType` that deals with ByteArray (java primitive byte[] type).
-    private object CordaWrapperBinaryType : AbstractSingleColumnStandardBasicType<ByteArray>(VarbinaryTypeDescriptor.INSTANCE, PrimitiveByteArrayTypeDescriptor.INSTANCE) {
+    object CordaWrapperBinaryType : AbstractSingleColumnStandardBasicType<ByteArray>(VarbinaryTypeDescriptor.INSTANCE, PrimitiveByteArrayTypeDescriptor.INSTANCE) {
         override fun getRegistrationKeys(): Array<String> {
             return arrayOf(name, "ByteArray", ByteArray::class.java.name)
         }
@@ -181,4 +188,21 @@ class HibernateConfiguration(
             return "corda-wrapper-binary"
         }
     }
-}
\ No newline at end of file
+
+    // Maps a blob to a byte array (bytea) on PostgreSQL.
+    object MapBlobToPostgresByteA : AbstractSingleColumnStandardBasicType<ByteArray>(VarbinaryTypeDescriptor.INSTANCE, PrimitiveByteArrayTypeDescriptor.INSTANCE) {
+        override fun getRegistrationKeys(): Array<String> {
+            return arrayOf(name, "ByteArray", ByteArray::class.java.name)
+        }
+
+        override fun getName(): String {
+            return "corda-blob"
+        }
+    }
+
+    object MapBlobToNormalBlob : MaterializedBlobType() {
+        override fun getName(): String {
+            return "corda-blob"
+        }
+    }
+}
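
The choice between the two implementations registered under the ``corda-blob`` name comes down to a case-insensitive substring test on the JDBC URL. A standalone sketch of that decision (the helper name is illustrative; only the ``":postgresql:"`` test is taken from the change above):

.. sourcecode:: kotlin

    // Illustrative helper; only the ":postgresql:" substring test mirrors buildHibernateMetadata().
    fun usesPostgresByteA(jdbcUrl: String): Boolean = jdbcUrl.contains(":postgresql:", ignoreCase = true)

    fun main() {
        check(usesPostgresByteA("jdbc:postgresql://db-host:5432/corda"))   // PostgreSQL -> MapBlobToPostgresByteA (bytea)
        check(!usesPostgresByteA("jdbc:h2:file:./persistence/node"))       // anything else -> MapBlobToNormalBlob
        println("corda-blob mapping selection behaves as expected")
    }
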
diff --git a/node/src/integration-test/kotlin/net/corda/node/persistence/FailNodeOnNotMigratedAttachmentContractsTableNameTests.kt b/node/src/integration-test/kotlin/net/corda/node/persistence/FailNodeOnNotMigratedAttachmentContractsTableNameTests.kt
index 80f7a11ee0..aabc23e0e2 100644
--- a/node/src/integration-test/kotlin/net/corda/node/persistence/FailNodeOnNotMigratedAttachmentContractsTableNameTests.kt
+++ b/node/src/integration-test/kotlin/net/corda/node/persistence/FailNodeOnNotMigratedAttachmentContractsTableNameTests.kt
@@ -52,7 +52,7 @@ class FailNodeOnNotMigratedAttachmentContractsTableNameTests {
                 it.createStatement().execute("ALTER TABLE $tableNameFromMapping RENAME TO $tableNameInDB")
                 it.commit()
             }
-            assertFailsWith(net.corda.nodeapi.internal.persistence.IncompatibleAttachmentsContractsTableName::class) {
+            assertFailsWith(net.corda.nodeapi.internal.persistence.DatabaseIncompatibleException::class) {
                 val nodeHandle = startNode(providedName = nodeName, rpcUsers = listOf(user)).getOrThrow()
                 nodeHandle.stop()
             }
diff --git a/node/src/integration-test/kotlin/net/corda/node/services/transactions/RaftTransactionCommitLogTests.kt b/node/src/integration-test/kotlin/net/corda/node/services/transactions/RaftTransactionCommitLogTests.kt
index 11de8b52e3..3edb3d3f3e 100644
--- a/node/src/integration-test/kotlin/net/corda/node/services/transactions/RaftTransactionCommitLogTests.kt
+++ b/node/src/integration-test/kotlin/net/corda/node/services/transactions/RaftTransactionCommitLogTests.kt
@@ -153,7 +153,7 @@ class RaftTransactionCommitLogTests {
     private fun createReplica(myAddress: NetworkHostAndPort, clusterAddress: NetworkHostAndPort? = null): CompletableFuture<Member> {
         val storage = Storage.builder().withStorageLevel(StorageLevel.MEMORY).build()
         val address = Address(myAddress.host, myAddress.port)
-        val database = configureDatabase(makeTestDataSourceProperties(), DatabaseConfig(serverNameTablePrefix = "PORT_${myAddress.port}_"), { null }, { null }, NodeSchemaService(includeNotarySchemas = true))
+        val database = configureDatabase(makeTestDataSourceProperties(), DatabaseConfig(), { null }, { null }, NodeSchemaService(includeNotarySchemas = true))
         databases.add(database)
         val stateMachineFactory = { RaftTransactionCommitLog(database, Clock.systemUTC(), RaftUniquenessProvider.Companion::createMap) }
diff --git a/node/src/main/kotlin/net/corda/node/internal/AbstractNode.kt b/node/src/main/kotlin/net/corda/node/internal/AbstractNode.kt
index fedfab8287..be34d7c63c 100644
--- a/node/src/main/kotlin/net/corda/node/internal/AbstractNode.kt
+++ b/node/src/main/kotlin/net/corda/node/internal/AbstractNode.kt
@@ -70,7 +70,7 @@ import net.corda.nodeapi.internal.crypto.X509Utilities
 import net.corda.nodeapi.internal.persistence.CordaPersistence
 import net.corda.nodeapi.internal.persistence.CouldNotCreateDataSourceException
 import net.corda.nodeapi.internal.persistence.DatabaseConfig
-import net.corda.nodeapi.internal.persistence.IncompatibleAttachmentsContractsTableName
+import net.corda.nodeapi.internal.persistence.DatabaseIncompatibleException
 import net.corda.nodeapi.internal.storeLegalIdentity
 import net.corda.tools.shell.InteractiveShell
 import org.apache.activemq.artemis.utils.ReusableLatch
@@ -152,7 +152,8 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
             configuration.database,
             identityService::wellKnownPartyFromX500Name,
             identityService::wellKnownPartyFromAnonymous,
-            schemaService
+            schemaService,
+            configuration.dataSourceProperties
     )
     init {
         // TODO Break cyclic dependency
@@ -1010,7 +1011,7 @@ fun configureDatabase(hikariProperties: Properties,
                       wellKnownPartyFromX500Name: (CordaX500Name) -> Party?,
                       wellKnownPartyFromAnonymous: (AbstractParty) -> Party?,
                       schemaService: SchemaService = NodeSchemaService()): CordaPersistence {
-    val persistence = createCordaPersistence(databaseConfig, wellKnownPartyFromX500Name, wellKnownPartyFromAnonymous, schemaService)
+    val persistence = createCordaPersistence(databaseConfig, wellKnownPartyFromX500Name, wellKnownPartyFromAnonymous, schemaService, hikariProperties)
     persistence.hikariStart(hikariProperties)
     return persistence
 }
@@ -1018,14 +1019,16 @@ fun configureDatabase(hikariProperties: Properties,
 fun createCordaPersistence(databaseConfig: DatabaseConfig,
                            wellKnownPartyFromX500Name: (CordaX500Name) -> Party?,
                            wellKnownPartyFromAnonymous: (AbstractParty) -> Party?,
-                           schemaService: SchemaService): CordaPersistence {
+                           schemaService: SchemaService,
+                           hikariProperties: Properties): CordaPersistence {
     // Register the AbstractPartyDescriptor so Hibernate doesn't warn when encountering AbstractParty. Unfortunately
     // Hibernate warns about not being able to find a descriptor if we don't provide one, but won't use it by default
     // so we end up providing both descriptor and converter. We should re-examine this in later versions to see if
     // either Hibernate can be convinced to stop warning, use the descriptor by default, or something else.
     JavaTypeDescriptorRegistry.INSTANCE.addDescriptor(AbstractPartyDescriptor(wellKnownPartyFromX500Name, wellKnownPartyFromAnonymous))
     val attributeConverters = listOf(AbstractPartyToX500NameAsStringConverter(wellKnownPartyFromX500Name, wellKnownPartyFromAnonymous))
-    return CordaPersistence(databaseConfig, schemaService.schemaOptions.keys, attributeConverters)
+    val jdbcUrl = hikariProperties.getProperty("dataSource.url", "")
+    return CordaPersistence(databaseConfig, schemaService.schemaOptions.keys, jdbcUrl, attributeConverters)
 }
 
 fun CordaPersistence.hikariStart(hikariProperties: Properties) {
@@ -1035,7 +1038,7 @@ fun CordaPersistence.hikariStart(hikariProperties: Properties) {
     when {
         ex is HikariPool.PoolInitializationException -> throw CouldNotCreateDataSourceException("Could not connect to the database. Please check your JDBC connection URL, or the connectivity to the database.", ex)
         ex.cause is ClassNotFoundException -> throw CouldNotCreateDataSourceException("Could not find the database driver class. Please add it to the 'drivers' folder. See: https://docs.corda.net/corda-configuration-file.html")
-        ex is IncompatibleAttachmentsContractsTableName -> throw ex
+        ex is DatabaseIncompatibleException -> throw ex
         else -> throw CouldNotCreateDataSourceException("Could not create the DataSource: ${ex.message}", ex)
     }
 }
diff --git a/node/src/main/kotlin/net/corda/node/internal/NodeStartup.kt b/node/src/main/kotlin/net/corda/node/internal/NodeStartup.kt
index 57ee92a4cc..5f6180a2a2 100644
--- a/node/src/main/kotlin/net/corda/node/internal/NodeStartup.kt
+++ b/node/src/main/kotlin/net/corda/node/internal/NodeStartup.kt
@@ -27,9 +27,9 @@ import net.corda.node.utilities.registration.UnableToRegisterNodeWithDoormanExce
 import net.corda.node.utilities.saveToKeyStore
 import net.corda.node.utilities.saveToTrustStore
 import net.corda.nodeapi.internal.addShutdownHook
+import net.corda.nodeapi.internal.persistence.DatabaseIncompatibleException
 import net.corda.nodeapi.internal.config.UnknownConfigurationKeysException
 import net.corda.nodeapi.internal.persistence.CouldNotCreateDataSourceException
-import net.corda.nodeapi.internal.persistence.IncompatibleAttachmentsContractsTableName
 import net.corda.tools.shell.InteractiveShell
 import org.fusesource.jansi.Ansi
 import org.fusesource.jansi.AnsiConsole
@@ -174,7 +174,7 @@ open class NodeStartup(val args: Array<String>) {
         } catch (e: NetworkParametersReader.Error) {
             logger.error(e.message)
             return false
-        } catch (e: IncompatibleAttachmentsContractsTableName) {
+        } catch (e: DatabaseIncompatibleException) {
             e.message?.let { Node.printWarning(it) }
             logger.error(e.message)
             return false
diff --git a/node/src/main/kotlin/net/corda/node/services/persistence/DBCheckpointStorage.kt b/node/src/main/kotlin/net/corda/node/services/persistence/DBCheckpointStorage.kt
index 9750f7cc5c..9613112e0b 100644
--- a/node/src/main/kotlin/net/corda/node/services/persistence/DBCheckpointStorage.kt
+++ b/node/src/main/kotlin/net/corda/node/services/persistence/DBCheckpointStorage.kt
@@ -15,7 +15,7 @@ import java.util.stream.Stream
 import javax.persistence.Column
 import javax.persistence.Entity
 import javax.persistence.Id
-import javax.persistence.Lob
+import org.hibernate.annotations.Type
 
 /**
  * Simple checkpoint key value storage in DB.
@@ -30,7 +30,7 @@ class DBCheckpointStorage : CheckpointStorage {
         @Column(name = "checkpoint_id", length = 64, nullable = false)
         var checkpointId: String = "",
 
-        @Lob
+        @Type(type = "corda-blob")
         @Column(name = "checkpoint_value", nullable = false)
         var checkpoint: ByteArray = EMPTY_BYTE_ARRAY
 )
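
Other entities that persist large binary columns could opt into the same database-portable mapping by referencing the registered ``corda-blob`` type, just as ``DBCheckpointStorage`` now does. An illustrative sketch only - the entity, table and column names are hypothetical, and the type name resolves only where ``HibernateConfiguration`` has registered it:

.. sourcecode:: kotlin

    import javax.persistence.Column
    import javax.persistence.Entity
    import javax.persistence.Id
    import javax.persistence.Table
    import org.hibernate.annotations.Type

    // Hypothetical entity: its binary column maps to bytea on PostgreSQL and to a regular blob elsewhere.
    @Entity
    @Table(name = "example_blob_store")
    class ExampleBlobEntity(
            @Id
            @Column(name = "example_id", length = 64, nullable = false)
            var id: String = "",

            @Type(type = "corda-blob")
            @Column(name = "example_value", nullable = false)
            var value: ByteArray = ByteArray(0)
    )
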
diff --git a/testing/node-driver/src/main/kotlin/net/corda/testing/node/internal/DriverDSLImpl.kt b/testing/node-driver/src/main/kotlin/net/corda/testing/node/internal/DriverDSLImpl.kt
index fe9bc5d2a8..2036c40e27 100644
--- a/testing/node-driver/src/main/kotlin/net/corda/testing/node/internal/DriverDSLImpl.kt
+++ b/testing/node-driver/src/main/kotlin/net/corda/testing/node/internal/DriverDSLImpl.kt
@@ -578,9 +578,7 @@ class DriverDSLImpl(
                     localNetworkMap,
                     spec.rpcUsers,
                     spec.verifierType,
-                    customOverrides = notaryConfig(clusterAddress) + mapOf(
-                            "database.serverNameTablePrefix" to nodeNames[0].toString().replace(Regex("[^0-9A-Za-z]+"), "")
-                    )
+                    customOverrides = notaryConfig(clusterAddress)
             )
 
             // All other nodes will join the cluster
@@ -591,9 +589,7 @@ class DriverDSLImpl(
                         localNetworkMap,
                         spec.rpcUsers,
                         spec.verifierType,
-                        customOverrides = notaryConfig(nodeAddress, clusterAddress) + mapOf(
-                                "database.serverNameTablePrefix" to it.toString().replace(Regex("[^0-9A-Za-z]+"), "")
-                        )
+                        customOverrides = notaryConfig(nodeAddress, clusterAddress)
                 )
             }