mirror of
https://github.com/balena-os/balena-supervisor.git
synced 2025-04-05 18:36:43 +00:00
DB, Config: Implement modules to interact with the sqlite DB and to manage configuration
These modules allow managing the models in the sqlite database and the configuration for the supervisor. The database will now have a schema version, and the supervisor will normalize any legacy data when migrating from an older schema (i.e. when doing a supervisor update). This will make model changes cleaner. If a migration is needed, the DB initialization will return "true" and store the legacy data in a legacyData table. Once the supervisor finishes migrating the data, it calls `db.finishMigration` to mark the migration complete and clear the legacyData table. Changes in the models: * The database implements the tables for multicontainer applications that now have services, networks and volumes as in a docker compose file. * Dependent apps and devices now have separate tables to store their target states. * The deviceConfig table now only stores target values, as the current ones will be inferred from the state of the device. * We keep a table for images as we have no way to label them in docker storage, so we need to keep our own track of what images are relevant for the supervisor. The Config object allows transparent management of configuration values, mainly through `get`, `getMany` and `set` functions. The values can be stored in config.json or the database, and this is managed with a schema definition that also defines whether values are mutable and whether they have default values. Some configuration values are of the "func" type, which means that instead of corresponding to a config.json or database key, they result from a helper function that aggregates other configuration values or gets the value from other sources, like OS version and supervisor version. Writes to config.json are atomic if a path to the file via /mnt/root can be found. We keep a write-through cache of the file to avoid unnecessary IO. 
Changelog-Entry: Implement the multicontainer app models, and change the supervisor configuration management to avoid duplication between fields in config.json and fields in the internal database Change-Type: patch Signed-off-by: Pablo Carranza Velez <pablo@resin.io>
This commit is contained in:
parent
23f81c28f5
commit
0dc9fea4d3
@ -1,41 +1,338 @@
|
||||
{ checkInt, checkString } = require './lib/validation'
|
||||
Promise = require 'bluebird'
|
||||
_ = require 'lodash'
|
||||
Lock = require 'rwlock'
|
||||
deviceRegister = require 'resin-register-device'
|
||||
fs = Promise.promisifyAll(require('fs'))
|
||||
EventEmitter = require 'events'
|
||||
|
||||
dockerRoot = checkString(process.env.DOCKER_ROOT) ? '/mnt/root/var/lib/rce'
|
||||
apiEndpoint = checkString(process.env.API_ENDPOINT)
|
||||
{ writeAndSyncFile, writeFileAtomic } = require './lib/fs-utils'
|
||||
osRelease = require './lib/os-release'
|
||||
supervisorVersion = require './lib/supervisor-version'
|
||||
constants = require './lib/constants'
|
||||
|
||||
# Defaults needed for both gosuper and node supervisor are declared in entry.sh
|
||||
module.exports =
|
||||
apiEndpoint: apiEndpoint
|
||||
apiTimeout: checkInt(process.env.API_TIMEOUT, positive: true) ? 15 * 60 * 1000
|
||||
listenPort: checkInt(process.env.LISTEN_PORT, positive: true) ? 48484
|
||||
gosuperAddress: "http://unix:#{process.env.GOSUPER_SOCKET}:"
|
||||
deltaHost: checkString(process.env.DELTA_ENDPOINT) ? 'https://delta.resin.io'
|
||||
registryEndpoint: checkString(process.env.REGISTRY_ENDPOINT) ? 'registry.resin.io'
|
||||
pubnub:
|
||||
subscribe_key: checkString(process.env.PUBNUB_SUBSCRIBE_KEY) ? process.env.DEFAULT_PUBNUB_SUBSCRIBE_KEY
|
||||
publish_key: checkString(process.env.PUBNUB_PUBLISH_KEY) ? process.env.DEFAULT_PUBNUB_PUBLISH_KEY
|
||||
ssl: true
|
||||
mixpanelToken: checkString(process.env.MIXPANEL_TOKEN) ? process.env.DEFAULT_MIXPANEL_TOKEN
|
||||
mixpanelHost: "#{apiEndpoint}/mixpanel"
|
||||
dockerSocket: process.env.DOCKER_SOCKET
|
||||
supervisorImage: checkString(process.env.SUPERVISOR_IMAGE) ? 'resin/rpi-supervisor'
|
||||
configMountPoint: checkString(process.env.CONFIG_MOUNT_POINT) ? '/mnt/mmcblk0p1/config.json'
|
||||
ledFile: checkString(process.env.LED_FILE) ? '/sys/class/leds/led0/brightness'
|
||||
bootstrapRetryDelay: checkInt(process.env.BOOTSTRAP_RETRY_DELAY_MS, positive: true) ? 30000
|
||||
restartSuccessTimeout: checkInt(process.env.RESTART_SUCCESS_TIMEOUT, positive: true) ? 60000
|
||||
appUpdatePollInterval: checkInt(process.env.APPLICATION_UPDATE_POLL_INTERVAL, positive: true) ? 60000
|
||||
successMessage: 'SUPERVISOR OK'
|
||||
forceSecret:
|
||||
api: checkString(process.env.RESIN_SUPERVISOR_SECRET) ? null
|
||||
logsChannel: checkString(process.env.RESIN_SUPERVISOR_LOGS_CHANNEL) ? null
|
||||
vpnStatusPath: checkString(process.env.VPN_STATUS_PATH) ? '/mnt/root/run/openvpn/vpn_status'
|
||||
hostOSVersionPath: checkString(process.env.HOST_OS_VERSION_PATH) ? '/mnt/root/etc/os-release'
|
||||
dockerRoot: dockerRoot
|
||||
btrfsRoot: checkString(process.env.BTRFS_ROOT) ? "#{dockerRoot}/btrfs/subvolumes"
|
||||
privateAppEnvVars: [
|
||||
'RESIN_SUPERVISOR_API_KEY'
|
||||
'RESIN_API_KEY'
|
||||
]
|
||||
dataPath: checkString(process.env.RESIN_DATA_PATH) ? '/resin-data'
|
||||
bootMountPoint: checkString(process.env.BOOT_MOUNTPOINT) ? '/boot'
|
||||
proxyvisorHookReceiver: checkString(process.env.RESIN_PROXYVISOR_HOOK_RECEIVER) ? 'http://0.0.0.0:1337'
|
||||
module.exports = class Config extends EventEmitter
|
||||
constructor: ({ @db, @configPath }) ->
|
||||
# These are values that come from env vars or hardcoded defaults and can be resolved synchronously
|
||||
# Defaults needed for both gosuper and node supervisor are declared in entry.sh
|
||||
@constants = constants
|
||||
|
||||
@funcs =
|
||||
version: ->
|
||||
Promise.resolve(supervisorVersion)
|
||||
currentApiKey: =>
|
||||
@getMany([ 'apiKey', 'deviceApiKey' ])
|
||||
.then ({ apiKey, deviceApiKey }) ->
|
||||
return apiKey ? deviceApiKey
|
||||
offlineMode: =>
|
||||
@getMany([ 'resinApiEndpoint', 'supervisorOfflineMode' ])
|
||||
.then ({ resinApiEndpoint, supervisorOfflineMode }) ->
|
||||
return Boolean(supervisorOfflineMode) or !Boolean(resinApiEndpoint)
|
||||
pubnub: =>
|
||||
@getMany([ 'pubnubSubscribeKey', 'pubnubPublishKey' ])
|
||||
.then ({ pubnubSubscribeKey, pubnubPublishKey }) ->
|
||||
pubnub = {
|
||||
subscribe_key: pubnubSubscribeKey
|
||||
publish_key: pubnubPublishKey
|
||||
ssl: true
|
||||
}
|
||||
return pubnub
|
||||
resinApiEndpoint: =>
  # Returns the API endpoint from config.json, falling back to the
  # API_ENDPOINT env var (exposed via constants) for legacy devices.
  # BUGFIX: the .then callback must be a fat arrow (=>) so that @constants
  # refers to this Config instance; with a thin arrow `this` was wrong and
  # the env-var fallback could never be reached.
  @get('apiEndpoint')
  .then (apiEndpoint) =>
    return apiEndpoint ? @constants.apiEndpointFromEnv
|
||||
|
||||
provisioned: =>
|
||||
@getMany([ 'uuid', 'resinApiEndpoint', 'registered_at', 'deviceId' ])
|
||||
.then (requiredValues) ->
|
||||
return _.every(_.values(requiredValues), Boolean)
|
||||
|
||||
osVersion: =>
|
||||
osRelease.getOSVersion(@constants.hostOSVersionPath)
|
||||
|
||||
osVariant: =>
|
||||
osRelease.getOSVariant(@constants.hostOSVersionPath)
|
||||
|
||||
provisioningOptions: =>
|
||||
@getMany([
|
||||
'uuid'
|
||||
'userId'
|
||||
'applicationId'
|
||||
'apiKey'
|
||||
'deviceApiKey'
|
||||
'deviceType'
|
||||
'resinApiEndpoint'
|
||||
'apiTimeout'
|
||||
]).then (conf) ->
|
||||
return {
|
||||
uuid: conf.uuid
|
||||
applicationId: conf.applicationId
|
||||
userId: conf.userId
|
||||
deviceType: conf.deviceType
|
||||
provisioningApiKey: conf.apiKey
|
||||
deviceApiKey: conf.deviceApiKey
|
||||
apiEndpoint: conf.resinApiEndpoint
|
||||
apiTimeout: conf.apiTimeout
|
||||
}
|
||||
|
||||
mixpanelHost: =>
|
||||
@get('resinApiEndpoint')
|
||||
.then (apiEndpoint) ->
|
||||
return apiEndpoint + '/mixpanel'
|
||||
|
||||
extendedEnvOptions: =>
|
||||
@getMany([ 'uuid', 'listenPort', 'name', 'apiSecret', 'deviceApiKey', 'version', 'deviceType', 'osVersion' ])
|
||||
|
||||
fetchOptions: =>
|
||||
@getMany([
|
||||
'uuid'
|
||||
'currentApiKey'
|
||||
'resinApiEndpoint'
|
||||
'deltaEndpoint'
|
||||
'delta'
|
||||
'deltaRequestTimeout'
|
||||
'deltaApplyTimeout'
|
||||
'deltaRetryCount'
|
||||
'deltaRetryInterval'
|
||||
])
|
||||
|
||||
@schema = {
|
||||
apiEndpoint: { source: 'config.json' }
|
||||
apiTimeout: { source: 'config.json', default: 15 * 60 * 1000 }
|
||||
listenPort: { source: 'config.json', default: 48484 }
|
||||
deltaEndpoint: { source: 'config.json', default: 'https://delta.resin.io' }
|
||||
uuid: { source: 'config.json', mutable: true }
|
||||
apiKey: { source: 'config.json', mutable: true, removeIfNull: true }
|
||||
deviceApiKey: { source: 'config.json', mutable: true }
|
||||
deviceType: { source: 'config.json', default: 'raspberry-pi' }
|
||||
username: { source: 'config.json' }
|
||||
userId: { source: 'config.json' }
|
||||
deviceId: { source: 'config.json', mutable: true }
|
||||
registered_at: { source: 'config.json', mutable: true }
|
||||
applicationId: { source: 'config.json' }
|
||||
appUpdatePollInterval: { source: 'config.json', mutable: true, default: 60000 }
|
||||
pubnubSubscribeKey: { source: 'config.json', default: @constants.defaultPubnubSubscribeKey }
|
||||
pubnubPublishKey: { source: 'config.json', default: @constants.defaultPubnubPublishKey }
|
||||
mixpanelToken: { source: 'config.json', default: @constants.defaultMixpanelToken }
|
||||
bootstrapRetryDelay: { source: 'config.json', default: 30000 }
|
||||
supervisorOfflineMode: { source: 'config.json', default: false }
|
||||
|
||||
version: { source: 'func' }
|
||||
currentApiKey: { source: 'func' }
|
||||
offlineMode: { source: 'func' }
|
||||
pubnub: { source: 'func' }
|
||||
resinApiEndpoint: { source: 'func' }
|
||||
provisioned: { source: 'func' }
|
||||
osVersion: { source: 'func' }
|
||||
osVariant: { source: 'func' }
|
||||
provisioningOptions: { source: 'func' }
|
||||
mixpanelHost: { source: 'func' }
|
||||
extendedEnvOptions: { source: 'func' }
|
||||
fetchOptions: { source: 'func' }
|
||||
|
||||
apiSecret: { source: 'db', mutable: true }
|
||||
logsChannelSecret: { source: 'db', mutable: true }
|
||||
name: { source: 'db', mutable: true }
|
||||
initialConfigReported: { source: 'db', mutable: true, default: 'false' }
|
||||
initialConfigSaved: { source: 'db', mutable: true, default: 'false' }
|
||||
containersNormalised: { source: 'db', mutable: true, default: 'false' }
|
||||
localMode: { source: 'db', mutable: true, default: 'false' }
|
||||
loggingEnabled: { source: 'db', mutable: true, default: 'true' }
|
||||
connectivityCheckEnabled: { source: 'db', mutable: true, default: 'true' }
|
||||
delta: { source: 'db', mutable: true, default: 'false' }
|
||||
deltaRequestTimeout: { source: 'db', mutable: true, default: '' }
|
||||
deltaApplyTimeout: { source: 'db', mutable: true, default: '' }
|
||||
deltaRetryCount: { source: 'db', mutable: true, default: '' }
|
||||
deltaRetryInterval: { source: 'db', mutable: true, default: '' }
|
||||
lockOverride: { source: 'db', mutable: true, default: 'false' }
|
||||
}
|
||||
|
||||
@configJsonCache = {}
|
||||
|
||||
@_lock = new Lock()
|
||||
@_writeLock = Promise.promisify(@_lock.async.writeLock)
|
||||
@writeLockConfigJson = =>
|
||||
@_writeLock('config.json')
|
||||
.disposer (release) ->
|
||||
release()
|
||||
|
||||
@_readLock = Promise.promisify(@_lock.async.readLock)
|
||||
@readLockConfigJson = =>
|
||||
@_readLock('config.json')
|
||||
.disposer (release) ->
|
||||
release()
|
||||
|
||||
writeConfigJson: =>
  # Persist the in-memory config.json cache to disk. When the path on the
  # host can be resolved we write atomically (via writeFileAtomic);
  # otherwise we fall back to a plain write + sync at the non-atomic path.
  canWriteAtomically = true
  @configJsonPathOnHost()
  .catch (err) =>
    console.error(err.message)
    canWriteAtomically = false
    return @constants.configJsonNonAtomicPath
  .then (path) =>
    serialized = JSON.stringify(@configJsonCache)
    if canWriteAtomically
      writeFileAtomic(path, serialized)
    else
      writeAndSyncFile(path, serialized)
|
||||
|
||||
|
||||
configJsonSet: (keyVals) =>
  # Merge the given key/value pairs into the config.json cache and flush
  # the file to disk only if something actually changed. Serialized under
  # the config.json write lock.
  didChange = false
  Promise.using @writeLockConfigJson(), =>
    Promise.mapSeries _.keys(keyVals), (key) =>
      newValue = keyVals[key]
      return if @configJsonCache[key] == newValue
      @configJsonCache[key] = newValue
      # Keys flagged removeIfNull are dropped entirely when set to null
      delete @configJsonCache[key] if !newValue? and @schema[key].removeIfNull
      didChange = true
    .then =>
      @writeConfigJson() if didChange
|
||||
|
||||
configJsonRemove: (key) =>
  # Delete a single key from the config.json cache, flushing to disk only
  # when the key was actually present. Serialized under the write lock.
  Promise.using @writeLockConfigJson(), =>
    Promise.try =>
      return false if !@configJsonCache[key]?
      delete @configJsonCache[key]
      return true
    .then (removed) =>
      @writeConfigJson() if removed
|
||||
|
||||
configJsonPathOnHost: =>
  # Determine the path of config.json *on the host* (prefixed with the root
  # mount point), so writes can be done atomically via rename.
  # Resolution order: explicit @configPath, CONFIG_JSON_PATH from the env
  # (via constants), then a path inferred from the host OS version.
  # Throws when no path can be determined; callers fall back to a
  # non-atomic path.
  Promise.try =>
    return @configPath if @configPath?
    return @constants.configJsonPathOnHost if @constants.configJsonPathOnHost?
    osRelease.getOSVersion(@constants.hostOSVersionPath)
    .then (osVersion) =>
      # BUGFIX: escape the dot in the version regexes — an unescaped `.`
      # matches any character, so e.g. "Resin OS 21" would wrongly match.
      if /^Resin OS 2\./.test(osVersion)
        return "#{@constants.bootMountPointFromEnv}/config.json"
      else if /^Resin OS 1\./.test(osVersion)
        # In Resin OS 1.12, $BOOT_MOUNTPOINT was added and it coincides with config.json's path
        return "#{@constants.bootMountPointFromEnv}/config.json" if @constants.bootMountPointFromEnv
        # Older 1.X versions have config.json here
        return '/mnt/conf/config.json'
      else
        # In non-resinOS hosts (or older than 1.0.0), if CONFIG_JSON_PATH wasn't passed then we can't do atomic changes
        # (only access to config.json we have is in /boot, which is assumed to be a file bind mount where rename is impossible)
        throw new Error('Could not determine config.json path on host, atomic write will not be possible')
  .then (path) =>
    return "#{@constants.rootMountPoint}#{path}"
|
||||
|
||||
configJsonPath: =>
  # Best-effort path to config.json: prefer the atomic-write path on the
  # host, falling back to the non-atomic /boot path when it can't be found.
  @configJsonPathOnHost()
  .catch (err) =>
    console.error(err.message)
    return @constants.configJsonNonAtomicPath
|
||||
|
||||
readConfigJson: =>
  # Read config.json from wherever it currently lives and parse it.
  @configJsonPath()
  .then (path) ->
    fs.readFileAsync(path)
  .then(JSON.parse)
|
||||
|
||||
newUniqueKey: ->
  # Generate a fresh random key (used for uuid, API keys and secrets).
  return deviceRegister.generateUniqueKey()
|
||||
|
||||
generateRequiredFields: =>
  # Ensure uuid, deviceApiKey, apiSecret and logsChannelSecret all exist,
  # generating any missing ones and persisting them in a single @set call.
  @getMany([ 'uuid', 'deviceApiKey', 'apiSecret', 'logsChannelSecret' ])
  .then ({ uuid, deviceApiKey, apiSecret, logsChannelSecret }) =>
    # Nothing to do when every field is already populated
    if uuid? and deviceApiKey? and apiSecret? and logsChannelSecret?
      return
    uuid ?= @newUniqueKey()
    deviceApiKey ?= @newUniqueKey()
    apiSecret ?= @newUniqueKey()
    logsChannelSecret ?= @newUniqueKey()
    @set({ uuid, deviceApiKey, apiSecret, logsChannelSecret })
|
||||
|
||||
regenerateRegistrationFields: =>
  # Create a fresh uuid and deviceApiKey (e.g. to re-provision the device).
  # Consistency fix: use the @newUniqueKey helper (same generator used by
  # generateRequiredFields) instead of calling deviceRegister directly.
  uuid = @newUniqueKey()
  deviceApiKey = @newUniqueKey()
  @set({ uuid, deviceApiKey })
|
||||
|
||||
get: (key, trx) =>
  # Fetch a single config value from its declared source: a helper
  # function ('func'), the config.json cache, or the database ('db').
  # Unknown keys reject; a schema default applies when the stored value is
  # null. An optional knex transaction can be passed for db-backed keys.
  db = trx ? @db.models
  Promise.try =>
    switch @schema[key]?.source
      when undefined
        throw new Error("Unknown config value #{key}")
      when 'func'
        # Helper errors are logged and swallowed, yielding null
        @funcs[key]()
        .catch (err) ->
          console.error("Error getting config value for #{key}", err, err.stack)
          return null
      when 'config.json'
        Promise.using @readLockConfigJson(), =>
          return @configJsonCache[key]
      when 'db'
        db('config').select('value').where({ key })
        .then ([ conf ]) ->
          return conf?.value
  .then (value) =>
    # Apply the schema default only when no value was found
    return @schema[key].default if !value? and @schema[key]?.default?
    return value
|
||||
|
||||
getMany: (keys, trx) =>
  # Resolve several config keys at once, returning a { key: value } object.
  Promise.all(_.map(keys, (key) => @get(key, trx)))
  .then (values) ->
    return _.zipObject(keys, values)
|
||||
|
||||
# Sets config values as atomically as possible
|
||||
# Is atomic if all values have the same source, otherwise it's atomic for each source
|
||||
# Sets config values as atomically as possible
# Is atomic if all values have the same source, otherwise it's atomic for each source
set: (keyValues, trx) =>
  # Writes each value to its backing store (config.json or db): all db
  # values go in one transaction, all config.json values in one locked
  # write. Immutable keys reject. Emits 'change' with the given keyValues
  # once everything is saved.
  Promise.try =>
    # Partition the requested changes by their schema source
    configJsonVals = {}
    dbVals = {}
    _.forEach keyValues, (val, key) =>
      throw new Error("Attempt to change immutable config value #{key}") if !@schema[key]?.mutable
      switch @schema[key]?.source
        when 'config.json'
          configJsonVals[key] = val
        when 'db'
          dbVals[key] = val

    setValuesInTransaction = (tx) =>
      dbKeys = _.keys(dbVals)
      @getMany(dbKeys, tx)
      .then (oldValues) =>
        # Only upsert keys whose value actually changed
        Promise.map dbKeys, (key) =>
          value = dbVals[key]
          if oldValues[key] != value
            @db.upsertModel('config', { key, value }, { key }, tx)
      .then =>
        @configJsonSet(configJsonVals) if !_.isEmpty(configJsonVals)

    # Reuse the caller's transaction when given, otherwise open our own
    if trx?
      setValuesInTransaction(trx)
    else
      @db.transaction (tx) ->
        setValuesInTransaction(tx)
  .then =>
    setImmediate =>
      @emit('change', keyValues)
|
||||
|
||||
# Clear a value from config.json or DB
|
||||
# (will be used to clear the provisioning key)
|
||||
# only mutable fields!
|
||||
# Clear a value from config.json or DB
# (will be used to clear the provisioning key)
# only mutable fields!
remove: (key) =>
  # Clears a mutable value from its backing store; immutable keys reject.
  Promise.try =>
    if !@schema[key]?.mutable
      throw new Error("Attempt to delete immutable config value #{key}")
    switch @schema[key]?.source
      when 'config.json'
        @configJsonRemove(key)
      when 'db'
        @db.models('config').del().where({ key })
|
||||
|
||||
init: =>
  # Load config.json into the write-through cache, then make sure the
  # generated fields (uuid, deviceApiKey, apiSecret, logsChannelSecret)
  # all exist, creating and persisting any that are missing.
  @readConfigJson()
  .then (configJson) =>
    _.assign(@configJsonCache, configJson)
  .then =>
    @generateRequiredFields()
|
||||
|
321
src/db.coffee
321
src/db.coffee
@ -1,113 +1,228 @@
|
||||
Promise = require 'bluebird'
|
||||
Knex = require 'knex'
|
||||
|
||||
knex = Knex(
|
||||
client: 'sqlite3'
|
||||
connection:
|
||||
filename: '/data/database.sqlite'
|
||||
useNullAsDefault: true
|
||||
)
|
||||
constants = require './lib/constants'
|
||||
|
||||
addColumn = (table, column, type) ->
|
||||
knex.schema.hasColumn(table, column)
|
||||
.then (exists) ->
|
||||
if not exists
|
||||
knex.schema.table table, (t) ->
|
||||
t[type](column)
|
||||
module.exports = class DB
|
||||
constructor: ({ @databasePath } = {}) ->
|
||||
@databasePath ?= constants.databasePath
|
||||
@knex = Knex(
|
||||
client: 'sqlite3'
|
||||
connection:
|
||||
filename: @databasePath
|
||||
useNullAsDefault: true
|
||||
)
|
||||
|
||||
knex.init = Promise.all([
|
||||
knex.schema.hasTable('config')
|
||||
.then (exists) ->
|
||||
if not exists
|
||||
knex.schema.createTable 'config', (t) ->
|
||||
t.string('key').primary()
|
||||
t.string('value')
|
||||
addColumn: (table, column, type) =>
  # Add `column` of the given knex column type to `table`, if not present.
  @knex.schema.hasColumn(table, column)
  .then (exists) =>
    return if exists
    @knex.schema.table table, (t) ->
      t[type](column)
|
||||
|
||||
knex.schema.hasTable('deviceConfig')
|
||||
.then (exists) ->
|
||||
if not exists
|
||||
knex.schema.createTable 'deviceConfig', (t) ->
|
||||
t.json('values')
|
||||
t.json('targetValues')
|
||||
.then ->
|
||||
knex('deviceConfig').select()
|
||||
.then (deviceConfigs) ->
|
||||
knex('deviceConfig').insert({ values: '{}', targetValues: '{}' }) if deviceConfigs.length == 0
|
||||
dropColumn: (table, column) =>
  # Drop `column` from `table` when it exists; no-op otherwise.
  @knex.schema.hasColumn(table, column)
  .then (exists) =>
    return if not exists
    @knex.schema.table table, (t) ->
      t.dropColumn(column)
|
||||
|
||||
knex.schema.hasTable('app')
|
||||
.then (exists) ->
|
||||
if not exists
|
||||
knex.schema.createTable 'app', (t) ->
|
||||
t.increments('id').primary()
|
||||
t.string('name')
|
||||
t.string('containerName')
|
||||
t.string('commit')
|
||||
t.string('imageId')
|
||||
t.string('appId')
|
||||
t.boolean('privileged')
|
||||
t.json('env')
|
||||
t.json('config')
|
||||
t.boolean('markedForDeletion')
|
||||
else
|
||||
Promise.all [
|
||||
addColumn('app', 'commit', 'string')
|
||||
addColumn('app', 'appId', 'string')
|
||||
addColumn('app', 'containerName', 'string')
|
||||
addColumn('app', 'config', 'json')
|
||||
addColumn('app', 'markedForDeletion', 'boolean')
|
||||
]
|
||||
.then ->
|
||||
# When updating from older supervisors, config can be null
|
||||
knex('app').update({ config: '{}' }).whereNull('config')
|
||||
.then ->
|
||||
knex('app').update({ markedForDeletion: false }).whereNull('markedForDeletion')
|
||||
dropTableIfExists: (tableName, trx) =>
  # Drop a table when present, optionally inside the given transaction.
  # (Renamed the local from `knex` to avoid shadowing the library name.)
  client = trx ? @knex
  client.schema.hasTable(tableName)
  .then (exists) ->
    client.schema.dropTable(tableName) if exists
|
||||
|
||||
knex.schema.hasTable('dependentApp')
|
||||
.then (exists) ->
|
||||
if not exists
|
||||
knex.schema.createTable 'dependentApp', (t) ->
|
||||
t.increments('id').primary()
|
||||
t.string('appId')
|
||||
t.string('parentAppId')
|
||||
t.string('name')
|
||||
t.string('commit')
|
||||
t.string('imageId')
|
||||
t.json('config')
|
||||
t.json('environment')
|
||||
else
|
||||
addColumn('dependentApp', 'environment', 'json')
|
||||
_migrateToV2: =>
|
||||
# Drop all tables, but keep the info we need
|
||||
@transaction (trx) =>
|
||||
trx.schema.hasTable('legacyData')
|
||||
.then (exists) =>
|
||||
if not exists
|
||||
trx.schema.createTable 'legacyData', (t) ->
|
||||
t.json('apps')
|
||||
t.json('dependentApps')
|
||||
t.json('dependentDevices')
|
||||
.then =>
|
||||
Promise.join(
|
||||
trx.schema.hasTable('app')
|
||||
.then (exists) ->
|
||||
if exists
|
||||
trx.select().from('app')
|
||||
else
|
||||
return []
|
||||
.then(JSON.stringify)
|
||||
trx.schema.hasTable('dependentDevice')
|
||||
.then (exists) ->
|
||||
if exists
|
||||
trx.select().from('dependentDevice')
|
||||
else
|
||||
return []
|
||||
.then(JSON.stringify)
|
||||
trx.schema.hasTable('dependentApp')
|
||||
.then (exists) ->
|
||||
if exists
|
||||
trx.select().from('dependentApp')
|
||||
else
|
||||
return []
|
||||
.then(JSON.stringify)
|
||||
(apps, dependentDevices, dependentApps) =>
|
||||
@upsertModel('legacyData', { apps, dependentDevices, dependentApps }, {}, trx)
|
||||
)
|
||||
.then =>
|
||||
@dropTableIfExists('app', trx)
|
||||
.then =>
|
||||
@dropTableIfExists('deviceConfig', trx)
|
||||
.then =>
|
||||
@dropTableIfExists('dependentApp', trx)
|
||||
.then =>
|
||||
@dropTableIfExists('dependentDevice', trx)
|
||||
.then =>
|
||||
@dropTableIfExists('image', trx)
|
||||
.then =>
|
||||
@dropTableIfExists('container', trx)
|
||||
|
||||
knex.schema.hasTable('dependentDevice')
|
||||
.then (exists) ->
|
||||
if not exists
|
||||
knex.schema.createTable 'dependentDevice', (t) ->
|
||||
t.increments('id').primary()
|
||||
t.string('uuid')
|
||||
t.string('appId')
|
||||
t.string('localId')
|
||||
t.string('device_type')
|
||||
t.string('logs_channel')
|
||||
t.string('deviceId')
|
||||
t.boolean('is_online')
|
||||
t.string('name')
|
||||
t.string('status')
|
||||
t.string('download_progress')
|
||||
t.string('is_managed_by')
|
||||
t.dateTime('lock_expiry_date')
|
||||
t.string('commit')
|
||||
t.string('targetCommit')
|
||||
t.json('environment')
|
||||
t.json('targetEnvironment')
|
||||
t.json('config')
|
||||
t.json('targetConfig')
|
||||
t.boolean('markedForDeletion')
|
||||
else
|
||||
Promise.all [
|
||||
addColumn('dependentDevice', 'markedForDeletion', 'boolean')
|
||||
addColumn('dependentDevice', 'localId', 'string')
|
||||
addColumn('dependentDevice', 'is_managed_by', 'string')
|
||||
addColumn('dependentDevice', 'lock_expiry_date', 'dateTime')
|
||||
]
|
||||
])
|
||||
finishMigration: =>
  # Mark the schema migration as complete: record schema-version 2 and
  # discard the stashed legacy data, atomically in one transaction.
  @transaction (trx) =>
    @upsertModel('config', { key: 'schema-version', value: '2' }, { key: 'schema-version' }, trx)
    .then =>
      @dropTableIfExists('legacyData', trx)
|
||||
|
||||
module.exports = knex
|
||||
_initConfigAndGetSchemaVersion: =>
  # Ensure the config table exists (seeding schema-version '2' on a brand
  # new database) and resolve to the stored schema-version row, or
  # undefined when a pre-versioning database is present.
  @knex.schema.hasTable('config')
  .then (exists) =>
    return if exists
    @knex.schema.createTable 'config', (t) ->
      t.string('key').primary()
      t.string('value')
    .then =>
      # Fresh database: it is born at the current schema version
      @knex('config').insert({ key: 'schema-version', value: '2' })
  .then =>
    @knex('config').where({ key: 'schema-version' }).select()
  .then ([ schemaVersion ]) ->
    return schemaVersion
|
||||
|
||||
init: =>
  # Initialize the database: ensure the config table exists, migrate from a
  # pre-v2 schema when needed (stashing legacy data in the legacyData table
  # for the caller to normalize), then create every v2 model table.
  # Resolves to true when a migration was performed — the caller must later
  # invoke finishMigration once the legacy data has been processed.
  migrationNeeded = false
  @_initConfigAndGetSchemaVersion()
  .then (schemaVersion) =>
    if !schemaVersion? or schemaVersion.value != '2'
      # We're on an old db, need to migrate
      migrationNeeded = true
      @_migrateToV2()
  .then =>
    # Create a table with the given builder when it doesn't exist yet
    ensureTable = (name, builder) =>
      @knex.schema.hasTable(name)
      .then (exists) =>
        @knex.schema.createTable(name, builder) if not exists
    # deviceConfig is a single-row table; seed the row when empty
    initDeviceConfig = =>
      ensureTable('deviceConfig', (t) ->
        t.json('targetValues')
      )
      .then =>
        @knex('deviceConfig').select()
      .then (deviceConfigs) =>
        @knex('deviceConfig').insert({ targetValues: '{}' }) if deviceConfigs.length == 0
    Promise.all([
      initDeviceConfig()
      ensureTable('app', (t) ->
        t.increments('id').primary()
        t.string('name')
        t.string('releaseId')
        t.string('commit')
        t.string('appId')
        t.json('services')
        t.json('networks')
        t.json('volumes')
      )
      ensureTable('dependentAppTarget', (t) ->
        t.increments('id').primary()
        t.string('appId')
        t.string('parentApp')
        t.string('name')
        t.string('commit')
        t.string('releaseId')
        t.string('imageId')
        t.string('image')
        t.json('environment')
        t.json('config')
      )
      ensureTable('dependentDeviceTarget', (t) ->
        t.increments('id').primary()
        t.string('uuid')
        t.string('name')
        t.json('apps')
      )
      ensureTable('dependentApp', (t) ->
        t.increments('id').primary()
        t.string('appId')
        t.string('parentApp')
        t.string('name')
        t.string('commit')
        t.string('releaseId')
        t.string('image')
        t.json('environment')
        t.json('config')
      )
      ensureTable('dependentDevice', (t) ->
        t.increments('id').primary()
        t.string('uuid')
        t.string('appId')
        t.string('localId')
        t.string('device_type')
        t.string('logs_channel')
        t.string('deviceId')
        t.boolean('is_online')
        t.string('name')
        t.string('status')
        t.string('download_progress')
        t.string('is_managed_by')
        t.dateTime('lock_expiry_date')
        t.string('commit')
        t.string('targetCommit')
        t.json('environment')
        t.json('targetEnvironment')
        t.json('config')
        t.json('targetConfig')
        t.boolean('markedForDeletion')
      )
      ensureTable('image', (t) ->
        t.increments('id').primary()
        t.string('name')
        t.string('appId')
        t.string('serviceId')
        t.string('serviceName')
        t.string('imageId')
        t.string('releaseId')
        t.boolean('dependent')
      )
    ])
  .then ->
    return migrationNeeded
|
||||
|
||||
# Returns a knex object for one of the models (tables)
|
||||
# Returns a knex object for one of the models (tables)
models: (modelName) =>
  return @knex(modelName)
|
||||
|
||||
upsertModel: (modelName, obj, id, trx) =>
  # Update the row matching `id` with `obj`, inserting a new row when no
  # row matched. Runs on `trx` when given, otherwise on the default
  # connection. (Local renamed from `knex` to avoid shadowing.)
  client = trx ? @knex
  client(modelName).update(obj).where(id)
  .then (updatedCount) ->
    client(modelName).insert(obj) if updatedCount == 0
|
||||
|
||||
transaction: (cb) =>
  # Run `cb` inside a knex transaction.
  @knex.transaction(cb)
|
||||
|
34
src/lib/constants.coffee
Normal file
34
src/lib/constants.coffee
Normal file
@ -0,0 +1,34 @@
|
||||
{ checkString } = require './validation'

bootMountPointFromEnv = checkString(process.env.BOOT_MOUNTPOINT)
rootMountPoint = checkString(process.env.ROOT_MOUNTPOINT) ? '/mnt/root'

# Static configuration shared across the supervisor. Values come from the
# environment where available, with hardcoded fallbacks.
module.exports =
  # Paths and mount points
  rootMountPoint: rootMountPoint
  databasePath: checkString(process.env.DATABASE_PATH) ? '/data/database.sqlite'
  dataPath: checkString(process.env.RESIN_DATA_PATH) ? '/resin-data'
  bootMountPointFromEnv: bootMountPointFromEnv
  bootMountPoint: bootMountPointFromEnv ? '/boot'
  configJsonPathOnHost: checkString(process.env.CONFIG_JSON_PATH)
  configJsonNonAtomicPath: '/boot/config.json'
  appsJsonPath: process.env.APPS_JSON_PATH ? '/boot/apps.json'
  vpnStatusPath: checkString(process.env.VPN_STATUS_PATH) ? "#{rootMountPoint}/run/openvpn/vpn_status"
  hostOSVersionPath: checkString(process.env.HOST_OS_VERSION_PATH) ? "#{rootMountPoint}/etc/os-release"
  ledFile: checkString(process.env.LED_FILE) ? '/sys/class/leds/led0/brightness'
  # Endpoints and sockets
  gosuperAddress: "http://unix:#{process.env.GOSUPER_SOCKET}:"
  dockerSocket: process.env.DOCKER_SOCKET ? '/var/run/docker.sock'
  supervisorImage: checkString(process.env.SUPERVISOR_IMAGE) ? 'resin/rpi-supervisor'
  proxyvisorHookReceiver: checkString(process.env.RESIN_PROXYVISOR_HOOK_RECEIVER) ? 'http://0.0.0.0:1337'
  apiEndpointFromEnv: checkString(process.env.API_ENDPOINT)
  forceSecret: # Only used for development
    api: checkString(process.env.RESIN_SUPERVISOR_SECRET) ? null
    logsChannel: checkString(process.env.RESIN_SUPERVISOR_LOGS_CHANNEL) ? null
  # Misc defaults
  privateAppEnvVars: [
    'RESIN_SUPERVISOR_API_KEY'
    'RESIN_API_KEY'
  ]
  defaultPubnubSubscribeKey: process.env.DEFAULT_PUBNUB_SUBSCRIBE_KEY
  defaultPubnubPublishKey: process.env.DEFAULT_PUBNUB_PUBLISH_KEY
  defaultMixpanelToken: process.env.DEFAULT_MIXPANEL_TOKEN
  allowedInterfaces: ['resin-vpn', 'tun0', 'docker0', 'lo']
  ipAddressUpdateInterval: 30000
|
@ -10,10 +10,12 @@ exports.getOSReleaseField = (path, field) ->
|
||||
for line in lines
|
||||
[ key, val ] = line.split('=')
|
||||
releaseItems[_.trim(key)] = _.trim(val)
|
||||
if !releaseItems[field]?
|
||||
throw new Error("Field #{field} not available in #{path}")
|
||||
# Remove enclosing quotes: http://stackoverflow.com/a/19156197/2549019
|
||||
return releaseItems[field].replace(/^"(.+(?="$))"$/, '$1')
|
||||
catch err
|
||||
console.log('Could not get OS release field: ', err, err.stack)
|
||||
console.log('Could not get OS release field: ', err.message)
|
||||
return undefined
|
||||
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user