Mirror of https://github.com/balena-os/balena-supervisor.git
Synced 2024-12-19 05:37:53 +00:00
Various bugfixes and stylistic improvements
* Use the correct defaults for the delta config variables that have them
* Only mount /lib/firmware and /lib/modules if they exist on the host
* hardcode-migrations.js: Nicer line separation
* APIBinder: switch to using a header for authentication, and keep credentials saved in the API clients
* Fix hrtime measurements in milliseconds
* Do not use classes for routers
* compose: properly initialize networkMode to the first entry in networks if there is one
* Fix some details regarding defaults in validation and service

Signed-off-by: Pablo Carranza Velez <pablo@resin.io>
This commit is contained in:
parent ec22bfcb29
commit 58b167b43d
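For context on two of the items above, as reflected in the APIBinder diff that follows: process.hrtime() returns a [seconds, nanoseconds] tuple, and the old healthcheck compared only the seconds component against appUpdatePollInterval (which the new code treats as milliseconds), mixing units. A minimal standalone sketch of the conversion the commit applies (illustrative only, not part of the diff; the helper name measureMs is made up here):

measureMs = (start) ->
  # process.hrtime(start) -> [ seconds, nanoseconds ] elapsed since `start`
  elapsed = process.hrtime(start)
  return elapsed[0] * 1000 + elapsed[1] / 1e6

start = process.hrtime()
setTimeout ->
  console.log("elapsed: #{measureMs(start)} ms")
, 50

The authentication change follows one pattern throughout the file: rather than appending ?apikey=... to request URLs or passing customOptions: apikey, the current API key is stored once as an Authorization: "Bearer #{currentApiKey}" header in the pinejs-client passthrough options, so every request sent through those clients carries the credential.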
@@ -2,6 +2,8 @@
// This hack makes the migrations directory a constant so that at least we can use webpack contexts for the
// require.
module.exports = function (source) {
return source.toString().replace("require(directory + '/' + name);", "require('./migrations/' + name);")
.replace("require(_path2.default.join(this._absoluteConfigDir(), name));", "require('./migrations/' + name);")
return source
.toString()
.replace("require(directory + '/' + name);", "require('./migrations/' + name);")
.replace("require(_path2.default.join(this._absoluteConfigDir(), name));", "require('./migrations/' + name);")
}

@@ -2,7 +2,7 @@ Promise = require 'bluebird'
_ = require 'lodash'
url = require 'url'
TypedError = require 'typed-error'
PlatformAPI = require 'pinejs-client'
PinejsClient = require 'pinejs-client'
deviceRegister = require 'resin-register-device'
express = require 'express'
bodyParser = require 'body-parser'
@@ -18,17 +18,16 @@ ExchangeKeyError = class ExchangeKeyError extends TypedError
REPORT_SUCCESS_DELAY = 1000
REPORT_RETRY_DELAY = 5000

class APIBinderRouter
constructor: (@apiBinder) ->
{ @eventTracker } = @apiBinder
@router = express.Router()
@router.use(bodyParser.urlencoded(extended: true))
@router.use(bodyParser.json())
@router.post '/v1/update', (req, res) =>
@eventTracker.track('Update notification')
if @apiBinder.readyForUpdates
@apiBinder.getAndSetTargetState(req.body.force)
res.sendStatus(204)
createAPIBinderRouter = (apiBinder) ->
router = express.Router()
router.use(bodyParser.urlencoded(extended: true))
router.use(bodyParser.json())
router.post '/v1/update', (req, res) ->
apiBinder.eventTracker.track('Update notification')
if apiBinder.readyForUpdates
apiBinder.getAndSetTargetState(req.body.force)
res.sendStatus(204)
return router

module.exports = class APIBinder
constructor: ({ @config, @db, @deviceState, @eventTracker }) ->
@@ -41,8 +40,7 @@ module.exports = class APIBinder
@_targetStateInterval = null
@reportPending = false
@stateReportErrors = 0
@_router = new APIBinderRouter(this)
@router = @_router.router
@router = createAPIBinderRouter(this)
_lock = new Lock()
@_writeLock = Promise.promisify(_lock.async.writeLock)
@readyForUpdates = false
@@ -52,7 +50,9 @@ module.exports = class APIBinder
.then (conf) =>
if conf.offlineMode
return true
stateFetchHealthy = process.hrtime(@lastTargetStateFetch)[0] < 2 * conf.appUpdatePollInterval
timeSinceLastFetch = process.hrtime(@lastTargetStateFetch)
timeSinceLastFetchMs = timeSinceLastFetch[0] * 1000 + timeSinceLastFetch[1] / 1e6
stateFetchHealthy = timeSinceLastFetchMs < 2 * conf.appUpdatePollInterval
stateReportHealthy = !conf.connectivityCheckEnabled or !@deviceState.connected or @stateReportErrors < 3
return stateFetchHealthy and stateReportHealthy

@@ -61,19 +61,22 @@ module.exports = class APIBinder
release()

init: (startServices = true) ->
@config.getMany([ 'offlineMode', 'resinApiEndpoint', 'bootstrapRetryDelay' ])
.then ({ offlineMode, resinApiEndpoint, bootstrapRetryDelay }) =>
@config.getMany([ 'offlineMode', 'resinApiEndpoint', 'bootstrapRetryDelay', 'currentApiKey' ])
.then ({ offlineMode, resinApiEndpoint, bootstrapRetryDelay, currentApiKey }) =>
if offlineMode
console.log('Offline Mode is set, skipping API binder initialization')
return
baseUrl = url.resolve(resinApiEndpoint, '/v4/')
@resinApi = new PlatformAPI
passthrough = _.cloneDeep(requestOpts)
passthrough.headers ?= {}
passthrough.headers.Authorization = "Bearer #{currentApiKey}"
@resinApi = new PinejsClient
apiPrefix: baseUrl
passthrough: requestOpts
passthrough: passthrough
baseUrlLegacy = url.resolve(resinApiEndpoint, '/v2/')
@resinApiLegacy = new PlatformAPI
@resinApiLegacy = new PinejsClient
apiPrefix: baseUrlLegacy
passthrough: requestOpts
passthrough: passthrough
@cachedResinApi = @resinApi.clone({}, cache: {})
if !startServices
return
@@ -100,8 +103,8 @@ module.exports = class APIBinder
options:
filter:
uuid: uuid
customOptions:
apikey: apiKey
passthrough:
headers: Authorization: "Bearer: #{apiKey}"
.get(0)
.catchReturn(null)
.timeout(timeout)
@@ -121,20 +124,21 @@ module.exports = class APIBinder
return device
# If it's not valid/doesn't exist then we try to use the user/provisioning api key for the exchange
@fetchDevice(opts.uuid, opts.provisioningApiKey, opts.apiTimeout)
.then (device) ->
.tap (device) ->
if not device?
throw new ExchangeKeyError("Couldn't fetch device with provisioning key")
# We found the device, we can try to register a working device key for it
request.postAsync("#{opts.apiEndpoint}/api-key/device/#{device.id}/device-key?apikey=#{opts.provisioningApiKey}", {
request.postAsync("#{opts.apiEndpoint}/api-key/device/#{device.id}/device-key", {
json: true
body:
apiKey: opts.deviceApiKey
headers:
Authorization: "Bearer #{opts.provisioningApiKey}"
})
.spread (res, body) ->
if res.statusCode != 200
throw new ExchangeKeyError("Couldn't register device key with provisioning key")
.timeout(opts.apiTimeout)
.return(device)

_exchangeKeyAndGetDeviceOrRegenerate: (opts) =>
@_exchangeKeyAndGetDevice(opts)
@@ -167,6 +171,7 @@ module.exports = class APIBinder
console.log('Device is registered but we still have an apiKey, attempting key exchange')
@_exchangeKeyAndGetDevice(opts)
.then ({ id }) =>
@resinApi.passthrough.headers.Authorization = "Bearer #{opts.deviceApiKey}"
configToUpdate = {
registered_at: opts.registered_at
deviceId: id
@@ -200,7 +205,6 @@ module.exports = class APIBinder
@config.getMany([
'offlineMode'
'provisioned'
'currentApiKey'
'apiTimeout'
'userId'
'deviceId'
@@ -221,8 +225,6 @@ module.exports = class APIBinder
@resinApi.post
resource: 'device'
body: device
customOptions:
apikey: conf.currentApiKey
.timeout(conf.apiTimeout)

# This uses resin API v2 for now, as the proxyvisor expects to be able to patch the device's commit
@@ -230,7 +232,6 @@ module.exports = class APIBinder
@config.getMany([
'offlineMode'
'provisioned'
'currentApiKey'
'apiTimeout'
])
.then (conf) =>
@@ -242,8 +243,6 @@ module.exports = class APIBinder
resource: 'device'
id: id
body: updatedFields
customOptions:
apikey: conf.currentApiKey
.timeout(conf.apiTimeout)

# Creates the necessary config vars in the API to match the current device state,
@@ -252,8 +251,8 @@ module.exports = class APIBinder
Promise.join(
@deviceState.getCurrentForComparison()
@getTargetState()
@config.getMany([ 'currentApiKey', 'deviceId' ])
(currentState, targetState, conf) =>
@config.get('deviceId')
(currentState, targetState, deviceId) =>
currentConfig = currentState.local.config
targetConfig = targetState.local.config
Promise.mapSeries _.toPairs(currentConfig), ([ key, value ]) =>
@@ -263,14 +262,12 @@ module.exports = class APIBinder
if !targetConfig[key]?
envVar = {
value
device: conf.deviceId
device: deviceId
name: key
}
@resinApi.post
resource: 'device_config_variable'
body: envVar
customOptions:
apikey: conf.currentApiKey
)
.then =>
@config.set({ initialConfigReported: 'true' })
@@ -284,13 +281,13 @@ module.exports = class APIBinder
@reportInitialConfig(retryDelay)

getTargetState: =>
@config.getMany([ 'uuid', 'currentApiKey', 'resinApiEndpoint', 'apiTimeout' ])
.then ({ uuid, currentApiKey, resinApiEndpoint, apiTimeout }) =>
@config.getMany([ 'uuid', 'resinApiEndpoint', 'apiTimeout' ])
.then ({ uuid, resinApiEndpoint, apiTimeout }) =>
endpoint = url.resolve(resinApiEndpoint, "/device/v2/#{uuid}/state")

requestParams = _.extend
method: 'GET'
url: "#{endpoint}?&apikey=#{currentApiKey}"
url: "#{endpoint}"
, @cachedResinApi.passthrough

@cachedResinApi._request(requestParams)
@@ -312,11 +309,10 @@ module.exports = class APIBinder
@lastTargetStateFetch = process.hrtime()

_pollTargetState: =>
if @_targetStateInterval?
clearInterval(@_targetStateInterval)
@_targetStateInterval = null
@config.get('appUpdatePollInterval')
.then (appUpdatePollInterval) =>
if @_targetStateInterval?
clearInterval(@_targetStateInterval)
@_targetStateInterval = setInterval(@getAndSetTargetState, appUpdatePollInterval)
@getAndSetTargetState()
return
@@ -336,20 +332,20 @@ module.exports = class APIBinder
dependent: _.omitBy @stateForReport.dependent, (val, key) =>
_.isEqual(@lastReportedState.dependent[key], val)
}
return _.pickBy(diff, (val) -> !_.isEmpty(val))
return _.pickBy(diff, _.negate(_.isEmpty))

_sendReportPatch: (stateDiff, conf) =>
endpoint = url.resolve(conf.resinApiEndpoint, "/device/v2/#{conf.uuid}/state")
requestParams = _.extend
method: 'PATCH'
url: "#{endpoint}?&apikey=#{conf.currentApiKey}"
url: "#{endpoint}"
body: stateDiff
, @cachedResinApi.passthrough

@cachedResinApi._request(requestParams)

_report: =>
@config.getMany([ 'currentApiKey', 'deviceId', 'apiTimeout', 'resinApiEndpoint', 'uuid' ])
@config.getMany([ 'deviceId', 'apiTimeout', 'resinApiEndpoint', 'uuid' ])
.then (conf) =>
stateDiff = @_getStateDiff()
if _.size(stateDiff) is 0

@ -3,10 +3,11 @@ _ = require 'lodash'
|
||||
EventEmitter = require 'events'
|
||||
express = require 'express'
|
||||
bodyParser = require 'body-parser'
|
||||
fs = Promise.promisifyAll(require('fs'))
|
||||
path = require 'path'
|
||||
|
||||
constants = require './lib/constants'
|
||||
|
||||
process.env.DOCKER_HOST ?= "unix://#{constants.dockerSocket}"
|
||||
Docker = require './lib/docker-utils'
|
||||
updateLock = require './lib/update-lock'
|
||||
{ checkTruthy, checkInt, checkString } = require './lib/validation'
|
||||
@ -20,11 +21,8 @@ Volumes = require './compose/volumes'
|
||||
|
||||
Proxyvisor = require './proxyvisor'
|
||||
|
||||
serviceAction = (action, serviceId, current, target, options) ->
|
||||
obj = { action, serviceId, current, target }
|
||||
if options?
|
||||
obj.options = options
|
||||
return obj
|
||||
serviceAction = (action, serviceId, current, target, options = {}) ->
|
||||
return { action, serviceId, current, target, options }
|
||||
|
||||
# TODO: move this to an Image class?
|
||||
imageForService = (service) ->
|
||||
@ -44,234 +42,193 @@ fetchAction = (service) ->
|
||||
image: imageForService(service)
|
||||
serviceId: service.serviceId
|
||||
}
|
||||
|
||||
pathExistsOnHost = (p) ->
|
||||
fs.statAsync(path.join(constants.rootMountPoint, p))
|
||||
.return(true)
|
||||
.catchReturn(false)
|
||||
|
||||
appNotFoundMsg = "App not found: an app needs to be installed for this endpoint to work.
|
||||
If you've recently moved this device from another app,
|
||||
please push an app and wait for it to be installed first."
|
||||
|
||||
# TODO: implement additional v2 endpoints
|
||||
# v1 endpoins only work for single-container apps as they assume the app has a single service.
|
||||
class ApplicationManagerRouter
|
||||
constructor: (@applications) ->
|
||||
{ @proxyvisor, @eventTracker, @deviceState, @_lockingIfNecessary, @logger } = @applications
|
||||
@router = express.Router()
|
||||
@router.use(bodyParser.urlencoded(extended: true))
|
||||
@router.use(bodyParser.json())
|
||||
# Some v1 endpoins only work for single-container apps as they assume the app has a single service.
|
||||
createApplicationManagerRouter = (applications) ->
|
||||
{ eventTracker, deviceState, _lockingIfNecessary, logger } = applications
|
||||
router = express.Router()
|
||||
router.use(bodyParser.urlencoded(extended: true))
|
||||
router.use(bodyParser.json())
|
||||
|
||||
doRestart = (appId, force) =>
|
||||
@_lockingIfNecessary appId, { force }, =>
|
||||
@deviceState.getCurrentForComparison()
|
||||
.then (currentState) =>
|
||||
app = currentState.local.apps[appId]
|
||||
imageIds = _.map(app.services, 'imageId')
|
||||
@applications.clearTargetVolatileForServices(imageIds)
|
||||
stoppedApp = _.cloneDeep(app)
|
||||
stoppedApp.services = []
|
||||
currentState.local.apps[appId] = stoppedApp
|
||||
@deviceState.pausingApply =>
|
||||
@deviceState.applyIntermediateTarget(currentState, { skipLock: true })
|
||||
.then =>
|
||||
currentState.local.apps[appId] = app
|
||||
@deviceState.applyIntermediateTarget(currentState, { skipLock: true })
|
||||
.finally =>
|
||||
@deviceState.triggerApplyTarget()
|
||||
doRestart = (appId, force) ->
|
||||
_lockingIfNecessary appId, { force }, ->
|
||||
deviceState.getCurrentForComparison()
|
||||
.then (currentState) ->
|
||||
app = currentState.local.apps[appId]
|
||||
imageIds = _.map(app.services, 'imageId')
|
||||
applications.clearTargetVolatileForServices(imageIds)
|
||||
stoppedApp = _.cloneDeep(app)
|
||||
stoppedApp.services = []
|
||||
currentState.local.apps[appId] = stoppedApp
|
||||
deviceState.pausingApply ->
|
||||
deviceState.applyIntermediateTarget(currentState, { skipLock: true })
|
||||
.then ->
|
||||
currentState.local.apps[appId] = app
|
||||
deviceState.applyIntermediateTarget(currentState, { skipLock: true })
|
||||
.finally ->
|
||||
deviceState.triggerApplyTarget()
|
||||
|
||||
doPurge = (appId, force) =>
|
||||
@logger.logSystemMessage("Purging data for app #{appId}", { appId }, 'Purge data')
|
||||
@_lockingIfNecessary appId, { force }, =>
|
||||
@deviceState.getCurrentForComparison()
|
||||
.then (currentState) =>
|
||||
app = currentState.local.apps[appId]
|
||||
purgedApp = _.cloneDeep(app)
|
||||
purgedApp.services = []
|
||||
purgedApp.volumes = {}
|
||||
currentState.local.apps[appId] = purgedApp
|
||||
@deviceState.pausingApply =>
|
||||
@deviceState.applyIntermediateTarget(currentState, { skipLock: true })
|
||||
.then =>
|
||||
currentState.local.apps[appId] = app
|
||||
@deviceState.applyIntermediateTarget(currentState, { skipLock: true })
|
||||
.finally =>
|
||||
@deviceState.triggerApplyTarget()
|
||||
.tap =>
|
||||
@logger.logSystemMessage('Purged data', { appId }, 'Purge data success')
|
||||
.catch (err) =>
|
||||
@logger.logSystemMessage("Error purging data: #{err}", { appId, error: err }, 'Purge data error')
|
||||
throw err
|
||||
doPurge = (appId, force) ->
|
||||
logger.logSystemMessage("Purging data for app #{appId}", { appId }, 'Purge data')
|
||||
_lockingIfNecessary appId, { force }, ->
|
||||
deviceState.getCurrentForComparison()
|
||||
.then (currentState) ->
|
||||
app = currentState.local.apps[appId]
|
||||
if !app?
|
||||
throw new Error(appNotFoundMsg)
|
||||
purgedApp = _.cloneDeep(app)
|
||||
purgedApp.services = []
|
||||
purgedApp.volumes = {}
|
||||
currentState.local.apps[appId] = purgedApp
|
||||
deviceState.pausingApply ->
|
||||
deviceState.applyIntermediateTarget(currentState, { skipLock: true })
|
||||
.then ->
|
||||
currentState.local.apps[appId] = app
|
||||
deviceState.applyIntermediateTarget(currentState, { skipLock: true })
|
||||
.finally ->
|
||||
deviceState.triggerApplyTarget()
|
||||
.tap ->
|
||||
logger.logSystemMessage('Purged data', { appId }, 'Purge data success')
|
||||
.tapCatch (err) ->
|
||||
logger.logSystemMessage("Error purging data: #{err}", { appId, error: err }, 'Purge data error')
|
||||
|
||||
@router.post '/v1/restart', (req, res) =>
|
||||
appId = checkInt(req.body.appId)
|
||||
force = checkTruthy(req.body.force)
|
||||
@eventTracker.track('Restart container (v1)', { appId })
|
||||
if !appId?
|
||||
return res.status(400).send('Missing app id')
|
||||
doRestart(appId, force)
|
||||
.then ->
|
||||
res.status(200).send('OK')
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
router.post '/v1/restart', (req, res) ->
|
||||
appId = checkInt(req.body.appId)
|
||||
force = checkTruthy(req.body.force)
|
||||
eventTracker.track('Restart container (v1)', { appId })
|
||||
if !appId?
|
||||
return res.status(400).send('Missing app id')
|
||||
doRestart(appId, force)
|
||||
.then ->
|
||||
res.status(200).send('OK')
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
|
||||
@router.post '/v1/apps/:appId/stop', (req, res) =>
|
||||
appId = checkInt(req.params.appId)
|
||||
force = checkTruthy(req.body.force)
|
||||
if !appId?
|
||||
return res.status(400).send('Missing app id')
|
||||
@applications.getCurrentApp(appId)
|
||||
.then (app) =>
|
||||
v1StopOrStart = (req, res, action) ->
|
||||
appId = checkInt(req.params.appId)
|
||||
force = checkTruthy(req.body.force)
|
||||
if !appId?
|
||||
return res.status(400).send('Missing app id')
|
||||
applications.getCurrentApp(appId)
|
||||
.then (app) ->
|
||||
service = app?.services?[0]
|
||||
if !service?
|
||||
return res.status(400).send('App not found')
|
||||
if app.services.length > 1
|
||||
return res.status(400).send('Some v1 endpoints are only allowed on single-container apps')
|
||||
applications.setTargetVolatileForService(service.imageId, running: action != 'stop')
|
||||
applications.executeStepAction(serviceAction(action, service.serviceId, service), { force })
|
||||
.then (service) ->
|
||||
res.status(200).json({ containerId: service.containerId })
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
|
||||
router.post '/v1/apps/:appId/stop', (req, res) ->
|
||||
v1StopOrStart(req, res, 'stop')
|
||||
|
||||
router.post '/v1/apps/:appId/start', (req, res) ->
|
||||
v1StopOrStart(req, res, 'start')
|
||||
|
||||
router.get '/v1/apps/:appId', (req, res) ->
|
||||
appId = checkInt(req.params.appId)
|
||||
eventTracker.track('GET app (v1)', appId)
|
||||
if !appId?
|
||||
return res.status(400).send('Missing app id')
|
||||
Promise.join(
|
||||
applications.getCurrentApp(appId)
|
||||
applications.getStatus()
|
||||
(app, status) ->
|
||||
service = app?.services?[0]
|
||||
if !service?
|
||||
return res.status(400).send('App not found')
|
||||
if app.services.length > 1
|
||||
return res.status(400).send('Some v1 endpoints are only allowed on single-container apps')
|
||||
@applications.setTargetVolatileForService(service.imageId, running: false)
|
||||
@applications.executeStepAction(serviceAction('stop', service.serviceId, service), { force })
|
||||
.then (service) ->
|
||||
res.status(200).json({ containerId: service.containerId })
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
# Don't return data that will be of no use to the user
|
||||
appToSend = {
|
||||
appId
|
||||
containerId: service.containerId
|
||||
env: _.omit(service.environment, constants.privateAppEnvVars)
|
||||
releaseId: service.releaseId
|
||||
imageId: service.image
|
||||
}
|
||||
if status.commit?
|
||||
appToSend.commit = status.commit
|
||||
res.json(appToSend)
|
||||
)
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
|
||||
@router.post '/v1/apps/:appId/start', (req, res) =>
|
||||
appId = checkInt(req.params.appId)
|
||||
force = checkTruthy(req.body.force)
|
||||
if !appId?
|
||||
return res.status(400).send('Missing app id')
|
||||
@applications.getCurrentApp(appId)
|
||||
.then (app) =>
|
||||
service = app?.services?[0]
|
||||
router.post '/v1/purge', (req, res) ->
|
||||
appId = checkInt(req.body.appId)
|
||||
force = checkTruthy(req.body.force)
|
||||
if !appId?
|
||||
errMsg = 'Invalid or missing appId'
|
||||
return res.status(400).send(errMsg)
|
||||
doPurge(appId, force)
|
||||
.then ->
|
||||
res.status(200).json(Data: 'OK', Error: '')
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
|
||||
router.post '/v2/applications/:appId/purge', (req, res) ->
|
||||
{ force } = req.body
|
||||
{ appId } = req.params
|
||||
doPurge(appId, force)
|
||||
.then ->
|
||||
res.status(200).send('OK')
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
|
||||
handleServiceAction = (req, res, action) ->
|
||||
{ imageId, force } = req.body
|
||||
{ appId } = req.params
|
||||
_lockingIfNecessary appId, { force }, ->
|
||||
applications.getCurrentApp(appId)
|
||||
.then (app) ->
|
||||
if !app?
|
||||
return res.status(404).send(appNotFoundMsg)
|
||||
service = _.find(app.services, { imageId })
|
||||
if !service?
|
||||
return res.status(400).send('App not found')
|
||||
if app.services.length > 1
|
||||
return res.status(400).send('Some v1 endpoints are only allowed on single-container apps')
|
||||
@applications.setTargetVolatileForService(service.imageId, running: true)
|
||||
@applications.executeStepAction(serviceAction('start', service.serviceId, null, service), { force })
|
||||
.then (service) ->
|
||||
res.status(200).json({ containerId: service.containerId })
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
errMsg = 'Service not found, a container must exist for this endpoint to work.'
|
||||
return res.status(404).send(errMsg)
|
||||
applications.setTargetVolatileForService(service.imageId, running: action != 'stop')
|
||||
applications.executeStepAction(serviceAction(action, service.serviceId, service, service, { wait: true }), { skipLock: true })
|
||||
.then ->
|
||||
res.status(200).send('OK')
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
|
||||
@router.get '/v1/apps/:appId', (req, res) =>
|
||||
appId = checkInt(req.params.appId)
|
||||
@eventTracker.track('GET app (v1)', appId)
|
||||
if !appId?
|
||||
return res.status(400).send('Missing app id')
|
||||
Promise.join(
|
||||
@applications.getCurrentApp(appId)
|
||||
@applications.getStatus()
|
||||
(app, status) ->
|
||||
service = app?.services?[0]
|
||||
if !service?
|
||||
return res.status(400).send('App not found')
|
||||
if app.services.length > 1
|
||||
return res.status(400).send('Some v1 endpoints are only allowed on single-container apps')
|
||||
# Don't return data that will be of no use to the user
|
||||
appToSend = {
|
||||
appId
|
||||
containerId: service.containerId
|
||||
env: _.omit(service.environment, constants.privateAppEnvVars)
|
||||
releaseId: service.releaseId
|
||||
imageId: service.image
|
||||
}
|
||||
if status.commit?
|
||||
appToSend.commit = status.commit
|
||||
res.json(appToSend)
|
||||
)
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
router.post '/v2/applications/:appId/restart-service', (req, res) ->
|
||||
handleServiceAction(req, res, 'restart')
|
||||
|
||||
@router.post '/v1/purge', (req, res) ->
|
||||
appId = checkInt(req.body.appId)
|
||||
force = checkTruthy(req.body.force)
|
||||
if !appId?
|
||||
errMsg = "App not found: an app needs to be installed for purge to work.
|
||||
If you've recently moved this device from another app,
|
||||
please push an app and wait for it to be installed first."
|
||||
return res.status(400).send(errMsg)
|
||||
doPurge(appId, force)
|
||||
.then ->
|
||||
res.status(200).json(Data: 'OK', Error: '')
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
router.post '/v2/applications/:appId/stop-service', (req, res) ->
|
||||
handleServiceAction(req, res, 'stop')
|
||||
|
||||
@router.post '/v2/applications/:appId/purge', (req, res) ->
|
||||
{ force } = req.body
|
||||
{ appId } = req.params
|
||||
doPurge(appId, force)
|
||||
.then ->
|
||||
res.status(200).send('OK')
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
router.post '/v2/applications/:appId/start-service', (req, res) ->
|
||||
handleServiceAction(req, res, 'start')
|
||||
|
||||
@router.post '/v2/applications/:appId/restart-service', (req, res) =>
|
||||
{ imageId, force } = req.body
|
||||
{ appId } = req.params
|
||||
@_lockingIfNecessary appId, { force }, =>
|
||||
@applications.getCurrentApp(appId)
|
||||
.then (app) =>
|
||||
if !app?
|
||||
errMsg = "App not found: an app needs to be installed for restart-service to work.
|
||||
If you've recently moved this device from another app,
|
||||
please push an app and wait for it to be installed first."
|
||||
return res.status(404).send(errMsg)
|
||||
service = _.find(app.services, (s) -> s.imageId == imageId)
|
||||
if !service?
|
||||
errMsg = 'Service not found, a container must exist for service restart to work.'
|
||||
return res.status(404).send(errMsg)
|
||||
@applications.setTargetVolatileForService(service.imageId, running: true)
|
||||
@applications.executeStepAction(serviceAction('restart', service.serviceId, service, service), { skipLock: true })
|
||||
.then ->
|
||||
res.status(200).send('OK')
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
router.post '/v2/applications/:appId/restart', (req, res) ->
|
||||
{ force } = req.body
|
||||
{ appId } = req.params
|
||||
doRestart(appId, force)
|
||||
.then ->
|
||||
res.status(200).send('OK')
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
|
||||
@router.post '/v2/applications/:appId/stop-service', (req, res) =>
|
||||
{ imageId, force } = req.body
|
||||
{ appId } = req.params
|
||||
@_lockingIfNecessary appId, { force }, =>
|
||||
@applications.getCurrentApp(appId)
|
||||
.then (app) =>
|
||||
if !app?
|
||||
errMsg = "App not found: an app needs to be installed for stop-service to work.
|
||||
If you've recently moved this device from another app,
|
||||
please push an app and wait for it to be installed first."
|
||||
return res.status(404).send(errMsg)
|
||||
service = _.find(app.services, (s) -> s.imageId == imageId)
|
||||
if !service?
|
||||
errMsg = 'Service not found, a container must exist for service stop to work.'
|
||||
return res.status(404).send(errMsg)
|
||||
@applications.setTargetVolatileForService(service.imageId, running: false)
|
||||
@applications.executeStepAction(serviceAction('stop', service.serviceId, service, service, { wait: true }), { skipLock: true })
|
||||
.then ->
|
||||
res.status(200).send('OK')
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
router.use(applications.proxyvisor.router)
|
||||
|
||||
@router.post '/v2/applications/:appId/start-service', (req, res) =>
|
||||
{ imageId, force } = req.body
|
||||
{ appId } = req.params
|
||||
@_lockingIfNecessary appId, { force }, =>
|
||||
@applications.getCurrentApp(appId)
|
||||
.then (app) =>
|
||||
if !app?
|
||||
errMsg = "App not found: an app needs to be installed for stop-service to work.
|
||||
If you've recently moved this device from another app,
|
||||
please push an app and wait for it to be installed first."
|
||||
return res.status(404).send(errMsg)
|
||||
service = _.find(app.services, (s) -> s.imageId == imageId)
|
||||
if !service?
|
||||
errMsg = 'Service not found, a container must exist for service start to work.'
|
||||
return res.status(404).send(errMsg)
|
||||
@applications.setTargetVolatileForService(service.imageId, running: true)
|
||||
@applications.executeStepAction(serviceAction('start', service.serviceId, service, service), { skipLock: true })
|
||||
.then ->
|
||||
res.status(200).send('OK')
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
|
||||
@router.post '/v2/applications/:appId/restart', (req, res) ->
|
||||
{ force } = req.body
|
||||
{ appId } = req.params
|
||||
doRestart(appId, force)
|
||||
.then ->
|
||||
res.status(200).send('OK')
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
|
||||
@router.use(@proxyvisor.router)
|
||||
return router
|
||||
|
||||
module.exports = class ApplicationManager extends EventEmitter
|
||||
constructor: ({ @logger, @config, @db, @eventTracker, @deviceState }) ->
|
||||
@ -304,8 +261,7 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
@services.updateMetadata(step.current, step.target)
|
||||
restart: (step, { force = false, skipLock = false } = {}) =>
|
||||
@_lockingIfNecessary step.current.appId, { force, skipLock: skipLock or step.options?.skipLock }, =>
|
||||
Promise.try =>
|
||||
@services.kill(step.current, { wait: true })
|
||||
@services.kill(step.current, { wait: true })
|
||||
.then =>
|
||||
delete @_containerStarted[step.current.containerId]
|
||||
.then =>
|
||||
@ -331,8 +287,13 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
opts.deltaSource = @bestDeltaSource(step.image, availableImages)
|
||||
@images.triggerFetch step.image, opts, (success) =>
|
||||
@fetchesInProgress -= 1
|
||||
@timeSpentFetching += process.hrtime(startTime)[0]
|
||||
elapsed = process.hrtime(startTime)
|
||||
elapsedMs = elapsed[0] * 1000 + elapsed[1] / 1e6
|
||||
@timeSpentFetching += elapsedMs
|
||||
if success
|
||||
# update_downloaded is true if *any* image has been downloaded,
|
||||
# and it's relevant mostly for the legacy GET /v1/device endpoint
|
||||
# that assumes a single-container app
|
||||
@reportCurrentState(update_downloaded: true)
|
||||
)
|
||||
removeImage: (step) =>
|
||||
@ -351,9 +312,7 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
@networks.ensureSupervisorNetwork()
|
||||
}
|
||||
@validActions = _.keys(@actionExecutors).concat(@proxyvisor.validActions)
|
||||
@_router = new ApplicationManagerRouter(this)
|
||||
@router = @_router.router
|
||||
|
||||
@router = createApplicationManagerRouter(this)
|
||||
@images.on('change', @reportCurrentState)
|
||||
@services.on('change', @reportCurrentState)
|
||||
|
||||
@ -372,7 +331,6 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
@services.listenToEvents()
|
||||
|
||||
# Returns the status of applications and their services
|
||||
# TODO: discuss: I think commit could be deduced by the UI looking at the image_installs on the API?
|
||||
getStatus: =>
|
||||
Promise.join(
|
||||
@services.getStatus()
|
||||
@ -382,11 +340,13 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
apps = {}
|
||||
dependent = {}
|
||||
releaseId = null
|
||||
creationTimesAndReleases = {}
|
||||
# We iterate over the current running services and add them to the current state
|
||||
# of the app they belong to.
|
||||
for service in services
|
||||
appId = service.appId
|
||||
apps[appId] ?= {}
|
||||
creationTimesAndReleases[appId] = {}
|
||||
apps[appId].services ?= {}
|
||||
# We only send commit if all services have the same release, and it matches the target release
|
||||
if !releaseId?
|
||||
@ -395,11 +355,11 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
releaseId = false
|
||||
if !apps[appId].services[service.imageId]?
|
||||
apps[appId].services[service.imageId] = _.pick(service, [ 'status', 'releaseId' ])
|
||||
creationTimesAndReleases[appId][service.imageId] = _.pick(service, [ 'createdAt', 'releaseId' ])
|
||||
apps[appId].services[service.imageId].download_progress = null
|
||||
else
|
||||
# There's two containers with the same imageId, so this has to be a handover
|
||||
previousReleaseId = apps[appId].services[service.imageId].releaseId
|
||||
apps[appId].services[service.imageId].releaseId = Math.max(previousReleaseId, service.releaseId)
|
||||
apps[appId].services[service.imageId].releaseId = _.minBy([ creationTimesAndReleases[appId][service.imageId], service ], 'createdAt').releaseId
|
||||
apps[appId].services[service.imageId].status = 'Handing over'
|
||||
|
||||
for image in images
|
||||
@ -409,14 +369,13 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
apps[appId].services ?= {}
|
||||
apps[appId].services[image.imageId] ?= _.pick(image, [ 'status', 'releaseId' ])
|
||||
apps[appId].services[image.imageId].download_progress = image.downloadProgress
|
||||
else if image.imageId?
|
||||
dependent[appId] ?= {}
|
||||
dependent[appId].images ?= {}
|
||||
dependent[appId].images[image.imageId] = _.pick(image, [ 'status' ])
|
||||
dependent[appId].images[image.imageId].download_progress = image.downloadProgress
|
||||
else
|
||||
if image.imageId?
|
||||
dependent[appId] ?= {}
|
||||
dependent[appId].images ?= {}
|
||||
dependent[appId].images[image.imageId] = _.pick(image, [ 'status' ])
|
||||
dependent[appId].images[image.imageId].download_progress = image.downloadProgress
|
||||
else
|
||||
console.log('Ignoring legacy dependent image', image)
|
||||
console.log('Ignoring legacy dependent image', image)
|
||||
|
||||
obj = { local: apps, dependent }
|
||||
if releaseId and targetApps[0]?.releaseId == releaseId
|
||||
@ -454,8 +413,7 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
@services.getAll()
|
||||
@networks.getAll()
|
||||
@volumes.getAll()
|
||||
(services, networks, volumes) =>
|
||||
return @_buildApps(services, networks, volumes)
|
||||
@_buildApps
|
||||
)
|
||||
|
||||
getCurrentApp: (appId) =>
|
||||
@ -463,9 +421,8 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
@services.getAllByAppId(appId)
|
||||
@networks.getAllByAppId(appId)
|
||||
@volumes.getAllByAppId(appId)
|
||||
(services, networks, volumes) =>
|
||||
return @_buildApps(services, networks, volumes)[appId]
|
||||
)
|
||||
@_buildApps
|
||||
).get(appId)
|
||||
|
||||
getTargetApp: (appId) =>
|
||||
@db.models('app').where({ appId }).select()
|
||||
@ -486,7 +443,7 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
|
||||
toBeRemoved = _.difference(currentServiceIds, targetServiceIds)
|
||||
for serviceId in toBeRemoved
|
||||
servicesToRemove = _.filter(currentServices, (s) -> s.serviceId == serviceId)
|
||||
servicesToRemove = _.filter(currentServices, { serviceId })
|
||||
for service in servicesToRemove
|
||||
removePairs.push({
|
||||
current: service
|
||||
@ -496,7 +453,7 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
|
||||
toBeInstalled = _.difference(targetServiceIds, currentServiceIds)
|
||||
for serviceId in toBeInstalled
|
||||
serviceToInstall = _.find(targetServices, (s) -> s.serviceId == serviceId)
|
||||
serviceToInstall = _.find(targetServices, { serviceId })
|
||||
if serviceToInstall?
|
||||
installPairs.push({
|
||||
current: null
|
||||
@ -508,8 +465,7 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
currentServicesPerId = {}
|
||||
targetServicesPerId = _.keyBy(targetServices, 'serviceId')
|
||||
for serviceId in toBeMaybeUpdated
|
||||
currentServiceContainers = _.filter currentServices, (service) ->
|
||||
return service.serviceId == serviceId
|
||||
currentServiceContainers = _.filter(currentServices, { serviceId })
|
||||
if currentServiceContainers.length > 1
|
||||
currentServicesPerId[serviceId] = _.maxBy(currentServiceContainers, 'createdAt')
|
||||
# All but the latest container for this service are spurious and should be removed
|
||||
@ -525,12 +481,14 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
# Returns true if a service matches its target except it should be running and it is not, but we've
|
||||
# already started it before. In this case it means it just exited so we don't want to start it again.
|
||||
alreadyStarted = (serviceId) =>
|
||||
currentServicesPerId[serviceId].isEqualExceptForRunningState(targetServicesPerId[serviceId]) and
|
||||
return (
|
||||
currentServicesPerId[serviceId].isEqualExceptForRunningState(targetServicesPerId[serviceId]) and
|
||||
targetServicesPerId[serviceId].running and
|
||||
@_containerStarted[currentServicesPerId[serviceId].containerId]
|
||||
)
|
||||
|
||||
needUpdate = _.filter toBeMaybeUpdated, (serviceId) ->
|
||||
return !currentServicesPerId[serviceId].isEqual(targetServicesPerId[serviceId]) and !alreadyStarted(serviceId)
|
||||
!currentServicesPerId[serviceId].isEqual(targetServicesPerId[serviceId]) and !alreadyStarted(serviceId)
|
||||
for serviceId in needUpdate
|
||||
updatePairs.push({
|
||||
current: currentServicesPerId[serviceId]
|
||||
@ -599,26 +557,24 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
name = _.split(volume, ':')[0]
|
||||
_.some volumePairs, (pair) ->
|
||||
"#{service.appId}_#{pair.current?.name}" == name
|
||||
if hasVolume
|
||||
return true
|
||||
return false
|
||||
return hasVolume
|
||||
|
||||
# TODO: account for volumes-from, networks-from, links, etc
|
||||
# TODO: support networks instead of only networkMode
|
||||
_dependenciesMetForServiceStart: (target, networkPairs, volumePairs, pendingPairs) ->
|
||||
# for dependsOn, check no install or update pairs have that service
|
||||
dependencyUnmet = _.some target.dependsOn ? [], (dependency) ->
|
||||
_.find(pendingPairs, (pair) -> pair.target?.serviceName == dependency)?
|
||||
_.some(pendingPairs, (pair) -> pair.target?.serviceName == dependency)
|
||||
if dependencyUnmet
|
||||
return false
|
||||
# for networks and volumes, check no network pairs have that volume name
|
||||
if _.find(networkPairs, (pair) -> "#{target.appId}_#{pair.target?.name}" == target.networkMode)?
|
||||
if _.some(networkPairs, (pair) -> "#{target.appId}_#{pair.target?.name}" == target.networkMode)
|
||||
return false
|
||||
volumeUnmet = _.some target.volumes, (volumeDefinition) ->
|
||||
[ sourceName, destName ] = volumeDefinition.split(':')
|
||||
if !destName? # If this is not a named volume, ignore it
|
||||
return false
|
||||
return _.find(volumePairs, (pair) -> "#{target.appId}_#{pair.target?.name}" == sourceName)?
|
||||
return _.some(volumePairs, (pair) -> "#{target.appId}_#{pair.target?.name}" == sourceName)
|
||||
return !volumeUnmet
|
||||
|
||||
# Unless the update strategy requires an early kill (i.e. kill-then-download, delete-then-download), we only want
|
||||
@ -627,8 +583,8 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
_dependenciesMetForServiceKill: (target, targetApp, availableImages) =>
|
||||
if target.dependsOn?
|
||||
for dependency in target.dependsOn
|
||||
dependencyService = _.find(targetApp.services, (s) -> s.serviceName == dependency)
|
||||
if !_.find(availableImages, (image) => image.dockerImageId == dependencyService.image or @images.isSameImage(image, { name: dependencyService.imageName }))?
|
||||
dependencyService = _.find(targetApp.services, serviceName: dependency)
|
||||
if !_.some(availableImages, (image) => image.dockerImageId == dependencyService.image or @images.isSameImage(image, { name: dependencyService.imageName }))
|
||||
return false
|
||||
return true
|
||||
|
||||
@ -644,7 +600,7 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
# we have to kill them before removing the network/volume (e.g. when we're only updating the network config)
|
||||
steps = []
|
||||
for dependency in dependencies
|
||||
if dependency.status != 'Stopping' and !_.some(changingPairs, (pair) -> pair.serviceId == dependency.serviceId)
|
||||
if dependency.status != 'Stopping' and !_.some(changingPairs, serviceId: dependency.serviceId)
|
||||
steps.push(serviceAction('kill', dependency.serviceId, dependency))
|
||||
return steps
|
||||
else if target?
|
||||
@ -696,7 +652,7 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
'hand-over': (current, target, needsDownload, dependenciesMetForStart, dependenciesMetForKill, needsSpecialKill, timeout) ->
|
||||
if needsDownload
|
||||
return fetchAction(target)
|
||||
else if needsSpecialKill && dependenciesMetForKill()
|
||||
else if needsSpecialKill and dependenciesMetForKill()
|
||||
return serviceAction('kill', target.serviceId, current, target)
|
||||
else if dependenciesMetForStart()
|
||||
return serviceAction('handover', target.serviceId, current, target, timeout: timeout)
|
||||
@ -747,8 +703,7 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
else
|
||||
# Create the default network for the target app
|
||||
targetApp.networks['default'] ?= {}
|
||||
if !currentApp?
|
||||
currentApp = emptyApp
|
||||
currentApp ?= emptyApp
|
||||
if currentApp.services?.length == 1 and targetApp.services?.length == 1 and
|
||||
targetApp.services[0].serviceName == currentApp.services[0].serviceName and
|
||||
checkTruthy(currentApp.services[0].labels['io.resin.legacy-container'])
|
||||
@ -828,10 +783,15 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
@config.get('extendedEnvOptions')
|
||||
@docker.getNetworkGateway(constants.supervisorNetworkInterface)
|
||||
.catchReturn('127.0.0.1')
|
||||
(opts, supervisorApiHost) =>
|
||||
Promise.props({
|
||||
firmware: pathExistsOnHost('/lib/firmware')
|
||||
modules: pathExistsOnHost('/lib/modules')
|
||||
})
|
||||
(opts, supervisorApiHost, hostPathExists) =>
|
||||
configOpts = {
|
||||
appName: app.name
|
||||
supervisorApiHost
|
||||
hostPathExists
|
||||
}
|
||||
_.assign(configOpts, opts)
|
||||
volumes = JSON.parse(app.volumes)
|
||||
@ -931,10 +891,10 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
allImagesForTargetApp = (app) -> _.map(app.services, imageForService)
|
||||
allImagesForCurrentApp = (app) ->
|
||||
_.map app.services, (service) ->
|
||||
_.omit(_.find(available, (image) -> image.dockerImageId == service.image and image.imageId == service.imageId), [ 'dockerImageId', 'id' ])
|
||||
_.omit(_.find(available, { dockerImageId: service.image, imageId: service.imageId }), [ 'dockerImageId', 'id' ])
|
||||
availableWithoutIds = _.map(available, (image) -> _.omit(image, [ 'dockerImageId', 'id' ]))
|
||||
currentImages = _.flatten(_.map(current.local.apps, allImagesForCurrentApp))
|
||||
targetImages = _.flatten(_.map(target.local.apps, allImagesForTargetApp))
|
||||
currentImages = _.flatMap(current.local.apps, allImagesForCurrentApp)
|
||||
targetImages = _.flatMap(target.local.apps, allImagesForTargetApp)
|
||||
availableAndUnused = _.filter availableWithoutIds, (image) ->
|
||||
!_.some currentImages.concat(targetImages), (imageInUse) -> _.isEqual(image, imageInUse)
|
||||
imagesToDownload = _.filter targetImages, (targetImage) =>
|
||||
@ -942,13 +902,13 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
# Images that are available but we don't have them in the DB with the exact metadata:
|
||||
imagesToSave = _.filter targetImages, (targetImage) =>
|
||||
_.some(available, (availableImage) => @images.isSameImage(availableImage, targetImage)) and
|
||||
!_.find(availableWithoutIds, (img) -> _.isEqual(img, targetImage))?
|
||||
!_.some(availableWithoutIds, (img) -> _.isEqual(img, targetImage))
|
||||
|
||||
deltaSources = _.map imagesToDownload, (image) =>
|
||||
return @bestDeltaSource(image, available)
|
||||
proxyvisorImages = @proxyvisor.imagesInUse(current, target)
|
||||
imagesToRemove = _.filter availableAndUnused, (image) =>
|
||||
notUsedForDelta = !_.some deltaSources, (deltaSource) -> deltaSource == image.name
|
||||
notUsedForDelta = !_.includes(deltaSources, image.name)
|
||||
notUsedByProxyvisor = !_.some proxyvisorImages, (proxyvisorImage) => @images.isSameImage(image, { name: proxyvisorImage })
|
||||
return notUsedForDelta and notUsedByProxyvisor
|
||||
return { imagesToSave, imagesToRemove }
|
||||
@ -986,7 +946,7 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
if !ignoreImages and delta and newDownloads > 0
|
||||
downloadsToBlock = downloading.length + newDownloads - constants.maxDeltaDownloads
|
||||
while downloadsToBlock > 0
|
||||
_.pull(nextSteps, _.find(nextSteps, (s) -> s.action == 'fetch'))
|
||||
_.pull(nextSteps, _.find(nextSteps, action: 'fetch'))
|
||||
downloadsToBlock -= 1
|
||||
if !ignoreImages and _.isEmpty(nextSteps) and !_.isEmpty(downloading)
|
||||
nextSteps.push({ action: 'noop' })
|
||||
@ -1002,7 +962,7 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
|
||||
_lockingIfNecessary: (appId, { force = false, skipLock = false } = {}, fn) =>
|
||||
if skipLock
|
||||
return Promise.try( -> fn())
|
||||
return Promise.try(fn)
|
||||
@config.get('lockOverride')
|
||||
.then (lockOverride) ->
|
||||
return checkTruthy(lockOverride) or force
|
||||
@ -1026,7 +986,7 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
(cleanupNeeded, availableImages, downloading, supervisorNetworkReady, conf) =>
|
||||
@_inferNextSteps(cleanupNeeded, availableImages, downloading, supervisorNetworkReady, currentState, targetState, ignoreImages, conf)
|
||||
.then (nextSteps) =>
|
||||
if ignoreImages and _.some(nextSteps, (step) -> step.action == 'fetch')
|
||||
if ignoreImages and _.some(nextSteps, action: 'fetch')
|
||||
throw new Error('Cannot fetch images while executing an API action')
|
||||
@proxyvisor.getRequiredSteps(availableImages, downloading, currentState, targetState, nextSteps)
|
||||
.then (proxyvisorSteps) ->
|
||||
|
@ -21,7 +21,7 @@ validation = require '../lib/validation'
|
||||
# }
|
||||
|
||||
hasDigest = (name) ->
|
||||
name?.split?('@')?[1]? ? false
|
||||
name?.split?('@')?[1]?
|
||||
|
||||
module.exports = class Images extends EventEmitter
|
||||
constructor: ({ @docker, @logger, @db }) ->
|
||||
@ -136,10 +136,10 @@ module.exports = class Images extends EventEmitter
|
||||
# Image has a regular tag, so we might have to remove unnecessary tags
|
||||
@docker.getImage(img.dockerImageId).inspect()
|
||||
.then (dockerImg) =>
|
||||
differentTags = _.filter(imagesFromDB, (dbImage) -> dbImage.name != img.name)
|
||||
differentTags = _.reject(imagesFromDB, name: img.name)
|
||||
if dockerImg.RepoTags.length > 1 and
|
||||
_.some(dockerImg.RepoTags, (tag) -> tag == img.name) and
|
||||
_.some(dockerImg.RepoTags, (tag) -> _.some(differentTags, (dbImage) -> dbImage.name == tag))
|
||||
_.includes(dockerImg.RepoTags, img.name) and
|
||||
_.some(dockerImg.RepoTags, (tag) -> _.some(differentTags, name: tag))
|
||||
@docker.getImage(img.name).remove(noprune: true)
|
||||
.return(false)
|
||||
else
|
||||
@ -155,9 +155,8 @@ module.exports = class Images extends EventEmitter
|
||||
|
||||
remove: (image) =>
|
||||
@_removeImageIfNotNeeded(image)
|
||||
.catch (err) =>
|
||||
.tapCatch (err) =>
|
||||
@logger.logSystemEvent(logTypes.deleteImageError, { image, error: err })
|
||||
throw err
|
||||
|
||||
getByDockerId: (id) =>
|
||||
@db.models('image').where(dockerImageId: id).first()
|
||||
@ -167,7 +166,7 @@ module.exports = class Images extends EventEmitter
|
||||
.then(@remove)
|
||||
|
||||
getNormalisedTags: (image) ->
|
||||
Promise.map(image.RepoTags ? [], (tag) => @normalise(tag))
|
||||
Promise.map(image.RepoTags ? [], @normalise)
|
||||
|
||||
_withImagesFromDockerAndDB: (callback) =>
|
||||
Promise.join(
|
||||
@ -194,7 +193,7 @@ module.exports = class Images extends EventEmitter
|
||||
|
||||
getDownloadingImageIds: =>
|
||||
Promise.try =>
|
||||
return _.map(_.keys(_.pickBy(@volatileState, (s) -> s.status == 'Downloading')), validation.checkInt)
|
||||
return _.map(_.keys(_.pickBy(@volatileState, status: 'Downloading')), validation.checkInt)
|
||||
|
||||
cleanupDatabase: =>
|
||||
@_withImagesFromDockerAndDB (dockerImages, supervisedImages) =>
|
||||
@ -252,7 +251,6 @@ module.exports = class Images extends EventEmitter
|
||||
.then =>
|
||||
toCleanup = _.filter _.uniq(images), (image) =>
|
||||
!@imageCleanupFailures[image]? or Date.now() - @imageCleanupFailures[image] > constants.imageCleanupErrorIgnoreTimeout
|
||||
#console.log(toCleanup)
|
||||
return toCleanup
|
||||
inspectByName: (imageName) =>
|
||||
@docker.getImage(imageName).inspect()
|
||||
|
@ -47,16 +47,14 @@ module.exports = class Networks
|
||||
'io.resin.supervised': 'true'
|
||||
}
|
||||
})
|
||||
.catch (err) =>
|
||||
.tapCatch (err) =>
|
||||
@logger.logSystemEvent(logTypes.createNetworkError, { network: { name, appId }, error: err })
|
||||
throw err
|
||||
|
||||
remove: ({ name, appId }) =>
|
||||
@logger.logSystemEvent(logTypes.removeNetwork, { network: { name, appId } })
|
||||
@docker.getNetwork("#{appId}_#{name}").remove()
|
||||
.catch (err) =>
|
||||
.tapCatch (err) =>
|
||||
@logger.logSystemEvent(logTypes.removeNetworkError, { network: { name, appId }, error: err })
|
||||
throw err
|
||||
|
||||
supervisorNetworkReady: =>
|
||||
# For mysterious reasons sometimes the balena/docker network exists
|
||||
@ -80,8 +78,7 @@ module.exports = class Networks
|
||||
removeIt()
|
||||
else
|
||||
fs.statAsync("/sys/class/net/#{constants.supervisorNetworkInterface}")
|
||||
.catch ENOENT, ->
|
||||
removeIt()
|
||||
.catch(ENOENT, removeIt)
|
||||
.catch NotFoundError, =>
|
||||
console.log('Creating supervisor0 network')
|
||||
@docker.createNetwork({
|
||||
|
@ -66,7 +66,6 @@ module.exports = class ServiceManager extends EventEmitter
|
||||
throw err
|
||||
.tap =>
|
||||
delete @containerHasDied[containerId]
|
||||
.tap =>
|
||||
@logger.logSystemEvent(logTypes.stopServiceSuccess, { service })
|
||||
.catch (err) =>
|
||||
@logger.logSystemEvent(logTypes.stopServiceError, { service, error: err })
|
||||
@ -101,13 +100,11 @@ module.exports = class ServiceManager extends EventEmitter
|
||||
.tap (container) =>
|
||||
service.containerId = container.id
|
||||
Promise.map nets, ({ name, endpointConfig }) =>
|
||||
console.log('Connecting ' + container.id + ' to network ' + name)
|
||||
@docker.getNetwork(name).connect({ Container: container.id, EndpointConfig: endpointConfig })
|
||||
.tap =>
|
||||
@logger.logSystemEvent(logTypes.installServiceSuccess, { service })
|
||||
.catch (err) =>
|
||||
.tapCatch (err) =>
|
||||
@logger.logSystemEvent(logTypes.installServiceError, { service, error: err })
|
||||
throw err
|
||||
.finally =>
|
||||
@reportChange(mockContainerId)
|
||||
|
||||
@ -135,12 +132,12 @@ module.exports = class ServiceManager extends EventEmitter
|
||||
else
|
||||
# rethrow the same error
|
||||
throw err
|
||||
.catch (err) =>
|
||||
.tapCatch (err) =>
|
||||
# If starting the container failed, we remove it so that it doesn't litter
|
||||
container.remove(v: true)
|
||||
.finally =>
|
||||
.catch(_.noop)
|
||||
.then =>
|
||||
@logger.logSystemEvent(logTypes.startServiceError, { service, error: err })
|
||||
throw err
|
||||
.then =>
|
||||
@logger.attach(@docker, container.id, service.serviceId)
|
||||
.tap =>
|
||||
@ -176,7 +173,7 @@ module.exports = class ServiceManager extends EventEmitter
|
||||
.then (services) =>
|
||||
status = _.clone(@volatileState)
|
||||
for service in services
|
||||
status[service.containerId] ?= _.pick(service, [ 'appId', 'imageId', 'status', 'releaseId', 'commit' ])
|
||||
status[service.containerId] ?= _.pick(service, [ 'appId', 'imageId', 'status', 'releaseId', 'commit', 'createdAt' ])
|
||||
return _.values(status)
|
||||
|
||||
getByDockerContainerId: (containerId) =>
|
||||
@ -233,7 +230,7 @@ module.exports = class ServiceManager extends EventEmitter
|
||||
@docker.getEvents(filters: type: [ 'container' ])
|
||||
.then (stream) =>
|
||||
stream.on 'error', (err) ->
|
||||
console.error('Error on docker events stream:', err, err.stack)
|
||||
console.error('Error on docker events stream:', err)
|
||||
parser = JSONStream.parse()
|
||||
parser.on 'data', (data) =>
|
||||
if data?.status in ['die', 'start']
|
||||
@ -257,9 +254,8 @@ module.exports = class ServiceManager extends EventEmitter
|
||||
parser
|
||||
.on 'error', (err) ->
|
||||
console.error('Error on docker events stream', err)
|
||||
reject()
|
||||
reject(err)
|
||||
.on 'end', ->
|
||||
console.error('Docker events stream ended, restarting listener')
|
||||
resolve()
|
||||
stream.pipe(parser)
|
||||
.catch (err) ->
|
||||
|
@ -24,55 +24,33 @@ parseMemoryNumber = (numAsString, defaultVal) ->
|
||||
# Construct a restart policy based on its name.
|
||||
# The default policy (if name is not a valid policy) is "always".
|
||||
createRestartPolicy = (name) ->
|
||||
if not (name in validRestartPolicies)
|
||||
if name not in validRestartPolicies
|
||||
name = 'always'
|
||||
return { Name: name, MaximumRetryCount: 0 }
|
||||
|
||||
getCommand = (service, imageInfo) ->
|
||||
cmd = null
|
||||
if service.command?
|
||||
cmd = service.command
|
||||
else if imageInfo?.Config?.Cmd?
|
||||
cmd = imageInfo.Config.Cmd
|
||||
cmd = service.command ? imageInfo?.Config?.Cmd ? null
|
||||
if _.isString(cmd)
|
||||
cmd = parseCommand(cmd)
|
||||
return cmd
|
||||
|
||||
getEntrypoint = (service, imageInfo) ->
|
||||
entry = null
|
||||
if service.entrypoint?
|
||||
entry = service.entrypoint
|
||||
else if imageInfo?.Config?.Entrypoint?
|
||||
entry = imageInfo.Config.Entrypoint
|
||||
entry = service.entrypoint ? imageInfo?.Config?.Entrypoint ? null
|
||||
if _.isString(entry)
|
||||
entry = parseCommand(entry)
|
||||
return entry
|
||||
|
||||
getStopSignal = (service, imageInfo) ->
|
||||
sig = null
|
||||
if service.stop_signal?
|
||||
sig = service.stop_signal
|
||||
else if imageInfo?.Config?.StopSignal?
|
||||
sig = imageInfo.Config.StopSignal
|
||||
sig = service.stopSignal ? imageInfo?.Config?.StopSignal ? null
|
||||
if sig? and !_.isString(sig) # In case the YAML was parsed as a number
|
||||
sig = sig.toString()
|
||||
return sig
|
||||
|
||||
getUser = (service, imageInfo) ->
|
||||
user = ''
|
||||
if service.user?
|
||||
user = service.user
|
||||
else if imageInfo?.Config?.User?
|
||||
user = imageInfo.Config.User
|
||||
return user
|
||||
return service.user ? imageInfo?.Config?.User ? ''
|
||||
|
||||
getWorkingDir = (service, imageInfo) ->
|
||||
workingDir = ''
|
||||
if service.workingDir?
|
||||
workingDir = service.workingDir
|
||||
else if imageInfo?.Config?.WorkingDir?
|
||||
workingDir = imageInfo.Config.WorkingDir
|
||||
return workingDir
|
||||
return service.workingDir ? imageInfo?.Config?.WorkingDir ? ''
|
||||
|
||||
buildHealthcheckTest = (test) ->
|
||||
if _.isString(test)
|
||||
@ -101,13 +79,11 @@ overrideHealthcheckFromCompose = (serviceHealthcheck, imageHealthcheck = {}) ->
|
||||
return imageHealthcheck
|
||||
|
||||
getHealthcheck = (service, imageInfo) ->
|
||||
healthcheck = null
|
||||
if imageInfo?.Config?.Healthcheck?
|
||||
healthcheck = imageInfo.Config.Healthcheck
|
||||
healthcheck = imageInfo?.Config?.Healthcheck ? null
|
||||
if service.healthcheck?
|
||||
healthcheck = overrideHealthcheckFromCompose(service.healthcheck, healthcheck)
|
||||
# Set invalid healthchecks back to null
|
||||
if healthcheck and (!healthcheck.Test? or _.isEqual(healthcheck.Test, []))
|
||||
if healthcheck? and (!healthcheck.Test? or _.isEqual(healthcheck.Test, []))
|
||||
healthcheck = null
|
||||
return healthcheck
|
||||
|
||||
@ -128,7 +104,8 @@ formatDevices = (devices) ->
|
||||
|
||||
# TODO: Support configuration for "networks"
|
||||
module.exports = class Service
|
||||
constructor: (serviceProperties, opts = {}) ->
|
||||
constructor: (props, opts = {}) ->
|
||||
serviceProperties = _.mapKeys(props, (v, k) -> _.camelCase(k))
|
||||
{
|
||||
@image
|
||||
@imageName
|
||||
@ -194,7 +171,7 @@ module.exports = class Service
|
||||
@macAddress
|
||||
@user
|
||||
@workingDir
|
||||
} = _.mapKeys(serviceProperties, (v, k) -> _.camelCase(k))
|
||||
} = serviceProperties
|
||||
|
||||
@networks ?= {}
|
||||
@privileged ?= false
|
||||
@ -255,15 +232,18 @@

# If the service has no containerId, it is a target service and has to be normalised and extended
if !@containerId?
@networkMode ?= 'default'
if !@networkMode?
if !_.isEmpty(@networks)
@networkMode = _.keys(@networks)[0]
else
@networkMode = 'default'
if @networkMode not in [ 'host', 'bridge', 'none' ]
@networkMode = "#{@appId}_#{@networkMode}"

@networks = _.mapKeys @networks, (v, k) =>
if k not in [ 'host', 'bridge', 'none' ]
return "#{@appId}_#{k}"
else
return k
return k

@networks[@networkMode] ?= {}
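
To see the new default in isolation (networkMode now picks the first entry in networks when one exists), here is a hedged standalone sketch of the same logic outside the class, assuming an appId of 123 and a single compose network named mynet:

_ = require 'lodash'

appId = 123                # assumed
networks = { mynet: {} }   # as parsed from the target compose state
networkMode = null         # no explicit network_mode given

if !networkMode?
  if !_.isEmpty(networks)
    networkMode = _.keys(networks)[0]
  else
    networkMode = 'default'
if networkMode not in [ 'host', 'bridge', 'none' ]
  networkMode = "#{appId}_#{networkMode}"

console.log(networkMode)  # => '123_mynet'
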
@ -282,14 +262,11 @@ module.exports = class Service
@devices = formatDevices(@devices)
@addFeaturesFromLabels(opts)
if @dns?
if !Array.isArray(@dns)
@dns = [ @dns ]
@dns = _.castArray(@dns)
if @dnsSearch?
if !Array.isArray(@dnsSearch)
@dnsSearch = [ @dns ]
@dnsSearch = _.castArray(@dnsSearch)
if @tmpfs?
if !Array.isArray(@tmpfs)
@tmpfs = [ @tmpfs ]
@tmpfs = _.castArray(@tmpfs)

@nanoCpus = Math.round(Number(@cpus) * 10 ** 9)
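
The three hand-written wrap-in-array branches above collapse into _.castArray, which wraps scalars and leaves arrays alone. A quick sketch of its behaviour:

_ = require 'lodash'

console.log(_.castArray('8.8.8.8'))                 # [ '8.8.8.8' ]
console.log(_.castArray([ '8.8.8.8', '1.1.1.1' ]))  # unchanged: [ '8.8.8.8', '1.1.1.1' ]
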
@ -330,9 +307,9 @@ module.exports = class Service
|
||||
addFeaturesFromLabels: (opts) =>
|
||||
if checkTruthy(@labels['io.resin.features.dbus'])
|
||||
@volumes.push('/run/dbus:/host/run/dbus')
|
||||
if checkTruthy(@labels['io.resin.features.kernel-modules'])
|
||||
if checkTruthy(@labels['io.resin.features.kernel-modules']) and opts.hostPathExists.modules
|
||||
@volumes.push('/lib/modules:/lib/modules')
|
||||
if checkTruthy(@labels['io.resin.features.firmware'])
|
||||
if checkTruthy(@labels['io.resin.features.firmware']) and opts.hostPathExists.firmware
|
||||
@volumes.push('/lib/firmware:/lib/firmware')
|
||||
if checkTruthy(@labels['io.resin.features.balena-socket'])
|
||||
@volumes.push('/var/run/balena.sock:/var/run/balena.sock')
|
||||
@ -388,7 +365,7 @@ module.exports = class Service
|
||||
extendAndSanitiseVolumes: (imageInfo) =>
|
||||
volumes = []
|
||||
for vol in @volumes
|
||||
isBind = /:/.test(vol)
|
||||
isBind = _.includes(vol, ':')
|
||||
if isBind
|
||||
[ bindSource, bindDest, mode ] = vol.split(':')
|
||||
if !path.isAbsolute(bindSource)
|
||||
@ -409,7 +386,7 @@ module.exports = class Service
|
||||
getNamedVolumes: =>
|
||||
defaults = @defaultBinds()
|
||||
validVolumes = _.map @volumes, (vol) ->
|
||||
if _.includes(defaults, vol) or !/:/.test(vol)
|
||||
if _.includes(defaults, vol) or !_.includes(vol, ':')
|
||||
return null
|
||||
bindSource = vol.split(':')[0]
|
||||
if !path.isAbsolute(bindSource)
|
||||
@ -417,7 +394,7 @@ module.exports = class Service
|
||||
return m[1]
|
||||
else
|
||||
return null
|
||||
return _.filter(validVolumes, (v) -> !_.isNull(v))
|
||||
return _.reject(validVolumes, _.isNil)
|
||||
|
||||
lockPath: =>
|
||||
return updateLock.lockPath(@appId)
|
||||
@ -562,7 +539,7 @@ module.exports = class Service
|
||||
binds = []
|
||||
volumes = {}
|
||||
for vol in @volumes
|
||||
isBind = /:/.test(vol)
|
||||
isBind = _.includes(vol, ':')
|
||||
if isBind
|
||||
binds.push(vol)
|
||||
else
|
||||
@ -714,11 +691,11 @@ module.exports = class Service
|
||||
|
||||
# This can be very useful for debugging so I'm leaving it commented for now.
|
||||
# Uncomment to see the services whenever they don't match.
|
||||
#if !isEq
|
||||
# console.log(JSON.stringify(this, null, 2))
|
||||
# console.log(JSON.stringify(otherService, null, 2))
|
||||
# diff = _.omitBy this, (prop, k) -> _.isEqual(prop, otherService[k])
|
||||
# console.log(JSON.stringify(diff, null, 2))
|
||||
if !isEq
|
||||
console.log(JSON.stringify(this, null, 2))
|
||||
console.log(JSON.stringify(otherService, null, 2))
|
||||
diff = _.omitBy this, (prop, k) -> _.isEqual(prop, otherService[k])
|
||||
console.log(JSON.stringify(diff, null, 2))
|
||||
|
||||
return isEq
|
||||
|
||||
|
@ -1,6 +1,5 @@
|
||||
Promise = require 'bluebird'
|
||||
_ = require 'lodash'
|
||||
fs = Promise.promisifyAll(require('fs'))
|
||||
path = require 'path'
|
||||
|
||||
logTypes = require '../lib/log-types'
|
||||
@ -8,6 +7,7 @@ constants = require '../lib/constants'
|
||||
{ checkInt } = require '../lib/validation'
|
||||
{ NotFoundError } = require '../lib/errors'
|
||||
{ defaultLegacyVolume } = require '../lib/migration'
|
||||
{ safeRename } = require '../lib/fs-utils'
|
||||
|
||||
module.exports = class Volumes
|
||||
constructor: ({ @docker, @logger }) ->
|
||||
@ -31,18 +31,16 @@ module.exports = class Volumes
|
||||
volumes = response.Volumes ? []
|
||||
Promise.map volumes, (volume) =>
|
||||
@docker.getVolume(volume.Name).inspect()
|
||||
.then (vol) =>
|
||||
@format(vol)
|
||||
.then(@format)
|
||||
|
||||
getAllByAppId: (appId) =>
|
||||
@getAll()
|
||||
.then (volumes) ->
|
||||
_.filter(volumes, (v) -> v.appId == appId)
|
||||
_.filter(volumes, { appId })
|
||||
|
||||
get: ({ name, appId }) ->
|
||||
@docker.getVolume("#{appId}_#{name}").inspect()
|
||||
.then (volume) =>
|
||||
return @format(volume)
|
||||
.then(@format)
|
||||
|
||||
defaultLabels: ->
|
||||
return {
|
||||
@ -67,25 +65,17 @@ module.exports = class Volumes
Labels: labels
DriverOpts: driverOpts
})
.catch (err) =>
.tapCatch (err) =>
@logger.logSystemEvent(logTypes.createVolumeError, { volume: { name }, error: err })
throw err
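
Bluebird's .tapCatch runs the handler for its side effect and re-throws the original rejection, so the explicit throw err is no longer needed. A small standalone sketch:

Promise = require 'bluebird'

failing = -> Promise.reject(new Error('boom'))

failing()
.tapCatch (err) ->
  console.error('logging only:', err.message)
.catch (err) ->
  console.error('still rejected with:', err.message)  # the error propagates unchanged
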
createFromLegacy: (appId) =>
|
||||
name = defaultLegacyVolume()
|
||||
@create({ name, appId })
|
||||
.then (v) ->
|
||||
v.inspect()
|
||||
.call('inspect')
|
||||
.then (v) ->
|
||||
volumePath = path.join(constants.rootMountPoint, v.Mountpoint)
|
||||
legacyPath = path.join(constants.rootMountPoint, constants.dataPath, appId.toString())
|
||||
fs.renameAsync(legacyPath, volumePath)
|
||||
.then ->
|
||||
fs.openAsync(path.dirname(volumePath))
|
||||
.then (parent) ->
|
||||
fs.fsyncAsync(parent)
|
||||
.then ->
|
||||
fs.closeAsync(parent)
|
||||
safeRename(legacyPath, volumePath)
|
||||
.catch (err) ->
|
||||
@logger.logSystemMessage("Warning: could not migrate legacy /data volume: #{err.message}", { error: err }, 'Volume migration error')
|
||||
|
||||
|
@ -4,7 +4,7 @@ Lock = require 'rwlock'
|
||||
deviceRegister = require 'resin-register-device'
|
||||
fs = Promise.promisifyAll(require('fs'))
|
||||
EventEmitter = require 'events'
|
||||
|
||||
path = require 'path'
|
||||
{ writeAndSyncFile, writeFileAtomic } = require './lib/fs-utils'
|
||||
osRelease = require './lib/os-release'
|
||||
supervisorVersion = require './lib/supervisor-version'
|
||||
@ -12,10 +12,6 @@ constants = require './lib/constants'
|
||||
|
||||
module.exports = class Config extends EventEmitter
|
||||
constructor: ({ @db, @configPath }) ->
|
||||
# These are values that come from env vars or hardcoded defaults and can be resolved synchronously
|
||||
# Defaults needed for both gosuper and node supervisor are declared in entry.sh
|
||||
@constants = constants
|
||||
|
||||
@funcs =
|
||||
version: ->
|
||||
Promise.resolve(supervisorVersion)
|
||||
@ -40,18 +36,18 @@ module.exports = class Config extends EventEmitter
|
||||
# Fall back to checking if an API endpoint was passed via env vars if there's none in config.json (legacy)
|
||||
@get('apiEndpoint')
|
||||
.then (apiEndpoint) ->
|
||||
return apiEndpoint ? @constants.apiEndpointFromEnv
|
||||
return apiEndpoint ? constants.apiEndpointFromEnv
|
||||
|
||||
provisioned: =>
|
||||
@getMany([ 'uuid', 'resinApiEndpoint', 'registered_at', 'deviceId' ])
|
||||
.then (requiredValues) ->
|
||||
return _.every(_.values(requiredValues), Boolean)
|
||||
|
||||
osVersion: =>
|
||||
osRelease.getOSVersion(@constants.hostOSVersionPath)
|
||||
osVersion: ->
|
||||
osRelease.getOSVersion(constants.hostOSVersionPath)
|
||||
|
||||
osVariant: =>
|
||||
osRelease.getOSVariant(@constants.hostOSVersionPath)
|
||||
osVariant: ->
|
||||
osRelease.getOSVariant(constants.hostOSVersionPath)
|
||||
|
||||
provisioningOptions: =>
|
||||
@getMany([
|
||||
@ -111,9 +107,9 @@ module.exports = class Config extends EventEmitter
|
||||
registered_at: { source: 'config.json', mutable: true }
|
||||
applicationId: { source: 'config.json' }
|
||||
appUpdatePollInterval: { source: 'config.json', mutable: true, default: 60000 }
|
||||
pubnubSubscribeKey: { source: 'config.json', default: @constants.defaultPubnubSubscribeKey }
|
||||
pubnubPublishKey: { source: 'config.json', default: @constants.defaultPubnubPublishKey }
|
||||
mixpanelToken: { source: 'config.json', default: @constants.defaultMixpanelToken }
|
||||
pubnubSubscribeKey: { source: 'config.json', default: constants.defaultPubnubSubscribeKey }
|
||||
pubnubPublishKey: { source: 'config.json', default: constants.defaultPubnubPublishKey }
|
||||
mixpanelToken: { source: 'config.json', default: constants.defaultMixpanelToken }
|
||||
bootstrapRetryDelay: { source: 'config.json', default: 30000 }
|
||||
supervisorOfflineMode: { source: 'config.json', default: false }
|
||||
hostname: { source: 'config.json', mutable: true }
|
||||
@ -141,41 +137,41 @@ module.exports = class Config extends EventEmitter
|
||||
loggingEnabled: { source: 'db', mutable: true, default: 'true' }
|
||||
connectivityCheckEnabled: { source: 'db', mutable: true, default: 'true' }
|
||||
delta: { source: 'db', mutable: true, default: 'false' }
|
||||
deltaRequestTimeout: { source: 'db', mutable: true, default: '' }
|
||||
deltaRequestTimeout: { source: 'db', mutable: true, default: '30000' }
|
||||
deltaApplyTimeout: { source: 'db', mutable: true, default: '' }
|
||||
deltaRetryCount: { source: 'db', mutable: true, default: '' }
|
||||
deltaRetryInterval: { source: 'db', mutable: true, default: '' }
|
||||
deltaRetryCount: { source: 'db', mutable: true, default: '30' }
|
||||
deltaRetryInterval: { source: 'db', mutable: true, default: '10000' }
|
||||
lockOverride: { source: 'db', mutable: true, default: 'false' }
|
||||
legacyAppsPresent: { source: 'db', mutable: true, default: 'false' }
|
||||
}
|
||||
|
||||
@configJsonCache = {}
|
||||
|
||||
@_lock = new Lock()
|
||||
@_writeLock = Promise.promisify(@_lock.async.writeLock)
|
||||
@writeLockConfigJson = =>
|
||||
@_writeLock('config.json')
|
||||
_lock = new Lock()
|
||||
_writeLock = Promise.promisify(_lock.async.writeLock)
|
||||
@writeLockConfigJson = ->
|
||||
_writeLock('config.json')
|
||||
.disposer (release) ->
|
||||
release()
|
||||
|
||||
@_readLock = Promise.promisify(@_lock.async.readLock)
|
||||
@readLockConfigJson = =>
|
||||
@_readLock('config.json')
|
||||
_readLock = Promise.promisify(_lock.async.readLock)
|
||||
@readLockConfigJson = ->
|
||||
_readLock('config.json')
|
||||
.disposer (release) ->
|
||||
release()
|
||||
|
||||
writeConfigJson: =>
|
||||
atomicWritePossible = true
|
||||
@configJsonPathOnHost()
|
||||
.catch (err) =>
|
||||
.catch (err) ->
|
||||
console.error(err.message)
|
||||
atomicWritePossible = false
|
||||
return @constants.configJsonNonAtomicPath
|
||||
.then (path) =>
|
||||
return constants.configJsonNonAtomicPath
|
||||
.then (configPath) =>
|
||||
if atomicWritePossible
|
||||
writeFileAtomic(path, JSON.stringify(@configJsonCache))
|
||||
writeFileAtomic(configPath, JSON.stringify(@configJsonCache))
|
||||
else
|
||||
writeAndSyncFile(path, JSON.stringify(@configJsonCache))
|
||||
writeAndSyncFile(configPath, JSON.stringify(@configJsonCache))
|
||||
|
||||
|
||||
configJsonSet: (keyVals) =>
|
||||
@ -195,48 +191,46 @@ module.exports = class Config extends EventEmitter
|
||||
configJsonRemove: (key) =>
|
||||
changed = false
|
||||
Promise.using @writeLockConfigJson(), =>
|
||||
Promise.try =>
|
||||
if @configJsonCache[key]?
|
||||
delete @configJsonCache[key]
|
||||
changed = true
|
||||
.then =>
|
||||
if changed
|
||||
@writeConfigJson()
|
||||
if @configJsonCache[key]?
|
||||
delete @configJsonCache[key]
|
||||
changed = true
|
||||
if changed
|
||||
@writeConfigJson()
|
||||
|
||||
configJsonPathOnHost: =>
|
||||
Promise.try =>
|
||||
if @configPath?
|
||||
return @configPath
|
||||
if @constants.configJsonPathOnHost?
|
||||
return @constants.configJsonPathOnHost
|
||||
osRelease.getOSVersion(@constants.hostOSVersionPath)
|
||||
.then (osVersion) =>
|
||||
if constants.configJsonPathOnHost?
|
||||
return constants.configJsonPathOnHost
|
||||
osRelease.getOSVersion(constants.hostOSVersionPath)
|
||||
.then (osVersion) ->
|
||||
if /^Resin OS 2./.test(osVersion)
|
||||
return "#{@constants.bootMountPointFromEnv}/config.json"
|
||||
return path.join(constants.bootMountPointFromEnv, 'config.json')
|
||||
else if /^Resin OS 1./.test(osVersion)
|
||||
# In Resin OS 1.12, $BOOT_MOUNTPOINT was added and it coincides with config.json's path
|
||||
if @constants.bootMountPointFromEnv
|
||||
return "#{@constants.bootMountPointFromEnv}/config.json"
|
||||
if constants.bootMountPointFromEnv
|
||||
return path.join(constants.bootMountPointFromEnv, 'config.json')
|
||||
# Older 1.X versions have config.json here
|
||||
return '/mnt/conf/config.json'
|
||||
else
|
||||
# In non-resinOS hosts (or older than 1.0.0), if CONFIG_JSON_PATH wasn't passed then we can't do atomic changes
|
||||
# (only access to config.json we have is in /boot, which is assumed to be a file bind mount where rename is impossible)
|
||||
throw new Error('Could not determine config.json path on host, atomic write will not be possible')
|
||||
.then (path) =>
|
||||
return "#{@constants.rootMountPoint}#{path}"
|
||||
.then (pathOnHost) ->
|
||||
return path.join(constants.rootMountPoint, pathOnHost)
|
||||
|
||||
configJsonPath: =>
|
||||
@configJsonPathOnHost()
|
||||
.catch (err) =>
|
||||
.catch (err) ->
|
||||
console.error(err.message)
|
||||
return @constants.configJsonNonAtomicPath
|
||||
return constants.configJsonNonAtomicPath
|
||||
|
||||
readConfigJson: =>
|
||||
@configJsonPath()
|
||||
.then (path) ->
|
||||
fs.readFileAsync(path)
|
||||
.then(JSON.parse)
|
||||
.then (configPath) ->
|
||||
fs.readFileAsync(configPath)
|
||||
.then(JSON.parse)
|
||||
|
||||
newUniqueKey: ->
|
||||
deviceRegister.generateUniqueKey()
|
||||
@ -282,7 +276,7 @@ module.exports = class Config extends EventEmitter
|
||||
|
||||
getMany: (keys, trx) =>
|
||||
# Get the values for several keys in an array
|
||||
Promise.all(_.map(keys, (key) => @get(key, trx) ))
|
||||
Promise.map(keys, (key) => @get(key, trx))
|
||||
.then (values) ->
|
||||
out = {}
|
||||
for key, i in keys
|
||||
|
@ -44,10 +44,10 @@ module.exports = class DeviceConfig
|
||||
connectivityCheckEnabled: { envVarName: 'RESIN_SUPERVISOR_CONNECTIVITY_CHECK', varType: 'bool', defaultValue: 'true' }
|
||||
loggingEnabled: { envVarName: 'RESIN_SUPERVISOR_LOG_CONTROL', varType: 'bool', defaultValue: 'true' }
|
||||
delta: { envVarName: 'RESIN_SUPERVISOR_DELTA', varType: 'bool', defaultValue: 'false' }
|
||||
deltaRequestTimeout: { envVarName: 'RESIN_SUPERVISOR_DELTA_REQUEST_TIMEOUT', varType: 'int' }
|
||||
deltaRequestTimeout: { envVarName: 'RESIN_SUPERVISOR_DELTA_REQUEST_TIMEOUT', varType: 'int', defaultValue: '30000' }
|
||||
deltaApplyTimeout: { envVarName: 'RESIN_SUPERVISOR_DELTA_APPLY_TIMEOUT', varType: 'int' }
|
||||
deltaRetryCount: { envVarName: 'RESIN_SUPERVISOR_DELTA_RETRY_COUNT', varType: 'int' }
|
||||
deltaRetryInterval: { envVarName: 'RESIN_SUPERVISOR_DELTA_RETRY_INTERVAL', varType: 'int' }
|
||||
deltaRetryCount: { envVarName: 'RESIN_SUPERVISOR_DELTA_RETRY_COUNT', varType: 'int', defaultValue: '30' }
|
||||
deltaRetryInterval: { envVarName: 'RESIN_SUPERVISOR_DELTA_RETRY_INTERVAL', varType: 'int', defaultValue: '10000' }
|
||||
lockOverride: { envVarName: 'RESIN_SUPERVISOR_OVERRIDE_LOCK', varType: 'bool', defaultValue: 'false' }
|
||||
}
|
||||
@validKeys = [ 'RESIN_HOST_LOG_TO_DISPLAY', 'RESIN_SUPERVISOR_VPN_CONTROL', 'RESIN_OVERRIDE_LOCK' ].concat(_.map(@configKeys, 'envVarName'))
|
||||
@ -57,18 +57,16 @@ module.exports = class DeviceConfig
|
||||
@config.set(step.target)
|
||||
.then =>
|
||||
@logger.logConfigChange(step.humanReadableTarget, { success: true })
|
||||
.catch (err) =>
|
||||
.tapCatch (err) =>
|
||||
@logger.logConfigChange(step.humanReadableTarget, { err })
|
||||
throw err
|
||||
setLogToDisplay: (step) =>
|
||||
logValue = { RESIN_HOST_LOG_TO_DISPLAY: step.target }
|
||||
@logger.logConfigChange(logValue)
|
||||
@setLogToDisplay(step.target)
|
||||
.then =>
|
||||
@logger.logConfigChange(logValue, { success: true })
|
||||
.catch (err) =>
|
||||
.tapCatch (err) =>
|
||||
@logger.logConfigChange(logValue, { err })
|
||||
throw err
|
||||
setVPNEnabled: (step, { initial = false } = {}) =>
|
||||
logValue = { RESIN_SUPERVISOR_VPN_CONTROL: step.target }
|
||||
if !initial
|
||||
@ -77,9 +75,8 @@ module.exports = class DeviceConfig
|
||||
.then =>
|
||||
if !initial
|
||||
@logger.logConfigChange(logValue, { success: true })
|
||||
.catch (err) =>
|
||||
.tapCatch (err) =>
|
||||
@logger.logConfigChange(logValue, { err })
|
||||
throw err
|
||||
setBootConfig: (step) =>
|
||||
@config.get('deviceType')
|
||||
.then (deviceType) =>
|
||||
@ -138,7 +135,7 @@ module.exports = class DeviceConfig
|
||||
@logger.logSystemMessage(err, { error: err }, 'Apply boot config error')
|
||||
throw new Error(err)
|
||||
return true
|
||||
else return false
|
||||
return false
|
||||
|
||||
getRequiredSteps: (currentState, targetState) =>
|
||||
current = _.clone(currentState.local?.config ? {})
|
||||
@ -191,8 +188,7 @@ module.exports = class DeviceConfig
|
||||
action: 'reboot'
|
||||
})
|
||||
return
|
||||
.then ->
|
||||
return steps
|
||||
.return(steps)
|
||||
|
||||
executeStepAction: (step, opts) =>
|
||||
@actionExecutors[step.action](step, opts)
|
||||
@ -251,7 +247,7 @@ module.exports = class DeviceConfig
|
||||
gosuper.get('/v1/log-to-display', { json: true })
|
||||
.spread (res, body) ->
|
||||
if res.statusCode == 404
|
||||
return undefined
|
||||
return
|
||||
if res.statusCode != 200
|
||||
throw new Error("Error getting log to display status: #{res.statusCode} #{body.Error}")
|
||||
return Boolean(body.Data)
|
||||
@ -293,9 +289,8 @@ module.exports = class DeviceConfig
|
||||
@logger.logSystemMessage("Applied boot config: #{JSON.stringify(conf)}", {}, 'Apply boot config success')
|
||||
@rebootRequired = true
|
||||
return true
|
||||
.catch (err) =>
|
||||
.tapCatch (err) =>
|
||||
@logger.logSystemMessage("Error setting boot config: #{err}", { error: err }, 'Apply boot config error')
|
||||
throw err
|
||||
|
||||
getVPNEnabled: ->
|
||||
gosuper.get('/v1/vpncontrol', { json: true })
|
||||
@ -304,9 +299,8 @@ module.exports = class DeviceConfig
|
||||
throw new Error("Error getting vpn status: #{res.statusCode} #{body.Error}")
|
||||
@gosuperHealthy = true
|
||||
return Boolean(body.Data)
|
||||
.catch (err) =>
|
||||
.tapCatch (err) =>
|
||||
@gosuperHealthy = false
|
||||
throw err
|
||||
|
||||
setVPNEnabled: (val) ->
|
||||
enable = checkTruthy(val) ? true
|
||||
|
@ -41,78 +41,77 @@ validateState = Promise.method (state) ->
|
||||
if state.dependent?
|
||||
validateDependentState(state.dependent)
|
||||
|
||||
class DeviceStateRouter
|
||||
constructor: (@deviceState) ->
|
||||
{ @applications, @config } = @deviceState
|
||||
@router = express.Router()
|
||||
@router.use(bodyParser.urlencoded(extended: true))
|
||||
@router.use(bodyParser.json())
|
||||
createDeviceStateRouter = (deviceState) ->
|
||||
router = express.Router()
|
||||
router.use(bodyParser.urlencoded(extended: true))
|
||||
router.use(bodyParser.json())
|
||||
|
||||
@router.post '/v1/reboot', (req, res) =>
|
||||
force = validation.checkTruthy(req.body.force)
|
||||
@deviceState.executeStepAction({ action: 'reboot' }, { force })
|
||||
.then (response) ->
|
||||
res.status(202).json(response)
|
||||
.catch (err) ->
|
||||
if err instanceof updateLock.UpdatesLockedError
|
||||
status = 423
|
||||
else
|
||||
status = 500
|
||||
res.status(status).json({ Data: '', Error: err?.message or err or 'Unknown error' })
|
||||
router.post '/v1/reboot', (req, res) ->
|
||||
force = validation.checkTruthy(req.body.force)
|
||||
deviceState.executeStepAction({ action: 'reboot' }, { force })
|
||||
.then (response) ->
|
||||
res.status(202).json(response)
|
||||
.catch (err) ->
|
||||
if err instanceof updateLock.UpdatesLockedError
|
||||
status = 423
|
||||
else
|
||||
status = 500
|
||||
res.status(status).json({ Data: '', Error: err?.message or err or 'Unknown error' })
|
||||
|
||||
@router.post '/v1/shutdown', (req, res) =>
|
||||
force = validation.checkTruthy(req.body.force)
|
||||
@deviceState.executeStepAction({ action: 'shutdown' }, { force })
|
||||
.then (response) ->
|
||||
res.status(202).json(response)
|
||||
.catch (err) ->
|
||||
if err instanceof updateLock.UpdatesLockedError
|
||||
status = 423
|
||||
else
|
||||
status = 500
|
||||
res.status(status).json({ Data: '', Error: err?.message or err or 'Unknown error' })
|
||||
router.post '/v1/shutdown', (req, res) ->
|
||||
force = validation.checkTruthy(req.body.force)
|
||||
deviceState.executeStepAction({ action: 'shutdown' }, { force })
|
||||
.then (response) ->
|
||||
res.status(202).json(response)
|
||||
.catch (err) ->
|
||||
if err instanceof updateLock.UpdatesLockedError
|
||||
status = 423
|
||||
else
|
||||
status = 500
|
||||
res.status(status).json({ Data: '', Error: err?.message or err or 'Unknown error' })
|
||||
|
||||
@router.get '/v1/device/host-config', (req, res) ->
|
||||
hostConfig.get()
|
||||
.then (conf) ->
|
||||
res.json(conf)
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
router.get '/v1/device/host-config', (req, res) ->
|
||||
hostConfig.get()
|
||||
.then (conf) ->
|
||||
res.json(conf)
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
|
||||
@router.patch '/v1/device/host-config', (req, res) =>
|
||||
hostConfig.patch(req.body, @config)
|
||||
.then ->
|
||||
res.status(200).send('OK')
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
router.patch '/v1/device/host-config', (req, res) ->
|
||||
hostConfig.patch(req.body, deviceState.config)
|
||||
.then ->
|
||||
res.status(200).send('OK')
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
|
||||
@router.get '/v1/device', (req, res) =>
|
||||
@deviceState.getStatus()
|
||||
.then (state) ->
|
||||
stateToSend = _.pick(state.local, [
|
||||
'api_port'
|
||||
'ip_address'
|
||||
'os_version'
|
||||
'supervisor_version'
|
||||
'update_pending'
|
||||
'update_failed'
|
||||
'update_downloaded'
|
||||
])
|
||||
if state.local.is_on__commit?
|
||||
stateToSend.commit = state.local.is_on__commit
|
||||
# Will produce nonsensical results for multicontainer apps...
|
||||
service = _.toPairs(_.toPairs(state.local.apps)[0]?[1]?.services)[0]?[1]
|
||||
if service?
|
||||
stateToSend.status = service.status
|
||||
# For backwards compatibility, we adapt Running to the old "Idle"
|
||||
if stateToSend.status == 'Running'
|
||||
stateToSend.status = 'Idle'
|
||||
stateToSend.download_progress = service.download_progress
|
||||
res.json(stateToSend)
|
||||
.catch (err) ->
|
||||
res.status(500).json({ Data: '', Error: err?.message or err or 'Unknown error' })
|
||||
router.get '/v1/device', (req, res) ->
|
||||
deviceState.getStatus()
|
||||
.then (state) ->
|
||||
stateToSend = _.pick(state.local, [
|
||||
'api_port'
|
||||
'ip_address'
|
||||
'os_version'
|
||||
'supervisor_version'
|
||||
'update_pending'
|
||||
'update_failed'
|
||||
'update_downloaded'
|
||||
])
|
||||
if state.local.is_on__commit?
|
||||
stateToSend.commit = state.local.is_on__commit
|
||||
# Will produce nonsensical results for multicontainer apps...
|
||||
service = _.toPairs(_.toPairs(state.local.apps)[0]?[1]?.services)[0]?[1]
|
||||
if service?
|
||||
stateToSend.status = service.status
|
||||
# For backwards compatibility, we adapt Running to the old "Idle"
|
||||
if stateToSend.status == 'Running'
|
||||
stateToSend.status = 'Idle'
|
||||
stateToSend.download_progress = service.download_progress
|
||||
res.json(stateToSend)
|
||||
.catch (err) ->
|
||||
res.status(500).json({ Data: '', Error: err?.message or err or 'Unknown error' })
|
||||
|
||||
@router.use(@applications.router)
router.use(deviceState.applications.router)
return router
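
The same factory-function pattern (a plain closure over its dependencies instead of a router class) is what the supervisor routers switch to in this commit. A minimal, hypothetical sketch of the shape, with made-up names:

express = require 'express'

# Hypothetical example; not part of the supervisor code.
createPingRouter = (service) ->
  router = express.Router()
  router.get '/ping', (req, res) ->
    res.send(if service.healthy then 'OK' else 'NOT OK')
  return router

# usage:
# app = express()
# app.use(createPingRouter({ healthy: true }))
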
module.exports = class DeviceState extends EventEmitter
|
||||
constructor: ({ @db, @config, @eventTracker }) ->
|
||||
@ -131,27 +130,20 @@ module.exports = class DeviceState extends EventEmitter
|
||||
@lastApplyStart = process.hrtime()
|
||||
@scheduledApply = null
|
||||
@shuttingDown = false
|
||||
@_router = new DeviceStateRouter(this)
|
||||
@router = @_router.router
|
||||
@router = createDeviceStateRouter(this)
|
||||
@on 'apply-target-state-end', (err) ->
|
||||
if err?
|
||||
console.log("Apply error #{err}")
|
||||
else
|
||||
console.log('Apply success!')
|
||||
#@on 'step-completed', (err) ->
|
||||
# if err?
|
||||
# console.log("Step completed with error #{err}")
|
||||
# else
|
||||
# console.log('Step success!')
|
||||
#@on 'step-error', (err) ->
|
||||
# console.log("Step error #{err}")
|
||||
|
||||
@applications.on('change', @reportCurrentState)
|
||||
|
||||
healthcheck: =>
@config.getMany([ 'appUpdatePollInterval', 'offlineMode' ])
.then (conf) =>
cycleTimeWithinInterval = process.hrtime(@lastApplyStart)[0] - @applications.timeSpentFetching < 2 * conf.appUpdatePollInterval
cycleTime = process.hrtime(@lastApplyStart)
cycleTimeMs = cycleTime[0] * 1000 + cycleTime[1] / 1e6
cycleTimeWithinInterval = cycleTimeMs - @applications.timeSpentFetching < 2 * conf.appUpdatePollInterval
applyTargetHealthy = conf.offlineMode or !@applyInProgress or @applications.fetchesInProgress > 0 or cycleTimeWithinInterval
return applyTargetHealthy and @deviceConfig.gosuperHealthy
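
process.hrtime returns a [seconds, nanoseconds] tuple, so comparing its first element directly against a millisecond interval was the bug this hunk fixes. The conversion used above, as a standalone sketch:

start = process.hrtime()

setTimeout ->
  [ seconds, nanoseconds ] = process.hrtime(start)
  elapsedMs = seconds * 1000 + nanoseconds / 1e6
  console.log("elapsed: #{elapsedMs.toFixed(3)} ms")  # roughly 250 ms
, 250
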
@ -228,7 +220,7 @@ module.exports = class DeviceState extends EventEmitter
|
||||
@reportCurrentState(
|
||||
ip_address: addresses.join(' ')
|
||||
)
|
||||
, @config.constants.ipAddressUpdateInterval
|
||||
, constants.ipAddressUpdateInterval
|
||||
|
||||
saveInitialConfig: =>
|
||||
@deviceConfig.getCurrent()
|
||||
@ -331,7 +323,7 @@ module.exports = class DeviceState extends EventEmitter
|
||||
if _.isArray(stateFromFile)
|
||||
# This is a legacy apps.json
|
||||
stateFromFile = @_convertLegacyAppsJson(stateFromFile)
|
||||
images = _.flatten(_.map(stateFromFile.apps, (app, appId) =>
|
||||
images = _.flatMap stateFromFile.apps, (app, appId) =>
|
||||
_.map app.services, (service, serviceId) =>
|
||||
svc = {
|
||||
imageName: service.image
|
||||
@ -342,7 +334,6 @@ module.exports = class DeviceState extends EventEmitter
|
||||
appId
|
||||
}
|
||||
return @applications.imageForService(svc)
|
||||
))
|
||||
Promise.map images, (img) =>
|
||||
@applications.images.normalise(img.name)
|
||||
.then (name) =>
|
||||
@ -350,12 +341,12 @@ module.exports = class DeviceState extends EventEmitter
|
||||
@applications.images.save(img)
|
||||
.then =>
|
||||
@deviceConfig.getCurrent()
|
||||
.then (deviceConf) =>
|
||||
_.defaults(stateFromFile.config, deviceConf)
|
||||
stateFromFile.name ?= ''
|
||||
@setTarget({
|
||||
local: stateFromFile
|
||||
})
|
||||
.then (deviceConf) =>
|
||||
_.defaults(stateFromFile.config, deviceConf)
|
||||
stateFromFile.name ?= ''
|
||||
@setTarget({
|
||||
local: stateFromFile
|
||||
})
|
||||
.catch (err) =>
|
||||
@eventTracker.track('Loading preloaded apps failed', { error: err })
|
||||
|
||||
@ -364,17 +355,18 @@ module.exports = class DeviceState extends EventEmitter
|
||||
.then =>
|
||||
@logger.logSystemMessage('Rebooting', {}, 'Reboot')
|
||||
device.reboot()
|
||||
.tap =>
|
||||
@emit('shutdown')
|
||||
.tap =>
|
||||
@shuttingDown = true
|
||||
@emitAsync('shutdown')
|
||||
|
||||
shutdown: (force, skipLock) =>
|
||||
@applications.stopAll({ force, skipLock })
|
||||
.then =>
|
||||
@logger.logSystemMessage('Shutting down', {}, 'Shutdown')
|
||||
device.shutdown()
|
||||
.tap =>
|
||||
@shuttingDown = true
|
||||
@emitAsync('shutdown')
|
||||
.tap =>
|
||||
@shuttingDown = true
|
||||
@emitAsync('shutdown')
|
||||
|
||||
executeStepAction: (step, { force, initial, skipLock }) =>
|
||||
Promise.try =>
|
||||
@ -397,27 +389,25 @@ module.exports = class DeviceState extends EventEmitter
|
||||
if @shuttingDown
|
||||
return
|
||||
@executeStepAction(step, { force, initial, skipLock })
|
||||
.catch (err) =>
|
||||
.tapCatch (err) =>
|
||||
@emitAsync('step-error', err, step)
|
||||
throw err
|
||||
.then (stepResult) =>
|
||||
@emitAsync('step-completed', null, step, stepResult)
|
||||
|
||||
applyError: (err, { force, initial, intermediate }) =>
|
||||
@emitAsync('apply-target-state-error', err)
|
||||
@emitAsync('apply-target-state-end', err)
|
||||
if !intermediate
|
||||
@failedUpdates += 1
|
||||
@reportCurrentState(update_failed: true)
|
||||
if @scheduledApply?
|
||||
console.log("Updating failed, but there's another update scheduled immediately: ", err)
|
||||
else
|
||||
delay = Math.min((2 ** @failedUpdates) * 500, 30000)
|
||||
# If there was an error then schedule another attempt briefly in the future.
|
||||
console.log('Scheduling another update attempt due to failure: ', delay, err)
|
||||
@triggerApplyTarget({ force, delay, initial })
|
||||
else
|
||||
if intermediate
|
||||
throw err
|
||||
@failedUpdates += 1
|
||||
@reportCurrentState(update_failed: true)
|
||||
if @scheduledApply?
|
||||
console.log("Updating failed, but there's another update scheduled immediately: ", err)
|
||||
else
|
||||
delay = Math.min((2 ** @failedUpdates) * 500, 30000)
|
||||
# If there was an error then schedule another attempt briefly in the future.
|
||||
console.log('Scheduling another update attempt due to failure: ', delay, err)
|
||||
@triggerApplyTarget({ force, delay, initial })
|
||||
|
||||
applyTarget: ({ force = false, initial = false, intermediate = false, skipLock = false } = {}) =>
|
||||
nextDelay = 200
|
||||
@ -453,8 +443,7 @@ module.exports = class DeviceState extends EventEmitter
|
||||
nextDelay = 1000
|
||||
Promise.map steps, (step) =>
|
||||
@applyStep(step, { force, initial, intermediate, skipLock })
|
||||
.then ->
|
||||
Promise.delay(nextDelay)
|
||||
.delay(nextDelay)
|
||||
.then =>
|
||||
@applyTarget({ force, initial, intermediate, skipLock })
|
||||
.catch (err) =>
|
||||
@ -497,13 +486,13 @@ module.exports = class DeviceState extends EventEmitter
|
||||
@lastApplyStart = process.hrtime()
|
||||
console.log('Applying target state')
|
||||
@applyTarget({ force, initial })
|
||||
.finally =>
|
||||
@applyInProgress = false
|
||||
@reportCurrentState()
|
||||
if @scheduledApply?
|
||||
@triggerApplyTarget(@scheduledApply)
|
||||
@scheduledApply = null
|
||||
return
|
||||
.finally =>
|
||||
@applyInProgress = false
|
||||
@reportCurrentState()
|
||||
if @scheduledApply?
|
||||
@triggerApplyTarget(@scheduledApply)
|
||||
@scheduledApply = null
|
||||
return null
|
||||
|
||||
applyIntermediateTarget: (intermediateTarget, { force = false, skipLock = false } = {}) =>
|
||||
@intermediateTarget = _.cloneDeep(intermediateTarget)
|
||||
|
@ -27,6 +27,7 @@ module.exports = class EventTracker
|
||||
if properties instanceof Error
|
||||
properties = error: properties
|
||||
|
||||
properties = _.cloneDeep(properties)
|
||||
# If the properties has an error argument that is an Error object then it treats it nicely,
|
||||
# rather than letting it become `{}`
|
||||
if properties.error instanceof Error
|
||||
@ -34,8 +35,6 @@ module.exports = class EventTracker
|
||||
message: properties.error.message
|
||||
stack: properties.error.stack
|
||||
|
||||
properties = _.cloneDeep(properties)
|
||||
|
||||
# Don't log private env vars (e.g. api keys) or other secrets - use a whitelist to mask what we send
|
||||
properties = mask(properties, mixpanelMask)
|
||||
@_logEvent('Event:', ev, JSON.stringify(properties))
|
||||
|
@ -37,3 +37,5 @@ module.exports =
|
||||
ipAddressUpdateInterval: 30 * 1000
|
||||
imageCleanupErrorIgnoreTimeout: 3600 * 1000
|
||||
maxDeltaDownloads: 3
|
||||
|
||||
process.env.DOCKER_HOST ?= "unix://#{module.exports.dockerSocket}"
|
||||
|
@ -4,6 +4,5 @@ exports.envArrayToObject = (env) ->
# env is an array of strings that say 'key=value'
toPair = (keyVal) ->
m = keyVal.match(/^([^=]+)=(.*)$/)
[ _unused, key, val ] = m
return [ key, val ]
return m[1..]
_.fromPairs(_.map(env, toPair))
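
The destructuring is replaced by an array slice: m[1..] drops the full-match element and keeps the two capture groups. A standalone sketch of the resulting helper (restated outside its module, so the export wrapper is omitted):

_ = require 'lodash'

envArrayToObject = (env) ->
  toPair = (keyVal) ->
    m = keyVal.match(/^([^=]+)=(.*)$/)
    return m[1..]
  _.fromPairs(_.map(env, toPair))

console.log(envArrayToObject([ 'PATH=/usr/bin', 'EMPTY=', 'URL=http://example.com/?a=b' ]))
# => { PATH: '/usr/bin', EMPTY: '', URL: 'http://example.com/?a=b' }
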
@ -1,15 +1,14 @@
|
||||
gosuper = require './gosuper'
|
||||
|
||||
exports.reboot = ->
|
||||
gosuper.post('/v1/reboot', { json: true })
|
||||
gosuperAction = (action) ->
|
||||
gosuper.post("/v1/#{action}", { json: true })
|
||||
.spread (res, body) ->
|
||||
if res.statusCode != 202
|
||||
throw new Error(body.Error)
|
||||
return body
|
||||
|
||||
exports.reboot = ->
|
||||
gosuperAction('reboot')
|
||||
|
||||
exports.shutdown = ->
|
||||
gosuper.post('/v1/shutdown', { json: true })
|
||||
.spread (res, body) ->
|
||||
if res.statusCode != 202
|
||||
throw new Error(body.Error)
|
||||
return body
|
||||
gosuperAction('shutdown')
|
||||
|
@ -31,8 +31,8 @@ module.exports = class DockerUtils extends DockerToolbelt
|
||||
super(opts)
|
||||
@dockerProgress = new DockerProgress(dockerToolbelt: this)
|
||||
@supervisorTagPromise = @normaliseImageName(constants.supervisorImage)
|
||||
@InvalidNetGatewayError = class InvalidNetGatewayError extends TypedError
|
||||
return this
|
||||
|
||||
InvalidNetGatewayError: class InvalidNetGatewayError extends TypedError
|
||||
|
||||
getRepoAndTag: (image) =>
|
||||
@getRegistryAndName(image)
|
||||
@ -71,14 +71,14 @@ module.exports = class DockerUtils extends DockerToolbelt
|
||||
url = "#{tokenEndpoint}?service=#{dstInfo.registry}&scope=repository:#{dstInfo.imageName}:pull&scope=repository:#{srcInfo.imageName}:pull"
|
||||
request.getAsync(url, opts)
|
||||
.get(1)
|
||||
.then (b) ->
|
||||
.then (responseBody) ->
|
||||
opts =
|
||||
followRedirect: false
|
||||
timeout: requestTimeout
|
||||
|
||||
if b?.token?
|
||||
if responseBody?.token?
|
||||
opts.auth =
|
||||
bearer: b.token
|
||||
bearer: responseBody.token
|
||||
sendImmediately: true
|
||||
new Promise (resolve, reject) ->
|
||||
request.get("#{deltaEndpoint}/api/v2/delta?src=#{deltaSource}&dest=#{imgDest}", opts)
|
||||
@ -116,8 +116,7 @@ module.exports = class DockerUtils extends DockerToolbelt
|
||||
getImageEnv: (id) ->
|
||||
@getImage(id).inspect()
|
||||
.get('Config').get('Env')
|
||||
.then (env) ->
|
||||
envArrayToObject(env)
|
||||
.then(envArrayToObject)
|
||||
.catch (err) ->
|
||||
console.log('Error getting env from image', err, err.stack)
|
||||
return {}
|
||||
|
@ -2,3 +2,4 @@
|
||||
|
||||
exports.NotFoundError = (err) -> checkInt(err.statusCode) is 404
|
||||
exports.ENOENT = (err) -> err.code is 'ENOENT'
|
||||
exports.EEXIST = (err) -> err.code is 'EEXIST'
|
||||
|
@ -1,5 +1,6 @@
Promise = require 'bluebird'
fs = Promise.promisifyAll(require('fs'))
path = require 'path'

exports.writeAndSyncFile = (path, data) ->
fs.openAsync(path, 'w')
@ -14,3 +15,10 @@ exports.writeFileAtomic = (path, data) ->
exports.writeAndSyncFile("#{path}.new", data)
.then ->
fs.renameAsync("#{path}.new", path)

exports.safeRename = (src, dest) ->
fs.renameAsync(src, dest)
.then ->
fs.openAsync(path.dirname(dest))
.tap(fs.fsyncAsync)
.then(fs.closeAsync)
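
safeRename complements writeFileAtomic: rename, then fsync the destination's parent directory so the directory entry itself survives a power cut. A hedged usage sketch; the paths are made up and the require path is assumed relative to the source tree:

Promise = require 'bluebird'
fs = Promise.promisifyAll(require('fs'))
{ safeRename } = require './lib/fs-utils'  # assumed path

fs.writeFileAsync('/tmp/config.json.new', JSON.stringify({ hello: 'world' }))
.then ->
  safeRename('/tmp/config.json.new', '/tmp/config.json')
.then ->
  console.log('rename and its directory entry are now on disk')
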
@ -3,13 +3,13 @@ childProcess = Promise.promisifyAll(require('child_process'))
|
||||
|
||||
clearAndAppendIptablesRule = (rule) ->
|
||||
childProcess.execAsync("iptables -D #{rule}")
|
||||
.catch(->)
|
||||
.catchReturn()
|
||||
.then ->
|
||||
childProcess.execAsync("iptables -A #{rule}")
|
||||
|
||||
clearAndInsertIptablesRule = (rule) ->
|
||||
childProcess.execAsync("iptables -D #{rule}")
|
||||
.catch(->)
|
||||
.catchReturn()
|
||||
.then ->
|
||||
childProcess.execAsync("iptables -I #{rule}")
|
||||
|
||||
|
@ -19,16 +19,9 @@ exports.singleToMulticontainerApp = (app) ->
|
||||
}
|
||||
defaultVolume = exports.defaultLegacyVolume()
|
||||
newApp.volumes[defaultVolume] = {}
|
||||
updateStrategy = conf['RESIN_SUPERVISOR_UPDATE_STRATEGY']
|
||||
if !updateStrategy?
|
||||
updateStrategy = 'download-then-kill'
|
||||
handoverTimeout = conf['RESIN_SUPERVISOR_HANDOVER_TIMEOUT']
|
||||
if !handoverTimeout?
|
||||
handoverTimeout = ''
|
||||
restartPolicy = conf['RESIN_APP_RESTART_POLICY']
|
||||
if !restartPolicy?
|
||||
restartPolicy = 'always'
|
||||
|
||||
updateStrategy = conf['RESIN_SUPERVISOR_UPDATE_STRATEGY'] ? 'download-then-kill'
|
||||
handoverTimeout = conf['RESIN_SUPERVISOR_HANDOVER_TIMEOUT'] ? ''
|
||||
restartPolicy = conf['RESIN_APP_RESTART_POLICY'] ? 'always'
|
||||
newApp.services = {
|
||||
'1': {
|
||||
appId: appId
|
||||
|
@ -39,25 +39,23 @@ exports.lock = do ->
|
||||
Promise.map _.clone(locksTaken), (lockName) ->
|
||||
_.pull(locksTaken, lockName)
|
||||
lockFile.unlockAsync(lockName)
|
||||
.finally ->
|
||||
release()
|
||||
.finally(release)
|
||||
_writeLock(appId)
|
||||
.tap (release) ->
|
||||
theLockDir = path.join(constants.rootMountPoint, baseLockPath(appId))
|
||||
fs.readdirAsync(theLockDir)
|
||||
.catch ENOENT, -> []
|
||||
.catchReturn(ENOENT, [])
|
||||
.mapSeries (serviceName) ->
|
||||
tmpLockName = lockFileOnHost(appId, serviceName)
|
||||
Promise.try ->
|
||||
lockFile.unlockAsync(tmpLockName) if force == true
|
||||
.then ->
|
||||
lockFile.lockAsync(tmpLockName)
|
||||
.then ->
|
||||
locksTaken.push(tmpLockName)
|
||||
.catch ENOENT, _.noop
|
||||
.then ->
|
||||
locksTaken.push(tmpLockName)
|
||||
.catchReturn(ENOENT, null)
|
||||
.catch (err) ->
dispose(release)
.finally ->
throw new exports.UpdatesLockedError("Updates are locked: #{err.message}")
.throw(new exports.UpdatesLockedError("Updates are locked: #{err.message}"))
.disposer(dispose)
Promise.using takeTheLock(), -> fn()
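
exports.lock builds on bluebird's disposer/using pair, which guarantees the cleanup runs even when fn rejects. A minimal standalone sketch of that pattern, with a fake resource:

Promise = require 'bluebird'

getResource = ->
  Promise.resolve({ name: 'fake-lock' })
  .disposer (resource) ->
    console.log("releasing #{resource.name}")

Promise.using getResource(), (resource) ->
  console.log("working while holding #{resource.name}")
  # the disposer above runs afterwards, even if this handler throws
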
@ -14,7 +14,7 @@ exports.checkInt = checkInt = (s, options = {}) ->
|
||||
exports.checkString = (s) ->
|
||||
# Make sure `s` exists and is not an empty string, or 'null' or 'undefined'.
|
||||
# This might happen if the parsing of config.json on the host using jq is wrong (it is buggy in some versions).
|
||||
if !s? or !_.isString(s) or s == 'null' or s == 'undefined' or s == ''
|
||||
if !s? or !_.isString(s) or s in [ 'null', 'undefined', '' ]
|
||||
return
|
||||
return s
|
||||
|
||||
@ -28,11 +28,11 @@ exports.isValidShortText = isValidShortText = (t) ->
|
||||
_.isString(t) and t.length <= 255
|
||||
exports.isValidEnv = isValidEnv = (obj) ->
|
||||
_.isObject(obj) and _.every obj, (val, key) ->
|
||||
isValidShortText(key) and /^[a-zA-Z_]+[a-zA-Z0-9_]*$/.test(key) and _.isString(val)
|
||||
isValidShortText(key) and /^[a-zA-Z_][a-zA-Z0-9_]*$/.test(key) and _.isString(val)
|
||||
|
||||
exports.isValidLabelsObject = isValidLabelsObject = (obj) ->
|
||||
_.isObject(obj) and _.every obj, (val, key) ->
|
||||
isValidShortText(key) and /^[a-zA-Z_]+[a-zA-Z0-9\.\-]*$/.test(key) and _.isString(val)
|
||||
isValidShortText(key) and /^[a-zA-Z][a-zA-Z0-9\.\-]*$/.test(key) and _.isString(val)
|
||||
|
||||
undefinedOrValidEnv = (val) ->
|
||||
if val? and !isValidEnv(val)
|
||||
@ -41,39 +41,54 @@ undefinedOrValidEnv = (val) ->
|
||||
|
||||
exports.isValidDependentAppsObject = (apps) ->
|
||||
return false if !_.isObject(apps)
|
||||
return false if !_.every apps, (val, appId) ->
|
||||
return _.every apps, (val, appId) ->
|
||||
val = _.defaults(_.clone(val), { config: undefined, environment: undefined, commit: undefined, image: undefined })
|
||||
return false if !isValidShortText(appId) or !checkInt(appId)?
|
||||
return false if !isValidShortText(val.name)
|
||||
return false if val.commit? and (!isValidShortText(val.image) or !isValidShortText(val.commit))
|
||||
return undefinedOrValidEnv(val.config) and undefinedOrValidEnv(val.environment)
|
||||
return true
|
||||
return _.conformsTo(val, {
|
||||
name: isValidShortText
|
||||
image: (i) -> !val.commit? or isValidShortText(i)
|
||||
commit: (c) -> !c? or isValidShortText(c)
|
||||
config: undefinedOrValidEnv
|
||||
environment: undefinedOrValidEnv
|
||||
})
|
||||
|
||||
isValidService = (service, serviceId) ->
return false if !isValidEnv(service.environment)
return false if !isValidShortText(service.serviceName)
return false if !isValidShortText(service.image)
return false if !isValidShortText(serviceId) or !checkInt(serviceId)
return false if !checkInt(service.imageId)?
return false if !isValidLabelsObject(service.labels)
return true
return _.conformsTo(service, {
serviceName: isValidShortText
image: isValidShortText
environment: isValidEnv
imageId: (i) -> checkInt(i)?
labels: isValidLabelsObject
})
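
_.conformsTo checks every listed key with its predicate and returns false as soon as one fails, which is what the removed return-false chain did by hand. A small sketch with a made-up service object and a simplified integer check:

_ = require 'lodash'

isValidShortText = (t) -> _.isString(t) and t.length <= 255

service = { serviceName: 'web', image: 'nginx:alpine', imageId: '42', environment: {}, labels: {} }

console.log _.conformsTo(service, {
  serviceName: isValidShortText
  image: isValidShortText
  imageId: (i) -> !isNaN(parseInt(i, 10))
})
# => true; any failing predicate makes conformsTo return false
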
exports.isValidAppsObject = (obj) ->
|
||||
return false if !_.isObject(obj)
|
||||
return false if !_.every obj, (val, appId) ->
|
||||
return _.every obj, (val, appId) ->
|
||||
return false if !isValidShortText(appId) or !checkInt(appId)?
|
||||
return false if !isValidShortText(val.name) or (val.releaseId? and !checkInt(val.releaseId)?)
|
||||
return false if !_.isObject(val.services)
|
||||
return false if !_.every(val.services, isValidService)
|
||||
return true
|
||||
return true
|
||||
return _.conformsTo(_.defaults(_.clone(val), { releaseId: undefined }), {
|
||||
name: isValidShortText
|
||||
releaseId: (r) -> !r? or checkInt(r)?
|
||||
services: (s) -> _.isObject(s) and _.every(s, isValidService)
|
||||
})
|
||||
|
||||
exports.isValidDependentDevicesObject = (devices) ->
|
||||
return false if !_.isObject(devices)
|
||||
return false if !_.every devices, (val, uuid) ->
|
||||
return _.every devices, (val, uuid) ->
|
||||
return false if !isValidShortText(uuid)
|
||||
return false if !isValidShortText(val.name)
|
||||
return false if !_.isObject(val.apps) or _.isEmpty(val.apps)
|
||||
return false if !_.every val.apps, (app) ->
|
||||
return undefinedOrValidEnv(app.config) and undefinedOrValidEnv(app.environment)
|
||||
return true
|
||||
return true
|
||||
return _.conformsTo(val, {
|
||||
name: isValidShortText
|
||||
apps: (a) ->
|
||||
return (
|
||||
_.isObject(a) and
|
||||
!_.isEmpty(a) and
|
||||
_.every a, (app) ->
|
||||
app = _.defaults(_.clone(app), { config: undefined, environment: undefined })
|
||||
_.conformsTo(app, { config: undefinedOrValidEnv, environment: undefinedOrValidEnv })
|
||||
)
|
||||
})
|
||||
|
||||
exports.validStringOrUndefined = (s) ->
|
||||
_.isUndefined(s) or (_.isString(s) and !_.isEmpty(s))
|
||||
exports.validObjectOrUndefined = (o) ->
|
||||
_.isUndefined(o) or _.isObject(o)
|
||||
|
@ -8,7 +8,7 @@
|
||||
// history without actually adding drop statements (mostly just becoming unused, but still there).
|
||||
|
||||
exports.up = function (knex, Promise) {
|
||||
let addColumn = function (table, column, type) {
|
||||
const addColumn = function (table, column, type) {
|
||||
return knex.schema.hasColumn(table, column)
|
||||
.then((exists) => {
|
||||
if (!exists) {
|
||||
@ -16,7 +16,7 @@ exports.up = function (knex, Promise) {
|
||||
}
|
||||
})
|
||||
}
|
||||
let dropColumn = function (table, column) {
|
||||
const dropColumn = function (table, column) {
|
||||
return knex.schema.hasColumn(table, column)
|
||||
.then((exists) => {
|
||||
if (exists) {
|
||||
@ -24,7 +24,7 @@ exports.up = function (knex, Promise) {
|
||||
}
|
||||
})
|
||||
}
|
||||
let createTableOrRun = function (tableName, tableCreator, runIfTableExists) {
|
||||
const createTableOrRun = function (tableName, tableCreator, runIfTableExists) {
|
||||
return knex.schema.hasTable(tableName)
|
||||
.then((exists) => {
|
||||
if (!exists) {
|
||||
@ -34,7 +34,7 @@ exports.up = function (knex, Promise) {
|
||||
}
|
||||
})
|
||||
}
|
||||
let dropTable = function (tableName) {
|
||||
const dropTable = function (tableName) {
|
||||
return knex.schema.hasTable(tableName)
|
||||
.then((exists) => {
|
||||
if (exists) {
|
||||
@ -132,5 +132,5 @@ exports.up = function (knex, Promise) {
|
||||
}
|
||||
|
||||
exports.down = function(knex, Promise) {
|
||||
return Promise.try(() => { throw new Error('Not implemented') })
|
||||
return Promise.reject(new Error('Not implemented'))
|
||||
}
|
||||
|
@ -15,16 +15,16 @@ var tryParse = function (obj) {
|
||||
var singleToMulticontainerApp = function (app) {
|
||||
// From *very* old supervisors, env or config may be null
|
||||
// so we ignore errors parsing them
|
||||
let conf = tryParse(app.config)
|
||||
let env = tryParse(app.env)
|
||||
let environment = {}
|
||||
let appId = parseInt(app.appId)
|
||||
const conf = tryParse(app.config)
|
||||
const env = tryParse(app.env)
|
||||
const environment = {}
|
||||
const appId = parseInt(app.appId)
|
||||
for (let key in env) {
|
||||
if (!/^RESIN_/.test(key)) {
|
||||
environment[key] = env[key]
|
||||
}
|
||||
}
|
||||
let newApp = {
|
||||
const newApp = {
|
||||
appId: appId,
|
||||
commit: app.commit,
|
||||
name: app.name,
|
||||
@ -32,7 +32,7 @@ var singleToMulticontainerApp = function (app) {
|
||||
networks: {},
|
||||
volumes: {}
|
||||
}
|
||||
let defaultVolume = defaultLegacyVolume(appId)
|
||||
const defaultVolume = defaultLegacyVolume(appId)
|
||||
newApp.volumes[defaultVolume] = {}
|
||||
let updateStrategy = conf['RESIN_SUPERVISOR_UPDATE_STRATEGY']
|
||||
if (updateStrategy == null) {
|
||||
@ -79,7 +79,7 @@ var singleToMulticontainerApp = function (app) {
|
||||
}
|
||||
|
||||
var jsonifyAppFields = function (app) {
|
||||
let newApp = Object.assign({}, app)
|
||||
const newApp = Object.assign({}, app)
|
||||
newApp.services = JSON.stringify(app.services)
|
||||
newApp.networks = JSON.stringify(app.networks)
|
||||
newApp.volumes = JSON.stringify(app.volumes)
|
||||
@ -87,7 +87,7 @@ var jsonifyAppFields = function (app) {
|
||||
}
|
||||
|
||||
var imageForApp = function (app) {
|
||||
let service = app.services[0]
|
||||
const service = app.services[0]
|
||||
return {
|
||||
name: service.image,
|
||||
appId: service.appId,
|
||||
@ -147,7 +147,7 @@ exports.up = function (knex, Promise) {
|
||||
})
|
||||
})
|
||||
.map((app) => {
|
||||
let migratedApp = singleToMulticontainerApp(app)
|
||||
const migratedApp = singleToMulticontainerApp(app)
|
||||
return knex('app').insert(jsonifyAppFields(migratedApp))
|
||||
.then(() => knex('image').insert(imageForApp(migratedApp)))
|
||||
})
|
||||
@ -160,9 +160,8 @@ exports.up = function (knex, Promise) {
|
||||
.then((deviceConf) => {
|
||||
return knex.schema.dropTable('deviceConfig')
|
||||
.then(() => {
|
||||
let values = JSON.parse(deviceConf[0].values)
|
||||
let promises = []
|
||||
let configKeys = {
|
||||
const values = JSON.parse(deviceConf[0].values)
|
||||
const configKeys = {
|
||||
'RESIN_SUPERVISOR_POLL_INTERVAL': 'appUpdatePollInterval',
|
||||
'RESIN_SUPERVISOR_LOCAL_MODE': 'localMode',
|
||||
'RESIN_SUPERVISOR_CONNECTIVITY_CHECK': 'connectivityCheckEnabled',
|
||||
@ -174,12 +173,11 @@ exports.up = function (knex, Promise) {
|
||||
'RESIN_SUPERVISOR_DELTA_RETRY_INTERVAL': 'deltaRequestTimeout',
|
||||
'RESIN_SUPERVISOR_OVERRIDE_LOCK': 'lockOverride'
|
||||
}
|
||||
for (let envVarName in values) {
|
||||
return Promise.map(Object.keys(values), (envVarName) => {
|
||||
if (configKeys[envVarName] != null) {
|
||||
promises.push(knex('config').insert({ key: configKeys[envVarName], value: values[envVarName]}))
|
||||
return knex('config').insert({ key: configKeys[envVarName], value: values[envVarName]})
|
||||
}
|
||||
}
|
||||
return Promise.all(promises)
|
||||
})
|
||||
})
|
||||
})
|
||||
.then(() => {
|
||||
@ -222,7 +220,7 @@ exports.up = function (knex, Promise) {
|
||||
})
|
||||
.then(() => {
|
||||
return Promise.map(dependentApps, (app) => {
|
||||
let newApp = {
|
||||
const newApp = {
|
||||
appId: parseInt(app.appId),
|
||||
parentApp: parseInt(app.parentAppId),
|
||||
image: app.imageId,
|
||||
@ -232,7 +230,7 @@ exports.up = function (knex, Promise) {
|
||||
config: JSON.stringify(tryParse(app.config)),
|
||||
environment: JSON.stringify(tryParse(app.environment))
|
||||
}
|
||||
let image = imageForDependentApp(newApp)
|
||||
const image = imageForDependentApp(newApp)
|
||||
return knex('image').insert(image)
|
||||
.then(() => knex('dependentApp').insert(newApp))
|
||||
.then(() => knex('dependentAppTarget').insert(newApp))
|
||||
@ -276,7 +274,7 @@ exports.up = function (knex, Promise) {
|
||||
})
|
||||
.then(() => {
|
||||
return Promise.map(dependentDevices, (device) => {
|
||||
let newDevice = Object.assign({}, device)
|
||||
const newDevice = Object.assign({}, device)
|
||||
newDevice.appId = parseInt(device.appId)
|
||||
newDevice.deviceId = parseInt(device.deviceId)
|
||||
if (device.is_managed_by != null) {
|
||||
@ -289,7 +287,7 @@ exports.up = function (knex, Promise) {
|
||||
if (newDevice.markedForDeletion == null) {
|
||||
newDevice.markedForDeletion = false
|
||||
}
|
||||
let deviceTarget = {
|
||||
const deviceTarget = {
|
||||
uuid: device.uuid,
|
||||
name: device.name,
|
||||
apps: {}
|
||||
@ -307,5 +305,5 @@ exports.up = function (knex, Promise) {
|
||||
}
|
||||
|
||||
exports.down = function(knex, Promise) {
|
||||
return Promise.try(() => { throw new Error('Not implemented') })
|
||||
return Promise.reject(new Error('Not implemented'))
|
||||
}
|
@ -1,10 +1,10 @@
|
||||
// Adds a dockerImageId column to the image table to identify images downloaded with deltas
|
||||
exports.up = function (knex, Promise) {
|
||||
return knex.schema.table('image', (t) => {
|
||||
t.string('dockerImageId')
|
||||
})
|
||||
t.string('dockerImageId')
|
||||
})
|
||||
}
|
||||
|
||||
exports.down = function(knex, Promise) {
|
||||
return Promise.try(() => { throw new Error('Not implemented') })
|
||||
return Promise.reject(new Error('Not implemented'))
|
||||
}
|
@ -8,6 +8,7 @@ fs = Promise.promisifyAll(require('fs'))
|
||||
constants = require './lib/constants'
|
||||
{ checkTruthy } = require './lib/validation'
|
||||
blink = require './lib/blink'
|
||||
{ EEXIST } = require './lib/errors'
|
||||
|
||||
networkPattern =
|
||||
blinks: 4
|
||||
@ -40,9 +41,6 @@ vpnStatusInotifyCallback = ->
|
||||
.catch ->
|
||||
pauseConnectivityCheck = false
|
||||
|
||||
# Use the following to catch EEXIST errors
|
||||
EEXIST = (err) -> err.code is 'EEXIST'
|
||||
|
||||
exports.startConnectivityCheck = _.once (apiEndpoint, enable, onChangeCallback) ->
|
||||
exports.enableConnectivityCheck(enable)
|
||||
if !apiEndpoint?
|
||||
@ -62,8 +60,7 @@ exports.startConnectivityCheck = _.once (apiEndpoint, enable, onChangeCallback)
|
||||
port: parsedUrl.port ? (if parsedUrl.protocol is 'https:' then 443 else 80)
|
||||
interval: 10 * 1000
|
||||
(connected) ->
|
||||
if onChangeCallback?
|
||||
onChangeCallback(connected)
|
||||
onChangeCallback?(connected)
|
||||
if connected
|
||||
console.log('Internet Connectivity: OK')
|
||||
blink.pattern.stop()
|
||||
@ -91,9 +88,9 @@ exports.getIPAddresses = ->
|
||||
# - the docker network for the supervisor API (supervisor0)
|
||||
# - custom docker network bridges (br- + 12 hex characters)
|
||||
_.flatten(_.map(_.omitBy(os.networkInterfaces(), (interfaceFields, interfaceName) ->
|
||||
/^(balena[0-9]+)|(docker[0-9]+)|(rce[0-9]+)|(tun[0-9]+)|(resin-vpn)|(lo)|(resin-dns)|(supervisor0)|(br-[0-9a-f]{12})$/.test(interfaceName))
|
||||
/^(?:balena|docker|rce|tun)[0-9]+|tun[0-9]+|resin-vpn|lo|resin-dns|supervisor0|br-[0-9a-f]{12}$/.test(interfaceName))
|
||||
, (validInterfaces) ->
|
||||
_.map(_.omitBy(validInterfaces, (a) -> a.family != 'IPv4' ), 'address'))
|
||||
_.map(_.pickBy(validInterfaces, family: 'IPv4'), 'address'))
|
||||
)
|
||||
|
||||
exports.startIPAddressUpdate = do ->
|
||||
|
@ -4,7 +4,7 @@ express = require 'express'
|
||||
fs = Promise.promisifyAll require 'fs'
|
||||
{ request } = require './lib/request'
|
||||
constants = require './lib/constants'
|
||||
{ checkInt } = require './lib/validation'
|
||||
{ checkInt, validStringOrUndefined, validObjectOrUndefined } = require './lib/validation'
|
||||
path = require 'path'
|
||||
mkdirp = Promise.promisify(require('mkdirp'))
|
||||
bodyParser = require 'body-parser'
|
||||
@ -22,12 +22,6 @@ parseDeviceFields = (device) ->
|
||||
device.targetEnvironment = JSON.parse(device.targetEnvironment ? '{}')
|
||||
return _.omit(device, 'markedForDeletion', 'logs_channel')
|
||||
|
||||
# TODO move to lib/validation
|
||||
validStringOrUndefined = (s) ->
|
||||
_.isUndefined(s) or !_.isEmpty(s)
|
||||
validObjectOrUndefined = (o) ->
|
||||
_.isUndefined(o) or _.isObject(o)
|
||||
|
||||
tarDirectory = (appId) ->
|
||||
return "/data/dependent-assets/#{appId}"
|
||||
|
||||
@ -54,11 +48,9 @@ cleanupTars = (appId, commit) ->
|
||||
.catchReturn([])
|
||||
.then (files) ->
|
||||
if fileToKeep?
|
||||
files = _.filter files, (file) ->
|
||||
return file isnt fileToKeep
|
||||
files = _.reject(files, fileToKeep)
|
||||
Promise.map files, (file) ->
|
||||
if !fileToKeep? or (file isnt fileToKeep)
|
||||
fs.unlinkAsync(path.join(dir, file))
|
||||
fs.unlinkAsync(path.join(dir, file))
|
||||
|
||||
formatTargetAsState = (device) ->
|
||||
return {
|
||||
@ -76,168 +68,168 @@ formatCurrentAsState = (device) ->
|
||||
config: device.config
|
||||
}
|
||||
|
||||
class ProxyvisorRouter
|
||||
constructor: (@proxyvisor) ->
|
||||
{ @config, @logger, @db, @docker } = @proxyvisor
|
||||
@router = express.Router()
|
||||
@router.use(bodyParser.urlencoded(extended: true))
|
||||
@router.use(bodyParser.json())
|
||||
@router.get '/v1/devices', (req, res) =>
|
||||
@db.models('dependentDevice').select()
|
||||
.map(parseDeviceFields)
|
||||
.then (devices) ->
|
||||
res.json(devices)
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
createProxyvisorRouter = (proxyvisor) ->
|
||||
{ db } = proxyvisor
|
||||
router = express.Router()
|
||||
router.use(bodyParser.urlencoded(extended: true))
|
||||
router.use(bodyParser.json())
|
||||
router.get '/v1/devices', (req, res) ->
|
||||
db.models('dependentDevice').select()
|
||||
.map(parseDeviceFields)
|
||||
.then (devices) ->
|
||||
res.json(devices)
|
||||
.catch (err) ->
|
||||
res.status(503).send(err?.message or err or 'Unknown error')
|
||||
|
||||
@router.post '/v1/devices', (req, res) =>
|
||||
{ appId, device_type } = req.body
|
||||
router.post '/v1/devices', (req, res) ->
|
||||
{ appId, device_type } = req.body
|
||||
|
||||
if !appId? or _.isNaN(parseInt(appId)) or parseInt(appId) <= 0
|
||||
res.status(400).send('appId must be a positive integer')
|
||||
if !appId? or _.isNaN(parseInt(appId)) or parseInt(appId) <= 0
|
||||
res.status(400).send('appId must be a positive integer')
|
||||
return
|
||||
device_type = 'generic' if !device_type?
|
||||
d =
|
||||
belongs_to__application: req.body.appId
|
||||
device_type: device_type
|
||||
proxyvisor.apiBinder.provisionDependentDevice(d)
|
||||
.then (dev) ->
|
||||
# If the response has id: null then something was wrong in the request
|
||||
# but we don't know precisely what.
|
||||
if !dev.id?
|
||||
res.status(400).send('Provisioning failed, invalid appId or credentials')
|
||||
return
|
||||
device_type = 'generic' if !device_type?
|
||||
d =
|
||||
belongs_to__application: req.body.appId
|
||||
device_type: device_type
|
||||
@proxyvisor.apiBinder.provisionDependentDevice(d)
|
||||
.then (dev) =>
|
||||
# If the response has id: null then something was wrong in the request
|
||||
# but we don't know precisely what.
|
||||
if !dev.id?
|
||||
res.status(400).send('Provisioning failed, invalid appId or credentials')
|
||||
return
|
||||
deviceForDB = {
|
||||
uuid: dev.uuid
|
||||
appId
|
||||
device_type: dev.device_type
|
||||
deviceId: dev.id
|
||||
name: dev.name
|
||||
status: dev.status
|
||||
logs_channel: dev.logs_channel
|
||||
}
|
||||
@db.models('dependentDevice').insert(deviceForDB)
|
||||
.then ->
|
||||
res.status(201).send(dev)
|
||||
.catch (err) ->
|
||||
console.error("Error on #{req.method} #{url.parse(req.url).pathname}", err, err.stack)
|
||||
res.status(503).send(err?.message or err or 'Unknown error')

@router.get '/v1/devices/:uuid', (req, res) =>
uuid = req.params.uuid
@db.models('dependentDevice').select().where({ uuid })
.then ([ device ]) ->
return res.status(404).send('Device not found') if !device?
return res.status(410).send('Device deleted') if device.markedForDeletion
res.json(parseDeviceFields(device))
.catch (err) ->
console.error("Error on #{req.method} #{url.parse(req.url).pathname}", err, err.stack)
res.status(503).send(err?.message or err or 'Unknown error')

@router.post '/v1/devices/:uuid/logs', (req, res) =>
uuid = req.params.uuid
m = {
message: req.body.message
timestamp: req.body.timestamp or Date.now()
deviceForDB = {
uuid: dev.uuid
appId
device_type: dev.device_type
deviceId: dev.id
name: dev.name
status: dev.status
logs_channel: dev.logs_channel
}
m.isSystem = req.body.isSystem if req.body.isSystem?
db.models('dependentDevice').insert(deviceForDB)
.then ->
res.status(201).send(dev)
.catch (err) ->
console.error("Error on #{req.method} #{url.parse(req.url).pathname}", err, err.stack)
res.status(503).send(err?.message or err or 'Unknown error')

@db.models('dependentDevice').select().where({ uuid })
.then ([ device ]) =>
return res.status(404).send('Device not found') if !device?
return res.status(410).send('Device deleted') if device.markedForDeletion
@logger.log(m, { channel: "device-#{device.logs_channel}-logs" })
res.status(202).send('OK')
.catch (err) ->
console.error("Error on #{req.method} #{url.parse(req.url).pathname}", err, err.stack)
res.status(503).send(err?.message or err or 'Unknown error')
router.get '/v1/devices/:uuid', (req, res) ->
uuid = req.params.uuid
db.models('dependentDevice').select().where({ uuid })
.then ([ device ]) ->
return res.status(404).send('Device not found') if !device?
return res.status(410).send('Device deleted') if device.markedForDeletion
res.json(parseDeviceFields(device))
.catch (err) ->
console.error("Error on #{req.method} #{url.parse(req.url).pathname}", err, err.stack)
res.status(503).send(err?.message or err or 'Unknown error')

@router.put '/v1/devices/:uuid', (req, res) =>
uuid = req.params.uuid
{ status, is_online, commit, releaseId, environment, config } = req.body
validateDeviceFields = ->
if isDefined(is_online) and !_.isBoolean(is_online)
return 'is_online must be a boolean'
if !validStringOrUndefined(status)
return 'status must be a non-empty string'
if !validStringOrUndefined(commit)
return 'commit must be a non-empty string'
if !validStringOrUndefined(releaseId)
return 'releaseId must be a non-empty string'
if !validObjectOrUndefined(environment)
return 'environment must be an object'
if !validObjectOrUndefined(config)
return 'config must be an object'
return null
requestError = validateDeviceFields()
if requestError?
res.status(400).send(requestError)
return
router.post '/v1/devices/:uuid/logs', (req, res) ->
uuid = req.params.uuid
m = {
message: req.body.message
timestamp: req.body.timestamp or Date.now()
}
m.isSystem = req.body.isSystem if req.body.isSystem?

environment = JSON.stringify(environment) if isDefined(environment)
config = JSON.stringify(config) if isDefined(config)
db.models('dependentDevice').select().where({ uuid })
.then ([ device ]) ->
return res.status(404).send('Device not found') if !device?
return res.status(410).send('Device deleted') if device.markedForDeletion
proxyvisor.logger.log(m, { channel: "device-#{device.logs_channel}-logs" })
res.status(202).send('OK')
.catch (err) ->
console.error("Error on #{req.method} #{url.parse(req.url).pathname}", err, err.stack)
res.status(503).send(err?.message or err or 'Unknown error')

fieldsToUpdateOnDB = _.pickBy({ status, is_online, commit, releaseId, config, environment }, isDefined)
fieldsToUpdateOnAPI = _.pick(fieldsToUpdateOnDB, 'status', 'is_online', 'commit', 'releaseId')
router.put '/v1/devices/:uuid', (req, res) ->
uuid = req.params.uuid
{ status, is_online, commit, releaseId, environment, config } = req.body
validateDeviceFields = ->
if isDefined(is_online) and !_.isBoolean(is_online)
return 'is_online must be a boolean'
if !validStringOrUndefined(status)
return 'status must be a non-empty string'
if !validStringOrUndefined(commit)
return 'commit must be a non-empty string'
if !validStringOrUndefined(releaseId)
return 'releaseId must be a non-empty string'
if !validObjectOrUndefined(environment)
return 'environment must be an object'
if !validObjectOrUndefined(config)
return 'config must be an object'
return null
requestError = validateDeviceFields()
if requestError?
res.status(400).send(requestError)
return

if _.isEmpty(fieldsToUpdateOnDB)
res.status(400).send('At least one device attribute must be updated')
return
environment = JSON.stringify(environment) if isDefined(environment)
config = JSON.stringify(config) if isDefined(config)

@db.models('dependentDevice').select().where({ uuid })
.then ([ device ]) =>
return res.status(404).send('Device not found') if !device?
return res.status(410).send('Device deleted') if device.markedForDeletion
throw new Error('Device is invalid') if !device.deviceId?
Promise.try =>
if !_.isEmpty(fieldsToUpdateOnAPI)
@proxyvisor.apiBinder.patchDevice(device.deviceId, fieldsToUpdateOnAPI)
.then =>
@db.models('dependentDevice').update(fieldsToUpdateOnDB).where({ uuid })
.then =>
@db.models('dependentDevice').select().where({ uuid })
.then ([ device ]) ->
res.json(parseDeviceFields(device))
.catch (err) ->
console.error("Error on #{req.method} #{url.parse(req.url).pathname}", err, err.stack)
res.status(503).send(err?.message or err or 'Unknown error')
fieldsToUpdateOnDB = _.pickBy({ status, is_online, commit, releaseId, config, environment }, isDefined)
fieldsToUpdateOnAPI = _.pick(fieldsToUpdateOnDB, 'status', 'is_online', 'commit', 'releaseId')

@router.get '/v1/dependent-apps/:appId/assets/:commit', (req, res) =>
@db.models('dependentApp').select().where(_.pick(req.params, 'appId', 'commit'))
.then ([ app ]) =>
return res.status(404).send('Not found') if !app
dest = tarPath(app.appId, app.commit)
fs.lstatAsync(dest)
.catch =>
Promise.using @docker.imageRootDirMounted(app.image), (rootDir) ->
getTarArchive(rootDir + '/assets', dest)
.then ->
res.sendFile(dest)
.catch (err) ->
console.error("Error on #{req.method} #{url.parse(req.url).pathname}", err, err.stack)
res.status(503).send(err?.message or err or 'Unknown error')
if _.isEmpty(fieldsToUpdateOnDB)
res.status(400).send('At least one device attribute must be updated')
return

@router.get '/v1/dependent-apps', (req, res) =>
@db.models('dependentApp').select()
.map (app) ->
return {
id: parseInt(app.appId)
commit: app.commit
name: app.name
config: JSON.parse(app.config ? '{}')
}
.then (apps) ->
res.json(apps)
.catch (err) ->
console.error("Error on #{req.method} #{url.parse(req.url).pathname}", err, err.stack)
res.status(503).send(err?.message or err or 'Unknown error')
db.models('dependentDevice').select().where({ uuid })
.then ([ device ]) ->
return res.status(404).send('Device not found') if !device?
return res.status(410).send('Device deleted') if device.markedForDeletion
throw new Error('Device is invalid') if !device.deviceId?
Promise.try ->
if !_.isEmpty(fieldsToUpdateOnAPI)
proxyvisor.apiBinder.patchDevice(device.deviceId, fieldsToUpdateOnAPI)
.then ->
db.models('dependentDevice').update(fieldsToUpdateOnDB).where({ uuid })
.then ->
db.models('dependentDevice').select().where({ uuid })
.then ([ device ]) ->
res.json(parseDeviceFields(device))
.catch (err) ->
console.error("Error on #{req.method} #{url.parse(req.url).pathname}", err, err.stack)
res.status(503).send(err?.message or err or 'Unknown error')

router.get '/v1/dependent-apps/:appId/assets/:commit', (req, res) ->
db.models('dependentApp').select().where(_.pick(req.params, 'appId', 'commit'))
.then ([ app ]) ->
return res.status(404).send('Not found') if !app
dest = tarPath(app.appId, app.commit)
fs.lstatAsync(dest)
.catch ->
Promise.using proxyvisor.docker.imageRootDirMounted(app.image), (rootDir) ->
getTarArchive(rootDir + '/assets', dest)
.then ->
res.sendFile(dest)
.catch (err) ->
console.error("Error on #{req.method} #{url.parse(req.url).pathname}", err, err.stack)
res.status(503).send(err?.message or err or 'Unknown error')

router.get '/v1/dependent-apps', (req, res) ->
db.models('dependentApp').select()
.map (app) ->
return {
id: parseInt(app.appId)
commit: app.commit
name: app.name
config: JSON.parse(app.config ? '{}')
}
.then (apps) ->
res.json(apps)
.catch (err) ->
console.error("Error on #{req.method} #{url.parse(req.url).pathname}", err, err.stack)
res.status(503).send(err?.message or err or 'Unknown error')

return router
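The refactor above drops the ProxyvisorRouter wrapper class in favour of a factory function that closes over its dependencies and returns a plain express.Router. A minimal standalone sketch of the same pattern, with a hypothetical service and route:

express = require 'express'

# The factory receives its collaborators as an argument and returns a
# configured router; the caller keeps only the router instance.
createStatusRouter = (service) ->
	router = express.Router()
	router.get '/v1/status', (req, res) ->
		res.json(healthy: service.isHealthy())
	return router

# Usage inside an owning class (sketch):
#   @router = createStatusRouter(this)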

module.exports = class Proxyvisor
constructor: ({ @config, @logger, @db, @docker, @images, @applications }) ->
@acknowledgedState = {}
@lastRequestForDevice = {}
@_router = new ProxyvisorRouter(this)
@router = @_router.router
@router = createProxyvisorRouter(this)
@actionExecutors = {
updateDependentTargets: (step) =>
@config.getMany([ 'currentApiKey', 'apiTimeout' ])
@ -460,7 +452,7 @@ module.exports = class Proxyvisor
return images

_imageAvailable: (image, available) ->
_.some(available, (availableImage) -> availableImage.name == image)
_.some(available, name: image)
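The change above swaps the explicit callback for lodash's object (_.matches) shorthand. A minimal standalone sketch, with hypothetical image names, showing the two forms give the same answer:

_ = require 'lodash'

available = [ { name: 'registry/app:1' }, { name: 'registry/app:2' } ]

# Explicit callback and _.matches shorthand are equivalent here.
_.some available, (img) -> img.name == 'registry/app:1'   # true
_.some available, name: 'registry/app:1'                  # true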

_getHookStep: (currentDevices, appId) =>
hookStep = {
@ -572,7 +564,7 @@ module.exports = class Proxyvisor
for appId in allAppIds
devicesForApp = (devices) ->
_.filter devices, (d) ->
_.includes(_.keys(d.apps), appId)
_.has(d.apps, appId)
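Here _.has replaces the _.includes(_.keys(...)) detour. A minimal sketch of the equivalence for string keys, with a hypothetical app map:

_ = require 'lodash'

apps = { '1234': { commit: 'deadbeef' } }

_.includes(_.keys(apps), '1234')   # true
_.has(apps, '1234')                # true, without building the keys array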

currentDevices = devicesForApp(current.dependent.devices)
targetDevices = devicesForApp(target.dependent.devices)
@ -590,8 +582,7 @@ module.exports = class Proxyvisor
.then (parentApp) =>
Promise.map parentApp?.services ? [], (service) =>
@docker.getImageEnv(service.image)
.then (imageEnv) ->
return imageEnv.RESIN_DEPENDENT_DEVICES_HOOK_ADDRESS
.get('RESIN_DEPENDENT_DEVICES_HOOK_ADDRESS')
.then (imageHookAddresses) ->
for addr in imageHookAddresses
return addr if addr?
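The .then handler that only read a property is condensed to Bluebird's .get shorthand above. A minimal standalone sketch of the same idiom, with a hypothetical environment object:

Promise = require 'bluebird'

# .get(key) is Bluebird shorthand for .then (value) -> value[key]
Promise.resolve({ HOOK_ADDRESS: 'http://example.local/hook' })
.get('HOOK_ADDRESS')
.then (addr) ->
	console.log(addr)   # 'http://example.local/hook'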

@ -7,6 +7,8 @@ APIBinder = require './api-binder'
DeviceState = require './device-state'
SupervisorAPI = require './supervisor-api'

constants = require './lib/constants'

startupConfigFields = [
'uuid'
'listenPort'
@ -44,7 +46,7 @@ module.exports = class Supervisor extends EventEmitter
.then =>
# initialize API
console.log('Starting API server')
@api.listen(@config.constants.allowedInterfaces, conf.listenPort, conf.apiTimeout)
@api.listen(constants.allowedInterfaces, conf.listenPort, conf.apiTimeout)
@deviceState.on('shutdown', => @api.stop())
.then =>
@apiBinder.init() # this will first try to provision if it's a new device