2017-11-01 07:55:36 +00:00
|
|
|
Promise = require 'bluebird'
|
|
|
|
_ = require 'lodash'
|
|
|
|
EventEmitter = require 'events'
|
|
|
|
express = require 'express'
|
|
|
|
bodyParser = require 'body-parser'
|
|
|
|
|
|
|
|
constants = require './lib/constants'
|
|
|
|
|
|
|
|
process.env.DOCKER_HOST ?= "unix://#{constants.dockerSocket}"
|
|
|
|
Docker = require './lib/docker-utils'
|
|
|
|
updateLock = require './lib/update-lock'
|
|
|
|
{ checkTruthy, checkInt, checkString } = require './lib/validation'
|
2017-12-15 05:15:59 +00:00
|
|
|
{ NotFoundError } = require './lib/errors'
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
ServiceManager = require './compose/service-manager'
|
|
|
|
Service = require './compose/service'
|
|
|
|
Images = require './compose/images'
|
|
|
|
Networks = require './compose/networks'
|
|
|
|
Volumes = require './compose/volumes'
|
|
|
|
|
|
|
|
Proxyvisor = require './proxyvisor'
|
|
|
|
|
|
|
|
# Build a step object describing `action` applied to `serviceId`.
# `current`/`target` are the service states before/after the step; `options`
# is only attached when provided, so steps without options stay minimal.
serviceAction = (action, serviceId, current, target, options) ->
	step = { action, serviceId, current, target }
	step.options = options if options?
	return step
|
|
|
|
|
|
|
|
# TODO: move this to an Image class?
|
|
|
|
# Project a service object down to the image-metadata record used by the
# Images module (name/app/service identifiers plus dependent: 0, since
# services built here are never dependent-device images).
imageForService = (service) ->
	{ imageName, appId, serviceId, serviceName, imageId, releaseId } = service
	return {
		name: imageName
		appId
		serviceId
		serviceName
		imageId
		releaseId
		dependent: 0
	}
|
|
|
|
|
|
|
|
# Build a 'fetch' step for the image backing `service`.
fetchAction = (service) ->
	step =
		action: 'fetch'
		image: imageForService(service)
		serviceId: service.serviceId
	return step
|
2018-01-19 19:31:02 +00:00
|
|
|
# TODO: implement additional v2 endpoints
|
2017-11-01 07:55:36 +00:00
|
|
|
# v1 endpoints only work for single-container apps as they assume the app has a single service.
|
|
|
|
# HTTP API for the application manager.
# v1 endpoints assume single-container apps (they operate on the app's only
# service); v2 endpoints address individual services by imageId.
# Fixes: the v2 start-service "App not found" message previously said
# "stop-service" (copy-paste error); the three v2 per-service handlers and the
# two v1 stop/start handlers are now backed by shared helpers.
class ApplicationManagerRouter
	constructor: (@applications) ->
		{ @proxyvisor, @eventTracker, @deviceState, @_lockingIfNecessary, @logger } = @applications
		@router = express.Router()
		@router.use(bodyParser.urlencoded(extended: true))
		@router.use(bodyParser.json())

		# Restart an app by applying an intermediate target with no services and
		# then re-applying the original state. Normal target-state application is
		# paused while this runs and re-triggered afterwards.
		doRestart = (appId, force) =>
			@_lockingIfNecessary appId, { force }, =>
				@deviceState.getCurrentForComparison()
				.then (currentState) =>
					app = currentState.local.apps[appId]
					stoppedApp = _.cloneDeep(app)
					stoppedApp.services = []
					currentState.local.apps[appId] = stoppedApp
					@deviceState.pausingApply =>
						@deviceState.applyIntermediateTarget(currentState, { skipLock: true })
						.then =>
							currentState.local.apps[appId] = app
							@deviceState.applyIntermediateTarget(currentState, { skipLock: true })
					.finally =>
						@deviceState.triggerApplyTarget()

		# Like doRestart, but the intermediate state also drops the app's volumes,
		# so all data is recreated when the original state is re-applied.
		doPurge = (appId, force) =>
			@logger.logSystemMessage("Purging data for app #{appId}", { appId }, 'Purge data')
			@_lockingIfNecessary appId, { force }, =>
				@deviceState.getCurrentForComparison()
				.then (currentState) =>
					app = currentState.local.apps[appId]
					purgedApp = _.cloneDeep(app)
					purgedApp.services = []
					purgedApp.volumes = {}
					currentState.local.apps[appId] = purgedApp
					@deviceState.pausingApply =>
						@deviceState.applyIntermediateTarget(currentState, { skipLock: true })
						.then =>
							currentState.local.apps[appId] = app
							@deviceState.applyIntermediateTarget(currentState, { skipLock: true })
					.finally =>
						@deviceState.triggerApplyTarget()
			.tap =>
				@logger.logSystemMessage('Purged data', { appId }, 'Purge data success')
			.catch (err) =>
				@logger.logSystemMessage("Error purging data: #{err}", { appId, error: err }, 'Purge data error')
				throw err

		# Shared handler for the v1 stop/start endpoints. Only the direction
		# (running true/false) and the step's current/target differ.
		v1StopOrStart = (req, res, action) =>
			appId = checkInt(req.params.appId)
			force = checkTruthy(req.body.force)
			if !appId?
				return res.status(400).send('Missing app id')
			@applications.getCurrentApp(appId)
			.then (app) =>
				service = app?.services?[0]
				if !service?
					return res.status(400).send('App not found')
				if app.services.length > 1
					return res.status(400).send('Some v1 endpoints are only allowed on single-container apps')
				@applications.setTargetVolatileForService(service.imageId, running: action == 'start')
				step =
					if action == 'start'
						serviceAction('start', service.serviceId, null, service)
					else
						serviceAction('stop', service.serviceId, service)
				@applications.executeStepAction(step, { force })
				.then (affectedService) ->
					res.status(200).json({ containerId: affectedService.containerId })
			.catch (err) ->
				res.status(503).send(err?.message or err or 'Unknown error')

		# Shared handler for the v2 per-service endpoints (restart/stop/start).
		# `running` (when given) sets the volatile target for the service;
		# `stepOptions` are forwarded on the step (e.g. { wait: true } for stop).
		v2ServiceAction = (req, res, { action, running, stepOptions }) =>
			{ imageId, force } = req.body
			{ appId } = req.params
			@_lockingIfNecessary appId, { force }, =>
				@applications.getCurrentApp(appId)
				.then (app) =>
					if !app?
						errMsg = "App not found: an app needs to be installed for #{action}-service to work.
							If you've recently moved this device from another app,
							please push an app and wait for it to be installed first."
						return res.status(404).send(errMsg)
					service = _.find(app.services, (s) -> s.imageId == imageId)
					if !service?
						return res.status(404).send("Service not found, a container must exist for service #{action} to work.")
					if running?
						@applications.setTargetVolatileForService(service.imageId, { running })
					@applications.executeStepAction(serviceAction(action, service.serviceId, service, service, stepOptions), { skipLock: true })
					.then ->
						res.status(200).send('OK')
				.catch (err) ->
					res.status(503).send(err?.message or err or 'Unknown error')

		@router.post '/v1/restart', (req, res) =>
			appId = checkInt(req.body.appId)
			force = checkTruthy(req.body.force)
			@eventTracker.track('Restart container (v1)', { appId })
			if !appId?
				return res.status(400).send('Missing app id')
			doRestart(appId, force)
			.then ->
				res.status(200).send('OK')
			.catch (err) ->
				res.status(503).send(err?.message or err or 'Unknown error')

		@router.post '/v1/apps/:appId/stop', (req, res) =>
			v1StopOrStart(req, res, 'stop')

		@router.post '/v1/apps/:appId/start', (req, res) =>
			v1StopOrStart(req, res, 'start')

		@router.get '/v1/apps/:appId', (req, res) =>
			appId = checkInt(req.params.appId)
			@eventTracker.track('GET app (v1)', appId)
			if !appId?
				return res.status(400).send('Missing app id')
			@applications.getCurrentApp(appId)
			.then (app) ->
				service = app?.services?[0]
				if !service?
					return res.status(400).send('App not found')
				if app.services.length > 1
					return res.status(400).send('Some v1 endpoints are only allowed on single-container apps')
				# Don't return data that will be of no use to the user
				appToSend = {
					appId
					containerId: service.containerId
					env: _.omit(service.environment, constants.privateAppEnvVars)
					commit: service.commit
					releaseId: app.releaseId
					imageId: service.image
				}
				res.json(appToSend)
			.catch (err) ->
				res.status(503).send(err?.message or err or 'Unknown error')

		@router.post '/v1/purge', (req, res) ->
			appId = checkInt(req.body.appId)
			force = checkTruthy(req.body.force)
			if !appId?
				errMsg = "App not found: an app needs to be installed for purge to work.
					If you've recently moved this device from another app,
					please push an app and wait for it to be installed first."
				return res.status(400).send(errMsg)
			doPurge(appId, force)
			.then ->
				res.status(200).json(Data: 'OK', Error: '')
			.catch (err) ->
				res.status(503).send(err?.message or err or 'Unknown error')

		@router.post '/v2/applications/:appId/purge', (req, res) ->
			{ force } = req.body
			{ appId } = req.params
			doPurge(appId, force)
			.then ->
				res.status(200).send('OK')
			.catch (err) ->
				res.status(503).send(err?.message or err or 'Unknown error')

		@router.post '/v2/applications/:appId/restart-service', (req, res) =>
			v2ServiceAction(req, res, { action: 'restart' })

		@router.post '/v2/applications/:appId/stop-service', (req, res) =>
			# wait: true makes the stop synchronous with the container exiting.
			v2ServiceAction(req, res, { action: 'stop', running: false, stepOptions: { wait: true } })

		@router.post '/v2/applications/:appId/start-service', (req, res) =>
			v2ServiceAction(req, res, { action: 'start', running: true })

		@router.post '/v2/applications/:appId/restart', (req, res) ->
			{ force } = req.body
			{ appId } = req.params
			doRestart(appId, force)
			.then ->
				res.status(200).send('OK')
			.catch (err) ->
				res.status(503).send(err?.message or err or 'Unknown error')

		# Any route we don't handle is given to the proxyvisor (dependent devices).
		@router.use(@proxyvisor.router)
|
|
|
|
|
|
|
|
module.exports = class ApplicationManager extends EventEmitter
|
2018-01-19 19:31:02 +00:00
|
|
|
constructor: ({ @logger, @config, @db, @eventTracker, @deviceState }) ->
	# Compose the subsystems; ServiceManager shares the Images instance so
	# container operations can consult image state.
	@docker = new Docker()
	@images = new Images({ @docker, @logger, @db })
	@services = new ServiceManager({ @docker, @logger, @images, @config })
	@networks = new Networks({ @docker, @logger })
	@volumes = new Volumes({ @docker, @logger })
	@proxyvisor = new Proxyvisor({ @config, @logger, @db, @docker, @images, applications: this })
	# Counters exposed for health/state reporting.
	@timeSpentFetching = 0
	@fetchesInProgress = 0
	# Per-imageId target overrides (e.g. running: false) set via the HTTP API.
	@_targetVolatilePerImageId = {}
	# Table of step-action implementations. Actions that affect a running
	# service take the per-app update lock unless the step or caller opts out
	# via skipLock; force bypasses an existing lock.
	@actionExecutors = {
		stop: (step, { force = false, skipLock = false } = {}) =>
			@_lockingIfNecessary step.current.appId, { force, skipLock: skipLock or step.options?.skipLock }, =>
				# Container is kept (removeContainer: false) so a later 'start'
				# can reuse it; wait makes the kill synchronous with exit.
				wait = step.options?.wait ? false
				@services.kill(step.current, { removeContainer: false, wait })
		kill: (step, { force = false, skipLock = false } = {}) =>
			@_lockingIfNecessary step.current.appId, { force, skipLock: skipLock or step.options?.skipLock }, =>
				@services.kill(step.current)
				.then =>
					# Optionally also delete the backing image (used on app removal).
					if step.options?.removeImage
						@images.removeByDockerId(step.current.image)
		updateMetadata: (step) =>
			@services.updateMetadata(step.current, step.target)
		restart: (step, { force = false, skipLock = false } = {}) =>
			@_lockingIfNecessary step.current.appId, { force, skipLock: skipLock or step.options?.skipLock }, =>
				Promise.try =>
					@services.kill(step.current, { wait: true })
				.then =>
					@services.start(step.target)
		stopAll: (step, { force = false, skipLock = false } = {}) =>
			@stopAll({ force, skipLock })
		start: (step) =>
			@services.start(step.target)
		handover: (step, { force = false, skipLock = false } = {}) =>
			@_lockingIfNecessary step.current.appId, { force, skipLock: skipLock or step.options?.skipLock }, =>
				@services.handover(step.current, step.target)
		fetch: (step) =>
			startTime = process.hrtime()
			@fetchesInProgress += 1
			Promise.join(
				@config.get('fetchOptions')
				@images.getAvailable()
				(opts, availableImages) =>
					opts.deltaSource = @bestDeltaSource(step.image, availableImages)
					@images.triggerFetch step.image, opts, (success) =>
						@fetchesInProgress -= 1
						# hrtime(startTime)[0] is whole seconds spent fetching.
						@timeSpentFetching += process.hrtime(startTime)[0]
						if success
							# snake_case key: reported upstream in API format.
							@reportCurrentState(update_downloaded: true)
			)
		removeImage: (step) =>
			@images.remove(step.image)
		saveImage: (step) =>
			@images.save(step.image)
		cleanup: (step) =>
			@images.cleanup()
		createNetworkOrVolume: (step) =>
			model = if step.model is 'volume' then @volumes else @networks
			model.create(step.target)
		removeNetworkOrVolume: (step) =>
			model = if step.model is 'volume' then @volumes else @networks
			model.remove(step.current)
		ensureSupervisorNetwork: =>
			@networks.ensureSupervisorNetwork()
	}
	# Steps the proxyvisor can execute are also valid for this manager.
	@validActions = _.keys(@actionExecutors).concat(@proxyvisor.validActions)
	@_router = new ApplicationManagerRouter(this)
	@router = @_router.router

	# Re-emit subsystem state changes as our own 'change' events.
	@images.on('change', @reportCurrentState)
	@services.on('change', @reportCurrentState)
|
|
|
|
|
|
|
|
# Expose the module-level step-building helpers on the class so other
# modules (e.g. device-state) can build steps without importing this file's
# internals separately.
serviceAction: serviceAction
imageForService: imageForService
fetchAction: fetchAction
|
|
|
|
|
|
|
|
# Bound method (fat arrow) so it can be passed directly as an event
# listener; re-emits partial state reports on our own 'change' event.
reportCurrentState: (data) =>
	@emit 'change', data
|
|
|
|
|
|
|
|
# One-time startup sequence: prune the image database, attach to containers
# that are already running, then start listening to engine events.
init: =>
	@images.cleanupDatabase()
		.then(=> @services.attachToRunning())
		.then(=> @services.listenToEvents())
|
|
|
|
|
|
|
|
# Returns the status of applications and their services
|
2017-12-08 01:18:21 +00:00
|
|
|
# TODO: discuss: I think commit could be deduced by the UI looking at the image_installs on the API?
|
2017-11-01 07:55:36 +00:00
|
|
|
getStatus: =>
	# Aggregate the status of running services, available/downloading images,
	# and the target apps table into the { local, dependent[, commit] } shape
	# reported upstream.
	Promise.join(
		@services.getStatus()
		@images.getStatus()
		@db.models('app').select([ 'appId', 'releaseId', 'commit' ])
		(services, images, targetApps) ->
			apps = {}
			dependent = {}
			# Tracks whether every service shares one releaseId: null = none seen
			# yet, false = mixed releases, otherwise the common releaseId.
			releaseId = null
			# We iterate over the current running services and add them to the current state
			# of the app they belong to.
			for service in services
				appId = service.appId
				apps[appId] ?= {}
				apps[appId].services ?= {}
				# We only send commit if all services have the same release, and it matches the target release
				if !releaseId?
					releaseId = service.releaseId
				else if releaseId != service.releaseId
					releaseId = false
				if !apps[appId].services[service.imageId]?
					apps[appId].services[service.imageId] = _.pick(service, [ 'status', 'releaseId' ])
					apps[appId].services[service.imageId].download_progress = null
				else
					# There's two containers with the same imageId, so this has to be a handover
					previousReleaseId = apps[appId].services[service.imageId].releaseId
					apps[appId].services[service.imageId].releaseId = Math.max(previousReleaseId, service.releaseId)
					apps[appId].services[service.imageId].status = 'Handing over'
			# Images without a running container contribute download status; ?= keeps
			# the service entry created above if one exists.
			for image in images
				appId = image.appId
				if !image.dependent
					apps[appId] ?= {}
					apps[appId].services ?= {}
					apps[appId].services[image.imageId] ?= _.pick(image, [ 'status', 'releaseId' ])
					apps[appId].services[image.imageId].download_progress = image.downloadProgress
				else
					# Dependent (proxyvisor-managed) images are reported separately.
					dependent[appId] ?= {}
					dependent[appId].images ?= {}
					dependent[appId].images[image.imageId] = _.pick(image, [ 'status' ])
					dependent[appId].images[image.imageId].download_progress = image.downloadProgress

			obj = { local: apps, dependent }
			# Only attach commit when all services run the target release.
			if releaseId and targetApps[0]?.releaseId == releaseId
				obj.commit = targetApps[0].commit
			return obj
	)
|
|
|
|
|
|
|
|
# Current state of dependent (proxyvisor-managed) apps/devices.
getDependentState: =>
	@proxyvisor.getCurrentStates()
|
|
|
|
|
|
|
|
# Group flat lists of services, networks and volumes (each carrying an appId)
# into a map of appId -> { appId, services: [], volumes: {}, networks: {} }.
# DRY: the default-entry literal was previously repeated in all three loops;
# it is now created in one place.
_buildApps: (services, networks, volumes) ->
	apps = {}
	# Get (or lazily create) the accumulator entry for an app.
	# `?=` evaluates to the (possibly pre-existing) entry.
	ensureApp = (appId) ->
		apps[appId] ?= { appId, services: [], volumes: {}, networks: {} }

	# We iterate over the current running services and add them to the current
	# state of the app they belong to.
	for service in services
		ensureApp(service.appId).services.push(service)

	for network in networks
		ensureApp(network.appId).networks[network.name] = network.config

	for volume in volumes
		ensureApp(volume.appId).volumes[volume.name] = volume.config

	return apps
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
# Snapshot everything currently on the engine, grouped per app, in the
# shape used for current-vs-target comparison.
getCurrentForComparison: =>
	buildAll = (svcs, nets, vols) =>
		@_buildApps(svcs, nets, vols)
	Promise.join(@services.getAll(), @networks.getAll(), @volumes.getAll(), buildAll)
|
|
|
|
|
|
|
|
# Like getCurrentForComparison, but scoped to one app; returns undefined
# when nothing belonging to appId is on the engine.
getCurrentApp: (appId) =>
	buildOne = (svcs, nets, vols) =>
		@_buildApps(svcs, nets, vols)[appId]
	Promise.join(
		@services.getAllByAppId(appId)
		@networks.getAllByAppId(appId)
		@volumes.getAllByAppId(appId)
		buildOne
	)
|
|
|
|
|
|
|
|
# Load the target state for one app from the database; resolves to
# undefined when the app has no target entry.
getTargetApp: (appId) =>
	@db.models('app').where({ appId }).select()
	.then ([ app ]) =>
		return if !app?
		@normaliseAndExtendAppFromDB(app)
|
|
|
|
|
|
|
|
# Compares current and target services and returns a list of service pairs to be updated/removed/installed.
|
|
|
|
# The returned list is an array of objects where the "current" and "target" properties define the update pair, and either can be null
|
|
|
|
# (in the case of an install or removal).
|
|
|
|
# Compares current and target services and returns { removePairs,
# installPairs, updatePairs }, each pair being { current, target, serviceId }
# where current or target may be null (removal or install respectively).
compareServicesForUpdate: (currentServices, targetServices) ->
	removePairs = []
	installPairs = []
	updatePairs = []
	targetServiceIds = _.map(targetServices, 'serviceId')
	# _.uniq: the same serviceId can have several containers (e.g. handover).
	currentServiceIds = _.uniq(_.map(currentServices, 'serviceId'))

	# Services running now but absent from the target: remove every container.
	toBeRemoved = _.difference(currentServiceIds, targetServiceIds)
	for serviceId in toBeRemoved
		servicesToRemove = _.filter(currentServices, (s) -> s.serviceId == serviceId)
		for service in servicesToRemove
			removePairs.push({
				current: service
				target: null
				serviceId
			})

	# Services in the target with no current container: install.
	toBeInstalled = _.difference(targetServiceIds, currentServiceIds)
	for serviceId in toBeInstalled
		serviceToInstall = _.find(targetServices, (s) -> s.serviceId == serviceId)
		if serviceToInstall?
			installPairs.push({
				current: null
				target: serviceToInstall
				serviceId
			})

	# Services present on both sides: pick the representative current container
	# and decide whether it needs updating.
	toBeMaybeUpdated = _.intersection(targetServiceIds, currentServiceIds)
	currentServicesPerId = {}
	targetServicesPerId = _.keyBy(targetServices, 'serviceId')
	for serviceId in toBeMaybeUpdated
		currentServiceContainers = _.filter currentServices, (service) ->
			return service.serviceId == serviceId
		if currentServiceContainers.length > 1
			# Keep only the newest container for this service.
			currentServicesPerId[serviceId] = _.maxBy(currentServiceContainers, 'createdAt')
			# All but the latest container for this service are spurious and should be removed
			for service in _.without(currentServiceContainers, currentServicesPerId[serviceId])
				removePairs.push({
					current: service
					target: null
					serviceId
				})
		else
			currentServicesPerId[serviceId] = currentServiceContainers[0]

	# A pair is an update when the current container is not equal to the
	# target service (Service#isEqual decides).
	needUpdate = _.filter toBeMaybeUpdated, (serviceId) ->
		return !currentServicesPerId[serviceId].isEqual(targetServicesPerId[serviceId])
	for serviceId in needUpdate
		updatePairs.push({
			current: currentServicesPerId[serviceId]
			target: targetServicesPerId[serviceId]
			serviceId
		})

	return { removePairs, installPairs, updatePairs }
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2017-12-15 07:04:25 +00:00
|
|
|
# Diff current vs. target networks or volumes (both are maps of name ->
# config) for one app. Returns pairs in the order: removed (target: null),
# installed (current: null), then changed (both sides set). `model` supplies
# isEqualConfig to decide whether an existing entity must be recreated.
_compareNetworksOrVolumesForUpdate: (model, { current, target }, appId) ->
	currentNames = _.keys(current)
	targetNames = _.keys(target)

	# Descriptors for one named entity on either side of the diff.
	asCurrent = (name) -> { name, appId, config: current[name] }
	asTarget = (name) -> { name, appId, config: target[name] }

	removed = _.map _.difference(currentNames, targetNames), (name) ->
		{ current: asCurrent(name), target: null }

	installed = _.map _.difference(targetNames, currentNames), (name) ->
		{ current: null, target: asTarget(name) }

	changedNames = _.filter _.intersection(targetNames, currentNames), (name) ->
		!model.isEqualConfig(current[name], target[name])
	changed = _.map changedNames, (name) ->
		{ current: asCurrent(name), target: asTarget(name) }

	return removed.concat(installed, changed)
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2017-12-15 07:04:25 +00:00
|
|
|
# Diff current vs. target networks for an app (delegates to the generic
# comparator with the networks model).
compareNetworksForUpdate: ({ current, target }, appId) =>
	@_compareNetworksOrVolumesForUpdate(@networks, { current, target }, appId)
|
|
|
|
|
|
|
|
# Diff current vs. target volumes for an app (delegates to the generic
# comparator with the volumes model).
compareVolumesForUpdate: ({ current, target }, appId) =>
	@_compareNetworksOrVolumesForUpdate(@volumes, { current, target }, appId)
|
|
|
|
|
2017-11-23 02:15:55 +00:00
|
|
|
# Checks if a service is using a network or volume that is about to be updated
# (i.e. appears as the `current` side of a network or volume pair). Resource
# names are namespaced as "<appId>_<name>". Returns true/false; a nullish
# service (e.g. a service being installed, with no current container) is false.
_hasCurrentNetworksOrVolumes: (service, networkPairs, volumePairs) ->
	if !service?
		return false
	# networkMode references a single network; compare against each changing pair
	hasNetwork = _.some networkPairs, (pair) ->
		"#{service.appId}_#{pair.current?.name}" == service.networkMode
	if hasNetwork
		return true
	# volumes are "source:dest[:mode]" strings; only the source name matters here
	hasVolume = _.some service.volumes, (volume) ->
		name = _.split(volume, ':')[0]
		_.some volumePairs, (pair) ->
			"#{service.appId}_#{pair.current?.name}" == name
	if hasVolume
		return true
	return false
|
|
|
|
|
|
|
|
# TODO: account for volumes-from, networks-from, links, etc
# TODO: support networks instead of only networkMode
# Decides whether `target` (a target Service) can be started now:
#  - none of its dependsOn services may still be pending install/update,
#  - its networkMode must not reference a network that is still being changed,
#  - none of its named volumes may reference a volume that is still being changed.
# Returns true when all dependencies are met.
_dependenciesMetForServiceStart: (target, networkPairs, volumePairs, pendingPairs) ->
	# for dependsOn, check no install or update pairs have that service
	dependencyUnmet = _.some target.dependsOn ? [], (dependency) ->
		_.find(pendingPairs, (pair) -> pair.target?.serviceName == dependency)?
	if dependencyUnmet
		return false
	# for networks and volumes, check no network pairs have that volume name
	if _.find(networkPairs, (pair) -> "#{target.appId}_#{pair.target?.name}" == target.networkMode)?
		return false
	volumeUnmet = _.some target.volumes, (volumeDefinition) ->
		[ sourceName, destName ] = volumeDefinition.split(':')
		if !destName? # If this is not a named volume, ignore it
			return false
		return _.find(volumePairs, (pair) -> "#{target.appId}_#{pair.target?.name}" == sourceName)?
	return !volumeUnmet
|
|
|
|
|
2017-11-23 02:15:55 +00:00
|
|
|
# Unless the update strategy requires an early kill (i.e. kill-then-download, delete-then-download), we only want
# to kill a service once the images for the services it depends on have been downloaded, so as to minimize
# downtime (but not block the killing too much, potentially causing a deadlock)
# Returns true when every service in target.dependsOn already has its image
# available (matched either by docker image id or by name via @images.isSameImage).
_dependenciesMetForServiceKill: (target, targetApp, availableImages) =>
	if target.dependsOn?
		for dependency in target.dependsOn
			dependencyService = _.find(targetApp.services, (s) -> s.serviceName == dependency)
			# Guard against a dangling dependsOn entry that names a service not present
			# in the target app: previously this fell through to a TypeError on
			# `dependencyService.image` below. Treat it as "dependencies not met".
			if !dependencyService?
				return false
			if !_.find(availableImages, (image) => image.dockerImageId == dependencyService.image or @images.isSameImage(image, { name: dependencyService.imageName }))?
				return false
	return true
|
|
|
|
|
2018-01-27 01:29:19 +00:00
|
|
|
# Infers the steps needed to reconcile one network/volume pair:
#  - removal: only once no current service uses it; otherwise emit kill steps
#    for the services that use it (unless they are already stopping or are
#    covered by a changing service pair),
#  - creation: when only a target side exists.
# `model` is the string 'network' or 'volume', forwarded in the step so the
# executor knows which resource type to act on.
_nextStepsForNetworkOrVolume: ({ current, target }, currentApp, changingPairs, dependencyComparisonFn, model) ->
	# Check none of the currentApp.services use this network or volume
	if current?
		dependencies = _.filter currentApp.services, (service) ->
			dependencyComparisonFn(service, current)
		if _.isEmpty(dependencies)
			return [{ action: 'removeNetworkOrVolume', model, current }]
		else
			# If the current update doesn't require killing the services that use this network/volume,
			# we have to kill them before removing the network/volume (e.g. when we're only updating the network config)
			steps = []
			for dependency in dependencies
				if dependency.status != 'Stopping' and !_.some(changingPairs, (pair) -> pair.serviceId == dependency.serviceId)
					steps.push(serviceAction('kill', dependency.serviceId, dependency))
			return steps
	else if target?
		return [{ action: 'createNetworkOrVolume', model, target }]
	else
		# Neither side present: nothing to do. Return an empty list explicitly —
		# an implicit undefined here would make callers' steps.concat(pairSteps)
		# append a bogus `undefined` step.
		return []
|
|
|
|
|
2018-01-27 01:29:19 +00:00
|
|
|
# Steps to reconcile one network pair. A service depends on the network when its
# networkMode equals the namespaced name "<appId>_<networkName>".
_nextStepsForNetwork: ({ current, target }, currentApp, changingPairs) =>
	dependencyComparisonFn = (service, current) ->
		service.networkMode == "#{service.appId}_#{current?.name}"
	@_nextStepsForNetworkOrVolume({ current, target }, currentApp, changingPairs, dependencyComparisonFn, 'network')
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2018-01-27 01:29:19 +00:00
|
|
|
# Steps to reconcile one volume pair. A service depends on the volume when any of
# its "source:dest" volume definitions has the namespaced name as its source.
_nextStepsForVolume: ({ current, target }, currentApp, changingPairs) ->
	# Check none of the currentApp.services use this network or volume
	dependencyComparisonFn = (service, current) ->
		_.some service.volumes, (volumeDefinition) ->
			sourceName = volumeDefinition.split(':')[0]
			sourceName == "#{service.appId}_#{current?.name}"
	@_nextStepsForNetworkOrVolume({ current, target }, currentApp, changingPairs, dependencyComparisonFn, 'volume')
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
# Infers steps that do not require creating a new container
# (the current container can be reused): update metadata when only the
# release/image ids changed, otherwise start or stop it to match target.running.
_updateContainerStep: (current, target) ->
	if current.releaseId != target.releaseId or current.imageId != target.imageId
		return serviceAction('updateMetadata', target.serviceId, current, target)
	else if target.running
		return serviceAction('start', target.serviceId, current, target)
	else
		return serviceAction('stop', target.serviceId, current, target)
|
|
|
|
|
2017-11-23 02:15:55 +00:00
|
|
|
# For a service with no current container: fetch the image if it's missing,
# otherwise start it once its dependencies are met; null means "wait".
_fetchOrStartStep: (current, target, needsDownload, dependenciesMetForStart) ->
	if needsDownload
		return fetchAction(target)
	else if dependenciesMetForStart()
		return serviceAction('start', target.serviceId, current, target)
	else
		return null
|
|
|
|
|
|
|
|
# Dispatch table: one step-inference function per update strategy label
# (io.resin.update.strategy). Each returns a single step, or null to wait.
_strategySteps: {
	# Default strategy: download the new image first, then kill the old
	# container (minimizes downtime).
	'download-then-kill': (current, target, needsDownload, dependenciesMetForStart, dependenciesMetForKill) ->
		if needsDownload
			return fetchAction(target)
		else if dependenciesMetForKill()
			# We only kill when dependencies are already met, so that we minimize downtime
			return serviceAction('kill', target.serviceId, current, target)
		else
			return null
	# Kill immediately; download happens after the container is gone.
	'kill-then-download': (current, target) ->
		return serviceAction('kill', target.serviceId, current, target)
	# Like kill-then-download but also removes the old image (removeImage is
	# only set when a download is actually needed).
	'delete-then-download': (current, target, needsDownload) ->
		return serviceAction('kill', target.serviceId, current, target, removeImage: needsDownload)
	# Run old and new containers side by side for up to `timeout`, unless a
	# changing network/volume forces an early kill (needsSpecialKill).
	'hand-over': (current, target, needsDownload, dependenciesMetForStart, dependenciesMetForKill, needsSpecialKill, timeout) ->
		if needsDownload
			return fetchAction(target)
		else if needsSpecialKill && dependenciesMetForKill()
			return serviceAction('kill', target.serviceId, current, target)
		else if dependenciesMetForStart()
			return serviceAction('handover', target.serviceId, current, target, timeout: timeout)
		else
			return null
}
|
|
|
|
|
2017-11-29 21:32:57 +00:00
|
|
|
# Infers the next step for a single install/update service pair, given the
# app-wide update context. Returns a step object, { action: 'noop' } to wait,
# or null when there is nothing to do yet.
_nextStepForService: ({ current, target }, updateContext) =>
	{ targetApp, networkPairs, volumePairs, installPairs, updatePairs, availableImages, downloading } = updateContext
	if current?.status == 'Stopping'
		# There is already a kill step in progress for this service, so we wait
		return { action: 'noop' }

	# The image is considered available if it matches by docker image id or by name.
	# NOTE(review): `target?.image` soaks a nullish target but `target.imageName`
	# on the next clause does not — this assumes target is always set for
	# install/update pairs; confirm against compareServicesForUpdate.
	needsDownload = !_.some availableImages, (image) =>
		image.dockerImageId == target?.image or @images.isSameImage(image, { name: target.imageName })

	# This service needs an image download but it's currently downloading, so we wait
	if needsDownload and target?.imageId in downloading
		return { action: 'noop' }

	# Lazily-evaluated dependency checks, used by the strategy functions below
	dependenciesMetForStart = =>
		@_dependenciesMetForServiceStart(target, networkPairs, volumePairs, installPairs.concat(updatePairs))
	dependenciesMetForKill = =>
		!needsDownload and @_dependenciesMetForServiceKill(target, targetApp, availableImages)

	# If the service is using a network or volume that is being updated, we need to kill it
	# even if its strategy is handover
	needsSpecialKill = @_hasCurrentNetworksOrVolumes(current, networkPairs, volumePairs)

	if current?.isSameContainer(target)
		# We're only stopping/starting it
		return @_updateContainerStep(current, target)
	else if !current?
		# Either this is a new service, or the current one has already been killed
		return @_fetchOrStartStep(current, target, needsDownload, dependenciesMetForStart)
	else
		# Container must be replaced: pick the step per the service's update strategy,
		# falling back to download-then-kill for unknown labels.
		strategy = checkString(target.labels['io.resin.update.strategy'])
		validStrategies = [ 'download-then-kill', 'kill-then-download', 'delete-then-download', 'hand-over' ]
		if !_.includes(validStrategies, strategy)
			strategy = 'download-then-kill'
		timeout = checkInt(target.labels['io.resin.update.handover-timeout'])
		return @_strategySteps[strategy](current, target, needsDownload, dependenciesMetForStart, dependenciesMetForKill, needsSpecialKill, timeout)
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2018-01-27 01:29:19 +00:00
|
|
|
# Produces the full list of next steps to move one app from currentApp towards
# targetApp: kills for removed services, per-strategy steps for installed and
# updated services, and create/remove steps for networks and volumes. Every
# returned step is tagged with the appId.
_nextStepsForAppUpdate: (currentApp, targetApp, availableImages = [], downloading = []) =>
	emptyApp = { services: [], volumes: {}, networks: {} }
	if !targetApp?
		targetApp = emptyApp
	else
		# Create the default network for the target app
		targetApp.networks['default'] ?= {}
	if !currentApp?
		currentApp = emptyApp
	if currentApp.services?.length == 1 and targetApp.services?.length == 1 and
		targetApp.services[0].serviceName == currentApp.services[0].serviceName and
		checkTruthy(currentApp.services[0].labels['io.resin.legacy-container'])
		# This is a legacy preloaded app or container, so we didn't have things like serviceId.
		# We hack a few things to avoid an unnecessary restart of the preloaded app
		# (but ensuring it gets updated if it actually changed)
		targetApp.services[0].labels['io.resin.legacy-container'] = currentApp.services[0].labels['io.resin.legacy-container']
		targetApp.services[0].labels['io.resin.service-id'] = currentApp.services[0].labels['io.resin.service-id']
		targetApp.services[0].serviceId = currentApp.services[0].serviceId

	appId = targetApp.appId ? currentApp.appId
	networkPairs = @compareNetworksForUpdate({ current: currentApp.networks, target: targetApp.networks }, appId)
	volumePairs = @compareVolumesForUpdate({ current: currentApp.volumes, target: targetApp.volumes }, appId)
	{ removePairs, installPairs, updatePairs } = @compareServicesForUpdate(currentApp.services, targetApp.services)
	steps = []
	# All removePairs get a 'kill' action
	for pair in removePairs
		if pair.current.status != 'Stopping'
			steps.push(serviceAction('kill', pair.current.serviceId, pair.current, null))
		else
			steps.push({ action: 'noop' })
	# next step for install pairs in download - start order, but start requires dependencies, networks and volumes met
	# next step for update pairs in order by update strategy. start requires dependencies, networks and volumes met.
	for pair in installPairs.concat(updatePairs)
		step = @_nextStepForService(pair, { targetApp, networkPairs, volumePairs, installPairs, updatePairs, availableImages, downloading })
		if step?
			steps.push(step)
	# next step for network pairs - remove requires services killed, create kill if no pairs or steps affect that service
	for pair in networkPairs
		pairSteps = @_nextStepsForNetwork(pair, currentApp, removePairs.concat(updatePairs))
		steps = steps.concat(pairSteps)
	# next step for volume pairs - remove requires services killed, create kill if no pairs or steps affect that service
	for pair in volumePairs
		pairSteps = @_nextStepsForVolume(pair, currentApp, removePairs.concat(updatePairs))
		steps = steps.concat(pairSteps)
	return _.map(steps, (step) -> _.assign({}, step, { appId }))
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
# Converts a target-state app (services keyed by serviceId) into the flat row
# format stored in the 'app' DB table: service list gets appId/releaseId/
# serviceId/commit stamped in, image names are normalised, and services/
# networks/volumes are JSON-stringified. Returns a Promise of the DB row.
normaliseAppForDB: (app) =>
	services = _.map app.services, (s, serviceId) ->
		service = _.clone(s)
		service.appId = app.appId
		service.releaseId = app.releaseId
		# keys of app.services are string serviceIds; store them as ints
		service.serviceId = checkInt(serviceId)
		service.commit = app.commit
		return service
	Promise.map services, (service) =>
		service.image = @images.normalise(service.image)
		Promise.props(service)
	.then (services) ->
		dbApp = {
			appId: app.appId
			commit: app.commit
			name: app.name
			releaseId: app.releaseId
			services: JSON.stringify(services)
			networks: JSON.stringify(app.networks ? {})
			volumes: JSON.stringify(app.volumes ? {})
		}
		return dbApp
|
|
|
|
|
|
|
|
# Builds a Service instance for the target state. Keeps the original image
# reference in service.imageName and, when the image is already present
# locally, replaces service.image with the docker image id. A missing image
# (NotFoundError) is tolerated and yields imageInfo == undefined.
createTargetService: (service, opts) ->
	@images.inspectByName(service.image)
	.catchReturn(NotFoundError, undefined)
	.then (imageInfo) ->
		serviceOpts = {
			serviceName: service.serviceName
			imageInfo
		}
		_.assign(serviceOpts, opts)
		service.imageName = service.image
		if imageInfo?.Id?
			service.image = imageInfo.Id
		return new Service(service, serviceOpts)
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
# Inverse of normaliseAppForDB: parses a DB row back into a full app object
# with Service instances, and extends it with config-derived env options and
# the supervisor API host (falling back to 127.0.0.1 when the supervisor
# network gateway can't be determined).
normaliseAndExtendAppFromDB: (app) =>
	Promise.join(
		@config.get('extendedEnvOptions')
		@docker.getNetworkGateway(constants.supervisorNetworkInterface)
		.catchReturn('127.0.0.1')
		(opts, supervisorApiHost) =>
			configOpts = {
				appName: app.name
				supervisorApiHost
			}
			_.assign(configOpts, opts)
			volumes = JSON.parse(app.volumes)
			# Ensure every volume config is an object with a labels map
			volumes = _.mapValues volumes, (volumeConfig) ->
				volumeConfig ?= {}
				volumeConfig.labels ?= {}
				return volumeConfig
			Promise.map(JSON.parse(app.services), (service) => @createTargetService(service, configOpts))
			.then (services) ->
				# If a named volume is defined in a service, we add it app-wide so that we can track it and purge it
				for s in services
					serviceNamedVolumes = s.getNamedVolumes()
					for name in serviceNamedVolumes
						volumes[name] ?= { labels: {} }
				outApp = {
					appId: app.appId
					name: app.name
					commit: app.commit
					releaseId: app.releaseId
					services: services
					networks: JSON.parse(app.networks)
					volumes: volumes
				}
				return outApp
	)
|
|
|
|
|
|
|
|
# Persists the target state: upserts each app row, deletes rows for apps no
# longer targeted, and stores the dependent (proxyvisor) target — all within a
# single transaction (the caller's `trx` if given, otherwise a new one).
# Volatile per-image target overrides are reset afterwards.
setTarget: (apps, dependent , trx) =>
	setInTransaction = (trx) =>
		Promise.try =>
			# apps is keyed by appId; flatten to an array with appId stamped in
			appsArray = _.map apps, (app, appId) ->
				appClone = _.clone(app)
				appClone.appId = checkInt(appId)
				return appClone
			Promise.map(appsArray, @normaliseAppForDB)
			.then (appsForDB) =>
				Promise.map appsForDB, (app) =>
					@db.upsertModel('app', app, { appId: app.appId }, trx)
				.then ->
					# Remove apps that are no longer part of the target state
					trx('app').whereNotIn('appId', _.map(appsForDB, 'appId')).del()
			.then =>
				@proxyvisor.setTargetInTransaction(dependent, trx)

	Promise.try =>
		if trx?
			setInTransaction(trx)
		else
			@db.transaction(setInTransaction)
	.then =>
		@_targetVolatilePerImageId = {}
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2018-01-25 17:41:00 +00:00
|
|
|
# Merges a volatile (not persisted to DB) target override for one service,
# keyed by imageId; applied on top of DB targets in getTargetApps.
setTargetVolatileForService: (imageId, target) ->
	@_targetVolatilePerImageId[imageId] ?= {}
	_.assign(@_targetVolatilePerImageId[imageId], target)
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
# Loads all target apps from the DB, applies any volatile per-image overrides
# to their services, and returns them keyed by appId.
getTargetApps: =>
	Promise.map(@db.models('app').select(), @normaliseAndExtendAppFromDB)
	.map (app) =>
		if !_.isEmpty(app.services)
			app.services = _.map app.services, (service) =>
				if @_targetVolatilePerImageId[service.imageId]?
					_.merge(service, @_targetVolatilePerImageId[service.imageId])
				return service
		return app
	.then (apps) ->
		return _.keyBy(apps, 'appId')
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
# Target state for dependent devices is owned by the proxyvisor.
getDependentTargets: =>
	@proxyvisor.getTarget()
|
|
|
|
|
|
|
|
# Picks the best locally-available image to use as a delta source for `image`,
# preferring (in order): same service in the same app, same service name,
# same app — falling back to the empty base image 'resin/scratch'.
# Dependent (proxyvisor) images skip the matching and always get the fallback.
bestDeltaSource: (image, available) ->
	if !image.dependent
		for availableImage in available
			if availableImage.serviceName == image.serviceName and availableImage.appId == image.appId
				return availableImage.name
		for availableImage in available
			if availableImage.serviceName == image.serviceName
				return availableImage.name
		for availableImage in available
			if availableImage.appId == image.appId
				return availableImage.name
	return 'resin/scratch'
|
|
|
|
|
2017-12-15 05:15:59 +00:00
|
|
|
# returns:
# imagesToRemove: images that
# - are not used in the current state, and
# - are not going to be used in the target state, and
# - are not needed for delta source / pull caching or would be used for a service with delete-then-download as strategy
# imagesToSave: images that
# - are locally available (i.e. an image with the same digest exists)
# - are not saved to the DB with all their metadata (serviceId, serviceName, etc)
_compareImages: (current, target, available) =>
	# Target images come from the service definitions; current images are
	# looked up in `available` by docker image id + imageId. DB-only fields
	# (dockerImageId, id) are stripped before equality comparisons.
	allImagesForTargetApp = (app) -> _.map(app.services, imageForService)
	allImagesForCurrentApp = (app) ->
		_.map app.services, (service) ->
			_.omit(_.find(available, (image) -> image.dockerImageId == service.image and image.imageId == service.imageId), [ 'dockerImageId', 'id' ])
	availableWithoutIds = _.map(available, (image) -> _.omit(image, [ 'dockerImageId', 'id' ]))
	currentImages = _.flatten(_.map(current.local.apps, allImagesForCurrentApp))
	targetImages = _.flatten(_.map(target.local.apps, allImagesForTargetApp))
	availableAndUnused = _.filter availableWithoutIds, (image) ->
		!_.some currentImages.concat(targetImages), (imageInUse) -> _.isEqual(image, imageInUse)
	imagesToDownload = _.filter targetImages, (targetImage) =>
		!_.some available, (availableImage) => @images.isSameImage(availableImage, targetImage)
	# Images that are available but we don't have them in the DB with the exact metadata:
	imagesToSave = _.filter targetImages, (targetImage) =>
		_.some(available, (availableImage) => @images.isSameImage(availableImage, targetImage)) and
		!_.find(availableWithoutIds, (img) -> _.isEqual(img, targetImage))?
	# Keep images that will serve as delta sources or are used by the proxyvisor
	deltaSources = _.map imagesToDownload, (image) =>
		return @bestDeltaSource(image, available)
	proxyvisorImages = @proxyvisor.imagesInUse(current, target)
	imagesToRemove = _.filter availableAndUnused, (image) =>
		notUsedForDelta = !_.some deltaSources, (deltaSource) -> deltaSource == image.name
		notUsedByProxyvisor = !_.some proxyvisorImages, (proxyvisorImage) => @images.isSameImage(image, { name: proxyvisorImage })
		return notUsedForDelta and notUsedByProxyvisor
	return { imagesToSave, imagesToRemove }
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2018-01-27 01:29:19 +00:00
|
|
|
# Derives the list of steps needed to move the device from `current` to
# `target` state, given the preconditions gathered by getRequiredSteps
# (image cleanup flag, images available/downloading, supervisor network
# state, local mode). Resolves to a deduplicated array of step objects
# of the shape { action: ... }.
_inferNextSteps: (cleanupNeeded, availableImages, downloading, supervisorNetworkReady, current, target, ignoreImages, localMode) =>
	Promise.try =>
		if checkTruthy(localMode)
			# In local mode the supervisor doesn't manage app services or
			# images: treat the target as having no services and skip all
			# image handling below.
			target = _.cloneDeep(target)
			target.local.apps = _.mapValues target.local.apps ? {}, (app) ->
				app.services = []
				return app
			ignoreImages = true
		currentByAppId = current.local.apps ? {}
		targetByAppId = target.local.apps ? {}
		nextSteps = []
		if !supervisorNetworkReady
			# The supervisor's own network is a precondition for everything
			# else; restore it before considering any app changes.
			nextSteps.push({ action: 'ensureSupervisorNetwork' })
		else
			# Image bookkeeping only happens while no downloads are in flight.
			if !ignoreImages and _.isEmpty(downloading)
				if cleanupNeeded
					nextSteps.push({ action: 'cleanup' })
				{ imagesToRemove, imagesToSave } = @_compareImages(current, target, availableImages)
				for image in imagesToSave
					nextSteps.push({ action: 'saveImage', image })
				# Only schedule removals once there is nothing left to save,
				# so saves and removals never run in the same pass.
				if _.isEmpty(imagesToSave)
					for image in imagesToRemove
						nextSteps.push({ action: 'removeImage', image })
			# If we have to remove any images, we do that before anything else
			if _.isEmpty(nextSteps)
				allAppIds = _.union(_.keys(currentByAppId), _.keys(targetByAppId))
				for appId in allAppIds
					nextSteps = nextSteps.concat(@_nextStepsForAppUpdate(currentByAppId[appId], targetByAppId[appId], availableImages, downloading))
		# While downloads are still in progress and nothing else is pending,
		# emit a noop so the state engine keeps polling instead of settling.
		if !ignoreImages and _.isEmpty(nextSteps) and !_.isEmpty(downloading)
			nextSteps.push({ action: 'noop' })
		return _.uniqWith(nextSteps, _.isEqual)
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2018-01-19 19:31:02 +00:00
|
|
|
# Kills every running service container, keeping the containers on disk
# (removeContainer: false) and waiting for each kill to complete. Each
# app's update lock is taken first unless skipLock is set.
stopAll: ({ force = false, skipLock = false } = {}) =>
	@services.getAll()
	.then (services) =>
		Promise.map services, (service) =>
			@_lockingIfNecessary service.appId, { force, skipLock }, =>
				@services.kill(service, { removeContainer: false, wait: true })
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2017-11-29 21:32:57 +00:00
|
|
|
# Runs `fn` while holding the update lock for `appId`, unless skipLock
# asks to bypass locking entirely. The lock is taken forcibly when the
# caller passes `force` or the lockOverride config flag is set.
_lockingIfNecessary: (appId, { force = false, skipLock = false } = {}, fn) =>
	return Promise.try(fn) if skipLock
	@config.get('lockOverride')
	.then (lockOverride) ->
		forceLock = checkTruthy(lockOverride) or force
		updateLock.lock(appId, { force: forceLock }, fn)
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2018-01-19 19:31:02 +00:00
|
|
|
# Dispatches a single step to its executor. Proxyvisor-owned actions are
# delegated wholesale; anything not in @validActions is rejected; all
# other actions run through @actionExecutors with the lock options.
executeStepAction: (step, { force = false, skipLock = false } = {}) =>
	if _.includes(@proxyvisor.validActions, step.action)
		return @proxyvisor.executeStepAction(step)
	unless _.includes(@validActions, step.action)
		return Promise.reject(new Error("Invalid action #{step.action}"))
	@actionExecutors[step.action](step, { force, skipLock })
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2018-01-27 01:29:19 +00:00
|
|
|
# Gathers the device-side facts needed for planning (cleanup flag,
# available images, in-progress downloads, supervisor network state,
# local mode), derives the steps to reach `targetState`, and appends the
# proxyvisor's steps for dependent devices. When `ignoreImages` is true
# (API-triggered actions) producing a fetch step is an error.
getRequiredSteps: (currentState, targetState, ignoreImages = false) =>
	Promise.join(
		@images.isCleanupNeeded()
		@images.getAvailable()
		@images.getDownloadingImageIds()
		@networks.supervisorNetworkReady()
		@config.get('localMode')
		(cleanupNeeded, availableImages, downloading, supervisorNetworkReady, localMode) =>
			# NOTE: the whole .then chain below must stay nested inside this
			# join handler — availableImages and downloading are parameters of
			# this function and would be out of scope otherwise.
			@_inferNextSteps(cleanupNeeded, availableImages, downloading, supervisorNetworkReady, currentState, targetState, ignoreImages, localMode)
			.then (nextSteps) =>
				if ignoreImages and _.some(nextSteps, (step) -> step.action == 'fetch')
					throw new Error('Cannot fetch images while executing an API action')
				@proxyvisor.getRequiredSteps(availableImages, downloading, currentState, targetState, nextSteps)
				.then (proxyvisorSteps) ->
					return nextSteps.concat(proxyvisorSteps)
	)
|