2017-11-01 07:55:36 +00:00
|
|
|
Promise = require 'bluebird'
|
|
|
|
_ = require 'lodash'
|
|
|
|
EventEmitter = require 'events'
|
|
|
|
express = require 'express'
|
|
|
|
bodyParser = require 'body-parser'
|
2018-02-13 23:23:44 +00:00
|
|
|
fs = Promise.promisifyAll(require('fs'))
|
|
|
|
path = require 'path'
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
constants = require './lib/constants'
|
|
|
|
|
2019-01-23 19:24:57 +00:00
|
|
|
{ DockerUtils: Docker } = require './lib/docker-utils'
|
2018-10-11 13:27:46 +00:00
|
|
|
{ LocalModeManager } = require './local-mode'
|
2017-11-01 07:55:36 +00:00
|
|
|
updateLock = require './lib/update-lock'
|
|
|
|
{ checkTruthy, checkInt, checkString } = require './lib/validation'
|
2017-12-15 05:15:59 +00:00
|
|
|
{ NotFoundError } = require './lib/errors'
|
2018-12-06 02:38:58 +00:00
|
|
|
{ pathExistsOnHost } = require './lib/fs-utils'
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2019-01-24 16:52:22 +00:00
|
|
|
{ ServiceManager } = require './compose/service-manager'
|
2018-09-18 14:36:59 +00:00
|
|
|
{ Service } = require './compose/service'
|
2019-01-22 13:27:00 +00:00
|
|
|
{ Images } = require './compose/images'
|
2018-07-23 21:49:25 +00:00
|
|
|
{ NetworkManager } = require './compose/network-manager'
|
|
|
|
{ Network } = require './compose/network'
|
2019-01-22 20:25:04 +00:00
|
|
|
{ Volumes } = require './compose/volumes'
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
Proxyvisor = require './proxyvisor'
|
|
|
|
|
2018-05-11 13:23:46 +00:00
|
|
|
{ createV1Api } = require './device-api/v1'
|
2018-05-11 16:14:47 +00:00
|
|
|
{ createV2Api } = require './device-api/v2'
|
|
|
|
{ serviceAction } = require './device-api/common'
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
# TODO: move this to an Image class?
|
|
|
|
# Build the image-metadata record for a service, in the shape the image
# manager expects. Fields are copied one-to-one from the service; `dependent`
# is always 0 because these are user-app images, not proxyvisor-managed ones.
imageForService = (service) ->
	{ imageName, appId, serviceId, serviceName, imageId, releaseId } = service
	return {
		name: imageName
		appId
		serviceId
		serviceName
		imageId
		releaseId
		dependent: 0
	}
|
|
|
|
|
|
|
|
# Build a 'fetch' step for the given target service; applying the step
# makes the image manager download the service's image.
fetchAction = (service) ->
	step =
		action: 'fetch'
		image: imageForService(service)
		serviceId: service.serviceId
	return step
|
2018-02-13 23:23:44 +00:00
|
|
|
|
2018-01-19 19:31:02 +00:00
|
|
|
# TODO: implement additional v2 endpoints
|
2018-02-13 23:23:44 +00:00
|
|
|
# Some v1 endpoints only work for single-container apps as they assume the app has a single service.
|
|
|
|
# Builds the express router for the device API. Registration order matters:
# body parsers first, then the v1 and v2 endpoint handlers, and finally the
# proxyvisor's own router for dependent-device requests.
createApplicationManagerRouter = (applications) ->
	router = express.Router()
	router.use(bodyParser.urlencoded(extended: true))
	router.use(bodyParser.json())

	createV1Api(router, applications)
	createV2Api(router, applications)

	router.use(applications.proxyvisor.router)

	return router
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
module.exports = class ApplicationManager extends EventEmitter
	# Orchestrates the containers, images, networks and volumes that make up
	# user applications. Holds the compose sub-managers, the per-step action
	# executors, and the device API router. Emits 'change' with partial
	# current-state updates.
	constructor: ({ @logger, @config, @db, @eventTracker, @deviceState }) ->
		@docker = new Docker()
		@images = new Images({ @docker, @logger, @db, @config })
		@services = new ServiceManager({ @docker, @logger, @images, @config })
		@networks = new NetworkManager({ @docker, @logger })
		@volumes = new Volumes({ @docker, @logger })
		@proxyvisor = new Proxyvisor({ @config, @logger, @db, @docker, @images, applications: this })
		@localModeManager = new LocalModeManager(@config, @docker, @logger, @db)
		# Fetch bookkeeping (read elsewhere, e.g. for health reporting)
		@timeSpentFetching = 0
		@fetchesInProgress = 0
		@_targetVolatilePerImageId = {}
		# Containers we have started ourselves, keyed by containerId, so we
		# can tell "exited after we started it" apart from "never started"
		@_containerStarted = {}

		# Keep the image manager's poll interval in sync with config changes
		@config.on 'change', (changedConfig) =>
			if changedConfig.appUpdatePollInterval
				@images.appUpdatePollInterval = changedConfig.appUpdatePollInterval

		# One executor per step action. Each receives the step plus locking
		# options; executors that mutate a running app take the update lock
		# via @_lockingIfNecessary.
		@actionExecutors = {
			stop: (step, { force = false, skipLock = false } = {}) =>
				@_lockingIfNecessary step.current.appId, { force, skipLock: skipLock or step.options?.skipLock }, =>
					wait = step.options?.wait ? false
					@services.kill(step.current, { removeContainer: false, wait })
					.then =>
						delete @_containerStarted[step.current.containerId]
			kill: (step, { force = false, skipLock = false } = {}) =>
				@_lockingIfNecessary step.current.appId, { force, skipLock: skipLock or step.options?.skipLock }, =>
					@services.kill(step.current)
					.then =>
						delete @_containerStarted[step.current.containerId]
						if step.options?.removeImage
							@images.removeByDockerId(step.current.config.image)
			remove: (step) =>
				# Only called for dead containers, so no need to take locks or anything
				@services.remove(step.current)
			updateMetadata: (step, { force = false, skipLock = false } = {}) =>
				# Legacy (pre-multicontainer) containers never hold the lock
				skipLock or= checkTruthy(step.current.config.labels['io.balena.legacy-container'])
				@_lockingIfNecessary step.current.appId, { force, skipLock: skipLock or step.options?.skipLock }, =>
					@services.updateMetadata(step.current, step.target)
			restart: (step, { force = false, skipLock = false } = {}) =>
				@_lockingIfNecessary step.current.appId, { force, skipLock: skipLock or step.options?.skipLock }, =>
					@services.kill(step.current, { wait: true })
					.then =>
						delete @_containerStarted[step.current.containerId]
					.then =>
						@services.start(step.target)
					.then (container) =>
						@_containerStarted[container.id] = true
			stopAll: (step, { force = false, skipLock = false } = {}) =>
				@stopAll({ force, skipLock })
			start: (step) =>
				@services.start(step.target)
				.then (container) =>
					@_containerStarted[container.id] = true
			updateCommit: (step) =>
				@config.set({ currentCommit: step.target })
			handover: (step, { force = false, skipLock = false } = {}) =>
				@_lockingIfNecessary step.current.appId, { force, skipLock: skipLock or step.options?.skipLock }, =>
					@services.handover(step.current, step.target)
			fetch: (step) =>
				# Track wall-clock time and in-flight count for this fetch
				startTime = process.hrtime()
				@fetchesInProgress += 1
				Promise.join(
					@config.get('fetchOptions')
					@images.getAvailable()
					(opts, availableImages) =>
						opts.deltaSource = @bestDeltaSource(step.image, availableImages)
						@images.triggerFetch step.image, opts, (success) =>
							@fetchesInProgress -= 1
							elapsed = process.hrtime(startTime)
							elapsedMs = elapsed[0] * 1000 + elapsed[1] / 1e6
							@timeSpentFetching += elapsedMs
							if success
								# update_downloaded is true if *any* image has been downloaded,
								# and it's relevant mostly for the legacy GET /v1/device endpoint
								# that assumes a single-container app
								@reportCurrentState(update_downloaded: true)
				)
			removeImage: (step) =>
				@images.remove(step.image)
			saveImage: (step) =>
				@images.save(step.image)
			cleanup: (step) =>
				# Image cleanup is skipped entirely while in local mode
				@config.get('localMode').then (localMode) =>
					if !localMode
						@images.cleanup()
			createNetworkOrVolume: (step) =>
				if step.model is 'network'
					# TODO: These step targets should be the actual compose objects,
					# rather than recreating them
					Network.fromComposeObject({ @docker, @logger },
						step.target.name,
						step.appId,
						step.target.config
					).create()
				else
					@volumes.create(step.target)
			removeNetworkOrVolume: (step) =>
				if step.model is 'network'
					Network.fromComposeObject({ @docker, @logger },
						step.current.name,
						step.appId,
						step.current.config
					).remove()
				else
					@volumes.remove(step.current)
			ensureSupervisorNetwork: =>
				@networks.ensureSupervisorNetwork()
		}
		# Proxyvisor steps are applied through its own executors
		@validActions = _.keys(@actionExecutors).concat(@proxyvisor.validActions)
		@router = createApplicationManagerRouter(this)
		# Re-emit image/service state changes as our own 'change' events
		@images.on('change', @reportCurrentState)
		@services.on('change', @reportCurrentState)
|
|
|
|
|
|
|
|
	# Expose the module-level helpers on the instance so consumers
	# (e.g. device-api handlers holding an ApplicationManager) can reach them.
	serviceAction: serviceAction
	imageForService: imageForService
	fetchAction: fetchAction
|
|
|
|
|
|
|
|
	reportCurrentState: (data) =>
		# Forward a partial current-state update to whoever listens for 'change'
		@emit('change', data)
|
|
|
|
|
|
|
|
	# Start-up sequence: sync the image poll interval from config, prune the
	# image database, initialise local mode, then attach to already-running
	# containers and start listening to docker events. Returns a promise.
	init: =>
		@config.get('appUpdatePollInterval')
		.then (interval) =>
			@images.appUpdatePollInterval = interval
			@images.cleanupDatabase()
		.then =>
			@localModeManager.init()
		.then =>
			@services.attachToRunning()
		.then =>
			@services.listenToEvents()
|
|
|
|
|
|
|
|
# Returns the status of applications and their services
|
|
|
|
	getStatus: =>
		# Local mode changes how image status is reported, so resolve it first
		@config.get('localMode').then (localMode) =>
			@_getStatus(localMode)
|
|
|
|
|
|
|
|
	# Assembles the current status report: per-app service status keyed by
	# imageId (merged from running containers and in-progress image downloads),
	# dependent-device image status, and the current commit.
	_getStatus: (localMode) =>
		Promise.join(
			@services.getStatus()
			@images.getStatus(localMode)
			@config.get('currentCommit')
			(services, images, currentCommit) ->
				apps = {}
				dependent = {}
				releaseId = null
				creationTimesAndReleases = {}
				# We iterate over the current running services and add them to the current state
				# of the app they belong to.
				for service in services
					appId = service.appId
					apps[appId] ?= {}
					creationTimesAndReleases[appId] = {}
					apps[appId].services ?= {}
					# We only send commit if all services have the same release, and it matches the target release
					if !releaseId?
						releaseId = service.releaseId
					else if releaseId != service.releaseId
						# Mixed releases: mark with false so no single commit is claimed
						releaseId = false
					if !apps[appId].services[service.imageId]?
						apps[appId].services[service.imageId] = _.pick(service, [ 'status', 'releaseId' ])
						creationTimesAndReleases[appId][service.imageId] = _.pick(service, [ 'createdAt', 'releaseId' ])
						apps[appId].services[service.imageId].download_progress = null
					else
						# There's two containers with the same imageId, so this has to be a handover
						# Report the releaseId of the older container while both exist
						apps[appId].services[service.imageId].releaseId = _.minBy([ creationTimesAndReleases[appId][service.imageId], service ], 'createdAt').releaseId
						apps[appId].services[service.imageId].status = 'Handing over'

				for image in images
					appId = image.appId
					if !image.dependent
						apps[appId] ?= {}
						apps[appId].services ?= {}
						# Only add image status when no container already reported it
						if !apps[appId].services[image.imageId]?
							apps[appId].services[image.imageId] = _.pick(image, [ 'status', 'releaseId' ])
							apps[appId].services[image.imageId].download_progress = image.downloadProgress
					else if image.imageId?
						dependent[appId] ?= {}
						dependent[appId].images ?= {}
						dependent[appId].images[image.imageId] = _.pick(image, [ 'status' ])
						dependent[appId].images[image.imageId].download_progress = image.downloadProgress
					else
						console.log('Ignoring legacy dependent image', image)

				obj = { local: apps, dependent }
				obj.commit = currentCommit
				return obj
		)
|
|
|
|
|
|
|
|
	getDependentState: =>
		# Dependent (proxyvisor-managed) device state comes from the proxyvisor
		@proxyvisor.getCurrentStates()
|
|
|
|
|
2018-06-20 11:57:16 +00:00
|
|
|
_buildApps: (services, networks, volumes, currentCommit) ->
|
2018-01-19 19:31:02 +00:00
|
|
|
apps = {}
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
# We iterate over the current running services and add them to the current state
|
|
|
|
# of the app they belong to.
|
2017-11-29 21:32:57 +00:00
|
|
|
for service in services
|
2017-11-01 07:55:36 +00:00
|
|
|
appId = service.appId
|
2018-01-19 19:31:02 +00:00
|
|
|
apps[appId] ?= { appId, services: [], volumes: {}, networks: {} }
|
2017-11-01 07:55:36 +00:00
|
|
|
apps[appId].services.push(service)
|
|
|
|
|
2017-11-29 21:32:57 +00:00
|
|
|
for network in networks
|
2017-11-01 07:55:36 +00:00
|
|
|
appId = network.appId
|
2018-01-19 19:31:02 +00:00
|
|
|
apps[appId] ?= { appId, services: [], volumes: {}, networks: {} }
|
2017-11-01 07:55:36 +00:00
|
|
|
apps[appId].networks[network.name] = network.config
|
|
|
|
|
2017-11-29 21:32:57 +00:00
|
|
|
for volume in volumes
|
2017-11-01 07:55:36 +00:00
|
|
|
appId = volume.appId
|
2018-01-19 19:31:02 +00:00
|
|
|
apps[appId] ?= { appId, services: [], volumes: {}, networks: {} }
|
2017-11-01 07:55:36 +00:00
|
|
|
apps[appId].volumes[volume.name] = volume.config
|
|
|
|
|
2018-06-20 11:57:16 +00:00
|
|
|
# multi-app warning!
|
|
|
|
# This is just wrong on every level
|
2018-07-12 11:39:04 +00:00
|
|
|
_.each apps, (app) ->
|
2018-06-20 11:57:16 +00:00
|
|
|
app.commit = currentCommit
|
|
|
|
|
2018-01-19 19:31:02 +00:00
|
|
|
return apps
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
	# Current state of *all* apps (every service/network/volume), shaped for
	# comparison against the target state.
	getCurrentForComparison: =>
		Promise.join(
			@services.getAll()
			@networks.getAll()
			@volumes.getAll()
			@config.get('currentCommit')
			@_buildApps
		)
|
|
|
|
|
|
|
|
	# Current state of a single app; undefined if nothing exists for appId.
	getCurrentApp: (appId) =>
		Promise.join(
			@services.getAllByAppId(appId)
			@networks.getAllByAppId(appId)
			@volumes.getAllByAppId(appId)
			@config.get('currentCommit')
			@_buildApps
		).get(appId)
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
getTargetApp: (appId) =>
|
2018-11-26 15:37:42 +00:00
|
|
|
@config.get('apiEndpoint').then (endpoint) ->
|
2018-05-18 12:27:41 +00:00
|
|
|
@db.models('app').where({ appId, source: endpoint }).select()
|
2017-11-01 07:55:36 +00:00
|
|
|
.then ([ app ]) =>
|
2017-11-29 21:32:57 +00:00
|
|
|
if !app?
|
|
|
|
return
|
2017-11-01 07:55:36 +00:00
|
|
|
@normaliseAndExtendAppFromDB(app)
|
|
|
|
|
|
|
|
# Compares current and target services and returns a list of service pairs to be updated/removed/installed.
|
|
|
|
# The returned list is an array of objects where the "current" and "target" properties define the update pair, and either can be null
|
|
|
|
# (in the case of an install or removal).
|
2018-02-07 01:40:59 +00:00
|
|
|
	# Compares current and target services and returns { removePairs,
	# installPairs, updatePairs }. Each pair is { current, target, serviceId },
	# where current or target is null for installs/removals.
	compareServicesForUpdate: (currentServices, targetServices) =>
		removePairs = []
		installPairs = []
		updatePairs = []
		targetServiceIds = _.map(targetServices, 'serviceId')
		currentServiceIds = _.uniq(_.map(currentServices, 'serviceId'))

		# Services that exist now but are not targeted: remove every container
		toBeRemoved = _.difference(currentServiceIds, targetServiceIds)
		for serviceId in toBeRemoved
			servicesToRemove = _.filter(currentServices, { serviceId })
			for service in servicesToRemove
				removePairs.push({
					current: service
					target: null
					serviceId
				})

		# Services targeted but not present: install
		toBeInstalled = _.difference(targetServiceIds, currentServiceIds)
		for serviceId in toBeInstalled
			serviceToInstall = _.find(targetServices, { serviceId })
			if serviceToInstall?
				installPairs.push({
					current: null
					target: serviceToInstall
					serviceId
				})

		# Services present on both sides: maybe update
		toBeMaybeUpdated = _.intersection(targetServiceIds, currentServiceIds)
		currentServicesPerId = {}
		targetServicesPerId = _.keyBy(targetServices, 'serviceId')
		for serviceId in toBeMaybeUpdated
			currentServiceContainers = _.filter(currentServices, { serviceId })
			if currentServiceContainers.length > 1
				currentServicesPerId[serviceId] = _.maxBy(currentServiceContainers, 'createdAt')

				# All but the latest container for this service are spurious and should be removed
				for service in _.without(currentServiceContainers, currentServicesPerId[serviceId])
					removePairs.push({
						current: service
						target: null
						serviceId
					})
			else
				currentServicesPerId[serviceId] = currentServiceContainers[0]

		# Returns true if a service matches its target except it should be running and it is not, but we've
		# already started it before. In this case it means it just exited so we don't want to start it again.
		alreadyStarted = (serviceId) =>
			return (
				currentServicesPerId[serviceId].isEqualExceptForRunningState(targetServicesPerId[serviceId]) and
				targetServicesPerId[serviceId].config.running and
				@_containerStarted[currentServicesPerId[serviceId].containerId]
			)

		needUpdate = _.filter toBeMaybeUpdated, (serviceId) ->
			!currentServicesPerId[serviceId].isEqual(targetServicesPerId[serviceId]) and !alreadyStarted(serviceId)

		for serviceId in needUpdate
			updatePairs.push({
				current: currentServicesPerId[serviceId]
				target: targetServicesPerId[serviceId]
				serviceId
			})

		return { removePairs, installPairs, updatePairs }
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2017-12-15 07:04:25 +00:00
|
|
|
	# Diffs current vs target networks or volumes (both maps of name -> config)
	# for one app. Returns pairs of { current, target } where either side is
	# null for pure additions/removals, and both are set when the config changed.
	_compareNetworksOrVolumesForUpdate: (model, { current, target }, appId) ->
		outputPairs = []
		currentNames = _.keys(current)
		targetNames = _.keys(target)
		# Present now, not targeted: remove
		toBeRemoved = _.difference(currentNames, targetNames)
		for name in toBeRemoved
			outputPairs.push({
				current: {
					name
					appId
					config: current[name]
				}
				target: null
			})
		# Targeted, not present: create
		toBeInstalled = _.difference(targetNames, currentNames)
		for name in toBeInstalled
			outputPairs.push({
				current: null
				target: {
					name
					appId
					config: target[name]
				}
			})
		toBeUpdated = _.filter _.intersection(targetNames, currentNames), (name) =>
			# While we're in this in-between state of a network-manager, but not
			# a volume-manager, we'll have to inspect the object to detect a
			# network-manager
			if model instanceof NetworkManager
				opts = docker: @docker, logger: @logger
				currentNet = Network.fromComposeObject(
					opts,
					name,
					appId,
					current[name]
				)
				targetNet = Network.fromComposeObject(
					opts,
					name,
					appId,
					target[name]
				)
				return !currentNet.isEqualConfig(targetNet)
			else
				return !model.isEqualConfig(current[name], target[name])
		for name in toBeUpdated
			outputPairs.push({
				current: {
					name
					appId
					config: current[name]
				}
				target: {
					name
					appId
					config: target[name]
				}
			})
		return outputPairs
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2017-12-15 07:04:25 +00:00
|
|
|
	# Network diff for one app; thin wrapper over the shared comparison helper.
	compareNetworksForUpdate: ({ current, target }, appId) =>
		@_compareNetworksOrVolumesForUpdate(@networks, { current, target }, appId)
|
|
|
|
|
|
|
|
	# Volume diff for one app; thin wrapper over the shared comparison helper.
	compareVolumesForUpdate: ({ current, target }, appId) =>
		@_compareNetworksOrVolumesForUpdate(@volumes, { current, target }, appId)
|
|
|
|
|
2017-11-23 02:15:55 +00:00
|
|
|
# Checks if a service is using a network or volume that is about to be updated
|
|
|
|
_hasCurrentNetworksOrVolumes: (service, networkPairs, volumePairs) ->
|
2017-11-29 21:32:57 +00:00
|
|
|
if !service?
|
|
|
|
return false
|
2017-11-01 07:55:36 +00:00
|
|
|
hasNetwork = _.some networkPairs, (pair) ->
|
Add support for init, mem_reservation, shm_size, read_only and sysctls.
Also several bugfixes:
* Fix VPN control, logging in deviceConfig, and action executors in proxyvisor
* Fix bug in calculation of dependencies due to fields still using snake_case
* Fix snake_case in a migration, and remove unused lib/migration.coffee
* In healthcheck, count deviceState as healthy when a fetch is in progress (as in the non-multicontainer supervisor)
* Set always as default restart policy
* Fix healthcheck, stop_grace_period and mem_limit
* Lint and reduce some cyclomatic complexities
* Namespace volumes and networks by appId, switch default network name to 'default', fix dependencies in networks and volumes, fix duplicated kill steps, fix fat arrow on provisioning
* Check that supervisor network is okay every time we're applying target state
Signed-off-by: Pablo Carranza Velez <pablo@resin.io>
2017-12-12 00:35:23 +00:00
|
|
|
"#{service.appId}_#{pair.current?.name}" == service.networkMode
|
2017-11-29 21:32:57 +00:00
|
|
|
if hasNetwork
|
|
|
|
return true
|
2017-11-01 07:55:36 +00:00
|
|
|
hasVolume = _.some service.volumes, (volume) ->
|
|
|
|
name = _.split(volume, ':')[0]
|
|
|
|
_.some volumePairs, (pair) ->
|
Add support for init, mem_reservation, shm_size, read_only and sysctls.
Also several bugfixes:
* Fix VPN control, logging in deviceConfig, and action executors in proxyvisor
* Fix bug in calculation of dependencies due to fields still using snake_case
* Fix snake_case in a migration, and remove unused lib/migration.coffee
* In healthcheck, count deviceState as healthy when a fetch is in progress (as in the non-multicontainer supervisor)
* Set always as default restart policy
* Fix healthcheck, stop_grace_period and mem_limit
* Lint and reduce some cyclomatic complexities
* Namespace volumes and networks by appId, switch default network name to 'default', fix dependencies in networks and volumes, fix duplicated kill steps, fix fat arrow on provisioning
* Check that supervisor network is okay every time we're applying target state
Signed-off-by: Pablo Carranza Velez <pablo@resin.io>
2017-12-12 00:35:23 +00:00
|
|
|
"#{service.appId}_#{pair.current?.name}" == name
|
2018-02-13 23:23:44 +00:00
|
|
|
return hasVolume
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
# TODO: account for volumes-from, networks-from, links, etc
|
Add support for init, mem_reservation, shm_size, read_only and sysctls.
Also several bugfixes:
* Fix VPN control, logging in deviceConfig, and action executors in proxyvisor
* Fix bug in calculation of dependencies due to fields still using snake_case
* Fix snake_case in a migration, and remove unused lib/migration.coffee
* In healthcheck, count deviceState as healthy when a fetch is in progress (as in the non-multicontainer supervisor)
* Set always as default restart policy
* Fix healthcheck, stop_grace_period and mem_limit
* Lint and reduce some cyclomatic complexities
* Namespace volumes and networks by appId, switch default network name to 'default', fix dependencies in networks and volumes, fix duplicated kill steps, fix fat arrow on provisioning
* Check that supervisor network is okay every time we're applying target state
Signed-off-by: Pablo Carranza Velez <pablo@resin.io>
2017-12-12 00:35:23 +00:00
|
|
|
# TODO: support networks instead of only networkMode
|
2018-01-27 01:29:19 +00:00
|
|
|
	# True when nothing the target service depends on is still pending:
	# no dependsOn service, required network, or required named volume appears
	# in the pending/changing pairs.
	_dependenciesMetForServiceStart: (target, networkPairs, volumePairs, pendingPairs) ->
		# for dependsOn, check no install or update pairs have that service
		dependencyUnmet = _.some target.dependsOn, (dependency) ->
			_.some(pendingPairs, (pair) -> pair.target?.serviceName == dependency)
		if dependencyUnmet
			return false
		# for networks and volumes, check no network pairs have that volume name
		if _.some(networkPairs, (pair) -> "#{target.appId}_#{pair.target?.name}" == target.networkMode)
			return false
		volumeUnmet = _.some target.volumes, (volumeDefinition) ->
			[ sourceName, destName ] = volumeDefinition.split(':')
			if !destName? # If this is not a named volume, ignore it
				return false
			return _.some(volumePairs, (pair) -> "#{target.appId}_#{pair.target?.name}" == sourceName)
		return !volumeUnmet
|
|
|
|
|
2017-11-23 02:15:55 +00:00
|
|
|
# Unless the update strategy requires an early kill (i.e. kill-then-download, delete-then-download), we only want
|
|
|
|
# to kill a service once the images for the services it depends on have been downloaded, so as to minimize
|
|
|
|
# downtime (but not block the killing too much, potentially causing a deadlock)
|
2019-01-22 13:27:00 +00:00
|
|
|
	# True when every image for the services this one dependsOn has already
	# been downloaded, so killing the current container won't extend downtime.
	_dependenciesMetForServiceKill: (target, targetApp, availableImages) ->
		if target.dependsOn?
			for dependency in target.dependsOn
				dependencyService = _.find(targetApp.services, serviceName: dependency)
				# NOTE(review): if `dependency` names no service in targetApp,
				# dependencyService is undefined and the next line throws —
				# presumably target states always reference existing services; confirm.
				if !_.some(availableImages, (image) -> image.dockerImageId == dependencyService.image or Images.isSameImage(image, { name: dependencyService.imageName }))
					return false
		return true
|
|
|
|
|
2018-01-27 01:29:19 +00:00
|
|
|
	# Plans the next steps for one network/volume pair: remove it once no
	# current service uses it (killing users first if needed), or create it
	# when only the target side exists. Returns an array of steps, or
	# undefined when neither side is set.
	_nextStepsForNetworkOrVolume: ({ current, target }, currentApp, changingPairs, dependencyComparisonFn, model) ->
		# Check none of the currentApp.services use this network or volume
		if current?
			dependencies = _.filter currentApp.services, (service) ->
				dependencyComparisonFn(service, current)
			if _.isEmpty(dependencies)
				return [{ action: 'removeNetworkOrVolume', model, current }]
			else
				# If the current update doesn't require killing the services that use this network/volume,
				# we have to kill them before removing the network/volume (e.g. when we're only updating the network config)
				steps = []
				for dependency in dependencies
					# Skip services already stopping or already covered by a changing pair
					if dependency.status != 'Stopping' and !_.some(changingPairs, serviceId: dependency.serviceId)
						steps.push(serviceAction('kill', dependency.serviceId, dependency))
				return steps
		else if target?
			return [{ action: 'createNetworkOrVolume', model, target }]
|
|
|
|
|
2018-01-27 01:29:19 +00:00
|
|
|
	_nextStepsForNetwork: ({ current, target }, currentApp, changingPairs) =>
		# A service depends on this network when its networkMode is the
		# app-namespaced network name ("<appId>_<name>")
		dependencyComparisonFn = (service, current) ->
			service.config.networkMode == "#{service.appId}_#{current?.name}"

		@_nextStepsForNetworkOrVolume({ current, target }, currentApp, changingPairs, dependencyComparisonFn, 'network')
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2018-01-27 01:29:19 +00:00
|
|
|
_nextStepsForVolume: ({ current, target }, currentApp, changingPairs) ->
|
2017-11-01 07:55:36 +00:00
|
|
|
# Check none of the currentApp.services use this network or volume
|
|
|
|
dependencyComparisonFn = (service, current) ->
|
2018-09-18 14:36:59 +00:00
|
|
|
_.some service.config.volumes, (volumeDefinition) ->
|
2018-01-30 16:42:03 +00:00
|
|
|
[ sourceName, destName ] = volumeDefinition.split(':')
|
|
|
|
destName? and sourceName == "#{service.appId}_#{current?.name}"
|
2018-01-27 01:29:19 +00:00
|
|
|
@_nextStepsForNetworkOrVolume({ current, target }, currentApp, changingPairs, dependencyComparisonFn, 'volume')
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
# Infers steps that do not require creating a new container
|
|
|
|
_updateContainerStep: (current, target) ->
|
2017-11-29 21:32:57 +00:00
|
|
|
if current.releaseId != target.releaseId or current.imageId != target.imageId
|
|
|
|
return serviceAction('updateMetadata', target.serviceId, current, target)
|
2018-09-18 14:36:59 +00:00
|
|
|
else if target.config.running
|
2017-11-01 07:55:36 +00:00
|
|
|
return serviceAction('start', target.serviceId, current, target)
|
|
|
|
else
|
|
|
|
return serviceAction('stop', target.serviceId, current, target)
|
|
|
|
|
2017-11-23 02:15:55 +00:00
|
|
|
_fetchOrStartStep: (current, target, needsDownload, dependenciesMetForStart) ->
|
2017-11-01 07:55:36 +00:00
|
|
|
if needsDownload
|
|
|
|
return fetchAction(target)
|
2017-11-23 02:15:55 +00:00
|
|
|
else if dependenciesMetForStart()
|
2017-11-01 07:55:36 +00:00
|
|
|
return serviceAction('start', target.serviceId, current, target)
|
|
|
|
else
|
|
|
|
return null
|
|
|
|
|
|
|
|
_strategySteps: {
|
2017-11-23 02:15:55 +00:00
|
|
|
'download-then-kill': (current, target, needsDownload, dependenciesMetForStart, dependenciesMetForKill) ->
|
2017-11-01 07:55:36 +00:00
|
|
|
if needsDownload
|
|
|
|
return fetchAction(target)
|
2017-11-23 02:15:55 +00:00
|
|
|
else if dependenciesMetForKill()
|
2017-11-01 07:55:36 +00:00
|
|
|
# We only kill when dependencies are already met, so that we minimize downtime
|
|
|
|
return serviceAction('kill', target.serviceId, current, target)
|
|
|
|
else
|
|
|
|
return null
|
2017-11-23 02:15:55 +00:00
|
|
|
'kill-then-download': (current, target) ->
|
2017-11-01 07:55:36 +00:00
|
|
|
return serviceAction('kill', target.serviceId, current, target)
|
2017-11-23 02:15:55 +00:00
|
|
|
'delete-then-download': (current, target, needsDownload) ->
|
|
|
|
return serviceAction('kill', target.serviceId, current, target, removeImage: needsDownload)
|
|
|
|
'hand-over': (current, target, needsDownload, dependenciesMetForStart, dependenciesMetForKill, needsSpecialKill, timeout) ->
|
2017-11-01 07:55:36 +00:00
|
|
|
if needsDownload
|
|
|
|
return fetchAction(target)
|
2018-02-13 23:23:44 +00:00
|
|
|
else if needsSpecialKill and dependenciesMetForKill()
|
2017-11-23 02:15:55 +00:00
|
|
|
return serviceAction('kill', target.serviceId, current, target)
|
|
|
|
else if dependenciesMetForStart()
|
2017-11-01 07:55:36 +00:00
|
|
|
return serviceAction('handover', target.serviceId, current, target, timeout: timeout)
|
|
|
|
else
|
|
|
|
return null
|
|
|
|
}
|
|
|
|
|
2018-09-28 13:32:38 +00:00
|
|
|
_nextStepForService: ({ current, target }, updateContext, localMode) =>
|
2018-01-27 01:29:19 +00:00
|
|
|
{ targetApp, networkPairs, volumePairs, installPairs, updatePairs, availableImages, downloading } = updateContext
|
|
|
|
if current?.status == 'Stopping'
|
|
|
|
# There is already a kill step in progress for this service, so we wait
|
|
|
|
return { action: 'noop' }
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2018-04-11 22:58:17 +00:00
|
|
|
if current?.status == 'Dead'
|
|
|
|
# Dead containers have to be removed
|
|
|
|
return serviceAction('remove', current.serviceId, current)
|
|
|
|
|
2018-09-28 13:32:38 +00:00
|
|
|
needsDownload = false
|
|
|
|
# Don't attempt to fetch any images in local mode, they should already be there
|
|
|
|
if !localMode
|
2019-01-22 13:27:00 +00:00
|
|
|
needsDownload = !_.some availableImages, (image) ->
|
|
|
|
image.dockerImageId == target?.config.image or Images.isSameImage(image, { name: target.imageName })
|
2018-01-27 01:29:19 +00:00
|
|
|
|
|
|
|
# This service needs an image download but it's currently downloading, so we wait
|
|
|
|
if needsDownload and target?.imageId in downloading
|
|
|
|
return { action: 'noop' }
|
|
|
|
|
2017-11-23 02:15:55 +00:00
|
|
|
dependenciesMetForStart = =>
|
2018-01-27 01:29:19 +00:00
|
|
|
@_dependenciesMetForServiceStart(target, networkPairs, volumePairs, installPairs.concat(updatePairs))
|
2017-11-23 02:15:55 +00:00
|
|
|
dependenciesMetForKill = =>
|
|
|
|
!needsDownload and @_dependenciesMetForServiceKill(target, targetApp, availableImages)
|
|
|
|
|
|
|
|
# If the service is using a network or volume that is being updated, we need to kill it
|
|
|
|
# even if its strategy is handover
|
|
|
|
needsSpecialKill = @_hasCurrentNetworksOrVolumes(current, networkPairs, volumePairs)
|
|
|
|
|
2018-09-18 14:36:59 +00:00
|
|
|
if current?.isEqualConfig(target)
|
2017-11-01 07:55:36 +00:00
|
|
|
# We're only stopping/starting it
|
|
|
|
return @_updateContainerStep(current, target)
|
|
|
|
else if !current?
|
|
|
|
# Either this is a new service, or the current one has already been killed
|
2017-11-23 02:15:55 +00:00
|
|
|
return @_fetchOrStartStep(current, target, needsDownload, dependenciesMetForStart)
|
2017-11-01 07:55:36 +00:00
|
|
|
else
|
2018-10-17 14:34:07 +00:00
|
|
|
strategy = checkString(target.config.labels['io.balena.update.strategy'])
|
2017-11-01 07:55:36 +00:00
|
|
|
validStrategies = [ 'download-then-kill', 'kill-then-download', 'delete-then-download', 'hand-over' ]
|
2017-11-29 21:32:57 +00:00
|
|
|
if !_.includes(validStrategies, strategy)
|
|
|
|
strategy = 'download-then-kill'
|
2018-10-17 14:34:07 +00:00
|
|
|
timeout = checkInt(target.config.labels['io.balena.update.handover-timeout'])
|
2017-11-23 02:15:55 +00:00
|
|
|
return @_strategySteps[strategy](current, target, needsDownload, dependenciesMetForStart, dependenciesMetForKill, needsSpecialKill, timeout)
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2018-09-28 13:32:38 +00:00
|
|
|
_nextStepsForAppUpdate: (currentApp, targetApp, localMode, availableImages = [], downloading = []) =>
|
2017-11-01 07:55:36 +00:00
|
|
|
emptyApp = { services: [], volumes: {}, networks: {} }
|
|
|
|
if !targetApp?
|
|
|
|
targetApp = emptyApp
|
2017-11-29 21:32:57 +00:00
|
|
|
else
|
|
|
|
# Create the default network for the target app
|
Add support for init, mem_reservation, shm_size, read_only and sysctls.
Also several bugfixes:
* Fix VPN control, logging in deviceConfig, and action executors in proxyvisor
* Fix bug in calculation of dependencies due to fields still using snake_case
* Fix snake_case in a migration, and remove unused lib/migration.coffee
* In healthcheck, count deviceState as healthy when a fetch is in progress (as in the non-multicontainer supervisor)
* Set always as default restart policy
* Fix healthcheck, stop_grace_period and mem_limit
* Lint and reduce some cyclomatic complexities
* Namespace volumes and networks by appId, switch default network name to 'default', fix dependencies in networks and volumes, fix duplicated kill steps, fix fat arrow on provisioning
* Check that supervisor network is okay every time we're applying target state
Signed-off-by: Pablo Carranza Velez <pablo@resin.io>
2017-12-12 00:35:23 +00:00
|
|
|
targetApp.networks['default'] ?= {}
|
2018-02-13 23:23:44 +00:00
|
|
|
currentApp ?= emptyApp
|
2018-01-24 01:28:59 +00:00
|
|
|
if currentApp.services?.length == 1 and targetApp.services?.length == 1 and
|
|
|
|
targetApp.services[0].serviceName == currentApp.services[0].serviceName and
|
2018-10-17 14:34:07 +00:00
|
|
|
checkTruthy(currentApp.services[0].config.labels['io.balena.legacy-container'])
|
2018-01-24 01:28:59 +00:00
|
|
|
# This is a legacy preloaded app or container, so we didn't have things like serviceId.
|
|
|
|
# We hack a few things to avoid an unnecessary restart of the preloaded app
|
|
|
|
# (but ensuring it gets updated if it actually changed)
|
2018-10-20 17:08:49 +00:00
|
|
|
targetApp.services[0].config.labels['io.balena.legacy-container'] = currentApp.services[0].config.labels['io.balena.legacy-container']
|
|
|
|
targetApp.services[0].config.labels['io.balena.service-id'] = currentApp.services[0].config.labels['io.balena.service-id']
|
2018-01-24 01:28:59 +00:00
|
|
|
targetApp.services[0].serviceId = currentApp.services[0].serviceId
|
|
|
|
|
2017-11-01 07:55:36 +00:00
|
|
|
appId = targetApp.appId ? currentApp.appId
|
2017-12-15 07:04:25 +00:00
|
|
|
networkPairs = @compareNetworksForUpdate({ current: currentApp.networks, target: targetApp.networks }, appId)
|
|
|
|
volumePairs = @compareVolumesForUpdate({ current: currentApp.volumes, target: targetApp.volumes }, appId)
|
2017-11-29 21:32:57 +00:00
|
|
|
{ removePairs, installPairs, updatePairs } = @compareServicesForUpdate(currentApp.services, targetApp.services)
|
|
|
|
steps = []
|
|
|
|
# All removePairs get a 'kill' action
|
|
|
|
for pair in removePairs
|
2018-01-27 01:29:19 +00:00
|
|
|
if pair.current.status != 'Stopping'
|
Add support for init, mem_reservation, shm_size, read_only and sysctls.
Also several bugfixes:
* Fix VPN control, logging in deviceConfig, and action executors in proxyvisor
* Fix bug in calculation of dependencies due to fields still using snake_case
* Fix snake_case in a migration, and remove unused lib/migration.coffee
* In healthcheck, count deviceState as healthy when a fetch is in progress (as in the non-multicontainer supervisor)
* Set always as default restart policy
* Fix healthcheck, stop_grace_period and mem_limit
* Lint and reduce some cyclomatic complexities
* Namespace volumes and networks by appId, switch default network name to 'default', fix dependencies in networks and volumes, fix duplicated kill steps, fix fat arrow on provisioning
* Check that supervisor network is okay every time we're applying target state
Signed-off-by: Pablo Carranza Velez <pablo@resin.io>
2017-12-12 00:35:23 +00:00
|
|
|
steps.push(serviceAction('kill', pair.current.serviceId, pair.current, null))
|
2018-01-27 01:29:19 +00:00
|
|
|
else
|
|
|
|
steps.push({ action: 'noop' })
|
2018-09-18 14:36:59 +00:00
|
|
|
|
2017-11-29 21:32:57 +00:00
|
|
|
# next step for install pairs in download - start order, but start requires dependencies, networks and volumes met
|
|
|
|
# next step for update pairs in order by update strategy. start requires dependencies, networks and volumes met.
|
|
|
|
for pair in installPairs.concat(updatePairs)
|
2018-09-28 13:32:38 +00:00
|
|
|
step = @_nextStepForService(pair, { targetApp, networkPairs, volumePairs, installPairs, updatePairs, availableImages, downloading }, localMode)
|
2017-11-29 21:32:57 +00:00
|
|
|
if step?
|
|
|
|
steps.push(step)
|
|
|
|
# next step for network pairs - remove requires services killed, create kill if no pairs or steps affect that service
|
|
|
|
for pair in networkPairs
|
|
|
|
pairSteps = @_nextStepsForNetwork(pair, currentApp, removePairs.concat(updatePairs))
|
|
|
|
steps = steps.concat(pairSteps)
|
|
|
|
# next step for volume pairs - remove requires services killed, create kill if no pairs or steps affect that service
|
|
|
|
for pair in volumePairs
|
|
|
|
pairSteps = @_nextStepsForVolume(pair, currentApp, removePairs.concat(updatePairs))
|
|
|
|
steps = steps.concat(pairSteps)
|
2018-06-20 11:57:16 +00:00
|
|
|
|
|
|
|
if _.isEmpty(steps) and currentApp.commit != targetApp.commit
|
|
|
|
steps.push({
|
|
|
|
action: 'updateCommit'
|
|
|
|
target: targetApp.commit
|
|
|
|
})
|
|
|
|
|
2018-01-27 01:29:19 +00:00
|
|
|
return _.map(steps, (step) -> _.assign({}, step, { appId }))
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
normaliseAppForDB: (app) =>
|
|
|
|
services = _.map app.services, (s, serviceId) ->
|
|
|
|
service = _.clone(s)
|
|
|
|
service.appId = app.appId
|
|
|
|
service.releaseId = app.releaseId
|
2017-11-29 21:32:57 +00:00
|
|
|
service.serviceId = checkInt(serviceId)
|
2017-11-01 07:55:36 +00:00
|
|
|
service.commit = app.commit
|
|
|
|
return service
|
|
|
|
Promise.map services, (service) =>
|
|
|
|
service.image = @images.normalise(service.image)
|
|
|
|
Promise.props(service)
|
|
|
|
.then (services) ->
|
|
|
|
dbApp = {
|
|
|
|
appId: app.appId
|
|
|
|
commit: app.commit
|
|
|
|
name: app.name
|
2018-05-18 12:27:41 +00:00
|
|
|
source: app.source
|
2017-11-01 07:55:36 +00:00
|
|
|
releaseId: app.releaseId
|
|
|
|
services: JSON.stringify(services)
|
|
|
|
networks: JSON.stringify(app.networks ? {})
|
|
|
|
volumes: JSON.stringify(app.volumes ? {})
|
|
|
|
}
|
|
|
|
return dbApp
|
|
|
|
|
|
|
|
createTargetService: (service, opts) ->
|
2019-01-22 13:27:00 +00:00
|
|
|
# The image class now returns a native promise, so wrap
|
|
|
|
# this in a bluebird promise until we convert this to typescript
|
|
|
|
Promise.resolve(@images.inspectByName(service.image))
|
2017-12-15 05:15:59 +00:00
|
|
|
.catchReturn(NotFoundError, undefined)
|
2017-12-08 01:18:21 +00:00
|
|
|
.then (imageInfo) ->
|
|
|
|
serviceOpts = {
|
|
|
|
serviceName: service.serviceName
|
|
|
|
imageInfo
|
|
|
|
}
|
|
|
|
_.assign(serviceOpts, opts)
|
2017-12-15 05:15:59 +00:00
|
|
|
service.imageName = service.image
|
|
|
|
if imageInfo?.Id?
|
|
|
|
service.image = imageInfo.Id
|
2018-09-18 14:36:59 +00:00
|
|
|
return Service.fromComposeObject(service, serviceOpts)
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
normaliseAndExtendAppFromDB: (app) =>
|
|
|
|
Promise.join(
|
|
|
|
@config.get('extendedEnvOptions')
|
2019-01-23 19:24:57 +00:00
|
|
|
@docker.getNetworkGateway(constants.supervisorNetworkInterface).catch(-> '127.0.0.1')
|
2018-02-13 23:23:44 +00:00
|
|
|
Promise.props({
|
|
|
|
firmware: pathExistsOnHost('/lib/firmware')
|
|
|
|
modules: pathExistsOnHost('/lib/modules')
|
|
|
|
})
|
2018-03-19 19:28:39 +00:00
|
|
|
fs.readFileAsync(path.join(constants.rootMountPoint, '/etc/hostname'), 'utf8').then(_.trim)
|
2018-03-14 19:10:19 +00:00
|
|
|
(opts, supervisorApiHost, hostPathExists, hostnameOnHost) =>
|
2017-11-01 07:55:36 +00:00
|
|
|
configOpts = {
|
|
|
|
appName: app.name
|
|
|
|
supervisorApiHost
|
2018-02-13 23:23:44 +00:00
|
|
|
hostPathExists
|
2018-03-14 19:10:19 +00:00
|
|
|
hostnameOnHost
|
2017-11-01 07:55:36 +00:00
|
|
|
}
|
|
|
|
_.assign(configOpts, opts)
|
|
|
|
volumes = JSON.parse(app.volumes)
|
2017-11-10 10:43:13 +00:00
|
|
|
volumes = _.mapValues volumes, (volumeConfig) ->
|
|
|
|
volumeConfig ?= {}
|
|
|
|
volumeConfig.labels ?= {}
|
|
|
|
return volumeConfig
|
2017-11-01 07:55:36 +00:00
|
|
|
Promise.map(JSON.parse(app.services), (service) => @createTargetService(service, configOpts))
|
|
|
|
.then (services) ->
|
|
|
|
# If a named volume is defined in a service, we add it app-wide so that we can track it and purge it
|
2017-11-29 21:32:57 +00:00
|
|
|
for s in services
|
2017-11-01 07:55:36 +00:00
|
|
|
serviceNamedVolumes = s.getNamedVolumes()
|
2017-11-29 21:32:57 +00:00
|
|
|
for name in serviceNamedVolumes
|
2017-11-01 07:55:36 +00:00
|
|
|
volumes[name] ?= { labels: {} }
|
|
|
|
outApp = {
|
|
|
|
appId: app.appId
|
|
|
|
name: app.name
|
|
|
|
commit: app.commit
|
|
|
|
releaseId: app.releaseId
|
|
|
|
services: services
|
|
|
|
networks: JSON.parse(app.networks)
|
|
|
|
volumes: volumes
|
|
|
|
}
|
|
|
|
return outApp
|
|
|
|
)
|
|
|
|
|
2018-05-18 12:27:41 +00:00
|
|
|
setTarget: (apps, dependent , source, trx) =>
|
2017-11-01 07:55:36 +00:00
|
|
|
setInTransaction = (trx) =>
|
|
|
|
Promise.try =>
|
2017-11-29 21:32:57 +00:00
|
|
|
appsArray = _.map apps, (app, appId) ->
|
|
|
|
appClone = _.clone(app)
|
|
|
|
appClone.appId = checkInt(appId)
|
2018-05-18 12:27:41 +00:00
|
|
|
appClone.source = source
|
2017-11-29 21:32:57 +00:00
|
|
|
return appClone
|
|
|
|
Promise.map(appsArray, @normaliseAppForDB)
|
2018-02-22 18:57:50 +00:00
|
|
|
.tap (appsForDB) =>
|
2017-11-29 21:32:57 +00:00
|
|
|
Promise.map appsForDB, (app) =>
|
|
|
|
@db.upsertModel('app', app, { appId: app.appId }, trx)
|
2018-10-30 17:05:37 +00:00
|
|
|
.then (appsForDB) ->
|
|
|
|
trx('app').where({ source }).whereNotIn('appId', _.map(appsForDB, 'appId')).del()
|
2017-11-01 07:55:36 +00:00
|
|
|
.then =>
|
|
|
|
@proxyvisor.setTargetInTransaction(dependent, trx)
|
|
|
|
|
|
|
|
Promise.try =>
|
|
|
|
if trx?
|
|
|
|
setInTransaction(trx)
|
|
|
|
else
|
|
|
|
@db.transaction(setInTransaction)
|
|
|
|
.then =>
|
2018-01-25 17:41:00 +00:00
|
|
|
@_targetVolatilePerImageId = {}
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2018-02-02 20:08:58 +00:00
|
|
|
setTargetVolatileForService: (imageId, target) =>
|
2018-01-25 17:41:00 +00:00
|
|
|
@_targetVolatilePerImageId[imageId] ?= {}
|
|
|
|
_.assign(@_targetVolatilePerImageId[imageId], target)
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2018-02-02 20:08:58 +00:00
|
|
|
clearTargetVolatileForServices: (imageIds) =>
|
|
|
|
for imageId in imageIds
|
|
|
|
@_targetVolatilePerImageId[imageId] = {}
|
|
|
|
|
2017-11-01 07:55:36 +00:00
|
|
|
getTargetApps: =>
|
2018-09-28 13:32:38 +00:00
|
|
|
@config.getMany(['apiEndpoint', 'localMode']). then ({ apiEndpoint, localMode }) =>
|
|
|
|
source = apiEndpoint
|
2019-01-09 10:04:04 +00:00
|
|
|
if localMode
|
2018-09-28 13:32:38 +00:00
|
|
|
source = 'local'
|
2018-05-18 12:27:41 +00:00
|
|
|
Promise.map(@db.models('app').where({ source }), @normaliseAndExtendAppFromDB)
|
2017-11-01 07:55:36 +00:00
|
|
|
.map (app) =>
|
|
|
|
if !_.isEmpty(app.services)
|
|
|
|
app.services = _.map app.services, (service) =>
|
2018-01-25 17:41:00 +00:00
|
|
|
if @_targetVolatilePerImageId[service.imageId]?
|
|
|
|
_.merge(service, @_targetVolatilePerImageId[service.imageId])
|
2017-11-01 07:55:36 +00:00
|
|
|
return service
|
|
|
|
return app
|
2018-01-19 19:31:02 +00:00
|
|
|
.then (apps) ->
|
|
|
|
return _.keyBy(apps, 'appId')
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
getDependentTargets: =>
|
|
|
|
@proxyvisor.getTarget()
|
|
|
|
|
|
|
|
bestDeltaSource: (image, available) ->
|
|
|
|
if !image.dependent
|
|
|
|
for availableImage in available
|
|
|
|
if availableImage.serviceName == image.serviceName and availableImage.appId == image.appId
|
|
|
|
return availableImage.name
|
|
|
|
for availableImage in available
|
|
|
|
if availableImage.serviceName == image.serviceName
|
|
|
|
return availableImage.name
|
|
|
|
for availableImage in available
|
|
|
|
if availableImage.appId == image.appId
|
|
|
|
return availableImage.name
|
2018-10-26 00:58:46 +00:00
|
|
|
return null
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2017-12-15 05:15:59 +00:00
|
|
|
# returns:
|
|
|
|
# imagesToRemove: images that
|
2017-11-01 07:55:36 +00:00
|
|
|
# - are not used in the current state, and
|
|
|
|
# - are not going to be used in the target state, and
|
|
|
|
# - are not needed for delta source / pull caching or would be used for a service with delete-then-download as strategy
|
2017-12-15 05:15:59 +00:00
|
|
|
# imagesToSave: images that
|
|
|
|
# - are locally available (i.e. an image with the same digest exists)
|
|
|
|
# - are not saved to the DB with all their metadata (serviceId, serviceName, etc)
|
2018-09-28 13:32:38 +00:00
|
|
|
_compareImages: (current, target, available, localMode) =>
|
2017-12-15 05:15:59 +00:00
|
|
|
allImagesForTargetApp = (app) -> _.map(app.services, imageForService)
|
|
|
|
allImagesForCurrentApp = (app) ->
|
|
|
|
_.map app.services, (service) ->
|
2018-09-18 14:36:59 +00:00
|
|
|
img = _.find(available, { dockerImageId: service.config.image, imageId: service.imageId }) ? _.find(available, { dockerImageId: service.config.image })
|
2018-03-13 21:41:04 +00:00
|
|
|
return _.omit(img, [ 'dockerImageId', 'id' ])
|
2018-10-23 22:08:51 +00:00
|
|
|
allImageDockerIdsForTargetApp = (app) ->
|
2018-11-02 14:15:01 +00:00
|
|
|
_(app.services).map((svc) -> [ svc.imageName, svc.config.image ])
|
|
|
|
.filter((img) -> img[1]?)
|
|
|
|
.value()
|
2018-09-18 14:36:59 +00:00
|
|
|
|
2017-12-15 05:15:59 +00:00
|
|
|
availableWithoutIds = _.map(available, (image) -> _.omit(image, [ 'dockerImageId', 'id' ]))
|
2018-02-13 23:23:44 +00:00
|
|
|
currentImages = _.flatMap(current.local.apps, allImagesForCurrentApp)
|
|
|
|
targetImages = _.flatMap(target.local.apps, allImagesForTargetApp)
|
2018-10-23 22:08:51 +00:00
|
|
|
targetImageDockerIds = _.fromPairs(_.flatMap(target.local.apps, allImageDockerIdsForTargetApp))
|
2018-09-18 14:36:59 +00:00
|
|
|
|
2017-12-15 05:15:59 +00:00
|
|
|
availableAndUnused = _.filter availableWithoutIds, (image) ->
|
|
|
|
!_.some currentImages.concat(targetImages), (imageInUse) -> _.isEqual(image, imageInUse)
|
2018-09-18 14:36:59 +00:00
|
|
|
|
2019-01-22 13:27:00 +00:00
|
|
|
imagesToDownload = _.filter targetImages, (targetImage) ->
|
|
|
|
!_.some available, (availableImage) -> Images.isSameImage(availableImage, targetImage)
|
2018-09-18 14:36:59 +00:00
|
|
|
|
2017-12-15 05:15:59 +00:00
|
|
|
# Images that are available but we don't have them in the DB with the exact metadata:
|
2018-09-28 13:32:38 +00:00
|
|
|
imagesToSave = []
|
|
|
|
if !localMode
|
2019-01-22 13:27:00 +00:00
|
|
|
imagesToSave = _.filter targetImages, (targetImage) ->
|
2018-10-23 22:08:51 +00:00
|
|
|
isActuallyAvailable = _.some(
|
2019-01-22 13:27:00 +00:00
|
|
|
available, (availableImage) ->
|
|
|
|
if Images.isSameImage(availableImage, targetImage)
|
2018-10-23 22:08:51 +00:00
|
|
|
return true
|
|
|
|
if availableImage.dockerImageId == targetImageDockerIds[targetImage.name]
|
|
|
|
return true
|
|
|
|
return false
|
|
|
|
)
|
|
|
|
isNotSaved = !_.some availableWithoutIds, (img) -> _.isEqual(img, targetImage)
|
|
|
|
return isActuallyAvailable and isNotSaved
|
2017-11-01 07:55:36 +00:00
|
|
|
|
|
|
|
deltaSources = _.map imagesToDownload, (image) =>
|
|
|
|
return @bestDeltaSource(image, available)
|
|
|
|
proxyvisorImages = @proxyvisor.imagesInUse(current, target)
|
2018-09-18 14:36:59 +00:00
|
|
|
|
2019-01-22 13:27:00 +00:00
|
|
|
imagesToRemove = _.filter availableAndUnused, (image) ->
|
2018-02-13 23:23:44 +00:00
|
|
|
notUsedForDelta = !_.includes(deltaSources, image.name)
|
2019-01-22 13:27:00 +00:00
|
|
|
notUsedByProxyvisor = !_.some proxyvisorImages, (proxyvisorImage) -> Images.isSameImage(image, { name: proxyvisorImage })
|
2017-11-01 07:55:36 +00:00
|
|
|
return notUsedForDelta and notUsedByProxyvisor
|
2017-12-15 05:15:59 +00:00
|
|
|
return { imagesToSave, imagesToRemove }
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2018-01-30 16:42:03 +00:00
|
|
|
_inferNextSteps: (cleanupNeeded, availableImages, downloading, supervisorNetworkReady, current, target, ignoreImages, { localMode, delta }) =>
|
2017-11-01 07:55:36 +00:00
|
|
|
Promise.try =>
|
2018-10-09 11:00:31 +00:00
|
|
|
if localMode
|
2018-01-23 03:50:19 +00:00
|
|
|
ignoreImages = true
|
2018-01-19 19:31:02 +00:00
|
|
|
currentByAppId = current.local.apps ? {}
|
|
|
|
targetByAppId = target.local.apps ? {}
|
2017-11-01 07:55:36 +00:00
|
|
|
nextSteps = []
|
Add support for init, mem_reservation, shm_size, read_only and sysctls.
Also several bugfixes:
* Fix VPN control, logging in deviceConfig, and action executors in proxyvisor
* Fix bug in calculation of dependencies due to fields still using snake_case
* Fix snake_case in a migration, and remove unused lib/migration.coffee
* In healthcheck, count deviceState as healthy when a fetch is in progress (as in the non-multicontainer supervisor)
* Set always as default restart policy
* Fix healthcheck, stop_grace_period and mem_limit
* Lint and reduce some cyclomatic complexities
* Namespace volumes and networks by appId, switch default network name to 'default', fix dependencies in networks and volumes, fix duplicated kill steps, fix fat arrow on provisioning
* Check that supervisor network is okay every time we're applying target state
Signed-off-by: Pablo Carranza Velez <pablo@resin.io>
2017-12-12 00:35:23 +00:00
|
|
|
if !supervisorNetworkReady
|
|
|
|
nextSteps.push({ action: 'ensureSupervisorNetwork' })
|
|
|
|
else
|
2018-01-27 01:29:19 +00:00
|
|
|
if !ignoreImages and _.isEmpty(downloading)
|
Add support for init, mem_reservation, shm_size, read_only and sysctls.
Also several bugfixes:
* Fix VPN control, logging in deviceConfig, and action executors in proxyvisor
* Fix bug in calculation of dependencies due to fields still using snake_case
* Fix snake_case in a migration, and remove unused lib/migration.coffee
* In healthcheck, count deviceState as healthy when a fetch is in progress (as in the non-multicontainer supervisor)
* Set always as default restart policy
* Fix healthcheck, stop_grace_period and mem_limit
* Lint and reduce some cyclomatic complexities
* Namespace volumes and networks by appId, switch default network name to 'default', fix dependencies in networks and volumes, fix duplicated kill steps, fix fat arrow on provisioning
* Check that supervisor network is okay every time we're applying target state
Signed-off-by: Pablo Carranza Velez <pablo@resin.io>
2017-12-12 00:35:23 +00:00
|
|
|
if cleanupNeeded
|
|
|
|
nextSteps.push({ action: 'cleanup' })
|
2018-09-28 13:32:38 +00:00
|
|
|
{ imagesToRemove, imagesToSave } = @_compareImages(current, target, availableImages, localMode)
|
2017-12-15 05:15:59 +00:00
|
|
|
for image in imagesToSave
|
|
|
|
nextSteps.push({ action: 'saveImage', image })
|
|
|
|
if _.isEmpty(imagesToSave)
|
|
|
|
for image in imagesToRemove
|
|
|
|
nextSteps.push({ action: 'removeImage', image })
|
Add support for init, mem_reservation, shm_size, read_only and sysctls.
Also several bugfixes:
* Fix VPN control, logging in deviceConfig, and action executors in proxyvisor
* Fix bug in calculation of dependencies due to fields still using snake_case
* Fix snake_case in a migration, and remove unused lib/migration.coffee
* In healthcheck, count deviceState as healthy when a fetch is in progress (as in the non-multicontainer supervisor)
* Set always as default restart policy
* Fix healthcheck, stop_grace_period and mem_limit
* Lint and reduce some cyclomatic complexities
* Namespace volumes and networks by appId, switch default network name to 'default', fix dependencies in networks and volumes, fix duplicated kill steps, fix fat arrow on provisioning
* Check that supervisor network is okay every time we're applying target state
Signed-off-by: Pablo Carranza Velez <pablo@resin.io>
2017-12-12 00:35:23 +00:00
|
|
|
# If we have to remove any images, we do that before anything else
|
|
|
|
if _.isEmpty(nextSteps)
|
|
|
|
allAppIds = _.union(_.keys(currentByAppId), _.keys(targetByAppId))
|
|
|
|
for appId in allAppIds
|
2018-09-28 13:32:38 +00:00
|
|
|
nextSteps = nextSteps.concat(@_nextStepsForAppUpdate(currentByAppId[appId], targetByAppId[appId], localMode, availableImages, downloading))
|
2018-01-30 16:42:03 +00:00
|
|
|
newDownloads = _.filter(nextSteps, (s) -> s.action == 'fetch').length
|
|
|
|
if !ignoreImages and delta and newDownloads > 0
|
|
|
|
downloadsToBlock = downloading.length + newDownloads - constants.maxDeltaDownloads
|
|
|
|
while downloadsToBlock > 0
|
2018-02-13 23:23:44 +00:00
|
|
|
_.pull(nextSteps, _.find(nextSteps, action: 'fetch'))
|
2018-01-30 16:42:03 +00:00
|
|
|
downloadsToBlock -= 1
|
2018-01-27 01:29:19 +00:00
|
|
|
if !ignoreImages and _.isEmpty(nextSteps) and !_.isEmpty(downloading)
|
|
|
|
nextSteps.push({ action: 'noop' })
|
|
|
|
return _.uniqWith(nextSteps, _.isEqual)
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2018-01-19 19:31:02 +00:00
|
|
|
stopAll: ({ force = false, skipLock = false } = {}) =>
|
2019-01-31 10:09:25 +00:00
|
|
|
Promise.resolve(@services.getAll())
|
2017-11-01 07:55:36 +00:00
|
|
|
.map (service) =>
|
2018-01-19 19:31:02 +00:00
|
|
|
@_lockingIfNecessary service.appId, { force, skipLock }, =>
|
2018-01-27 01:29:19 +00:00
|
|
|
@services.kill(service, { removeContainer: false, wait: true })
|
2018-02-07 01:40:59 +00:00
|
|
|
.then =>
|
|
|
|
delete @_containerStarted[service.containerId]
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2017-11-29 21:32:57 +00:00
|
|
|
_lockingIfNecessary: (appId, { force = false, skipLock = false } = {}, fn) =>
|
|
|
|
if skipLock
|
2018-02-13 23:23:44 +00:00
|
|
|
return Promise.try(fn)
|
2017-11-01 07:55:36 +00:00
|
|
|
@config.get('lockOverride')
|
|
|
|
.then (lockOverride) ->
|
2019-01-09 10:04:04 +00:00
|
|
|
return lockOverride or force
|
2017-11-01 07:55:36 +00:00
|
|
|
.then (force) ->
|
2017-11-29 21:32:57 +00:00
|
|
|
updateLock.lock(appId, { force }, fn)
|
2017-11-01 07:55:36 +00:00
|
|
|
|
2018-01-19 19:31:02 +00:00
|
|
|
executeStepAction: (step, { force = false, skipLock = false } = {}) =>
|
2017-11-01 07:55:36 +00:00
|
|
|
if _.includes(@proxyvisor.validActions, step.action)
|
|
|
|
return @proxyvisor.executeStepAction(step)
|
|
|
|
if !_.includes(@validActions, step.action)
|
|
|
|
return Promise.reject(new Error("Invalid action #{step.action}"))
|
2018-01-19 19:31:02 +00:00
|
|
|
@actionExecutors[step.action](step, { force, skipLock })
|
2017-11-01 07:55:36 +00:00
|
|
|
|
Fix a race condition that could cause an unnecessary restart of a service immediately after download
Up to now, there was a slim but non-zero chance that an image would be downloaded between the call to `@getTarget` inside deviceState
(which gets the target state and creates Service objects using information from available images), and the call to
`@images.getAvailable` in ApplicationManager (which is used to determine whether we should keep waiting for a download or start the
service). If this race condition happened, then the ApplicationManager would infer that a service was ready to be started (because
the image appears as available), but would have incomplete information about the service because the image wasn't available when
the Service object was created. The result would be that the service would be started, and then immediately on the next applyTarget
the ApplicationManager would try to kill it and restart it to update it with the complete information from the image.
This patch changes this behavior by ensuring that all of the additional information about the current state, which includes available images,
is gathered *before* building the current and target states that we compare. This means that if the image is downloaded after the call to getAvailable, the Service might be constructed with all the information about the image, but it won't be started until the next pass, because ApplicationManager will treat it as still downloading.
Change-type: patch
Signed-off-by: Pablo Carranza Velez <pablo@balena.io>
2018-12-12 23:56:14 +00:00
|
|
|
# Gather a snapshot of all the auxiliary runtime state that the
# current-vs-target state comparison depends on.
#
# This snapshot must be taken *before* the current and target states are
# built: if an image finished downloading between building the states and
# querying image availability, a service could be started with incomplete
# information and then be killed/restarted on the very next apply pass.
# Taking the snapshot first closes that race.
#
# Resolves to an object with keys:
#   cleanupNeeded          - whether dangling images should be cleaned up
#   availableImages        - images currently available on the device
#   downloading            - ids of images still being downloaded
#   supervisorNetworkReady - whether the supervisor docker network is up
#   delta                  - the 'delta' config value
#   localMode              - the 'localMode' config value
getExtraStateForComparison: =>
	@config.get('localMode').then (localMode) =>
		Promise.props({
			cleanupNeeded: @images.isCleanupNeeded()
			availableImages: @images.getAvailable(localMode)
			downloading: @images.getDownloadingImageIds()
			supervisorNetworkReady: @networks.supervisorNetworkReady()
			delta: @config.get('delta')
			localMode
		})
|
|
|
# Compute the list of steps required to move the device from currentState
# towards targetState, combining the supervisor's own steps with any steps
# required by the proxyvisor (dependent devices).
#
# @param currentState  current applications state
# @param targetState   desired applications state
# @param extraState    snapshot from getExtraStateForComparison — taken
#                      before the states were built to avoid a race with
#                      image downloads
# @param ignoreImages  when true (e.g. while executing an API action),
#                      image fetch steps are disallowed and cause the
#                      returned promise to reject
# Resolves to the concatenated array of supervisor and proxyvisor steps.
getRequiredSteps: (currentState, targetState, extraState, ignoreImages = false) =>
	{ cleanupNeeded, availableImages, downloading, supervisorNetworkReady, delta, localMode } = extraState
	conf = { delta, localMode }
	if conf.localMode
		# In local mode we never want automatic image cleanup
		cleanupNeeded = false
	@_inferNextSteps(cleanupNeeded, availableImages, downloading, supervisorNetworkReady, currentState, targetState, ignoreImages, conf)
	.then (nextSteps) =>
		if ignoreImages and _.some(nextSteps, action: 'fetch')
			throw new Error('Cannot fetch images while executing an API action')
		@proxyvisor.getRequiredSteps(availableImages, downloading, currentState, targetState, nextSteps)
		.then (proxyvisorSteps) ->
			return nextSteps.concat(proxyvisorSteps)
|
2018-09-28 13:32:38 +00:00
|
|
|
|
|
|
|
# Resolve a service's name from its serviceId by searching the target
# state. Resolves to the serviceName, or undefined when no service with
# that id exists (bluebird's .get on undefined yields undefined's
# property — NOTE(review): a missing service will make .get throw;
# callers appear to rely on a match existing).
serviceNameFromId: (serviceId) =>
	@getTargetApps().then (apps) ->
		# Multi-app warning!
		# We assume here that there will only be a single
		# application
		for appId, app of apps
			return _.find app.services, (svc) ->
				svc.serviceId == serviceId
	.get('serviceName') # bluebird .get: extract serviceName from the found service
|