Add support for init, mem_reservation, shm_size, read_only and sysctls.

Also several bugfixes:

* Fix VPN control, logging in deviceConfig, and action executors in proxyvisor

* Fix a bug in the dependency calculation caused by fields still using snake_case

* Fix snake_case in a migration, and remove unused lib/migration.coffee

* In healthcheck, count deviceState as healthy when a fetch is in progress (as in the non-multicontainer supervisor)

* Set 'always' as the default restart policy

* Fix healthcheck, stop_grace_period and mem_limit

* Lint, and reduce the cyclomatic complexity of some functions

* Namespace volumes and networks by appId, switch the default network name to 'default', fix dependencies in networks and volumes, fix duplicated kill steps, and fix a fat arrow in provisioning (see the naming sketch just after this list)

* Check that the supervisor network is OK every time we apply the target state
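
As a concrete illustration of the appId namespacing above: networks and volumes keep their compose-level name in the supervisor's model, but the resource created on the Docker engine is prefixed with the appId. A minimal sketch of the convention ('dockerName' is a hypothetical helper, not part of this commit):

dockerName = (appId, name) ->
  "#{appId}_#{name}"

# The app's default network and a named volume 'data' for app 1234:
console.log(dockerName(1234, 'default'))  # => '1234_default'
console.log(dockerName(1234, 'data'))     # => '1234_data'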

Signed-off-by: Pablo Carranza Velez <pablo@resin.io>
Pablo Carranza Velez 2017-12-11 16:35:23 -08:00
parent 739fe13cad
commit 25695aade5
11 changed files with 450 additions and 191 deletions

View File

@@ -32,6 +32,7 @@
"docker-delta": "^2.0.4",
"docker-progress": "^2.7.2",
"docker-toolbelt": "^3.2.1",
"duration-js": "^4.0.0",
"event-stream": "^3.0.20",
"express": "^4.0.0",
"json-mask": "^0.3.8",

View File

@@ -150,7 +150,7 @@ module.exports = class APIBinder
.then (opts) =>
if opts.registered_at? and opts.deviceId? and !opts.provisioningApiKey?
return
Promise.try ->
Promise.try =>
if opts.registered_at? and !opts.deviceId?
console.log('Device is registered but no device id available, attempting key exchange')
@_exchangeKeyAndGetDeviceOrRegenerate(opts)
@@ -257,6 +257,9 @@ module.exports = class APIBinder
currentConfig = currentState.local.config
targetConfig = targetState.local.config
Promise.mapSeries _.toPairs(currentConfig), ([ key, value ]) =>
# We never want to disable VPN if, for instance, it failed to start so far
if key == 'RESIN_SUPERVISOR_VPN_CONTROL'
value = 'true'
if !targetConfig[key]?
envVar = {
value

View File

@@ -10,7 +10,6 @@ process.env.DOCKER_HOST ?= "unix://#{constants.dockerSocket}"
Docker = require './lib/docker-utils'
updateLock = require './lib/update-lock'
{ checkTruthy, checkInt, checkString } = require './lib/validation'
{ NotFoundError } = require './lib/errors'
ServiceManager = require './compose/service-manager'
Service = require './compose/service'
@@ -176,6 +175,7 @@ module.exports = class ApplicationManager extends EventEmitter
@volumes = new Volumes({ @docker, @logger })
@proxyvisor = new Proxyvisor({ @config, @logger, @db, @docker, @images, applications: this })
@timeSpentFetching = 0
@fetchesInProgress = 0
@_targetVolatilePerServiceId = {}
@actionExecutors = {
stop: (step, { force = false } = {}) =>
@@ -201,7 +201,7 @@ module.exports = class ApplicationManager extends EventEmitter
@logger.logSystemMessage('No volumes to purge', { appId }, 'Purge data noop')
return
Promise.mapSeries _.toPairs(app.volumes ? {}), ([ name, config ]) =>
@volumes.remove({ name })
@volumes.remove({ name, appId })
.then =>
@volumes.create({ name, config, appId })
.then =>
@@ -224,6 +224,7 @@ module.exports = class ApplicationManager extends EventEmitter
@services.handover(step.current, step.target)
fetch: (step) =>
startTime = process.hrtime()
@fetchesInProgress += 1
Promise.join(
@config.get('fetchOptions')
@images.getAvailable()
@@ -232,6 +233,7 @@ module.exports = class ApplicationManager extends EventEmitter
@images.fetch(step.image, opts)
)
.finally =>
@fetchesInProgress -= 1
@timeSpentFetching += process.hrtime(startTime)[0]
@reportCurrentState(update_downloaded: true)
removeImage: (step) =>
@@ -246,6 +248,8 @@ module.exports = class ApplicationManager extends EventEmitter
removeNetworkOrVolume: (step) =>
model = if step.model is 'volume' then @volumes else @networks
model.remove(step.current)
ensureSupervisorNetwork: =>
@networks.ensureSupervisorNetwork()
}
@validActions = _.keys(@actionExecutors).concat(@proxyvisor.validActions)
@_router = new ApplicationManagerRouter(this)
@@ -261,19 +265,8 @@ module.exports = class ApplicationManager extends EventEmitter
reportCurrentState: (data) =>
@emit('change', data)
ensureSupervisorNetwork: =>
@docker.getNetwork(constants.supervisorNetworkInterface).inspect()
.catch NotFoundError, =>
@docker.createNetwork({
Name: constants.supervisorNetworkInterface
Options:
'com.docker.network.bridge.name': constants.supervisorNetworkInterface
})
init: =>
@images.cleanupDatabase()
.then =>
@ensureSupervisorNetwork()
.then =>
@services.attachToRunning()
.then =>
@@ -499,49 +492,50 @@ module.exports = class ApplicationManager extends EventEmitter
if !service?
return false
hasNetwork = _.some networkPairs, (pair) ->
pair.current.name == service.network_mode
"#{service.appId}_#{pair.current?.name}" == service.networkMode
if hasNetwork
return true
hasVolume = _.some service.volumes, (volume) ->
name = _.split(volume, ':')[0]
_.some volumePairs, (pair) ->
pair.current.name == name
"#{service.appId}_#{pair.current?.name}" == name
if hasVolume
return true
return false
# TODO: account for volumes-from, networks-from, links, etc
# TODO: support networks instead of only network_mode
# TODO: support networks instead of only networkMode
_dependenciesMetForServiceStart: (target, networkPairs, volumePairs, pendingPairs, stepsInProgress) ->
# for depends_on, check no install or update pairs have that service
dependencyUnmet = _.some target.depends_on ? [], (dependency) ->
# for dependsOn, check no install or update pairs have that service
dependencyUnmet = _.some target.dependsOn ? [], (dependency) ->
_.find(pendingPairs, (pair) -> pair.target?.serviceName == dependency)? or _.find(stepsInProgress, (step) -> step.target?.serviceName == dependency)?
if dependencyUnmet
return false
# for networks and volumes, check no network pairs have that volume name
if _.find(networkPairs, (pair) -> pair.target?.name == target.network_mode)?
if _.find(networkPairs, (pair) -> "#{target.appId}_#{pair.target?.name}" == target.networkMode)?
return false
if _.find(stepsInProgress, (step) -> step.model == 'network' and step.target?.name == target.network_mode)?
if _.find(stepsInProgress, (step) -> step.model == 'network' and "#{target.appId}_#{step.target?.name}" == target.networkMode)?
return false
volumeUnmet = _.some target.volumes, (volumeDefinition) ->
[ sourceName, destName ] = volumeDefinition.split(':')
if !destName? # If this is not a named volume, ignore it
return false
_.find(volumePairs, (pair) -> pair.target?.name == sourceName)? or _.find(stepsInProgress, (step) -> step.model == 'volume' and step.target.name == sourceName)?
return _.find(volumePairs, (pair) -> "#{target.appId}_#{pair.target?.name}" == sourceName)? or
_.find(stepsInProgress, (step) -> step.model == 'volume' and "#{target.appId}_#{step.target?.name}" == sourceName)?
return !volumeUnmet
# Unless the update strategy requires an early kill (i.e. kill-then-download, delete-then-download), we only want
# to kill a service once the images for the services it depends on have been downloaded, so as to minimize
# downtime (but not block the killing too much, potentially causing a deadlock)
_dependenciesMetForServiceKill: (target, targetApp, availableImages) =>
if target.depends_on?
for dependency in target.depends_on
if target.dependsOn?
for dependency in target.dependsOn
dependencyService = _.find(targetApp.services, (s) -> s.serviceName == dependency)
if !_.find(availableImages, (image) => @images.isSameImage(image, { name: dependencyService.image }))?
return false
return true
_nextStepsForNetworkOrVolume: ({ current, target }, currentApp, changingPairs, dependencyComparisonFn, model) ->
_nextStepsForNetworkOrVolume: ({ current, target }, currentApp, changingPairs, dependencyComparisonFn, model, stepsInProgress) ->
# Check none of the currentApp.services use this network or volume
if current?
dependencies = _.filter currentApp.services, (service) ->
@@ -553,24 +547,24 @@ module.exports = class ApplicationManager extends EventEmitter
# we have to kill them before removing the network/volume (e.g. when we're only updating the network config)
steps = []
for dependency in dependencies
if !_.some(changingPairs, (pair) -> pair.serviceId == dependency.serviceId)
if !_.some(changingPairs, (pair) -> pair.serviceId == dependency.serviceId) and !_.find(stepsInProgress, (step) -> step.serviceId == dependency.serviceId)?
steps.push(serviceAction('kill', dependency.serviceId, dependency))
return steps
else if target?
return [{ action: 'createNetworkOrVolume', model, target }]
_nextStepsForNetwork: ({ current, target }, currentApp, changingPairs) =>
_nextStepsForNetwork: ({ current, target }, currentApp, changingPairs, stepsInProgress) =>
dependencyComparisonFn = (service, current) ->
service.network_mode == current.name
@_nextStepsForNetworkOrVolume({ current, target }, currentApp, changingPairs, dependencyComparisonFn, 'network')
service.networkMode == "#{service.appId}_#{current?.name}"
@_nextStepsForNetworkOrVolume({ current, target }, currentApp, changingPairs, dependencyComparisonFn, 'network', stepsInProgress)
_nextStepsForVolume: ({ current, target }, currentApp, changingPairs) ->
_nextStepsForVolume: ({ current, target }, currentApp, changingPairs, stepsInProgress) ->
# Check none of the currentApp.services use this network or volume
dependencyComparisonFn = (service, current) ->
_.some service.volumes, (volumeDefinition) ->
sourceName = volumeDefinition.split(':')[0]
sourceName == current.name
@_nextStepsForNetworkOrVolume({ current, target }, currentApp, changingPairs, dependencyComparisonFn, 'volume')
sourceName == "#{service.appId}_#{current?.name}"
@_nextStepsForNetworkOrVolume({ current, target }, currentApp, changingPairs, dependencyComparisonFn, 'volume', stepsInProgress)
# Infers steps that do not require creating a new container
_updateContainerStep: (current, target) ->
@@ -649,7 +643,7 @@ module.exports = class ApplicationManager extends EventEmitter
targetApp = emptyApp
else
# Create the default network for the target app
targetApp.networks[targetApp.appId] ?= {}
targetApp.networks['default'] ?= {}
if !currentApp?
currentApp = emptyApp
appId = targetApp.appId ? currentApp.appId
@@ -660,7 +654,8 @@ module.exports = class ApplicationManager extends EventEmitter
steps = []
# All removePairs get a 'kill' action
for pair in removePairs
steps.push(serviceAction('kill', pair.current.serviceId, pair.current, null))
if !_.find(stepsInProgress, (step) -> step.serviceId == pair.current.serviceId)?
steps.push(serviceAction('kill', pair.current.serviceId, pair.current, null))
# next step for install pairs in download - start order, but start requires dependencies, networks and volumes met
# next step for update pairs in order by update strategy. start requires dependencies, networks and volumes met.
for pair in installPairs.concat(updatePairs)
@@ -717,6 +712,7 @@ module.exports = class ApplicationManager extends EventEmitter
Promise.join(
@config.get('extendedEnvOptions')
@docker.getNetworkGateway(constants.supervisorNetworkInterface)
.catchReturn('127.0.0.1')
(opts, supervisorApiHost) =>
configOpts = {
appName: app.name
@@ -826,22 +822,25 @@ module.exports = class ApplicationManager extends EventEmitter
notUsedByProxyvisor = !_.some proxyvisorImages, (proxyvisorImage) => @images.isSameImage(image, { name: proxyvisorImage })
return notUsedForDelta and notUsedByProxyvisor
_inferNextSteps: (cleanupNeeded, availableImages, current, target, stepsInProgress) =>
_inferNextSteps: (cleanupNeeded, availableImages, supervisorNetworkReady, current, target, stepsInProgress) =>
Promise.try =>
currentByAppId = _.keyBy(current.local.apps ? [], 'appId')
targetByAppId = _.keyBy(target.local.apps ? [], 'appId')
nextSteps = []
if !_.some(stepsInProgress, (step) -> step.action == 'fetch')
if cleanupNeeded
nextSteps.push({ action: 'cleanup' })
imagesToRemove = @_unnecessaryImages(current, target, availableImages)
for image in imagesToRemove
nextSteps.push({ action: 'removeImage', image })
# If we have to remove any images, we do that before anything else
if _.isEmpty(nextSteps)
allAppIds = _.union(_.keys(currentByAppId), _.keys(targetByAppId))
for appId in allAppIds
nextSteps = nextSteps.concat(@_nextStepsForAppUpdate(currentByAppId[appId], targetByAppId[appId], availableImages, stepsInProgress))
if !supervisorNetworkReady
nextSteps.push({ action: 'ensureSupervisorNetwork' })
else
if !_.some(stepsInProgress, (step) -> step.action == 'fetch')
if cleanupNeeded
nextSteps.push({ action: 'cleanup' })
imagesToRemove = @_unnecessaryImages(current, target, availableImages)
for image in imagesToRemove
nextSteps.push({ action: 'removeImage', image })
# If we have to remove any images, we do that before anything else
if _.isEmpty(nextSteps)
allAppIds = _.union(_.keys(currentByAppId), _.keys(targetByAppId))
for appId in allAppIds
nextSteps = nextSteps.concat(@_nextStepsForAppUpdate(currentByAppId[appId], targetByAppId[appId], availableImages, stepsInProgress))
return @_removeDuplicateSteps(nextSteps, stepsInProgress)
_removeDuplicateSteps: (nextSteps, stepsInProgress) ->
@@ -875,8 +874,9 @@ module.exports = class ApplicationManager extends EventEmitter
Promise.join(
@images.isCleanupNeeded()
@images.getAvailable()
(cleanupNeeded, availableImages) =>
@_inferNextSteps(cleanupNeeded, availableImages, currentState, targetState, stepsInProgress)
@networks.supervisorNetworkReady()
(cleanupNeeded, availableImages, supervisorNetworkReady) =>
@_inferNextSteps(cleanupNeeded, availableImages, supervisorNetworkReady, currentState, targetState, stepsInProgress)
.then (nextSteps) =>
@proxyvisor.getRequiredSteps(availableImages, currentState, targetState, nextSteps.concat(stepsInProgress))
.then (proxyvisorSteps) ->

View File

@@ -1,14 +1,19 @@
Promise = require 'bluebird'
logTypes = require '../lib/log-types'
{ checkInt } = require '../lib/validation'
{ NotFoundError, ENOENT } = require '../lib/errors'
constants = require '../lib/constants'
fs = Promise.promisifyAll(require('fs'))
module.exports = class Networks
constructor: ({ @docker, @logger }) ->
# TODO: parse supported config fields
format: (network) ->
[ appId, name ] = network.Name.split('_')
return {
appId: checkInt(network.Labels['io.resin.appId'])
name: network.Name
appId: checkInt(appId)
name: name
config: {}
}
@@ -22,31 +27,67 @@ module.exports = class Networks
@getAll()
.filter((network) -> network.appId == appId)
get: (name) =>
@docker.getNetwork(name).inspect()
get: ({ name, appId }) =>
@docker.getNetwork("#{appId}_#{name}").inspect()
.then(@format)
# TODO: what config values are relevant/whitelisted?
create: ({ name, config, appId }) =>
@logger.logSystemEvent(logTypes.createNetwork, { network: { name } })
@docker.createNetwork({
Name: name
Labels: {
'io.resin.supervised': 'true'
'io.resin.appId': appId.toString()
}
})
@get({ name, appId })
.then (net) =>
if !@isEqualConfig(net.config, config)
throw new Error("Trying to create network '#{name}', but a network with same name and different configuration exists")
.catch NotFoundError, =>
@docker.createNetwork({
Name: "#{appId}_#{name}"
Labels: {
'io.resin.supervised': 'true'
}
})
.catch (err) =>
@logger.logSystemEvent(logTypes.createNetworkError, { network: { name }, error: err })
@logger.logSystemEvent(logTypes.createNetworkError, { network: { name, appId }, error: err })
throw err
remove: ({ name }) =>
@logger.logSystemEvent(logTypes.removeNetwork, { network: { name } })
@docker.getNetwork(name).remove()
remove: ({ name, appId }) =>
@logger.logSystemEvent(logTypes.removeNetwork, { network: { name, appId } })
@docker.getNetwork("#{appId}_#{name}").remove()
.catch (err) =>
@logger.logSystemEvent(logTypes.removeNetworkError, { network: { name }, error: err })
@logger.logSystemEvent(logTypes.removeNetworkError, { network: { name, appId }, error: err })
throw err
supervisorNetworkReady: =>
# For mysterious reasons sometimes the balena/docker network exists
# but the interface does not
fs.statAsync("/sys/class/net/#{constants.supervisorNetworkInterface}")
.then =>
@docker.getNetwork(constants.supervisorNetworkInterface).inspect()
.then (net) ->
return net.Options['com.docker.network.bridge.name'] == constants.supervisorNetworkInterface
.catchReturn(NotFoundError, false)
.catchReturn(ENOENT, false)
ensureSupervisorNetwork: =>
removeIt = =>
@docker.getNetwork(constants.supervisorNetworkInterface).remove()
.then =>
@docker.getNetwork(constants.supervisorNetworkInterface).inspect()
@docker.getNetwork(constants.supervisorNetworkInterface).inspect()
.then (net) ->
if net.Options['com.docker.network.bridge.name'] != constants.supervisorNetworkInterface
removeIt()
else
fs.statAsync("/sys/class/net/#{constants.supervisorNetworkInterface}")
.catch ENOENT, ->
removeIt()
.catch NotFoundError, =>
console.log('Creating supervisor0 network')
@docker.createNetwork({
Name: constants.supervisorNetworkInterface
Options:
'com.docker.network.bridge.name': constants.supervisorNetworkInterface
})
# TODO: compare supported config fields
isEqualConfig: (current, target) ->
return true
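
A short usage sketch of the appId-scoped API above ('docker' and 'logger' are assumed to be a dockerode instance and the supervisor logger; values are illustrative):

networks = new Networks({ docker, logger })
# Creates (or validates) the engine-level network '1234_default', then reads it back:
networks.create({ name: 'default', config: {}, appId: 1234 })
.then -> networks.get({ name: 'default', appId: 1234 })
.then (net) -> console.log(net.appId, net.name) # => 1234 'default'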

View File

@@ -5,28 +5,92 @@ updateLock = require '../lib/update-lock'
constants = require '../lib/constants'
conversions = require '../lib/conversions'
Duration = require 'duration-js'
Images = require './images'
validRestartPolicies = [ 'no', 'always', 'on-failure', 'unless-stopped' ]
parseMemoryNumber = (numAsString, defaultVal) ->
m = numAsString?.toString().match(/^([0-9]+)([bkmg]?)$/)
if !m? and defaultVal?
return parseMemoryNumber(defaultVal)
num = m[1]
pow = { '': 0, 'b': 0, 'B': 0, 'K': 1, 'k': 1, 'm': 2, 'M': 2, 'g': 3, 'G': 3 }
return parseInt(num) * 1024 ** pow[m[2]]
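
For reference, a usage sketch of the memory-size parser above (illustrative values, not part of the diff):

# '64m' is 64 * 1024 * 1024 bytes; a bare number is taken as bytes; when the
# input doesn't match, the default string is parsed instead.
console.log(parseMemoryNumber('64m'))          # => 67108864
console.log(parseMemoryNumber('512'))          # => 512
console.log(parseMemoryNumber(undefined, '0')) # => 0
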
# Construct a restart policy based on its name.
# The default policy (if name is not a valid policy) is "always".
createRestartPolicy = (name) ->
if not (name in validRestartPolicies)
name = 'unless-stopped'
name = 'always'
return { Name: name, MaximumRetryCount: 0 }
getCommand = (service, imageInfo) ->
cmd = null
if service.command?
return service.command
else if imageInfo?.Config?.Cmd
return imageInfo.Config.Cmd
cmd = service.command
else if imageInfo?.Config?.Cmd?
cmd = imageInfo.Config.Cmd
if _.isString(cmd)
cmd = [ cmd ]
return cmd
getEntrypoint = (service, imageInfo) ->
entry = null
if service.entrypoint?
return service.entrypoint
else if imageInfo?.Config?.Entrypoint
return imageInfo.Config.Entrypoint
entry = service.entrypoint
else if imageInfo?.Config?.Entrypoint?
entry = imageInfo.Config.Entrypoint
if _.isString(entry)
entry = [ entry ]
return entry
getStopSignal = (service, imageInfo) ->
sig = null
if service.stop_signal?
sig = service.stop_signal
else if imageInfo?.Config?.StopSignal?
sig = imageInfo.Config.StopSignal
if sig? and !_.isString(sig) # In case the YAML was parsed as a number
sig = sig.toString()
return sig
buildHealthcheckTest = (test) ->
if _.isString(test)
return [ 'CMD-SHELL', test ]
else
return test
getNanoseconds = (duration) ->
d = new Duration(duration)
return d.nanoseconds()
# Mutates imageHealthcheck
overrideHealthcheckFromCompose = (serviceHealthcheck, imageHealthcheck = {}) ->
if serviceHealthcheck.disable
imageHealthcheck.Test = [ 'NONE' ]
else
imageHealthcheck.Test = buildHealthcheckTest(serviceHealthcheck.test)
if serviceHealthcheck.interval?
imageHealthcheck.Interval = getNanoseconds(serviceHealthcheck.interval)
if serviceHealthcheck.timeout?
imageHealthcheck.Timeout = getNanoseconds(serviceHealthcheck.timeout)
if serviceHealthcheck.start_period?
imageHealthcheck.StartPeriod = getNanoseconds(serviceHealthcheck.start_period)
if serviceHealthcheck.retries?
imageHealthcheck.Retries = parseInt(serviceHealthcheck.retries)
return imageHealthcheck
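
A usage sketch of the override above with a compose-style healthcheck (values are illustrative; interval and timeout are converted to nanoseconds via duration-js):

composeHealthcheck = { test: 'curl -f http://localhost/ || exit 1', interval: '30s', retries: 3 }
console.log(overrideHealthcheckFromCompose(composeHealthcheck))
# => { Test: [ 'CMD-SHELL', 'curl -f http://localhost/ || exit 1' ], Interval: 30000000000, Retries: 3 }
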
getHealthcheck = (service, imageInfo) ->
healthcheck = null
if imageInfo?.Config?.Healthcheck?
healthcheck = imageInfo.Config.Healthcheck
if service.healthcheck?
healthcheck = overrideHealthcheckFromCompose(service.healthcheck, healthcheck)
# Set invalid healthchecks back to null
if healthcheck and (!healthcheck.Test? or _.isEqual(healthcheck.Test, []))
healthcheck = null
return healthcheck
killmePath = (appId, serviceName) ->
return updateLock.lockPath(appId, serviceName)
@@ -74,7 +138,32 @@ module.exports = class Service
@exposedPorts
@portBindings
@networks
@memLimit
@memReservation
@shmSize
@cpuShares
@cpuQuota
@cpus
@cpuset
@nanoCpus
@domainname
@oomScoreAdj
@dns
@dnsSearch
@dnsOpt
@tmpfs
@extraHosts
@ulimitsArray
@stopSignal
@stopGracePeriod
@init
@healthcheck
@readOnly
@sysctls
} = _.mapKeys(serviceProperties, (v, k) -> _.camelCase(k))
@networks ?= {}
@privileged ?= false
@volumes ?= []
@labels ?= {}
@@ -87,42 +176,111 @@ module.exports = class Service
@devices ?= []
@exposedPorts ?= {}
@portBindings ?= {}
@networkMode ?= @appId.toString()
@networks ?= {}
@networks[@networkMode] ?= {}
@memLimit = parseMemoryNumber(@memLimit, '0')
@memReservation = parseMemoryNumber(@memReservation, '0')
@shmSize = parseMemoryNumber(@shmSize, '64m')
@cpuShares ?= 0
@cpuQuota ?= 0
@cpus ?= 0
@nanoCpus ?= 0
@cpuset ?= ''
@domainname ?= ''
@oomScoreAdj ?= 0
@tmpfs ?= []
@extraHosts ?= []
@dns ?= []
@dnsSearch ?= []
@dnsOpt ?= []
@ulimitsArray ?= []
@stopSignal ?= null
@stopGracePeriod ?= null
@healthcheck ?= null
@init ?= null
@readOnly ?= false
@sysctls ?= {}
# If the service has no containerId, it is a target service and has to be normalised and extended
if !@containerId?
@networkMode ?= 'default'
if @networkMode not in [ 'host', 'bridge', 'none' ]
@networkMode = "#{@appId}_#{@networkMode}"
@networks = _.mapKeys @networks, (v, k) ->
if k not in [ 'host', 'bridge', 'none' ]
return "#{@appId}_#{k}"
else
return k
@networks[@networkMode] ?= {}
@restartPolicy = createRestartPolicy(serviceProperties.restart)
@command = getCommand(serviceProperties, opts.imageInfo)
@entrypoint = getEntrypoint(serviceProperties, opts.imageInfo)
@stopSignal = getStopSignal(serviceProperties, opts.imageInfo)
@healthcheck = getHealthcheck(serviceProperties, opts.imageInfo)
@extendEnvVars(opts)
@extendLabels(opts.imageInfo)
@extendAndSanitiseVolumes(opts.imageInfo)
@extendAndSanitiseExposedPorts(opts.imageInfo)
{ @exposedPorts, @portBindings } = @getPortsAndPortBindings()
@devices = formatDevices(@devices)
if checkTruthy(@labels['io.resin.features.dbus'])
@volumes.push('/run/dbus:/host/run/dbus')
if checkTruthy(@labels['io.resin.features.kernel_modules'])
@volumes.push('/lib/modules:/lib/modules')
if checkTruthy(@labels['io.resin.features.firmware'])
@volumes.push('/lib/firmware:/lib/firmware')
if checkTruthy(@labels['io.resin.features.supervisor_api'])
@environment['RESIN_SUPERVISOR_PORT'] = opts.listenPort.toString()
@environment['RESIN_SUPERVISOR_API_KEY'] = opts.apiSecret
if @networkMode == 'host'
@environment['RESIN_SUPERVISOR_HOST'] = '127.0.0.1'
@environment['RESIN_SUPERVISOR_ADDRESS'] = "http://127.0.0.1:#{opts.listenPort}"
@addFeaturesFromLabels(opts)
if @dns?
if !Array.isArray(@dns)
@dns = [ @dns ]
if @dnsSearch?
if !Array.isArray(@dnsSearch)
@dnsSearch = [ @dns ]
@nanoCpus = Math.round(Number(@cpus) * 10 ** 9)
@ulimitsArray = _.map @ulimits, (value, name) ->
if _.isNumber(value) or _.isString(value)
return { Name: name, Soft: parseInt(value), Hard: parseInt(value) }
else
@environment['RESIN_SUPERVISOR_HOST'] = opts.supervisorApiHost
@environment['RESIN_SUPERVISOR_ADDRESS'] = "http://#{opts.supervisorApiHost}:#{opts.listenPort}"
@networks[constants.supervisorNetworkInterface] = {}
else
# We ensure the user hasn't added "supervisor0" to the service's networks
delete @networks[constants.supervisorNetworkInterface]
if checkTruthy(@labels['io.resin.features.resin_api'])
@environment['RESIN_API_KEY'] = opts.deviceApiKey
return { Name: name, Soft: parseInt(value.soft), Hard: parseInt(value.hard) }
if @init
@init = true
if @stopGracePeriod?
d = new Duration(@stopGracePeriod)
@stopGracePeriod = d.seconds()
@readOnly = Boolean(@readOnly)
if Array.isArray(@sysctls)
@sysctls = _.fromPairs(_.map(@sysctls, (v) -> _.split(v, '=')))
_addSupervisorApi: (opts) =>
@environment['RESIN_SUPERVISOR_PORT'] = opts.listenPort.toString()
@environment['RESIN_SUPERVISOR_API_KEY'] = opts.apiSecret
if @networkMode == 'host'
@environment['RESIN_SUPERVISOR_HOST'] = '127.0.0.1'
@environment['RESIN_SUPERVISOR_ADDRESS'] = "http://127.0.0.1:#{opts.listenPort}"
else
@environment['RESIN_SUPERVISOR_HOST'] = opts.supervisorApiHost
@environment['RESIN_SUPERVISOR_ADDRESS'] = "http://#{opts.supervisorApiHost}:#{opts.listenPort}"
@networks[constants.supervisorNetworkInterface] = {}
addFeaturesFromLabels: (opts) =>
if checkTruthy(@labels['io.resin.features.dbus'])
@volumes.push('/run/dbus:/host/run/dbus')
if checkTruthy(@labels['io.resin.features.kernel_modules'])
@volumes.push('/lib/modules:/lib/modules')
if checkTruthy(@labels['io.resin.features.firmware'])
@volumes.push('/lib/firmware:/lib/firmware')
if checkTruthy(@labels['io.resin.features.supervisor_api'])
@_addSupervisorApi(opts)
else
# We ensure the user hasn't added "supervisor0" to the service's networks
delete @networks[constants.supervisorNetworkInterface]
if checkTruthy(@labels['io.resin.features.resin_api'])
@environment['RESIN_API_KEY'] = opts.deviceApiKey
extendEnvVars: ({ imageInfo, uuid, appName, name, version, deviceType, osVersion }) =>
newEnv =
@@ -169,9 +327,10 @@ module.exports = class Service
for vol in @volumes
isBind = /:/.test(vol)
if isBind
bindSource = vol.split(':')[0]
[ bindSource, bindDest ] = vol.split(':')
if !path.isAbsolute(bindSource)
volumes.push(vol)
# Rewrite named volumes to namespace by appId
volumes.push("#{@appId}_#{bindSource}:#{bindDest}")
else
console.log("Ignoring invalid bind mount #{vol}")
else
@@ -188,7 +347,7 @@ module.exports = class Service
return null
bindSource = vol.split(':')[0]
if !path.isAbsolute(bindSource)
return bindSource
return bindSource.split('_')[1]
else
return null
return _.filter(validVolumes, (v) -> !_.isNull(v))
@@ -263,6 +422,27 @@ module.exports = class Service
exposedPorts: container.Config.ExposedPorts
portBindings: container.HostConfig.PortBindings
networks: container.NetworkSettings.Networks
memLimit: container.HostConfig.Memory
memReservation: container.HostConfig.MemoryReservation
shmSize: container.HostConfig.ShmSize
cpuShares: container.HostConfig.CpuShares
cpuQuota: container.HostConfig.CpuQuota
nanoCpus: container.HostConfig.NanoCpus
cpuset: container.HostConfig.CpusetCpus
domainname: container.Config.Domainname
oomScoreAdj: container.HostConfig.OomScoreAdj
dns: container.HostConfig.Dns
dnsSearch: container.HostConfig.DnsSearch
dnsOpt: container.HostConfig.DnsOpt
tmpfs: _.keys(container.HostConfig.Tmpfs ? {})
extraHosts: container.HostConfig.ExtraHosts
ulimitsArray: container.HostConfig.Ulimits
stopSignal: container.Config.StopSignal
stopGracePeriod: container.Config.StopTimeout
healthcheck: container.Config.Healthcheck
init: container.HostConfig.Init
readOnly: container.HostConfig.ReadonlyRootfs
sysctls: container.HostConfig.Sysctls
}
# I've seen docker use either 'no' or '' for no restart policy, so we normalise to 'no'.
if service.restartPolicy.Name == ''
@@ -297,6 +477,9 @@ module.exports = class Service
toContainerConfig: =>
{ binds, volumes } = @getBindsAndVolumes()
tmpfs = {}
for dir in @tmpfs
tmpfs[dir] = ''
conf = {
name: "#{@serviceName}_#{@imageId}_#{@releaseId}"
Image: @image
@@ -307,7 +490,11 @@ module.exports = class Service
Env: _.map @environment, (v, k) -> k + '=' + v
ExposedPorts: @exposedPorts
Labels: @labels
Domainname: @domainname
HostConfig:
Memory: @memLimit
MemoryReservation: @memReservation
ShmSize: @shmSize
Privileged: @privileged
NetworkMode: @networkMode
PortBindings: @portBindings
@@ -315,18 +502,38 @@ module.exports = class Service
CapAdd: @capAdd
CapDrop: @capDrop
Devices: @devices
CpuShares: @cpuShares
NanoCpus: @nanoCpus
CpuQuota: @cpuQuota
CpusetCpus: @cpuset
OomScoreAdj: @oomScoreAdj
Tmpfs: tmpfs
Dns: @dns
DnsSearch: @dnsSearch
DnsOpt: @dnsOpt
Ulimits: @ulimitsArray
ReadonlyRootfs: @readOnly
Sysctls: @sysctls
}
if @stopSignal?
conf.StopSignal = @stopSignal
if @stopGracePeriod?
conf.StopTimeout = @stopGracePeriod
if @healthcheck?
conf.Healthcheck = @healthcheck
if @restartPolicy.Name != 'no'
conf.HostConfig.RestartPolicy = @restartPolicy
# If network mode is the default network for this app, add alias for serviceName
if @networkMode == @appId.toString()
if @networkMode == "#{@appId}_default"
conf.NetworkingConfig = {
EndpointsConfig: {
"#{@appId}": {
"#{@appId}_default": {
Aliases: [ @serviceName ]
}
}
}
if @init
conf.HostConfig.Init = true
return conf
# TODO: when we support network configuration properly, return endpointConfig: conf
@@ -340,6 +547,8 @@ module.exports = class Service
isSameContainer: (otherService) =>
propertiesToCompare = [
'command'
'entrypoint'
'networkMode'
'privileged'
'restartPolicy'
@@ -347,12 +556,33 @@ module.exports = class Service
'environment'
'portBindings'
'exposedPorts'
'memLimit'
'memReservation'
'shmSize'
'cpuShares'
'cpuQuota'
'nanoCpus'
'cpuset'
'domainname'
'oomScoreAdj'
'healthcheck'
'stopSignal'
'stopGracePeriod'
'init'
'readOnly'
'sysctls'
]
arraysToCompare = [
'volumes'
'devices'
'capAdd'
'capDrop'
'dns'
'dnsSearch'
'dnsOpt'
'tmpfs'
'extraHosts'
'ulimitsArray'
]
isEq = Images.isSameImage({ name: @image }, { name: otherService.image }) and
_.isEqual(_.pick(this, propertiesToCompare), _.pick(otherService, propertiesToCompare)) and
@@ -365,6 +595,8 @@ module.exports = class Service
#if !isEq
# console.log(JSON.stringify(this, null, 2))
# console.log(JSON.stringify(otherService, null, 2))
# diff = _.omitBy this, (prop, k) -> _.isEqual(prop, otherService[k])
# console.log(JSON.stringify(diff, null, 2))
return isEq
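
To summarise how the new compose-level fields handled in this file map to the container configuration (example values are hypothetical; the conversions follow parseMemoryNumber, the duration handling and toContainerConfig above):

# Example compose fields and the container-config entries they produce:
service =
  mem_reservation: '128m'                        # -> HostConfig.MemoryReservation: 134217728
  shm_size: '256m'                               # -> HostConfig.ShmSize: 268435456
  init: true                                     # -> HostConfig.Init: true
  read_only: true                                # -> HostConfig.ReadonlyRootfs: true
  stop_grace_period: '20s'                       # -> StopTimeout: 20 (seconds)
  sysctls: [ 'net.ipv4.tcp_keepalive_time=600' ] # -> HostConfig.Sysctls: { 'net.ipv4.tcp_keepalive_time': '600' }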

View File

@@ -6,15 +6,16 @@ path = require 'path'
logTypes = require '../lib/log-types'
constants = require '../lib/constants'
{ checkInt } = require '../lib/validation'
{ NotFoundError } = require '../lib/errors'
module.exports = class Volumes
constructor: ({ @docker, @logger }) ->
format: (volume) ->
appId = checkInt(volume.Labels['io.resin.app_id'])
format: (volume) =>
[ appId, name ] = volume.Name.split('_')
return {
name: volume.Name
appId
name: name
appId: checkInt(appId)
config: {
labels: _.omit(volume.Labels, _.keys(@defaultLabels(appId)))
driverOpts: volume.Options
@@ -35,15 +36,14 @@ module.exports = class Volumes
.then (volumes) ->
_.filter(volumes, (v) -> v.appId == appId)
get: (name) ->
@docker.getVolume(name).inspect()
.then (volume) ->
get: ({ name, appId }) ->
@docker.getVolume("#{appId}_#{name}").inspect()
.then (volume) =>
return @format(volume)
defaultLabels: (appId) ->
defaultLabels: ->
return {
'io.resin.supervised': 'true'
'io.resin.app_id': appId.toString()
}
# TODO: what config values are relevant/whitelisted?
@@ -52,13 +52,18 @@ module.exports = class Volumes
config = _.mapKeys(config, (v, k) -> _.camelCase(k))
@logger.logSystemEvent(logTypes.createVolume, { volume: { name } })
labels = _.clone(config.labels) ? {}
_.assign(labels, @defaultLabels(appId))
_.assign(labels, @defaultLabels())
driverOpts = config.driverOpts ? {}
@docker.createVolume({
Name: name
Labels: labels
DriverOpts: driverOpts
})
@get({ name, appId })
.then (vol) =>
if !@isEqualConfig(vol.config, config)
throw new Error("Trying to create volume '#{name}', but a volume with same name and different configuration exists")
.catch NotFoundError, =>
@docker.createVolume({
Name: "#{appId}_#{name}"
Labels: labels
DriverOpts: driverOpts
})
.catch (err) =>
@logger.logSystemEvent(logTypes.createVolumeError, { volume: { name }, error: err })
throw err
@@ -81,11 +86,11 @@ module.exports = class Volumes
.catch (err) ->
@logger.logSystemMessage("Warning: could not migrate legacy /data volume: #{err.message}", { error: err }, 'Volume migration error')
remove: ({ name }) ->
remove: ({ name, appId }) ->
@logger.logSystemEvent(logTypes.removeVolume, { volume: { name } })
@docker.getVolume(name).remove()
@docker.getVolume("#{appId}_#{name}").remove()
.catch (err) =>
@logger.logSystemEvent(logTypes.removeVolumeError, { volume: { name }, error: err })
@logger.logSystemEvent(logTypes.removeVolumeError, { volume: { name, appId }, error: err })
isEqualConfig: (current = {}, target = {}) ->
current = _.mapKeys(current, (v, k) -> _.camelCase(k))

View File

@@ -37,7 +37,6 @@ arrayConfigKeys = [ 'dtparam', 'dtoverlay', 'device_tree_param', 'device_tree_ov
module.exports = class DeviceConfig
constructor: ({ @db, @config, @logger }) ->
@rebootRequired = false
@validActions = _.keys(@actionExecutors)
@gosuperHealthy = true
@configKeys = {
appUpdatePollInterval: { envVarName: 'RESIN_SUPERVISOR_POLL_INTERVAL', varType: 'int', defaultValue: '60000' }
@@ -52,6 +51,41 @@ module.exports = class DeviceConfig
lockOverride: { envVarName: 'RESIN_SUPERVISOR_OVERRIDE_LOCK', varType: 'bool', defaultValue: 'false' }
}
@validKeys = [ 'RESIN_HOST_LOG_TO_DISPLAY', 'RESIN_SUPERVISOR_VPN_CONTROL' ].concat(_.map(@configKeys, 'envVarName'))
@actionExecutors = {
changeConfig: (step) =>
@logger.logConfigChange(step.humanReadableTarget)
@config.set(step.target)
.then =>
@logger.logConfigChange(step.humanReadableTarget, { success: true })
.catch (err) =>
@logger.logConfigChange(step.humanReadableTarget, { err })
throw err
setLogToDisplay: (step) =>
logValue = { RESIN_HOST_LOG_TO_DISPLAY: step.target }
@logger.logConfigChange(logValue)
@setLogToDisplay(step.target)
.then =>
@logger.logConfigChange(logValue, { success: true })
.catch (err) =>
@logger.logConfigChange(logValue, { err })
throw err
setVPNEnabled: (step, { initial = false } = {}) =>
logValue = { RESIN_SUPERVISOR_VPN_CONTROL: step.target }
if !initial
@logger.logConfigChange(logValue)
@setVPNEnabled(step.target)
.then =>
if !initial
@logger.logConfigChange(logValue, { success: true })
.catch (err) =>
@logger.logConfigChange(logValue, { err })
throw err
setBootConfig: (step) =>
@config.get('deviceType')
.then (deviceType) =>
@setBootConfig(deviceType, step.target)
}
@validActions = _.keys(@actionExecutors)
setTarget: (target, trx) =>
db = trx ? @db.models
@@ -137,6 +171,11 @@ module.exports = class DeviceConfig
action: 'setLogToDisplay'
target: target['RESIN_HOST_LOG_TO_DISPLAY']
})
if !_.isEmpty(target['RESIN_SUPERVISOR_VPN_CONTROL']) && checkTruthy(current['RESIN_SUPERVISOR_VPN_CONTROL']) != checkTruthy(target['RESIN_SUPERVISOR_VPN_CONTROL'])
steps.push({
action: 'setVPNEnabled'
target: target['RESIN_SUPERVISOR_VPN_CONTROL']
})
if @bootConfigChangeRequired(deviceType, current, target)
steps.push({
action: 'setBootConfig'
@@ -158,24 +197,8 @@ module.exports = class DeviceConfig
return [{ action: 'noop' }]
else return filteredSteps
actionExecutors: {
changeConfig: (step) =>
@logger.logConfigChange(step.humanReadableTarget)
@config.set(step.target)
.then =>
@logger.logConfigChange(step.humanReadableTarget, { success: true })
.catch (err) =>
@logger.logConfigChange(step.humanReadableTarget, { err })
throw err
setLogToDisplay: (step) =>
@setLogToDisplay(step.target)
setBootConfig: (step) =>
@config.get('deviceType')
.then (deviceType) =>
@setBootConfig(deviceType, step.target)
}
executeStepAction: (step) =>
@actionExecutors[step.action](step)
executeStepAction: (step, opts) =>
@actionExecutors[step.action](step, opts)
envToBootConfig: (env) ->
# We ensure env doesn't have garbage
@@ -244,12 +267,8 @@ module.exports = class DeviceConfig
throw new Error("#{response.statusCode} #{body.Error}")
else
if body.Data == true
@logger.logSystemMessage("#{if enable then 'Enabled' else 'Disabled'} logs to display")
@rebootRequired = true
return body.Data
.catch (err) =>
@logger.logSystemMessage("Error setting log to display: #{err}", { error: err }, 'Set log to display error')
throw err
setBootConfig: (deviceType, target) =>
Promise.try =>
@@ -294,7 +313,5 @@ module.exports = class DeviceConfig
enable = checkTruthy(val) ? true
gosuper.post('/v1/vpncontrol', { json: true, body: Enable: enable })
.spread (response, body) ->
if response.statusCode == 202
console.log('VPN enabled: ' + enable)
else
console.log('Error: ' + body + ' response:' + response.statusCode)
if response.statusCode != 202
throw new Error("#{response.statusCode} #{body?.Error}")
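
The reordering of validActions in this file (and the matching one in the proxyvisor below) matters because it is derived from the executors; a minimal standalone sketch of the pattern (an illustrative class, not the real module):

_ = require 'lodash'
Promise = require 'bluebird'

class Example
  constructor: ->
    @actionExecutors =
      noop: -> Promise.resolve()
    # Derived only after the executors object exists; computed earlier it would be [].
    @validActions = _.keys(@actionExecutors)

console.log(new Example().validActions) # => [ 'noop' ]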

View File

@@ -131,7 +131,8 @@ module.exports = class DeviceState extends EventEmitter
healthcheck: =>
@config.getMany([ 'appUpdatePollInterval', 'offlineMode' ])
.then (conf) =>
applyTargetHealthy = conf.offlineMode or !@applyInProgress or process.hrtime(@lastApplyStart)[0] - @applications.timeSpentFetching < 2 * conf.appUpdatePollInterval
cycleTimeWithinInterval = process.hrtime(@lastApplyStart)[0] - @applications.timeSpentFetching < 2 * conf.appUpdatePollInterval
applyTargetHealthy = conf.offlineMode or !@applyInProgress or @applications.fetchesInProgress > 0 or cycleTimeWithinInterval
return applyTargetHealthy and @deviceConfig.gosuperHealthy
normaliseLegacy: =>
@@ -338,10 +339,10 @@ module.exports = class DeviceState extends EventEmitter
@shuttingDown = true
@emitAsync('shutdown')
executeStepAction: (step, { force, targetState }) =>
executeStepAction: (step, { force, initial }) =>
Promise.try =>
if _.includes(@deviceConfig.validActions, step.action)
@deviceConfig.executeStepAction(step)
@deviceConfig.executeStepAction(step, { initial })
else if _.includes(@applications.validActions, step.action)
@applications.executeStepAction(step, { force })
else
@@ -360,7 +361,7 @@ module.exports = class DeviceState extends EventEmitter
return
@stepsInProgress.push(step)
setImmediate =>
@executeStepAction(step, { force })
@executeStepAction(step, { force, initial })
.finally =>
@usingInferStepsLock =>
_.pullAllWith(@stepsInProgress, [ step ], _.isEqual)
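
A condensed sketch of the healthcheck rule after the change above (the real code derives these values from config, process.hrtime and the applications manager; this standalone form is illustrative):

# Healthy when in offline mode, when no apply is running, when a fetch is
# still in progress, or when the current apply cycle (minus time spent
# fetching) is within twice the poll interval.
applyTargetHealthy = ({ offlineMode, applyInProgress, fetchesInProgress, cycleTimeWithinInterval }) ->
  offlineMode or not applyInProgress or fetchesInProgress > 0 or cycleTimeWithinInterval

console.log(applyTargetHealthy({ offlineMode: false, applyInProgress: true, fetchesInProgress: 1, cycleTimeWithinInterval: false })) # => true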

View File

@@ -1,41 +0,0 @@
exports.defaultLegacyVolume = (appId) ->
return "resin-data-#{appId}"
exports.singleToMulticontainerApp = (app, appId) ->
newApp = {
appId
commit: app.commit
name: app.name
releaseId: '1'
networks: {}
volumes: { "#{exports.defaultLegacyVolume(appId)}": {} }
config: app.config ? {}
}
newApp.services = {
'1': {
serviceName: 'main'
imageId: '1'
commit: app.commit
releaseId: app.releaseId ? '1'
image: app.image
privileged: true
network_mode: 'host'
volumes: [
"#{exports.defaultLegacyVolume(appId)}:/data"
]
labels: {
'io.resin.features.kernel_modules': '1'
'io.resin.features.firmware': '1'
'io.resin.features.dbus': '1'
'io.resin.features.supervisor_api': '1'
'io.resin.features.resin_api': '1'
'io.resin.update.strategy': newApp.config['RESIN_SUPERVISOR_UPDATE_STRATEGY'] ? 'download-then-kill'
'io.resin.update.handover_timeout': newApp.config['RESIN_SUPERVISOR_HANDOVER_TIMEOUT'] ? ''
}
environment: app.environment ? {}
restart: 'unless-stopped'
running: true
}
}
return newApp

View File

@@ -55,7 +55,7 @@ var singleToMulticontainerApp = function (app, appId) {
releaseId: 1,
image: app.imageId,
privileged: true,
network_mode: 'host',
networkMode: 'host',
volumes: [
`${defaultVolume}:/data`
],

View File

@@ -238,7 +238,6 @@ module.exports = class Proxyvisor
@lastRequestForDevice = {}
@_router = new ProxyvisorRouter(this)
@router = @_router.router
@validActions = _.keys(@actionExecutors)
@actionExecutors = {
updateDependentTargets: (step) =>
@config.getMany([ 'currentApiKey', 'apiTimeout' ])
@@ -314,6 +313,7 @@ module.exports = class Proxyvisor
cleanupTars(step.appId)
}
@validActions = _.keys(@actionExecutors)
bindToAPI: (apiBinder) =>
@apiBinder = apiBinder