Switch to using knex migrations to set up the database, and change the database format to use integers for ids instead of strings.

Also includes various improvements and bugfixes to services and the migration from legacy /data to volumes.

The switch to migrations involves a dirty hack for webpack to properly resolve the paths to the migrations js files - it uses an expression
that webpack can't resolve, so we hardcode it to a value and use the ContextReplacementPlugin to make that value resolve to the migrations folder.

The downsides to this approach are:
- a change in knex code would break this
- the migration code is added twice to the supervisor image: once in the migrations folder (because knex needs to loop through the directory to find the files),
and once inside app.js (because I can't make webpack treat them as external)

Signed-off-by: Pablo Carranza Velez <pablo@resin.io>
This commit is contained in:
Pablo Carranza Velez 2017-11-29 13:32:57 -08:00
parent 7c98a9d058
commit b003f48d7b
26 changed files with 1221 additions and 1014 deletions

View File

@ -5,3 +5,4 @@ node_modules
tools
dindctl
README.md
knexfile.js

View File

@ -138,7 +138,7 @@ COPY package.json /usr/src/app/
RUN JOBS=MAX npm install --no-optional --unsafe-perm
COPY webpack.config.js fix-jsonstream.js /usr/src/app/
COPY webpack.config.js fix-jsonstream.js hardcode-migrations.js /usr/src/app/
COPY src /usr/src/app/src
RUN npm run lint \

7
hardcode-migrations.js Normal file
View File

@ -0,0 +1,7 @@
// knex migrations use dynamic requires (require(directory + '/' + name)), which webpack
// cannot statically resolve. This webpack loader rewrites knex's source so the migrations
// directory becomes the constant './migrations/', allowing webpack to build a context
// for it (resolved via ContextReplacementPlugin in the webpack config).
//
// NOTE(review): the search strings are tied to knex's exact source text, so a knex
// upgrade can silently break this hack — keep them in sync with the installed version.
'use strict';

// Replace EVERY occurrence of a literal substring. String#replace with a string
// pattern only substitutes the first match, which would leave any later occurrence
// of the require expression broken in the bundle; split/join replaces all of them.
function replaceAllOccurrences(haystack, needle, replacement) {
	return haystack.split(needle).join(replacement);
}

// Loader entry point: receives the module source (string or Buffer) and returns the
// transformed source with both forms of knex's dynamic migration require hardcoded.
function hardcodeMigrations(source) {
	const code = source.toString();
	return replaceAllOccurrences(
		replaceAllOccurrences(
			code,
			"require(directory + '/' + name);",
			"require('./migrations/' + name);"
		),
		"require(_path2.default.join(this._absoluteConfigDir(), name));",
		"require('./migrations/' + name);"
	);
}

// Guarded export: under webpack/Node (CommonJS) this exports the loader exactly as
// before; the guard only makes the file loadable where `module` is not defined.
if (typeof module !== 'undefined' && module.exports !== undefined) {
	module.exports = hardcodeMigrations;
}

10
knexfile.js Normal file
View File

@ -0,0 +1,10 @@
// Only used to be able to run "knex migrate:make <migration name>" on the development machine.
// Not used in the supervisor.
module.exports = {
client: 'sqlite3',
connection: {
// Local scratch database for generating migrations — presumably gitignored; verify.
filename: './database.sqlite'
},
// sqlite3 lacks support for inserting default values, so knex requires this flag
// to silence its warning and insert NULL for unspecified columns.
useNullAsDefault: true
}

View File

@ -28,6 +28,7 @@
"buffer-equal-constant-time": "^1.0.1",
"coffee-loader": "^0.7.3",
"coffee-script": "~1.11.0",
"copy-webpack-plugin": "^4.2.3",
"docker-delta": "^2.0.4",
"docker-progress": "^2.7.2",
"docker-toolbelt": "^3.2.1",
@ -41,7 +42,6 @@
"memoizee": "^0.4.1",
"mixpanel": "0.0.20",
"mkdirp": "^0.5.1",
"ncp": "^2.0.0",
"network-checker": "~0.0.5",
"node-loader": "^0.6.0",
"null-loader": "^0.1.1",
@ -52,7 +52,6 @@
"resin-register-device": "^3.0.0",
"resin-sync": "^9.3.0",
"resumable-request": "^2.0.0",
"rimraf": "^2.5.4",
"rwlock": "^5.0.0",
"semver": "^5.3.0",
"semver-regex": "^1.0.0",

View File

@ -26,9 +26,8 @@ class APIBinderRouter
@router.use(bodyParser.json())
@router.post '/v1/update', (req, res) =>
@eventTracker.track('Update notification')
setImmediate =>
if @apiBinder.readyForUpdates
@apiBinder.getAndSetTargetState(req.body.force)
if @apiBinder.readyForUpdates
@apiBinder.getAndSetTargetState(req.body.force)
res.sendStatus(204)
module.exports = class APIBinder
@ -65,7 +64,8 @@ module.exports = class APIBinder
apiPrefix: baseUrlLegacy
passthrough: requestOpts
@cachedResinApi = @resinApi.clone({}, cache: {})
return if !startServices
if !startServices
return
console.log('Ensuring device is provisioned')
@provisionDevice()
.then =>
@ -137,7 +137,8 @@ module.exports = class APIBinder
_provision: =>
@config.get('provisioningOptions')
.then (opts) =>
return if opts.registered_at? and opts.deviceId? and !opts.provisioningApiKey?
if opts.registered_at? and opts.deviceId? and !opts.provisioningApiKey?
return
Promise.try ->
if opts.registered_at? and !opts.deviceId?
console.log('Device is registered but no device id available, attempting key exchange')
@ -173,7 +174,8 @@ module.exports = class APIBinder
@_provisionOrRetry(retryDelay)
provisionDevice: =>
throw new Error('Trying to provision device without initializing API client') if !@resinApi?
if !@resinApi?
throw new Error('Trying to provision device without initializing API client')
@config.getMany([
'provisioned'
'bootstrapRetryDelay'
@ -193,8 +195,10 @@ module.exports = class APIBinder
'deviceId'
])
.then (conf) =>
throw new Error('Cannot provision dependent device in offline mode') if conf.offlineMode
throw new Error('Device must be provisioned to provision a dependent device') if !conf.provisioned
if conf.offlineMode
throw new Error('Cannot provision dependent device in offline mode')
if !conf.provisioned
throw new Error('Device must be provisioned to provision a dependent device')
# TODO: when API supports it as per https://github.com/resin-io/hq/pull/949 remove userId
_.defaults(device, {
user: conf.userId
@ -219,8 +223,10 @@ module.exports = class APIBinder
'apiTimeout'
])
.then (conf) =>
throw new Error('Cannot update dependent device in offline mode') if conf.offlineMode
throw new Error('Device must be provisioned to update a dependent device') if !conf.provisioned
if conf.offlineMode
throw new Error('Cannot update dependent device in offline mode')
if !conf.provisioned
throw new Error('Device must be provisioned to update a dependent device')
@resinApiLegacy.patch
resource: 'device'
id: id
@ -229,7 +235,6 @@ module.exports = class APIBinder
apikey: conf.currentApiKey
.timeout(conf.apiTimeout)
# TODO: change to the multicontainer model, I think it's device_configuration_variable?
# Creates the necessary config vars in the API to match the current device state,
# without overwriting any variables that are already set.
_reportInitialEnv: =>
@ -301,10 +306,12 @@ module.exports = class APIBinder
return
startTargetStatePoll: ->
throw new Error('Trying to start poll without initializing API client') if !@resinApi?
if !@resinApi?
throw new Error('Trying to start poll without initializing API client')
@_pollTargetState()
@config.on 'change', (changedConfig) =>
@_pollTargetState() if changedConfig.appUpdatePollInterval?
if changedConfig.appUpdatePollInterval?
@_pollTargetState()
_getStateDiff: =>
diff = {
@ -325,7 +332,6 @@ module.exports = class APIBinder
@cachedResinApi._request(requestParams)
# TODO: switch to using the proper endpoint by changing @_reportV1 to @_reportV2
_report: =>
@config.getMany([ 'currentApiKey', 'deviceId', 'apiTimeout', 'resinApiEndpoint', 'uuid' ])
.then (conf) =>
@ -339,6 +345,7 @@ module.exports = class APIBinder
_.assign(@lastReportedState.dependent, stateDiff.dependent)
_reportCurrentState: =>
@reportPending = true
@deviceState.getStatus()
.then (currentDeviceState) =>
_.assign(@stateForReport.local, currentDeviceState.local)
@ -350,19 +357,20 @@ module.exports = class APIBinder
@_report()
.delay(REPORT_SUCCESS_DELAY)
.then =>
setImmediate(@_reportCurrentState)
@_reportCurrentState()
.catch (err) =>
@eventTracker.track('Device state report failure', { error: err })
Promise.delay(REPORT_RETRY_DELAY)
.then =>
setImmediate(@_reportCurrentState)
@_reportCurrentState()
return
startCurrentStateReport: =>
throw new Error('Trying to start state reporting without initializing API client') if !@resinApi?
if !@resinApi?
throw new Error('Trying to start state reporting without initializing API client')
# patch to the device(id) endpoint
@deviceState.on 'change', =>
if !@reportPending
@reportPending = true
# A latency of 100 ms should be acceptable and
# allows avoiding catching docker at weird states
@_reportCurrentState()

View File

@ -21,7 +21,8 @@ Proxyvisor = require './proxyvisor'
serviceAction = (action, serviceId, current, target, options) ->
obj = { action, serviceId, current, target }
obj.options = options if options?
if options?
obj.options = options
return obj
# TODO: move this to an Image class?
@ -31,8 +32,8 @@ imageForService = (service) ->
appId: service.appId
serviceId: service.serviceId
serviceName: service.serviceName
imageId: service.imageId?.toString()
releaseId: service.releaseId?.toString()
imageId: service.imageId
releaseId: service.releaseId
dependent: 0
}
@ -52,7 +53,7 @@ class ApplicationManagerRouter
@router.use(bodyParser.json())
@router.post '/v1/restart', (req, res) =>
appId = checkString(req.body.appId)
appId = checkInt(req.body.appId)
force = checkTruthy(req.body.force)
@eventTracker.track('Restart container (v1)', { appId })
if !appId?
@ -60,8 +61,10 @@ class ApplicationManagerRouter
@applications.getCurrentApp(appId)
.then (app) =>
service = app?.services?[0]
return res.status(400).send('App not found') if !service?
return res.status(400).send('v1 endpoints are only allowed on single-container apps') if app.services.length > 1
if !service?
return res.status(400).send('App not found')
if app.services.length > 1
return res.status(400).send('v1 endpoints are only allowed on single-container apps')
@applications.executeStepAction(serviceAction('restart', service.serviceId, service, service), { force })
.then ->
res.status(200).send('OK')
@ -69,15 +72,17 @@ class ApplicationManagerRouter
res.status(503).send(err?.message or err or 'Unknown error')
@router.post '/v1/apps/:appId/stop', (req, res) =>
appId = checkString(req.params.appId)
appId = checkInt(req.params.appId)
force = checkTruthy(req.body.force)
if !appId?
return res.status(400).send('Missing app id')
@applications.getCurrentApp(appId)
.then (app) =>
service = app?.services?[0]
return res.status(400).send('App not found') if !service?
return res.status(400).send('v1 endpoints are only allowed on single-container apps') if app.services.length > 1
if !service?
return res.status(400).send('App not found')
if app.services.length > 1
return res.status(400).send('v1 endpoints are only allowed on single-container apps')
@applications.setTargetVolatileForService(service.serviceId, running: false)
@applications.executeStepAction(serviceAction('stop', service.serviceId, service), { force })
.then (service) ->
@ -86,15 +91,17 @@ class ApplicationManagerRouter
res.status(503).send(err?.message or err or 'Unknown error')
@router.post '/v1/apps/:appId/start', (req, res) =>
appId = checkString(req.params.appId)
appId = checkInt(req.params.appId)
force = checkTruthy(req.body.force)
if !appId?
return res.status(400).send('Missing app id')
@applications.getCurrentApp(appId)
.then (app) =>
service = app?.services?[0]
return res.status(400).send('App not found') if !service?
return res.status(400).send('v1 endpoints are only allowed on single-container apps') if app.services.length > 1
if !service?
return res.status(400).send('App not found')
if app.services.length > 1
return res.status(400).send('v1 endpoints are only allowed on single-container apps')
@applications.setTargetVolatileForService(service.serviceId, running: true)
@applications.executeStepAction(serviceAction('start', service.serviceId, null, service), { force })
.then (service) ->
@ -103,15 +110,17 @@ class ApplicationManagerRouter
res.status(503).send(err?.message or err or 'Unknown error')
@router.get '/v1/apps/:appId', (req, res) =>
appId = checkString(req.params.appId)
appId = checkInt(req.params.appId)
@eventTracker.track('GET app (v1)', appId)
if !appId?
return res.status(400).send('Missing app id')
@applications.getCurrentApp(appId)
.then (app) ->
service = app?.services?[0]
return res.status(400).send('App not found') if !service?
return res.status(400).send('v1 endpoints are only allowed on single-container apps') if app.services.length > 1
if !service?
return res.status(400).send('App not found')
if app.services.length > 1
return res.status(400).send('v1 endpoints are only allowed on single-container apps')
# Don't return data that will be of no use to the user
appToSend = {
appId
@ -126,19 +135,21 @@ class ApplicationManagerRouter
res.status(503).send(err?.message or err or 'Unknown error')
@router.post '/v1/purge', (req, res) =>
appId = checkString(req.body.appId)
appId = checkInt(req.body.appId)
force = checkTruthy(req.body.force)
if !appId?
errMsg = "App not found: an app needs to be installed for purge to work.
If you've recently moved this device from another app,
please push an app and wait for it to be installed first."
return res.status(400).send(errMsg)
Promise.using updateLock.lock(appId, { force }), =>
@_lockingIfNecessary appId, { force }, =>
@applications.getCurrentApp(appId)
.then (app) =>
service = app?.services?[0]
return res.status(400).send('App not found') if !service?
return res.status(400).send('v1 endpoints are only allowed on single-container apps') if app.services.length > 1
if !service?
return res.status(400).send('App not found')
if app.services.length > 1
return res.status(400).send('v1 endpoints are only allowed on single-container apps')
@applications.executeStepAction(serviceAction('kill', service.serviceId, service, null, skipLock: true), { force })
.then =>
@applications.executeStepAction({
@ -164,21 +175,75 @@ module.exports = class ApplicationManager extends EventEmitter
@volumes = new Volumes({ @docker, @logger })
@proxyvisor = new Proxyvisor({ @config, @logger, @db, @docker, @images, applications: this })
@_targetVolatilePerServiceId = {}
@validActions = [
'kill'
'start'
'stop'
'updateReleaseId'
'fetch'
'removeImage'
'updateImage'
'killAll'
'purge'
'restart'
'cleanup'
'createNetworkOrVolume'
'removeNetworkOrVolume'
].concat(@proxyvisor.validActions)
@actionExecutors = {
stop: (step, { force = false } = {}) =>
@_lockingIfNecessary step.current.appId, { force, skipLock: step.options?.skipLock }, =>
@services.kill(step.current, { removeContainer: false })
kill: (step, { force = false } = {}) =>
@_lockingIfNecessary step.current.appId, { force, skipLock: step.options?.skipLock }, =>
@services.kill(step.current)
.then =>
if step.options?.removeImage
@images.remove(imageForService(step.current))
updateMetadata: (step) =>
@services.updateMetadata(step.current, step.target)
purge: (step, { force = false } = {}) =>
appId = step.appId
@logger.logSystemMessage("Purging data for app #{appId}", { appId }, 'Purge data')
@_lockingIfNecessary appId, { force, skipLock: step.options?.skipLock }, =>
@getCurrentApp(appId)
.then (app) =>
if !_.isEmpty(app?.services)
throw new Error('Attempt to purge app with running services')
if _.isEmpty(app?.volumes)
@logger.logSystemMessage('No volumes to purge', { appId }, 'Purge data noop')
return
Promise.mapSeries _.toPairs(app.volumes ? {}), ([ name, config ]) =>
@volumes.remove({ name })
.then =>
@volumes.create({ name, config, appId })
.then =>
@logger.logSystemMessage('Purged data', { appId }, 'Purge data success')
.catch (err) =>
@logger.logSystemMessage("Error purging data: #{err}", { appId, error: err }, 'Purge data error')
throw err
restart: (step, { force = false } = {}) =>
@_lockingIfNecessary step.current.appId, { force, skipLock: step.options?.skipLock }, =>
Promise.try =>
@services.kill(step.current)
.then =>
@services.start(step.target)
stopAll: (step, { force = false } = {}) =>
@stopAll({ force })
start: (step) =>
@services.start(step.target)
handover: (step, { force = false } = {}) =>
@_lockingIfNecessary step.current.appId, { force, skipLock: step.options?.skipLock }, =>
@services.handover(step.current, step.target)
fetch: (step) =>
Promise.join(
@config.get('fetchOptions')
@images.getAvailable()
(opts, availableImages) =>
opts.deltaSource = @bestDeltaSource(step.image, availableImages)
@images.fetch(step.image, opts)
)
.finally =>
@reportCurrentState(update_downloaded: true)
removeImage: (step) =>
@images.remove(step.image)
updateImage: (step) =>
@images.update(step.target)
cleanup: (step) =>
@images.cleanup()
createNetworkOrVolume: (step) =>
model = if step.model is 'volume' then @volumes else @networks
model.create(step.target)
removeNetworkOrVolume: (step) =>
model = if step.model is 'volume' then @volumes else @networks
model.remove(step.current)
}
@validActions = _.keys(@actionExecutors).concat(@proxyvisor.validActions)
@_router = new ApplicationManagerRouter(this)
@router = @_router.router
@ -210,7 +275,7 @@ module.exports = class ApplicationManager extends EventEmitter
commit = null
# We iterate over the current running services and add them to the current state
# of the app they belong to.
_.forEach services, (service) ->
for service in services
appId = service.appId
apps[appId] ?= {}
apps[appId].services ?= {}
@ -225,22 +290,25 @@ module.exports = class ApplicationManager extends EventEmitter
else
# There's two containers with the same imageId, so this has to be a handover
previousReleaseId = apps[appId].services[service.imageId].releaseId
apps[appId].services[service.imageId].releaseId = Math.max(parseInt(previousReleaseId), parseInt(service.releaseId)).toString()
apps[appId].services[service.imageId].releaseId = Math.max(previousReleaseId, service.releaseId)
apps[appId].services[service.imageId].status = 'Handing over'
_.forEach images, (image) ->
for image in images
appId = image.appId
if !image.dependent
apps[appId] ?= {}
apps[appId].services ?= {}
apps[appId].services[image.imageId] ?= _.pick(image, [ 'status', 'download_progress', 'releaseId' ])
apps[appId].services[image.imageId] ?= _.pick(image, [ 'status', 'releaseId' ])
apps[appId].services[image.imageId].download_progress = image.downloadProgress
else
dependent[appId] ?= {}
dependent[appId].images ?= {}
dependent[appId].images[image.imageId] = _.pick(image, [ 'status', 'download_progress' ])
dependent[appId].images[image.imageId] = _.pick(image, [ 'status' ])
dependent[appId].images[image.imageId].download_progress = image.downloadProgress
obj = { local: apps, dependent }
obj.commit = commit if commit
if commit?
obj.commit = commit
return obj
)
@ -252,18 +320,18 @@ module.exports = class ApplicationManager extends EventEmitter
# We iterate over the current running services and add them to the current state
# of the app they belong to.
_.forEach services, (service) ->
for service in services
appId = service.appId
apps[appId].services ?= []
apps[appId].services.push(service)
_.forEach networks, (network) ->
for network in networks
appId = network.appId
apps[appId] ?= { appId }
apps[appId].networks ?= {}
apps[appId].networks[network.name] = network.config
_.forEach volumes, (volume) ->
for volume in volumes
appId = volume.appId
apps[appId] ?= { appId }
apps[appId].volumes ?= {}
@ -293,114 +361,108 @@ module.exports = class ApplicationManager extends EventEmitter
getTargetApp: (appId) =>
@db.models('app').where({ appId }).select()
.then ([ app ]) =>
return if !app?
if !app?
return
@normaliseAndExtendAppFromDB(app)
# Compares current and target services and returns a list of service pairs to be updated/removed/installed.
# The returned list is an array of objects where the "current" and "target" properties define the update pair, and either can be null
# (in the case of an install or removal).
compareServicesForUpdate: (currentServices, targetServices) ->
Promise.try ->
removePairs = []
installPairs = []
updatePairs = []
targetServiceIds = _.map(targetServices, 'serviceId')
currentServiceIds = _.uniq(_.map(currentServices, 'serviceId'))
removePairs = []
installPairs = []
updatePairs = []
targetServiceIds = _.map(targetServices, 'serviceId')
currentServiceIds = _.uniq(_.map(currentServices, 'serviceId'))
toBeRemoved = _.difference(currentServiceIds, targetServiceIds)
_.forEach toBeRemoved, (serviceId) ->
servicesToRemove = _.filter(currentServices, (s) -> s.serviceId == serviceId)
_.map servicesToRemove, (service) ->
toBeRemoved = _.difference(currentServiceIds, targetServiceIds)
for serviceId in toBeRemoved
servicesToRemove = _.filter(currentServices, (s) -> s.serviceId == serviceId)
for service in servicesToRemove
removePairs.push({
current: service
target: null
serviceId
})
toBeInstalled = _.difference(targetServiceIds, currentServiceIds)
for serviceId in toBeInstalled
serviceToInstall = _.find(targetServices, (s) -> s.serviceId == serviceId)
if serviceToInstall?
installPairs.push({
current: null
target: serviceToInstall
serviceId
})
toBeMaybeUpdated = _.intersection(targetServiceIds, currentServiceIds)
currentServicesPerId = {}
targetServicesPerId = _.keyBy(targetServices, 'serviceId')
for serviceId in toBeMaybeUpdated
currentServiceContainers = _.filter currentServices, (service) ->
return service.serviceId == serviceId
if currentServiceContainers.length > 1
currentServicesPerId[serviceId] = _.maxBy(currentServiceContainers, 'createdAt')
# All but the latest container for this service are spurious and should be removed
for service in _.without(currentServiceContainers, currentServicesPerId[serviceId])
removePairs.push({
current: service
target: null
serviceId
})
else
currentServicesPerId[serviceId] = currentServiceContainers[0]
toBeInstalled = _.difference(targetServiceIds, currentServiceIds)
_.forEach toBeInstalled, (serviceId) ->
servicesToInstall = _.filter(targetServices, (s) -> s.serviceId == serviceId)
_.map servicesToInstall, (service) ->
installPairs.push({
current: null
target: service
serviceId
})
toBeMaybeUpdated = _.intersection(targetServiceIds, currentServiceIds)
currentServicesPerId = {}
targetServicesPerId = {}
_.forEach toBeMaybeUpdated, (serviceId) ->
currentServiceContainers = _.filter currentServices, (service) ->
return service.serviceId == serviceId
targetServicesForId = _.filter targetServices, (service) ->
return service.serviceId == serviceId
throw new Error("Target state includes multiple services with serviceId #{serviceId}") if targetServicesForId.length > 1
targetServicesPerId[serviceId] = targetServicesForId[0]
if currentServiceContainers.length > 1
currentServicesPerId[serviceId] = _.maxBy(currentServiceContainers, 'createdAt')
# All but the latest container for this service are spurious and should be removed
_.forEach _.without(currentServiceContainers, currentServicesPerId[serviceId]), (service) ->
removePairs.push({
current: service
target: null
serviceId
})
else
currentServicesPerId[serviceId] = currentServiceContainers[0]
Promise.filter toBeMaybeUpdated, (serviceId) ->
return !currentServicesPerId[serviceId].isEqual(targetServicesPerId[serviceId])
.map (serviceId) ->
updatePairs.push({
current: currentServicesPerId[serviceId]
target: targetServicesPerId[serviceId]
serviceId
})
.then ->
return { removePairs, installPairs, updatePairs }
needUpdate = _.filter toBeMaybeUpdated, (serviceId) ->
return !currentServicesPerId[serviceId].isEqual(targetServicesPerId[serviceId])
for serviceId in needUpdate
updatePairs.push({
current: currentServicesPerId[serviceId]
target: targetServicesPerId[serviceId]
serviceId
})
return { removePairs, installPairs, updatePairs }
compareNetworksOrVolumesForUpdate: (model, { current, target }, appId) ->
Promise.try ->
outputPairs = []
currentNames = _.keys(current)
targetNames = _.keys(target)
toBeRemoved = _.difference(currentNames, targetNames)
_.forEach toBeRemoved, (name) ->
outputPairs.push({
current: {
name
appId
config: current[name]
}
target: null
})
toBeInstalled = _.difference(targetNames, currentNames)
_.forEach toBeInstalled, (name) ->
outputPairs.push({
current: null
target: {
name
appId
config: target[name]
}
})
toBeUpdated = _.filter _.intersection(targetNames, currentNames), (name) ->
!model.isEqualConfig(current[name], target[name])
_.forEach toBeUpdated, (name) ->
outputPairs.push({
current: {
name
appId
config: current[name]
}
target: {
name
appId
config: target[name]
}
})
return outputPairs
outputPairs = []
currentNames = _.keys(current)
targetNames = _.keys(target)
toBeRemoved = _.difference(currentNames, targetNames)
for name in toBeRemoved
outputPairs.push({
current: {
name
appId
config: current[name]
}
target: null
})
toBeInstalled = _.difference(targetNames, currentNames)
for name in toBeInstalled
outputPairs.push({
current: null
target: {
name
appId
config: target[name]
}
})
toBeUpdated = _.filter _.intersection(targetNames, currentNames), (name) ->
!model.isEqualConfig(current[name], target[name])
for name in toBeUpdated
outputPairs.push({
current: {
name
appId
config: current[name]
}
target: {
name
appId
config: target[name]
}
})
return outputPairs
# TODO: should we consider the case where several services use the same image?
# In such case we should do more complex matching to allow several image objects
@ -409,24 +471,26 @@ module.exports = class ApplicationManager extends EventEmitter
compareImagesForMetadataUpdate: (availableImages, targetServices) ->
pairs = []
targetImages = _.map(targetServices, imageForService)
_.forEach targetImages, (target) ->
imageWithSameName = _.find(availableImages, (img) -> img.name == target.name)
return if !imageWithSameName?
return if _.find(availableImages, (img) -> _.isEqual(_.omit(img, 'id'), target))
pairs.push({ current: imageWithSameName, target, serviceId: target.serviceId })
for target in targetImages
imageWithSameContent = _.find(availableImages, (img) => @images.isSameImage(img, target))
if imageWithSameContent? and !_.find(availableImages, (img) -> _.isEqual(_.omit(img, 'id'), target))
pairs.push({ current: imageWithSameContent, target, serviceId: target.serviceId })
return pairs
# Checks if a service is using a network or volume that is about to be updated
_hasCurrentNetworksOrVolumes: (service, networkPairs, volumePairs) ->
return false if !service?
if !service?
return false
hasNetwork = _.some networkPairs, (pair) ->
pair.current.name == service.network_mode
return true if hasNetwork
if hasNetwork
return true
hasVolume = _.some service.volumes, (volume) ->
name = _.split(volume, ':')[0]
_.some volumePairs, (pair) ->
pair.current.name == name
return true if hasVolume
if hasVolume
return true
return false
# TODO: account for volumes-from, networks-from, links, etc
@ -435,25 +499,28 @@ module.exports = class ApplicationManager extends EventEmitter
# for depends_on, check no install or update pairs have that service
dependencyUnmet = _.some target.depends_on ? [], (dependency) ->
_.find(pendingPairs, (pair) -> pair.target?.serviceName == dependency)? or _.find(stepsInProgress, (step) -> step.target?.serviceName == dependency)?
return false if dependencyUnmet
# for networks and volumes, check no network pairs have that volume name
if _.find(networkPairs, (pair) -> pair.target.name == target.network_mode)?
if dependencyUnmet
return false
if _.find(stepsInProgress, (step) -> step.model == 'network' and step.target.name == target.network_mode)?
# for networks and volumes, check no network pairs have that volume name
if _.find(networkPairs, (pair) -> pair.target?.name == target.network_mode)?
return false
if _.find(stepsInProgress, (step) -> step.model == 'network' and step.target?.name == target.network_mode)?
return false
volumeUnmet = _.some target.volumes, (volumeDefinition) ->
sourceName = volumeDefinition.split(':')[0]
_.find(volumePairs, (pair) -> pair.target.name == sourceName)? or _.find(stepsInProgress, (step) -> step.model == 'volume' and step.target.name == sourceName)?
[ sourceName, destName ] = volumeDefinition.split(':')
if !destName? # If this is not a named volume, ignore it
return false
_.find(volumePairs, (pair) -> pair.target?.name == sourceName)? or _.find(stepsInProgress, (step) -> step.model == 'volume' and step.target.name == sourceName)?
return !volumeUnmet
# Unless the update strategy requires an early kill (i.e. kill-then-download, delete-then-download), we only want
# to kill a service once the images for the services it depends on have been downloaded, so as to minimize
# downtime (but not block the killing too much, potentially causing a deadlock)
_dependenciesMetForServiceKill: (target, targetApp, availableImages) ->
_dependenciesMetForServiceKill: (target, targetApp, availableImages) =>
if target.depends_on?
for dependency in target.depends_on
dependencyService = _.find(targetApp.services, (s) -> s.serviceName == dependency)
if !_.find(availableImages, (image) -> image.name == dependencyService.image)?
if !_.find(availableImages, (image) => @images.isSameImage(image, { name: dependencyService.image }))?
return false
return true
@ -468,7 +535,7 @@ module.exports = class ApplicationManager extends EventEmitter
# If the current update doesn't require killing the services that use this network/volume,
# we have to kill them before removing the network/volume (e.g. when we're only updating the network config)
steps = []
_.forEach dependencies, (dependency) ->
for dependency in dependencies
if !_.some(changingPairs, (pair) -> pair.serviceId == dependency.serviceId)
steps.push(serviceAction('kill', dependency.serviceId, dependency))
return steps
@ -490,8 +557,8 @@ module.exports = class ApplicationManager extends EventEmitter
# Infers steps that do not require creating a new container
_updateContainerStep: (current, target) ->
if current.releaseId != target.releaseId
return serviceAction('updateReleaseId', target.serviceId, current, target)
if current.releaseId != target.releaseId or current.imageId != target.imageId
return serviceAction('updateMetadata', target.serviceId, current, target)
else if target.running
return serviceAction('start', target.serviceId, current, target)
else
@ -529,13 +596,13 @@ module.exports = class ApplicationManager extends EventEmitter
return null
}
_nextStepForService: ({ current, target }, updateContext) ->
_nextStepForService: ({ current, target }, updateContext) =>
{ targetApp, networkPairs, volumePairs, installPairs, updatePairs, stepsInProgress, availableImages } = updateContext
if _.find(stepsInProgress, (step) -> step.serviceId == target.serviceId)?
# There is already a step in progress for this service, so we wait
return null
needsDownload = !_.some(availableImages, (image) -> target.image == image.name)
needsDownload = !_.some(availableImages, (image) => @images.isSameImage(image, { name: target.image }))
dependenciesMetForStart = =>
@_dependenciesMetForServiceStart(target, networkPairs, volumePairs, installPairs.concat(updatePairs), stepsInProgress)
dependenciesMetForKill = =>
@ -554,7 +621,8 @@ module.exports = class ApplicationManager extends EventEmitter
else
strategy = checkString(target.labels['io.resin.update.strategy'])
validStrategies = [ 'download-then-kill', 'kill-then-download', 'delete-then-download', 'hand-over' ]
strategy = 'download-then-kill' if !_.includes(validStrategies, strategy)
if !_.includes(validStrategies, strategy)
strategy = 'download-then-kill'
timeout = checkInt(target.labels['io.resin.update.handover_timeout'])
return @_strategySteps[strategy](current, target, needsDownload, dependenciesMetForStart, dependenciesMetForKill, needsSpecialKill, timeout)
@ -562,45 +630,44 @@ module.exports = class ApplicationManager extends EventEmitter
emptyApp = { services: [], volumes: {}, networks: {} }
if !targetApp?
targetApp = emptyApp
else
# Create the default network for the target app
targetApp.networks[targetApp.appId] ?= {}
if !currentApp?
currentApp = emptyApp
appId = targetApp.appId ? currentApp.appId
# Create the default network for the target app
targetApp.networks[targetApp.appId] ?= {}
Promise.join(
@compareNetworksOrVolumesForUpdate(@networks, { current: currentApp.networks, target: targetApp.networks }, appId)
@compareNetworksOrVolumesForUpdate(@volumes, { current: currentApp.volumes, target: targetApp.volumes }, appId)
@compareServicesForUpdate(currentApp.services, targetApp.services)
@compareImagesForMetadataUpdate(availableImages, targetApp.services)
(networkPairs, volumePairs, { removePairs, installPairs, updatePairs }, imagePairs) =>
steps = []
# All removePairs get a 'kill' action
_.forEach removePairs, ({ current }) ->
steps.push(serviceAction('kill', current.serviceId, current, null))
# next step for install pairs in download - start order, but start requires dependencies, networks and volumes met
# next step for update pairs in order by update strategy. start requires dependencies, networks and volumes met.
_.forEach installPairs.concat(updatePairs), (pair) =>
step = @_nextStepForService(pair, { targetApp, networkPairs, volumePairs, installPairs, updatePairs, stepsInProgress, availableImages })
steps.push(step) if step?
# next step for network pairs - remove requires services killed, create kill if no pairs or steps affect that service
_.forEach networkPairs, (pair) =>
pairSteps = @_nextStepsForNetwork(pair, currentApp, removePairs.concat(updatePairs))
steps = steps.concat(pairSteps) if !_.isEmpty(pairSteps)
# next step for volume pairs - remove requires services killed, create kill if no pairs or steps affect that service
_.forEach volumePairs, (pair) =>
pairSteps = @_nextStepsForVolume(pair, currentApp, removePairs.concat(updatePairs))
steps = steps.concat(pairSteps) if !_.isEmpty(pairSteps)
_.forEach imagePairs, (pair) ->
steps.push(_.assign({ action: 'updateImage' }, pair))
return steps
)
networkPairs = @compareNetworksOrVolumesForUpdate(@networks, { current: currentApp.networks, target: targetApp.networks }, appId)
volumePairs = @compareNetworksOrVolumesForUpdate(@volumes, { current: currentApp.volumes, target: targetApp.volumes }, appId)
{ removePairs, installPairs, updatePairs } = @compareServicesForUpdate(currentApp.services, targetApp.services)
imagePairs = @compareImagesForMetadataUpdate(availableImages, targetApp.services)
steps = []
# All removePairs get a 'kill' action
for pair in removePairs
steps.push(serviceAction('kill', pair.current.serviceId, pair.current, null))
# next step for install pairs in download - start order, but start requires dependencies, networks and volumes met
# next step for update pairs in order by update strategy. start requires dependencies, networks and volumes met.
for pair in installPairs.concat(updatePairs)
step = @_nextStepForService(pair, { targetApp, networkPairs, volumePairs, installPairs, updatePairs, stepsInProgress, availableImages })
if step?
steps.push(step)
# next step for network pairs - remove requires services killed, create kill if no pairs or steps affect that service
for pair in networkPairs
pairSteps = @_nextStepsForNetwork(pair, currentApp, removePairs.concat(updatePairs))
steps = steps.concat(pairSteps)
# next step for volume pairs - remove requires services killed, create kill if no pairs or steps affect that service
for pair in volumePairs
pairSteps = @_nextStepsForVolume(pair, currentApp, removePairs.concat(updatePairs))
steps = steps.concat(pairSteps)
for pair in imagePairs
steps.push(_.assign({ action: 'updateImage' }, pair))
return steps
normaliseAppForDB: (app) =>
services = _.map app.services, (s, serviceId) ->
service = _.clone(s)
service.appId = app.appId
service.releaseId = app.releaseId
service.serviceId = serviceId
service.serviceId = checkInt(serviceId)
service.commit = app.commit
return service
Promise.map services, (service) =>
@ -632,7 +699,8 @@ module.exports = class ApplicationManager extends EventEmitter
.catchReturn(@docker.InvalidNetGatewayError, null)
(imageInfo, apiHostForNetwork) ->
serviceOpts.imageInfo = imageInfo
serviceOpts.supervisorApiHost = apiHostForNetwork if apiHostForNetwork?
if apiHostForNetwork?
serviceOpts.supervisorApiHost = apiHostForNetwork
return new Service(service, serviceOpts)
)
@ -655,9 +723,9 @@ module.exports = class ApplicationManager extends EventEmitter
Promise.map(JSON.parse(app.services), (service) => @createTargetService(service, configOpts))
.then (services) ->
# If a named volume is defined in a service, we add it app-wide so that we can track it and purge it
_.forEach services, (s) ->
for s in services
serviceNamedVolumes = s.getNamedVolumes()
_.forEach serviceNamedVolumes, (name) ->
for name in serviceNamedVolumes
volumes[name] ?= { labels: {} }
outApp = {
appId: app.appId
@ -674,17 +742,16 @@ module.exports = class ApplicationManager extends EventEmitter
setTarget: (apps, dependent , trx) =>
setInTransaction = (trx) =>
Promise.try =>
if apps?
appsArray = _.map apps, (app, appId) ->
appClone = _.clone(app)
appClone.appId = appId
return appClone
Promise.map(appsArray, @normaliseAppForDB)
.then (appsForDB) =>
Promise.map appsForDB, (app) =>
@db.upsertModel('app', app, { appId: app.appId }, trx)
.then ->
trx('app').whereNotIn('appId', _.map(appsForDB, 'appId')).del()
appsArray = _.map apps, (app, appId) ->
appClone = _.clone(app)
appClone.appId = checkInt(appId)
return appClone
Promise.map(appsArray, @normaliseAppForDB)
.then (appsForDB) =>
Promise.map appsForDB, (app) =>
@db.upsertModel('app', app, { appId: app.appId }, trx)
.then ->
trx('app').whereNotIn('appId', _.map(appsForDB, 'appId')).del()
.then =>
@proxyvisor.setTargetInTransaction(dependent, trx)
@ -705,7 +772,8 @@ module.exports = class ApplicationManager extends EventEmitter
.map (app) =>
if !_.isEmpty(app.services)
app.services = _.map app.services, (service) =>
_.merge(service, @_targetVolatilePerServiceId[service.serviceId]) if @_targetVolatilePerServiceId[service.serviceId]?
if @_targetVolatilePerServiceId[service.serviceId]?
_.merge(service, @_targetVolatilePerServiceId[service.serviceId])
return service
return app
@ -733,21 +801,21 @@ module.exports = class ApplicationManager extends EventEmitter
allImagesForApp = (app) -> _.map(app.services, imageForService)
currentImages = _.flatten(_.map(current.local?.apps, allImagesForApp))
targetImages = _.flatten(_.map(target.local?.apps, allImagesForApp))
availableAndUnused = _.filter available, (image) ->
!_.some currentImages.concat(targetImages), (imageInUse) -> image.name == imageInUse.name
imagesToDownload = _.filter targetImages, (imageName) ->
!_.some available, (availableImage) -> availableImage.name == imageName.name
currentImages = _.flatten(_.map(current.local.apps, allImagesForApp))
targetImages = _.flatten(_.map(target.local.apps, allImagesForApp))
availableAndUnused = _.filter available, (image) =>
!_.some currentImages.concat(targetImages), (imageInUse) => @images.isSameImage(image, imageInUse)
imagesToDownload = _.filter targetImages, (targetImage) =>
!_.some available, (availableImage) => @images.isSameImage(availableImage, targetImage)
deltaSources = _.map imagesToDownload, (image) =>
return @bestDeltaSource(image, available)
proxyvisorImages = @proxyvisor.imagesInUse(current, target)
return _.filter availableAndUnused, (image) ->
return _.filter availableAndUnused, (image) =>
notUsedForDelta = !_.some deltaSources, (deltaSource) -> deltaSource == image.name
notUsedByProxyvisor = !_.some proxyvisorImages, (proxyvisorImage) -> image.name == proxyvisorImage
notUsedByProxyvisor = !_.some proxyvisorImages, (proxyvisorImage) => @images.isSameImage(image, { name: proxyvisorImage })
return notUsedForDelta and notUsedByProxyvisor
_inferNextSteps: (cleanupNeeded, availableImages, current, target, stepsInProgress) =>
@ -759,17 +827,14 @@ module.exports = class ApplicationManager extends EventEmitter
if cleanupNeeded
nextSteps.push({ action: 'cleanup' })
imagesToRemove = @_unnecessaryImages(current, target, availableImages)
_.forEach imagesToRemove, (image) ->
for image in imagesToRemove
nextSteps.push({ action: 'removeImage', image })
# If we have to remove any images, we do that before anything else
return nextSteps if !_.isEmpty(nextSteps)
allAppIds = _.union(_.keys(currentByAppId), _.keys(targetByAppId))
Promise.map allAppIds, (appId) =>
@_nextStepsForAppUpdate(currentByAppId[appId], targetByAppId[appId], availableImages, stepsInProgress)
.then (nextStepsForThisApp) ->
nextSteps = nextSteps.concat(nextStepsForThisApp)
.then =>
return @_removeDuplicateSteps(nextSteps, stepsInProgress)
if _.isEmpty(nextSteps)
allAppIds = _.union(_.keys(currentByAppId), _.keys(targetByAppId))
for appId in allAppIds
nextSteps = nextSteps.concat(@_nextStepsForAppUpdate(currentByAppId[appId], targetByAppId[appId], availableImages, stepsInProgress))
return @_removeDuplicateSteps(nextSteps, stepsInProgress)
_removeDuplicateSteps: (nextSteps, stepsInProgress) ->
withoutProgressDups = _.filter nextSteps, (step) ->
@ -779,88 +844,24 @@ module.exports = class ApplicationManager extends EventEmitter
# Kill the containers of every running service (without removing the
# containers themselves), taking the per-app update lock first unless
# overridden with `force`.
stopAll: ({ force = false } = {}) =>
  @services.getAll()
  .map (service) =>
    # NOTE(review): the next two lines look like before/after versions of the
    # same locking call from a rendered diff — the disposer-style
    # _lockIfNecessary and the callback-style _lockingIfNecessary; only one
    # should remain. TODO confirm against the committed file.
    Promise.using @_lockIfNecessary(service.appId, { force }), =>
    @_lockingIfNecessary service.appId, { force }, =>
      @services.kill(service, { removeContainer: false })
# NOTE(review): this span contains both the old disposer-style
# _lockIfNecessary and the new callback-style _lockingIfNecessary (rendered
# diff residue); only the _lockingIfNecessary form should remain.
_lockIfNecessary: (appId, { force = false, skipLock = false } = {}) =>
  return Promise.resolve() if skipLock
# Take the per-app update lock for `appId` and run `fn` while holding it.
# `skipLock` bypasses locking entirely; `force` or the `lockOverride` config
# value makes the lock ignore an existing lockfile.
_lockingIfNecessary: (appId, { force = false, skipLock = false } = {}, fn) =>
  if skipLock
    return Promise.resolve()
  @config.get('lockOverride')
  .then (lockOverride) ->
    return lockOverride or force
  .then (force) ->
    # NOTE(review): two before/after versions of the lock call — only the
    # form that passes `fn` matches the new callback-style API. TODO confirm.
    updateLock.lock(appId, { force })
    updateLock.lock(appId, { force }, fn)
executeStepAction: (step, { force = false } = {}) =>
if _.includes(@proxyvisor.validActions, step.action)
return @proxyvisor.executeStepAction(step)
if !_.includes(@validActions, step.action)
return Promise.reject(new Error("Invalid action #{step.action}"))
actionExecutors =
stop: =>
Promise.using @_lockIfNecessary(step.current.appId, { force, skipLock: step.options?.skipLock }), =>
@services.kill(step.current, { removeContainer: false })
kill: =>
Promise.using @_lockIfNecessary(step.current.appId, { force, skipLock: step.options?.skipLock }), =>
@services.kill(step.current)
.then =>
@images.remove(imageForService(step.current)) if step.options?.removeImage
updateReleaseId: =>
@services.updateReleaseId(step.current, step.target.releaseId)
purge: =>
appId = step.appId
@logger.logSystemMessage("Purging data for app #{appId}", { appId }, 'Purge data')
Promise.using @_lockIfNecessary(appId, { force, skipLock: step.options?.skipLock }), =>
@getCurrentApp(appId)
.then (app) =>
throw new Error('Attempt to purge app with running services') if !_.isEmpty(app?.services)
if _.isEmpty(app?.volumes)
@logger.logSystemMessage('No volumes to purge', { appId }, 'Purge data noop')
return
Promise.mapSeries _.toPairs(app.volumes ? {}), ([ name, config ]) =>
@volumes.remove({ name })
.then =>
@volumes.create({ name, config, appId })
.then =>
@logger.logSystemMessage('Purged data', { appId }, 'Purge data success')
.catch (err) =>
@logger.logSystemMessage("Error purging data: #{err}", { appId, error: err }, 'Purge data error')
throw err
restart: =>
Promise.using @_lockIfNecessary(step.current.appId, { force, skipLock: step.options?.skipLock }), =>
Promise.try =>
@services.kill(step.current)
.then =>
@services.start(step.target)
stopAll: =>
@stopAll({ force })
start: =>
@services.start(step.target)
handover: =>
Promise.using @_lockIfNecessary(step.current.appId, { force, skipLock: step.options?.skipLock }), =>
@services.handover(step.current, step.target)
fetch: =>
Promise.join(
@config.get('fetchOptions')
@images.getAvailable()
(opts, availableImages) =>
opts.deltaSource = @bestDeltaSource(step.image, availableImages)
@images.fetch(step.image, opts)
)
.finally =>
@reportCurrentState(update_downloaded: true)
removeImage: =>
@images.remove(step.image)
updateImage: =>
@images.update(step.target)
cleanup: =>
@images.cleanup()
createNetworkOrVolume: =>
model = if step.model is 'volume' then @volumes else @networks
model.create(step.target)
removeNetworkOrVolume: =>
model = if step.model is 'volume' then @volumes else @networks
model.remove(step.current)
actionExecutors[step.action]()
@actionExecutors[step.action](step, { force })
getRequiredSteps: (currentState, targetState, stepsInProgress) =>
Promise.join(

View File

@ -6,7 +6,7 @@ constants = require '../lib/constants'
validation = require '../lib/validation'
# Predicate for Bluebird's .catch: matches docker errors whose statusCode
# is 404 (image not found).
# NOTE(review): the two return lines below are before/after versions of the
# same check from a rendered diff (string comparison vs validation.checkInt);
# the second line is unreachable as written — only one should remain.
ImageNotFoundError = (err) ->
  return "#{err.statusCode}" is '404'
  return validation.checkInt(err.statusCode) is 404
# image = {
# name: image registry/repo:tag
@ -17,7 +17,7 @@ ImageNotFoundError = (err) ->
# releaseId
# dependent
# status Downloading, Downloaded, Deleting
#	downloadProgress
# }
module.exports = class Images extends EventEmitter
@ -37,7 +37,7 @@ module.exports = class Images extends EventEmitter
fetch: (image, opts) =>
onProgress = (progress) =>
@reportChange(image.imageId, { download_progress: progress.percentage })
@reportChange(image.imageId, { downloadProgress: progress.percentage })
@normalise(image.name)
.then (imageName) =>
@ -47,7 +47,7 @@ module.exports = class Images extends EventEmitter
.then =>
@inspectByName(imageName)
.catch =>
@reportChange(image.imageId, _.merge(_.clone(image), { status: 'Downloading', download_progress: 0 }))
@reportChange(image.imageId, _.merge(_.clone(image), { status: 'Downloading', downloadProgress: 0 }))
Promise.try =>
if validation.checkTruthy(opts.delta)
@logger.logSystemEvent(logTypes.downloadImageDelta, { image })
@ -81,19 +81,14 @@ module.exports = class Images extends EventEmitter
@db.models('image').update(image).where(name: image.name)
_removeImageIfNotNeeded: (image) =>
removed = true
@inspectByName(image.name)
.catch ImageNotFoundError, (err) ->
removed = false
return null
.then (img) =>
if img?
@db.models('image').where(name: image.name).select()
.then (imagesFromDB) =>
if imagesFromDB.length == 1 and _.isEqual(@format(imagesFromDB[0]), @format(image))
@docker.getImage(image.name).remove(force: true)
.then ->
return removed
@db.models('image').where(name: image.name).select()
.then (imagesFromDB) =>
if imagesFromDB.length == 1 and _.isEqual(@format(imagesFromDB[0]), @format(image))
@docker.getImage(image.name).remove(force: true)
.return(true)
.catchReturn(ImageNotFoundError, false)
remove: (image) =>
@reportChange(image.imageId, _.merge(_.clone(image), { status: 'Deleting' }))
@ -127,12 +122,12 @@ module.exports = class Images extends EventEmitter
# True when the supervised image record matches any docker-side image, by
# normalised repo tag or by repo digest.
_isAvailableInDocker: (image, dockerImages) ->
  _.some dockerImages, (dockerImage) ->
    # NOTE(review): before/after versions of the same predicate from a
    # rendered diff; only the second (which also checks RepoDigests) should
    # remain — as written the first line's value is simply discarded.
    _.includes(dockerImage.NormalisedRepoTags, image.name)
    _.includes(dockerImage.NormalisedRepoTags, image.name) or _.includes(dockerImage.RepoDigests, image.name)
# Gets all images that are supervised, in an object containing name, appId,
# serviceId, serviceName, imageId, dependent.
getAvailable: =>
  @_withImagesFromDockerAndDB (dockerImages, supervisedImages) =>
    # NOTE(review): explicit-return and implicit-return versions of the same
    # filter appear back to back (rendered-diff residue); they are equivalent
    # and the second is unreachable — one should be removed.
    return _.filter(supervisedImages, (image) => @_isAvailableInDocker(image, dockerImages))
    _.filter(supervisedImages, (image) => @_isAvailableInDocker(image, dockerImages))
cleanupDatabase: =>
@_withImagesFromDockerAndDB (dockerImages, supervisedImages) =>
@ -145,15 +140,15 @@ module.exports = class Images extends EventEmitter
@getAvailable()
.map (image) ->
image.status = 'Downloaded'
image.download_progress = null
image.downloadProgress = null
return image
.then (images) =>
status = _.clone(@volatileState)
_.forEach images, (image) ->
for image in images
status[image.imageId] ?= image
return _.values(status)
_getDanglingAndOldSupervisorsForCleanup: =>
_getOldSupervisorsForCleanup: =>
images = []
@docker.getRegistryAndName(constants.supervisorImage)
.then (supervisorImageInfo) =>
@ -164,13 +159,9 @@ module.exports = class Images extends EventEmitter
.then ({ imageName, tagName }) ->
if imageName == supervisorImageInfo.imageName and tagName != supervisorImageInfo.tagName
images.push(repoTag)
.then =>
@docker.listImages(filters: { dangling: [ 'true' ] })
.map (image) ->
images.push(image.Id)
.then =>
return _.filter images, (image) =>
!@imageCleanupFailures[image]? or Date.now() - @imageCleanupFailures[image] > 3600 * 1000
!@imageCleanupFailures[image]? or Date.now() - @imageCleanupFailures[image] > constants.imageCleanupErrorIgnoreTimeout
inspectByName: (imageName) =>
@docker.getImage(imageName).inspect()
@ -179,17 +170,25 @@ module.exports = class Images extends EventEmitter
@docker.normaliseImageName(imageName)
# True when at least one image qualifies for cleanup.
isCleanupNeeded: =>
  # NOTE(review): the old and the renamed candidate helpers are both invoked
  # back to back here (rendered-diff residue); only
  # _getOldSupervisorsForCleanup should remain. TODO confirm.
  @_getDanglingAndOldSupervisorsForCleanup()
  @_getOldSupervisorsForCleanup()
  .then (imagesForCleanup) ->
    return !_.isEmpty(imagesForCleanup)
# Delete old supervisor images, recording any removal failure so the failing
# image is skipped for a while (see imageCleanupFailures check in the
# candidate helper).
cleanup: =>
  # NOTE(review): the old and the renamed candidate helpers are both invoked
  # back to back here (rendered-diff residue); only
  # _getOldSupervisorsForCleanup should remain. TODO confirm.
  @_getDanglingAndOldSupervisorsForCleanup()
  @_getOldSupervisorsForCleanup()
  .map (image) =>
    console.log("Cleaning up #{image}")
    @docker.getImage(image).remove(force: true)
    .then =>
      # A successful removal clears any previously recorded failure.
      delete @imageCleanupFailures[image]
    .catch (err) =>
      # Timestamp the failure so this image is ignored until the failure
      # timeout elapses, instead of retrying on every cleanup pass.
      @logger.logSystemMessage("Error cleaning up #{image}: #{err.message} - will ignore for 1 hour", { error: err }, 'Image cleanup error')
      @imageCleanupFailures[image] = Date.now()
# Two image references denote the same image when their full names match, or
# when both carry an identical content digest (the part after '@' in a
# repo@sha256:... reference). Exposed both as a class-level helper and as an
# instance method.
@isSameImage: (imageA, imageB) ->
  return true if imageA.name is imageB.name
  digestA = imageA.name.split('@')[1]
  digestB = imageB.name.split('@')[1]
  digestA? and digestA is digestB
isSameImage: @isSameImage

View File

@ -1,35 +1,30 @@
Promise = require 'bluebird'
_ = require 'lodash'
logTypes = require '../lib/log-types'
{ checkInt } = require '../lib/validation'
module.exports = class Networks
constructor: ({ @docker, @logger }) ->
# TODO: parse supported config fields
format: (network) ->
return {
appId: network.Labels['io.resin.appId']
appId: checkInt(network.