Mirror of https://github.com/balena-os/balena-supervisor.git, synced 2025-04-19 00:26:53 +00:00
Various bugfixes
* Get imageId when normalising a dep. app from the DB
* Fix the appId in migrations when updating the supervisor
* Use the update lock to update a service's metadata
* Restart clears volatile target state
* Fix function definition for updateMetadata
* Improve backwards compatibility of /v1/apps/:appId endpoint
* Fix multicontainer deltas to work with resumable-request 2.0
* Fix dependent target normalisation logic

Signed-off-by: Pablo Carranza Velez <pablo@resin.io>
This commit is contained in:
parent da2359e224
commit ba829412e1
@@ -58,6 +58,8 @@ class ApplicationManagerRouter
 @deviceState.getCurrentForComparison()
 .then (currentState) =>
 app = currentState.local.apps[appId]
+imageIds = _.map(app.services, 'imageId')
+@applications.clearTargetVolatileForServices(imageIds)
 stoppedApp = _.cloneDeep(app)
 stoppedApp.services = []
 currentState.local.apps[appId] = stoppedApp
@@ -147,23 +149,27 @@ class ApplicationManagerRouter
 @eventTracker.track('GET app (v1)', appId)
 if !appId?
 return res.status(400).send('Missing app id')
-@applications.getCurrentApp(appId)
-.then (app) ->
-service = app?.services?[0]
-if !service?
-return res.status(400).send('App not found')
-if app.services.length > 1
-return res.status(400).send('Some v1 endpoints are only allowed on single-container apps')
-# Don't return data that will be of no use to the user
-appToSend = {
-appId
-containerId: service.containerId
-env: _.omit(service.environment, constants.privateAppEnvVars)
-commit: service.commit
-releaseId: app.releaseId
-imageId: service.image
-}
-res.json(appToSend)
+Promise.join(
+@applications.getCurrentApp(appId)
+@applications.getStatus()
+(app, status) ->
+service = app?.services?[0]
+if !service?
+return res.status(400).send('App not found')
+if app.services.length > 1
+return res.status(400).send('Some v1 endpoints are only allowed on single-container apps')
+# Don't return data that will be of no use to the user
+appToSend = {
+appId
+containerId: service.containerId
+env: _.omit(service.environment, constants.privateAppEnvVars)
+releaseId: service.releaseId
+imageId: service.image
+}
+if status.commit?
+appToSend.commit = status.commit
+res.json(appToSend)
+)
 .catch (err) ->
 res.status(503).send(err?.message or err or 'Unknown error')
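For v1 callers, the handler above now takes the reported commit from the device's overall status (@applications.getStatus()) rather than from the service record, which is what keeps GET /v1/apps/:appId backwards compatible on multicontainer-capable supervisors. A rough sketch of the response it builds; the field names come from the hunk above, the values below are invented for illustration:

# Illustrative only: shape of the GET /v1/apps/:appId response after this change.
exampleResponse =
  appId: 12345
  containerId: 'aabbccddeeff'
  env: { MY_VAR: 'value' }
  releaseId: 2
  imageId: 'registry2.resin.io/someorg/someapp'   # service.image, kept under the v1 field name
  commit: 'abcdef123456'                          # only present once the device status reports a commit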
@@ -205,6 +211,7 @@ class ApplicationManagerRouter
 if !service?
 errMsg = 'Service not found, a container must exist for service restart to work.'
 return res.status(404).send(errMsg)
+@applications.setTargetVolatileForService(service.imageId, running: true)
 @applications.executeStepAction(serviceAction('restart', service.serviceId, service, service), { skipLock: true })
 .then ->
 res.status(200).send('OK')
@@ -288,8 +295,9 @@ module.exports = class ApplicationManager extends EventEmitter
 .then =>
 if step.options?.removeImage
 @images.removeByDockerId(step.current.image)
-updateMetadata: (step) =>
-@services.updateMetadata(step.current, step.target)
+updateMetadata: (step, { force = false, skipLock = false } = {}) =>
+@_lockingIfNecessary step.current.appId, { force, skipLock: skipLock or step.options?.skipLock }, =>
+@services.updateMetadata(step.current, step.target)
 restart: (step, { force = false, skipLock = false } = {}) =>
 @_lockingIfNecessary step.current.appId, { force, skipLock: skipLock or step.options?.skipLock }, =>
 Promise.try =>
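With this change the updateMetadata step acquires the update lock before touching a service, through the same _lockingIfNecessary wrapper that restart uses. A minimal sketch of issuing such a step, mirroring the executeStepAction call shape shown in the router hunk further up; the step field names and the runningService/targetService variables are placeholders, not the supervisor's exact internals:

# Illustrative only: driving an updateMetadata step so it runs under the update lock.
step =
  action: 'updateMetadata'
  current: runningService      # assumed: the service as it is currently running
  target: targetService        # assumed: the service metadata to apply
@applications.executeStepAction(step, { skipLock: false })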
@@ -857,10 +865,14 @@ module.exports = class ApplicationManager extends EventEmitter
 .then =>
 @_targetVolatilePerImageId = {}

-setTargetVolatileForService: (imageId, target) ->
+setTargetVolatileForService: (imageId, target) =>
 @_targetVolatilePerImageId[imageId] ?= {}
 _.assign(@_targetVolatilePerImageId[imageId], target)

+clearTargetVolatileForServices: (imageIds) =>
+for imageId in imageIds
+@_targetVolatilePerImageId[imageId] = {}

 getTargetApps: =>
 Promise.map(@db.models('app').select(), @normaliseAndExtendAppFromDB)
 .map (app) =>
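Together with the router hunks above, the volatile target state now has a symmetric lifecycle: a v1 restart pins a service as running, and a v1 stop or purge drops those overrides again. A short sketch using only the calls that appear in this diff:

# A v1 restart request pins the service as running in the volatile target
# (see the router hunk above):
@applications.setTargetVolatileForService(service.imageId, running: true)

# Stopping or purging the app clears the override for every one of its services:
imageIds = _.map(app.services, 'imageId')
@applications.clearTargetVolatileForServices(imageIds)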
@@ -9,9 +9,9 @@ _ = require 'lodash'
 { envArrayToObject } = require './conversions'
 { checkInt } = require './validation'

-applyDelta = (imgSrc, deltaUrl, { requestTimeout, applyTimeout, resumeOpts }, onProgress) ->
+applyDelta = (imgSrc, deltaUrl, applyTimeout, opts, onProgress) ->
 new Promise (resolve, reject) ->
-req = resumable(request, { url: deltaUrl, timeout: requestTimeout }, resumeOpts)
+req = resumable(Object.assign({ url: deltaUrl }, opts))
 .on('progress', onProgress)
 .on('retry', onProgress)
 .on('error', reject)
@@ -24,7 +24,7 @@ applyDelta = (imgSrc, deltaUrl, { requestTimeout, applyTimeout, resumeOpts }, on
 deltaStream = dockerDelta.applyDelta(imgSrc, timeout: applyTimeout)
 res.pipe(deltaStream)
 .on('id', (id) -> resolve('sha256:' + id))
-.on('error', req.destroy.bind(req))
+.on('error', req.abort.bind(req))

 module.exports = class DockerUtils extends DockerToolbelt
 constructor: (opts) ->
@@ -94,8 +94,8 @@ module.exports = class DockerUtils extends DockerToolbelt
 deltaSrc = null
 else
 deltaSrc = deltaSourceId
-resumeOpts = { maxRetries: retryCount, retryInterval }
-resolve(applyDelta(deltaSrc, deltaUrl, { requestTimeout, applyTimeout, resumeOpts }, onProgress))
+resumeOpts = { timeout: requestTimeout, maxRetries: retryCount, retryInterval }
+resolve(applyDelta(deltaSrc, deltaUrl, applyTimeout, resumeOpts, onProgress))
 .on 'error', reject
 .catch dockerDelta.OutOfSyncError, (err) =>
 console.log('Falling back to regular pull')
@@ -30,6 +30,7 @@ exports.requestOpts =
 'User-Agent': userAgent

 resumableOpts =
+timeout: DEFAULT_REQUEST_TIMEOUT
 maxRetries: DEFAULT_REQUEST_RETRY_COUNT
 retryInterval: DEFAULT_REQUEST_RETRY_INTERVAL
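The delta hunks above adapt the code to resumable-request 2.0: as the removed lines show, the old code passed the request module plus separate request and resume option objects, while the new code hands resumable() a single merged options object carrying the URL, the request timeout and the retry settings. The before/after call shapes, copied from the hunks above:

# resumable-request 1.x call shape, removed by this commit:
#   req = resumable(request, { url: deltaUrl, timeout: requestTimeout }, resumeOpts)

# resumable-request 2.0 call shape, as introduced above:
resumeOpts = { timeout: requestTimeout, maxRetries: retryCount, retryInterval }
req = resumable(Object.assign({ url: deltaUrl }, resumeOpts))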
@@ -12,12 +12,13 @@ var tryParse = function (obj) {
 }
 }

-var singleToMulticontainerApp = function (app, appId) {
+var singleToMulticontainerApp = function (app) {
 // From *very* old supervisors, env or config may be null
 // so we ignore errors parsing them
 let conf = tryParse(app.config)
 let env = tryParse(app.env)
 let environment = {}
+let appId = parseInt(app.appId)
 for (let key in env) {
 if (!/^RESIN_/.test(key)) {
 environment[key] = env[key]
@@ -368,11 +368,11 @@ module.exports = class Proxyvisor

 normaliseDependentDeviceTargetForDB: (device, appCommit) ->
 Promise.try ->
-apps = _.clone(device.apps ? {})
-for app in apps
-app.commit ?= appCommit
+apps = _.mapValues _.clone(device.apps ? {}), (app) ->
+app.commit = appCommit or null
 app.config ?= {}
 app.environment ?= {}
+return app
 apps = JSON.stringify(apps)
 outDevice = {
 uuid: device.uuid
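After the normalisation above, every dependent app target written to the DB carries a commit (possibly null), a config and an environment. A small sketch of the transformation, with an invented dependent device as input:

# Illustrative only: input and stored output of normaliseDependentDeviceTargetForDB.
device =
  uuid: 'some-uuid'
  apps:
    '123': { environment: { FOO: 'bar' } }    # no commit, no config
# normaliseDependentDeviceTargetForDB(device, 'abc123') now produces a DB row whose
# apps field is the JSON string of:
#   { '123': { commit: 'abc123', config: {}, environment: { FOO: 'bar' } } }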
@@ -417,6 +417,7 @@ module.exports = class Proxyvisor
 commit: app.commit
 releaseId: app.releaseId
 image: app.image
+imageId: app.imageId
 config: JSON.parse(app.config)
 environment: JSON.parse(app.environment)
 parentApp: app.parentApp
@@ -428,7 +429,10 @@ module.exports = class Proxyvisor
 outDevice = {
 uuid: device.uuid
 name: device.name
-apps: JSON.parse(device.apps)
+apps: _.mapValues JSON.parse(device.apps), (a) ->
+a.commit ?= null
+return a

 }
 return outDevice
@@ -495,11 +499,12 @@ module.exports = class Proxyvisor
 return null if dev.markedForDeletion
 devTarget = _.clone(dev)
 delete devTarget.markedForDeletion
+delete devTarget.lock_expiry_date
 devTarget.apps = {}
 devTarget.apps[appId] = {
 commit: dev.apps[appId].targetCommit
-environment: dev.apps[appId].targetEnvironment
-config: dev.apps[appId].targetConfig
+environment: dev.apps[appId].targetEnvironment or {}
+config: dev.apps[appId].targetConfig or {}
 }
 return devTarget
 currentDeviceTargets = _.filter(currentDeviceTargets, (dev) -> !_.isNull(dev))
@@ -538,6 +543,7 @@ module.exports = class Proxyvisor
 }]

 devicesDiffer = @_compareDevices(currentDevices, targetDevices, appId)

 # - if current doesn't match target, or the devices differ, push an updateDependentTargets step
 if !_.isEqual(current, target) or devicesDiffer
 return [{