Mirror of https://github.com/balena-os/balena-supervisor.git
Issues #389 and #390: Remove /host_run/dbus and /host/var/lib/connman bind mounts for non-ResinOS-1.X devices
On ResinOS 2.X the default mounts should no longer include the previously deprecated /host_run/dbus bind mount, and since connman does not exist on 2.X, the /host/var/lib/connman mount is confusing there; both are now added only on ResinOS 1.X devices. This is a breaking change, as it is not backwards-compatible for non-ResinOS instances of the supervisor.

Change-Type: major
Signed-off-by: Pablo Carranza Velez <pablo@resin.io>
commit b64ed9568c
parent f7c702b845
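In short, the deprecated mounts become conditional on the host OS version: callers resolve device.isResinOSv1() first and thread the result into the mount defaults. A minimal sketch of the new call shape, as introduced in the diff below:

```coffeescript
# Names as in the diff; isV1 selects whether the V1-only mounts are included.
device.isResinOSv1().then (isV1) ->
	volumes = utils.defaultVolumes(isV1)        # adds connman/host_run volumes only when isV1
	binds = utils.defaultBinds(app.appId, isV1) # likewise for the host bind mounts
```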
@@ -214,8 +214,8 @@ fetch = (app, setDeviceUpdateState = true) ->
 			throw err
 
 shouldMountKmod = (image) ->
-	device.getOSVersion().then (osVersion) ->
-		return false if not /^Resin OS 1./.test(osVersion)
+	device.isResinOSv1().then (isV1) ->
+		return false if not isV1
 		Promise.using docker.imageRootDirMounted(image), (rootDir) ->
 			utils.getOSVersion(rootDir + '/etc/os-release')
 		.then (version) ->
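The regex test against the OS version string now lives behind the memoized device.isResinOSv1() helper (added further down), so shouldMountKmod can short-circuit on 2.X hosts. An illustrative call, with a hypothetical image name:

```coffeescript
# On a host whose OS version does not match /^Resin OS 1./, isResinOSv1()
# yields false and shouldMountKmod resolves to false without ever mounting
# the image root directory.
shouldMountKmod('someuser/someapp')
.then (shouldMount) ->
	console.log(shouldMount) # -> false on a ResinOS 2.X host
```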
@@ -225,112 +225,113 @@ shouldMountKmod = (image) ->
 				return false
 
 application.start = start = (app) ->
-	volumes = utils.defaultVolumes
-	binds = utils.defaultBinds(app.appId)
-	alreadyStarted = false
-	Promise.try ->
-		# Parse the env vars before trying to access them, that's because they have to be stringified for knex..
-		return [ JSON.parse(app.env), JSON.parse(app.config) ]
-	.spread (env, conf) ->
-		if env.PORT?
-			portList = env.PORT
-			.split(',')
-			.map((port) -> port.trim())
-			.filter(isValidPort)
-
-		if app.containerId?
-			# If we have a container id then check it exists and if so use it.
-			container = docker.getContainer(app.containerId)
-			containerPromise = container.inspectAsync().return(container)
-		else
-			containerPromise = Promise.rejected()
-
-		# If there is no existing container then create one instead.
-		containerPromise.catch ->
-			fetch(app)
-			.then (imageInfo) ->
-				logSystemEvent(logTypes.installApp, app)
-				device.updateState(status: 'Installing')
-
-				ports = {}
-				portBindings = {}
-				if portList?
-					portList.forEach (port) ->
-						ports[port + '/tcp'] = {}
-						portBindings[port + '/tcp'] = [ HostPort: port ]
-
-				if imageInfo?.Config?.Cmd
-					cmd = imageInfo.Config.Cmd
-				else
-					cmd = [ '/bin/bash', '-c', '/start' ]
-
-				restartPolicy = createRestartPolicy({ name: conf['RESIN_APP_RESTART_POLICY'], maximumRetryCount: conf['RESIN_APP_RESTART_RETRIES'] })
-				shouldMountKmod(app.imageId)
-				.then (shouldMount) ->
-					binds.push('/bin/kmod:/bin/kmod:ro') if shouldMount
-					docker.createContainerAsync(
-						Image: app.imageId
-						Cmd: cmd
-						Tty: true
-						Volumes: volumes
-						Env: _.map env, (v, k) -> k + '=' + v
-						ExposedPorts: ports
-						HostConfig:
-							Privileged: true
-							NetworkMode: 'host'
-							PortBindings: portBindings
-							Binds: binds
-							RestartPolicy: restartPolicy
-					)
-			.tap ->
-				logSystemEvent(logTypes.installAppSuccess, app)
-			.catch (err) ->
-				logSystemEvent(logTypes.installAppError, app, err)
-				throw err
-		.tap (container) ->
-			logSystemEvent(logTypes.startApp, app)
-			device.updateState(status: 'Starting')
-			container.startAsync()
-			.catch (err) ->
-				statusCode = '' + err.statusCode
-				# 304 means the container was already started, precisely what we want :)
-				if statusCode is '304'
-					alreadyStarted = true
-					return
-
-				if statusCode is '500' and err.json.trim().match(/exec format error$/)
-					# Provide a friendlier error message for "exec format error"
-					device.getDeviceType()
-					.then (deviceType) ->
-						throw new Error("Application architecture incompatible with #{deviceType}: exec format error")
-				else
-					# rethrow the same error
-					throw err
-			.catch (err) ->
-				# If starting the container failed, we remove it so that it doesn't litter
-				container.removeAsync(v: true)
-				.then ->
-					app.containerId = null
-					knex('app').update(app).where(appId: app.appId)
-				.finally ->
-					logSystemEvent(logTypes.startAppError, app, err)
-					throw err
-			.then ->
-				app.containerId = container.id
-				device.updateState(commit: app.commit)
-				logger.attach(app)
-	.tap (container) ->
-		# Update the app info, only if starting the container worked.
-		knex('app').update(app).where(appId: app.appId)
-		.then (affectedRows) ->
-			knex('app').insert(app) if affectedRows == 0
-	.tap ->
-		if alreadyStarted
-			logSystemEvent(logTypes.startAppNoop, app)
-		else
-			logSystemEvent(logTypes.startAppSuccess, app)
-	.finally ->
-		device.updateState(status: 'Idle')
+	device.isResinOSv1().then (isV1) ->
+		volumes = utils.defaultVolumes(isV1)
+		binds = utils.defaultBinds(app.appId, isV1)
+		alreadyStarted = false
+		Promise.try ->
+			# Parse the env vars before trying to access them, that's because they have to be stringified for knex..
+			return [ JSON.parse(app.env), JSON.parse(app.config) ]
+		.spread (env, conf) ->
+			if env.PORT?
+				portList = env.PORT
+				.split(',')
+				.map((port) -> port.trim())
+				.filter(isValidPort)
+
+			if app.containerId?
+				# If we have a container id then check it exists and if so use it.
+				container = docker.getContainer(app.containerId)
+				containerPromise = container.inspectAsync().return(container)
+			else
+				containerPromise = Promise.rejected()
+
+			# If there is no existing container then create one instead.
+			containerPromise.catch ->
+				fetch(app)
+				.then (imageInfo) ->
+					logSystemEvent(logTypes.installApp, app)
+					device.updateState(status: 'Installing')
+
+					ports = {}
+					portBindings = {}
+					if portList?
+						portList.forEach (port) ->
+							ports[port + '/tcp'] = {}
+							portBindings[port + '/tcp'] = [ HostPort: port ]
+
+					if imageInfo?.Config?.Cmd
+						cmd = imageInfo.Config.Cmd
+					else
+						cmd = [ '/bin/bash', '-c', '/start' ]
+
+					restartPolicy = createRestartPolicy({ name: conf['RESIN_APP_RESTART_POLICY'], maximumRetryCount: conf['RESIN_APP_RESTART_RETRIES'] })
+					shouldMountKmod(app.imageId)
+					.then (shouldMount) ->
+						binds.push('/bin/kmod:/bin/kmod:ro') if shouldMount
+						docker.createContainerAsync(
+							Image: app.imageId
+							Cmd: cmd
+							Tty: true
+							Volumes: volumes
+							Env: _.map env, (v, k) -> k + '=' + v
+							ExposedPorts: ports
+							HostConfig:
+								Privileged: true
+								NetworkMode: 'host'
+								PortBindings: portBindings
+								Binds: binds
+								RestartPolicy: restartPolicy
+						)
+				.tap ->
+					logSystemEvent(logTypes.installAppSuccess, app)
+				.catch (err) ->
+					logSystemEvent(logTypes.installAppError, app, err)
+					throw err
+			.tap (container) ->
+				logSystemEvent(logTypes.startApp, app)
+				device.updateState(status: 'Starting')
+				container.startAsync()
+				.catch (err) ->
+					statusCode = '' + err.statusCode
+					# 304 means the container was already started, precisely what we want :)
+					if statusCode is '304'
+						alreadyStarted = true
+						return
+
+					if statusCode is '500' and err.json.trim().match(/exec format error$/)
+						# Provide a friendlier error message for "exec format error"
+						device.getDeviceType()
+						.then (deviceType) ->
+							throw new Error("Application architecture incompatible with #{deviceType}: exec format error")
+					else
+						# rethrow the same error
+						throw err
+				.catch (err) ->
+					# If starting the container failed, we remove it so that it doesn't litter
+					container.removeAsync(v: true)
+					.then ->
+						app.containerId = null
+						knex('app').update(app).where(appId: app.appId)
+					.finally ->
+						logSystemEvent(logTypes.startAppError, app, err)
+						throw err
+				.then ->
+					app.containerId = container.id
+					device.updateState(commit: app.commit)
+					logger.attach(app)
+		.tap (container) ->
+			# Update the app info, only if starting the container worked.
+			knex('app').update(app).where(appId: app.appId)
+			.then (affectedRows) ->
+				knex('app').insert(app) if affectedRows == 0
+		.tap ->
+			if alreadyStarted
+				logSystemEvent(logTypes.startAppNoop, app)
+			else
+				logSystemEvent(logTypes.startAppSuccess, app)
+		.finally ->
+			device.updateState(status: 'Idle')
 
 validRestartPolicies = [ 'no', 'always', 'on-failure', 'unless-stopped' ]
 # Construct a restart policy based on its name and maximumRetryCount.
|
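The only functional change in this hunk is the new device.isResinOSv1() wrapper (everything beneath it is re-indented one level), which threads isV1 into the volume and bind defaults. For illustration, the bind list a 2.X device would end up with, before the optional kmod bind is pushed (appId hypothetical, and assuming config.dataPath is '/resin-data'):

```coffeescript
utils.defaultBinds(123, false)
# -> [ '/resin-data/123:/data'
#      '/lib/modules:/lib/modules'
#      '/lib/firmware:/lib/firmware'
#      '/run/dbus:/host/run/dbus' ]
```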
@@ -224,3 +224,8 @@ do ->
 
 	exports.getOSVersion = memoizePromise ->
 		utils.getOSVersion(config.hostOsVersionPath)
+
+	exports.isResinOSv1 = memoizePromise ->
+		exports.getOSVersion().then (osVersion) ->
+			return true if /^Resin OS 1./.test(osVersion)
+			return false
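isResinOSv1 is memoized just like getOSVersion, so the version check reads the os-release file at most once per supervisor run. A usage sketch:

```coffeescript
# Repeated calls reuse the cached promise rather than re-reading os-release.
device.isResinOSv1().then (isV1) ->
	console.log('ResinOS 1.X host?', isV1)
```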
@@ -265,42 +265,45 @@ do ->
 	docker.modem.dialAsync = Promise.promisify(docker.modem.dial)
 	createContainer = (options, internalId) ->
 		Promise.using writeLockImages(), ->
-			knex('image').select().where('repoTag', options.Image)
-			.then (images) ->
-				throw new Error('Only images created via the Supervisor can be used for creating containers.') if images.length == 0
-				knex.transaction (tx) ->
-					Promise.try ->
-						return internalId if internalId?
-						tx.insert({}, 'id').into('container')
-						.then ([ id ]) ->
-							return id
-					.then (id) ->
-						options.HostConfig ?= {}
-						options.Volumes ?= {}
-						_.assign(options.Volumes, utils.defaultVolumes)
-						options.HostConfig.Binds = utils.defaultBinds("containers/#{id}")
-						query = ''
-						query = "name=#{options.Name}&" if options.Name?
-						optsf =
-							path: "/containers/create?#{query}"
-							method: 'POST'
-							options: options
-							statusCodes:
-								200: true
-								201: true
-								404: 'no such container'
-								406: 'impossible to attach'
-								500: 'server error'
-						utils.validateKeys(options, utils.validContainerOptions)
-						.then ->
-							utils.validateKeys(options.HostConfig, utils.validHostConfigOptions)
-						.then ->
-							docker.modem.dialAsync(optsf)
-						.then (data) ->
-							containerId = data.Id
-							tx('container').update({ containerId }).where({ id })
-							.return(data)
+			Promise.join(
+				knex('image').select().where('repoTag', options.Image)
+				device.isResinOSv1()
+				(images, isV1) ->
+					throw new Error('Only images created via the Supervisor can be used for creating containers.') if images.length == 0
+					knex.transaction (tx) ->
+						Promise.try ->
+							return internalId if internalId?
+							tx.insert({}, 'id').into('container')
+							.then ([ id ]) ->
+								return id
+						.then (id) ->
+							options.HostConfig ?= {}
+							options.Volumes ?= {}
+							_.assign(options.Volumes, utils.defaultVolumes(isV1))
+							options.HostConfig.Binds = utils.defaultBinds("containers/#{id}", isV1)
+							query = ''
+							query = "name=#{options.Name}&" if options.Name?
+							optsf =
+								path: "/containers/create?#{query}"
+								method: 'POST'
+								options: options
+								statusCodes:
+									200: true
+									201: true
+									404: 'no such container'
+									406: 'impossible to attach'
+									500: 'server error'
+							utils.validateKeys(options, utils.validContainerOptions)
+							.then ->
+								utils.validateKeys(options.HostConfig, utils.validHostConfigOptions)
+							.then ->
+								docker.modem.dialAsync(optsf)
+							.then (data) ->
+								containerId = data.Id
+								tx('container').update({ containerId }).where({ id })
+								.return(data)
+			)
 	exports.createContainer = (req, res) ->
 		createContainer(req.body)
 		.then (data) ->
 			res.json(data)
|
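This hunk swaps the sequential .then chain for bluebird's Promise.join, so the image lookup and the OS-version check run concurrently and land in a single handler. A self-contained sketch of that shape, using stand-in promises rather than the real queries:

```coffeescript
Promise = require 'bluebird'

# Promise.join runs the listed promises in parallel and calls the trailing
# handler with their resolved values, in order.
Promise.join(
	Promise.resolve([ { repoTag: 'foo/bar' } ]) # stand-in for the knex('image') query
	Promise.resolve(false)                      # stand-in for device.isResinOSv1()
	(images, isV1) ->
		console.log(images.length, isV1) # -> 1 false
)
```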
@@ -242,26 +242,32 @@ exports.getOSVersion = (path) ->
 		console.log('Could not get OS Version: ', err, err.stack)
 		return undefined
 
-exports.defaultVolumes = {
-	'/data': {}
-	'/lib/modules': {}
-	'/lib/firmware': {}
-	'/host/var/lib/connman': {}
-	'/host/run/dbus': {}
-}
+exports.defaultVolumes = (includeV1Volumes) ->
+	volumes = {
+		'/data': {}
+		'/lib/modules': {}
+		'/lib/firmware': {}
+		'/host/run/dbus': {}
+	}
+	if includeV1Volumes
+		volumes['/host/var/lib/connman'] = {}
+		volumes['/host_run/dbus'] = {}
+	return volumes
 
 exports.getDataPath = (identifier) ->
 	return config.dataPath + '/' + identifier
 
-exports.defaultBinds = (dataPath) ->
-	return [
+exports.defaultBinds = (dataPath, includeV1Binds) ->
+	binds = [
 		exports.getDataPath(dataPath) + ':/data'
 		'/lib/modules:/lib/modules'
 		'/lib/firmware:/lib/firmware'
-		'/run/dbus:/host_run/dbus'
 		'/run/dbus:/host/run/dbus'
-		'/var/lib/connman:/host/var/lib/connman'
 	]
+	if includeV1Binds
+		binds.push('/run/dbus:/host_run/dbus')
+		binds.push('/var/lib/connman:/host/var/lib/connman')
+	return binds
 
 exports.validComposeOptions = [
 	'command'
|
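With the flag pattern above, the V1-only entries are appended only on request. A quick sketch of the resulting volume keys for each flag value (module path assumed):

```coffeescript
utils = require './utils' # the module modified in the hunk above

# includeV1Volumes = false (ResinOS 2.X and other hosts):
console.log Object.keys(utils.defaultVolumes(false))
# -> [ '/data', '/lib/modules', '/lib/firmware', '/host/run/dbus' ]

# includeV1Volumes = true (ResinOS 1.X): the same keys plus
# '/host/var/lib/connman' and '/host_run/dbus'
console.log Object.keys(utils.defaultVolumes(true))
```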