Merge pull request #216 from resin-io/215-improve-containers-api

Improve containers API
This commit is contained in:
Petros Angelatos 2016-07-26 20:52:56 -07:00 committed by GitHub
commit eb09713391
7 changed files with 228 additions and 71 deletions

View File

@ -24,6 +24,7 @@
"lodash": "^3.0.0",
"log-timestamp": "^0.1.2",
"mixpanel": "0.0.20",
"mkdirp": "^0.5.1",
"network-checker": "~0.0.5",
"pinejs-client": "^1.7.1",
"pubnub": "^3.7.13",

View File

@ -170,6 +170,7 @@ module.exports = (application) ->
unparsedRouter.delete '/v1/images/*', dockerUtils.deleteImage
unparsedRouter.get '/v1/images', dockerUtils.listImages
parsedRouter.post '/v1/containers/create', dockerUtils.createContainer
parsedRouter.post '/v1/containers/update', dockerUtils.updateContainer
parsedRouter.post '/v1/containers/:id/start', dockerUtils.startContainer
unparsedRouter.post '/v1/containers/:id/stop', dockerUtils.stopContainer
unparsedRouter.delete '/v1/containers/:id', dockerUtils.deleteContainer
@ -183,7 +184,7 @@ module.exports = (application) ->
utils.getKnexApp(appId)
.then (app) ->
res.status(200)
compose.up(application.composePath(appId), onStatus)
compose.up(appId, onStatus)
.catch (err) ->
console.log('Error on compose up:', err, err.stack)
.finally ->
@ -201,7 +202,7 @@ module.exports = (application) ->
utils.getKnexApp(appId)
.then (app) ->
res.status(200)
compose.down(application.composePath(appId), onStatus)
compose.down(appId, onStatus)
.catch (err) ->
console.log('Error on compose down:', err, err.stack)
.finally ->

View File

@ -176,21 +176,8 @@ shouldMountKmod = (image) ->
return false
application.start = start = (app) ->
volumes =
'/data': {}
'/lib/modules': {}
'/lib/firmware': {}
'/host/var/lib/connman': {}
'/host/run/dbus': {}
binds = [
config.dataPath + '/' + app.appId + ':/data'
'/lib/modules:/lib/modules'
'/lib/firmware:/lib/firmware'
'/run/dbus:/host_run/dbus'
'/run/dbus:/host/run/dbus'
'/etc/resolv.conf:/etc/resolv.conf:rw'
'/var/lib/connman:/host/var/lib/connman'
]
volumes = utils.defaultVolumes
binds = utils.defaultBinds(app.appId)
Promise.try ->
# Parse the env vars before trying to access them, that's because they have to be stringified for knex..
JSON.parse(app.env)
@ -322,9 +309,6 @@ killmePath = (app) ->
appId = app.appId ? app
return "/mnt/root#{config.dataPath}/#{appId}/resin-kill-me"
# Path of the app's docker-compose.yml on the data partition.
# The "/mnt/root" prefix is presumably the host rootfs mounted inside the
# supervisor container — TODO confirm against the supervisor's mounts.
application.composePath = (appId) ->
	return "/mnt/root#{config.dataPath}/#{appId}/docker-compose.yml"
# At boot, all apps should be unlocked *before* start to prevent a deadlock
application.unlockAndStart = unlockAndStart = (app) ->
lockFile.unlockAsync(lockPath(app))

View File

@ -4,14 +4,24 @@ _ = require 'lodash'
dockerUtils = require './docker-utils'
{ docker } = dockerUtils
fs = Promise.promisifyAll(require('fs'))
spawn = require('child_process').spawn
{ spawn, execAsync } = Promise.promisifyAll(require('child_process'))
mkdirp = Promise.promisify(require('mkdirp'))
path = require 'path'
utils = require './utils'
config = require './config'
runComposeCommand = (composeArgs, path, onStatus) ->
onStatus ?= console.log.bind(console)
reportStatus = (status) ->
try onStatus(status)
# User-provided compose file for an app ("/mnt/root" presumably being the
# host rootfs mount — confirm).
composePathSrc = (appId) ->
	return "/mnt/root#{config.dataPath}/#{appId}/docker-compose.yml"

# Supervisor-managed copy of the compose file — the one docker-compose is
# actually run against (see runComposeCommand).
composePathDst = (appId) ->
	return "/mnt/root#{config.dataPath}/resin-supervisor/compose/#{appId}/docker-compose.yml"

# Data-path identifier for a compose service; passed to utils.defaultBinds
# so each service gets its own host directory bound to /data.
composeDataPath = (appId, serviceName) ->
	return "compose/#{appId}/#{serviceName}"
runComposeCommand = (composeArgs, appId, reportStatus) ->
new Promise (resolve, reject) ->
child = spawn('docker-compose', ['-f', path].concat(composeArgs), stdio: 'pipe')
child = spawn('docker-compose', ['-f', composePathDst(appId)].concat(composeArgs), stdio: 'pipe')
.on 'error', reject
.on 'exit', (code) ->
return reject(new Error("docker-compose exited with code #{code}")) if code isnt 0
@ -20,18 +30,24 @@ runComposeCommand = (composeArgs, path, onStatus) ->
reportStatus(status: '' + data)
child.stderr.on 'data', (data) ->
reportStatus(status: '' + data)
.catch (err) ->
msg = err?.message or err
reportStatus(error: msg)
throw err
# Serialises composeSpec to YAML and writes it to dstPath, creating the
# parent directory first. Runs `sync` afterwards to flush filesystem
# buffers — presumably for durability on sudden power-off; confirm.
writeComposeFile = (composeSpec, dstPath) ->
	mkdirp(path.dirname(dstPath))
	.then ->
		YAML.stringify(composeSpec)
	.then (yml) ->
		fs.writeFileAsync(dstPath, yml)
	.then ->
		execAsync('sync')
# Runs docker-compose up using the app's compose YAML.
# Reports status and errors in JSON to the onStatus function.
exports.up = (path, onStatus) ->
# Copies the app's compose file to the supervisor-managed location, adding the default volumes, before running docker-compose up.
exports.up = (appId, onStatus) ->
onStatus ?= console.log.bind(console)
reportStatus = (status) ->
try onStatus(status)
fs.readFileAsync(path)
fs.readFileAsync(composePathSrc(appId))
.then (data) ->
YAML.parse(data.toString())
.then (composeSpec) ->
@ -46,9 +62,27 @@ exports.up = (path, onStatus) ->
docker.getImage(service.image).inspectAsync()
.catch ->
dockerUtils.pullAndProtectImage(service.image, reportStatus)
.then ->
utils.validateKeys(service, utils.validComposeOptions)
.then ->
services[serviceName].volumes = utils.defaultBinds(composeDataPath(appId, serviceName))
.then ->
writeComposeFile(composeSpec, composePathDst(appId))
.then ->
runComposeCommand(['up', '-d'], path, onStatus)
runComposeCommand(['up', '-d'], appId, reportStatus)
.catch (err) ->
msg = err?.message or err
reportStatus(error: msg)
throw err
# Runs docker-compose down for the given app's compose project.
# Reports status and errors in JSON to the onStatus function.
exports.down = _.partial(runComposeCommand, 'down')
# Tears the app's compose project down. Errors are reported through the
# status callback and also re-thrown for the caller.
exports.down = (appId, onStatus) ->
	report = onStatus ? console.log.bind(console)
	# Never let a faulty status callback break the teardown itself.
	safeReport = (status) ->
		try report(status)
	runComposeCommand([ 'down' ], appId, safeReport)
	.catch (err) ->
		safeReport(error: err?.message or err)
		throw err

View File

@ -46,6 +46,12 @@ knex.init = Promise.all([
knex.schema.createTable 'image', (t) ->
t.increments('id').primary()
t.string('repoTag')
knex.schema.hasTable('container')
.then (exists) ->
if not exists
knex.schema.createTable 'container', (t) ->
t.increments('id').primary()
t.string('containerId')
])

View File

@ -8,6 +8,8 @@ _ = require 'lodash'
knex = require './db'
{ request } = require './request'
Lock = require 'rwlock'
utils = require './utils'
rimraf = Promise.promisify(require('rimraf'))
docker = new Docker(socketPath: config.dockerSocket)
@ -108,21 +110,20 @@ do ->
Promise.join(
knex('image').select('repoTag')
.map (image) ->
image.repoTag
# Docker sometimes prepends 'docker.io/' to official images
return [ image.repoTag, 'docker.io/' + image.repoTag ]
.then(_.flatten)
knex('app').select()
.map (app) ->
app.imageId + ':latest'
docker.listImagesAsync()
(localTags, apps, images) ->
(locallyCreatedTags, apps, images) ->
imageTags = _.map(images, 'RepoTags')
supervisorTags = _.filter imageTags, (tags) ->
_.contains(tags, supervisorTag)
appTags = _.filter imageTags, (tags) ->
_.any tags, (tag) ->
_.contains(apps, tag)
locallyCreatedTags = _.filter imageTags, (tags) ->
_.any tags, (tag) ->
_.contains(localTags, tag)
supervisorTags = _.flatten(supervisorTags)
appTags = _.flatten(appTags)
locallyCreatedTags = _.flatten(locallyCreatedTags)
@ -181,12 +182,13 @@ do ->
exports.createImage = (req, res) ->
{ registry, repo, tag, fromImage } = req.query
if fromImage?
repoTag = fromImage
repoTag += ':' + tag if tag?
repoTag = buildRepoTag(fromImage, tag)
else
repoTag = buildRepoTag(repo, tag, registry)
Promise.using writeLockImages(), ->
knex('image').insert({ repoTag })
knex('image').select().where({ repoTag })
.then ([ img ]) ->
knex('image').insert({ repoTag }) if !img?
.then ->
if fromImage?
docker.createImageAsync({ fromImage, tag })
@ -200,7 +202,9 @@ do ->
exports.pullAndProtectImage = (image, onProgress) ->
repoTag = buildRepoTag(image)
Promise.using writeLockImages(), ->
knex('image').insert({ repoTag })
knex('image').select().where({ repoTag })
.then ([ img ]) ->
knex('image').insert({ repoTag }) if !img?
.then ->
dockerProgress.pull(repoTag, onProgress)
@ -244,57 +248,96 @@ do ->
res.status(500).send(err?.message or err or 'Unknown error')
docker.modem.dialAsync = Promise.promisify(docker.modem.dial)
exports.createContainer = (req, res) ->
createContainer = (options, internalId) ->
Promise.using writeLockImages(), ->
knex('image').select().where('repoTag', req.body.Image)
knex('image').select().where('repoTag', options.Image)
.then (images) ->
throw new Error('Only images created via the Supervisor can be used for creating containers.') if images.length == 0
optsf =
path: '/containers/create?'
method: 'POST'
options: req.body
statusCodes:
200: true
201: true
404: 'no such container'
406: 'impossible to attach'
500: 'server error'
docker.modem.dialAsync(optsf)
.then (data) ->
res.json(data)
.catch (err) ->
res.status(500).send(err?.message or err or 'Unknown error')
exports.startContainer = (req, res) ->
docker.getContainer(req.params.id).startAsync(req.body)
knex.transaction (tx) ->
Promise.try ->
return internalId if internalId?
tx.insert({}, 'id').into('container')
.then ([ id ]) ->
return id
.then (id) ->
options.HostConfig ?= {}
options.Volumes ?= {}
_.assign(options.Volumes, utils.defaultVolumes)
options.HostConfig.Binds = utils.defaultBinds("containers/#{id}")
query = ''
query = "name=#{options.Name}&" if options.Name?
optsf =
path: "/containers/create?#{query}"
method: 'POST'
options: options
statusCodes:
200: true
201: true
404: 'no such container'
406: 'impossible to attach'
500: 'server error'
utils.validateKeys(options, utils.validContainerOptions)
.then ->
utils.validateKeys(options.HostConfig, utils.validHostConfigOptions)
.then ->
docker.modem.dialAsync(optsf)
.then (data) ->
containerId = data.Id
tx('container').update({ containerId }).where({ id })
.return(data)
# HTTP wrapper around createContainer: takes the container spec from the
# JSON request body and replies with the daemon's response.
exports.createContainer = (req, res) ->
	createContainer(req.body)
	.then (data) -> res.json(data)
	.catch (err) ->
		message = err?.message or err or 'Unknown error'
		res.status(500).send(message)
exports.stopContainer = (req, res) ->
container = docker.getContainer(req.params.id)
# Starts containerId after checking that options only contain whitelisted
# HostConfig keys (utils.validHostConfigOptions); rejects otherwise.
startContainer = (containerId, options) ->
	utils.validateKeys(options, utils.validHostConfigOptions)
	.then ->
		docker.getContainer(containerId).startAsync(options)
# HTTP wrapper: start the container given by :id, passing the JSON body
# through as start options.
exports.startContainer = (req, res) ->
	startContainer(req.params.id, req.body)
	.then (data) -> res.json(data)
	.catch (err) ->
		message = err?.message or err or 'Unknown error'
		res.status(500).send(message)
stopContainer = (containerId, options) ->
container = docker.getContainer(containerId)
knex('app').select()
.then (apps) ->
throw new Error('Cannot stop an app container') if _.any(apps, containerId: req.params.id)
throw new Error('Cannot stop an app container') if _.any(apps, { containerId })
container.inspectAsync()
.then (cont) ->
throw new Error('Cannot stop supervisor container') if cont.Name == '/resin_supervisor' or _.any(cont.Names, (n) -> n == '/resin_supervisor')
container.stopAsync(sanitizeQuery(req.query))
container.stopAsync(options)
# HTTP wrapper: stop the container given by :id, passing the sanitized
# query parameters through as stop options.
exports.stopContainer = (req, res) ->
	opts = sanitizeQuery(req.query)
	stopContainer(req.params.id, opts)
	.then (data) -> res.json(data)
	.catch (err) ->
		message = err?.message or err or 'Unknown error'
		res.status(500).send(message)
exports.deleteContainer = (req, res) ->
container = docker.getContainer(req.params.id)
deleteContainer = (containerId, options) ->
container = docker.getContainer(containerId)
knex('app').select()
.then (apps) ->
throw new Error('Cannot remove an app container') if _.any(apps, containerId: req.params.id)
throw new Error('Cannot remove an app container') if _.any(apps, { containerId })
container.inspectAsync()
.then (cont) ->
throw new Error('Cannot remove supervisor container') if cont.Name == '/resin_supervisor' or _.any(cont.Names, (n) -> n == '/resin_supervisor')
container.removeAsync(sanitizeQuery(req.query))
if options.purge
knex('container').select().where({ containerId })
.then (contFromDB) ->
# This will also be affected by #115. Should fix when we fix that.
rimraf(utils.getDataPath("containers/#{contFromDB.id}"))
.then ->
knex('container').where({ containerId }).del()
.then ->
container.removeAsync(options)
exports.deleteContainer = (req, res) ->
deleteContainer(req.params.id, sanitizeQuery(req.query))
.then (data) ->
res.json(data)
.catch (err) ->
@ -306,3 +349,21 @@ do ->
res.json(containers)
.catch (err) ->
res.status(500).send(err?.message or err or 'Unknown error')
# Replaces a container: stops and removes the container named by the
# `oldContainerId` query parameter, then creates a new one from the JSON
# body — reusing the old row's internal id, which keeps the same
# containers/<id> data directory (see createContainer) — and starts it.
exports.updateContainer = (req, res) ->
	{ oldContainerId } = req.query
	return res.status(400).send('Missing oldContainerId') if !oldContainerId?
	knex('container').select().where({ containerId: oldContainerId })
	.then ([ oldContainer ]) ->
		# Guard with an explicit early return: the original fell through and
		# could invoke res.json on a response that had already been sent.
		if !oldContainer?
			res.status(404).send('Old container not found')
			return
		stopContainer(oldContainerId, t: 10)
		.then ->
			deleteContainer(oldContainerId, v: true)
		.then ->
			createContainer(req.body, oldContainer.id)
		.tap (data) ->
			startContainer(data.Id)
		.then (data) ->
			res.json(data)
	.catch (err) ->
		res.status(500).send(err?.message or err or 'Unknown error')

View File

@ -208,3 +208,73 @@ exports.getOSVersion = (path) ->
.catch (err) ->
console.log('Could not get OS Version: ', err, err.stack)
return undefined
# Volumes declared by default on app containers (merged into container
# create options and compose services elsewhere in the supervisor).
exports.defaultVolumes = {
	'/data': {}
	'/lib/modules': {}
	'/lib/firmware': {}
	'/host/var/lib/connman': {}
	'/host/run/dbus': {}
}
# Absolute data-directory path for the given identifier, rooted at
# config.dataPath.
exports.getDataPath = (identifier) ->
	return "#{config.dataPath}/#{identifier}"
# Default host->container bind mounts for a container; `dataPath` picks
# which host data directory is mapped onto /data.
exports.defaultBinds = (dataPath) ->
	dataBind = "#{exports.getDataPath(dataPath)}:/data"
	return [
		dataBind
		'/lib/modules:/lib/modules'
		'/lib/firmware:/lib/firmware'
		'/run/dbus:/host_run/dbus'
		'/run/dbus:/host/run/dbus'
		'/etc/resolv.conf:/etc/resolv.conf:rw'
		'/var/lib/connman:/host/var/lib/connman'
	]
# Whitelist of docker-compose service keys accepted from user compose
# files; enforced with validateKeys before compose up.
exports.validComposeOptions = [
	'command'
	'entrypoint'
	'environment'
	'expose'
	'image'
	'labels'
	'ports'
	'stop_signal'
	'volumes' # Will be overwritten with the default binds
	'user'
	'working_dir'
	'network_mode'
	'net'
	'privileged'
	'restart'
]
# Whitelist of top-level container-create options accepted by the
# supervisor's container API; enforced with validateKeys in createContainer.
exports.validContainerOptions = [
	'Hostname'
	'User'
	'Env'
	'Labels'
	'Cmd'
	'Entrypoint'
	'Image'
	'Volumes'
	'WorkingDir'
	'ExposedPorts'
	'HostConfig'
	'Name'
]
# Whitelist of HostConfig keys accepted for container create/start;
# enforced with validateKeys.
exports.validHostConfigOptions = [
	'Binds' # Will be overwritten with the default binds
	'Links'
	'PortBindings'
	'Privileged'
	'RestartPolicy'
	'NetworkMode'
]
# Resolves if every key of `options` is in `validSet` (a null/undefined
# options object is accepted as-is); rejects naming the offending keys.
exports.validateKeys = (options, validSet) ->
	Promise.try ->
		return if !options?
		invalidKeys = _.difference(_.keys(options), validSet)
		if !_.isEmpty(invalidKeys)
			throw new Error("Using #{invalidKeys.join(', ')} is not allowed.")