mirror of
https://github.com/balena-io/balena-cli.git
synced 2024-12-19 21:57:51 +00:00
Add docker-compose-aware builds and deployments
Legacy behaviour is mostly retained. The most notable change in behaviour is that invoking `resin deploy` without options is now allowed (see help string how it behaves). In this commit there are also the following notable changes: - Deploy/Build are promoted to primary commands - Extracts QEMU-related code to a new file - Adds a utility file to retrieve the CLI version and its parts - Adds a helper that can be used to manipulate display on capable clients - Declares several new dependencies. Most are already indirectly installed via some dependency Change-Type: minor
This commit is contained in:
parent
96116aeaec
commit
14a3f51b73
@ -1432,12 +1432,19 @@ name of container to stop
|
||||
|
||||
## build [source]
|
||||
|
||||
Use this command to build a container with a provided docker daemon.
|
||||
Use this command to build an image or a complete multicontainer project
|
||||
with the provided docker daemon.
|
||||
|
||||
You must provide either an application or a device-type/architecture
|
||||
pair to use the resin Dockerfile pre-processor
|
||||
(e.g. Dockerfile.template -> Dockerfile).
|
||||
|
||||
This command will look into the given source directory (or the current working
|
||||
directory if one isn't specified) for a compose file. If one is found, this
|
||||
command will build each service defined in the compose file. If a compose file
|
||||
isn't found, the command will look for a Dockerfile, and if yet that isn't found,
|
||||
it will try to generate one.
|
||||
|
||||
Examples:
|
||||
|
||||
$ resin build
|
||||
@ -1461,6 +1468,18 @@ The type of device this build is for
|
||||
|
||||
The target resin.io application this build is for
|
||||
|
||||
#### --projectName, -n <projectName>
|
||||
|
||||
Specify an alternate project name; default is the directory name
|
||||
|
||||
#### --emulated, -e
|
||||
|
||||
Run an emulated build using Qemu
|
||||
|
||||
#### --logs
|
||||
|
||||
Display full log output
|
||||
|
||||
#### --docker, -P <docker>
|
||||
|
||||
Path to a local docker socket
|
||||
@ -1497,20 +1516,24 @@ Set a build-time variable (eg. "-B 'ARG=value'"). Can be specified multiple time
|
||||
|
||||
Don't use docker layer caching when building
|
||||
|
||||
#### --emulated, -e
|
||||
|
||||
Run an emulated build using Qemu
|
||||
|
||||
#### --squash
|
||||
|
||||
Squash newly built layers into a single new layer
|
||||
|
||||
## deploy <appName> [image]
|
||||
|
||||
Use this command to deploy an image to an application, optionally building it first.
|
||||
Use this command to deploy an image or a complete multicontainer project
|
||||
to an application, optionally building it first.
|
||||
|
||||
Usage: `deploy <appName> ([image] | --build [--source build-dir])`
|
||||
|
||||
Unless an image is specified, this command will look into the current directory
|
||||
(or the one specified by --source) for a compose file. If one is found, this
|
||||
command will deploy each service defined in the compose file, building it first
|
||||
if an image for it doesn't exist. If a compose file isn't found, the command
|
||||
will look for a Dockerfile, and if yet that isn't found, it will try to
|
||||
generate one.
|
||||
|
||||
To deploy to an app on which you're a collaborator, use
|
||||
`resin deploy <appOwnerUsername>/<appName>`.
|
||||
|
||||
@ -1518,23 +1541,37 @@ Note: If building with this command, all options supported by `resin build`
|
||||
are also supported with this command.
|
||||
|
||||
Examples:
|
||||
|
||||
$ resin deploy myApp
|
||||
$ resin deploy myApp --build --source myBuildDir/
|
||||
$ resin deploy myApp myApp/myImage
|
||||
|
||||
### Options
|
||||
|
||||
#### --build, -b
|
||||
|
||||
Build image then deploy
|
||||
|
||||
#### --source, -s <source>
|
||||
|
||||
The source directory to use when building the image
|
||||
Specify an alternate source directory; default is the working directory
|
||||
|
||||
#### --build, -b
|
||||
|
||||
Force a rebuild before deploy
|
||||
|
||||
#### --nologupload
|
||||
|
||||
Don't upload build logs to the dashboard with image (if building)
|
||||
|
||||
#### --projectName, -n <projectName>
|
||||
|
||||
Specify an alternate project name; default is the directory name
|
||||
|
||||
#### --emulated, -e
|
||||
|
||||
Run an emulated build using Qemu
|
||||
|
||||
#### --logs
|
||||
|
||||
Display full log output
|
||||
|
||||
#### --docker, -P <docker>
|
||||
|
||||
Path to a local docker socket
|
||||
@ -1571,10 +1608,6 @@ Set a build-time variable (eg. "-B 'ARG=value'"). Can be specified multiple time
|
||||
|
||||
Don't use docker layer caching when building
|
||||
|
||||
#### --emulated, -e
|
||||
|
||||
Run an emulated build using Qemu
|
||||
|
||||
#### --squash
|
||||
|
||||
Squash newly built layers into a single new layer
|
||||
|
@ -2,32 +2,61 @@
|
||||
# of this action
|
||||
Promise = require('bluebird')
|
||||
dockerUtils = require('../utils/docker')
|
||||
compose = require('../utils/compose')
|
||||
|
||||
getBundleInfo = Promise.method (options) ->
|
||||
helpers = require('../utils/helpers')
|
||||
###
|
||||
Opts must be an object with the following keys:
|
||||
|
||||
if options.application?
|
||||
# An application was provided
|
||||
return helpers.getArchAndDeviceType(options.application)
|
||||
.then (app) ->
|
||||
return [app.arch, app.device_type]
|
||||
else if options.arch? and options.deviceType?
|
||||
return [options.arch, options.deviceType]
|
||||
else
|
||||
# No information, cannot do resolution
|
||||
return undefined
|
||||
appName: the name of the app this build is for; optional
|
||||
arch: the architecture to build for
|
||||
deviceType: the device type to build for
|
||||
projectPath: the project root directory; must be absolute
|
||||
buildEmulated
|
||||
buildOpts: arguments to forward to docker build command
|
||||
###
|
||||
buildProject = (docker, logger, composeOpts, opts) ->
|
||||
compose.loadProject(
|
||||
logger
|
||||
composeOpts.projectPath
|
||||
composeOpts.projectName
|
||||
)
|
||||
.then (project) ->
|
||||
compose.buildProject(
|
||||
docker
|
||||
logger
|
||||
project.path
|
||||
project.name
|
||||
project.composition
|
||||
opts.arch
|
||||
opts.deviceType
|
||||
opts.buildEmulated
|
||||
opts.buildOpts
|
||||
composeOpts.inlineLogs
|
||||
)
|
||||
.then ->
|
||||
logger.logSuccess('Build succeeded!')
|
||||
.tapCatch (e) ->
|
||||
logger.logError('Build failed')
|
||||
|
||||
module.exports =
|
||||
signature: 'build [source]'
|
||||
description: 'Build a container locally'
|
||||
description: 'Build a single image or a multicontainer project locally'
|
||||
permission: 'user'
|
||||
primary: true
|
||||
help: '''
|
||||
Use this command to build a container with a provided docker daemon.
|
||||
Use this command to build an image or a complete multicontainer project
|
||||
with the provided docker daemon.
|
||||
|
||||
You must provide either an application or a device-type/architecture
|
||||
pair to use the resin Dockerfile pre-processor
|
||||
(e.g. Dockerfile.template -> Dockerfile).
|
||||
|
||||
This command will look into the given source directory (or the current working
|
||||
directory if one isn't specified) for a compose file. If one is found, this
|
||||
command will build each service defined in the compose file. If a compose file
|
||||
isn't found, the command will look for a Dockerfile, and if yet that isn't found,
|
||||
it will try to generate one.
|
||||
|
||||
Examples:
|
||||
|
||||
$ resin build
|
||||
@ -37,7 +66,7 @@ module.exports =
|
||||
$ resin build --docker '/var/run/docker.sock'
|
||||
$ resin build --dockerHost my.docker.host --dockerPort 2376 --ca ca.pem --key key.pem --cert cert.pem
|
||||
'''
|
||||
options: dockerUtils.appendOptions [
|
||||
options: dockerUtils.appendOptions compose.appendOptions [
|
||||
{
|
||||
signature: 'arch'
|
||||
parameter: 'arch'
|
||||
@ -58,7 +87,46 @@ module.exports =
|
||||
},
|
||||
]
|
||||
action: (params, options, done) ->
|
||||
Logger = require('../utils/logger')
|
||||
dockerUtils.runBuild(params, options, getBundleInfo, new Logger())
|
||||
.asCallback(done)
|
||||
# compositions with many services trigger misleading warnings
|
||||
require('events').defaultMaxListeners = 1000
|
||||
|
||||
helpers = require('../utils/helpers')
|
||||
Logger = require('../utils/logger')
|
||||
|
||||
logger = new Logger()
|
||||
|
||||
logger.logDebug('Parsing input...')
|
||||
|
||||
Promise.try ->
|
||||
# `build` accepts `[source]` as a parameter, but compose expects it
|
||||
# as an option. swap them here
|
||||
options.source ?= params.source
|
||||
delete params.source
|
||||
|
||||
{ application, arch, deviceType } = options
|
||||
|
||||
if (not (arch? and deviceType?) and not application?) or (application? and (arch? or deviceType?))
|
||||
throw new Error('You must specify either an application or an arch/deviceType pair to build for')
|
||||
|
||||
if arch? and deviceType?
|
||||
[ undefined, arch, deviceType ]
|
||||
else
|
||||
helpers.getArchAndDeviceType(application)
|
||||
.then (app) ->
|
||||
[ application, app.arch, app.device_type ]
|
||||
|
||||
.then ([ appName, arch, deviceType ]) ->
|
||||
Promise.join(
|
||||
dockerUtils.getDocker(options)
|
||||
dockerUtils.generateBuildOpts(options)
|
||||
compose.generateOpts(options)
|
||||
(docker, buildOpts, composeOpts) ->
|
||||
buildProject(docker, logger, composeOpts, {
|
||||
appName
|
||||
arch
|
||||
deviceType
|
||||
buildEmulated: !!options.emulated
|
||||
buildOpts
|
||||
})
|
||||
)
|
||||
.asCallback(done)
|
||||
|
@ -1,121 +1,113 @@
|
||||
# Imported here because it's needed for the setup
|
||||
# of this action
|
||||
Promise = require('bluebird')
|
||||
dockerUtils = require('../utils/docker')
|
||||
compose = require('../utils/compose')
|
||||
|
||||
getBuilderPushEndpoint = (baseUrl, owner, app) ->
|
||||
querystring = require('querystring')
|
||||
args = querystring.stringify({ owner, app })
|
||||
"https://builder.#{baseUrl}/v1/push?#{args}"
|
||||
###
|
||||
Opts must be an object with the following keys:
|
||||
|
||||
getBuilderLogPushEndpoint = (baseUrl, buildId, owner, app) ->
|
||||
querystring = require('querystring')
|
||||
args = querystring.stringify({ owner, app, buildId })
|
||||
"https://builder.#{baseUrl}/v1/pushLogs?#{args}"
|
||||
app: the application instance to deploy to
|
||||
image: the image to deploy; optional
|
||||
shouldPerformBuild
|
||||
shouldUploadLogs
|
||||
buildEmulated
|
||||
buildOpts: arguments to forward to docker build command
|
||||
###
|
||||
deployProject = (docker, logger, composeOpts, opts) ->
|
||||
_ = require('lodash')
|
||||
doodles = require('resin-doodles')
|
||||
sdk = require('resin-sdk').fromSharedOptions()
|
||||
|
||||
formatImageName = (image) ->
|
||||
image.split('/').pop()
|
||||
|
||||
parseInput = Promise.method (params, options) ->
|
||||
if not params.appName?
|
||||
throw new Error('Need an application to deploy to!')
|
||||
appName = params.appName
|
||||
image = undefined
|
||||
if params.image?
|
||||
if options.build or options.source?
|
||||
throw new Error('Build and source parameters are not applicable when specifying an image')
|
||||
options.build = false
|
||||
image = params.image
|
||||
else if options.build
|
||||
source = options.source || '.'
|
||||
else
|
||||
throw new Error('Need either an image or a build flag!')
|
||||
|
||||
return [appName, options.build, source, image]
|
||||
|
||||
showPushProgress = (message) ->
|
||||
visuals = require('resin-cli-visuals')
|
||||
progressBar = new visuals.Progress(message)
|
||||
progressBar.update({ percentage: 0 })
|
||||
return progressBar
|
||||
|
||||
getBundleInfo = (options) ->
|
||||
helpers = require('../utils/helpers')
|
||||
|
||||
helpers.getArchAndDeviceType(options.appName)
|
||||
.then (app) ->
|
||||
[app.arch, app.device_type]
|
||||
|
||||
performUpload = (imageStream, token, username, url, appName, logger) ->
|
||||
request = require('request')
|
||||
progressStream = require('progress-stream')
|
||||
zlib = require('zlib')
|
||||
|
||||
# Need to strip off the newline
|
||||
progressMessage = logger.formatMessage('info', 'Deploying').slice(0, -1)
|
||||
progressBar = showPushProgress(progressMessage)
|
||||
streamWithProgress = imageStream.pipe progressStream
|
||||
time: 500,
|
||||
length: imageStream.length
|
||||
, ({ percentage, eta }) ->
|
||||
progressBar.update
|
||||
percentage: Math.min(percentage, 100)
|
||||
eta: eta
|
||||
|
||||
uploadRequest = request.post
|
||||
url: getBuilderPushEndpoint(url, username, appName)
|
||||
headers:
|
||||
'Content-Encoding': 'gzip'
|
||||
auth:
|
||||
bearer: token
|
||||
body: streamWithProgress.pipe(zlib.createGzip({
|
||||
level: 6
|
||||
}))
|
||||
|
||||
uploadToPromise(uploadRequest, logger)
|
||||
|
||||
uploadLogs = (logs, token, url, buildId, username, appName) ->
|
||||
request = require('request')
|
||||
request.post
|
||||
json: true
|
||||
url: getBuilderLogPushEndpoint(url, buildId, username, appName)
|
||||
auth:
|
||||
bearer: token
|
||||
body: Buffer.from(logs)
|
||||
|
||||
uploadToPromise = (uploadRequest, logger) ->
|
||||
new Promise (resolve, reject) ->
|
||||
|
||||
handleMessage = (data) ->
|
||||
data = data.toString()
|
||||
logger.logDebug("Received data: #{data}")
|
||||
|
||||
try
|
||||
obj = JSON.parse(data)
|
||||
catch e
|
||||
logger.logError('Error parsing reply from remote side')
|
||||
reject(e)
|
||||
return
|
||||
|
||||
if obj.type?
|
||||
switch obj.type
|
||||
when 'error' then reject(new Error("Remote error: #{obj.error}"))
|
||||
when 'success' then resolve(obj)
|
||||
when 'status' then logger.logInfo("Remote: #{obj.message}")
|
||||
else reject(new Error("Received unexpected reply from remote: #{data}"))
|
||||
else
|
||||
reject(new Error("Received unexpected reply from remote: #{data}"))
|
||||
|
||||
uploadRequest
|
||||
.on('error', reject)
|
||||
.on('data', handleMessage)
|
||||
compose.loadProject(
|
||||
logger
|
||||
composeOpts.projectPath
|
||||
composeOpts.projectName
|
||||
opts.image
|
||||
)
|
||||
.then (project) ->
|
||||
# find which services use images that already exist locally
|
||||
Promise.map project.descriptors, (d) ->
|
||||
# unconditionally build (or pull) if explicitly requested
|
||||
return d if opts.shouldPerformBuild
|
||||
docker.getImage(d.image.tag ? d.image).inspect()
|
||||
.return(d.serviceName)
|
||||
.catchReturn()
|
||||
.filter (d) -> !!d
|
||||
.then (servicesToSkip) ->
|
||||
# multibuild takes in a composition and always attempts to
|
||||
# build or pull all services. we workaround that here by
|
||||
# passing a modified composition.
|
||||
compositionToBuild = _.cloneDeep(project.composition)
|
||||
compositionToBuild.services = _.omit(compositionToBuild.services, servicesToSkip)
|
||||
if _.size(compositionToBuild.services) is 0
|
||||
logger.logInfo('Everything is up to date (use --build to force a rebuild)')
|
||||
return {}
|
||||
compose.buildProject(
|
||||
docker
|
||||
logger
|
||||
project.path
|
||||
project.name
|
||||
compositionToBuild
|
||||
opts.app.arch
|
||||
opts.app.device_type
|
||||
opts.buildEmulated
|
||||
opts.buildOpts
|
||||
composeOpts.inlineLogs
|
||||
)
|
||||
.then (builtImages) ->
|
||||
_.keyBy(builtImages, 'serviceName')
|
||||
.then (builtImages) ->
|
||||
project.descriptors.map (d) ->
|
||||
builtImages[d.serviceName] ? {
|
||||
serviceName: d.serviceName,
|
||||
name: d.image.tag ? d.image
|
||||
logs: 'Build skipped; image for service already exists.'
|
||||
props: {}
|
||||
}
|
||||
.then (images) ->
|
||||
Promise.join(
|
||||
sdk.auth.getUserId()
|
||||
sdk.auth.getToken()
|
||||
sdk.settings.get('apiUrl')
|
||||
(userId, auth, apiEndpoint) ->
|
||||
compose.deployProject(
|
||||
docker
|
||||
logger
|
||||
project.composition
|
||||
images
|
||||
opts.app.id
|
||||
userId
|
||||
"Bearer #{auth}"
|
||||
apiEndpoint
|
||||
!opts.shouldUploadLogs
|
||||
)
|
||||
)
|
||||
.then (release) ->
|
||||
logger.logSuccess('Deploy succeeded!')
|
||||
logger.logSuccess("Release: #{release.commit}")
|
||||
console.log()
|
||||
console.log(doodles.getDoodle()) # Show charlie
|
||||
console.log()
|
||||
.tapCatch (e) ->
|
||||
logger.logError('Deploy failed')
|
||||
|
||||
module.exports =
|
||||
signature: 'deploy <appName> [image]'
|
||||
description: 'Deploy an image to a resin.io application'
|
||||
description: 'Deploy a single image or a multicontainer project to a resin.io application'
|
||||
help: '''
|
||||
Use this command to deploy an image to an application, optionally building it first.
|
||||
Use this command to deploy an image or a complete multicontainer project
|
||||
to an application, optionally building it first.
|
||||
|
||||
Usage: `deploy <appName> ([image] | --build [--source build-dir])`
|
||||
|
||||
Unless an image is specified, this command will look into the current directory
|
||||
(or the one specified by --source) for a compose file. If one is found, this
|
||||
command will deploy each service defined in the compose file, building it first
|
||||
if an image for it doesn't exist. If a compose file isn't found, the command
|
||||
will look for a Dockerfile, and if yet that isn't found, it will try to
|
||||
generate one.
|
||||
|
||||
To deploy to an app on which you're a collaborator, use
|
||||
`resin deploy <appOwnerUsername>/<appName>`.
|
||||
|
||||
@ -123,23 +115,26 @@ module.exports =
|
||||
are also supported with this command.
|
||||
|
||||
Examples:
|
||||
|
||||
$ resin deploy myApp
|
||||
$ resin deploy myApp --build --source myBuildDir/
|
||||
$ resin deploy myApp myApp/myImage
|
||||
'''
|
||||
permission: 'user'
|
||||
options: dockerUtils.appendOptions [
|
||||
{
|
||||
signature: 'build'
|
||||
boolean: true
|
||||
description: 'Build image then deploy'
|
||||
alias: 'b'
|
||||
},
|
||||
primary: true
|
||||
options: dockerUtils.appendOptions compose.appendOptions [
|
||||
{
|
||||
signature: 'source'
|
||||
parameter: 'source'
|
||||
description: 'The source directory to use when building the image'
|
||||
description: 'Specify an alternate source directory; default is the working directory'
|
||||
alias: 's'
|
||||
},
|
||||
{
|
||||
signature: 'build'
|
||||
boolean: true
|
||||
description: 'Force a rebuild before deploy'
|
||||
alias: 'b'
|
||||
},
|
||||
{
|
||||
signature: 'nologupload'
|
||||
description: "Don't upload build logs to the dashboard with image (if building)"
|
||||
@ -147,83 +142,53 @@ module.exports =
|
||||
}
|
||||
]
|
||||
action: (params, options, done) ->
|
||||
_ = require('lodash')
|
||||
tmp = require('tmp')
|
||||
tmpNameAsync = Promise.promisify(tmp.tmpName)
|
||||
resin = require('resin-sdk-preconfigured')
|
||||
# compositions with many services trigger misleading warnings
|
||||
require('events').defaultMaxListeners = 1000
|
||||
|
||||
helpers = require('../utils/helpers')
|
||||
Logger = require('../utils/logger')
|
||||
|
||||
logger = new Logger()
|
||||
|
||||
# Ensure the tmp files gets deleted
|
||||
tmp.setGracefulCleanup()
|
||||
logger.logDebug('Parsing input...')
|
||||
|
||||
logs = ''
|
||||
Promise.try ->
|
||||
{ appName, image } = params
|
||||
|
||||
upload = (token, username, url) ->
|
||||
dockerUtils.getDocker(options)
|
||||
.then (docker) ->
|
||||
# Check input parameters
|
||||
parseInput(params, options)
|
||||
.then ([appName, build, source, imageName]) ->
|
||||
tmpNameAsync()
|
||||
.then (bufferFile) ->
|
||||
# look into "resin build" options if appName isn't given
|
||||
appName = options.application if not appName?
|
||||
delete options.application
|
||||
|
||||
# Setup the build args for how the build routine expects them
|
||||
options = _.assign({}, options, { appName })
|
||||
params = _.assign({}, params, { source })
|
||||
if not appName?
|
||||
throw new Error('Please specify the name of the application to deploy')
|
||||
|
||||
Promise.try ->
|
||||
if build
|
||||
dockerUtils.runBuild(params, options, getBundleInfo, logger)
|
||||
else
|
||||
{ image: imageName, log: '' }
|
||||
.then ({ image: imageName, log: buildLogs }) ->
|
||||
logger.logInfo('Initializing deploy...')
|
||||
if image? and options.build
|
||||
throw new Error('Build option is not applicable when specifying an image')
|
||||
|
||||
logs = buildLogs
|
||||
Promise.all [
|
||||
dockerUtils.bufferImage(docker, imageName, bufferFile)
|
||||
token
|
||||
username
|
||||
url
|
||||
params.appName
|
||||
logger
|
||||
]
|
||||
.spread(performUpload)
|
||||
.finally ->
|
||||
# If the file was never written to (for instance because an error
|
||||
# has occured before any data was written) this call will throw an
|
||||
# ugly error, just suppress it
|
||||
Promise.try ->
|
||||
require('mz/fs').unlink(bufferFile)
|
||||
.catch(_.noop)
|
||||
.tap ({ image: imageName, buildId }) ->
|
||||
logger.logSuccess("Successfully deployed image: #{formatImageName(imageName)}")
|
||||
return buildId
|
||||
.then ({ image: imageName, buildId }) ->
|
||||
if logs is '' or options.nologupload?
|
||||
return ''
|
||||
Promise.join(
|
||||
helpers.getApplication(appName)
|
||||
helpers.getArchAndDeviceType(appName)
|
||||
(app, { arch, device_type }) ->
|
||||
app.arch = arch
|
||||
app.device_type = device_type
|
||||
return app
|
||||
)
|
||||
.then (app) ->
|
||||
[ app, image, !!options.build, !options.nologupload ]
|
||||
|
||||
logger.logInfo('Uploading logs to dashboard...')
|
||||
|
||||
Promise.join(
|
||||
logs
|
||||
token
|
||||
url
|
||||
buildId
|
||||
username
|
||||
params.appName
|
||||
uploadLogs
|
||||
)
|
||||
.return('Successfully uploaded logs')
|
||||
.then (msg) ->
|
||||
logger.logSuccess(msg) if msg isnt ''
|
||||
.asCallback(done)
|
||||
|
||||
Promise.join(
|
||||
resin.auth.getToken()
|
||||
resin.auth.whoami()
|
||||
resin.settings.get('resinUrl')
|
||||
upload
|
||||
)
|
||||
.then ([ app, image, shouldPerformBuild, shouldUploadLogs ]) ->
|
||||
Promise.join(
|
||||
dockerUtils.getDocker(options)
|
||||
dockerUtils.generateBuildOpts(options)
|
||||
compose.generateOpts(options)
|
||||
(docker, buildOpts, composeOpts) ->
|
||||
deployProject(docker, logger, composeOpts, {
|
||||
app
|
||||
image
|
||||
shouldPerformBuild
|
||||
shouldUploadLogs
|
||||
buildEmulated: !!options.emulated
|
||||
buildOpts
|
||||
})
|
||||
)
|
||||
.asCallback(done)
|
||||
|
747
lib/utils/compose.coffee
Normal file
747
lib/utils/compose.coffee
Normal file
@ -0,0 +1,747 @@
|
||||
Promise = require('bluebird')
|
||||
path = require('path')
|
||||
|
||||
exports.appendProjectOptions = appendProjectOptions = (opts) ->
|
||||
opts.concat [
|
||||
{
|
||||
signature: 'projectName'
|
||||
parameter: 'projectName'
|
||||
description: 'Specify an alternate project name; default is the directory name'
|
||||
alias: 'n'
|
||||
},
|
||||
]
|
||||
|
||||
exports.appendOptions = (opts) ->
|
||||
appendProjectOptions(opts).concat [
|
||||
{
|
||||
signature: 'emulated'
|
||||
description: 'Run an emulated build using Qemu'
|
||||
boolean: true
|
||||
alias: 'e'
|
||||
},
|
||||
{
|
||||
signature: 'logs'
|
||||
description: 'Display full log output'
|
||||
boolean: true
|
||||
},
|
||||
]
|
||||
|
||||
exports.generateOpts = (options) ->
|
||||
fs = require('mz/fs')
|
||||
fs.realpath(options.source || '.').then (projectPath) ->
|
||||
projectName: options.projectName
|
||||
projectPath: projectPath
|
||||
inlineLogs: !!options.logs
|
||||
|
||||
compositionFileNames = [
|
||||
'resin-compose.yml'
|
||||
'resin-compose.yaml'
|
||||
'docker-compose.yml'
|
||||
'docker-compose.yaml'
|
||||
]
|
||||
|
||||
# look into the given directory for valid compose files and return
|
||||
# the contents of the first one found.
|
||||
resolveProject = (rootDir) ->
|
||||
fs = require('mz/fs')
|
||||
Promise.any compositionFileNames.map (filename) ->
|
||||
fs.readFile(path.join(rootDir, filename), 'utf-8')
|
||||
|
||||
# Parse the given composition and return a structure with info. Input is:
|
||||
# - composePath: the *absolute* path to the directory containing the compose file
|
||||
# - composeStr: the contents of the compose file, as a string
|
||||
createProject = (composePath, composeStr, projectName = null) ->
|
||||
yml = require('js-yaml')
|
||||
compose = require('resin-compose-parse')
|
||||
|
||||
# both methods below may throw.
|
||||
composition = yml.safeLoad(composeStr, schema: yml.FAILSAFE_SCHEMA)
|
||||
composition = compose.normalize(composition)
|
||||
|
||||
projectName ?= path.basename(composePath)
|
||||
descriptors = compose.parse(composition).map (descr) ->
|
||||
# generate an image name based on the project and service names
|
||||
# if one is not given and the service requires a build
|
||||
if descr.image.context? and not descr.image.tag?
|
||||
descr.image.tag = [ projectName, descr.serviceName ].join('_')
|
||||
return descr
|
||||
return {
|
||||
path: composePath,
|
||||
name: projectName,
|
||||
composition,
|
||||
descriptors
|
||||
}
|
||||
|
||||
# high-level function resolving a project and creating a composition out
|
||||
# of it in one go. if image is given, it'll create a default project for
|
||||
# that without looking for a project. falls back to creating a default
|
||||
# project if none is found at the given projectPath.
|
||||
exports.loadProject = (logger, projectPath, projectName, image) ->
|
||||
compose = require('resin-compose-parse')
|
||||
logger.logDebug('Loading project...')
|
||||
|
||||
Promise.try ->
|
||||
if image?
|
||||
logger.logInfo("Creating default composition with image: #{image}")
|
||||
return compose.defaultComposition(image)
|
||||
|
||||
logger.logDebug('Resolving project...')
|
||||
|
||||
resolveProject(projectPath)
|
||||
.tap ->
|
||||
logger.logInfo('Compose file detected')
|
||||
.catch (e) ->
|
||||
logger.logDebug("Failed to resolve project: #{e}")
|
||||
logger.logInfo("Creating default composition with source: #{projectPath}")
|
||||
return compose.defaultComposition()
|
||||
.then (composeStr) ->
|
||||
logger.logDebug('Creating project...')
|
||||
createProject(projectPath, composeStr, projectName)
|
||||
|
||||
toPosixPath = (systemPath) ->
|
||||
path = require('path')
|
||||
systemPath.replace(new RegExp('\\' + path.sep, 'g'), '/')
|
||||
|
||||
tarDirectory = (dir) ->
|
||||
tar = require('tar-stream')
|
||||
klaw = require('klaw')
|
||||
path = require('path')
|
||||
fs = require('mz/fs')
|
||||
streamToPromise = require('stream-to-promise')
|
||||
|
||||
getFiles = ->
|
||||
streamToPromise(klaw(dir))
|
||||
.filter((item) -> not item.stats.isDirectory())
|
||||
.map((item) -> item.path)
|
||||
|
||||
pack = tar.pack()
|
||||
getFiles(dir)
|
||||
.map (file) ->
|
||||
relPath = path.relative(path.resolve(dir), file)
|
||||
Promise.join relPath, fs.stat(file), fs.readFile(file),
|
||||
(filename, stats, data) ->
|
||||
pack.entry({ name: toPosixPath(filename), size: stats.size, mode: stats.mode }, data)
|
||||
.then ->
|
||||
pack.finalize()
|
||||
return pack
|
||||
|
||||
truncateString = (str, len) ->
|
||||
return str if str.length < len
|
||||
str = str.slice(0, len)
|
||||
# return everything up to the last line. this is a cheeky way to avoid
|
||||
# having to deal with splitting the string midway through some special
|
||||
# character sequence.
|
||||
return str.slice(0, str.lastIndexOf('\n'))
|
||||
|
||||
LOG_LENGTH_MAX = 512 * 1024 # 512KB
|
||||
|
||||
exports.buildProject = (
|
||||
docker, logger,
|
||||
projectPath, projectName, composition,
|
||||
arch, deviceType,
|
||||
emulated, buildOpts,
|
||||
inlineLogs
|
||||
) ->
|
||||
_ = require('lodash')
|
||||
humanize = require('humanize')
|
||||
compose = require('resin-compose-parse')
|
||||
builder = require('resin-multibuild')
|
||||
transpose = require('docker-qemu-transpose')
|
||||
qemu = require('./qemu')
|
||||
|
||||
logger.logInfo("Building for #{arch}/#{deviceType}")
|
||||
|
||||
imageDescriptors = compose.parse(composition)
|
||||
imageDescriptorsByServiceName = _.keyBy(imageDescriptors, 'serviceName')
|
||||
|
||||
if inlineLogs
|
||||
renderer = new BuildProgressInline(logger.streams['build'], imageDescriptors)
|
||||
else
|
||||
tty = require('./tty')(process.stdout)
|
||||
renderer = new BuildProgressUI(tty, imageDescriptors)
|
||||
renderer.start()
|
||||
|
||||
qemu.installQemuIfNeeded(emulated, logger)
|
||||
.tap (needsQemu) ->
|
||||
return if not needsQemu
|
||||
logger.logInfo('Emulation is enabled')
|
||||
# Copy qemu into all build contexts
|
||||
Promise.map imageDescriptors, (d) ->
|
||||
return if not d.image.context? # external image
|
||||
return qemu.copyQemu(path.join(projectPath, d.image.context))
|
||||
.then (needsQemu) ->
|
||||
# Tar up the directory, ready for the build stream
|
||||
tarDirectory(projectPath)
|
||||
.then (tarStream) ->
|
||||
builder.splitBuildStream(composition, tarStream)
|
||||
.tap (tasks) ->
|
||||
# Updates each task as a side-effect
|
||||
builder.performResolution(tasks, arch, deviceType)
|
||||
.map (task) ->
|
||||
if not task.external and not task.resolved
|
||||
throw new Error(
|
||||
"Project type for service '#{task.serviceName}' could not be determined. " +
|
||||
'Please add a Dockerfile'
|
||||
)
|
||||
.map (task) ->
|
||||
d = imageDescriptorsByServiceName[task.serviceName]
|
||||
|
||||
# multibuild parses the composition internally so any tags we've
|
||||
# set before are lost; re-assign them here
|
||||
task.tag ?= [ projectName, task.serviceName ].join('_')
|
||||
if d.image.context?
|
||||
d.image.tag = task.tag
|
||||
|
||||
# configure build opts appropriately
|
||||
task.dockerOpts ?= {}
|
||||
_.merge(task.dockerOpts, buildOpts, { t: task.tag })
|
||||
if d.image.context?.args?
|
||||
task.dockerOpts.buildargs ?= {}
|
||||
_.merge(task.dockerOpts.buildargs, d.image.context.args)
|
||||
|
||||
# Get the service-specific log stream
|
||||
# Caveat: `multibuild.BuildTask` defines no `logStream` property
|
||||
# but it's convenient to store it there; it's JS ultimately.
|
||||
task.logStream = renderer.streams[task.serviceName]
|
||||
task.logBuffer = []
|
||||
|
||||
# Setup emulation if needed
|
||||
return [ task, null ] if task.external or not needsQemu
|
||||
binPath = qemu.qemuPathInContext(path.join(projectPath, task.context))
|
||||
transpose.transposeTarStream task.buildStream,
|
||||
hostQemuPath: toPosixPath(binPath)
|
||||
containerQemuPath: "/tmp/#{qemu.QEMU_BIN_NAME}"
|
||||
.then (stream) ->
|
||||
task.buildStream = stream
|
||||
.return([ task, binPath ])
|
||||
.map ([ task, qemuPath ]) ->
|
||||
Promise.resolve(task).tap (task) ->
|
||||
captureStream = buildLogCapture(task.external, task.logBuffer)
|
||||
|
||||
if task.external
|
||||
# External image -- there's no build to be performed,
|
||||
# just follow pull progress.
|
||||
captureStream.pipe(task.logStream)
|
||||
task.progressHook = pullProgressAdapter(captureStream)
|
||||
else
|
||||
task.streamHook = (stream) ->
|
||||
if qemuPath?
|
||||
buildThroughStream = transpose.getBuildThroughStream
|
||||
hostQemuPath: toPosixPath(qemuPath)
|
||||
containerQemuPath: "/tmp/#{qemu.QEMU_BIN_NAME}"
|
||||
rawStream = stream.pipe(buildThroughStream)
|
||||
else
|
||||
rawStream = stream
|
||||
# `stream` sends out raw strings in contrast to `task.progressHook`
|
||||
# where we're given objects. capture these strings as they come
|
||||
# before we parse them.
|
||||
rawStream
|
||||
.pipe(captureStream)
|
||||
.pipe(buildProgressAdapter(inlineLogs))
|
||||
.pipe(task.logStream)
|
||||
.then (tasks) ->
|
||||
logger.logDebug 'Prepared tasks; building...'
|
||||
builder.performBuilds(tasks, docker)
|
||||
.map (builtImage) ->
|
||||
if not builtImage.successful
|
||||
builtImage.error.serviceName = builtImage.serviceName
|
||||
throw builtImage.error
|
||||
|
||||
d = imageDescriptorsByServiceName[builtImage.serviceName]
|
||||
task = _.find(tasks, serviceName: builtImage.serviceName)
|
||||
|
||||
image =
|
||||
serviceName: d.serviceName
|
||||
name: d.image.tag ? d.image
|
||||
logs: truncateString(task.logBuffer.join('\n'), LOG_LENGTH_MAX)
|
||||
props:
|
||||
dockerfile: builtImage.dockerfile
|
||||
projectType: builtImage.projectType
|
||||
|
||||
# Times here are timestamps, so test whether they're null
|
||||
# before creating a date out of them, as `new Date(null)`
|
||||
# creates a date representing UNIX time 0.
|
||||
if (startTime = builtImage.startTime)
|
||||
image.props.startTime = new Date(startTime)
|
||||
if (endTime = builtImage.endTime)
|
||||
image.props.endTime = new Date(endTime)
|
||||
docker.getImage(image.name).inspect().get('Size').then (size) ->
|
||||
image.props.size = size
|
||||
.return(image)
|
||||
.tap (images) ->
|
||||
summary = _(images).map ({ serviceName, props }) ->
|
||||
[ serviceName, "Image size: #{humanize.filesize(props.size)}" ]
|
||||
.fromPairs()
|
||||
.value()
|
||||
renderer.end(summary)
|
||||
.finally(renderer.end)
|
||||
|
||||
# Create a new release record (plus its associated service-image records)
# on the API and return sanitised copies of both.
#
# apiEndpoint: base URL of the resin API
# auth: credentials for the release client
# userId: id of the user creating the release
# appId: id of the application the release belongs to
# composition: the docker-compose composition object to store
#
# Resolves to { client, release, serviceImages } where `release` and every
# entry of `serviceImages` have API bookkeeping fields stripped.
createRelease = (apiEndpoint, auth, userId, appId, composition) ->
	_ = require('lodash')
	crypto = require('crypto')
	releaseMod = require('resin-release')

	client = releaseMod.createClient({ apiEndpoint, auth })

	releaseMod.create
		client: client
		user: userId
		application: appId
		composition: composition
		source: 'local'
		# random hex id standing in for a VCS commit hash
		commit: crypto.pseudoRandomBytes(16).toString('hex').toLowerCase()
	.then ({ release, serviceImages }) ->
		release = _.omit(release, [
			'created_at'
			'belongs_to__application'
			'is_created_by__user'
			'__metadata'
		])
		# Fix: the original called `_.keys(serviceImages, callback)`; `_.keys`
		# takes a single argument, so the callback was silently ignored and
		# the service images were never sanitised. Iterate explicitly instead.
		_.forEach _.keys(serviceImages), (serviceName) ->
			serviceImages[serviceName] = _.omit(serviceImages[serviceName], [
				'created_at'
				'is_a_build_of__service'
				'__metadata'
			])
		return { client, release, serviceImages }
|
||||
|
||||
# Tag each locally built image with the registry location recorded on its
# corresponding API service-image record, so the image can be pushed there.
#
# docker: dockerode/docker-toolbelt client
# images: array of built-image descriptors ({ serviceName, name, logs, props })
# serviceImages: map of serviceName -> API service-image record
#
# Resolves to one object per image pairing the service with its freshly
# tagged local image handle and the registry/repo it belongs to.
tagServiceImages = (docker, images, serviceImages) ->
	Promise.map images, (d) ->
		serviceImage = serviceImages[d.serviceName]
		imageName = serviceImage.is_stored_at__image_location
		# Split "registry/repo[:tag]"; the tag defaults to 'latest'.
		# NOTE(review): assumes the location always contains a '/' so exec()
		# cannot return null -- confirm against the API's location format.
		[ _match, registry, repo, tag = 'latest' ] = /(.*?)\/(.*?)(?::([^/]*))?$/.exec(imageName)
		name = "#{registry}/#{repo}"
		docker.getImage(d.name).tag({ repo: name, tag, force: true })
		.then ->
			docker.getImage("#{name}:#{tag}")
		.then (localImage) ->
			# implicit return: the per-image result object
			serviceName: d.serviceName
			serviceImage: serviceImage
			localImage: localImage
			registry: registry
			repo: repo
			logs: d.logs
			props: d.props
|
||||
|
||||
# Request a registry auth token granting pull/push access on the given
# repositories. `images` may be a single repo name or an array of them.
# Resolves to the token string, or to {} if the request fails.
authorizePush = (tokenAuthEndpoint, registry, images) ->
	_ = require('lodash')
	sdk = require('resin-sdk').fromSharedOptions()

	repoList = if _.isArray(images) then images else [ images ]
	scopes = repoList.map (repo) -> "repository:#{repo}:pull,push"

	sdk.request.send
		baseUrl: tokenAuthEndpoint
		url: '/auth/v1/token'
		qs:
			service: registry
			scope: scopes
	.get('body')
	.get('token')
	.catchReturn({})
|
||||
|
||||
# Push each tagged image to the registry and mirror the push metadata
# (size, digest, timings, status, logs) onto the corresponding API
# service-image record. `afterEach(serviceImage, props)`, when provided,
# runs once per image after success OR failure (e.g. to persist the record).
pushAndUpdateServiceImages = (docker, token, images, afterEach) ->
	chalk = require('chalk')
	{ DockerProgress } = require('docker-progress')
	tty = require('./tty')(process.stdout)

	# registry auth is supplied per-request via the registrytoken field
	opts = { authconfig: registrytoken: token }

	progress = new DockerProgress(dockerToolbelt: docker)
	renderer = pushProgressRenderer(tty, chalk.blue('[Push]') + ' ')
	reporters = progress.aggregateProgress(images.length, renderer)

	# hide the cursor while the progress bar is redrawn in place;
	# Promise.using guarantees it is shown again afterwards
	Promise.using tty.cursorHidden(), ->
		Promise.map images, ({ serviceImage, localImage, props, logs }, index) ->
			Promise.join(
				localImage.inspect().get('Size')
				# push() resolves to the digest reported by the registry
				progress.push(localImage.name, reporters[index], opts).finally(renderer.end)
				(size, digest) ->
					serviceImage.image_size = size
					serviceImage.content_hash = digest
					serviceImage.build_log = logs
					serviceImage.dockerfile = props.dockerfile
					serviceImage.project_type = props.projectType
					# timestamps are optional -- only set when the build recorded them
					serviceImage.start_timestamp = props.startTime if props.startTime
					serviceImage.end_timestamp = props.endTime if props.endTime
					serviceImage.push_timestamp = new Date()
					serviceImage.status = 'success'
			)
			.tapCatch (e) ->
				# record the failure on the image record, then let the error propagate
				serviceImage.error_message = '' + e
				serviceImage.status = 'failed'
			.finally ->
				afterEach?(serviceImage, props)
|
||||
|
||||
# Deploy a built multicontainer project: create a release on the API, tag
# and push the built images to the registry, update each service-image
# record with the outcome, and finalise the release record with a status
# of 'success' or 'failed'.
#
# skipLogUpload: when truthy, build logs are stripped from the image
# records before they are saved.
#
# Resolves to the (sanitised) release record.
exports.deployProject = (
	docker, logger,
	composition, images,
	appId, userId, auth,
	apiEndpoint,
	skipLogUpload
) ->
	_ = require('lodash')
	chalk = require('chalk')
	releaseMod = require('resin-release')
	tty = require('./tty')(process.stdout)

	prefix = chalk.cyan('[Info]') + ' '
	spinner = createSpinner()
	runloop = runSpinner(tty, spinner, "#{prefix}Creating release...")

	createRelease(apiEndpoint, auth, userId, appId, composition)
	.finally(runloop.end)
	.then ({ client, release, serviceImages }) ->
		logger.logDebug('Tagging images...')
		tagServiceImages(docker, images, serviceImages)
		.tap (images) ->
			logger.logDebug('Authorizing push...')
			# all images go to the same registry, so authorize against the first
			authorizePush(apiEndpoint, images[0].registry, _.map(images, 'repo'))
			.then (token) ->
				logger.logInfo('Pushing images to registry...')
				pushAndUpdateServiceImages docker, token, images, (serviceImage) ->
					logger.logDebug("Saving image #{serviceImage.is_stored_at__image_location}")
					if skipLogUpload
						delete serviceImage.build_log
					releaseMod.updateImage(client, serviceImage.id, serviceImage)
			.finally ->
				# remove the registry tags from the local daemon regardless of outcome
				logger.logDebug('Untagging images...')
				Promise.map images, ({ localImage }) ->
					localImage.remove()
		.then ->
			release.status = 'success'
		.tapCatch (e) ->
			release.status = 'failed'
		.finally ->
			# persist the final release status/timestamp even on failure
			runloop = runSpinner(tty, spinner, "#{prefix}Saving release...")
			release.end_timestamp = new Date()
			releaseMod.updateRelease(client, release.id, release)
			.finally(runloop.end)
		.return(release)
|
||||
|
||||
# utilities
|
||||
|
||||
# Render a textual progress bar such as "[=====>     ]  50%".
# `percentage` is clamped to [0, 100]; `stepCount` is the bar's inner width.
renderProgressBar = (percentage, stepCount) ->
	_ = require('lodash')
	pct = _.clamp(percentage, 0, 100)
	filled = stepCount * pct // 100
	empty = stepCount - filled
	segments = [ '[', _.repeat('=', filled), '>', _.repeat(' ', empty), ']' ]
	"#{segments.join('')} #{_.padStart(pct, 3)}%"
|
||||
|
||||
# Build a docker-progress reporter that redraws a 40-step progress bar in
# place on `tty`, prefixed with `prefix`. The returned function throws if
# the progress event carries an error; call `fn.end()` to clear the line.
pushProgressRenderer = (tty, prefix) ->
	render = (event) ->
		throw new Error(event.error) if event.error?
		tty.replaceLine("#{prefix}#{renderProgressBar(event.percentage, 40)}\r")
	render.end = -> tty.clearLine()
	render
|
||||
|
||||
# Create a passthrough stream that mirrors build/pull output into `buffer`
# (an array of log lines) while forwarding the data downstream.
#
# objectMode: true for pull streams (docker progress objects, forwarded
# unchanged), false for raw build output (chunks split into lines).
buildLogCapture = (objectMode, buffer) ->
	_ = require('lodash')
	through = require('through2')

	through { objectMode }, (data, enc, cb) ->
		return cb(null, data) if not data?

		# data from pull stream
		if data.error
			buffer.push("#{data.error}")
		else if data.progress and data.status
			buffer.push("#{data.progress}% #{data.status}")
		else if data.status
			buffer.push("#{data.status}")

		# data from build stream
		else
			# normalise build log output here. it is somewhat ugly
			# that this supposedly "passthrough" stream mutates the
			# values before forwarding them, but it's convenient
			# as it allows to both forward and save normalised logs

			# convert to string, split to lines, trim each one and
			# filter out empty ones.
			#
			# Fix: the original split on /\r?\n$/ -- without the /m flag
			# the anchor only matches before the chunk's *final* newline,
			# so multi-line chunks were never split into individual lines.
			lines = _(data.toString('utf-8').split(/\r?\n/))
				.map(_.trimEnd)
				.reject(_.isEmpty)

			# forward each line separately
			lines.forEach (line) =>
				buffer.push(line)
				@push(line)

			return cb()

		cb(null, data)
|
||||
|
||||
# Transform raw docker build output lines into progress events of the
# shape { status, progress } expected by the progress renderers.
#
# inline: when true, forward each line verbatim as a status with no
# progress computation (used for inline/streaming log display).
buildProgressAdapter = (inline) ->
	through = require('through2')

	# matches lines like "Step 3/9 : RUN ..."
	stepRegex = /^\s*Step\s+(\d+)\/(\d+)\s*:\s+(.+)$/

	# this state persists across chunks for the lifetime of the stream
	[ step, numSteps, progress ] = [ null, null, undefined ]

	through { objectMode: true }, (str, enc, cb) ->
		return cb(null, str) if not str?

		if inline
			return cb(null, { status: str })

		if /^Successfully tagged /.test(str)
			# build finished -- stop reporting a percentage
			progress = undefined
		else
			if (match = stepRegex.exec(str))
				step = match[1]
				numSteps = match[2]
				str = match[3]
			if step?
				# prefix every subsequent line with the current step marker
				str = "Step #{step}/#{numSteps}: #{str}"
				# `//` is CoffeeScript floor division
				progress = parseInt(step, 10) * 100 // parseInt(numSteps, 10)

		cb(null, { status: str, progress })
|
||||
|
||||
# Adapt docker pull progress events into the normalised shape expected by
# the progress renderers and write each one to `outStream`.
pullProgressAdapter = (outStream) ->
	({ status, id, percentage, error, errorDetail }) ->
		status = status.replace(/^Status: /, '') if status?
		status = "#{id}: #{status}" if id?
		# 100% means done -- drop the bar rather than render a full one
		percentage = undefined if percentage is 100
		outStream.write
			status: status
			progress: percentage
			error: errorDetail?.message ? error
||||
|
||||
# Return a zero-argument function that yields the next spinner frame
# ('|', '/', '-', '\') on each call, cycling forever.
createSpinner = ->
	frames = '|/-\\'
	tick = 0
	->
		frame = frames[tick % frames.length]
		tick += 1
		frame
|
||||
|
||||
# Animate `msg` with a trailing spinner frame on `tty` until the returned
# runloop's `end()` is called; the final state leaves `msg` printed on its
# own line without a spinner.
runSpinner = (tty, spinner, msg) ->
	redraw = ->
		tty.clearLine()
		tty.writeLine("#{msg} #{spinner()}")
		tty.cursorUp()
	loopHandle = createRunLoop(redraw)
	loopHandle.onEnd = ->
		tty.clearLine()
		tty.writeLine(msg)
	loopHandle
|
||||
|
||||
# Invoke `tick` ten times per second until `end()` is called. The caller
# may replace `onEnd`, which runs exactly once after the timer is cleared.
createRunLoop = (tick) ->
	interval = setInterval(tick, 1000 / 10)
	handle =
		onEnd: ->
		end: ->
			clearInterval(interval)
			handle.onEnd()
	handle
|
||||
|
||||
# Interactive (TTY) build progress renderer: shows one live-updating line
# per service plus a status line with a spinner, redrawn ~10 times/second.
# Exposes `@streams` (serviceName -> object stream) into which per-service
# progress events ({ status, progress, error }) are written.
class BuildProgressUI
	# tty: helper object from ./tty; descriptors: image descriptors with a
	# `serviceName` property each.
	constructor: (tty, descriptors) ->
		_ = require('lodash')
		chalk = require('chalk')
		through = require('through2')

		eventHandler = @_handleEvent
		services = _.map(descriptors, 'serviceName')

		# one object stream per service; events are recorded for display
		# and the stream is piped to the tty so backpressure is respected
		streams = _(services).map (service) ->
			stream = through.obj (event, _enc, cb) ->
				eventHandler(service, event)
				cb()
			stream.pipe(tty.stream)
			[ service, stream ]
		.fromPairs()
		.value()

		@_tty = tty
		@_serviceToDataMap = {}
		@_services = services

		# Logger magically prefixes the log line with [Build] etc., but it doesn't
		# work well with the spinner we're also showing. Manually build the prefix
		# here and bypass the logger.
		prefix = chalk.blue('[Build]') + ' '

		offset = 10 # account for escape sequences inserted for colouring
		@_prefixWidth = offset + prefix.length + _.max(_.map(services, 'length'))
		@_prefix = prefix

		# these are to handle window wrapping
		@_maxLineWidth = null
		@_lineWidths = []

		@_startTime = null
		@_ended = false
		@_cancelled = false
		@_spinner = createSpinner()

		@streams = streams

	# Record the latest event for a service; the runloop picks it up on the
	# next redraw.
	_handleEvent: (service, event) =>
		@_serviceToDataMap[service] = event

	# SIGINT handler: mark the build cancelled, render the final state and
	# exit with the conventional interrupted-process code.
	_handleInterrupt: =>
		@_cancelled = true
		@end()
		process.exit(130) # 128 + SIGINT

	# Begin rendering: seed each service with a placeholder status and start
	# the redraw runloop.
	start: =>
		process.on('SIGINT', @_handleInterrupt)
		@_tty.hideCursor()
		@_services.forEach (service) =>
			@streams[service].write({ status: 'Preparing...' })
		@_runloop = createRunLoop(@_display)
		@_startTime = Date.now()

	# Stop rendering and print the final status plus a per-service summary.
	# `summary` (serviceName -> string) overrides the last recorded events.
	# Idempotent: subsequent calls are no-ops.
	end: (summary = null) =>
		return if @_ended
		@_ended = true
		process.removeListener('SIGINT', @_handleInterrupt)
		@_runloop.end()
		@_runloop = null

		@_clear()
		@_renderStatus(true)
		@_renderSummary(summary ? @_getServiceSummary())
		@_tty.showCursor()

	# One redraw frame: wipe, render status + summary, move the cursor back
	# up so the next frame overwrites this one.
	_display: =>
		@_clear()
		@_renderStatus()
		@_renderSummary(@_getServiceSummary())
		@_tty.cursorUp(@_services.length + 1) # for status line

	_clear: ->
		@_tty.deleteToEnd()
		@_maxLineWidth = @_tty.currentWindowSize().width

	# Build a serviceName -> display-string map from the latest events.
	_getServiceSummary: ->
		_ = require('lodash')

		services = @_services
		serviceToDataMap = @_serviceToDataMap

		_(services).map (service) ->
			{ status, progress, error } = serviceToDataMap[service] ? {}
			if error
				return "#{error}"
			else if progress
				bar = renderProgressBar(progress, 20)
				return "#{bar} #{status}" if status
				return "#{bar}"
			else if status
				return "#{status}"
			else
				return 'Waiting...'
		.map (data, index) ->
			[ services[index], data ]
		.fromPairs()
		.value()

	# Render the top status line; with end=true prints the final outcome
	# (cancelled, or total service count and elapsed time).
	_renderStatus: (end = false) ->
		moment = require('moment')
		require('moment-duration-format')(moment)

		@_tty.clearLine()
		@_tty.write(@_prefix)
		if end and @_cancelled
			@_tty.writeLine('Build cancelled')
		else if end
			serviceCount = @_services.length
			serviceStr = if serviceCount is 1 then '1 service' else "#{serviceCount} services"
			runTime = Date.now() - @_startTime
			# `//` is CoffeeScript floor division: ms -> whole seconds
			durationStr = moment.duration(runTime // 1000, 'seconds').format()
			@_tty.writeLine("Built #{serviceStr} in #{durationStr}")
		else
			@_tty.writeLine("Building services... #{@_spinner()}")

	# Render one line per service, truncated to the window width so lines
	# never wrap (wrapping would break the cursor-up redraw arithmetic).
	_renderSummary: (serviceToStrMap) ->
		_ = require('lodash')
		chalk = require('chalk')
		truncate = require('cli-truncate')
		strlen = require('string-width')

		@_services.forEach (service, index) =>
			str = _.padEnd(@_prefix + chalk.bold(service), @_prefixWidth)
			str += serviceToStrMap[service]
			if @_maxLineWidth?
				str = truncate(str, @_maxLineWidth)
			@_lineWidths[index] = strlen(str)

			@_tty.clearLine()
			@_tty.writeLine(str)
|
||||
|
||||
# Non-interactive build progress renderer: appends one line per event to
# `outStream` instead of redrawing in place (suitable for non-TTY output).
# Exposes `@streams` (serviceName -> object stream) like BuildProgressUI.
class BuildProgressInline
	constructor: (outStream, descriptors) ->
		_ = require('lodash')
		through = require('through2')

		services = _.map(descriptors, 'serviceName')
		eventHandler = @_renderEvent
		# events are rendered immediately as they arrive
		streams = _(services).map (service) ->
			stream = through.obj (event, _enc, cb) ->
				eventHandler(service, event)
				cb()
			stream.pipe(outStream)
			[ service, stream ]
		.fromPairs()
		.value()

		offset = 10 # account for escape sequences inserted for colouring
		@_prefixWidth = offset + _.max(_.map(services, 'length'))
		@_outStream = outStream
		@_services = services
		@_startTime = null
		@_ended = false

		@streams = streams

	# Print the header and seed every service with a placeholder status.
	start: =>
		@_outStream.write('Building services...\n')
		@_services.forEach (service) =>
			@streams[service].write({ status: 'Preparing...' })
		@_startTime = Date.now()

	# Print the final per-service summary (if given) and the closing
	# status line. Idempotent: subsequent calls are no-ops.
	end: (summary = null) =>
		moment = require('moment')
		require('moment-duration-format')(moment)

		return if @_ended
		@_ended = true

		if summary?
			@_services.forEach (service) =>
				@_renderEvent(service, summary[service])

		# NOTE(review): @_cancelled is read here but never assigned in this
		# class (unlike BuildProgressUI, which sets it on SIGINT) -- so this
		# branch appears unreachable; confirm whether a handler is missing.
		if @_cancelled
			@_outStream.write('Build cancelled\n')
		else
			serviceCount = @_services.length
			serviceStr = if serviceCount is 1 then '1 service' else "#{serviceCount} services"
			runTime = Date.now() - @_startTime
			# `//` is CoffeeScript floor division: ms -> whole seconds
			durationStr = moment.duration(runTime // 1000, 'seconds').format()
			@_outStream.write("Built #{serviceStr} in #{durationStr}\n")

	# Write one "<service>  <status-or-error>" line for the event.
	_renderEvent: (service, event) =>
		_ = require('lodash')
		chalk = require('chalk')

		str = do ->
			{ status, error } = event
			if error
				return "#{error}"
			else if status
				return "#{status}"
			else
				return 'Waiting...'

		prefix = _.padEnd(chalk.bold(service), @_prefixWidth)
		@_outStream.write(prefix)
		@_outStream.write(str)
		@_outStream.write('\n')
|
@ -1,7 +1,6 @@
|
||||
# Functions to help actions which rely on using docker
|
||||
|
||||
QEMU_VERSION = 'v2.5.50-resin-execve'
|
||||
QEMU_BIN_NAME = 'qemu-execve'
|
||||
Promise = require('bluebird')
|
||||
|
||||
# Use this function to seed an action's list of capitano options
|
||||
# with the docker options. Using this interface means that
|
||||
@ -71,12 +70,6 @@ exports.appendOptions = (opts) ->
|
||||
description: "Don't use docker layer caching when building"
|
||||
boolean: true
|
||||
},
|
||||
{
|
||||
signature: 'emulated'
|
||||
description: 'Run an emulated build using Qemu'
|
||||
boolean: true
|
||||
alias: 'e'
|
||||
},
|
||||
{
|
||||
signature: 'squash'
|
||||
description: 'Squash newly built layers into a single new layer'
|
||||
@ -84,8 +77,7 @@ exports.appendOptions = (opts) ->
|
||||
}
|
||||
]
|
||||
|
||||
exports.generateConnectOpts = generateConnectOpts = (opts) ->
|
||||
Promise = require('bluebird')
|
||||
generateConnectOpts = (opts) ->
|
||||
buildDockerodeOpts = require('dockerode-options')
|
||||
fs = require('mz/fs')
|
||||
_ = require('lodash')
|
||||
@ -131,204 +123,35 @@ exports.generateConnectOpts = generateConnectOpts = (opts) ->
|
||||
|
||||
return connectOpts
|
||||
|
||||
exports.tarDirectory = tarDirectory = (dir) ->
|
||||
Promise = require('bluebird')
|
||||
tar = require('tar-stream')
|
||||
klaw = require('klaw')
|
||||
path = require('path')
|
||||
fs = require('mz/fs')
|
||||
streamToPromise = require('stream-to-promise')
|
||||
|
||||
getFiles = ->
|
||||
streamToPromise(klaw(dir))
|
||||
.filter((item) -> not item.stats.isDirectory())
|
||||
.map((item) -> item.path)
|
||||
|
||||
pack = tar.pack()
|
||||
getFiles(dir)
|
||||
.map (file) ->
|
||||
relPath = path.relative(path.resolve(dir), file)
|
||||
Promise.join relPath, fs.stat(file), fs.readFile(file),
|
||||
(filename, stats, data) ->
|
||||
pack.entry({ name: filename, size: stats.size, mode: stats.mode }, data)
|
||||
.then ->
|
||||
pack.finalize()
|
||||
return pack
|
||||
|
||||
cacheHighlightStream = ->
|
||||
colors = require('colors/safe')
|
||||
es = require('event-stream')
|
||||
{ EOL } = require('os')
|
||||
|
||||
extractArrowMessage = (message) ->
|
||||
arrowTest = /^\s*-+>\s*(.+)/i
|
||||
if (match = arrowTest.exec(message))
|
||||
match[1]
|
||||
else
|
||||
undefined
|
||||
|
||||
es.mapSync (data) ->
|
||||
msg = extractArrowMessage(data)
|
||||
if msg? and msg.toLowerCase() == 'using cache'
|
||||
data = colors.bgGreen.black(msg)
|
||||
return data + EOL
|
||||
|
||||
parseBuildArgs = (args, onError) ->
|
||||
parseBuildArgs = (args) ->
|
||||
_ = require('lodash')
|
||||
if not _.isArray(args)
|
||||
args = [ args ]
|
||||
buildArgs = {}
|
||||
args.forEach (str) ->
|
||||
pair = /^([^\s]+?)=(.*)$/.exec(str)
|
||||
args.forEach (arg) ->
|
||||
pair = /^([^\s]+?)=(.*)$/.exec(arg)
|
||||
if pair?
|
||||
buildArgs[pair[1]] = pair[2]
|
||||
buildArgs[pair[1]] = pair[2] ? ''
|
||||
else
|
||||
onError(str)
|
||||
throw new Error("Could not parse build argument: '#{arg}'")
|
||||
return buildArgs
|
||||
|
||||
# Pass in the command line parameters and options and also
|
||||
# a function which will return the information about the bundle
|
||||
exports.runBuild = (params, options, getBundleInfo, logger) ->
|
||||
Promise = require('bluebird')
|
||||
dockerBuild = require('resin-docker-build')
|
||||
resolver = require('resin-bundle-resolve')
|
||||
es = require('event-stream')
|
||||
doodles = require('resin-doodles')
|
||||
transpose = require('docker-qemu-transpose')
|
||||
path = require('path')
|
||||
|
||||
# The default build context is the current directory
|
||||
params.source ?= '.'
|
||||
logs = ''
|
||||
# Only used in emulated builds
|
||||
qemuPath = ''
|
||||
|
||||
Promise.try ->
|
||||
return if not (options.emulated and platformNeedsQemu())
|
||||
|
||||
hasQemu()
|
||||
.then (present) ->
|
||||
if !present
|
||||
logger.logInfo('Installing qemu for ARM emulation...')
|
||||
installQemu()
|
||||
.then ->
|
||||
# Copy the qemu binary into the build context
|
||||
copyQemu(params.source)
|
||||
.then (binPath) ->
|
||||
qemuPath = path.relative(params.source, binPath)
|
||||
.then ->
|
||||
# Tar up the directory, ready for the build stream
|
||||
tarDirectory(params.source)
|
||||
.then (tarStream) ->
|
||||
new Promise (resolve, reject) ->
|
||||
hooks =
|
||||
buildSuccess: (image) ->
|
||||
# Show charlie. In the interest of cloud parity,
|
||||
# use console.log, not the standard logging streams
|
||||
doodle = doodles.getDoodle()
|
||||
console.log()
|
||||
console.log(doodle)
|
||||
console.log()
|
||||
|
||||
resolve({ image, log: logs + '\n' + doodle + '\n' } )
|
||||
|
||||
buildFailure: reject
|
||||
buildStream: (stream) ->
|
||||
if options.emulated
|
||||
logger.logInfo('Running emulated build')
|
||||
|
||||
getBundleInfo(options)
|
||||
.then (info) ->
|
||||
if !info?
|
||||
logger.logWarn '''
|
||||
Warning: No architecture/device type or application information provided.
|
||||
Dockerfile/project pre-processing will not be performed.
|
||||
'''
|
||||
return tarStream
|
||||
else
|
||||
[arch, deviceType] = info
|
||||
# Perform type resolution on the project
|
||||
bundle = new resolver.Bundle(tarStream, deviceType, arch)
|
||||
resolver.resolveBundle(bundle, resolver.getDefaultResolvers())
|
||||
.then (resolved) ->
|
||||
logger.logInfo("Building #{resolved.projectType} project")
|
||||
|
||||
return resolved.tarStream
|
||||
.then (buildStream) ->
|
||||
# if we need emulation
|
||||
if options.emulated and platformNeedsQemu()
|
||||
return transpose.transposeTarStream buildStream,
|
||||
hostQemuPath: toPosixPath(qemuPath)
|
||||
containerQemuPath: "/tmp/#{QEMU_BIN_NAME}"
|
||||
else
|
||||
return buildStream
|
||||
.then (buildStream) ->
|
||||
# Send the resolved tar stream to the docker daemon
|
||||
buildStream.pipe(stream)
|
||||
.catch(reject)
|
||||
|
||||
# And print the output
|
||||
logThroughStream = es.through (data) ->
|
||||
logs += data.toString()
|
||||
this.emit('data', data)
|
||||
|
||||
if options.emulated and platformNeedsQemu()
|
||||
buildThroughStream = transpose.getBuildThroughStream
|
||||
hostQemuPath: toPosixPath(qemuPath)
|
||||
containerQemuPath: "/tmp/#{QEMU_BIN_NAME}"
|
||||
|
||||
newStream = stream.pipe(buildThroughStream)
|
||||
else
|
||||
newStream = stream
|
||||
|
||||
newStream
|
||||
.pipe(logThroughStream)
|
||||
.pipe(cacheHighlightStream())
|
||||
.pipe(logger.streams.build)
|
||||
|
||||
# Create a builder
|
||||
generateConnectOpts(options)
|
||||
.tap (connectOpts) ->
|
||||
ensureDockerSeemsAccessible(connectOpts)
|
||||
.then (connectOpts) ->
|
||||
# Allow degugging output, hidden behind an env var
|
||||
logger.logDebug('Connecting with the following options:')
|
||||
logger.logDebug(JSON.stringify(connectOpts, null, ' '))
|
||||
|
||||
builder = new dockerBuild.Builder(connectOpts)
|
||||
opts = {}
|
||||
|
||||
if options.tag?
|
||||
opts['t'] = options.tag
|
||||
if options.nocache?
|
||||
opts['nocache'] = true
|
||||
if options.buildArg?
|
||||
opts['buildargs'] = parseBuildArgs options.buildArg, (arg) ->
|
||||
logger.logWarn("Could not parse variable: '#{arg}'")
|
||||
if options.squash?
|
||||
opts['squash'] = true
|
||||
|
||||
builder.createBuildStream(opts, hooks, reject)
|
||||
|
||||
# Given an image id or tag, export the image to a tar archive,
|
||||
# gzip the result, and buffer it to disk.
|
||||
exports.bufferImage = (docker, imageId, bufferFile) ->
|
||||
Promise = require('bluebird')
|
||||
streamUtils = require('./streams')
|
||||
|
||||
image = docker.getImage(imageId)
|
||||
imageMetadata = image.inspectAsync()
|
||||
|
||||
Promise.join image.get(), imageMetadata.get('Size'), (imageStream, imageSize) ->
|
||||
streamUtils.buffer(imageStream, bufferFile)
|
||||
.tap (bufferedStream) ->
|
||||
bufferedStream.length = imageSize
|
||||
exports.generateBuildOpts = (options) ->
|
||||
opts = {}
|
||||
if options.tag?
|
||||
opts.t = options.tag
|
||||
if options.nocache?
|
||||
opts.nocache = true
|
||||
if options.squash?
|
||||
opts.squash = true
|
||||
if options.buildArg?
|
||||
opts.buildargs = parseBuildArgs(options.buildArg)
|
||||
return opts
|
||||
|
||||
exports.getDocker = (options) ->
|
||||
generateConnectOpts(options)
|
||||
.tap (connectOpts) ->
|
||||
ensureDockerSeemsAccessible(connectOpts)
|
||||
.then(createClient)
|
||||
.tap(ensureDockerSeemsAccessible)
|
||||
|
||||
exports.createClient = createClient = do ->
|
||||
# docker-toolbelt v3 is not backwards compatible as it removes all *Async
|
||||
@ -339,7 +162,6 @@ exports.createClient = createClient = do ->
|
||||
# **New code must not use the *Async methods.**
|
||||
#
|
||||
Docker = require('docker-toolbelt')
|
||||
Promise = require('bluebird')
|
||||
Promise.promisifyAll Docker.prototype, {
|
||||
filter: (name) -> name == 'run'
|
||||
multiArgs: true
|
||||
@ -351,89 +173,6 @@ exports.createClient = createClient = do ->
|
||||
return (opts) ->
|
||||
return new Docker(opts)
|
||||
|
||||
ensureDockerSeemsAccessible = (options) ->
|
||||
fs = require('mz/fs')
|
||||
|
||||
if options.socketPath?
|
||||
# If we're trying to use a socket, check it exists and we have access to it
|
||||
fs.access(options.socketPath, (fs.constants || fs).R_OK | (fs.constants || fs).W_OK)
|
||||
.return(true)
|
||||
.catch (err) ->
|
||||
throw new Error(
|
||||
"Docker seems to be unavailable (using socket #{options.socketPath}). Is it
|
||||
installed, and do you have permission to talk to it?"
|
||||
)
|
||||
else
|
||||
# Otherwise, we think we're probably ok
|
||||
Promise.resolve(true)
|
||||
|
||||
hasQemu = ->
|
||||
fs = require('mz/fs')
|
||||
|
||||
getQemuPath()
|
||||
.then(fs.stat)
|
||||
.return(true)
|
||||
.catchReturn(false)
|
||||
|
||||
getQemuPath = ->
|
||||
resin = require('resin-sdk-preconfigured')
|
||||
path = require('path')
|
||||
fs = require('mz/fs')
|
||||
|
||||
resin.settings.get('binDirectory')
|
||||
.then (binDir) ->
|
||||
# The directory might not be created already,
|
||||
# if not, create it
|
||||
fs.access(binDir)
|
||||
.catch code: 'ENOENT', ->
|
||||
fs.mkdir(binDir)
|
||||
.then ->
|
||||
path.join(binDir, QEMU_BIN_NAME)
|
||||
|
||||
platformNeedsQemu = ->
|
||||
os = require('os')
|
||||
os.platform() == 'linux'
|
||||
|
||||
installQemu = ->
|
||||
request = require('request')
|
||||
fs = require('fs')
|
||||
zlib = require('zlib')
|
||||
|
||||
getQemuPath()
|
||||
.then (qemuPath) ->
|
||||
new Promise (resolve, reject) ->
|
||||
installStream = fs.createWriteStream(qemuPath)
|
||||
qemuUrl = "https://github.com/resin-io/qemu/releases/download/#{QEMU_VERSION}/#{QEMU_BIN_NAME}.gz"
|
||||
request(qemuUrl)
|
||||
.pipe(zlib.createGunzip())
|
||||
.pipe(installStream)
|
||||
.on('error', reject)
|
||||
.on('finish', resolve)
|
||||
|
||||
copyQemu = (context) ->
|
||||
path = require('path')
|
||||
fs = require('mz/fs')
|
||||
# Create a hidden directory in the build context, containing qemu
|
||||
binDir = path.join(context, '.resin')
|
||||
binPath = path.join(binDir, QEMU_BIN_NAME)
|
||||
|
||||
fs.access(binDir)
|
||||
.catch code: 'ENOENT', ->
|
||||
fs.mkdir(binDir)
|
||||
.then ->
|
||||
getQemuPath()
|
||||
.then (qemu) ->
|
||||
new Promise (resolve, reject) ->
|
||||
read = fs.createReadStream(qemu)
|
||||
write = fs.createWriteStream(binPath)
|
||||
read
|
||||
.pipe(write)
|
||||
.on('error', reject)
|
||||
.on('finish', resolve)
|
||||
.then ->
|
||||
fs.chmod(binPath, '755')
|
||||
.return(binPath)
|
||||
|
||||
toPosixPath = (systemPath) ->
|
||||
path = require('path')
|
||||
systemPath.replace(new RegExp('\\' + path.sep, 'g'), '/')
|
||||
ensureDockerSeemsAccessible = (docker) ->
|
||||
docker.ping().catch ->
|
||||
throw new Error('Docker seems to be unavailable. Is it installed and running?')
|
||||
|
@ -133,7 +133,7 @@ export function getArchAndDeviceType(
|
||||
);
|
||||
}
|
||||
|
||||
function getApplication(applicationName: string) {
|
||||
export function getApplication(applicationName: string) {
|
||||
// Check for an app of the form `user/application`, and send
|
||||
// that off to a special handler (before importing any modules)
|
||||
const match = /(\w+)\/(\w+)/.exec(applicationName);
|
||||
|
86
lib/utils/qemu.coffee
Normal file
86
lib/utils/qemu.coffee
Normal file
@ -0,0 +1,86 @@
|
||||
Promise = require('bluebird')
|
||||
|
||||
exports.QEMU_VERSION = QEMU_VERSION = 'v2.5.50-resin-execve'
|
||||
exports.QEMU_BIN_NAME = QEMU_BIN_NAME = 'qemu-execve'
|
||||
|
||||
# Ensure the qemu binary is installed when an emulated build was requested
# on a platform that needs it. Resolves to true when emulation will be
# used, false otherwise.
exports.installQemuIfNeeded = Promise.method (emulated, logger) ->
	needed = emulated and platformNeedsQemu()
	return false unless needed

	hasQemu()
	.then (present) ->
		return if present
		logger.logInfo('Installing qemu for ARM emulation...')
		installQemu()
	.return(true)
|
||||
|
||||
# Path of the qemu binary relative to the given build context,
# i.e. ".resin/<qemu binary name>".
exports.qemuPathInContext = (context) ->
	path = require('path')
	path.relative(context, path.join(context, '.resin', QEMU_BIN_NAME))
|
||||
|
||||
# Copy the locally installed qemu binary into a hidden '.resin' directory
# inside the build context, mark it executable, and resolve to its path
# relative to the context.
exports.copyQemu = (context) ->
	path = require('path')
	fs = require('mz/fs')
	# Create a hidden directory in the build context, containing qemu
	binDir = path.join(context, '.resin')
	binPath = path.join(binDir, QEMU_BIN_NAME)

	Promise.resolve(fs.mkdir(binDir))
	.catch(code: 'EEXIST', ->)
	.then ->
		getQemuPath()
	.then (qemu) ->
		new Promise (resolve, reject) ->
			read = fs.createReadStream(qemu)
			write = fs.createWriteStream(binPath)
			# Fix: pipe() returns the destination, so the original chain only
			# caught write-stream errors; a failing read would leave this
			# promise pending forever. Listen on the source stream too.
			read.on('error', reject)
			read
			.pipe(write)
			.on('error', reject)
			.on('finish', resolve)
	.then ->
		fs.chmod(binPath, '755')
	.then ->
		path.relative(context, binPath)
|
||||
|
||||
# Resolve true iff the qemu binary already exists at its install path.
hasQemu = ->
	fs = require('mz/fs')

	getQemuPath()
	.then (qemuPath) -> fs.stat(qemuPath)
	.return(true)
	.catchReturn(false)
|
||||
|
||||
# Resolve the full path where the qemu binary is (or should be) installed,
# creating the SDK's bin directory first if it does not exist yet.
getQemuPath = ->
	resin = require('resin-sdk').fromSharedOptions()
	path = require('path')
	fs = require('mz/fs')

	resin.settings.get('binDirectory')
	.then (binDir) ->
		Promise.resolve(fs.mkdir(binDir))
		.catch(code: 'EEXIST', ->)
		.return(path.join(binDir, QEMU_BIN_NAME))
|
||||
|
||||
# True only on Linux hosts -- the sole platform where this CLI injects a
# qemu binary into the build context for emulated builds.
platformNeedsQemu = ->
	require('os').platform() is 'linux'
|
||||
|
||||
# Download the pinned qemu release, gunzip it on the fly and write it to
# the install path. An error at any pipeline stage (request, gunzip,
# file write) rejects the returned promise.
installQemu = ->
	request = require('request')
	fs = require('fs')
	zlib = require('zlib')

	getQemuPath()
	.then (qemuPath) ->
		new Promise (resolve, reject) ->
			installStream = fs.createWriteStream(qemuPath)
			qemuUrl = "https://github.com/resin-io/qemu/releases/download/#{QEMU_VERSION}/#{QEMU_BIN_NAME}.gz"
			# each pipeline stage gets its own error handler; 'finish' fires
			# when the write stream has flushed the decompressed binary
			request(qemuUrl)
			.on('error', reject)
			.pipe(zlib.createGunzip())
			.on('error', reject)
			.pipe(installStream)
			.on('error', reject)
			.on('finish', resolve)
|
65
lib/utils/tty.coffee
Normal file
65
lib/utils/tty.coffee
Normal file
@ -0,0 +1,65 @@
|
||||
|
||||
# Cached terminal dimensions, shared by every helper returned from the
# exported factory; refreshed whenever the terminal is resized.
windowSize = {}

updateWindowSize = ->
	{ width, height } = require('window-size').get()
	windowSize.width = width
	windowSize.height = height

# Keep the cache current for the lifetime of the process.
process.stdout.on('resize', updateWindowSize)
|
||||
|
||||
# Factory returning a set of terminal-manipulation helpers bound to the
# given output stream (ANSI escape based: cursor visibility, cursor
# movement, line clearing/replacing).
#
# @param {stream.Writable} [stream=process.stdout] - stream to write to;
#   must be a TTY for the escape sequences to have any effect
module.exports = (stream = process.stdout) ->
	# make sure we get initial metrics
	updateWindowSize()

	currentWindowSize = ->
		# always return a copy
		width: windowSize.width
		height: windowSize.height

	hideCursor = -> stream.write('\u001B[?25l')
	showCursor = -> stream.write('\u001B[?25h')
	cursorUp = (rows = 0) -> stream.write("\u001B[#{rows}A")
	cursorDown = (rows = 0) -> stream.write("\u001B[#{rows}B")

	# Bluebird disposer: hides the cursor and guarantees it is shown
	# again when the `Promise.using` block finishes.
	cursorHidden = ->
		Promise = require('bluebird')
		Promise.try(hideCursor).disposer(showCursor)

	write = (str) -> stream.write(str)
	writeLine = (str) -> stream.write("#{str}\n")
	clearLine = -> stream.write('\u001B[2K\r')
	replaceLine = (str) ->
		clearLine()
		write(str)
	deleteToEnd = -> stream.write('\u001b[0J')

	return {
		stream
		currentWindowSize
		hideCursor
		showCursor
		cursorHidden
		cursorUp
		cursorDown
		write
		writeLine
		clearLine
		replaceLine
		deleteToEnd
	}
|
17
package.json
17
package.json
@ -91,9 +91,11 @@
|
||||
"body-parser": "^1.14.1",
|
||||
"capitano": "^1.7.0",
|
||||
"chalk": "^2.3.0",
|
||||
"cli-truncate": "^1.1.0",
|
||||
"coffeescript": "^1.12.6",
|
||||
"columnify": "^1.5.2",
|
||||
"denymount": "^2.2.0",
|
||||
"docker-progress": "^3.0.1",
|
||||
"docker-qemu-transpose": "^0.3.4",
|
||||
"docker-toolbelt": "^3.1.0",
|
||||
"dockerode": "^2.5.0",
|
||||
@ -104,6 +106,7 @@
|
||||
"express": "^4.13.3",
|
||||
"global-tunnel-ng": "^2.1.1",
|
||||
"hasbin": "^1.2.3",
|
||||
"humanize": "0.0.9",
|
||||
"inquirer": "^3.1.1",
|
||||
"is-root": "^1.0.0",
|
||||
"js-yaml": "^3.10.0",
|
||||
@ -111,7 +114,8 @@
|
||||
"lodash": "^4.17.4",
|
||||
"mixpanel": "^0.4.0",
|
||||
"mkdirp": "^0.5.1",
|
||||
"moment": "^2.12.0",
|
||||
"moment": "^2.20.1",
|
||||
"moment-duration-format": "^2.2.1",
|
||||
"mz": "^2.6.0",
|
||||
"node-cleanup": "^2.1.2",
|
||||
"opn": "^5.1.0",
|
||||
@ -121,11 +125,12 @@
|
||||
"raven": "^1.2.0",
|
||||
"reconfix": "^0.0.3",
|
||||
"request": "^2.81.0",
|
||||
"resin-bundle-resolve": "^0.5.1",
|
||||
"resin-bundle-resolve": "^0.5.3",
|
||||
"resin-cli-auth": "^1.2.0",
|
||||
"resin-cli-errors": "^1.2.0",
|
||||
"resin-cli-form": "^1.4.1",
|
||||
"resin-cli-visuals": "^1.4.0",
|
||||
"resin-compose-parse": "^1.5.2",
|
||||
"resin-config-json": "^1.0.0",
|
||||
"resin-device-config": "^4.0.0",
|
||||
"resin-device-init": "^4.0.0",
|
||||
@ -133,7 +138,9 @@
|
||||
"resin-doodles": "0.0.1",
|
||||
"resin-image-fs": "^2.3.0",
|
||||
"resin-image-manager": "^5.0.0",
|
||||
"resin-multibuild": "^0.5.1",
|
||||
"resin-preload": "^6.0.0-beta4",
|
||||
"resin-release": "^1.1.1",
|
||||
"resin-sdk": "^9.0.0-beta7",
|
||||
"resin-sdk-preconfigured": "^6.9.0",
|
||||
"resin-settings-client": "^3.6.1",
|
||||
@ -143,10 +150,14 @@
|
||||
"rindle": "^1.0.0",
|
||||
"semver": "^5.3.0",
|
||||
"stream-to-promise": "^2.2.0",
|
||||
"string-width": "^2.1.1",
|
||||
"through2": "^2.0.3",
|
||||
"tmp": "0.0.31",
|
||||
"ts-node": "^3.3.0",
|
||||
"umount": "^1.1.6",
|
||||
"unzip2": "^0.2.5",
|
||||
"update-notifier": "^2.2.0"
|
||||
"update-notifier": "^2.2.0",
|
||||
"window-size": "^1.1.0"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"removedrive": "^1.0.0"
|
||||
|
Loading…
Reference in New Issue
Block a user