no updates

Aleksis Brezas 2015-01-14 03:05:23 +00:00 committed by Pablo Carranza Vélez
parent d98c9ae180
commit 0665054a79
6 changed files with 55 additions and 215 deletions

Makefile

@@ -63,9 +63,8 @@ endif
-	-docker rm build-supervisor-latest 2> /dev/null
 
 run-supervisor-x86_64:
-	docker run --privileged -d -v /var/run/docker.sock:/run/docker.sock -e API_ENDPOINT=https://staging.resin.io -e REGISTRY_ENDPOINT=registry.staging.resin.io -e PUBNUB_SUBSCRIBE_KEY=sub-c-bananas -e PUBNUB_PUBLISH_KEY=pub-c-bananas -e MIXPANEL_TOKEN=bananasbananas resin/x86_64-supervisor /start
+	docker run --privileged -d -v /var/run/docker.sock:/run/docker.sock -v /boot/config.json:/boot/config.json -v /resin-data/resin-supervisor:/data -v /proc/net/fib_trie:/mnt/fib_trie -v /var/log/supervisor-log:/var/log -e API_ENDPOINT=https://staging.resin.io -e REGISTRY_ENDPOINT=registry.staging.resin.io -e PUBNUB_SUBSCRIBE_KEY=sub-c-bananas -e PUBNUB_PUBLISH_KEY=pub-c-bananas -e MIXPANEL_TOKEN=bananasbananas resin/x86_64-supervisor /start
 
 run-supervisor-i386:
-	docker run --privileged -d -v /var/run/docker.sock:/run/docker.sock -e API_ENDPOINT=https://staging.resin.io -e REGISTRY_ENDPOINT=registry.staging.resin.io -e PUBNUB_SUBSCRIBE_KEY=sub-c-bananas -e PUBNUB_PUBLISH_KEY=pub-c-bananas -e MIXPANEL_TOKEN=bananasbananas resin/i386-supervisor /start
+	docker run --privileged -d -v /var/run/docker.sock:/run/docker.sock -v /boot/config.json:/boot/config.json -v /:/mnt -v /resin-data/resin-supervisor:/data -v /proc/net/fib_trie:/mnt/fib_trie -v /var/log/supervisor-log:/var/log -e API_ENDPOINT=https://staging.resin.io -e REGISTRY_ENDPOINT=registry.staging.resin.io -e PUBNUB_SUBSCRIBE_KEY=sub-c-bananas -e PUBNUB_PUBLISH_KEY=pub-c-bananas -e MIXPANEL_TOKEN=bananasbananas resin/i386-supervisor /start
 
 .PHONY: supervisor supervisor-accelerated run-supervisor-x86_64 run-supervisor-i386

(supervisord program config)

@@ -1,6 +1,6 @@
 [program:resin-supervisor]
 directory=/app
-command=node src/supervisor.js
+command=node src/app.js
 redirect_stderr=true
 autorestart=true
 autostart=false

src/api.coffee

@@ -3,7 +3,6 @@ fs = Promise.promisifyAll require 'fs'
 utils = require './utils'
 express = require 'express'
 application = require './application'
-supervisor = require './supervisor-update'
 tty = require './lib/tty'
 knex = require './db'
 
@@ -21,11 +20,6 @@ api.post '/v1/update', (req, res) ->
 	application.update()
 	res.send(204)
 
-api.post '/v1/update-supervisor', (req, res) ->
-	console.log('Got supervisor update')
-	supervisor.update()
-	res.send(204)
-
 api.post '/v1/spawn-tty', (req, res) ->
 	appId = req.body.appId
 	utils.mixpanelTrack('Spawn tty', appId)

src/app.coffee

@@ -4,66 +4,65 @@ utils = require './utils'
 bootstrap = require './bootstrap'
 config = require './config'
 
-utils.mixpanelTrack('Supervisor start')
+process.on 'uncaughtException', (e) ->
+	console.error('Got unhandled exception', e, e?.stack)
+knex.init.then ->
+	utils.mixpanelTrack('Supervisor start')
 
-console.log('Starting connectivity check..')
-utils.connectivityCheck()
+	console.log('Starting connectivity check..')
+	utils.connectivityCheck()
 
-knex('config').select('value').where(key: 'uuid').then ([ uuid ]) ->
-	if not uuid?.value
-		console.log('New device detected. Bootstrapping..')
-		retryingBootstrap = ->
-			utils.mixpanelTrack('Device bootstrap')
-			bootstrap().catch (err) ->
-				utils.mixpanelTrack('Device bootstrap failed, retrying', {error: err, delay: config.bootstrapRetryDelay})
-				Promise.delay(config.bootstrapRetryDelay)
-				.then(retryingBootstrap)
-		retryingBootstrap()
-	else
-		uuid.value
-.then (uuid) ->
-	# Persist the uuid in subsequent metrics
-	utils.mixpanelProperties.uuid = uuid
+	knex('config').select('value').where(key: 'uuid').then ([ uuid ]) ->
+		if not uuid?.value
+			console.log('New device detected. Bootstrapping..')
+			retryingBootstrap = ->
+				utils.mixpanelTrack('Device bootstrap')
+				bootstrap().catch (err) ->
+					utils.mixpanelTrack('Device bootstrap failed, retrying', {error: err, delay: config.bootstrapRetryDelay})
+					Promise.delay(config.bootstrapRetryDelay)
+					.then(retryingBootstrap)
+			retryingBootstrap()
+		else
+			uuid.value
+	.then (uuid) ->
+		# Persist the uuid in subsequent metrics
+		utils.mixpanelProperties.uuid = uuid
 
-	api = require './api'
-	application = require './application'
-	supervisor = require './supervisor-update'
-	vpn = require './lib/vpn'
+		api = require './api'
+		application = require './application'
+		vpn = require './lib/vpn'
 
-	console.log('Starting OpenVPN..')
-	setImmediate(vpn.connect)
+		console.log('Starting OpenVPN..')
+		setImmediate(vpn.connect)
 
-	console.log('Starting API server..')
-	api.listen(80)
+		console.log('Starting API server..')
+		api.listen(80)
 
-	console.log('Starting Apps..')
-	knex('app').select()
-	.then (apps) ->
-		Promise.all(apps.map(application.start))
-	.catch (error) ->
-		console.error('Error starting apps:', error)
-	.then ->
-		utils.mixpanelTrack('Start application update poll', {interval: config.appUpdatePollInterval})
-		setInterval(->
-			application.update()
-		, config.appUpdatePollInterval)
-		application.update()
+		console.log('Starting Apps..')
+		knex('app').select()
+		.then (apps) ->
+			Promise.all(apps.map(application.start))
+		.catch (error) ->
+			console.error('Error starting apps:', error)
+		.then ->
+			utils.mixpanelTrack('Start application update poll', {interval: config.appUpdatePollInterval})
+			setInterval(->
+				application.update()
+			, config.appUpdatePollInterval)
+			application.update()
 
-	updateIpAddr = ->
-		utils.findIpAddrs().then (ipAddrs) ->
-			application.updateDeviceInfo(
-				ip_address: ipAddrs.join(' ')
-			)
-	console.log('Starting periodic check for IP addresses..')
-	setInterval(updateIpAddr, 30 * 1000) # Every 30s
-	updateIpAddr()
+		updateIpAddr = ->
+			utils.findIpAddrs().then (ipAddrs) ->
+				application.updateDeviceInfo(
+					ip_address: ipAddrs.join(' ')
+				)
+		console.log('Starting periodic check for IP addresses..')
+		setInterval(updateIpAddr, 30 * 1000) # Every 30s
+		updateIpAddr()
 
-	# Tell the supervisor updater that we have successfully started, so that it can do whatever it needs to.
-	supervisor.startupSuccessful()
-
-	# Let API know we are running a new version
-	console.log('Updating supervisor version:', utils.supervisorVersion)
-	application.updateDeviceInfo(
-		supervisor_version: utils.supervisorVersion
-	)
+		# Let API know we are running a new version
+		console.log('Updating supervisor version:', utils.supervisorVersion)
+		application.updateDeviceInfo(
+			supervisor_version: utils.supervisorVersion
+		)
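
In short, src/app.coffee takes over as the entry point from the deleted src/supervisor.coffee: the uncaughtException handler and the knex.init gate move in, while the supervisor-update require and the startupSuccessful() call go away. A condensed sketch of the resulting startup flow (an outline only, not the verbatim file; retryingBootstrap is the retry wrapper shown in the diff above):

	process.on 'uncaughtException', (e) ->
		console.error('Got unhandled exception', e, e?.stack)

	# Nothing starts until the DB schema exists.
	knex.init.then ->
		utils.connectivityCheck()
		# Resolve the device uuid, bootstrapping (with retries) on first boot.
		knex('config').select('value').where(key: 'uuid')
		.then ([ uuid ]) ->
			uuid?.value or retryingBootstrap()
		.then (uuid) ->
			utils.mixpanelProperties.uuid = uuid
			# ...then start OpenVPN, the API server on port 80, the apps,
			# and the update and IP-address pollers.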

src/supervisor-update.coffee (deleted)

@@ -1,131 +0,0 @@
-config = require './config'
-dockerUtils = require './docker-utils'
-Promise = require 'bluebird'
-_ = require 'lodash'
-es = require 'event-stream'
-fs = Promise.promisifyAll(require('fs'))
-
-{docker} = dockerUtils
-
-localImage = config.localImage
-remoteImage = config.remoteImage
-
-containerIdPromise =
-	fs.readFileAsync( '/proc/1/cgroup' )
-	.then (data) ->
-		data.toString().match( /:cpu:\/docker\/(.*)$/m )[1]
-	.catch (err) ->
-		return process.env.HOSTNAME
-
-currentContainerPromise =
-	containerIdPromise.then (containerId) ->
-		docker.getContainer(containerId)
-
-startNewSupervisor = (currentSupervisor, image, waitForSuccess = true) ->
-	console.log('Creating supervisor container:', image)
-	docker.createContainerAsync(
-		Image: image
-		Cmd: [ '/start' ]
-		Volumes: config.supervisorContainer.Volumes
-		Env: currentSupervisor.Config.Env
-	)
-	.tap (container) ->
-		console.log('Starting supervisor container:', image)
-		container.startAsync(
-			Privileged: true
-			Binds: config.supervisorContainer.Binds
-		)
-	.then (container) ->
-		if !waitForSuccess
-			return
-		# check that next supervisor outputs config.successMessage before this supervisor exits
-		container.attachAsync({ stream: true, stdout: true, stderr: false, tty: true })
-		.then (stream) ->
-			new Promise (resolve, reject) ->
-				es.pipeline(
-					stream
-					es.split()
-					es.mapSync (line) ->
-						# ignore first 8 characters of every line that are a header sent by docker attach
-						data = line.substr(8)
-						if data is config.successMessage
-							resolve(container)
-						return
-				)
-				stream.on 'end', ->
-					reject(new Error('New supervisor stopped before success message'))
-		.timeout(config.restartSuccessTimeout) # wait up to 1 minute
-		.catch (e) ->
-			container.stopAsync()
-			console.error('Container failed to start successfully. Error: ', e)
-			throw e
-	.then ->
-		# We've started the new container, so we're done here! #pray
-		console.log('Removing our container and letting the new supervisor take over')
-		currentContainerPromise.then (container) ->
-			container.removeAsync(force: true)
-		.finally ->
-			# This should never really be reached as docker should already have terminated us,
-			# but in case it hasn't, then we'll just commit harakiri
-			console.log('And process.exiting..')
-			process.exit()
-
-currentConfigPromise = currentContainerPromise.then (container) ->
-	container.inspectAsync()
-.tap (currentSupervisor) ->
-	# The volume keys are the important bit.
-	expectedVolumes = _.sortBy(_.keys(config.supervisorContainer.Volumes))
-	actualVolumes = _.sortBy(_.keys(currentSupervisor.Volumes))
-	expectedBinds = _.sortBy(config.supervisorContainer.Binds)
-	actualBinds = _.sortBy(currentSupervisor.HostConfig.Binds)
-	# Check all the expected binds and volumes exist, if not then start a new supervisor (which will add them correctly)
-	if !_.isEqual(expectedVolumes, actualVolumes) or !_.isEqual(expectedBinds, actualBinds)
-		console.log('Supervisor restart (for binds/mounts)')
-		restart = ->
-			# When restarting for just binds/mounts we just wait for the supervisor updates to start.
-			startNewSupervisor(currentSupervisor, localImage, false)
-			.catch (err) ->
-				console.error('Error restarting', err)
-				# If there's an error just keep attempting to restart to get to a useful state.
-				restart()
-		restart()
-
-# This is a promise that resolves when we have fully initialised.
-exports.initialised = currentConfigPromise.then (currentSupervisor) ->
-	utils = require './utils'
-
-	supervisorUpdating = Promise.resolve()
-
-	exports.update = ->
-		# Make sure only one attempt to update the full supervisor is running at a time, ignoring any errors from
-		# previous update attempts.
-		supervisorUpdating = supervisorUpdating.then ->
-			utils.mixpanelTrack('Supervisor update check')
-			console.log('Fetching supervisor:', remoteImage)
-			dockerUtils.fetchImage(remoteImage)
-			.then ->
-				console.log('Inspecting new supervisor:', remoteImage)
-				docker.getImage(remoteImage).inspectAsync()
-			.then (localImageInfo) ->
-				localImageId = localImageInfo.Id or localImageInfo.id
-				if localImageId is currentSupervisor.Image
-					utils.mixpanelTrack('Supervisor up to date')
-					return
-				utils.mixpanelTrack('Supervisor update start', image: localImageId)
-				startNewSupervisor(currentSupervisor, remoteImage)
-		.catch (err) ->
-			utils.mixpanelTrack('Supervisor update failed', error: err)
-		# The error here is intentionally not propagated further up the chain,
-		# because the supervisor-update module internally handles update failures
-		# and makes sure that ill updates do not affect the rest of the system.
-
-	exports.startupSuccessful = ->
-		# Let the previous supervisor know that we started successfully
-		console.log(config.successMessage)
-		console.log('Tagging ourselves as a working supervisor:', remoteImage)
-		docker.getImage(remoteImage).tagAsync(
-			repo: localImage
-			force: true
-		)
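
One mechanism in the deleted module is worth spelling out: exports.update serializes overlapping update attempts by re-chaining every call onto the single supervisorUpdating promise and swallowing errors, so one failed attempt cannot wedge the queue or leak a rejection. The pattern in isolation (a minimal CoffeeScript sketch; checkAndApplyUpdate is a hypothetical stand-in for the fetch/inspect/start sequence above):

	updating = Promise.resolve()
	update = ->
		# Each call queues behind the previous one.
		updating = updating.then ->
			checkAndApplyUpdate()
		.catch (err) ->
			# Log and swallow, so the next call starts from a settled chain.
			console.error('Supervisor update failed:', err)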

src/supervisor.coffee (deleted)

@@ -1,21 +0,0 @@
-process.on 'uncaughtException', (e) ->
-	console.error('Got unhandled exception', e, e?.stack)
-
-supervisor = require './supervisor-update'
-
-# Make sure the supervisor-update has initialised before we continue, as it will handle restarting to add mounts if
-# necessary.
-supervisor.initialised.then ->
-	# Start the update checks ASAP, as any later point may fail,
-	# but at least if we're checking for updates we may be able to update to make them work!
-	console.log('Starting periodic check for supervisor updates..')
-	setInterval(->
-		supervisor.update()
-	, 5 * 60 * 1000) # Every 5 mins
-	supervisor.update()
-
-	knex = require './db'
-
-	# Wait for the DB schema to be created
-	knex.init.then ->
-		require('./app')