Merge pull request #872 from balena-io/typescript

More typescript conversions
This commit is contained in:
CameronDiver 2019-01-23 09:25:19 +00:00 committed by GitHub
commit bb46cd521c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
18 changed files with 998 additions and 498 deletions

29
package-lock.json generated
View File

@ -1,6 +1,6 @@
{
"name": "balena-supervisor",
"version": "9.2.10",
"version": "9.3.1",
"lockfileVersion": 1,
"requires": true,
"dependencies": {
@ -2657,9 +2657,9 @@
}
},
"docker-toolbelt": {
"version": "3.3.5",
"resolved": "https://registry.npmjs.org/docker-toolbelt/-/docker-toolbelt-3.3.5.tgz",
"integrity": "sha512-VeBiQZnE+8o6+FnydhXQUY8GC3jtr9/cLW5jOKge88wdoQ6Vh1/wn1132JeHuo+05L+U4sjWSebb6WnswCVRKA==",
"version": "3.3.7",
"resolved": "https://registry.npmjs.org/docker-toolbelt/-/docker-toolbelt-3.3.7.tgz",
"integrity": "sha512-L6fYvqknv9t5aE4/gnpB7LtrhhaNZ9f+nlErK0ls5jBI2iOKSPUh6HvEXeitPoSKW2iXAy2315+cwxt3quG4Mg==",
"dev": true,
"requires": {
"@types/bluebird": "^3.5.23",
@ -3546,6 +3546,12 @@
"integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ=",
"dev": true
},
"fp-ts": {
"version": "1.13.0",
"resolved": "https://registry.npmjs.org/fp-ts/-/fp-ts-1.13.0.tgz",
"integrity": "sha512-QDPtCkuLay+dX3zinnvt0ER+j8mdWZZSpM//DIY3GycgwUTDPBKFD7CxX5DU/mIDstcWqGuuUSrMNtB6MWluCQ==",
"dev": true
},
"fragment-cache": {
"version": "0.2.1",
"resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz",
@ -4894,6 +4900,15 @@
"integrity": "sha1-EEqOSqym09jNFXqO+L+rLXo//bY=",
"dev": true
},
"io-ts": {
"version": "1.6.4",
"resolved": "https://registry.npmjs.org/io-ts/-/io-ts-1.6.4.tgz",
"integrity": "sha512-n/a3RiOdsgni4sjsYVtny6/PmzVfU3Gtkc1O06izREueu6NkP57olLa7oACUT+F2LIDnePXY2HA4wxGYqHI05A==",
"dev": true,
"requires": {
"fp-ts": "^1.0.0"
}
},
"ip": {
"version": "1.1.5",
"resolved": "https://registry.npmjs.org/ip/-/ip-1.1.5.tgz",
@ -9007,6 +9022,12 @@
"integrity": "sha1-gIudDlb8Jz2Am6VzOOkpkZoanxo=",
"dev": true
},
"strict-event-emitter-types": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/strict-event-emitter-types/-/strict-event-emitter-types-2.0.0.tgz",
"integrity": "sha512-Nk/brWYpD85WlOgzw5h173aci0Teyv8YdIAEtV+N88nDB0dLlazZyJMIsN6eo1/AR61l+p6CJTG1JIyFaoNEEA==",
"dev": true
},
"string-argv": {
"version": "0.0.2",
"resolved": "https://registry.npmjs.org/string-argv/-/string-argv-0.0.2.tgz",

View File

@ -59,7 +59,7 @@
"deep-object-diff": "^1.1.0",
"docker-delta": "^2.2.4",
"docker-progress": "^3.0.3",
"docker-toolbelt": "^3.3.5",
"docker-toolbelt": "^3.3.7",
"duration-js": "^4.0.0",
"event-stream": "3.3.4",
"express": "^4.0.0",
@ -91,6 +91,7 @@
"rimraf": "^2.6.2",
"rwlock": "^5.0.0",
"shell-quote": "^1.6.1",
"strict-event-emitter-types": "^2.0.0",
"ts-loader": "^5.3.0",
"typed-error": "^2.0.0",
"typescript": "^3.2.2",

View File

@ -17,10 +17,10 @@ updateLock = require './lib/update-lock'
ServiceManager = require './compose/service-manager'
{ Service } = require './compose/service'
Images = require './compose/images'
{ Images } = require './compose/images'
{ NetworkManager } = require './compose/network-manager'
{ Network } = require './compose/network'
Volumes = require './compose/volumes'
{ Volumes } = require './compose/volumes'
Proxyvisor = require './proxyvisor'
@ -469,11 +469,11 @@ module.exports = class ApplicationManager extends EventEmitter
# Unless the update strategy requires an early kill (i.e. kill-then-download, delete-then-download), we only want
# to kill a service once the images for the services it depends on have been downloaded, so as to minimize
# downtime (but not block the killing too much, potentially causing a deadlock)
_dependenciesMetForServiceKill: (target, targetApp, availableImages) =>
_dependenciesMetForServiceKill: (target, targetApp, availableImages) ->
if target.dependsOn?
for dependency in target.dependsOn
dependencyService = _.find(targetApp.services, serviceName: dependency)
if !_.some(availableImages, (image) => image.dockerImageId == dependencyService.image or @images.isSameImage(image, { name: dependencyService.imageName }))
if !_.some(availableImages, (image) -> image.dockerImageId == dependencyService.image or Images.isSameImage(image, { name: dependencyService.imageName }))
return false
return true
@ -563,8 +563,8 @@ module.exports = class ApplicationManager extends EventEmitter
needsDownload = false
# Don't attempt to fetch any images in local mode, they should already be there
if !localMode
needsDownload = !_.some availableImages, (image) =>
image.dockerImageId == target?.config.image or @images.isSameImage(image, { name: target.imageName })
needsDownload = !_.some availableImages, (image) ->
image.dockerImageId == target?.config.image or Images.isSameImage(image, { name: target.imageName })
# This service needs an image download but it's currently downloading, so we wait
if needsDownload and target?.imageId in downloading
@ -671,7 +671,9 @@ module.exports = class ApplicationManager extends EventEmitter
return dbApp
createTargetService: (service, opts) ->
@images.inspectByName(service.image)
# The image class now returns a native promise, so wrap
# this in a bluebird promise until we convert this to typescript
Promise.resolve(@images.inspectByName(service.image))
.catchReturn(NotFoundError, undefined)
.then (imageInfo) ->
serviceOpts = {
@ -818,16 +820,16 @@ module.exports = class ApplicationManager extends EventEmitter
availableAndUnused = _.filter availableWithoutIds, (image) ->
!_.some currentImages.concat(targetImages), (imageInUse) -> _.isEqual(image, imageInUse)
imagesToDownload = _.filter targetImages, (targetImage) =>
!_.some available, (availableImage) => @images.isSameImage(availableImage, targetImage)
imagesToDownload = _.filter targetImages, (targetImage) ->
!_.some available, (availableImage) -> Images.isSameImage(availableImage, targetImage)
# Images that are available but we don't have them in the DB with the exact metadata:
imagesToSave = []
if !localMode
imagesToSave = _.filter targetImages, (targetImage) =>
imagesToSave = _.filter targetImages, (targetImage) ->
isActuallyAvailable = _.some(
available, (availableImage) =>
if @images.isSameImage(availableImage, targetImage)
available, (availableImage) ->
if Images.isSameImage(availableImage, targetImage)
return true
if availableImage.dockerImageId == targetImageDockerIds[targetImage.name]
return true
@ -840,9 +842,9 @@ module.exports = class ApplicationManager extends EventEmitter
return @bestDeltaSource(image, available)
proxyvisorImages = @proxyvisor.imagesInUse(current, target)
imagesToRemove = _.filter availableAndUnused, (image) =>
imagesToRemove = _.filter availableAndUnused, (image) ->
notUsedForDelta = !_.includes(deltaSources, image.name)
notUsedByProxyvisor = !_.some proxyvisorImages, (proxyvisorImage) => @images.isSameImage(image, { name: proxyvisorImage })
notUsedByProxyvisor = !_.some proxyvisorImages, (proxyvisorImage) -> Images.isSameImage(image, { name: proxyvisorImage })
return notUsedForDelta and notUsedByProxyvisor
return { imagesToSave, imagesToRemove }

View File

@ -6,7 +6,7 @@ import { DeviceApplicationState } from './types/state';
import { Logger } from './logger';
import { EventTracker } from './event-tracker';
import Images = require('./compose/images');
import Images from './compose/images';
import ServiceManager = require('./compose/service-manager');
import DB from './db';

View File

@ -1,326 +0,0 @@
Promise = require 'bluebird'
_ = require 'lodash'
EventEmitter = require 'events'
logTypes = require '../lib/log-types'
constants = require '../lib/constants'
validation = require '../lib/validation'
{ DeltaStillProcessingError, NotFoundError } = require '../lib/errors'
# image = {
# name: image registry/repo@digest or registry/repo:tag
# appId
# serviceId
# serviceName
# imageId (from balena API)
# releaseId
# dependent
# dockerImageId
# status Downloading, Downloaded, Deleting
# downloadProgress
# }
hasDigest = (name) ->
name?.split?('@')?[1]?
module.exports = class Images extends EventEmitter
constructor: ({ @docker, @logger, @db }) ->
@imageCleanupFailures = {}
# A store of volatile state for images (e.g. download progress), indexed by imageId
@volatileState = {}
reportChange: (imageId, status) ->
if status?
@volatileState[imageId] ?= { imageId }
_.merge(@volatileState[imageId], status)
@emit('change')
else if imageId? and @volatileState[imageId]?
delete @volatileState[imageId]
@emit('change')
triggerFetch: (image, opts, onFinish = _.noop) =>
onProgress = (progress) =>
# Only report the percentage if we haven't finished fetching
if @volatileState[image.imageId]?
@reportChange(image.imageId, { downloadProgress: progress.percentage })
@normalise(image.name)
.then (imageName) =>
image = _.clone(image)
image.name = imageName
@markAsSupervised(image)
.then =>
@inspectByName(imageName)
.then (img) =>
@db.models('image').update({ dockerImageId: img.Id }).where(image)
.then ->
onFinish(true)
return null
.catch =>
@reportChange(image.imageId, _.merge(_.clone(image), { status: 'Downloading', downloadProgress: 0 }))
Promise.try =>
if opts.delta and opts.deltaSource?
@logger.logSystemEvent(logTypes.downloadImageDelta, { image })
@inspectByName(opts.deltaSource)
.then (srcImage) =>
opts.deltaSourceId = srcImage.Id
@docker.fetchDeltaWithProgress(imageName, opts, onProgress)
.tap (id) =>
if !hasDigest(imageName)
@docker.getRepoAndTag(imageName)
.then ({ repo, tag }) =>
@docker.getImage(id).tag({ repo, tag })
else
@logger.logSystemEvent(logTypes.downloadImage, { image })
@docker.fetchImageWithProgress(imageName, opts, onProgress)
.then (id) =>
@db.models('image').update({ dockerImageId: id }).where(image)
.then =>
@logger.logSystemEvent(logTypes.downloadImageSuccess, { image })
return true
.catch DeltaStillProcessingError, =>
# If this is a delta image pull, and the delta still hasn't finished generating,
# don't show a failure message, and instead just inform the user that it's remotely
# processing
@logger.logSystemEvent(logTypes.deltaStillProcessingError)
return false
.catch (err) =>
@logger.logSystemEvent(logTypes.downloadImageError, { image, error: err })
return false
.then (success) =>
@reportChange(image.imageId)
onFinish(success)
return null
return null
format: (image) ->
image.serviceId ?= null
image.serviceName ?= null
image.imageId ?= null
image.releaseId ?= null
image.dependent ?= 0
image.dockerImageId ?= null
return _.omit(image, 'id')
markAsSupervised: (image) =>
image = @format(image)
@db.upsertModel('image', image, image)
update: (image) =>
image = @format(image)
@db.models('image').update(image).where(name: image.name)
save: (image) =>
@inspectByName(image.name)
.then (img) =>
image = _.clone(image)
image.dockerImageId = img.Id
@markAsSupervised(image)
_removeImageIfNotNeeded: (image) =>
# We first fetch the image from the DB to ensure it exists,
# and get the dockerImageId and any other missing field
@db.models('image').select().where(image)
.then (images) =>
if images.length == 0
return false
img = images[0]
Promise.try =>
if !img.dockerImageId?
# Legacy image from before we started using dockerImageId, so we try to remove it by name
@docker.getImage(img.name).remove(force: true)
.return(true)
else
@db.models('image').where(dockerImageId: img.dockerImageId).select()
.then (imagesFromDB) =>
if imagesFromDB.length == 1 and _.isEqual(@format(imagesFromDB[0]), @format(img))
@reportChange(image.imageId, _.merge(_.clone(image), { status: 'Deleting' }))
@logger.logSystemEvent(logTypes.deleteImage, { image })
@docker.getImage(img.dockerImageId).remove(force: true)
.return(true)
else if !hasDigest(img.name)
# Image has a regular tag, so we might have to remove unnecessary tags
@docker.getImage(img.dockerImageId).inspect()
.then (dockerImg) =>
differentTags = _.reject(imagesFromDB, name: img.name)
if dockerImg.RepoTags.length > 1 and
_.includes(dockerImg.RepoTags, img.name) and
_.some(dockerImg.RepoTags, (tag) -> _.some(differentTags, name: tag))
@docker.getImage(img.name).remove(noprune: true)
.return(false)
else
return false
.catchReturn(NotFoundError, false)
.tap =>
@db.models('image').del().where(id: img.id)
.then (removed) =>
if removed
@logger.logSystemEvent(logTypes.deleteImageSuccess, { image })
.finally =>
@reportChange(image.imageId)
remove: (image) =>
@_removeImageIfNotNeeded(image)
.tapCatch (err) =>
@logger.logSystemEvent(logTypes.deleteImageError, { image, error: err })
getByDockerId: (id) =>
@db.models('image').where(dockerImageId: id).first()
removeByDockerId: (id) =>
@getByDockerId(id)
.then(@remove)
getNormalisedTags: (image) ->
Promise.map(image.RepoTags ? [], @normalise)
_withImagesFromDockerAndDB: (callback) =>
Promise.join(
@docker.listImages(digests: true)
.map (image) =>
image.NormalisedRepoTags = @getNormalisedTags(image)
Promise.props(image)
@db.models('image').select()
callback
)
_matchesTagOrDigest: (image, dockerImage) ->
return _.includes(dockerImage.NormalisedRepoTags, image.name) or
_.some(dockerImage.RepoDigests, (digest) -> Images.hasSameDigest(image.name, digest))
_isAvailableInDocker: (image, dockerImages) =>
_.some dockerImages, (dockerImage) =>
@_matchesTagOrDigest(image, dockerImage) or image.dockerImageId == dockerImage.Id
# Gets all images that are supervised, in an object containing name, appId, serviceId, serviceName, imageId, dependent.
getAvailable: (localMode) =>
@_withImagesFromDockerAndDB (dockerImages, supervisedImages) =>
_.filter(supervisedImages, (image) => @_isAvailableInDocker(image, dockerImages))
.then (images) =>
if localMode
# Get all images present on the local daemon which are tagged as local images
return @_getLocalModeImages().then (localImages) ->
images.concat(localImages)
return images
getDownloadingImageIds: =>
Promise.try =>
return _.map(_.keys(_.pickBy(@volatileState, status: 'Downloading')), validation.checkInt)
cleanupDatabase: =>
@_withImagesFromDockerAndDB (dockerImages, supervisedImages) =>
Promise.map supervisedImages, (image) =>
# If the supervisor was interrupted between fetching an image and storing its id,
# some entries in the db might need to have the dockerImageId populated
if !image.dockerImageId?
id = _.find(dockerImages, (dockerImage) => @_matchesTagOrDigest(image, dockerImage))?.Id
if id?
@db.models('image').update(dockerImageId: id).where(image)
.then ->
image.dockerImageId = id
.then =>
_.filter(supervisedImages, (image) => !@_isAvailableInDocker(image, dockerImages))
.then (imagesToRemove) =>
ids = _.map(imagesToRemove, 'id')
@db.models('image').del().whereIn('id', ids)
getStatus: (localMode) =>
@getAvailable(localMode)
.map (image) ->
image.status = 'Downloaded'
image.downloadProgress = null
return image
.then (images) =>
status = _.clone(@volatileState)
for image in images
status[image.imageId] ?= image
return _.values(status)
_getImagesForCleanup: =>
images = []
Promise.join(
@docker.getRegistryAndName(constants.supervisorImage)
@docker.getImage(constants.supervisorImage).inspect()
@db.models('image').select('dockerImageId')
.map((image) -> image.dockerImageId)
(supervisorImageInfo, supervisorImage, usedImageIds) =>
isSupervisorRepoTag = ({ imageName, tagName }) ->
supervisorRepos = [ supervisorImageInfo.imageName ]
if _.startsWith(supervisorImageInfo.imageName, 'balena/') # We're on a new balena/ARCH-supervisor image
supervisorRepos.push(supervisorImageInfo.imageName.replace(/^balena/, 'resin'))
return _.some(supervisorRepos, (repo) -> imageName == repo) and tagName != supervisorImageInfo.tagName
isDangling = (image) ->
# Looks like dangling images show up with these weird RepoTags and RepoDigests sometimes
(_.isEmpty(image.RepoTags) or _.isEqual(image.RepoTags, [ '<none>:<none>' ])) and
(_.isEmpty(image.RepoDigests) or _.isEqual(image.RepoDigests, [ '<none>@<none>' ]))
@docker.listImages(digests: true)
.map (image) =>
# Cleanup should remove truly dangling images (i.e. dangling and with no digests)
if isDangling(image) and not (image.Id in usedImageIds)
images.push(image.Id)
else if !_.isEmpty(image.RepoTags) and image.Id != supervisorImage.Id
# We also remove images from the supervisor repository with a different tag
Promise.map image.RepoTags, (repoTag) =>
@docker.getRegistryAndName(repoTag)
.then (imageNameComponents) ->
if isSupervisorRepoTag(imageNameComponents)
images.push(image.Id)
)
.then =>
toCleanup = _.filter _.uniq(images), (image) =>
!@imageCleanupFailures[image]? or Date.now() - @imageCleanupFailures[image] > constants.imageCleanupErrorIgnoreTimeout
return toCleanup
inspectByName: (imageName) =>
@docker.getImage(imageName).inspect()
.catch NotFoundError, (err) =>
digest = imageName.split('@')[1]
Promise.try =>
if digest?
@db.models('image').where('name', 'like', "%@#{digest}").select()
else
@db.models('image').where(name: imageName).select()
.then (imagesFromDB) =>
for image in imagesFromDB
if image.dockerImageId?
return @docker.getImage(image.dockerImageId).inspect()
throw err
normalise: (imageName) =>
@docker.normaliseImageName(imageName)
isCleanupNeeded: =>
@_getImagesForCleanup()
.then (imagesForCleanup) ->
return !_.isEmpty(imagesForCleanup)
# Delete dangling images and old supervisor images
cleanup: =>
@_getImagesForCleanup()
.map (image) =>
console.log("Cleaning up #{image}")
@docker.getImage(image).remove(force: true)
.then =>
delete @imageCleanupFailures[image]
.catch (err) =>
@logger.logSystemMessage("Error cleaning up #{image}: #{err.message} - will ignore for 1 hour", { error: err }, 'Image cleanup error')
@imageCleanupFailures[image] = Date.now()
@hasSameDigest: (name1, name2) ->
hash1 = name1?.split('@')[1]
hash2 = name2?.split('@')[1]
return hash1? and hash1 == hash2
@isSameImage: (image1, image2) ->
return image1.name == image2.name or Images.hasSameDigest(image1.name, image2.name)
isSameImage: @isSameImage
_getLocalModeImages: =>
Promise.join(
@docker.listImages(filters: label: [ 'io.resin.local.image=1' ])
@docker.listImages(filters: label: [ 'io.balena.local.image=1' ])
(legacyImages, currentImages) ->
_.unionBy(legacyImages, currentImages, 'Id')
)

View File

@ -1,7 +0,0 @@
import Image from '../types/image';
declare class Images {
public getStatus(): Image[];
}
export = Images;

652
src/compose/images.ts Normal file
View File

@ -0,0 +1,652 @@
import * as Bluebird from 'bluebird';
import * as Docker from 'dockerode';
import { EventEmitter } from 'events';
import * as _ from 'lodash';
import StrictEventEmitter from 'strict-event-emitter-types';
import { SchemaReturn } from '../config/schema-type';
import Database from '../db';
import * as constants from '../lib/constants';
import DockerUtils = require('../lib/docker-utils');
import { DeltaStillProcessingError, NotFoundError } from '../lib/errors';
import * as LogTypes from '../lib/log-types';
import * as validation from '../lib/validation';
import Logger from '../logger';
// Events emitted by the Images class. Only 'change' exists today; it fires
// whenever the volatile download/delete state of an image changes.
interface ImageEvents {
	change: void;
}
// EventEmitter narrowed to the events declared above.
type ImageEventEmitter = StrictEventEmitter<EventEmitter, ImageEvents>;

// Dependencies injected into the Images constructor.
interface ImageConstructOpts {
	docker: DockerUtils;
	logger: Logger;
	db: Database;
}

// Payload passed to the progress callback while fetching an image or delta.
interface FetchProgressEvent {
	percentage: number;
}

// TODO: This is copied from src/lib/docker-utils.d.ts but because of the
// export mechanism used, we can't export it. Once we convert docker-utils
// to typescript, remove this
interface DeltaFetchOptions {
	deltaRequestTimeout: number;
	deltaApplyTimeout: number;
	deltaRetryCount: number;
	deltaRetryInterval: number;
	uuid: string;
	currentApiKey: string;
	deltaEndpoint: string;
	apiEndpoint: string;
	deltaSource: string;
	deltaSourceId: string;
	deltaVersion: string;
}

// Fetch options come from the config schema, plus an optional delta source
// image name chosen by the application manager.
type FetchOptions = SchemaReturn<'fetchOptions'> & { deltaSource?: string };

// Database representation of a supervised image.
export interface Image {
	id: number;
	// image registry/repo@digest or registry/repo:tag
	name: string;
	appId: number;
	serviceId: number;
	serviceName: string;
	// Id from balena api
	imageId: number;
	releaseId: number;
	dependent: number;
	dockerImageId: string;
	status: 'Downloading' | 'Downloaded' | 'Deleting';
	downloadProgress: Nullable<number>;
}

// TODO: This is necessary for the format() method, but I'm not sure
// why, and it seems like a bad idea as it is. Fix the need for this.
type MaybeImage = { [key in keyof Image]: Image[key] | null };

// TODO: Remove the need for this type...
// Docker image info with the repo tags normalised via docker.normaliseImageName.
type NormalisedDockerImage = Docker.ImageInfo & {
	NormalisedRepoTags: string[];
};
export class Images extends (EventEmitter as {
new (): ImageEventEmitter;
}) {
private docker: DockerUtils;
private logger: Logger;
private db: Database;
private imageCleanupFailures: Dictionary<number> = {};
// A store of volatile state for images (e.g. download progress), indexed by imageId
private volatileState: { [imageId: number]: Image } = {};
public constructor(opts: ImageConstructOpts) {
super();
this.docker = opts.docker;
this.logger = opts.logger;
this.db = opts.db;
}
public async triggerFetch(
image: Image,
opts: FetchOptions,
onFinish = _.noop,
): Promise<null> {
const onProgress = (progress: FetchProgressEvent) => {
// Only report the percentage if we haven't finished fetching
if (this.volatileState[image.imageId] != null) {
this.reportChange(image.imageId, {
downloadProgress: progress.percentage,
});
}
};
let success: boolean;
try {
const imageName = await this.normalise(image.name);
image = _.clone(image);
image.name = imageName;
await this.markAsSupervised(image);
const img = await this.inspectByName(image.name);
await this.db
.models('image')
.update({ dockerImageId: img.Id })
.where(image);
onFinish(true);
return null;
} catch (e) {
if (!NotFoundError(e)) {
throw e;
}
this.reportChange(
image.imageId,
_.merge(_.clone(image), { status: 'Downloading', downloadProgress: 0 }),
);
try {
let id;
if (opts.delta && opts.deltaSource != null) {
id = await this.fetchDelta(image, opts, onProgress);
} else {
id = await this.fetchImage(image, opts, onProgress);
}
await this.db
.models('image')
.update({ dockerImageId: id })
.where(image);
this.logger.logSystemEvent(LogTypes.downloadImageSuccess, { image });
success = true;
} catch (err) {
if (err instanceof DeltaStillProcessingError) {
// If this is a delta image pull, and the delta still hasn't finished generating,
// don't show a failure message, and instead just inform the user that it's remotely
// processing
this.logger.logSystemEvent(LogTypes.deltaStillProcessingError, {});
} else {
this.logger.logSystemEvent(LogTypes.downloadImageError, {
image,
error: err,
});
}
success = false;
}
}
this.reportChange(image.imageId);
onFinish(success);
return null;
}
public async remove(image: Image): Promise<void> {
try {
await this.removeImageIfNotNeeded(image);
} catch (e) {
this.logger.logSystemEvent(LogTypes.deleteImageError, {
image,
error: e,
});
throw e;
}
}
public async getByDockerId(id: string): Promise<Image> {
return await this.db
.models('image')
.where({ dockerImageId: id })
.first();
}
public async removeByDockerId(id: string): Promise<void> {
const image = await this.getByDockerId(id);
await this.remove(image);
}
private async getNormalisedTags(image: Docker.ImageInfo): Promise<string[]> {
return await Bluebird.map(
image.RepoTags != null ? image.RepoTags : [],
this.normalise.bind(this),
);
}
private withImagesFromDockerAndDB<T>(
cb: (dockerImages: NormalisedDockerImage[], composeImages: Image[]) => T,
) {
return Bluebird.join(
Bluebird.resolve(this.docker.listImages({ digests: true })).map(image => {
const newImage: Dictionary<unknown> = _.clone(image);
newImage.NormalisedRepoTags = this.getNormalisedTags(image);
return Bluebird.props(newImage);
}),
this.db.models('image').select(),
cb,
);
}
private matchesTagOrDigest(
image: Image,
dockerImage: NormalisedDockerImage,
): boolean {
return (
_.includes(dockerImage.NormalisedRepoTags, image.name) ||
_.some(dockerImage.RepoDigests, digest =>
Images.hasSameDigest(image.name, digest),
)
);
}
private isAvailableInDocker(
image: Image,
dockerImages: NormalisedDockerImage[],
): boolean {
return _.some(
dockerImages,
dockerImage =>
this.matchesTagOrDigest(image, dockerImage) ||
image.dockerImageId === dockerImage.Id,
);
}
public async getAvailable(_localMode: boolean): Promise<Image[]> {
const images = await this.withImagesFromDockerAndDB(
(dockerImages, supervisedImages) =>
_.filter(supervisedImages, image =>
this.isAvailableInDocker(image, dockerImages),
),
);
// if (localMode) {
// // Get all images present on the local daemon which are tagged as local images
// return images.concat(await this.getLocalModeImages());
// }
return images;
}
// TODO: Why does this need a Bluebird.try?
public getDownloadingImageIds() {
return Bluebird.try(() =>
_(this.volatileState)
.pickBy({ status: 'Downloading' })
.keys()
.map(validation.checkInt)
.value(),
);
}
public async cleanupDatabase(): Promise<void> {
const imagesToRemove = await this.withImagesFromDockerAndDB(
async (dockerImages, supervisedImages) => {
for (const supervisedImage of supervisedImages) {
// If the supervisor was interrupted between fetching an image and storing its id,
// some entries in the db might need to have the dockerImageId populated
if (supervisedImage.dockerImageId == null) {
const id = _.get(
_.find(dockerImages, dockerImage =>
this.matchesTagOrDigest(supervisedImage, dockerImage),
),
'Id',
);
if (id != null) {
await this.db
.models('image')
.update({ dockerImageId: id })
.where(supervisedImage);
supervisedImage.dockerImageId = id;
}
}
}
return _.filter(
supervisedImages,
image => !this.isAvailableInDocker(image, dockerImages),
);
},
);
const ids = _.map(imagesToRemove, 'id');
await this.db
.models('image')
.del()
.whereIn('id', ids);
}
public async getStatus(localMode: boolean) {
const images = await this.getAvailable(localMode);
for (const image of images) {
image.status = 'Downloaded';
image.downloadProgress = null;
}
const status = _.clone(this.volatileState);
for (const image of images) {
if (status[image.imageId] == null) {
status[image.imageId] = image;
}
}
return _.values(status);
}
public async update(image: Image): Promise<void> {
image = this.format(image);
await this.db
.models('image')
.update(image)
.where({ name: image.name });
}
public async save(image: Image): Promise<void> {
const img = await this.inspectByName(image.name);
image = _.clone(image);
image.dockerImageId = img.Id;
await this.markAsSupervised(image);
}
private async getImagesForCleanup(): Promise<string[]> {
const images = [];
const [
supervisorImageInfo,
supervisorImage,
usedImageIds,
] = await Promise.all([
this.docker.getRegistryAndName(constants.supervisorImage),
this.docker.getImage(constants.supervisorImage).inspect(),
this.db
.models('image')
.select('dockerImageId')
.map((img: Image) => img.dockerImageId),
]);
const supervisorRepos = [supervisorImageInfo.imageName];
// If we're on the new balena/ARCH-supervisor image
if (_.startsWith(supervisorImageInfo.imageName, 'balena/')) {
supervisorRepos.push(
supervisorImageInfo.imageName.replace(/^balena/, 'resin'),
);
}
const isSupervisorRepoTag = ({
imageName,
tagName,
}: {
imageName: string;
tagName: string;
}) => {
return (
_.some(supervisorRepos, repo => imageName === repo) &&
tagName !== supervisorImageInfo.tagName
);
};
const dockerImages = await this.docker.listImages({ digests: true });
for (const image of dockerImages) {
// Cleanup should remove truly dangling images (i.e dangling and with no digests)
if (Images.isDangling(image) && !_.includes(usedImageIds, image.Id)) {
images.push(image.Id);
} else if (
!_.isEmpty(image.RepoTags) &&
image.Id !== supervisorImage.Id
) {
// We also remove images from the supervisor repository with a different tag
for (const tag of image.RepoTags) {
const imageNameComponents = await this.docker.getRegistryAndName(tag);
if (isSupervisorRepoTag(imageNameComponents)) {
images.push(image.Id);
}
}
}
}
const toCleanup = _(images)
.uniq()
.filter(
image =>
this.imageCleanupFailures[image] == null ||
Date.now() - this.imageCleanupFailures[image] >
constants.imageCleanupErrorIgnoreTimeout,
)
.value();
return toCleanup;
}
public async inspectByName(
imageName: string,
): Promise<Docker.ImageInspectInfo> {
try {
return await this.docker.getImage(imageName).inspect();
} catch (e) {
if (NotFoundError(e)) {
const digest = imageName.split('@')[1];
let imagesFromDb: Image[];
if (digest != null) {
imagesFromDb = await this.db
.models('image')
.where('name', 'like', `%@${digest}`);
} else {
imagesFromDb = await this.db
.models('image')
.where({ name: imageName })
.select();
}
for (const image of imagesFromDb) {
if (image.dockerImageId != null) {
return await this.docker.getImage(image.dockerImageId).inspect();
}
}
}
throw e;
}
}
public async isCleanupNeeded() {
return !_.isEmpty(await this.getImagesForCleanup());
}
public async cleanup() {
const images = await this.getImagesForCleanup();
for (const image of images) {
console.log(`Cleaning up ${image}`);
try {
await this.docker.getImage(image).remove({ force: true });
delete this.imageCleanupFailures[image];
} catch (e) {
this.logger.logSystemMessage(
`Error cleaning up ${image}: ${e.message} - will ignore for 1 hour`,
{ error: e },
'Image cleanup error',
);
this.imageCleanupFailures[image] = Date.now();
}
}
}
public static isSameImage(image1: Image, image2: Image): boolean {
return (
image1.name === image2.name ||
Images.hasSameDigest(image1.name, image2.name)
);
}
// private async getLocalModeImages() {
// const [legacy, current] = await Promise.all([
// this.docker.listImages({
// filters: { label: ['io.resin.local.image=1'] },
// }),
// this.docker.listImages({
// filters: { label: ['io.balena.local.image=1'] },
// }),
// ]);
// const dockerImages = _.unionBy(legacy, current, 'Id');
// }
private normalise(imageName: string): Bluebird<string> {
return this.docker.normaliseImageName(imageName);
}
private static isDangling(image: Docker.ImageInfo): boolean {
return (
(_.isEmpty(image.RepoTags) ||
_.isEqual(image.RepoTags, ['<none>:<none>'])) &&
(_.isEmpty(image.RepoDigests) ||
_.isEqual(image.RepoDigests, ['<none>@<none>']))
);
}
private static hasSameDigest(
name1: Nullable<string>,
name2: Nullable<string>,
): boolean {
const hash1 = name1 != null ? name1.split('@')[1] : null;
const hash2 = name2 != null ? name2.split('@')[1] : null;
return hash1 != null && hash1 === hash2;
}
private async removeImageIfNotNeeded(image: Image): Promise<void> {
let removed: boolean;
// We first fetch the image from the DB to ensure it exists,
// and get the dockerImageId and any other missing fields
const images = await this.db
.models('image')
.select()
.where(image);
if (images.length === 0) {
removed = false;
}
const img = images[0];
try {
if (img.dockerImageId == null) {
// Legacy image from before we started using dockerImageId, so we try to remove it
// by name
await this.docker.getImage(img.name).remove({ force: true });
removed = true;
} else {
const imagesFromDb = await this.db
.models('image')
.where({ dockerImageId: img.dockerImageId })
.select();
if (
imagesFromDb.length === 1 &&
_.isEqual(this.format(imagesFromDb[0]), this.format(img))
) {
this.reportChange(
image.imageId,
_.merge(_.clone(image), { status: 'Deleting' }),
);
this.logger.logSystemEvent(LogTypes.deleteImage, { image });
this.docker.getImage(img.dockerImageId).remove({ force: true });
removed = true;
} else if (!Images.hasDigest(img.name)) {
// Image has a regular tag, so we might have to remove unnecessary tags
const dockerImage = await this.docker
.getImage(img.dockerImageId)
.inspect();
const differentTags = _.reject(imagesFromDb, { name: img.name });
if (
dockerImage.RepoTags.length > 1 &&
_.includes(dockerImage.RepoTags, img.name) &&
_.some(dockerImage.RepoTags, t =>
_.some(differentTags, { name: t }),
)
) {
await this.docker.getImage(img.name).remove({ noprune: true });
}
removed = false;
} else {
removed = false;
}
}
} catch (e) {
if (NotFoundError(e)) {
removed = false;
} else {
throw e;
}
} finally {
this.reportChange(image.imageId);
}
await this.db
.models('image')
.del()
.where({ id: img.id });
if (removed) {
this.logger.logSystemEvent(LogTypes.deleteImageSuccess, { image });
}
}
// Normalise the image record and upsert it into the database, using the
// formatted record itself as the uniqueness filter for the upsert.
private async markAsSupervised(image: Image): Promise<void> {
	const formatted = this.format(image);
	await this.db.upsertModel('image', formatted, formatted);
}
// Normalise a (possibly partial) image record: fill every optional field
// with an explicit default and drop the database `id`, so that records
// can be compared by content.
private format(image: MaybeImage): Image {
	const withDefaults = _.defaults(image, {
		serviceId: null,
		serviceName: null,
		imageId: null,
		releaseId: null,
		dependent: 0,
		dockerImageId: null,
	});
	return _.omit(withDefaults, 'id') as Image;
}
// Download an image via a binary delta from `deltaSource`, reporting
// progress through `onProgress`, and return the resulting docker image id.
private async fetchDelta(
	image: Image,
	opts: FetchOptions,
	onProgress: (evt: FetchProgressEvent) => void,
): Promise<string> {
	this.logger.logSystemEvent(LogTypes.downloadImageDelta, { image });

	// The fetch options also carry the delta-specific fields
	const deltaOpts = (opts as unknown) as DeltaFetchOptions;

	// Resolve the delta source to its engine-side id before requesting the delta
	const srcInspect = await this.inspectByName(deltaOpts.deltaSource);
	deltaOpts.deltaSourceId = srcInspect.Id;

	const id = await this.docker.fetchDeltaWithProgress(
		image.name,
		deltaOpts,
		onProgress,
	);

	// Digest-pinned references cannot be tagged; only tag plain repo:tag names
	if (!Images.hasDigest(image.name)) {
		const { repo, tag } = await this.docker.getRepoAndTag(image.name);
		await this.docker.getImage(id).tag({ repo, tag });
	}

	return id;
}
// Pull an image by name, logging the download event and delegating the
// actual fetch (with progress reporting) to the docker utils helper.
private fetchImage(
	image: Image,
	opts: FetchOptions,
	onProgress: (evt: FetchProgressEvent) => void,
): Bluebird<string> {
	this.logger.logSystemEvent(LogTypes.downloadImage, { image });
	const { name } = image;
	return this.docker.fetchImageWithProgress(name, opts, onProgress);
}
// TODO: find out if imageId can actually be null
// Track a transient per-image status in the volatile state and emit a
// 'change' event; calling without a status clears the entry instead.
private reportChange(imageId: Nullable<number>, status?: Partial<Image>) {
	if (imageId == null) {
		return;
	}
	if (status != null) {
		// Create the volatile entry on first report, then merge the update in
		if (this.volatileState[imageId] == null) {
			this.volatileState[imageId] = { imageId } as Image;
		}
		_.merge(this.volatileState[imageId], status);
		return this.emit('change');
	}
	if (this.volatileState[imageId] != null) {
		// No status means the operation is over: drop the volatile entry
		delete this.volatileState[imageId];
		return this.emit('change');
	}
}
// Whether an image reference is digest-pinned, i.e. contains a
// `@<digest>` suffix after the repository name.
private static hasDigest(name: Nullable<string>): boolean {
	if (name == null) {
		return false;
	}
	const digestPart = name.split('@')[1];
	return digestPart != null;
}
}
export default Images;

View File

@ -1,106 +0,0 @@
Promise = require 'bluebird'
_ = require 'lodash'
path = require 'path'
logTypes = require '../lib/log-types'
constants = require '../lib/constants'
{ checkInt } = require '../lib/validation'
{ NotFoundError } = require '../lib/errors'
{ defaultLegacyVolume } = require '../lib/migration'
{ safeRename } = require '../lib/fs-utils'
ComposeUtils = require './utils'
# Manages application docker volumes: listing supervised volumes, creating
# them (including migrating the legacy /data directory) and removing them.
# NOTE(review): this file is deleted in this commit in favour of the
# TypeScript implementation in src/compose/volumes.ts.
module.exports = class Volumes
constructor: ({ @docker, @logger }) ->
# Convert a docker volume inspect result into the supervisor's
# { name, appId, config, handle } shape. Volume names are expected to
# match "<appId>_<name>" (enforced by the regex below).
format: (volume) ->
m = volume.Name.match(/^([0-9]+)_(.+)$/)
appId = checkInt(m[1])
name = m[2]
return {
name: name
appId: appId
config: {
labels: _.omit(ComposeUtils.normalizeLabels(volume.Labels), _.keys(constants.defaultVolumeLabels))
driverOpts: volume.Options
}
handle: volume
}
# List volumes carrying either the legacy io.resin.supervised label or
# the current io.balena.supervised label, deduplicated by Name.
_listWithBothLabels: =>
Promise.join(
@docker.listVolumes(filters: label: [ 'io.resin.supervised' ])
@docker.listVolumes(filters: label: [ 'io.balena.supervised' ])
(legacyVolumesResponse, currentVolumesResponse) ->
legacyVolumes = legacyVolumesResponse.Volumes ? []
currentVolumes = currentVolumesResponse.Volumes ? []
return _.unionBy(legacyVolumes, currentVolumes, 'Name')
)
# Inspect every supervised volume and return the formatted records
getAll: =>
@_listWithBothLabels()
.map (volume) =>
@docker.getVolume(volume.Name).inspect()
.then(@format)
getAllByAppId: (appId) =>
@getAll()
.then (volumes) ->
_.filter(volumes, { appId })
get: ({ name, appId }) ->
@docker.getVolume("#{appId}_#{name}").inspect()
.then(@format)
# TODO: what config values are relevant/whitelisted?
# For now we only care about driverOpts and labels
# Create the volume, or return the existing one if its configuration is
# equivalent; throws if a volume with the same name but a different
# configuration already exists.
create: ({ name, config = {}, appId }) =>
config = _.mapKeys(config, (v, k) -> _.camelCase(k))
@logger.logSystemEvent(logTypes.createVolume, { volume: { name } })
labels = _.clone(config.labels) ? {}
_.assign(labels, constants.defaultVolumeLabels)
driverOpts = config.driverOpts ? {}
@get({ name, appId })
.tap (vol) =>
if !@isEqualConfig(vol.config, config)
throw new Error("Trying to create volume '#{name}', but a volume with same name and different configuration exists")
.catch NotFoundError, =>
@docker.createVolume({
Name: "#{appId}_#{name}"
Labels: labels
DriverOpts: driverOpts
}).call('inspect').then(@format)
.tapCatch (err) =>
@logger.logSystemEvent(logTypes.createVolumeError, { volume: { name }, error: err })
# Migrate the pre-multicontainer /data directory into a named volume;
# failure is logged as a warning rather than propagated.
createFromLegacy: (appId) =>
name = defaultLegacyVolume()
legacyPath = path.join(constants.rootMountPoint, 'mnt/data/resin-data', appId.toString())
@createFromPath({ name, appId }, legacyPath)
.catch (err) =>
@logger.logSystemMessage("Warning: could not migrate legacy /data volume: #{err.message}", { error: err }, 'Volume migration error')
# oldPath must be a path inside /mnt/data
createFromPath: ({ name, config = {}, appId }, oldPath) =>
@create({ name, config, appId })
.get('handle')
.then (v) ->
# Convert the path to be of the same mountpoint so that rename can work
volumePath = path.join(constants.rootMountPoint, 'mnt/data', v.Mountpoint.split(path.sep).slice(3)...)
safeRename(oldPath, volumePath)
# Remove the volume; errors are logged (best-effort), not rethrown
remove: ({ name, appId }) ->
@logger.logSystemEvent(logTypes.removeVolume, { volume: { name } })
@docker.getVolume("#{appId}_#{name}").remove()
.catch (err) =>
@logger.logSystemEvent(logTypes.removeVolumeError, { volume: { name, appId }, error: err })
# Two configs are equal when their camelCased driver options and labels
# both compare deep-equal; all other fields are ignored.
isEqualConfig: (current = {}, target = {}) ->
current = _.mapKeys(current, (v, k) -> _.camelCase(k))
target = _.mapKeys(target, (v, k) -> _.camelCase(k))
currentOpts = current.driverOpts ? {}
targetOpts = target.driverOpts ? {}
currentLabels = current.labels ? {}
targetLabels = target.labels ? {}
return _.isEqual(currentLabels, targetLabels) and _.isEqual(currentOpts, targetOpts)

231
src/compose/volumes.ts Normal file
View File

@ -0,0 +1,231 @@
import * as Dockerode from 'dockerode';
import * as _ from 'lodash';
import * as path from 'path';
import Docker = require('../lib/docker-utils');
import Logger from '../logger';
import constants = require('../lib/constants');
import { InternalInconsistencyError, NotFoundError } from '../lib/errors';
import { safeRename } from '../lib/fs-utils';
import * as LogTypes from '../lib/log-types';
import { defaultLegacyVolume } from '../lib/migration';
import { LabelObject } from '../lib/types';
import { checkInt } from '../lib/validation';
import * as ComposeUtils from './utils';
// Dependencies injected into the Volumes manager
interface VolumeConstructOpts {
docker: Docker;
logger: Logger;
}
// The supervisor's view of a docker volume: parsed name/appId plus the
// subset of config we manage, and the raw inspect result
export interface ComposeVolume {
name: string;
appId: number;
config: {
labels: LabelObject;
driverOpts: Dockerode.VolumeInspectInfo['Options'];
};
dockerVolume: Dockerode.VolumeInspectInfo;
}
// The pair that uniquely identifies a volume (docker name is `${appId}_${name}`)
interface VolumeNameOpts {
name: string;
appId: number;
}
// This weird type is currently needed because the create function (and helpers)
// accept either a docker volume or a compose volume (or an empty object too apparently).
// If we instead split the tasks into createFromCompose and createFromDocker, we will no
// longer have this issue (and weird typing)
type VolumeConfig = ComposeVolume['config'] | Dockerode.VolumeInspectInfo | {};
type VolumeCreateOpts = VolumeNameOpts & {
config?: VolumeConfig;
};
// TypeScript port of the former volumes.coffee: manages application docker
// volumes (listing, creation, legacy-/data migration, and removal).
export class Volumes {
private docker: Docker;
private logger: Logger;
public constructor(opts: VolumeConstructOpts) {
this.docker = opts.docker;
this.logger = opts.logger;
}
// Inspect and format every supervised volume on the engine
public async getAll(): Promise<ComposeVolume[]> {
const volumes = await this.listWithBothLabels();
return volumes.map(Volumes.format);
}
public async getAllByAppId(appId: number): Promise<ComposeVolume[]> {
const all = await this.getAll();
return _.filter(all, { appId });
}
// Inspect a single volume by its `${appId}_${name}` docker name;
// rejects (with a docker 404) when the volume does not exist
public async get({ name, appId }: VolumeNameOpts): Promise<ComposeVolume> {
const volume = await this.docker.getVolume(`${appId}_${name}`).inspect();
return Volumes.format(volume);
}
// Create the volume, or return the existing one when its configuration
// matches. Throws InternalInconsistencyError if a volume with the same
// name but different configuration exists; other errors are logged as
// createVolumeError and rethrown.
public async create(opts: VolumeCreateOpts): Promise<ComposeVolume> {
const { name, config = {}, appId } = opts;
// Accept both snake_case and camelCase config keys
const camelCaseConfig: Dictionary<unknown> = _.mapKeys(config, (_v, k) =>
_.camelCase(k),
);
this.logger.logSystemEvent(LogTypes.createVolume, { volume: { name } });
const labels = _.clone(camelCaseConfig.labels as LabelObject) || {};
// Always stamp the supervisor's own labels onto the volume
_.assign(labels, constants.defaultVolumeLabels);
const driverOpts: Dictionary<unknown> =
camelCaseConfig.driverOpts != null
? (camelCaseConfig.driverOpts as Dictionary<unknown>)
: {};
try {
const volume = await this.get({ name, appId });
if (!this.isEqualConfig(volume.config, config)) {
throw new InternalInconsistencyError(
`Trying to create volume '${name}', but a volume with the same name and different configuration exists`,
);
}
return volume;
} catch (e) {
// A NotFoundError from get() means the volume doesn't exist yet and
// should be created; anything else (including the inconsistency
// error above) is logged and rethrown
if (!NotFoundError(e)) {
this.logger.logSystemEvent(LogTypes.createVolumeError, {
volume: { name },
error: e,
});
throw e;
}
const volume = await this.docker.createVolume({
Name: Volumes.generateVolumeName({ name, appId }),
Labels: labels,
DriverOpts: driverOpts,
});
return Volumes.format(await volume.inspect());
}
}
// Migrate the pre-multicontainer /data directory into a named volume;
// failure is logged as a warning rather than propagated
public async createFromLegacy(appId: number): Promise<ComposeVolume | void> {
const name = defaultLegacyVolume();
const legacyPath = path.join(
constants.rootMountPoint,
'mnt/data/resin-data',
appId.toString(),
);
try {
return await this.createFromPath({ name, appId }, legacyPath);
} catch (e) {
this.logger.logSystemMessage(
`Warning: could not migrate legacy /data volume: ${e.message}`,
{ error: e },
'Volume migration error',
);
}
}
// oldPath must be a path inside /mnt/data
public async createFromPath(
opts: VolumeCreateOpts,
oldPath: string,
): Promise<void> {
const volume = await this.create(opts);
const handle = volume.dockerVolume;
// Convert the path to be of the same mountpoint so that rename can work
const volumePath = path.join(
constants.rootMountPoint,
'mnt/data',
...handle.Mountpoint.split(path.sep).slice(3),
);
await safeRename(oldPath, volumePath);
}
// Best-effort removal: errors are logged as removeVolumeError, not rethrown
public async remove({ name, appId }: VolumeNameOpts) {
this.logger.logSystemEvent(LogTypes.removeVolume, { volume: { name } });
try {
await this.docker
.getVolume(Volumes.generateVolumeName({ name, appId }))
.remove();
} catch (e) {
this.logger.logSystemEvent(LogTypes.removeVolumeError, {
volume: { name, appId },
error: e,
});
}
}
// Two configs are equal when their camelCased driver options and labels
// both compare deep-equal; other fields are ignored
public isEqualConfig(current: VolumeConfig, target: VolumeConfig): boolean {
const currentConfig = (_.mapKeys(current, (_v, k) =>
_.camelCase(k),
) as unknown) as ComposeVolume['config'];
const targetConfig = (_.mapKeys(target, (_v, k) =>
_.camelCase(k),
) as unknown) as ComposeVolume['config'];
const currentOpts = currentConfig.driverOpts || {};
const targetOpts = targetConfig.driverOpts || {};
const currentLabels = currentConfig.labels || {};
const targetLabels = targetConfig.labels || {};
return (
_.isEqual(currentOpts, targetOpts) &&
_.isEqual(currentLabels, targetLabels)
);
}
// Parse a docker volume inspect result into a ComposeVolume; throws when
// the volume name does not match the `${appId}_${name}` convention
private static format(volume: Dockerode.VolumeInspectInfo): ComposeVolume {
const match = volume.Name.match(/^([0-9]+)_(.+)$/);
if (match == null) {
throw new Error('Malformed volume name in Volume.format');
}
const appId = checkInt(match[1]);
const name = match[2];
return {
name,
// We know this cast is fine due to the regex
appId: appId as number,
config: {
labels: _.omit(
ComposeUtils.normalizeLabels(volume.Labels),
_.keys(constants.defaultVolumeLabels),
),
driverOpts: volume.Options,
},
dockerVolume: volume,
};
}
// List volumes carrying either the legacy io.resin.supervised label or
// the current io.balena.supervised label, deduplicated by Name
private async listWithBothLabels(): Promise<Dockerode.VolumeInspectInfo[]> {
// We have to cast the listVolumes call from any[] to any below, until the
// relevant PR: https://github.com/DefinitelyTyped/DefinitelyTyped/pull/32383
// is merged and released
const [legacyResponse, currentResponse]: [
Dockerode.VolumeInfoList,
Dockerode.VolumeInfoList
] = await Promise.all([
this.docker.listVolumes({
filters: { label: ['io.resin.supervised'] },
}) as Promise<any>,
this.docker.listVolumes({
filters: { label: ['io.balena.supervised'] },
}) as Promise<any>,
]);
const legacyVolumes = _.get(legacyResponse, 'Volumes', []);
const currentVolumes = _.get(currentResponse, 'Volumes', []);
return _.unionBy(legacyVolumes, currentVolumes, 'Name');
}
private static generateVolumeName({ name, appId }: VolumeNameOpts) {
return `${appId}_${name}`;
}
}
export default Volumes;

View File

@ -3,6 +3,7 @@ import { EventEmitter } from 'events';
import { Transaction } from 'knex';
import * as _ from 'lodash';
import { generateUniqueKey } from 'resin-register-device';
import StrictEventEmitter from 'strict-event-emitter-types';
import { Either } from 'fp-ts/lib/Either';
import * as t from 'io-ts';
@ -25,8 +26,19 @@ interface ConfigOpts {
}
type ConfigMap<T extends SchemaTypeKey> = { [key in T]: SchemaReturn<key> };
type ConfigChangeMap<T extends SchemaTypeKey> = {
[key in T]: SchemaReturn<key> | undefined
};
export class Config extends EventEmitter {
interface ConfigEvents {
change: ConfigChangeMap<SchemaTypeKey>;
}
type ConfigEventEmitter = StrictEventEmitter<EventEmitter, ConfigEvents>;
export class Config extends (EventEmitter as {
new (): ConfigEventEmitter;
}) {
private db: DB;
private configJsonBackend: ConfigJsonConfigBackend;
@ -173,7 +185,7 @@ export class Config extends EventEmitter {
.return();
}
}).then(() => {
this.emit('change', keyValues);
this.emit('change', keyValues as ConfigMap<SchemaTypeKey>);
});
}

View File

@ -47,7 +47,7 @@ export class DB {
public upsertModel(
modelName: string,
obj: any,
id: number | { [key: string]: string },
id: number | { [key: string]: unknown },
trx?: Knex.Transaction,
): Bluebird<any> {
const knex = trx || this.knex;

View File

@ -149,12 +149,13 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
);
// TODO: Support dependent applications when this feature is complete
router.get('/v2/applications/state', (_req: Request, res: Response) => {
router.get('/v2/applications/state', async (_req: Request, res: Response) => {
// It's kinda hacky to access the services and db via the application manager
// maybe refactor this code
const localMode = await deviceState.config.get('localMode');
Bluebird.join(
applications.services.getStatus(),
applications.images.getStatus(),
applications.images.getStatus(localMode),
applications.db.models('app').select(['appId', 'commit', 'name']),
(
services,
@ -210,7 +211,7 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
response[appName].services[img.serviceName] = {
status,
releaseId: img.releaseId,
downloadProgress: img.downloadProgress,
downloadProgress: img.downloadProgress || null,
};
});

View File

@ -1,5 +1,6 @@
import * as Bluebird from 'bluebird';
import DockerToolbelt = require('docker-toolbelt');
import { SchemaReturn } from '../config/schema-type';
// This is the EnvVarObject from src/lib/types, but it seems we cannot
// reference it relatively. Just redefine it as it's simple and won't change
@ -14,6 +15,8 @@ interface TaggedRepoImage {
tag: string;
}
type FetchOptions = SchemaReturn<'fetchOptions'>;
declare class DockerUtils extends DockerToolbelt {
constructor(opts: any);
@ -23,13 +26,13 @@ declare class DockerUtils extends DockerToolbelt {
imgDest: string,
fullDeltaOpts: any,
onProgress: (args: any) => void,
): Bluebird<void>;
): Bluebird<string>;
fetchImageWithProgress(
image: string,
config: { uuid: string; currentApiKey: string },
config: FetchOptions,
onProgress: (args: any) => void,
): Bluebird<void>;
): Bluebird<string>;
getImageEnv(id: string): Bluebird<EnvVarObject>;
getNetworkGateway(netName: string): Bluebird<string>;

View File

@ -10,8 +10,7 @@ export interface CheckIntOptions {
const ENV_VAR_KEY_REGEX = /^[a-zA-Z_][a-zA-Z0-9_]*$/;
const LABEL_NAME_REGEX = /^[a-zA-Z][a-zA-Z0-9\.\-]*$/;
type NullableString = string | undefined | null;
type NullableLiteral = number | NullableString;
type NullableLiteral = Nullable<number | string>;
/**
* checkInt

View File

@ -3,7 +3,6 @@ import * as Docker from 'dockerode';
import * as _ from 'lodash';
import Config from './config';
import { SchemaReturn, SchemaTypeKey } from './config/schema-type';
import Database from './db';
import { Logger } from './logger';
@ -25,23 +24,20 @@ export class LocalModeManager {
public async init() {
// Setup a listener to catch state changes relating to local mode
this.config.on(
'change',
(changed: { [key in SchemaTypeKey]: SchemaReturn<key> }) => {
if (changed.localMode != null) {
const localMode = changed.localMode || false;
this.config.on('change', changed => {
if (changed.localMode != null) {
const localMode = changed.localMode || false;
// First switch the logger to it's correct state
this.logger.switchBackend(localMode);
// First switch the logger to it's correct state
this.logger.switchBackend(localMode);
// If we're leaving local mode, make sure to remove all of the
// leftover artifacts
if (!localMode) {
this.removeLocalModeArtifacts();
}
// If we're leaving local mode, make sure to remove all of the
// leftover artifacts
if (!localMode) {
this.removeLocalModeArtifacts();
}
},
);
}
});
// On startup, check if we're in unmanaged mode,
// as local mode needs to be set

View File

@ -4,7 +4,6 @@ import * as _ from 'lodash';
import * as morgan from 'morgan';
import Config from './config';
import { SchemaReturn, SchemaTypeKey } from './config/schema-type';
import { EventTracker } from './event-tracker';
import blink = require('./lib/blink');
import * as iptables from './lib/iptables';
@ -147,18 +146,15 @@ export class SupervisorAPI {
// Monitor the switching of local mode, and change which interfaces will
// be listened to based on that
this.config.on(
'change',
(changedConfig: { [key in SchemaTypeKey]: SchemaReturn<key> }) => {
if (changedConfig.localMode != null) {
this.applyListeningRules(
changedConfig.localMode || false,
port,
allowedInterfaces,
);
}
},
);
this.config.on('change', changedConfig => {
if (changedConfig.localMode != null) {
this.applyListeningRules(
changedConfig.localMode || false,
port,
allowedInterfaces,
);
}
});
this.server = this.api.listen(port);
this.server.timeout = apiTimeout;

View File

@ -21,4 +21,27 @@ declare module 'dockerode' {
Healthcheck?: DockerHealthcheck;
StopTimeout?: number;
}
// TODO: Once https://github.com/DefinitelyTyped/DefinitelyTyped/pull/32383
// is merged and released, remove this and VolumeInfoList
export interface VolumeInspectInfo {
Name: string;
Driver: string;
Mountpoint: string;
Status?: { [key: string]: string };
Labels: { [key: string]: string };
Scope: 'local' | 'global';
// Field is always present, but sometimes is null
Options: { [key: string]: string } | null;
// Field is sometimes present, and sometimes null
UsageData?: {
Size: number;
RefCount: number;
} | null;
}
export interface VolumeInfoList {
Volumes: Dockerode.VolumeInspectInfo[];
Warnings: string[];
}
}

2
typings/global.d.ts vendored
View File

@ -5,3 +5,5 @@ interface Dictionary<T> {
interface Callback<T> {
(err?: Error, res?: T): void;
}
type Nullable<T> = T | null | undefined;