Merge pull request #1032 from balena-io/1026-dont-delete-volumes-implicitly

Don't delete volumes implicitly
This commit is contained in:
CameronDiver 2019-07-10 07:17:11 -07:00 committed by GitHub
commit 9fe1592a28
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 354 additions and 49 deletions

View File

@ -38,9 +38,6 @@ RUN apt-get update && apt-get install ca-certificates \
iptables libnss-mdns nodejs rsync git python make curl g++ \
kmod vim
COPY package*.json ./
# We first ensure that every architecture has an npm version
# which can do an npm ci, then we perform the ci using this
# temporary version
@ -48,8 +45,10 @@ RUN curl -LOJ https://www.npmjs.com/install.sh && \
# This is required to avoid a bug in uid-number
# https://github.com/npm/uid-number/issues/7
npm config set unsafe-perm true && \
npm_install="${NPM_VERSION}" npm_config_prefix=/tmp sh ./install.sh && \
JOBS=MAX /tmp/bin/npm ci --no-optional --unsafe-perm
npm_install="${NPM_VERSION}" npm_config_prefix=/tmp sh ./install.sh
COPY package*.json ./
RUN JOBS=MAX /tmp/bin/npm ci --no-optional --unsafe-perm
COPY src src/
COPY typings typings/

View File

@ -1152,3 +1152,33 @@ Response:
]
}
```
### V2 Utilities
#### Clean up volumes with no references
Added in supervisor version v10.0.0
Starting with balena-supervisor v10.0.0, volumes which have no
references are no longer automatically removed as part of
the standard update flow. To clean up any orphaned
volumes, use this supervisor endpoint:
From an application container:
```
$ curl "$BALENA_SUPERVISOR_ADDRESS/v2/cleanup-volumes?apikey=$BALENA_SUPERVISOR_API_KEY"
```
Successful response:
```
{
"status": "success"
}
```
Unsuccessful response:
```
{
"status": "failed",
"message": "the error message"
}
```

36
package-lock.json generated
View File

@ -1609,17 +1609,8 @@
"deep-equal": "^1.0.1",
"dns-equal": "^1.0.0",
"dns-txt": "^2.0.2",
"multicast-dns": "git+https://github.com/resin-io-modules/multicast-dns.git#a15c63464eb43e8925b187ed5cb9de6892e8aacc",
"multicast-dns-service-types": "^1.1.0"
},
"dependencies": {
"multicast-dns": {
"version": "git+https://github.com/resin-io-modules/multicast-dns.git#a15c63464eb43e8925b187ed5cb9de6892e8aacc",
"from": "git+https://github.com/resin-io-modules/multicast-dns.git#a15c63464eb43e8925b187ed5cb9de6892e8aacc",
"requires": {
"dns-packet": "^1.0.1",
"thunky": "^0.1.0"
}
}
}
},
"brace-expansion": {
@ -2803,6 +2794,16 @@
"integrity": "sha1-s55/HabrCnW6nBcySzR1PEfgZU0=",
"dev": true
},
"dns-packet": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-1.3.1.tgz",
"integrity": "sha512-0UxfQkMhYAUaZI+xrNZOz/as5KgDU0M/fQ9b6SpkyLbk3GEswDi6PADJVaYJradtRVsRIlF1zLyOodbcTCDzUg==",
"dev": true,
"requires": {
"ip": "^1.1.0",
"safe-buffer": "^5.0.1"
}
},
"dns-txt": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/dns-txt/-/dns-txt-2.0.2.tgz",
@ -7307,6 +7308,15 @@
"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=",
"dev": true
},
"multicast-dns": {
"version": "git+https://github.com/resin-io-modules/multicast-dns.git#a15c63464eb43e8925b187ed5cb9de6892e8aacc",
"from": "git+https://github.com/resin-io-modules/multicast-dns.git#listen-on-all-interfaces",
"dev": true,
"requires": {
"dns-packet": "^1.0.1",
"thunky": "^0.1.0"
}
},
"multicast-dns-service-types": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/multicast-dns-service-types/-/multicast-dns-service-types-1.1.0.tgz",
@ -10053,6 +10063,12 @@
"xtend": "~4.0.1"
}
},
"thunky": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/thunky/-/thunky-0.1.0.tgz",
"integrity": "sha1-vzAUaCTituZ7Dy16Ssi+smkIaE4=",
"dev": true
},
"tildify": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/tildify/-/tildify-1.2.0.tgz",

View File

@ -156,11 +156,10 @@ module.exports = class ApplicationManager extends EventEmitter
@networks.create(step.target)
else
@volumes.create(step.target)
removeNetworkOrVolume: (step) =>
if step.model is 'network'
@networks.remove(step.current)
else
@volumes.remove(step.current)
removeNetwork: (step) =>
@networks.remove(step.current)
removeVolume: (step) =>
@volumes.remove(step.current)
ensureSupervisorNetwork: =>
@networks.ensureSupervisorNetwork()
}
@ -467,7 +466,9 @@ module.exports = class ApplicationManager extends EventEmitter
dependencies = _.filter currentApp.services, (service) ->
dependencyComparisonFn(service, current)
if _.isEmpty(dependencies)
return [{ action: 'removeNetworkOrVolume', model, current }]
if model is 'network'
return [{ action: 'removeNetwork', current }]
return []
else
# If the current update doesn't require killing the services that use this network/volume,
# we have to kill them before removing the network/volume (e.g. when we're only updating the network config)
@ -622,7 +623,7 @@ module.exports = class ApplicationManager extends EventEmitter
pairSteps = @_nextStepsForVolume(pair, currentApp, removePairs.concat(updatePairs))
steps = steps.concat(pairSteps)
if _.isEmpty(steps) and currentApp.commit != targetApp.commit
if _.isEmpty(steps) and targetApp.commit? and currentApp.commit != targetApp.commit
steps.push({
action: 'updateCommit'
target: targetApp.commit
@ -858,11 +859,36 @@ module.exports = class ApplicationManager extends EventEmitter
return { imagesToSave, imagesToRemove }
_inferNextSteps: (cleanupNeeded, availableImages, downloading, supervisorNetworkReady, current, target, ignoreImages, { localMode, delta }) =>
volumePromises = []
Promise.try =>
if localMode
ignoreImages = true
currentByAppId = current.local.apps ? {}
targetByAppId = target.local.apps ? {}
# Given we need to detect when a device is moved
# between applications, we do it this way. This code
# is going to change to an application-manager +
# application model, which means that we can just
# detect when an application is no longer referenced
# in the target state, and run the teardown that way.
# Until then, this essentially does the same thing. We
# check when every other part of the teardown for an
# application has been complete, and then append the
# volume removal steps.
# We also don't want to remove cloud volumes when
# switching to local mode
# multi-app warning: this will break
oldApps = null
if !localMode
currentAppIds = _.keys(current.local.apps).map((n) -> checkInt(n))
targetAppIds = _.keys(target.local.apps).map((n) -> checkInt(n))
if targetAppIds.length > 1
throw new Error('Current supervisor does not support multiple applications')
diff = _.difference(currentAppIds, targetAppIds)
if diff.length > 0
oldApps = diff
nextSteps = []
if !supervisorNetworkReady
# if the supervisor0 network isn't ready and there's any containers using it, we need
@ -894,6 +920,12 @@ module.exports = class ApplicationManager extends EventEmitter
allAppIds = _.union(_.keys(currentByAppId), _.keys(targetByAppId))
for appId in allAppIds
nextSteps = nextSteps.concat(@_nextStepsForAppUpdate(currentByAppId[appId], targetByAppId[appId], localMode, availableImages, downloading))
if oldApps != null and _.includes(oldApps, checkInt(appId))
# We check if everything else has been done for
# the old app to be removed. If it has, we then
# remove all of the volumes
if _.every(nextSteps, { action: 'noop' })
volumePromises.push(@removeAllVolumesForApp(checkInt(appId)))
newDownloads = _.filter(nextSteps, (s) -> s.action == 'fetch').length
if !ignoreImages and delta and newDownloads > 0
downloadsToBlock = downloading.length + newDownloads - constants.maxDeltaDownloads
@ -903,6 +935,11 @@ module.exports = class ApplicationManager extends EventEmitter
if !ignoreImages and _.isEmpty(nextSteps) and !_.isEmpty(downloading)
nextSteps.push({ action: 'noop' })
return _.uniqWith(nextSteps, _.isEqual)
.then (nextSteps) ->
Promise.all(volumePromises).then (volSteps) ->
nextSteps = nextSteps.concat(_.flatten(volSteps))
return nextSteps
stopAll: ({ force = false, skipLock = false } = {}) =>
Promise.resolve(@services.getAll())
@ -962,4 +999,8 @@ module.exports = class ApplicationManager extends EventEmitter
svc.serviceId == serviceId
.get('serviceName')
removeAllVolumesForApp: (appId) =>
@volumes.getAllByAppId(appId).then (volumes) ->
return volumes.map((v) -> { action: 'removeVolume', current: v })
localModeSwitchCompletion: => @localModeManager.switchCompletion()

View File

@ -14,6 +14,9 @@ import { APIBinder } from './api-binder';
import { Service } from './compose/service';
import Config from './config';
import NetworkManager from './compose/network-manager';
import VolumeManager from './compose/volume-manager';
declare interface Options {
force?: boolean;
running?: boolean;
@ -41,6 +44,8 @@ export class ApplicationManager extends EventEmitter {
public apiBinder: APIBinder;
public services: ServiceManager;
public volumes: VolumeManager;
public networks: NetworkManager;
public config: Config;
public db: DB;
public images: Images;

View File

@ -1,7 +1,5 @@
import * as Docker from 'dockerode';
import filter = require('lodash/filter');
import get = require('lodash/get');
import unionBy = require('lodash/unionBy');
import * as _ from 'lodash';
import * as Path from 'path';
import constants = require('../lib/constants');
@ -53,7 +51,7 @@ export class VolumeManager {
public async getAllByAppId(appId: number): Promise<Volume[]> {
const all = await this.getAll();
return filter(all, { appId });
return _.filter(all, { appId });
}
public async create(volume: Volume): Promise<void> {
@ -131,6 +129,34 @@ export class VolumeManager {
return volume;
}
// Remove every Docker volume that no container currently mounts.
// Added alongside the change that stopped deleting volumes implicitly
// during updates; invoked from the /v2/cleanup-volumes endpoint.
public async removeOrphanedVolumes(): Promise<void> {
// Iterate through every container, and track the
// references to a volume
// Note that we're not just interested in containers
// which are part of the private state, and instead
// *all* containers. This means we don't remove
// something that's part of a sideloaded container
const [dockerContainers, dockerVolumes] = await Promise.all([
this.docker.listContainers(),
this.docker.listVolumes(),
]);
// Collect the unique set of volume names referenced by any
// container's mounts.
const containerVolumes = _(dockerContainers)
.flatMap(c => c.Mounts)
.filter(m => m.Type === 'volume')
// We know that the name must be set, if the mount is
// a volume
.map(m => m.Name as string)
.uniq()
.value();
const volumeNames = _.map(dockerVolumes.Volumes, 'Name');
// Anything the engine knows about that no container references
// is considered orphaned.
const volumesToRemove = _.difference(volumeNames, containerVolumes);
// Removals run in parallel; Promise.all rejects on the first
// failed removal, so a single failure surfaces to the caller.
await Promise.all(
volumesToRemove.map(v => this.docker.getVolume(v).remove()),
);
}
private async listWithBothLabels(): Promise<Docker.VolumeInspectInfo[]> {
const [legacyResponse, currentResponse] = await Promise.all([
this.docker.listVolumes({
@ -141,9 +167,9 @@ export class VolumeManager {
}),
]);
const legacyVolumes = get(legacyResponse, 'Volumes', []);
const currentVolumes = get(currentResponse, 'Volumes', []);
return unionBy(legacyVolumes, currentVolumes, 'Name');
const legacyVolumes = _.get(legacyResponse, 'Volumes', []);
const currentVolumes = _.get(currentResponse, 'Volumes', []);
return _.unionBy(legacyVolumes, currentVolumes, 'Name');
}
}

View File

@ -159,7 +159,7 @@ export class Config extends (EventEmitter as new () => ConfigEventEmitter) {
// if we have anything other than a string, it must be converted to
// a string before being stored in the db
const strValue = Config.valueToString(value);
const strValue = Config.valueToString(value, key);
if (oldValues[key] !== value) {
return this.db.upsertModel(
@ -336,7 +336,7 @@ export class Config extends (EventEmitter as new () => ConfigEventEmitter) {
});
}
private static valueToString(value: unknown) {
private static valueToString(value: unknown, name: string) {
switch (typeof value) {
case 'object':
return JSON.stringify(value);
@ -346,7 +346,7 @@ export class Config extends (EventEmitter as new () => ConfigEventEmitter) {
return value.toString();
default:
throw new InternalInconsistencyError(
`Could not convert configuration value to string for storage, value: ${value}, type: ${typeof value}`,
`Could not convert configuration value to string for storage, name: ${name}, value: ${value}, type: ${typeof value}`,
);
}
}

View File

@ -485,4 +485,18 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
});
}
});
router.get('/v2/cleanup-volumes', async (_req, res) => {
try {
await applications.volumes.removeOrphanedVolumes();
res.json({
status: 'success',
});
} catch (e) {
res.status(503).json({
status: 'failed',
message: messageFromError(e),
});
}
});
}

View File

@ -12,6 +12,7 @@ DeviceState = require '../src/device-state'
{ Config } = require('../src/config')
{ Service } = require '../src/compose/service'
{ Network } = require '../src/compose/network'
{ Volume } = require '../src/compose/volume'
appDBFormatNormalised = {
appId: 1234
@ -160,7 +161,7 @@ describe 'ApplicationManager', ->
return appCloned
.then (normalisedApps) ->
currentCloned = _.cloneDeep(current)
currentCloned.local.apps = normalisedApps
currentCloned.local.apps = _.keyBy(normalisedApps, 'appId')
return currentCloned
@normaliseTarget = (target, available) =>
@ -178,6 +179,7 @@ describe 'ApplicationManager', ->
service.config.image = img.dockerImageId
return service
return app
targetCloned.local.apps = _.keyBy(targetCloned.local.apps, 'appId')
return targetCloned
@db.init()
.then =>
@ -199,8 +201,8 @@ describe 'ApplicationManager', ->
steps = @applications._inferNextSteps(false, availableImages[0], [], true, current, target, false, {})
expect(steps).to.eventually.deep.equal([{
action: 'start'
current: current.local.apps[0].services[1]
target: target.local.apps[0].services[1]
current: current.local.apps['1234'].services[1]
target: target.local.apps['1234'].services[1]
serviceId: 24
appId: 1234
options: {}
@ -215,7 +217,7 @@ describe 'ApplicationManager', ->
steps = @applications._inferNextSteps(false, availableImages[0], [], true, current, target, false, {})
expect(steps).to.eventually.deep.equal([{
action: 'kill'
current: current.local.apps[0].services[1]
current: current.local.apps['1234'].services[1]
target: null
serviceId: 24
appId: 1234
@ -231,7 +233,7 @@ describe 'ApplicationManager', ->
steps = @applications._inferNextSteps(false, availableImages[0], [], true, current, target, false, {})
expect(steps).to.eventually.deep.equal([{
action: 'fetch'
image: @applications.imageForService(target.local.apps[0].services[1])
image: @applications.imageForService(target.local.apps['1234'].services[1])
serviceId: 24
appId: 1234
serviceName: 'anotherService'
@ -243,7 +245,7 @@ describe 'ApplicationManager', ->
@normaliseCurrent(currentState[0])
@normaliseTarget(targetState[2], availableImages[0])
(current, target) =>
steps = @applications._inferNextSteps(false, availableImages[0], [ target.local.apps[0].services[1].imageId ], true, current, target, false, {})
steps = @applications._inferNextSteps(false, availableImages[0], [ target.local.apps['1234'].services[1].imageId ], true, current, target, false, {})
expect(steps).to.eventually.deep.equal([{ action: 'noop', appId: 1234 }])
)
@ -255,8 +257,8 @@ describe 'ApplicationManager', ->
steps = @applications._inferNextSteps(false, availableImages[0], [], true, current, target, false, {})
expect(steps).to.eventually.deep.equal([{
action: 'kill'
current: current.local.apps[0].services[1]
target: target.local.apps[0].services[1]
current: current.local.apps['1234'].services[1]
target: target.local.apps['1234'].services[1]
serviceId: 24
appId: 1234
options: {}
@ -271,7 +273,7 @@ describe 'ApplicationManager', ->
steps = @applications._inferNextSteps(false, availableImages[2], [], true, current, target, false, {})
expect(steps).to.eventually.have.deep.members([{
action: 'fetch'
image: @applications.imageForService(target.local.apps[0].services[0])
image: @applications.imageForService(target.local.apps['1234'].services[0])
serviceId: 23
appId: 1234,
serviceName: 'aservice'
@ -287,16 +289,16 @@ describe 'ApplicationManager', ->
expect(steps).to.eventually.have.deep.members([
{
action: 'kill'
current: current.local.apps[0].services[0]
target: target.local.apps[0].services[0]
current: current.local.apps['1234'].services[0]
target: target.local.apps['1234'].services[0]
serviceId: 23
appId: 1234
options: {}
},
{
action: 'kill'
current: current.local.apps[0].services[1]
target: target.local.apps[0].services[1]
current: current.local.apps['1234'].services[1]
target: target.local.apps['1234'].services[1]
serviceId: 24
appId: 1234
options: {}
@ -314,7 +316,7 @@ describe 'ApplicationManager', ->
{
action: 'start'
current: null
target: target.local.apps[0].services[0]
target: target.local.apps['1234'].services[0]
serviceId: 23
appId: 1234
options: {}
@ -332,7 +334,7 @@ describe 'ApplicationManager', ->
{
action: 'start'
current: null
target: target.local.apps[0].services[1]
target: target.local.apps['1234'].services[1]
serviceId: 24
appId: 1234
options: {}
@ -349,7 +351,7 @@ describe 'ApplicationManager', ->
expect(steps).to.eventually.have.deep.members([
{
action: 'kill'
current: current.local.apps[0].services[0]
current: current.local.apps['1234'].services[0]
target: null
serviceId: 23
appId: 1234
@ -358,7 +360,7 @@ describe 'ApplicationManager', ->
{
action: 'start'
current: null
target: target.local.apps[0].services[1]
target: target.local.apps['1234'].services[1]
serviceId: 24
appId: 1234
options: {}
@ -387,3 +389,37 @@ describe 'ApplicationManager', ->
it 'converts a dependent app in DB format into state format', ->
app = @applications.proxyvisor.normaliseDependentAppFromDB(dependentDBFormat)
expect(app).to.eventually.deep.equal(dependentStateFormatNormalised)
describe 'Volumes', ->
before ->
stub(@applications, 'removeAllVolumesForApp').returns(Promise.resolve([{
action: 'removeVolume',
current: Volume.fromComposeObject('my_volume', 12, {}, { docker: null, logger: null })
}]))
after ->
@applications.removeAllVolumesForApp.restore()
it 'should not remove volumes when they are no longer referenced', ->
Promise.join(
@normaliseCurrent(currentState[6]),
@normaliseTarget(targetState[0], availableImages[0])
(current, target) =>
@applications._inferNextSteps(false, availableImages[0], [], true, current, target, false, {}).then (steps) ->
expect(
_.every(steps, (s) -> s.action != 'removeVolume'),
'Volumes from current app should not be removed'
).to.be.true
)
it 'should remove volumes from previous applications', ->
Promise.join(
@normaliseCurrent(currentState[5])
@normaliseTarget(targetState[6], [])
(current, target) =>
@applications._inferNextSteps(false, [], [], true, current, target, false, {}).then (steps) ->
expect(steps).to.have.length(1)
expect(steps[0]).to.have.property('action').that.equals('removeVolume')
expect(steps[0].current).to.have.property('appId').that.equals(12)
)

View File

@ -305,6 +305,26 @@ targetState[5] = {
dependent: { apps: [], devices: [] }
}
targetState[6] = {
local: {
name: 'volumeTest'
config: {
}
apps: [
{
appId: 12345
name: 'volumeApp'
commit: 'asd'
releaseId: 3
services: {}
volumes: {}
networks: {}
}
]
}
dependent: { apps: [], devices: [] }
}
exports.currentState = currentState = []
currentState[0] = {
local: {
@ -619,6 +639,124 @@ currentState[4] = {
dependent: { apps: [], devices: [] }
}
currentState[5] = {
local: {
name: 'volumeTest'
config: {}
apps: [
{
appId: 12345
name: 'volumeApp'
commit: 'asd'
releaseId: 3
services: []
volumes: {}
networks: { default: {} }
},
{
appId: 12,
name: 'previous-app',
commit: '123',
releaseId: 10
services: [],
networks: {},
volumes: {
my_volume: {}
}
}
]
}
dependent: { apps: [], devices: [] }
}
currentState[6] = {
local: {
name: 'aDeviceWithDifferentName'
config: {
'RESIN_HOST_CONFIG_gpu_mem': '512'
'RESIN_HOST_LOG_TO_DISPLAY': '1'
}
apps: [
{
appId: 1234
name: 'superapp'
commit: 'afafafa'
releaseId: 2
services: [
{
appId: 1234
serviceId: 23
releaseId: 2
commit: 'afafafa'
serviceName: 'aservice'
imageId: 12345
image: 'id1'
environment: {
'FOO': 'bar'
'ADDITIONAL_ENV_VAR': 'foo'
}
privileged: false
restart: 'always'
volumes: [
'/tmp/balena-supervisor/services/1234/aservice:/tmp/resin',
'/tmp/balena-supervisor/services/1234/aservice:/tmp/balena'
]
labels: {
'io.resin.app-id': '1234'
'io.resin.service-id': '23'
'io.resin.supervised': 'true'
'io.resin.service-name': 'aservice'
}
running: true
createdAt: new Date()
containerId: '1'
networkMode: 'default'
networks: { 'default': { aliases: [ 'aservice' ] } }
command: [ 'someCommand' ]
entrypoint: [ 'theEntrypoint' ]
},
{
appId: 1234
serviceId: 24
releaseId: 2
commit: 'afafafa'
serviceName: 'anotherService'
imageId: 12346
image: 'id0'
environment: {
'FOO': 'bro'
'ADDITIONAL_ENV_VAR': 'foo'
}
volumes: [
'/tmp/balena-supervisor/services/1234/anotherService:/tmp/resin',
'/tmp/balena-supervisor/services/1234/anotherService:/tmp/balena'
]
privileged: false
restart: 'always'
labels: {
'io.resin.app-id': '1234'
'io.resin.service-id': '24'
'io.resin.supervised': 'true'
'io.resin.service-name': 'anotherService'
}
running: true
createdAt: new Date()
containerId: '2'
networkMode: 'default'
networks: { 'default': { aliases: [ 'anotherService' ] } }
command: [ 'someCommand' ]
entrypoint: [ 'theEntrypoint' ]
}
]
volumes: {}
networks: { default: {} }
}
]
}
dependent: { apps: [], devices: [] }
}
exports.availableImages = availableImages = []
availableImages[0] = [
{