mirror of
https://github.com/balena-os/balena-supervisor.git
synced 2024-12-23 15:32:24 +00:00
Separate volume handling into manager and object classes
This change also makes sure that in the application-manager workflow we pass around instances of the Volume class, rather than just the config. Change-type: patch Signed-off-by: Cameron Diver <cameron@balena.io>
This commit is contained in:
parent
fe20f1b400
commit
23e564389d
@ -7,6 +7,7 @@ fs = Promise.promisifyAll(require('fs'))
|
||||
path = require 'path'
|
||||
|
||||
constants = require './lib/constants'
|
||||
{ log } = require './lib/supervisor-console'
|
||||
|
||||
{ DockerUtils: Docker } = require './lib/docker-utils'
|
||||
{ LocalModeManager } = require './local-mode'
|
||||
@ -20,7 +21,8 @@ updateLock = require './lib/update-lock'
|
||||
{ Images } = require './compose/images'
|
||||
{ NetworkManager } = require './compose/network-manager'
|
||||
{ Network } = require './compose/network'
|
||||
{ Volumes } = require './compose/volumes'
|
||||
{ VolumeManager } = require './compose/volume-manager'
|
||||
{ Volume } = require './compose/volume'
|
||||
|
||||
Proxyvisor = require './proxyvisor'
|
||||
|
||||
@ -68,7 +70,7 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
@images = new Images({ @docker, @logger, @db, @config })
|
||||
@services = new ServiceManager({ @docker, @logger, @images, @config })
|
||||
@networks = new NetworkManager({ @docker, @logger })
|
||||
@volumes = new Volumes({ @docker, @logger })
|
||||
@volumes = new VolumeManager({ @docker, @logger })
|
||||
@proxyvisor = new Proxyvisor({ @config, @logger, @db, @docker, @images, applications: this })
|
||||
@localModeManager = new LocalModeManager(@config, @docker, @logger, @db)
|
||||
@timeSpentFetching = 0
|
||||
@ -168,7 +170,7 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
step.current.config
|
||||
).remove()
|
||||
else
|
||||
@volumes.remove(step.current)
|
||||
step.current.remove()
|
||||
ensureSupervisorNetwork: =>
|
||||
@networks.ensureSupervisorNetwork()
|
||||
}
|
||||
@ -288,7 +290,7 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
for volume in volumes
|
||||
appId = volume.appId
|
||||
apps[appId] ?= { appId, services: [], volumes: {}, networks: {} }
|
||||
apps[appId].volumes[volume.name] = volume.config
|
||||
apps[appId].volumes[volume.name] = volume
|
||||
|
||||
# multi-app warning!
|
||||
# This is just wrong on every level
|
||||
@ -398,24 +400,30 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
targetNames = _.keys(target)
|
||||
toBeRemoved = _.difference(currentNames, targetNames)
|
||||
for name in toBeRemoved
|
||||
outputPairs.push({
|
||||
current: {
|
||||
name
|
||||
appId
|
||||
config: current[name]
|
||||
}
|
||||
target: null
|
||||
})
|
||||
if model instanceof NetworkManager
|
||||
outputPairs.push({
|
||||
current: current {
|
||||
name
|
||||
appId
|
||||
config: current[name]
|
||||
}
|
||||
target: null
|
||||
})
|
||||
else
|
||||
outputPairs.push({ current: current[name], target: null })
|
||||
toBeInstalled = _.difference(targetNames, currentNames)
|
||||
for name in toBeInstalled
|
||||
outputPairs.push({
|
||||
current: null
|
||||
target: {
|
||||
name
|
||||
appId
|
||||
config: target[name]
|
||||
}
|
||||
})
|
||||
if model instanceof NetworkManager
|
||||
outputPairs.push({
|
||||
current: null
|
||||
target: {
|
||||
name
|
||||
appId
|
||||
config: target[name]
|
||||
}
|
||||
})
|
||||
else
|
||||
outputPairs.push({ current: null, target: target[name] })
|
||||
toBeUpdated = _.filter _.intersection(targetNames, currentNames), (name) =>
|
||||
# While we're in this in-between state of a network-manager, but not
|
||||
# a volume-manager, we'll have to inspect the object to detect a
|
||||
@ -436,20 +444,26 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
)
|
||||
return !currentNet.isEqualConfig(targetNet)
|
||||
else
|
||||
return !model.isEqualConfig(current[name], target[name])
|
||||
return !current[name].isEqualConfig(target[name])
|
||||
for name in toBeUpdated
|
||||
outputPairs.push({
|
||||
current: {
|
||||
name
|
||||
appId
|
||||
config: current[name]
|
||||
}
|
||||
target: {
|
||||
name
|
||||
appId
|
||||
config: target[name]
|
||||
}
|
||||
})
|
||||
if model instanceof NetworkManager
|
||||
outputPairs.push({
|
||||
current: {
|
||||
name
|
||||
appId
|
||||
config: current[name]
|
||||
}
|
||||
target: {
|
||||
name
|
||||
appId
|
||||
config: target[name]
|
||||
}
|
||||
})
|
||||
else
|
||||
outputPairs.push({
|
||||
current: current[name],
|
||||
target: target[name]
|
||||
})
|
||||
return outputPairs
|
||||
|
||||
compareNetworksForUpdate: ({ current, target }, appId) =>
|
||||
@ -715,6 +729,14 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
service.image = imageInfo.Id
|
||||
return Service.fromComposeObject(service, serviceOpts)
|
||||
|
||||
createTargetVolume: (name, appId, volume) ->
|
||||
return Volume.fromComposeVolume(
|
||||
name,
|
||||
appId,
|
||||
volume,
|
||||
{ @docker, @logger }
|
||||
)
|
||||
|
||||
normaliseAndExtendAppFromDB: (app) =>
|
||||
Promise.join(
|
||||
@config.get('extendedEnvOptions')
|
||||
@ -732,18 +754,20 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
hostnameOnHost
|
||||
}
|
||||
_.assign(configOpts, opts)
|
||||
|
||||
volumes = JSON.parse(app.volumes)
|
||||
volumes = _.mapValues volumes, (volumeConfig) ->
|
||||
volumes = _.mapValues volumes, (volumeConfig, volumeName) =>
|
||||
volumeConfig ?= {}
|
||||
volumeConfig.labels ?= {}
|
||||
return volumeConfig
|
||||
@createTargetVolume(volumeName, app.appId, volumeConfig)
|
||||
|
||||
Promise.map(JSON.parse(app.services), (service) => @createTargetService(service, configOpts))
|
||||
.then (services) ->
|
||||
.then (services) =>
|
||||
# If a named volume is defined in a service, we add it app-wide so that we can track it and purge it
|
||||
for s in services
|
||||
serviceNamedVolumes = s.getNamedVolumes()
|
||||
for name in serviceNamedVolumes
|
||||
volumes[name] ?= { labels: {} }
|
||||
volumes[name] = @createTargetVolume(name, app.appId, { labels: {} })
|
||||
outApp = {
|
||||
appId: app.appId
|
||||
name: app.name
|
||||
@ -984,5 +1008,4 @@ module.exports = class ApplicationManager extends EventEmitter
|
||||
svc.serviceId == serviceId
|
||||
.get('serviceName')
|
||||
|
||||
|
||||
localModeSwitchCompletion: => @localModeManager.switchCompletion()
|
||||
|
147
src/compose/volume-manager.ts
Normal file
147
src/compose/volume-manager.ts
Normal file
@ -0,0 +1,147 @@
|
||||
import * as Docker from 'dockerode';
|
||||
import filter = require('lodash/filter');
|
||||
import get = require('lodash/get');
|
||||
import unionBy = require('lodash/unionBy');
|
||||
import * as Path from 'path';
|
||||
|
||||
import constants = require('../lib/constants');
|
||||
import { InternalInconsistencyError, NotFoundError } from '../lib/errors';
|
||||
import { safeRename } from '../lib/fs-utils';
|
||||
import * as LogTypes from '../lib/log-types';
|
||||
import { defaultLegacyVolume } from '../lib/migration';
|
||||
import Logger from '../logger';
|
||||
import Volume, { VolumeConfig } from './volume';
|
||||
|
||||
export interface VolumeMangerConstructOpts {
|
||||
docker: Docker;
|
||||
logger: Logger;
|
||||
}
|
||||
|
||||
export interface VolumeNameOpts {
|
||||
name: string;
|
||||
appId: number;
|
||||
}
|
||||
|
||||
export class VolumeManager {
|
||||
private docker: Docker;
|
||||
private logger: Logger;
|
||||
|
||||
public constructor(opts: VolumeMangerConstructOpts) {
|
||||
this.docker = opts.docker;
|
||||
this.logger = opts.logger;
|
||||
}
|
||||
|
||||
public async get({ name, appId }: VolumeNameOpts): Promise<Volume> {
|
||||
return Volume.fromDockerVolume(
|
||||
{ docker: this.docker, logger: this.logger },
|
||||
await this.docker
|
||||
.getVolume(Volume.generateDockerName(appId, name))
|
||||
.inspect(),
|
||||
);
|
||||
}
|
||||
|
||||
public async getAll(): Promise<Volume[]> {
|
||||
const volumeInspect = await this.listWithBothLabels();
|
||||
return volumeInspect.map(inspect =>
|
||||
Volume.fromDockerVolume(
|
||||
{ logger: this.logger, docker: this.docker },
|
||||
inspect,
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
public async getAllByAppId(appId: number): Promise<Volume[]> {
|
||||
const all = await this.getAll();
|
||||
return filter(all, { appId });
|
||||
}
|
||||
|
||||
public async create(volume: Volume): Promise<void> {
|
||||
// First we check that we're not trying to recreate a
|
||||
// volume
|
||||
try {
|
||||
const existing = await this.get({
|
||||
name: volume.name,
|
||||
appId: volume.appId,
|
||||
});
|
||||
|
||||
if (!volume.isEqualConfig(existing)) {
|
||||
throw new InternalInconsistencyError(
|
||||
`Trying to create volume '${
|
||||
volume.name
|
||||
}', but a volume with the same name and different configuration exists`,
|
||||
);
|
||||
}
|
||||
} catch (e) {
|
||||
if (!NotFoundError(e)) {
|
||||
this.logger.logSystemEvent(LogTypes.createVolumeError, {
|
||||
volume: { name: volume.name },
|
||||
error: e,
|
||||
});
|
||||
throw e;
|
||||
}
|
||||
|
||||
await volume.create();
|
||||
}
|
||||
}
|
||||
|
||||
public async createFromLegacy(appId: number): Promise<Volume | void> {
|
||||
const name = defaultLegacyVolume();
|
||||
const legacyPath = Path.join(
|
||||
constants.rootMountPoint,
|
||||
'mnt/data/resin-data',
|
||||
appId.toString(),
|
||||
);
|
||||
|
||||
try {
|
||||
return await this.createFromPath({ name, appId }, {}, legacyPath);
|
||||
} catch (e) {
|
||||
this.logger.logSystemMessage(
|
||||
`Warning: could not migrate legacy /data volume: ${e.message}`,
|
||||
{ error: e },
|
||||
'Volume migration error',
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
public async createFromPath(
|
||||
{ name, appId }: VolumeNameOpts,
|
||||
config: Partial<VolumeConfig>,
|
||||
oldPath: string,
|
||||
): Promise<Volume> {
|
||||
const volume = Volume.fromComposeVolume(name, appId, config, {
|
||||
logger: this.logger,
|
||||
docker: this.docker,
|
||||
});
|
||||
|
||||
await this.create(volume);
|
||||
const inspect = await this.docker
|
||||
.getVolume(Volume.generateDockerName(volume.appId, volume.name))
|
||||
.inspect();
|
||||
|
||||
const volumePath = Path.join(
|
||||
constants.rootMountPoint,
|
||||
'mnt/data',
|
||||
...inspect.Mountpoint.split(Path.sep).slice(3),
|
||||
);
|
||||
|
||||
await safeRename(oldPath, volumePath);
|
||||
return volume;
|
||||
}
|
||||
|
||||
private async listWithBothLabels(): Promise<Docker.VolumeInspectInfo[]> {
|
||||
const [legacyResponse, currentResponse] = await Promise.all([
|
||||
this.docker.listVolumes({
|
||||
filters: { label: ['io.resin.supervised'] },
|
||||
}),
|
||||
this.docker.listVolumes({
|
||||
filters: { label: ['io.balena.supervised'] },
|
||||
}),
|
||||
]);
|
||||
|
||||
const legacyVolumes = get(legacyResponse, 'Volumes', []);
|
||||
const currentVolumes = get(currentResponse, 'Volumes', []);
|
||||
return unionBy(legacyVolumes, currentVolumes, 'Name');
|
||||
}
|
||||
}
|
||||
|
||||
export default VolumeManager;
|
143
src/compose/volume.ts
Normal file
143
src/compose/volume.ts
Normal file
@ -0,0 +1,143 @@
|
||||
import * as Docker from 'dockerode';
|
||||
import assign = require('lodash/assign');
|
||||
import isEqual = require('lodash/isEqual');
|
||||
|
||||
import constants = require('../lib/constants');
|
||||
import { InternalInconsistencyError } from '../lib/errors';
|
||||
import * as LogTypes from '../lib/log-types';
|
||||
import { LabelObject } from '../lib/types';
|
||||
import Logger from '../logger';
|
||||
import * as ComposeUtils from './utils';
|
||||
|
||||
export interface VolumeConstructOpts {
|
||||
logger: Logger;
|
||||
docker: Docker;
|
||||
}
|
||||
|
||||
export interface VolumeConfig {
|
||||
labels: LabelObject;
|
||||
driverOpts: Docker.VolumeInspectInfo['Options'];
|
||||
}
|
||||
|
||||
export class Volume {
|
||||
public appId: number;
|
||||
public name: string;
|
||||
public config: VolumeConfig;
|
||||
|
||||
private logger: Logger;
|
||||
private docker: Docker;
|
||||
|
||||
private constructor(
|
||||
name: string,
|
||||
appId: number,
|
||||
config: VolumeConfig,
|
||||
opts: VolumeConstructOpts,
|
||||
) {
|
||||
this.name = name;
|
||||
this.appId = appId;
|
||||
|
||||
this.logger = opts.logger;
|
||||
this.docker = opts.docker;
|
||||
this.config = config;
|
||||
}
|
||||
|
||||
public static fromDockerVolume(
|
||||
opts: VolumeConstructOpts,
|
||||
inspect: Docker.VolumeInspectInfo,
|
||||
): Volume {
|
||||
// Convert the docker inspect to the config
|
||||
const config: VolumeConfig = {
|
||||
labels: inspect.Labels || {},
|
||||
driverOpts: inspect.Options || {},
|
||||
};
|
||||
|
||||
// Detect the name and appId from the inspect data
|
||||
const { name, appId } = this.deconstructDockerName(inspect.Name);
|
||||
|
||||
return new Volume(name, appId, config, opts);
|
||||
}
|
||||
|
||||
public static fromComposeVolume(
|
||||
name: string,
|
||||
appId: number,
|
||||
config: Partial<VolumeConfig>,
|
||||
opts: VolumeConstructOpts,
|
||||
) {
|
||||
const filledConfig: VolumeConfig = {
|
||||
driverOpts: config.driverOpts || {},
|
||||
labels: ComposeUtils.normalizeLabels(config.labels || {}),
|
||||
};
|
||||
|
||||
// We only need to assign the labels here, as when we
|
||||
// get it from the daemon, they should already be there
|
||||
assign(filledConfig.labels, constants.defaultVolumeLabels);
|
||||
|
||||
return new Volume(name, appId, filledConfig, opts);
|
||||
}
|
||||
|
||||
public isEqualConfig(volume: Volume): boolean {
|
||||
return (
|
||||
isEqual(this.config.driverOpts, volume.config.driverOpts) &&
|
||||
isEqual(this.config.labels, volume.config.labels)
|
||||
);
|
||||
}
|
||||
|
||||
public async create(): Promise<void> {
|
||||
this.logger.logSystemEvent(LogTypes.createVolume, {
|
||||
volume: { name: this.name },
|
||||
});
|
||||
// Check that we're not trying to recreate a volume that
|
||||
// already exists
|
||||
await this.docker.createVolume({
|
||||
Name: Volume.generateDockerName(this.appId, this.name),
|
||||
Labels: this.config.labels,
|
||||
DriverOpts: this.config.driverOpts,
|
||||
});
|
||||
}
|
||||
|
||||
public async remove(): Promise<void> {
|
||||
this.logger.logSystemEvent(LogTypes.removeVolume, {
|
||||
volume: { name: this.name },
|
||||
});
|
||||
|
||||
try {
|
||||
await this.docker
|
||||
.getVolume(Volume.generateDockerName(this.appId, this.name))
|
||||
.remove();
|
||||
} catch (e) {
|
||||
this.logger.logSystemEvent(LogTypes.removeVolumeError, {
|
||||
volume: { name: this.name, appId: this.appId },
|
||||
error: e,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
public static generateDockerName(appId: number, name: string) {
|
||||
return `${appId}_${name}`;
|
||||
}
|
||||
|
||||
private static deconstructDockerName(
|
||||
name: string,
|
||||
): { name: string; appId: number } {
|
||||
const match = name.match(/(\d+)_(\S+)/);
|
||||
if (match == null) {
|
||||
throw new InternalInconsistencyError(
|
||||
`Could not detect volume data from docker name: ${name}`,
|
||||
);
|
||||
}
|
||||
|
||||
const appId = parseInt(match[1], 10);
|
||||
if (isNaN(appId)) {
|
||||
throw new InternalInconsistencyError(
|
||||
`Could not detect application id from docker name: ${match[1]}`,
|
||||
);
|
||||
}
|
||||
|
||||
return {
|
||||
appId,
|
||||
name: match[2],
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
export default Volume;
|
@ -1,225 +0,0 @@
|
||||
import * as Dockerode from 'dockerode';
|
||||
import * as _ from 'lodash';
|
||||
import * as path from 'path';
|
||||
|
||||
import Docker from '../lib/docker-utils';
|
||||
import Logger from '../logger';
|
||||
|
||||
import constants = require('../lib/constants');
|
||||
import { InternalInconsistencyError, NotFoundError } from '../lib/errors';
|
||||
import { safeRename } from '../lib/fs-utils';
|
||||
import * as LogTypes from '../lib/log-types';
|
||||
import { defaultLegacyVolume } from '../lib/migration';
|
||||
import { LabelObject } from '../lib/types';
|
||||
import { checkInt } from '../lib/validation';
|
||||
import * as ComposeUtils from './utils';
|
||||
|
||||
interface VolumeConstructOpts {
|
||||
docker: Docker;
|
||||
logger: Logger;
|
||||
}
|
||||
|
||||
export interface ComposeVolume {
|
||||
name: string;
|
||||
appId: number;
|
||||
config: {
|
||||
labels: LabelObject;
|
||||
driverOpts: Dockerode.VolumeInspectInfo['Options'];
|
||||
};
|
||||
dockerVolume: Dockerode.VolumeInspectInfo;
|
||||
}
|
||||
|
||||
interface VolumeNameOpts {
|
||||
name: string;
|
||||
appId: number;
|
||||
}
|
||||
|
||||
// This weird type is currently needed because the create function (and helpers)
|
||||
// accept either a docker volume or a compose volume (or an empty object too apparently).
|
||||
// If we instead split the tasks into createFromCompose and createFromDocker, we will no
|
||||
// longer have this issue (and weird typing)
|
||||
type VolumeConfig = ComposeVolume['config'] | Dockerode.VolumeInspectInfo | {};
|
||||
type VolumeCreateOpts = VolumeNameOpts & {
|
||||
config?: VolumeConfig;
|
||||
};
|
||||
|
||||
export class Volumes {
|
||||
private docker: Docker;
|
||||
private logger: Logger;
|
||||
|
||||
public constructor(opts: VolumeConstructOpts) {
|
||||
this.docker = opts.docker;
|
||||
this.logger = opts.logger;
|
||||
}
|
||||
|
||||
public async getAll(): Promise<ComposeVolume[]> {
|
||||
const volumes = await this.listWithBothLabels();
|
||||
return volumes.map(Volumes.format);
|
||||
}
|
||||
|
||||
public async getAllByAppId(appId: number): Promise<ComposeVolume[]> {
|
||||
const all = await this.getAll();
|
||||
return _.filter(all, { appId });
|
||||
}
|
||||
|
||||
public async get({ name, appId }: VolumeNameOpts): Promise<ComposeVolume> {
|
||||
const volume = await this.docker.getVolume(`${appId}_${name}`).inspect();
|
||||
return Volumes.format(volume);
|
||||
}
|
||||
|
||||
public async create(opts: VolumeCreateOpts): Promise<ComposeVolume> {
|
||||
const { name, config = {}, appId } = opts;
|
||||
const camelCaseConfig: Dictionary<unknown> = _.mapKeys(config, (_v, k) =>
|
||||
_.camelCase(k),
|
||||
);
|
||||
|
||||
this.logger.logSystemEvent(LogTypes.createVolume, { volume: { name } });
|
||||
|
||||
const labels = _.clone(camelCaseConfig.labels as LabelObject) || {};
|
||||
_.assign(labels, constants.defaultVolumeLabels);
|
||||
|
||||
const driverOpts: Dictionary<unknown> =
|
||||
camelCaseConfig.driverOpts != null
|
||||
? (camelCaseConfig.driverOpts as Dictionary<unknown>)
|
||||
: {};
|
||||
|
||||
try {
|
||||
const volume = await this.get({ name, appId });
|
||||
if (!this.isEqualConfig(volume.config, config)) {
|
||||
throw new InternalInconsistencyError(
|
||||
`Trying to create volume '${name}', but a volume with the same name and different configuration exists`,
|
||||
);
|
||||
}
|
||||
return volume;
|
||||
} catch (e) {
|
||||
if (!NotFoundError(e)) {
|
||||
this.logger.logSystemEvent(LogTypes.createVolumeError, {
|
||||
volume: { name },
|
||||
error: e,
|
||||
});
|
||||
throw e;
|
||||
}
|
||||
const volume = await this.docker.createVolume({
|
||||
Name: Volumes.generateVolumeName({ name, appId }),
|
||||
Labels: labels,
|
||||
DriverOpts: driverOpts,
|
||||
});
|
||||
|
||||
return Volumes.format(await volume.inspect());
|
||||
}
|
||||
}
|
||||
|
||||
public async createFromLegacy(appId: number): Promise<ComposeVolume | void> {
|
||||
const name = defaultLegacyVolume();
|
||||
const legacyPath = path.join(
|
||||
constants.rootMountPoint,
|
||||
'mnt/data/resin-data',
|
||||
appId.toString(),
|
||||
);
|
||||
|
||||
try {
|
||||
return await this.createFromPath({ name, appId }, legacyPath);
|
||||
} catch (e) {
|
||||
this.logger.logSystemMessage(
|
||||
`Warning: could not migrate legacy /data volume: ${e.message}`,
|
||||
{ error: e },
|
||||
'Volume migration error',
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// oldPath must be a path inside /mnt/data
|
||||
public async createFromPath(
|
||||
opts: VolumeCreateOpts,
|
||||
oldPath: string,
|
||||
): Promise<void> {
|
||||
const volume = await this.create(opts);
|
||||
const handle = volume.dockerVolume;
|
||||
|
||||
// Convert the path to be of the same mountpoint so that rename can work
|
||||
const volumePath = path.join(
|
||||
constants.rootMountPoint,
|
||||
'mnt/data',
|
||||
...handle.Mountpoint.split(path.sep).slice(3),
|
||||
);
|
||||
await safeRename(oldPath, volumePath);
|
||||
}
|
||||
|
||||
public async remove({ name, appId }: VolumeNameOpts) {
|
||||
this.logger.logSystemEvent(LogTypes.removeVolume, { volume: { name } });
|
||||
try {
|
||||
await this.docker
|
||||
.getVolume(Volumes.generateVolumeName({ name, appId }))
|
||||
.remove();
|
||||
} catch (e) {
|
||||
this.logger.logSystemEvent(LogTypes.removeVolumeError, {
|
||||
volume: { name, appId },
|
||||
error: e,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
public isEqualConfig(current: VolumeConfig, target: VolumeConfig): boolean {
|
||||
const currentConfig = (_.mapKeys(current, (_v, k) =>
|
||||
_.camelCase(k),
|
||||
) as unknown) as ComposeVolume['config'];
|
||||
const targetConfig = (_.mapKeys(target, (_v, k) =>
|
||||
_.camelCase(k),
|
||||
) as unknown) as ComposeVolume['config'];
|
||||
|
||||
const currentOpts = currentConfig.driverOpts || {};
|
||||
const targetOpts = targetConfig.driverOpts || {};
|
||||
|
||||
const currentLabels = currentConfig.labels || {};
|
||||
const targetLabels = targetConfig.labels || {};
|
||||
|
||||
return (
|
||||
_.isEqual(currentOpts, targetOpts) &&
|
||||
_.isEqual(currentLabels, targetLabels)
|
||||
);
|
||||
}
|
||||
|
||||
private static format(volume: Dockerode.VolumeInspectInfo): ComposeVolume {
|
||||
const match = volume.Name.match(/^([0-9]+)_(.+)$/);
|
||||
if (match == null) {
|
||||
throw new Error('Malformed volume name in Volume.format');
|
||||
}
|
||||
const appId = checkInt(match[1]);
|
||||
const name = match[2];
|
||||
|
||||
return {
|
||||
name,
|
||||
// We know this cast is fine due to the regex
|
||||
appId: appId as number,
|
||||
config: {
|
||||
labels: _.omit(
|
||||
ComposeUtils.normalizeLabels(volume.Labels),
|
||||
_.keys(constants.defaultVolumeLabels),
|
||||
),
|
||||
driverOpts: volume.Options,
|
||||
},
|
||||
dockerVolume: volume,
|
||||
};
|
||||
}
|
||||
|
||||
private async listWithBothLabels(): Promise<Dockerode.VolumeInspectInfo[]> {
|
||||
const [legacyResponse, currentResponse] = await Promise.all([
|
||||
this.docker.listVolumes({
|
||||
filters: { label: ['io.resin.supervised'] },
|
||||
}),
|
||||
this.docker.listVolumes({
|
||||
filters: { label: ['io.balena.supervised'] },
|
||||
}),
|
||||
]);
|
||||
|
||||
const legacyVolumes = _.get(legacyResponse, 'Volumes', []);
|
||||
const currentVolumes = _.get(currentResponse, 'Volumes', []);
|
||||
return _.unionBy(legacyVolumes, currentVolumes, 'Name');
|
||||
}
|
||||
|
||||
private static generateVolumeName({ name, appId }: VolumeNameOpts) {
|
||||
return `${appId}_${name}`;
|
||||
}
|
||||
}
|
||||
|
||||
export default Volumes;
|
Loading…
Reference in New Issue
Block a user