mirror of
https://github.com/balena-os/balena-supervisor.git
synced 2024-12-19 13:47:54 +00:00
Improve testing for supervisor composition modules
This PR cleans up testing for supervisor compose modules. It also fixes broken tests for application manager and removes a lot of dependencies for those tests on DB and other unnecessary mocks. There are probably a lot of cases that tests are missing but this should make writing new tests a lot easier. This PR also creates a new mock dockerode (mockerode) module that should make it easier to test operations that interact with the engine. All references to the old mock-dockerode have not yet been removed but that should come soon in another PR List of squashed commits: - Add tests for network create/remove - Move compose service tests to test/src/compose and reorganize test descriptions - Add support for image creation to mockerode - Add additional tests for compose volumes - Update mockerode so unimplemented fake methods throw. This is to ensure tests using mockerode fail if an unimplemented method is used - Update tests for volume-manager with mockerode - Update tests for compose/images - Simplify tests using mockerode - Clean up compose/app tests - Create application manager tests Change-type: minor
This commit is contained in:
parent
5bd53db905
commit
e04e64763f
@ -179,21 +179,38 @@ export async function getRequiredSteps(
|
||||
ignoreImages: boolean = false,
|
||||
): Promise<CompositionStep[]> {
|
||||
// get some required data
|
||||
const [
|
||||
{ localMode, delta },
|
||||
downloading,
|
||||
cleanupNeeded,
|
||||
availableImages,
|
||||
currentApps,
|
||||
] = await Promise.all([
|
||||
config.getMany(['localMode', 'delta']),
|
||||
const [downloading, availableImages, currentApps] = await Promise.all([
|
||||
imageManager.getDownloadingImageIds(),
|
||||
imageManager.isCleanupNeeded(),
|
||||
imageManager.getAvailable(),
|
||||
getCurrentApps(),
|
||||
]);
|
||||
const containerIdsByAppId = await getAppContainerIds(currentApps);
|
||||
|
||||
return await inferNextSteps(currentApps, targetApps, {
|
||||
ignoreImages,
|
||||
downloading,
|
||||
availableImages,
|
||||
containerIdsByAppId,
|
||||
});
|
||||
}
|
||||
|
||||
// Calculate the required steps from the current to the target state
|
||||
export async function inferNextSteps(
|
||||
currentApps: InstancedAppState,
|
||||
targetApps: InstancedAppState,
|
||||
{
|
||||
ignoreImages = false,
|
||||
downloading = [] as number[],
|
||||
availableImages = [] as Image[],
|
||||
containerIdsByAppId = {} as { [appId: number]: Dictionary<string> },
|
||||
} = {},
|
||||
) {
|
||||
// get some required data
|
||||
const [{ localMode, delta }, cleanupNeeded] = await Promise.all([
|
||||
config.getMany(['localMode', 'delta']),
|
||||
imageManager.isCleanupNeeded(),
|
||||
]);
|
||||
|
||||
if (localMode) {
|
||||
ignoreImages = localMode;
|
||||
}
|
||||
|
@ -1,772 +0,0 @@
|
||||
import * as _ from 'lodash';
|
||||
import { expect } from 'chai';
|
||||
|
||||
import Service from '../src/compose/service';
|
||||
import {
|
||||
ServiceComposeConfig,
|
||||
ServiceConfig,
|
||||
} from '../src/compose/types/service';
|
||||
import * as constants from '../src/lib/constants';
|
||||
|
||||
const configs = {
|
||||
simple: {
|
||||
compose: require('./data/docker-states/simple/compose.json'),
|
||||
imageInfo: require('./data/docker-states/simple/imageInfo.json'),
|
||||
inspect: require('./data/docker-states/simple/inspect.json'),
|
||||
},
|
||||
entrypoint: {
|
||||
compose: require('./data/docker-states/entrypoint/compose.json'),
|
||||
imageInfo: require('./data/docker-states/entrypoint/imageInfo.json'),
|
||||
inspect: require('./data/docker-states/entrypoint/inspect.json'),
|
||||
},
|
||||
networkModeService: {
|
||||
compose: require('./data/docker-states/network-mode-service/compose.json'),
|
||||
imageInfo: require('./data/docker-states/network-mode-service/imageInfo.json'),
|
||||
inspect: require('./data/docker-states/network-mode-service/inspect.json'),
|
||||
},
|
||||
};
|
||||
|
||||
describe('compose/service', () => {
|
||||
it('extends environment variables properly', async () => {
|
||||
const extendEnvVarsOpts = {
|
||||
uuid: '1234',
|
||||
appName: 'awesomeApp',
|
||||
commit: 'abcdef',
|
||||
name: 'awesomeDevice',
|
||||
version: 'v1.0.0',
|
||||
deviceArch: 'amd64',
|
||||
deviceType: 'raspberry-pi',
|
||||
osVersion: 'Resin OS 2.0.2',
|
||||
};
|
||||
const service = {
|
||||
appId: '23',
|
||||
releaseId: 2,
|
||||
serviceId: 3,
|
||||
imageId: 4,
|
||||
serviceName: 'serviceName',
|
||||
environment: {
|
||||
FOO: 'bar',
|
||||
A_VARIABLE: 'ITS_VALUE',
|
||||
},
|
||||
};
|
||||
const s = await Service.fromComposeObject(
|
||||
service,
|
||||
extendEnvVarsOpts as any,
|
||||
);
|
||||
|
||||
expect(s.config.environment).to.deep.equal({
|
||||
FOO: 'bar',
|
||||
A_VARIABLE: 'ITS_VALUE',
|
||||
RESIN_APP_ID: '23',
|
||||
RESIN_APP_NAME: 'awesomeApp',
|
||||
RESIN_DEVICE_UUID: '1234',
|
||||
RESIN_DEVICE_ARCH: 'amd64',
|
||||
RESIN_DEVICE_TYPE: 'raspberry-pi',
|
||||
RESIN_HOST_OS_VERSION: 'Resin OS 2.0.2',
|
||||
RESIN_SERVICE_NAME: 'serviceName',
|
||||
RESIN_APP_LOCK_PATH: '/tmp/balena/updates.lock',
|
||||
RESIN_SERVICE_KILL_ME_PATH: '/tmp/balena/handover-complete',
|
||||
RESIN: '1',
|
||||
BALENA_APP_ID: '23',
|
||||
BALENA_APP_NAME: 'awesomeApp',
|
||||
BALENA_DEVICE_UUID: '1234',
|
||||
BALENA_DEVICE_ARCH: 'amd64',
|
||||
BALENA_DEVICE_TYPE: 'raspberry-pi',
|
||||
BALENA_HOST_OS_VERSION: 'Resin OS 2.0.2',
|
||||
BALENA_SERVICE_NAME: 'serviceName',
|
||||
BALENA_APP_LOCK_PATH: '/tmp/balena/updates.lock',
|
||||
BALENA_SERVICE_HANDOVER_COMPLETE_PATH: '/tmp/balena/handover-complete',
|
||||
BALENA: '1',
|
||||
USER: 'root',
|
||||
});
|
||||
});
|
||||
|
||||
it('returns the correct default bind mounts', async () => {
|
||||
const s = await Service.fromComposeObject(
|
||||
{
|
||||
appId: '1234',
|
||||
serviceName: 'foo',
|
||||
releaseId: 2,
|
||||
serviceId: 3,
|
||||
imageId: 4,
|
||||
},
|
||||
{ appName: 'foo' } as any,
|
||||
);
|
||||
const binds = (Service as any).defaultBinds(s.appId, s.serviceName);
|
||||
expect(binds).to.deep.equal([
|
||||
'/tmp/balena-supervisor/services/1234/foo:/tmp/resin',
|
||||
'/tmp/balena-supervisor/services/1234/foo:/tmp/balena',
|
||||
]);
|
||||
});
|
||||
|
||||
it('produces the correct port bindings and exposed ports', async () => {
|
||||
const s = await Service.fromComposeObject(
|
||||
{
|
||||
appId: '1234',
|
||||
serviceName: 'foo',
|
||||
releaseId: 2,
|
||||
serviceId: 3,
|
||||
imageId: 4,
|
||||
expose: [1000, '243/udp'],
|
||||
ports: ['2344', '2345:2354', '2346:2367/udp'],
|
||||
},
|
||||
{
|
||||
imageInfo: {
|
||||
Config: {
|
||||
ExposedPorts: {
|
||||
'53/tcp': {},
|
||||
'53/udp': {},
|
||||
'2354/tcp': {},
|
||||
},
|
||||
},
|
||||
},
|
||||
} as any,
|
||||
);
|
||||
|
||||
const ports = (s as any).generateExposeAndPorts();
|
||||
expect(ports.portBindings).to.deep.equal({
|
||||
'2344/tcp': [
|
||||
{
|
||||
HostIp: '',
|
||||
HostPort: '2344',
|
||||
},
|
||||
],
|
||||
'2354/tcp': [
|
||||
{
|
||||
HostIp: '',
|
||||
HostPort: '2345',
|
||||
},
|
||||
],
|
||||
'2367/udp': [
|
||||
{
|
||||
HostIp: '',
|
||||
HostPort: '2346',
|
||||
},
|
||||
],
|
||||
});
|
||||
expect(ports.exposedPorts).to.deep.equal({
|
||||
'1000/tcp': {},
|
||||
'243/udp': {},
|
||||
'2344/tcp': {},
|
||||
'2354/tcp': {},
|
||||
'2367/udp': {},
|
||||
'53/tcp': {},
|
||||
'53/udp': {},
|
||||
});
|
||||
});
|
||||
|
||||
it('correctly handles port ranges', async () => {
|
||||
const s = await Service.fromComposeObject(
|
||||
{
|
||||
appId: '1234',
|
||||
serviceName: 'foo',
|
||||
releaseId: 2,
|
||||
serviceId: 3,
|
||||
imageId: 4,
|
||||
expose: [1000, '243/udp'],
|
||||
ports: ['1000-1003:2000-2003'],
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
|
||||
const ports = (s as any).generateExposeAndPorts();
|
||||
expect(ports.portBindings).to.deep.equal({
|
||||
'2000/tcp': [
|
||||
{
|
||||
HostIp: '',
|
||||
HostPort: '1000',
|
||||
},
|
||||
],
|
||||
'2001/tcp': [
|
||||
{
|
||||
HostIp: '',
|
||||
HostPort: '1001',
|
||||
},
|
||||
],
|
||||
'2002/tcp': [
|
||||
{
|
||||
HostIp: '',
|
||||
HostPort: '1002',
|
||||
},
|
||||
],
|
||||
'2003/tcp': [
|
||||
{
|
||||
HostIp: '',
|
||||
HostPort: '1003',
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
expect(ports.exposedPorts).to.deep.equal({
|
||||
'1000/tcp': {},
|
||||
'2000/tcp': {},
|
||||
'2001/tcp': {},
|
||||
'2002/tcp': {},
|
||||
'2003/tcp': {},
|
||||
'243/udp': {},
|
||||
});
|
||||
});
|
||||
|
||||
it('should correctly handle large port ranges', async function () {
|
||||
this.timeout(60000);
|
||||
const s = await Service.fromComposeObject(
|
||||
{
|
||||
appId: '1234',
|
||||
serviceName: 'foo',
|
||||
releaseId: 2,
|
||||
serviceId: 3,
|
||||
imageId: 4,
|
||||
ports: ['5-65536:5-65536/tcp', '5-65536:5-65536/udp'],
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
|
||||
expect((s as any).generateExposeAndPorts()).to.not.throw;
|
||||
});
|
||||
|
||||
it('should correctly report implied exposed ports from portMappings', async () => {
|
||||
const service = await Service.fromComposeObject(
|
||||
{
|
||||
appId: 123456,
|
||||
serviceId: 123456,
|
||||
serviceName: 'test',
|
||||
ports: ['80:80', '100:100'],
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
|
||||
expect(service.config)
|
||||
.to.have.property('expose')
|
||||
.that.deep.equals(['80/tcp', '100/tcp']);
|
||||
});
|
||||
|
||||
it('should correctly handle spaces in volume definitions', async () => {
|
||||
const service = await Service.fromComposeObject(
|
||||
{
|
||||
appId: 123,
|
||||
serviceId: 123,
|
||||
serviceName: 'test',
|
||||
volumes: [
|
||||
'vol1:vol2',
|
||||
'vol3 :/usr/src/app',
|
||||
'vol4: /usr/src/app',
|
||||
'vol5 : vol6',
|
||||
],
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
|
||||
expect(service.config)
|
||||
.to.have.property('volumes')
|
||||
.that.deep.equals([
|
||||
'123_vol1:vol2',
|
||||
'123_vol3:/usr/src/app',
|
||||
'123_vol4:/usr/src/app',
|
||||
'123_vol5:vol6',
|
||||
|
||||
'/tmp/balena-supervisor/services/123/test:/tmp/resin',
|
||||
'/tmp/balena-supervisor/services/123/test:/tmp/balena',
|
||||
]);
|
||||
});
|
||||
|
||||
it('should correctly handle io.balena.features.balena-socket label', async () => {
|
||||
const service = await Service.fromComposeObject(
|
||||
{
|
||||
appId: 123456,
|
||||
serviceId: 123456,
|
||||
serviceName: 'foobar',
|
||||
labels: {
|
||||
'io.balena.features.balena-socket': '1',
|
||||
},
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
|
||||
expect(service.config.volumes).to.include.members([
|
||||
`${constants.dockerSocket}:${constants.dockerSocket}`,
|
||||
`${constants.dockerSocket}:/host/run/balena-engine.sock`,
|
||||
`${constants.dockerSocket}:/var/run/balena.sock`,
|
||||
]);
|
||||
|
||||
expect(service.config.environment['DOCKER_HOST']).to.equal(
|
||||
'unix:///host/run/balena-engine.sock',
|
||||
);
|
||||
});
|
||||
|
||||
describe('Ordered array parameters', () => {
|
||||
it('Should correctly compare ordered array parameters', async () => {
|
||||
const svc1 = await Service.fromComposeObject(
|
||||
{
|
||||
appId: 1,
|
||||
serviceId: 1,
|
||||
serviceName: 'test',
|
||||
dns: ['8.8.8.8', '1.1.1.1'],
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
let svc2 = await Service.fromComposeObject(
|
||||
{
|
||||
appId: 1,
|
||||
serviceId: 1,
|
||||
serviceName: 'test',
|
||||
dns: ['8.8.8.8', '1.1.1.1'],
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
expect(svc1.isEqualConfig(svc2, {})).to.be.true;
|
||||
|
||||
svc2 = await Service.fromComposeObject(
|
||||
{
|
||||
appId: 1,
|
||||
serviceId: 1,
|
||||
serviceName: 'test',
|
||||
dns: ['1.1.1.1', '8.8.8.8'],
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
expect(svc1.isEqualConfig(svc2, {})).to.be.false;
|
||||
});
|
||||
|
||||
it('should correctly compare non-ordered array parameters', async () => {
|
||||
const svc1 = await Service.fromComposeObject(
|
||||
{
|
||||
appId: 1,
|
||||
serviceId: 1,
|
||||
serviceName: 'test',
|
||||
volumes: ['abcdef', 'ghijk'],
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
let svc2 = await Service.fromComposeObject(
|
||||
{
|
||||
appId: 1,
|
||||
serviceId: 1,
|
||||
serviceName: 'test',
|
||||
volumes: ['abcdef', 'ghijk'],
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
expect(svc1.isEqualConfig(svc2, {})).to.be.true;
|
||||
|
||||
svc2 = await Service.fromComposeObject(
|
||||
{
|
||||
appId: 1,
|
||||
serviceId: 1,
|
||||
serviceName: 'test',
|
||||
volumes: ['ghijk', 'abcdef'],
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
expect(svc1.isEqualConfig(svc2, {})).to.be.true;
|
||||
});
|
||||
|
||||
it('should correctly compare both ordered and non-ordered array parameters', async () => {
|
||||
const svc1 = await Service.fromComposeObject(
|
||||
{
|
||||
appId: 1,
|
||||
serviceId: 1,
|
||||
serviceName: 'test',
|
||||
volumes: ['abcdef', 'ghijk'],
|
||||
dns: ['8.8.8.8', '1.1.1.1'],
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
const svc2 = await Service.fromComposeObject(
|
||||
{
|
||||
appId: 1,
|
||||
serviceId: 1,
|
||||
serviceName: 'test',
|
||||
volumes: ['ghijk', 'abcdef'],
|
||||
dns: ['8.8.8.8', '1.1.1.1'],
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
expect(svc1.isEqualConfig(svc2, {})).to.be.true;
|
||||
});
|
||||
});
|
||||
|
||||
describe('parseMemoryNumber()', () => {
|
||||
const makeComposeServiceWithLimit = async (memLimit?: string | number) =>
|
||||
await Service.fromComposeObject(
|
||||
{
|
||||
appId: 123456,
|
||||
serviceId: 123456,
|
||||
serviceName: 'foobar',
|
||||
mem_limit: memLimit,
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
|
||||
it('should correctly parse memory number strings without a unit', async () =>
|
||||
expect(
|
||||
(await makeComposeServiceWithLimit('64')).config.memLimit,
|
||||
).to.equal(64));
|
||||
|
||||
it('should correctly apply the default value', async () =>
|
||||
expect(
|
||||
(await makeComposeServiceWithLimit(undefined)).config.memLimit,
|
||||
).to.equal(0));
|
||||
|
||||
it('should correctly support parsing numbers as memory limits', async () =>
|
||||
expect((await makeComposeServiceWithLimit(64)).config.memLimit).to.equal(
|
||||
64,
|
||||
));
|
||||
|
||||
it('should correctly parse memory number strings that use a byte unit', async () => {
|
||||
expect(
|
||||
(await makeComposeServiceWithLimit('64b')).config.memLimit,
|
||||
).to.equal(64);
|
||||
expect(
|
||||
(await makeComposeServiceWithLimit('64B')).config.memLimit,
|
||||
).to.equal(64);
|
||||
});
|
||||
|
||||
it('should correctly parse memory number strings that use a kilobyte unit', async () => {
|
||||
expect(
|
||||
(await makeComposeServiceWithLimit('64k')).config.memLimit,
|
||||
).to.equal(65536);
|
||||
expect(
|
||||
(await makeComposeServiceWithLimit('64K')).config.memLimit,
|
||||
).to.equal(65536);
|
||||
|
||||
expect(
|
||||
(await makeComposeServiceWithLimit('64kb')).config.memLimit,
|
||||
).to.equal(65536);
|
||||
expect(
|
||||
(await makeComposeServiceWithLimit('64Kb')).config.memLimit,
|
||||
).to.equal(65536);
|
||||
});
|
||||
|
||||
it('should correctly parse memory number strings that use a megabyte unit', async () => {
|
||||
expect(
|
||||
(await makeComposeServiceWithLimit('64m')).config.memLimit,
|
||||
).to.equal(67108864);
|
||||
expect(
|
||||
(await makeComposeServiceWithLimit('64M')).config.memLimit,
|
||||
).to.equal(67108864);
|
||||
|
||||
expect(
|
||||
(await makeComposeServiceWithLimit('64mb')).config.memLimit,
|
||||
).to.equal(67108864);
|
||||
expect(
|
||||
(await makeComposeServiceWithLimit('64Mb')).config.memLimit,
|
||||
).to.equal(67108864);
|
||||
});
|
||||
|
||||
it('should correctly parse memory number strings that use a gigabyte unit', async () => {
|
||||
expect(
|
||||
(await makeComposeServiceWithLimit('64g')).config.memLimit,
|
||||
).to.equal(68719476736);
|
||||
expect(
|
||||
(await makeComposeServiceWithLimit('64G')).config.memLimit,
|
||||
).to.equal(68719476736);
|
||||
|
||||
expect(
|
||||
(await makeComposeServiceWithLimit('64gb')).config.memLimit,
|
||||
).to.equal(68719476736);
|
||||
expect(
|
||||
(await makeComposeServiceWithLimit('64Gb')).config.memLimit,
|
||||
).to.equal(68719476736);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getWorkingDir', () => {
|
||||
const makeComposeServiceWithWorkdir = async (workdir?: string) =>
|
||||
await Service.fromComposeObject(
|
||||
{
|
||||
appId: 123456,
|
||||
serviceId: 123456,
|
||||
serviceName: 'foobar',
|
||||
workingDir: workdir,
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
|
||||
it('should remove a trailing slash', async () => {
|
||||
expect(
|
||||
(await makeComposeServiceWithWorkdir('/usr/src/app/')).config
|
||||
.workingDir,
|
||||
).to.equal('/usr/src/app');
|
||||
expect(
|
||||
(await makeComposeServiceWithWorkdir('/')).config.workingDir,
|
||||
).to.equal('/');
|
||||
expect(
|
||||
(await makeComposeServiceWithWorkdir('/usr/src/app')).config.workingDir,
|
||||
).to.equal('/usr/src/app');
|
||||
expect(
|
||||
(await makeComposeServiceWithWorkdir('')).config.workingDir,
|
||||
).to.equal('');
|
||||
});
|
||||
});
|
||||
|
||||
describe('io.balena.features.gpu: Docker <-> Compose config', () => {
|
||||
const gpuDeviceRequest = {
|
||||
Driver: '',
|
||||
DeviceIDs: [],
|
||||
Count: 1,
|
||||
Capabilities: [['gpu']],
|
||||
Options: {},
|
||||
};
|
||||
it('should succeed from compose object', async () => {
|
||||
const s = await Service.fromComposeObject(
|
||||
{
|
||||
appId: 123,
|
||||
serviceId: 123,
|
||||
serviceName: 'test',
|
||||
labels: {
|
||||
'io.balena.features.gpu': '1',
|
||||
},
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
|
||||
expect(s.config)
|
||||
.to.have.property('deviceRequests')
|
||||
.that.deep.equals([gpuDeviceRequest]);
|
||||
});
|
||||
|
||||
it('should succeed from docker container', () => {
|
||||
const dockerCfg = _.cloneDeep(
|
||||
require('./data/docker-states/simple/inspect.json'),
|
||||
);
|
||||
dockerCfg.HostConfig.DeviceRequests = [gpuDeviceRequest];
|
||||
const s = Service.fromDockerContainer(dockerCfg);
|
||||
|
||||
expect(s.config)
|
||||
.to.have.property('deviceRequests')
|
||||
.that.deep.equals([gpuDeviceRequest]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Docker <-> Compose config', () => {
|
||||
const omitConfigForComparison = (config: ServiceConfig) =>
|
||||
_.omit(config, ['running', 'networks']);
|
||||
|
||||
it('should be identical when converting a simple service', async () => {
|
||||
const composeSvc = await Service.fromComposeObject(
|
||||
configs.simple.compose,
|
||||
configs.simple.imageInfo,
|
||||
);
|
||||
const dockerSvc = Service.fromDockerContainer(configs.simple.inspect);
|
||||
|
||||
const composeConfig = omitConfigForComparison(composeSvc.config);
|
||||
const dockerConfig = omitConfigForComparison(dockerSvc.config);
|
||||
expect(composeConfig).to.deep.equal(dockerConfig);
|
||||
|
||||
expect(dockerSvc.isEqualConfig(composeSvc, {})).to.be.true;
|
||||
});
|
||||
|
||||
it('should correctly convert formats with a null entrypoint', async () => {
|
||||
const composeSvc = await Service.fromComposeObject(
|
||||
configs.entrypoint.compose,
|
||||
configs.entrypoint.imageInfo,
|
||||
);
|
||||
const dockerSvc = Service.fromDockerContainer(configs.entrypoint.inspect);
|
||||
|
||||
const composeConfig = omitConfigForComparison(composeSvc.config);
|
||||
const dockerConfig = omitConfigForComparison(dockerSvc.config);
|
||||
expect(composeConfig).to.deep.equal(dockerConfig);
|
||||
|
||||
expect(dockerSvc.isEqualConfig(composeSvc, {})).to.equals(true);
|
||||
});
|
||||
|
||||
describe('Networks', () => {
|
||||
it('should correctly convert networks from compose to docker format', async () => {
|
||||
const makeComposeServiceWithNetwork = async (
|
||||
networks: ServiceComposeConfig['networks'],
|
||||
) =>
|
||||
await Service.fromComposeObject(
|
||||
{
|
||||
appId: 123456,
|
||||
serviceId: 123456,
|
||||
serviceName: 'test',
|
||||
networks,
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
|
||||
expect(
|
||||
(
|
||||
await makeComposeServiceWithNetwork({
|
||||
balena: {
|
||||
ipv4Address: '1.2.3.4',
|
||||
},
|
||||
})
|
||||
).toDockerContainer({ deviceName: 'foo' } as any).NetworkingConfig,
|
||||
).to.deep.equal({
|
||||
EndpointsConfig: {
|
||||
'123456_balena': {
|
||||
IPAMConfig: {
|
||||
IPv4Address: '1.2.3.4',
|
||||
},
|
||||
Aliases: ['test'],
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
expect(
|
||||
(
|
||||
await makeComposeServiceWithNetwork({
|
||||
balena: {
|
||||
aliases: ['test', '1123'],
|
||||
ipv4Address: '1.2.3.4',
|
||||
ipv6Address: '5.6.7.8',
|
||||
linkLocalIps: ['123.123.123'],
|
||||
},
|
||||
})
|
||||
).toDockerContainer({ deviceName: 'foo' } as any).NetworkingConfig,
|
||||
).to.deep.equal({
|
||||
EndpointsConfig: {
|
||||
'123456_balena': {
|
||||
IPAMConfig: {
|
||||
IPv4Address: '1.2.3.4',
|
||||
IPv6Address: '5.6.7.8',
|
||||
LinkLocalIPs: ['123.123.123'],
|
||||
},
|
||||
Aliases: ['test', '1123'],
|
||||
},
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
it('should correctly convert Docker format to service format', () => {
|
||||
const dockerCfg = require('./data/docker-states/simple/inspect.json');
|
||||
|
||||
const makeServiceFromDockerWithNetwork = (networks: {
|
||||
[name: string]: any;
|
||||
}) => {
|
||||
const newConfig = _.cloneDeep(dockerCfg);
|
||||
newConfig.NetworkSettings = {
|
||||
Networks: networks,
|
||||
};
|
||||
return Service.fromDockerContainer(newConfig);
|
||||
};
|
||||
|
||||
expect(
|
||||
makeServiceFromDockerWithNetwork({
|
||||
'123456_balena': {
|
||||
IPAMConfig: {
|
||||
IPv4Address: '1.2.3.4',
|
||||
},
|
||||
Aliases: [],
|
||||
},
|
||||
}).config.networks,
|
||||
).to.deep.equal({
|
||||
'123456_balena': {
|
||||
ipv4Address: '1.2.3.4',
|
||||
},
|
||||
});
|
||||
|
||||
expect(
|
||||
makeServiceFromDockerWithNetwork({
|
||||
'123456_balena': {
|
||||
IPAMConfig: {
|
||||
IPv4Address: '1.2.3.4',
|
||||
IPv6Address: '5.6.7.8',
|
||||
LinkLocalIps: ['123.123.123'],
|
||||
},
|
||||
Aliases: ['test', '1123'],
|
||||
},
|
||||
}).config.networks,
|
||||
).to.deep.equal({
|
||||
'123456_balena': {
|
||||
ipv4Address: '1.2.3.4',
|
||||
ipv6Address: '5.6.7.8',
|
||||
linkLocalIps: ['123.123.123'],
|
||||
aliases: ['test', '1123'],
|
||||
},
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
return describe('Network mode=service:', () => {
|
||||
it('should correctly add a depends_on entry for the service', async () => {
|
||||
let s = await Service.fromComposeObject(
|
||||
{
|
||||
appId: '1234',
|
||||
serviceName: 'foo',
|
||||
releaseId: 2,
|
||||
serviceId: 3,
|
||||
imageId: 4,
|
||||
network_mode: 'service: test',
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
|
||||
expect(s.dependsOn).to.deep.equal(['test']);
|
||||
|
||||
s = await Service.fromComposeObject(
|
||||
{
|
||||
appId: '1234',
|
||||
serviceName: 'foo',
|
||||
releaseId: 2,
|
||||
serviceId: 3,
|
||||
imageId: 4,
|
||||
depends_on: ['another_service'],
|
||||
network_mode: 'service: test',
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
|
||||
expect(s.dependsOn).to.deep.equal(['another_service', 'test']);
|
||||
});
|
||||
|
||||
it('should correctly convert a network_mode service: to a container:', async () => {
|
||||
const s = await Service.fromComposeObject(
|
||||
{
|
||||
appId: '1234',
|
||||
serviceName: 'foo',
|
||||
releaseId: 2,
|
||||
serviceId: 3,
|
||||
imageId: 4,
|
||||
network_mode: 'service: test',
|
||||
},
|
||||
{ appName: 'test' } as any,
|
||||
);
|
||||
return expect(
|
||||
s.toDockerContainer({
|
||||
deviceName: '',
|
||||
containerIds: { test: 'abcdef' },
|
||||
}),
|
||||
)
|
||||
.to.have.property('HostConfig')
|
||||
.that.has.property('NetworkMode')
|
||||
.that.equals('container:abcdef');
|
||||
});
|
||||
|
||||
it('should not cause a container restart if a service: container has not changed', async () => {
|
||||
const composeSvc = await Service.fromComposeObject(
|
||||
configs.networkModeService.compose,
|
||||
configs.networkModeService.imageInfo,
|
||||
);
|
||||
const dockerSvc = Service.fromDockerContainer(
|
||||
configs.networkModeService.inspect,
|
||||
);
|
||||
|
||||
const composeConfig = omitConfigForComparison(composeSvc.config);
|
||||
const dockerConfig = omitConfigForComparison(dockerSvc.config);
|
||||
expect(composeConfig).to.not.deep.equal(dockerConfig);
|
||||
|
||||
expect(dockerSvc.isEqualConfig(composeSvc, { test: 'abcdef' })).to.be
|
||||
.true;
|
||||
});
|
||||
|
||||
it('should restart a container when its dependent network mode container changes', async () => {
|
||||
const composeSvc = await Service.fromComposeObject(
|
||||
configs.networkModeService.compose,
|
||||
configs.networkModeService.imageInfo,
|
||||
);
|
||||
const dockerSvc = Service.fromDockerContainer(
|
||||
configs.networkModeService.inspect,
|
||||
);
|
||||
|
||||
const composeConfig = omitConfigForComparison(composeSvc.config);
|
||||
const dockerConfig = omitConfigForComparison(dockerSvc.config);
|
||||
expect(composeConfig).to.not.deep.equal(dockerConfig);
|
||||
|
||||
return expect(dockerSvc.isEqualConfig(composeSvc, { test: 'qwerty' }))
|
||||
.to.be.false;
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
@ -1,622 +0,0 @@
|
||||
import * as Bluebird from 'bluebird';
|
||||
import * as _ from 'lodash';
|
||||
import { stub } from 'sinon';
|
||||
import { expect } from 'chai';
|
||||
|
||||
import Network from '../src/compose/network';
|
||||
|
||||
import Service from '../src/compose/service';
|
||||
import Volume from '../src/compose/volume';
|
||||
import * as deviceState from '../src/device-state';
|
||||
import * as dockerUtils from '../src/lib/docker-utils';
|
||||
import * as images from '../src/compose/images';
|
||||
|
||||
import prepare = require('./lib/prepare');
|
||||
import * as db from '../src/db';
|
||||
import * as dbFormat from '../src/device-state/db-format';
|
||||
import * as targetStateCache from '../src/device-state/target-state-cache';
|
||||
import * as config from '../src/config';
|
||||
import { TargetApplication, TargetApplications } from '../src/types/state';
|
||||
|
||||
import * as applicationManager from '../src/compose/application-manager';
|
||||
|
||||
let availableImages: any[] | null;
|
||||
let targetState: any[] | null;
|
||||
|
||||
const dependentStateFormat = {
|
||||
appId: 1234,
|
||||
image: 'foo/bar',
|
||||
commit: 'bar',
|
||||
releaseId: 3,
|
||||
name: 'app',
|
||||
config: { RESIN_FOO: 'var' },
|
||||
environment: { FOO: 'var2' },
|
||||
parentApp: 256,
|
||||
imageId: 45,
|
||||
};
|
||||
|
||||
const dependentStateFormatNormalised = {
|
||||
appId: 1234,
|
||||
image: 'foo/bar:latest',
|
||||
commit: 'bar',
|
||||
releaseId: 3,
|
||||
name: 'app',
|
||||
config: { RESIN_FOO: 'var' },
|
||||
environment: { FOO: 'var2' },
|
||||
parentApp: 256,
|
||||
imageId: 45,
|
||||
};
|
||||
|
||||
let currentState = (targetState = availableImages = null);
|
||||
|
||||
const dependentDBFormat = {
|
||||
appId: 1234,
|
||||
image: 'foo/bar:latest',
|
||||
commit: 'bar',
|
||||
releaseId: 3,
|
||||
name: 'app',
|
||||
config: JSON.stringify({ RESIN_FOO: 'var' }),
|
||||
environment: JSON.stringify({ FOO: 'var2' }),
|
||||
parentApp: 256,
|
||||
imageId: 45,
|
||||
};
|
||||
|
||||
describe.skip('ApplicationManager', function () {
|
||||
const originalInspectByName = images.inspectByName;
|
||||
before(async function () {
|
||||
await prepare();
|
||||
await deviceState.initialized;
|
||||
|
||||
this.applications = applicationManager;
|
||||
|
||||
// @ts-expect-error assigning to a RO property
|
||||
images.inspectByName = () =>
|
||||
Promise.resolve({
|
||||
Config: {
|
||||
Cmd: ['someCommand'],
|
||||
Entrypoint: ['theEntrypoint'],
|
||||
Env: [],
|
||||
Labels: {},
|
||||
Volumes: [],
|
||||
},
|
||||
});
|
||||
|
||||
stub(dockerUtils, 'getNetworkGateway').returns(
|
||||
Bluebird.Promise.resolve('172.17.0.1'),
|
||||
);
|
||||
stub(dockerUtils.docker, 'listContainers').returns(
|
||||
Bluebird.Promise.resolve([]),
|
||||
);
|
||||
stub(dockerUtils.docker, 'listImages').returns(
|
||||
Bluebird.Promise.resolve([]),
|
||||
);
|
||||
stub(Service as any, 'extendEnvVars').callsFake(function (env) {
|
||||
env['ADDITIONAL_ENV_VAR'] = 'foo';
|
||||
return env;
|
||||
});
|
||||
|
||||
this.normaliseCurrent = async function (current: {
|
||||
local: { apps: Dictionary<TargetApplication> };
|
||||
}) {
|
||||
const currentCloned: any = _.cloneDeep(current);
|
||||
currentCloned.local.apps = {};
|
||||
|
||||
_.each(current.local.apps, (app, appId) => {
|
||||
const appCloned = {
|
||||
...app,
|
||||
services: _.mapValues(app.services, (svc) =>
|
||||
// @ts-ignore
|
||||
Service.fromComposeObject(svc, { appName: 'test' }),
|
||||
),
|
||||
networks: _.mapValues(app.networks, (conf, name) =>
|
||||
Network.fromComposeObject(name, parseInt(appId, 10), conf),
|
||||
),
|
||||
volumes: _.mapValues(app.volumes, (conf, name) =>
|
||||
Volume.fromComposeObject(name, parseInt(appId, 10), conf),
|
||||
),
|
||||
};
|
||||
currentCloned.local.apps[parseInt(appId, 10)] = appCloned;
|
||||
});
|
||||
return currentCloned;
|
||||
};
|
||||
|
||||
this.normaliseTarget = async (
|
||||
target: {
|
||||
local: { apps: TargetApplications };
|
||||
},
|
||||
available: any,
|
||||
) => {
|
||||
const source = await config.get('apiEndpoint');
|
||||
const cloned: any = _.cloneDeep(target);
|
||||
|
||||
// @ts-ignore types don't quite match up
|
||||
await dbFormat.setApps(target.local.apps, source);
|
||||
|
||||
cloned.local.apps = await dbFormat.getApps();
|
||||
|
||||
// We mock what createTargetService does when an image is available
|
||||
_.each(cloned.local.apps, (app) => {
|
||||
_.each(app.services, (svc) => {
|
||||
const img = _.find(available, (i) => i.name === svc.config.image);
|
||||
if (img != null) {
|
||||
svc.config.image = img.dockerImageId;
|
||||
}
|
||||
});
|
||||
});
|
||||
return cloned;
|
||||
};
|
||||
});
|
||||
|
||||
beforeEach(async () => {
|
||||
({
|
||||
currentState,
|
||||
targetState,
|
||||
availableImages,
|
||||
} = require('./lib/application-manager-test-states'));
|
||||
await db.models('app').del();
|
||||
// @ts-expect-error modification of a RO property
|
||||
targetStateCache.targetState = undefined;
|
||||
});
|
||||
|
||||
after(async function () {
|
||||
// @ts-expect-error Assigning to a RO property
|
||||
images.inspectByName = originalInspectByName;
|
||||
// @ts-expect-error restore on non-stubbed type
|
||||
dockerUtils.getNetworkGateway.restore();
|
||||
// @ts-expect-error restore on non-stubbed type
|
||||
dockerUtils.docker.listContainers.restore();
|
||||
// @ts-expect-error restore on non-stubbed type
|
||||
dockerUtils.docker.listImages.restore();
|
||||
// @ts-expect-error use of private function
|
||||
Service.extendEnvVars.restore();
|
||||
|
||||
await db.models('app').del();
|
||||
// @ts-expect-error modification of a RO property
|
||||
targetStateCache.targetState = undefined;
|
||||
});
|
||||
|
||||
it('should init', async () => {
|
||||
await applicationManager.initialized;
|
||||
});
|
||||
|
||||
it('infers a start step when all that changes is a running state', function () {
|
||||
return Bluebird.Promise.join(
|
||||
// @ts-ignore
|
||||
this.normaliseCurrent(currentState[0]),
|
||||
// @ts-ignore
|
||||
this.normaliseTarget(targetState[0], availableImages[0]),
|
||||
(current, target) => {
|
||||
const steps = this.applications._inferNextSteps(
|
||||
false,
|
||||
// @ts-ignore
|
||||
availableImages[0],
|
||||
[],
|
||||
true,
|
||||
current,
|
||||
target,
|
||||
false,
|
||||
{},
|
||||
{},
|
||||
);
|
||||
return expect(steps).to.eventually.deep.equal([
|
||||
{
|
||||
action: 'start',
|
||||
current: current.local.apps['1234'].services[24],
|
||||
target: target.local.apps['1234'].services[24],
|
||||
serviceId: 24,
|
||||
appId: 1234,
|
||||
options: {},
|
||||
},
|
||||
]);
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it('infers a kill step when a service has to be removed', function () {
|
||||
return Bluebird.Promise.join(
|
||||
// @ts-ignore
|
||||
this.normaliseCurrent(currentState[0]),
|
||||
// @ts-ignore
|
||||
this.normaliseTarget(targetState[1], availableImages[0]),
|
||||
(current, target) => {
|
||||
const steps = this.applications._inferNextSteps(
|
||||
false,
|
||||
// @ts-ignore
|
||||
availableImages[0],
|
||||
[],
|
||||
true,
|
||||
current,
|
||||
target,
|
||||
false,
|
||||
{},
|
||||
{},
|
||||
);
|
||||
return expect(steps).to.eventually.deep.equal([
|
||||
{
|
||||
action: 'kill',
|
||||
current: current.local.apps['1234'].services[24],
|
||||
target: undefined,
|
||||
serviceId: 24,
|
||||
appId: 1234,
|
||||
options: {},
|
||||
},
|
||||
]);
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it('infers a fetch step when a service has to be updated', function () {
|
||||
return Bluebird.Promise.join(
|
||||
// @ts-ignore
|
||||
this.normaliseCurrent(currentState[0]),
|
||||
// @ts-ignore
|
||||
this.normaliseTarget(targetState[2], availableImages[0]),
|
||||
(current, target) => {
|
||||
const steps = this.applications._inferNextSteps(
|
||||
false,
|
||||
// @ts-ignore
|
||||
availableImages[0],
|
||||
[],
|
||||
true,
|
||||
current,
|
||||
target,
|
||||
false,
|
||||
{},
|
||||
{},
|
||||
);
|
||||
return expect(steps).to.eventually.deep.equal([
|
||||
{
|
||||
action: 'fetch',
|
||||
image: this.applications.imageForService(
|
||||
target.local.apps['1234'].services[24],
|
||||
),
|
||||
serviceId: 24,
|
||||
appId: 1234,
|
||||
serviceName: 'anotherService',
|
||||
},
|
||||
]);
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it('does not infer a fetch step when the download is already in progress', function () {
|
||||
return Bluebird.Promise.join(
|
||||
// @ts-ignore
|
||||
this.normaliseCurrent(currentState[0]),
|
||||
// @ts-ignore
|
||||
this.normaliseTarget(targetState[2], availableImages[0]),
|
||||
(current, target) => {
|
||||
const steps = this.applications._inferNextSteps(
|
||||
false,
|
||||
// @ts-ignore
|
||||
availableImages[0],
|
||||
[target.local.apps['1234'].services[24].imageId],
|
||||
true,
|
||||
current,
|
||||
target,
|
||||
false,
|
||||
{},
|
||||
{},
|
||||
);
|
||||
return expect(steps).to.eventually.deep.equal([
|
||||
{ action: 'noop', appId: 1234 },
|
||||
]);
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it('infers a kill step when a service has to be updated but the strategy is kill-then-download', function () {
|
||||
return Bluebird.Promise.join(
|
||||
// @ts-ignore
|
||||
this.normaliseCurrent(currentState[0]),
|
||||
// @ts-ignore
|
||||
this.normaliseTarget(targetState[3], availableImages[0]),
|
||||
(current, target) => {
|
||||
const steps = this.applications._inferNextSteps(
|
||||
false,
|
||||
// @ts-ignore
|
||||
availableImages[0],
|
||||
[],
|
||||
true,
|
||||
current,
|
||||
target,
|
||||
false,
|
||||
{},
|
||||
{},
|
||||
);
|
||||
return expect(steps).to.eventually.deep.equal([
|
||||
{
|
||||
action: 'kill',
|
||||
current: current.local.apps['1234'].services[24],
|
||||
target: target.local.apps['1234'].services[24],
|
||||
serviceId: 24,
|
||||
appId: 1234,
|
||||
options: {},
|
||||
},
|
||||
]);
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it('does not infer to kill a service with default strategy if a dependency is not downloaded', function () {
|
||||
return Bluebird.Promise.join(
|
||||
// @ts-ignore
|
||||
this.normaliseCurrent(currentState[4]),
|
||||
// @ts-ignore
|
||||
this.normaliseTarget(targetState[4], availableImages[2]),
|
||||
(current, target) => {
|
||||
const steps = this.applications._inferNextSteps(
|
||||
false,
|
||||
// @ts-ignore
|
||||
availableImages[2],
|
||||
[],
|
||||
true,
|
||||
current,
|
||||
target,
|
||||
false,
|
||||
{},
|
||||
{},
|
||||
);
|
||||
return expect(steps).to.eventually.have.deep.members([
|
||||
{
|
||||
action: 'fetch',
|
||||
image: this.applications.imageForService(
|
||||
target.local.apps['1234'].services[23],
|
||||
),
|
||||
serviceId: 23,
|
||||
appId: 1234,
|
||||
serviceName: 'aservice',
|
||||
},
|
||||
{ action: 'noop', appId: 1234 },
|
||||
]);
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it('infers to kill several services as long as there is no unmet dependency', function () {
|
||||
return Bluebird.Promise.join(
|
||||
// @ts-ignore
|
||||
this.normaliseCurrent(currentState[0]),
|
||||
// @ts-ignore
|
||||
this.normaliseTarget(targetState[5], availableImages[1]),
|
||||
(current, target) => {
|
||||
const steps = this.applications._inferNextSteps(
|
||||
false,
|
||||
// @ts-ignore
|
||||
availableImages[1],
|
||||
[],
|
||||
true,
|
||||
current,
|
||||
target,
|
||||
false,
|
||||
{},
|
||||
{},
|
||||
);
|
||||
return expect(steps).to.eventually.have.deep.members([
|
||||
{
|
||||
action: 'kill',
|
||||
current: current.local.apps['1234'].services[23],
|
||||
target: target.local.apps['1234'].services[23],
|
||||
serviceId: 23,
|
||||
appId: 1234,
|
||||
options: {},
|
||||
},
|
||||
{
|
||||
action: 'kill',
|
||||
current: current.local.apps['1234'].services[24],
|
||||
target: target.local.apps['1234'].services[24],
|
||||
serviceId: 24,
|
||||
appId: 1234,
|
||||
options: {},
|
||||
},
|
||||
]);
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it('infers to start the dependency first', function () {
|
||||
return Bluebird.Promise.join(
|
||||
// @ts-ignore
|
||||
this.normaliseCurrent(currentState[1]),
|
||||
// @ts-ignore
|
||||
this.normaliseTarget(targetState[4], availableImages[1]),
|
||||
(current, target) => {
|
||||
const steps = this.applications._inferNextSteps(
|
||||
false,
|
||||
// @ts-ignore
|
||||
availableImages[1],
|
||||
[],
|
||||
true,
|
||||
current,
|
||||
target,
|
||||
false,
|
||||
{},
|
||||
{},
|
||||
);
|
||||
return expect(steps).to.eventually.have.deep.members([
|
||||
{
|
||||
action: 'start',
|
||||
current: null,
|
||||
target: target.local.apps['1234'].services[23],
|
||||
serviceId: 23,
|
||||
appId: 1234,
|
||||
options: {},
|
||||
},
|
||||
]);
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it('infers to start a service once its dependency has been met', function () {
|
||||
return Bluebird.Promise.join(
|
||||
// @ts-ignore
|
||||
this.normaliseCurrent(currentState[2]),
|
||||
// @ts-ignore
|
||||
this.normaliseTarget(targetState[4], availableImages[1]),
|
||||
(current, target) => {
|
||||
const steps = this.applications._inferNextSteps(
|
||||
false,
|
||||
// @ts-ignore
|
||||
availableImages[1],
|
||||
[],
|
||||
true,
|
||||
current,
|
||||
target,
|
||||
false,
|
||||
{},
|
||||
{},
|
||||
{},
|
||||
);
|
||||
return expect(steps).to.eventually.have.deep.members([
|
||||
{
|
||||
action: 'start',
|
||||
current: null,
|
||||
target: target.local.apps['1234'].services[24],
|
||||
serviceId: 24,
|
||||
appId: 1234,
|
||||
options: {},
|
||||
},
|
||||
]);
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it('infers to remove spurious containers', function () {
|
||||
return Bluebird.Promise.join(
|
||||
// @ts-ignore
|
||||
this.normaliseCurrent(currentState[3]),
|
||||
// @ts-ignore
|
||||
this.normaliseTarget(targetState[4], availableImages[1]),
|
||||
(current, target) => {
|
||||
const steps = this.applications._inferNextSteps(
|
||||
false,
|
||||
// @ts-ignore
|
||||
availableImages[1],
|
||||
[],
|
||||
true,
|
||||
current,
|
||||
target,
|
||||
false,
|
||||
{},
|
||||
{},
|
||||
);
|
||||
return expect(steps).to.eventually.have.deep.members([
|
||||
{
|
||||
action: 'kill',
|
||||
current: current.local.apps['1234'].services[23],
|
||||
target: undefined,
|
||||
serviceId: 23,
|
||||
appId: 1234,
|
||||
options: {},
|
||||
},
|
||||
{
|
||||
action: 'start',
|
||||
current: null,
|
||||
target: target.local.apps['1234'].services[24],
|
||||
serviceId: 24,
|
||||
appId: 1234,
|
||||
options: {},
|
||||
},
|
||||
]);
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it('converts a dependent app from a state format to a db format, normalising the image name', function () {
|
||||
const app = this.applications.proxyvisor.normaliseDependentAppForDB(
|
||||
dependentStateFormat,
|
||||
);
|
||||
return expect(app).to.eventually.deep.equal(dependentDBFormat);
|
||||
});
|
||||
|
||||
it('converts a dependent app in DB format into state format', function () {
|
||||
const app = this.applications.proxyvisor.normaliseDependentAppFromDB(
|
||||
dependentDBFormat,
|
||||
);
|
||||
return expect(app).to.eventually.deep.equal(dependentStateFormatNormalised);
|
||||
});
|
||||
|
||||
return describe('Volumes', function () {
|
||||
before(function () {
|
||||
return stub(this.applications, 'removeAllVolumesForApp').returns(
|
||||
Bluebird.Promise.resolve([
|
||||
{
|
||||
action: 'removeVolume',
|
||||
current: Volume.fromComposeObject('my_volume', 12, {}),
|
||||
},
|
||||
]),
|
||||
);
|
||||
});
|
||||
|
||||
after(function () {
|
||||
return this.applications.removeAllVolumesForApp.restore();
|
||||
});
|
||||
|
||||
it('should not remove volumes when they are no longer referenced', function () {
|
||||
return Bluebird.Promise.join(
|
||||
// @ts-ignore
|
||||
this.normaliseCurrent(currentState[6]),
|
||||
// @ts-ignore
|
||||
this.normaliseTarget(targetState[0], availableImages[0]),
|
||||
(current, target) => {
|
||||
return this.applications
|
||||
._inferNextSteps(
|
||||
false,
|
||||
// @ts-ignore
|
||||
availableImages[0],
|
||||
[],
|
||||
true,
|
||||
current,
|
||||
target,
|
||||
false,
|
||||
{},
|
||||
{},
|
||||
)
|
||||
.then(
|
||||
// @ts-ignore
|
||||
(steps) =>
|
||||
expect(
|
||||
_.every(steps, (s) => s.action !== 'removeVolume'),
|
||||
'Volumes from current app should not be removed',
|
||||
).to.be.true,
|
||||
);
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it('should remove volumes from previous applications', function () {
|
||||
return Bluebird.Promise.join(
|
||||
// @ts-ignore
|
||||
this.normaliseCurrent(currentState[5]),
|
||||
// @ts-ignore
|
||||
this.normaliseTarget(targetState[6], []),
|
||||
(current, target) => {
|
||||
return (
|
||||
this.applications
|
||||
._inferNextSteps(
|
||||
false,
|
||||
[],
|
||||
[],
|
||||
true,
|
||||
current,
|
||||
target,
|
||||
false,
|
||||
{},
|
||||
{},
|
||||
)
|
||||
// tslint:disable-next-line
|
||||
.then(function (steps: { current: any }[]) {
|
||||
expect(steps).to.have.length(1);
|
||||
expect(steps[0])
|
||||
.to.have.property('action')
|
||||
.that.equals('removeVolume');
|
||||
return expect(steps[0].current)
|
||||
.to.have.property('appId')
|
||||
.that.equals(12);
|
||||
})
|
||||
);
|
||||
},
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
@ -1,151 +0,0 @@
|
||||
import { expect } from 'chai';
|
||||
import { stub, SinonStub } from 'sinon';
|
||||
|
||||
import { docker } from '../src/lib/docker-utils';
|
||||
import * as logger from '../src/logger';
|
||||
import Volume from '../src/compose/volume';
|
||||
import logTypes = require('../src/lib/log-types');
|
||||
|
||||
describe('Compose volumes', () => {
|
||||
let createVolumeStub: SinonStub;
|
||||
let logSystemStub: SinonStub;
|
||||
let logMessageStub: SinonStub;
|
||||
before(() => {
|
||||
createVolumeStub = stub(docker, 'createVolume');
|
||||
logSystemStub = stub(logger, 'logSystemEvent');
|
||||
logMessageStub = stub(logger, 'logSystemMessage');
|
||||
});
|
||||
after(() => {
|
||||
createVolumeStub.restore();
|
||||
logSystemStub.restore();
|
||||
logMessageStub.restore();
|
||||
});
|
||||
|
||||
describe('Parsing volumes', () => {
|
||||
it('should correctly parse docker volumes', () => {
|
||||
const volume = Volume.fromDockerVolume({
|
||||
Driver: 'local',
|
||||
Labels: {
|
||||
'io.balena.supervised': 'true',
|
||||
},
|
||||
Mountpoint: '/var/lib/docker/volumes/1032480_one_volume/_data',
|
||||
Name: '1032480_one_volume',
|
||||
Options: {},
|
||||
Scope: 'local',
|
||||
});
|
||||
|
||||
expect(volume).to.have.property('appId').that.equals(1032480);
|
||||
expect(volume).to.have.property('name').that.equals('one_volume');
|
||||
expect(volume)
|
||||
.to.have.property('config')
|
||||
.that.has.property('labels')
|
||||
.that.deep.equals({
|
||||
'io.balena.supervised': 'true',
|
||||
});
|
||||
expect(volume)
|
||||
.to.have.property('config')
|
||||
.that.has.property('driverOpts')
|
||||
.that.deep.equals({});
|
||||
expect(volume)
|
||||
.to.have.property('config')
|
||||
.that.has.property('driver')
|
||||
.that.equals('local');
|
||||
});
|
||||
|
||||
it('should correctly parse compose volumes without an explicit driver', () => {
|
||||
const volume = Volume.fromComposeObject('one_volume', 1032480, {
|
||||
driver_opts: {
|
||||
opt1: 'test',
|
||||
},
|
||||
labels: {
|
||||
'my-label': 'test-label',
|
||||
},
|
||||
});
|
||||
|
||||
expect(volume).to.have.property('appId').that.equals(1032480);
|
||||
expect(volume).to.have.property('name').that.equals('one_volume');
|
||||
expect(volume)
|
||||
.to.have.property('config')
|
||||
.that.has.property('labels')
|
||||
.that.deep.equals({
|
||||
'io.balena.supervised': 'true',
|
||||
'my-label': 'test-label',
|
||||
});
|
||||
expect(volume)
|
||||
.to.have.property('config')
|
||||
.that.has.property('driverOpts')
|
||||
.that.deep.equals({
|
||||
opt1: 'test',
|
||||
});
|
||||
expect(volume)
|
||||
.to.have.property('config')
|
||||
.that.has.property('driver')
|
||||
.that.equals('local');
|
||||
});
|
||||
|
||||
it('should correctly parse compose volumes with an explicit driver', () => {
|
||||
const volume = Volume.fromComposeObject('one_volume', 1032480, {
|
||||
driver: 'other',
|
||||
driver_opts: {
|
||||
opt1: 'test',
|
||||
},
|
||||
labels: {
|
||||
'my-label': 'test-label',
|
||||
},
|
||||
});
|
||||
|
||||
expect(volume).to.have.property('appId').that.equals(1032480);
|
||||
expect(volume).to.have.property('name').that.equals('one_volume');
|
||||
expect(volume)
|
||||
.to.have.property('config')
|
||||
.that.has.property('labels')
|
||||
.that.deep.equals({
|
||||
'io.balena.supervised': 'true',
|
||||
'my-label': 'test-label',
|
||||
});
|
||||
expect(volume)
|
||||
.to.have.property('config')
|
||||
.that.has.property('driverOpts')
|
||||
.that.deep.equals({
|
||||
opt1: 'test',
|
||||
});
|
||||
expect(volume)
|
||||
.to.have.property('config')
|
||||
.that.has.property('driver')
|
||||
.that.equals('other');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Generating docker options', () => {
|
||||
afterEach(() => {
|
||||
createVolumeStub.reset();
|
||||
logSystemStub.reset();
|
||||
logMessageStub.reset();
|
||||
});
|
||||
it('should correctly generate docker options', async () => {
|
||||
const volume = Volume.fromComposeObject('one_volume', 1032480, {
|
||||
driver_opts: {
|
||||
opt1: 'test',
|
||||
},
|
||||
labels: {
|
||||
'my-label': 'test-label',
|
||||
},
|
||||
});
|
||||
|
||||
await volume.create();
|
||||
expect(
|
||||
createVolumeStub.calledWith({
|
||||
Labels: {
|
||||
'my-label': 'test-label',
|
||||
'io.balena.supervised': 'true',
|
||||
},
|
||||
Options: {
|
||||
opt1: 'test',
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
expect(logSystemStub.calledWith(logTypes.createVolume));
|
||||
});
|
||||
});
|
||||
});
|
@ -1,257 +0,0 @@
|
||||
import { expect } from 'chai';
|
||||
import _ = require('lodash');
|
||||
|
||||
import * as dbFormat from '../src/device-state/db-format';
|
||||
import * as appMock from './lib/application-state-mock';
|
||||
import * as mockedDockerode from './lib/mocked-dockerode';
|
||||
|
||||
import * as applicationManager from '../src/compose/application-manager';
|
||||
import * as config from '../src/config';
|
||||
import * as deviceState from '../src/device-state';
|
||||
|
||||
import Service from '../src/compose/service';
|
||||
import Network from '../src/compose/network';
|
||||
|
||||
import prepare = require('./lib/prepare');
|
||||
import { intialiseContractRequirements } from '../src/lib/contracts';
|
||||
|
||||
describe('compose/application-manager', () => {
|
||||
before(async () => {
|
||||
await config.initialized;
|
||||
await dbFormat.setApps({}, 'test');
|
||||
});
|
||||
beforeEach(() => {
|
||||
appMock.mockSupervisorNetwork(true);
|
||||
});
|
||||
afterEach(() => {
|
||||
appMock.unmockAll();
|
||||
});
|
||||
|
||||
it('should create an App from current state', async () => {
|
||||
appMock.mockManagers(
|
||||
[
|
||||
Service.fromDockerContainer(
|
||||
require('./data/docker-states/simple/inspect.json'),
|
||||
),
|
||||
],
|
||||
[],
|
||||
[],
|
||||
);
|
||||
|
||||
const apps = await applicationManager.getCurrentApps();
|
||||
expect(Object.keys(apps)).to.have.length(1);
|
||||
const app = apps[1011165];
|
||||
expect(app).to.have.property('appId').that.equals(1011165);
|
||||
expect(app).to.have.property('services');
|
||||
const services = _.keyBy(app.services, 'serviceId');
|
||||
expect(services).to.have.property('43697');
|
||||
expect(services[43697]).to.have.property('serviceName').that.equals('main');
|
||||
});
|
||||
|
||||
it('should create multiple Apps when the current state reflects that', async () => {
|
||||
appMock.mockManagers(
|
||||
[
|
||||
Service.fromDockerContainer(
|
||||
require('./data/docker-states/simple/inspect.json'),
|
||||
),
|
||||
],
|
||||
[],
|
||||
[
|
||||
Network.fromDockerNetwork(
|
||||
require('./data/docker-states/networks/1623449_default.json'),
|
||||
),
|
||||
],
|
||||
);
|
||||
|
||||
const apps = await applicationManager.getCurrentApps();
|
||||
expect(Object.keys(apps)).to.deep.equal(['1011165', '1623449']);
|
||||
});
|
||||
|
||||
it('should infer that we need to create the supervisor network if it does not exist', async () => {
|
||||
appMock.mockSupervisorNetwork(false);
|
||||
appMock.mockManagers([], [], []);
|
||||
appMock.mockImages([], false, []);
|
||||
|
||||
const target = await deviceState.getTarget();
|
||||
|
||||
const steps = await applicationManager.getRequiredSteps(target.local.apps);
|
||||
expect(steps).to.have.length(1);
|
||||
expect(steps[0])
|
||||
.to.have.property('action')
|
||||
.that.equals('ensureSupervisorNetwork');
|
||||
});
|
||||
|
||||
it('should kill a service which depends on the supervisor network, if we need to create the network', async () => {
|
||||
appMock.mockSupervisorNetwork(false);
|
||||
appMock.mockManagers(
|
||||
[
|
||||
Service.fromDockerContainer(
|
||||
require('./data/docker-states/supervisor-api/inspect.json'),
|
||||
),
|
||||
],
|
||||
[],
|
||||
[],
|
||||
);
|
||||
appMock.mockImages([], false, []);
|
||||
const target = await deviceState.getTarget();
|
||||
|
||||
const steps = await applicationManager.getRequiredSteps(target.local.apps);
|
||||
|
||||
expect(steps).to.have.length(1);
|
||||
expect(steps[0]).to.have.property('action').that.equals('kill');
|
||||
expect(steps[0])
|
||||
.to.have.property('current')
|
||||
.that.has.property('serviceName')
|
||||
.that.equals('main');
|
||||
});
|
||||
|
||||
it('should infer a cleanup step when a cleanup is required', async () => {
|
||||
appMock.mockManagers([], [], []);
|
||||
appMock.mockImages([], true, []);
|
||||
|
||||
const target = await deviceState.getTarget();
|
||||
|
||||
const steps = await applicationManager.getRequiredSteps(target.local.apps);
|
||||
expect(steps).to.have.length(1);
|
||||
expect(steps[0]).to.have.property('action').that.equals('cleanup');
|
||||
});
|
||||
|
||||
it('should infer that an image should be removed if it is no longer referenced in current or target state', async () => {
|
||||
appMock.mockManagers([], [], []);
|
||||
appMock.mockImages([], false, [
|
||||
{
|
||||
name: 'registry2.balena-cloud.com/v2/asdasdasdasd@sha256:10',
|
||||
appId: 1,
|
||||
serviceId: 1,
|
||||
serviceName: 'test',
|
||||
imageId: 10,
|
||||
dependent: 0,
|
||||
releaseId: 4,
|
||||
},
|
||||
]);
|
||||
|
||||
const target = await deviceState.getTarget();
|
||||
|
||||
const steps = await applicationManager.getRequiredSteps(target.local.apps);
|
||||
expect(steps).to.have.length(1);
|
||||
expect(steps[0]).to.have.property('action').that.equals('removeImage');
|
||||
expect(steps[0])
|
||||
.to.have.property('image')
|
||||
.that.has.property('name')
|
||||
.that.equals('registry2.balena-cloud.com/v2/asdasdasdasd@sha256:10');
|
||||
});
|
||||
|
||||
it.skip(
|
||||
'should infer that an image should be saved if it is not in the database',
|
||||
);
|
||||
|
||||
describe('MultiApp Support', () => {
|
||||
const multiAppState = {
|
||||
local: {
|
||||
name: 'testy-mctestface',
|
||||
config: {
|
||||
HOST_CONFIG_gpu_mem: '512',
|
||||
HOST_FIREWALL_MODE: 'off',
|
||||
HOST_DISCOVERABILITY: 'true',
|
||||
SUPERVISOR_CONNECTIVITY_CHECK: 'true',
|
||||
SUPERVISOR_DELTA: 'false',
|
||||
SUPERVISOR_DELTA_APPLY_TIMEOUT: '0',
|
||||
SUPERVISOR_DELTA_REQUEST_TIMEOUT: '30000',
|
||||
SUPERVISOR_DELTA_RETRY_COUNT: '30',
|
||||
SUPERVISOR_DELTA_RETRY_INTERVAL: '10000',
|
||||
SUPERVISOR_DELTA_VERSION: '2',
|
||||
SUPERVISOR_INSTANT_UPDATE_TRIGGER: 'true',
|
||||
SUPERVISOR_LOCAL_MODE: 'false',
|
||||
SUPERVISOR_LOG_CONTROL: 'true',
|
||||
SUPERVISOR_OVERRIDE_LOCK: 'false',
|
||||
SUPERVISOR_POLL_INTERVAL: '60000',
|
||||
SUPERVISOR_VPN_CONTROL: 'true',
|
||||
SUPERVISOR_PERSISTENT_LOGGING: 'false',
|
||||
},
|
||||
apps: {
|
||||
'1': {
|
||||
appId: 1,
|
||||
name: 'userapp',
|
||||
commit: 'aaaaaaa',
|
||||
releaseId: 1,
|
||||
services: {
|
||||
'1': {
|
||||
serviceName: 'mainy-1-servicey',
|
||||
imageId: 1,
|
||||
image: 'registry2.resin.io/userapp/main',
|
||||
environment: {},
|
||||
labels: {},
|
||||
},
|
||||
},
|
||||
volumes: {},
|
||||
networks: {},
|
||||
},
|
||||
'100': {
|
||||
appId: 100,
|
||||
name: 'systemapp',
|
||||
commit: 'bbbbbbb',
|
||||
releaseId: 100,
|
||||
services: {
|
||||
'100': {
|
||||
serviceName: 'mainy-2-systemapp',
|
||||
imageId: 100,
|
||||
image: 'registry2.resin.io/systemapp/main',
|
||||
environment: {},
|
||||
labels: {},
|
||||
},
|
||||
},
|
||||
volumes: {},
|
||||
networks: {},
|
||||
},
|
||||
},
|
||||
},
|
||||
dependent: { apps: {}, devices: {} },
|
||||
};
|
||||
|
||||
before(async () => {
|
||||
await prepare();
|
||||
|
||||
await config.initialized;
|
||||
await deviceState.initialized;
|
||||
|
||||
intialiseContractRequirements({
|
||||
supervisorVersion: '11.0.0',
|
||||
deviceType: 'intel-nuc',
|
||||
});
|
||||
});
|
||||
|
||||
it('should correctly generate steps for multiple apps', async () => {
|
||||
appMock.mockImages([], false, []);
|
||||
appMock.mockSupervisorNetwork(false);
|
||||
appMock.mockManagers([], [], []);
|
||||
|
||||
await mockedDockerode.testWithData({}, async () => {
|
||||
await deviceState.setTarget(multiAppState);
|
||||
const target = await deviceState.getTarget();
|
||||
|
||||
// The network always should be created first
|
||||
let steps = await applicationManager.getRequiredSteps(
|
||||
target.local.apps,
|
||||
);
|
||||
expect(steps).to.contain.something.like({
|
||||
action: 'ensureSupervisorNetwork',
|
||||
});
|
||||
expect(steps).to.have.length(1);
|
||||
|
||||
// Now we expect the steps to apply to multiple apps
|
||||
appMock.mockSupervisorNetwork(true);
|
||||
steps = await applicationManager.getRequiredSteps(target.local.apps);
|
||||
|
||||
expect(steps).to.not.be.null;
|
||||
expect(steps).to.contain.something.like({
|
||||
action: 'fetch',
|
||||
serviceName: 'mainy-1-servicey',
|
||||
});
|
||||
expect(steps).to.contain.something.like({
|
||||
action: 'fetch',
|
||||
serviceName: 'mainy-2-systemapp',
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
File diff suppressed because it is too large
Load Diff
@ -1,133 +0,0 @@
|
||||
import * as _ from 'lodash';
|
||||
import { expect } from 'chai';
|
||||
|
||||
import { docker } from '../src/lib/docker-utils';
|
||||
import * as Images from '../src/compose/images';
|
||||
import * as mockedDockerode from './lib/mocked-dockerode';
|
||||
import * as mockedDatabase from './lib/mocked-database';
|
||||
import * as db from '../src/db';
|
||||
import * as sampleImageData from './data/compose-image-data.json';
|
||||
|
||||
describe('compose/images', () => {
|
||||
before(() => {
|
||||
mockedDatabase.create();
|
||||
});
|
||||
|
||||
after(() => {
|
||||
try {
|
||||
mockedDatabase.restore();
|
||||
} catch (e) {
|
||||
/* noop */
|
||||
}
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
// Clear Dockerode actions recorded for each test
|
||||
mockedDockerode.resetHistory();
|
||||
});
|
||||
|
||||
it('Removes a legacy Image', async () => {
|
||||
const images = sampleImageData['legacy-image'].dockerode;
|
||||
const IMAGE_TO_REMOVE = sampleImageData['legacy-image'].remove;
|
||||
const IMAGES_FROM_DB = sampleImageData['legacy-image'].database;
|
||||
// Stub the database to return images we want
|
||||
mockedDatabase.setImages(IMAGES_FROM_DB).stub();
|
||||
// Perform the test with our specially crafted data
|
||||
await mockedDockerode.testWithData({ images }, async () => {
|
||||
// Check that our legacy image exists
|
||||
await expect(docker.getImage(IMAGE_TO_REMOVE.name).inspect()).to
|
||||
.eventually.not.be.undefined;
|
||||
await expect(
|
||||
db.models('image').select().where(IMAGE_TO_REMOVE),
|
||||
).to.eventually.have.lengthOf(1);
|
||||
// Check that docker has this Image
|
||||
await expect(docker.getImage(IMAGE_TO_REMOVE.name).inspect()).to
|
||||
.eventually.not.be.undefined;
|
||||
// Now remove this image...
|
||||
await Images.remove(IMAGE_TO_REMOVE);
|
||||
// Check if it still exists!
|
||||
await expect(docker.getImage(IMAGE_TO_REMOVE.name).inspect()).to
|
||||
.eventually.be.undefined;
|
||||
await expect(db.models('image').select().where(IMAGE_TO_REMOVE)).to
|
||||
.eventually.be.empty;
|
||||
// Check that docker remove was called once
|
||||
const removeSteps = _(mockedDockerode.actions)
|
||||
.pickBy({ name: 'remove' })
|
||||
.map()
|
||||
.value();
|
||||
expect(removeSteps).to.have.lengthOf(1);
|
||||
});
|
||||
});
|
||||
|
||||
it('Removes a single Image', async () => {
|
||||
const images = sampleImageData['single-image'].dockerode;
|
||||
const IMAGE_TO_REMOVE = sampleImageData['single-image'].remove;
|
||||
const IMAGES_FROM_DB = sampleImageData['single-image'].database;
|
||||
// Stub the database to return images we want
|
||||
mockedDatabase.setImages(IMAGES_FROM_DB).stub();
|
||||
// Perform the test with our specially crafted data
|
||||
await mockedDockerode.testWithData({ images }, async () => {
|
||||
// Check that a single image is returned when given entire object
|
||||
expect(
|
||||
db.models('image').select().where(IMAGE_TO_REMOVE),
|
||||
).to.eventually.have.lengthOf(1);
|
||||
// Check that only one image with this dockerImageId exists in the db
|
||||
expect(
|
||||
db
|
||||
.models('image')
|
||||
.where({ dockerImageId: IMAGE_TO_REMOVE.dockerImageId })
|
||||
.select(),
|
||||
).to.eventually.have.lengthOf(1);
|
||||
// Now remove this image...
|
||||
await Images.remove(IMAGE_TO_REMOVE);
|
||||
// Check that docker does not have this image
|
||||
await expect(docker.getImage(IMAGE_TO_REMOVE.name).inspect()).to
|
||||
.eventually.be.undefined;
|
||||
// Check that the database longer has this image
|
||||
await expect(db.models('image').select().where(IMAGE_TO_REMOVE)).to
|
||||
.eventually.be.empty;
|
||||
// Check that docker remove was called once
|
||||
const removeSteps = _(mockedDockerode.actions)
|
||||
.pickBy({ name: 'remove' })
|
||||
.map()
|
||||
.value();
|
||||
expect(removeSteps).to.have.lengthOf(1);
|
||||
});
|
||||
});
|
||||
|
||||
it('Removes an Image with digests', async () => {
|
||||
const images = sampleImageData['image-with-digests'].dockerode;
|
||||
const IMAGE_TO_REMOVE = sampleImageData['image-with-digests'].remove;
|
||||
const IMAGES_FROM_DB = sampleImageData['image-with-digests'].database;
|
||||
// Stub the database to return images we want
|
||||
mockedDatabase.setImages(IMAGES_FROM_DB).stub();
|
||||
// Perform the test with our specially crafted data
|
||||
await mockedDockerode.testWithData({ images }, async () => {
|
||||
// Check that a single image is returned when given entire object
|
||||
expect(
|
||||
db.models('image').select().where(IMAGE_TO_REMOVE),
|
||||
).to.eventually.have.lengthOf(1);
|
||||
// Check that multiple images with the same dockerImageId are returned
|
||||
expect(
|
||||
db
|
||||
.models('image')
|
||||
.where({ dockerImageId: IMAGE_TO_REMOVE.dockerImageId })
|
||||
.select(),
|
||||
).to.eventually.have.lengthOf(2);
|
||||
// Now remove these image...
|
||||
await Images.remove(IMAGE_TO_REMOVE);
|
||||
// Check that docker does not have this image
|
||||
await expect(docker.getImage(IMAGE_TO_REMOVE.name).inspect()).to
|
||||
.eventually.be.undefined;
|
||||
// Check that the database no longer has this image
|
||||
await expect(db.models('image').select().where(IMAGE_TO_REMOVE)).to
|
||||
.eventually.be.empty;
|
||||
// Check that docker remove was called twice
|
||||
const removeSteps = _(mockedDockerode.actions)
|
||||
.pickBy({ name: 'remove' })
|
||||
.map()
|
||||
.value();
|
||||
expect(removeSteps).to.have.lengthOf(2);
|
||||
});
|
||||
});
|
||||
});
|
@ -1,184 +0,0 @@
|
||||
import { expect } from 'chai';
|
||||
import { stub, SinonStub } from 'sinon';
|
||||
import { VolumeInspectInfo } from 'dockerode';
|
||||
|
||||
import * as mockedDockerode from './lib/mocked-dockerode';
|
||||
import * as volumeManager from '../src/compose/volume-manager';
|
||||
import log from '../src/lib/supervisor-console';
|
||||
import Volume from '../src/compose/volume';
|
||||
|
||||
// Test suite for the volume-manager compose module. All engine state is
// faked through mockedDockerode.testWithData(), so no docker daemon is
// needed; log.debug is stubbed for the whole suite so tests can assert on
// supervisor logging.
describe('Volume Manager', () => {
	// Suite-wide stub over the supervisor console logger
	let logDebug: SinonStub;
	before(() => {
		logDebug = stub(log, 'debug');
	});
	after(() => {
		logDebug.restore();
	});

	afterEach(() => {
		// Clear Dockerode actions recorded for each test
		mockedDockerode.resetHistory();
		logDebug.reset();
	});

	it('gets all supervised Volumes', async () => {
		// Setup volume data
		const volumeData = [
			createVolumeInspectInfo(Volume.generateDockerName(1, 'redis'), {
				'io.balena.supervised': '1', // Recently created volumes contain io.balena.supervised label
			}),
			createVolumeInspectInfo(Volume.generateDockerName(1, 'mysql'), {
				'io.balena.supervised': '1', // Recently created volumes contain io.balena.supervised label
			}),
			createVolumeInspectInfo(Volume.generateDockerName(1, 'backend')), // Old Volumes will not have labels
			createVolumeInspectInfo('user_created_volume'), // Volume not created by the Supervisor
			createVolumeInspectInfo('decoy', { 'io.balena.supervised': '1' }), // Added decoy to really test the inference (should not return)
		];
		// Perform test: only volumes matching the supervisor naming scheme
		// (appId_name) should be returned, regardless of labels
		await mockedDockerode.testWithData({ volumes: volumeData }, async () => {
			await expect(volumeManager.getAll()).to.eventually.deep.equal([
				{
					appId: 1,
					config: {
						driver: 'local',
						driverOpts: {},
						labels: {
							'io.balena.supervised': '1',
						},
					},
					name: 'redis',
				},
				{
					appId: 1,
					config: {
						driver: 'local',
						driverOpts: {},
						labels: {
							'io.balena.supervised': '1',
						},
					},
					name: 'mysql',
				},
				{
					appId: 1,
					config: {
						driver: 'local',
						driverOpts: {},
						labels: {},
					},
					name: 'backend',
				},
			]);
			// Check that debug message was logged saying we found a Volume not created by us
			expect(logDebug.lastCall.lastArg).to.equal(
				'Found unmanaged Volume: decoy',
			);
		});
	});

	it('can parse null Volumes', async () => {
		// Setup volume data: the engine may report no volumes at all
		// @ts-ignore
		const volumeData: VolumeInspectInfo[] = null;
		// Perform test: a null volume list must yield an empty result,
		// not a crash
		await mockedDockerode.testWithData({ volumes: volumeData }, async () => {
			await expect(volumeManager.getAll()).to.eventually.deep.equal([]);
		});
	});

	it('gets a Volume for an application', async () => {
		// Setup volume data for two different applications
		const volumeData = [
			createVolumeInspectInfo(Volume.generateDockerName(111, 'app'), {
				'io.balena.supervised': '1',
			}),
			createVolumeInspectInfo(Volume.generateDockerName(222, 'otherApp'), {
				'io.balena.supervised': '1',
			}),
		];
		// Perform test: filtering by appId must exclude the other app's volume
		await mockedDockerode.testWithData({ volumes: volumeData }, async () => {
			await expect(volumeManager.getAllByAppId(111)).to.eventually.deep.equal([
				{
					appId: 111,
					config: {
						driver: 'local',
						driverOpts: {},
						labels: {
							'io.balena.supervised': '1',
						},
					},
					name: 'app',
				},
			]);
		});
	});

	it('creates a Volume', async () => {
		// Setup volume data: engine starts with no volumes
		const volumeData: Dictionary<any> = [];
		// Perform test
		await mockedDockerode.testWithData({ volumes: volumeData }, async () => {
			// Volume to create
			const volume = Volume.fromComposeObject('main', 111, {});
			stub(volume, 'create');
			// Create volume
			await volumeManager.create(volume);
			// Check volume was created
			expect(volume.create as SinonStub).to.be.calledOnce;
		});
	});

	it('does not try to create a volume that already exists', async () => {
		// Setup volume data: the volume is already present on the engine
		const volumeData = [
			createVolumeInspectInfo(Volume.generateDockerName(111, 'main'), {
				'io.balena.supervised': '1',
			}),
		];
		// Perform test
		await mockedDockerode.testWithData({ volumes: volumeData }, async () => {
			// Volume to try again create
			const volume = Volume.fromComposeObject('main', 111, {});
			stub(volume, 'create');
			// Create volume
			await volumeManager.create(volume);
			// Check volume was not created (creation must be idempotent)
			expect(volume.create as SinonStub).to.not.be.called;
		});
	});

	it('removes a Volume', async () => {
		// Setup volume data
		const volumeData = [
			createVolumeInspectInfo(Volume.generateDockerName(111, 'main'), {
				'io.balena.supervised': '1',
			}),
		];
		// Perform test
		await mockedDockerode.testWithData({ volumes: volumeData }, async () => {
			// Volume to remove
			const volume = Volume.fromComposeObject('main', 111, {});
			stub(volume, 'remove');
			// Remove volume
			await volumeManager.remove(volume);
			// Check volume was removed
			expect(volume.remove as SinonStub).to.be.calledOnce;
		});
	});
});
|
||||
|
||||
function createVolumeInspectInfo(
|
||||
name: string,
|
||||
labels: { [key: string]: string } = {},
|
||||
driver: string = 'local',
|
||||
options: { [key: string]: string } | null = null,
|
||||
) {
|
||||
return {
|
||||
Name: name,
|
||||
Driver: driver,
|
||||
Labels: labels,
|
||||
Options: options,
|
||||
};
|
||||
}
|
@ -1,408 +0,0 @@
|
||||
{
|
||||
"legacy-image": {
|
||||
"remove": {
|
||||
"id": 246,
|
||||
"name": "registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582",
|
||||
"appId": 1658654,
|
||||
"serviceId": 650325,
|
||||
"serviceName": "app_1",
|
||||
"imageId": 2693229,
|
||||
"releaseId": 1524186,
|
||||
"dependent": 0
|
||||
},
|
||||
"database": [
|
||||
{
|
||||
"id": 246,
|
||||
"name": "registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582",
|
||||
"appId": 1658654,
|
||||
"serviceId": 650325,
|
||||
"serviceName": "app_1",
|
||||
"imageId": 2693229,
|
||||
"releaseId": 1524186,
|
||||
"dependent": 0
|
||||
}
|
||||
],
|
||||
"dockerode": {
|
||||
"registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582": {
|
||||
"Containers": -1,
|
||||
"Created": 1599492712,
|
||||
"Id": "sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7",
|
||||
"Labels": {
|
||||
"io.balena.architecture": "aarch64",
|
||||
"io.balena.device-type": "jetson-tx2",
|
||||
"io.balena.qemu.version": "4.0.0+balena2-aarch64"
|
||||
},
|
||||
"ParentId": "",
|
||||
"RepoDigests": [
|
||||
"registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a@sha256:72b80cbd3cc12de08d4adc9dec79916bf466031553f55b59c29e397829ea129f"
|
||||
],
|
||||
"RepoTags": [
|
||||
"registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a:delta-ada9fbb57d90e61e"
|
||||
],
|
||||
"SharedSize": -1,
|
||||
"Size": 217024648,
|
||||
"VirtualSize": 217024648
|
||||
},
|
||||
"livepush-supervisor:11.12.11": {
|
||||
"Containers": -1,
|
||||
"Created": 1599492599,
|
||||
"Id": "sha256:db5af2c94366275d8e6d7ea3047f2405eab2f04a27f66843634a45958ef59f5a",
|
||||
"Labels": {
|
||||
"io.balena.architecture": "aarch64",
|
||||
"io.balena.livepush-image": "1",
|
||||
"io.balena.qemu.version": "4.0.0+balena-aarch64"
|
||||
},
|
||||
"ParentId": "sha256:4acb1652178e72c0b6143a08225e0df5ef74b338a0c9e2ca4cd261339f4f0431",
|
||||
"RepoDigests": null,
|
||||
"RepoTags": [
|
||||
"livepush-supervisor:11.12.11"
|
||||
],
|
||||
"SharedSize": -1,
|
||||
"Size": 501352885,
|
||||
"VirtualSize": 501352885
|
||||
}
|
||||
}
|
||||
},
|
||||
"single-image": {
|
||||
"remove": {
|
||||
"id": 246,
|
||||
"name": "registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582",
|
||||
"appId": 1658654,
|
||||
"serviceId": 650325,
|
||||
"serviceName": "app_1",
|
||||
"imageId": 2693229,
|
||||
"releaseId": 1524186,
|
||||
"dependent": 0,
|
||||
"dockerImageId": "sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7"
|
||||
},
|
||||
"database": [
|
||||
{
|
||||
"id": 246,
|
||||
"name": "registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582",
|
||||
"appId": 1658654,
|
||||
"serviceId": 650325,
|
||||
"serviceName": "app_1",
|
||||
"imageId": 2693229,
|
||||
"releaseId": 1524186,
|
||||
"dependent": 0,
|
||||
"dockerImageId": "sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7"
|
||||
},
|
||||
{
|
||||
"id": 247,
|
||||
"name": "registry2.balena-cloud.com/v2/902cf44eb0ed51675a0bf95a7bbf0c91@sha256:12345a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582",
|
||||
"appId": 1658654,
|
||||
"serviceId": 650331,
|
||||
"serviceName": "app_2",
|
||||
"imageId": 2693230,
|
||||
"releaseId": 1524186,
|
||||
"dependent": 0,
|
||||
"dockerImageId": "sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc901234"
|
||||
}
|
||||
],
|
||||
"dockerode": {
|
||||
"sha256:acf4069b3cf68d05dc8a2df0e511447927303ebef88f897f05cbad823f240d97": {
|
||||
"Containers": -1,
|
||||
"Created": 1599492964,
|
||||
"Id": "sha256:acf4069b3cf68d05dc8a2df0e511447927303ebef88f897f05cbad823f240d97",
|
||||
"Labels": {
|
||||
"io.balena.architecture": "aarch64",
|
||||
"io.balena.device-type": "jetson-tx2",
|
||||
"io.balena.qemu.version": "4.0.0+balena2-aarch64"
|
||||
},
|
||||
"ParentId": "",
|
||||
"RepoDigests": [
|
||||
"registry2.balena-cloud.com/v2/772eca412ce780e860b988da6ea26ee0@sha256:7d5e55d6aac00b8504c5c360a3ee59677fc5a7324360f1f54df19d0bb17c2cfe",
|
||||
"registry2.balena-cloud.com/v2/7f1290fa85b253936ebf6e0dbbd95875@sha256:107f63eb1bc2e9978f2a4bb5b095bc010dd91dd5a6a0a39a494e27ee8b396232"
|
||||
],
|
||||
"RepoTags": [
|
||||
"registry2.balena-cloud.com/v2/772eca412ce780e860b988da6ea26ee0:delta-ff3f0a82c404830b",
|
||||
"registry2.balena-cloud.com/v2/7f1290fa85b253936ebf6e0dbbd95875:delta-79acb6d8bf4795f6"
|
||||
],
|
||||
"SharedSize": -1,
|
||||
"Size": 217024666,
|
||||
"VirtualSize": 217024666
|
||||
},
|
||||
"sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7": {
|
||||
"Containers": -1,
|
||||
"Created": 1599492712,
|
||||
"Id": "sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7",
|
||||
"Labels": {
|
||||
"io.balena.architecture": "aarch64",
|
||||
"io.balena.device-type": "jetson-tx2",
|
||||
"io.balena.qemu.version": "4.0.0+balena2-aarch64"
|
||||
},
|
||||
"ParentId": "",
|
||||
"RepoDigests": [
|
||||
"registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a@sha256:72b80cbd3cc12de08d4adc9dec79916bf466031553f55b59c29e397829ea129f"
|
||||
],
|
||||
"RepoTags": [
|
||||
"registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a:delta-ada9fbb57d90e61e"
|
||||
],
|
||||
"SharedSize": -1,
|
||||
"Size": 217024648,
|
||||
"VirtualSize": 217024648
|
||||
},
|
||||
"sha256:db5af2c94366275d8e6d7ea3047f2405eab2f04a27f66843634a45958ef59f5a": {
|
||||
"Containers": -1,
|
||||
"Created": 1599492599,
|
||||
"Id": "sha256:db5af2c94366275d8e6d7ea3047f2405eab2f04a27f66843634a45958ef59f5a",
|
||||
"Labels": {
|
||||
"io.balena.architecture": "aarch64",
|
||||
"io.balena.livepush-image": "1",
|
||||
"io.balena.qemu.version": "4.0.0+balena-aarch64"
|
||||
},
|
||||
"ParentId": "sha256:4acb1652178e72c0b6143a08225e0df5ef74b338a0c9e2ca4cd261339f4f0431",
|
||||
"RepoDigests": null,
|
||||
"RepoTags": [
|
||||
"livepush-supervisor:11.12.11"
|
||||
],
|
||||
"SharedSize": -1,
|
||||
"Size": 501352885,
|
||||
"VirtualSize": 501352885
|
||||
},
|
||||
"sha256:272c54e27104cf6c2153538165dba3c29b58ae35837b47134fa55b53ddb61154": {
|
||||
"Containers": -1,
|
||||
"Created": 1599181227,
|
||||
"Id": "sha256:272c54e27104cf6c2153538165dba3c29b58ae35837b47134fa55b53ddb61154",
|
||||
"Labels": {
|
||||
"io.balena.architecture": "aarch64",
|
||||
"io.balena.device-type": "jetson-tx2",
|
||||
"io.balena.qemu.version": "4.0.0+balena2-aarch64"
|
||||
},
|
||||
"ParentId": "",
|
||||
"RepoDigests": [
|
||||
"registry2.balena-cloud.com/v2/459bffd69101d788cc3e0722a012878a@sha256:3cc9231cf0e117585e53ebfa6bf9f75a9a4eaa371fb82c21ab9bca8fe0d5c3e3",
|
||||
"registry2.balena-cloud.com/v2/b0cfe2b1e8c5ab3b6da23f0bd92045b4@sha256:ba5f6d1849c63c8d0f11f35fce694464240002d2c3732898935bf0fedf451063"
|
||||
],
|
||||
"RepoTags": [
|
||||
"registry2.balena-cloud.com/v2/459bffd69101d788cc3e0722a012878a:delta-80ed841a1d3fefa9",
|
||||
"registry2.balena-cloud.com/v2/b0cfe2b1e8c5ab3b6da23f0bd92045b4:delta-532f970c60decb81"
|
||||
],
|
||||
"SharedSize": -1,
|
||||
"Size": 217024558,
|
||||
"VirtualSize": 217024558
|
||||
},
|
||||
"sha256:25d6abae14f08de6b80f9d95003e674598738959a535a2f21be34c03675ebd02": {
|
||||
"Containers": -1,
|
||||
"Created": 1599146808,
|
||||
"Id": "sha256:25d6abae14f08de6b80f9d95003e674598738959a535a2f21be34c03675ebd02",
|
||||
"Labels": {
|
||||
"io.balena.qemu.version": "4.0.0+balena-aarch64"
|
||||
},
|
||||
"ParentId": "",
|
||||
"RepoDigests": [
|
||||
"balena/aarch64-supervisor@sha256:e7e3b166e855f4c113a67bc528b4ef77408d05e280052f35452f3e2cd7b5322b"
|
||||
],
|
||||
"RepoTags": [
|
||||
"balena/aarch64-supervisor:v11.14.0"
|
||||
],
|
||||
"SharedSize": -1,
|
||||
"Size": 72305850,
|
||||
"VirtualSize": 72305850
|
||||
},
|
||||
"sha256:60783b8688d395f9b4ce4b288d941244d1b0a4c43114ba980acd012ccffc6b53": {
|
||||
"Containers": -1,
|
||||
"Created": 1585746557,
|
||||
"Id": "sha256:60783b8688d395f9b4ce4b288d941244d1b0a4c43114ba980acd012ccffc6b53",
|
||||
"Labels": {
|
||||
"io.balena.qemu.version": "4.0.0+balena-aarch64"
|
||||
},
|
||||
"ParentId": "",
|
||||
"RepoDigests": [
|
||||
"balenalib/aarch64-alpine-supervisor-base@sha256:6eb712fc797ff68f258d9032cf292c266cb9bd8be4cbdaaafeb5a8824bb104fd"
|
||||
],
|
||||
"RepoTags": [
|
||||
"balenalib/aarch64-alpine-supervisor-base:3.11"
|
||||
],
|
||||
"SharedSize": -1,
|
||||
"Size": 14503741,
|
||||
"VirtualSize": 14503741
|
||||
},
|
||||
"sha256:a29f45ccde2ac0bde957b1277b1501f471960c8ca49f1588c6c885941640ae60": {
|
||||
"Containers": -1,
|
||||
"Created": 1578015959,
|
||||
"Id": "sha256:a29f45ccde2ac0bde957b1277b1501f471960c8ca49f1588c6c885941640ae60",
|
||||
"Labels": null,
|
||||
"ParentId": "",
|
||||
"RepoDigests": null,
|
||||
"RepoTags": [
|
||||
"balena-healthcheck-image:latest"
|
||||
],
|
||||
"SharedSize": -1,
|
||||
"Size": 9136,
|
||||
"VirtualSize": 9136
|
||||
}
|
||||
}
|
||||
},
|
||||
"image-with-digests": {
|
||||
"remove": {
|
||||
"id": 246,
|
||||
"name": "registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582",
|
||||
"appId": 1658654,
|
||||
"serviceId": 650325,
|
||||
"serviceName": "app_1",
|
||||
"imageId": 2693229,
|
||||
"releaseId": 1524186,
|
||||
"dependent": 0,
|
||||
"dockerImageId": "sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7"
|
||||
},
|
||||
"database": [
|
||||
{
|
||||
"id": 246,
|
||||
"name": "registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582",
|
||||
"appId": 1658654,
|
||||
"serviceId": 650325,
|
||||
"serviceName": "app_1",
|
||||
"imageId": 2693229,
|
||||
"releaseId": 1524186,
|
||||
"dependent": 0,
|
||||
"dockerImageId": "sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7"
|
||||
},
|
||||
{
|
||||
"id": 247,
|
||||
"name": "registry2.balena-cloud.com/v2/902cf44eb0ed51675a0bf95a7bbf0c91@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582",
|
||||
"appId": 1658654,
|
||||
"serviceId": 650331,
|
||||
"serviceName": "app_2",
|
||||
"imageId": 2693230,
|
||||
"releaseId": 1524186,
|
||||
"dependent": 0,
|
||||
"dockerImageId": "sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7"
|
||||
}
|
||||
],
|
||||
"dockerode": {
|
||||
"sha256:acf4069b3cf68d05dc8a2df0e511447927303ebef88f897f05cbad823f240d97": {
|
||||
"Containers": -1,
|
||||
"Created": 1599492964,
|
||||
"Id": "sha256:acf4069b3cf68d05dc8a2df0e511447927303ebef88f897f05cbad823f240d97",
|
||||
"Labels": {
|
||||
"io.balena.architecture": "aarch64",
|
||||
"io.balena.device-type": "jetson-tx2",
|
||||
"io.balena.qemu.version": "4.0.0+balena2-aarch64"
|
||||
},
|
||||
"ParentId": "",
|
||||
"RepoDigests": [
|
||||
"registry2.balena-cloud.com/v2/772eca412ce780e860b988da6ea26ee0@sha256:7d5e55d6aac00b8504c5c360a3ee59677fc5a7324360f1f54df19d0bb17c2cfe",
|
||||
"registry2.balena-cloud.com/v2/7f1290fa85b253936ebf6e0dbbd95875@sha256:107f63eb1bc2e9978f2a4bb5b095bc010dd91dd5a6a0a39a494e27ee8b396232"
|
||||
],
|
||||
"RepoTags": [
|
||||
"registry2.balena-cloud.com/v2/772eca412ce780e860b988da6ea26ee0:delta-ff3f0a82c404830b",
|
||||
"registry2.balena-cloud.com/v2/7f1290fa85b253936ebf6e0dbbd95875:delta-79acb6d8bf4795f6"
|
||||
],
|
||||
"SharedSize": -1,
|
||||
"Size": 217024666,
|
||||
"VirtualSize": 217024666
|
||||
},
|
||||
"sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7": {
|
||||
"Containers": -1,
|
||||
"Created": 1599492712,
|
||||
"Id": "sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7",
|
||||
"Labels": {
|
||||
"io.balena.architecture": "aarch64",
|
||||
"io.balena.device-type": "jetson-tx2",
|
||||
"io.balena.qemu.version": "4.0.0+balena2-aarch64"
|
||||
},
|
||||
"ParentId": "",
|
||||
"RepoDigests": [
|
||||
"registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a@sha256:72b80cbd3cc12de08d4adc9dec79916bf466031553f55b59c29e397829ea129f"
|
||||
],
|
||||
"RepoTags": [
|
||||
"registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a:delta-ada9fbb57d90e61e"
|
||||
],
|
||||
"SharedSize": -1,
|
||||
"Size": 217024648,
|
||||
"VirtualSize": 217024648
|
||||
},
|
||||
"sha256:db5af2c94366275d8e6d7ea3047f2405eab2f04a27f66843634a45958ef59f5a": {
|
||||
"Containers": -1,
|
||||
"Created": 1599492599,
|
||||
"Id": "sha256:db5af2c94366275d8e6d7ea3047f2405eab2f04a27f66843634a45958ef59f5a",
|
||||
"Labels": {
|
||||
"io.balena.architecture": "aarch64",
|
||||
"io.balena.livepush-image": "1",
|
||||
"io.balena.qemu.version": "4.0.0+balena-aarch64"
|
||||
},
|
||||
"ParentId": "sha256:4acb1652178e72c0b6143a08225e0df5ef74b338a0c9e2ca4cd261339f4f0431",
|
||||
"RepoDigests": null,
|
||||
"RepoTags": [
|
||||
"livepush-supervisor:11.12.11"
|
||||
],
|
||||
"SharedSize": -1,
|
||||
"Size": 501352885,
|
||||
"VirtualSize": 501352885
|
||||
},
|
||||
"sha256:272c54e27104cf6c2153538165dba3c29b58ae35837b47134fa55b53ddb61154": {
|
||||
"Containers": -1,
|
||||
"Created": 1599181227,
|
||||
"Id": "sha256:272c54e27104cf6c2153538165dba3c29b58ae35837b47134fa55b53ddb61154",
|
||||
"Labels": {
|
||||
"io.balena.architecture": "aarch64",
|
||||
"io.balena.device-type": "jetson-tx2",
|
||||
"io.balena.qemu.version": "4.0.0+balena2-aarch64"
|
||||
},
|
||||
"ParentId": "",
|
||||
"RepoDigests": [
|
||||
"registry2.balena-cloud.com/v2/459bffd69101d788cc3e0722a012878a@sha256:3cc9231cf0e117585e53ebfa6bf9f75a9a4eaa371fb82c21ab9bca8fe0d5c3e3",
|
||||
"registry2.balena-cloud.com/v2/b0cfe2b1e8c5ab3b6da23f0bd92045b4@sha256:ba5f6d1849c63c8d0f11f35fce694464240002d2c3732898935bf0fedf451063"
|
||||
],
|
||||
"RepoTags": [
|
||||
"registry2.balena-cloud.com/v2/459bffd69101d788cc3e0722a012878a:delta-80ed841a1d3fefa9",
|
||||
"registry2.balena-cloud.com/v2/b0cfe2b1e8c5ab3b6da23f0bd92045b4:delta-532f970c60decb81"
|
||||
],
|
||||
"SharedSize": -1,
|
||||
"Size": 217024558,
|
||||
"VirtualSize": 217024558
|
||||
},
|
||||
"sha256:25d6abae14f08de6b80f9d95003e674598738959a535a2f21be34c03675ebd02": {
|
||||
"Containers": -1,
|
||||
"Created": 1599146808,
|
||||
"Id": "sha256:25d6abae14f08de6b80f9d95003e674598738959a535a2f21be34c03675ebd02",
|
||||
"Labels": {
|
||||
"io.balena.qemu.version": "4.0.0+balena-aarch64"
|
||||
},
|
||||
"ParentId": "",
|
||||
"RepoDigests": [
|
||||
"balena/aarch64-supervisor@sha256:e7e3b166e855f4c113a67bc528b4ef77408d05e280052f35452f3e2cd7b5322b"
|
||||
],
|
||||
"RepoTags": [
|
||||
"balena/aarch64-supervisor:v11.14.0"
|
||||
],
|
||||
"SharedSize": -1,
|
||||
"Size": 72305850,
|
||||
"VirtualSize": 72305850
|
||||
},
|
||||
"sha256:60783b8688d395f9b4ce4b288d941244d1b0a4c43114ba980acd012ccffc6b53": {
|
||||
"Containers": -1,
|
||||
"Created": 1585746557,
|
||||
"Id": "sha256:60783b8688d395f9b4ce4b288d941244d1b0a4c43114ba980acd012ccffc6b53",
|
||||
"Labels": {
|
||||
"io.balena.qemu.version": "4.0.0+balena-aarch64"
|
||||
},
|
||||
"ParentId": "",
|
||||
"RepoDigests": [
|
||||
"balenalib/aarch64-alpine-supervisor-base@sha256:6eb712fc797ff68f258d9032cf292c266cb9bd8be4cbdaaafeb5a8824bb104fd"
|
||||
],
|
||||
"RepoTags": [
|
||||
"balenalib/aarch64-alpine-supervisor-base:3.11"
|
||||
],
|
||||
"SharedSize": -1,
|
||||
"Size": 14503741,
|
||||
"VirtualSize": 14503741
|
||||
},
|
||||
"sha256:a29f45ccde2ac0bde957b1277b1501f471960c8ca49f1588c6c885941640ae60": {
|
||||
"Containers": -1,
|
||||
"Created": 1578015959,
|
||||
"Id": "sha256:a29f45ccde2ac0bde957b1277b1501f471960c8ca49f1588c6c885941640ae60",
|
||||
"Labels": null,
|
||||
"ParentId": "",
|
||||
"RepoDigests": null,
|
||||
"RepoTags": [
|
||||
"balena-healthcheck-image:latest"
|
||||
],
|
||||
"SharedSize": -1,
|
||||
"Size": 9136,
|
||||
"VirtualSize": 9136
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -31,7 +31,6 @@ export function mockManagers(svcs: Service[], vols: Volume[], nets: Network[]) {
|
||||
}
|
||||
return s;
|
||||
});
|
||||
console.log('Calling the mock', svcs);
|
||||
return svcs;
|
||||
};
|
||||
}
|
||||
|
82
test/lib/db-helper.ts
Normal file
82
test/lib/db-helper.ts
Normal file
@ -0,0 +1,82 @@
|
||||
import * as constants from '../../src/lib/constants';
|
||||
import * as db from '../../src/db';
|
||||
import * as sinon from 'sinon';
|
||||
|
||||
import rewire = require('rewire');
|
||||
|
||||
// Creates a test database and returns a query builder
//
// The returned handle also stubs the app-wide `db` module so any module
// under test transparently talks to the in-memory database created here.
// Callers must invoke `destroy()` when done to restore the stubs and the
// DATABASE_PATH environment variable.
export async function createDB() {
	const oldDatabasePath = process.env.DATABASE_PATH;

	// for testing we use an in memory database
	process.env.DATABASE_PATH = ':memory:';

	// @ts-ignore
	constants.databasePath = process.env.DATABASE_PATH;

	// Cleanup the module cache in order to have it reloaded in the local context
	delete require.cache[require.resolve('../../src/db')];
	const testDb = rewire('../../src/db');

	// Initialize the database module (runs migrations)
	await testDb.initialized;

	// Get the knex instance to allow queries to the db
	const knex = testDb.__get__('knex');
	const { models } = testDb;

	// This is hacky but haven't found another way to do it,
	// stubbing the db methods here ensures the module under test
	// is using the database we want
	sinon.stub(db, 'models').callsFake(models);
	sinon.stub(db, 'upsertModel').callsFake(testDb.upsertModel);

	return {
		// Returns a query builder instance for the given
		// table in order perform data operations
		models,

		// Resets the database to initial value post
		// migrations
		async reset() {
			// Reset the contents of the db
			await testDb.transaction(async (trx: any) => {
				const result = await trx.raw(`
				SELECT name, sql
				FROM sqlite_master
				WHERE type='table'`);
				for (const r of result) {
					// We don't run the migrations again
					if (r.name !== 'knex_migrations') {
						await trx.raw(`DELETE FROM ${r.name}`);
					}
				}

				// The supervisor expects this value to already have
				// been pre-populated
				await trx('deviceConfig').insert({ targetValues: '{}' });
			});

			// Reset stub call history
			(db.models as sinon.SinonStub).resetHistory();
			(db.upsertModel as sinon.SinonStub).resetHistory();
		},

		// Destroys the in-memory database and resets environment
		async destroy() {
			await knex.destroy();

			// Restore the old database path
			process.env.DATABASE_PATH = oldDatabasePath;

			// Restore stubs
			(db.models as sinon.SinonStub).restore();
			(db.upsertModel as sinon.SinonStub).restore();

			// @ts-ignore
			constants.databasePath = process.env.DATABASE_PATH;
		},
	};
}

// Resolved type of createDB(), i.e. the test database handle
// NOTE(review): relies on an UnwrappedPromise helper type that is not
// imported here — presumably declared globally; confirm against typings
export type TestDatabase = UnwrappedPromise<ReturnType<typeof createDB>>;
|
949
test/lib/mockerode.ts
Normal file
949
test/lib/mockerode.ts
Normal file
@ -0,0 +1,949 @@
|
||||
import * as dockerode from 'dockerode';
|
||||
import * as sinon from 'sinon';
|
||||
|
||||
// Recursively convert properties of an object as optional
// (arrays are mapped element-wise; primitives are kept as-is)
type DeepPartial<T> = {
	[P in keyof T]?: T[P] extends Array<infer U>
		? Array<DeepPartial<U>>
		: T[P] extends object
		? DeepPartial<T[P]>
		: T[P];
};

// Partial container inspect info for receiving as testing data
// (only the container Id is mandatory)
export type PartialContainerInspectInfo = DeepPartial<
	dockerode.ContainerInspectInfo
> & {
	Id: string;
};

// Partial network inspect info; only the network Id is mandatory
export type PartialNetworkInspectInfo = DeepPartial<
	dockerode.NetworkInspectInfo
> & {
	Id: string;
};

// Partial volume inspect info; only the volume Name is mandatory
export type PartialVolumeInspectInfo = DeepPartial<
	dockerode.VolumeInspectInfo
> & {
	Name: string;
};

// Partial image inspect info; only the image Id is mandatory
export type PartialImageInspectInfo = DeepPartial<
	dockerode.ImageInspectInfo
> & {
	Id: string;
};
|
||||
|
||||
type Methods<T> = {
|
||||
[K in keyof T]: T[K] extends (...args: any) => any ? T[K] : never;
|
||||
};
|
||||
|
||||
function createFake<Prototype extends object>(prototype: Prototype) {
|
||||
return (Object.getOwnPropertyNames(prototype) as Array<keyof Prototype>)
|
||||
.filter((fn) => fn === 'constructor' || typeof prototype[fn] === 'function')
|
||||
.reduce(
|
||||
(res, fn) => ({
|
||||
...res,
|
||||
[fn]: () => {
|
||||
throw Error(
|
||||
`Fake method not implemented: ${prototype.constructor.name}.${fn}()`,
|
||||
);
|
||||
},
|
||||
}),
|
||||
{} as Methods<Prototype>,
|
||||
);
|
||||
}
|
||||
|
||||
export function createNetwork(network: PartialNetworkInspectInfo) {
|
||||
const { Id, ...networkInspect } = network;
|
||||
const inspectInfo = {
|
||||
Id,
|
||||
Name: 'default',
|
||||
Created: '2015-01-06T15:47:31.485331387Z',
|
||||
Scope: 'local',
|
||||
Driver: 'bridge',
|
||||
EnableIPv6: false,
|
||||
Internal: false,
|
||||
Attachable: true,
|
||||
Ingress: false,
|
||||
IPAM: {
|
||||
Driver: 'default',
|
||||
Options: {},
|
||||
Config: [
|
||||
{
|
||||
Subnet: '172.18.0.0/16',
|
||||
Gateway: '172.18.0.1',
|
||||
},
|
||||
],
|
||||
},
|
||||
Containers: {},
|
||||
Options: {},
|
||||
Labels: {},
|
||||
|
||||
// Add defaults
|
||||
...networkInspect,
|
||||
};
|
||||
|
||||
const fakeNetwork = createFake(dockerode.Network.prototype);
|
||||
|
||||
return {
|
||||
...fakeNetwork, // by default all methods fail unless overriden
|
||||
id: Id,
|
||||
inspectInfo,
|
||||
inspect: () => Promise.resolve(inspectInfo),
|
||||
remove: (): Promise<boolean> =>
|
||||
Promise.reject('Mock network not attached to an engine'),
|
||||
};
|
||||
}
|
||||
|
||||
export type MockNetwork = ReturnType<typeof createNetwork>;
|
||||
|
||||
export function createContainer(container: PartialContainerInspectInfo) {
|
||||
const createContainerInspectInfo = (
|
||||
partial: PartialContainerInspectInfo,
|
||||
): dockerode.ContainerInspectInfo => {
|
||||
const {
|
||||
Id,
|
||||
State,
|
||||
Config,
|
||||
NetworkSettings,
|
||||
HostConfig,
|
||||
Mounts,
|
||||
...ContainerInfo
|
||||
} = partial;
|
||||
|
||||
return {
|
||||
Id,
|
||||
Created: '2015-01-06T15:47:31.485331387Z',
|
||||
Path: '/usr/bin/sleep',
|
||||
Args: ['infinity'],
|
||||
State: {
|
||||
Status: 'running',
|
||||
ExitCode: 0,
|
||||
Running: true,
|
||||
Paused: false,
|
||||
Restarting: false,
|
||||
OOMKilled: false,
|
||||
...State, // User passed options
|
||||
},
|
||||
Image: 'deadbeef',
|
||||
Name: 'main',
|
||||
HostConfig: {
|
||||
AutoRemove: false,
|
||||
Binds: [],
|
||||
LogConfig: {
|
||||
Type: 'journald',
|
||||
Config: {},
|
||||
},
|
||||
NetworkMode: 'bridge',
|
||||
PortBindings: {},
|
||||
RestartPolicy: {
|
||||
Name: 'always',
|
||||
MaximumRetryCount: 0,
|
||||
},
|
||||
VolumeDriver: '',
|
||||
CapAdd: [],
|
||||
CapDrop: [],
|
||||
Dns: [],
|
||||
DnsOptions: [],
|
||||
DnsSearch: [],
|
||||
ExtraHosts: [],
|
||||
GroupAdd: [],
|
||||
IpcMode: 'shareable',
|
||||
Privileged: false,
|
||||
SecurityOpt: [],
|
||||
ShmSize: 67108864,
|
||||
Memory: 0,
|
||||
MemoryReservation: 0,
|
||||
OomKillDisable: false,
|
||||
Devices: [],
|
||||
Ulimits: [],
|
||||
...HostConfig, // User passed options
|
||||
},
|
||||
Config: {
|
||||
Hostname: Id,
|
||||
Labels: {},
|
||||
Cmd: ['/usr/bin/sleep', 'infinity'],
|
||||
Env: [] as string[],
|
||||
Volumes: {},
|
||||
Image: 'alpine:latest',
|
||||
...Config, // User passed options
|
||||
},
|
||||
NetworkSettings: {
|
||||
Networks: {
|
||||
default: {
|
||||
Aliases: [],
|
||||
Gateway: '172.18.0.1',
|
||||
IPAddress: '172.18.0.2',
|
||||
IPPrefixLen: 16,
|
||||
MacAddress: '00:00:de:ad:be:ef',
|
||||
},
|
||||
},
|
||||
...NetworkSettings, // User passed options
|
||||
},
|
||||
Mounts: [
|
||||
...(Mounts || []).map(({ Name, ...opts }) => ({
|
||||
Name,
|
||||
Type: 'volume',
|
||||
Source: `/var/lib/docker/volumes/${Name}/_data`,
|
||||
Destination: '/opt/${Name}/path',
|
||||
Driver: 'local',
|
||||
Mode: '',
|
||||
RW: true,
|
||||
Propagation: '',
|
||||
|
||||
// Replace defaults
|
||||
...opts,
|
||||
})),
|
||||
],
|
||||
|
||||
...ContainerInfo,
|
||||
} as dockerode.ContainerInspectInfo;
|
||||
};
|
||||
|
||||
const createContainerInfo = (
|
||||
containerInspectInfo: dockerode.ContainerInspectInfo,
|
||||
): dockerode.ContainerInfo => {
|
||||
const {
|
||||
Id,
|
||||
Name,
|
||||
Created,
|
||||
Image,
|
||||
State,
|
||||
HostConfig,
|
||||
Config,
|
||||
Mounts,
|
||||
NetworkSettings,
|
||||
} = containerInspectInfo;
|
||||
|
||||
const capitalizeFirst = (s: string) =>
|
||||
s.charAt(0).toUpperCase() + s.slice(1);
|
||||
|
||||
// Calculate summary from existing inspectInfo object
|
||||
return {
|
||||
Id,
|
||||
Names: [Name],
|
||||
ImageID: Image,
|
||||
Image: Config.Image,
|
||||
Created: Date.parse(Created),
|
||||
Command: Config.Cmd.join(' '),
|
||||
State: capitalizeFirst(State.Status),
|
||||
Status: `Exit ${State.ExitCode}`,
|
||||
HostConfig: {
|
||||
NetworkMode: HostConfig.NetworkMode!,
|
||||
},
|
||||
Ports: [],
|
||||
Labels: Config.Labels,
|
||||
NetworkSettings: {
|
||||
Networks: NetworkSettings.Networks,
|
||||
},
|
||||
Mounts: Mounts as dockerode.ContainerInfo['Mounts'],
|
||||
};
|
||||
};
|
||||
|
||||
const inspectInfo = createContainerInspectInfo(container);
|
||||
const info = createContainerInfo(inspectInfo);
|
||||
|
||||
const { Id: id } = inspectInfo;
|
||||
|
||||
const fakeContainer = createFake(dockerode.Container.prototype);
|
||||
|
||||
return {
|
||||
...fakeContainer, // by default all methods fail unless overriden
|
||||
id,
|
||||
inspectInfo,
|
||||
info,
|
||||
inspect: () => Promise.resolve(inspectInfo),
|
||||
remove: (): Promise<boolean> =>
|
||||
Promise.reject('Mock container not attached to an engine'),
|
||||
};
|
||||
}
|
||||
|
||||
export type MockContainer = ReturnType<typeof createContainer>;
|
||||
|
||||
interface Reference {
|
||||
repository: string;
|
||||
tag?: string;
|
||||
digest?: string;
|
||||
toString: () => string;
|
||||
}
|
||||
|
||||
const parseReference = (uri: string): Reference => {
|
||||
// https://github.com/docker/distribution/blob/release/2.7/reference/normalize.go#L62
|
||||
// https://github.com/docker/distribution/blob/release/2.7/reference/regexp.go#L44
|
||||
const match = uri.match(
|
||||
/^(?:(localhost|.*?[.:].*?)\/)?(.+?)(?::(.*?))?(?:@(.*?))?$/,
|
||||
);
|
||||
|
||||
if (!match) {
|
||||
throw new Error(`Could not parse the image: ${uri}`);
|
||||
}
|
||||
|
||||
const [, registry, imageName, tagName, digest] = match;
|
||||
|
||||
let tag = tagName;
|
||||
if (!digest && !tag) {
|
||||
tag = 'latest';
|
||||
}
|
||||
|
||||
const digestMatch = digest?.match(
|
||||
/^[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*:[0-9a-f-A-F]{32,}$/,
|
||||
);
|
||||
if (!imageName || (digest && !digestMatch)) {
|
||||
throw new Error(
|
||||
'Invalid image name, expected [domain.tld/]repo/image[:tag][@digest] format',
|
||||
);
|
||||
}
|
||||
|
||||
const repository = [registry, imageName].filter((s) => !!s).join('/');
|
||||
|
||||
return {
|
||||
repository,
|
||||
tag,
|
||||
digest,
|
||||
toString: () =>
|
||||
repository +
|
||||
(tagName ? `:${tagName}` : '') +
|
||||
(digest ? `@${digest}` : ''),
|
||||
};
|
||||
};
|
||||
|
||||
/**
 * Build a mock image from a partial inspect object and a list of image
 * references (URIs).
 *
 * RepoTags and RepoDigests cannot be passed directly; they are derived from
 * the given References so the mock engine can resolve the image by tag or
 * digest. Any inspect field not provided is filled with a fixed default.
 *
 * The returned object exposes the dockerode Image interface: every method
 * fails by default (createFake), except inspect() which resolves with the
 * generated inspect info, and remove() which rejects until the image is
 * attached to a MockEngine (the engine replaces remove with a real
 * implementation).
 */
export function createImage(
	// Do not allow RepoTags or RepoDigests to be provided.
	// References must be used instead
	image: Omit<PartialImageInspectInfo, 'RepoTags' | 'RepoDigests'>,
	{ References = [] as string[] } = {},
) {
	// Expand a partial inspect object into a full dockerode.ImageInspectInfo
	// by layering it over hard-coded defaults
	const createImageInspectInfo = (
		partialImage: PartialImageInspectInfo,
	): dockerode.ImageInspectInfo => {
		const {
			Id,
			ContainerConfig,
			Config,
			GraphDriver,
			RootFS,
			...Info
		} = partialImage;

		return {
			Id,
			RepoTags: [],
			RepoDigests: [
				'registry2.resin.io/v2/8ddbe4a22e881f06def0f31400bfb6de@sha256:09b0db9e71cead5f91107fc9254b1af7088444cc6da55afa2da595940f72a34a',
			],
			Parent: '',
			Comment: 'Not a real image',
			Created: '2018-08-15T12:43:06.43392045Z',
			Container:
				'b6cc9227f272b905512a58926b6d515b38de34b604386031aa3c21e94d9dbb4a',
			ContainerConfig: {
				Hostname: 'f15babe8256c',
				Domainname: '',
				User: '',
				AttachStdin: false,
				AttachStdout: false,
				AttachStderr: false,
				Tty: false,
				OpenStdin: false,
				StdinOnce: false,
				Env: [
					'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin',
				],
				Cmd: ['/usr/bin/sleep', 'infinity'],
				ArgsEscaped: true,
				Image:
					'sha256:828d725f5e6d09ee9abc214f6c11fadf69192ba4871b050984cc9c4cec37b208',
				Volumes: {},
				WorkingDir: '',
				Entrypoint: null,
				OnBuild: null,
				Labels: {},

				// caller-provided fields win over the defaults above
				...ContainerConfig,
			} as dockerode.ImageInspectInfo['ContainerConfig'],
			DockerVersion: '17.05.0-ce',
			Author: '',
			Config: {
				Hostname: '',
				Domainname: '',
				User: '',
				AttachStdin: false,
				AttachStdout: false,
				AttachStderr: false,
				Tty: false,
				OpenStdin: false,
				StdinOnce: false,
				Env: [
					'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin',
				],
				Cmd: ['/usr/bin/sleep', 'infinity'],
				ArgsEscaped: true,
				Image:
					'sha256:828d725f5e6d09ee9abc214f6c11fadf69192ba4871b050984cc9c4cec37b208',
				Volumes: {},
				WorkingDir: '/usr/src/app',
				Entrypoint: ['/usr/bin/entry.sh'],
				OnBuild: [],
				Labels: {
					...(Config?.Labels ?? {}),
				},
				...Config,
			} as dockerode.ImageInspectInfo['Config'],

			Architecture: 'arm64',
			Os: 'linux',
			Size: 38692178,
			VirtualSize: 38692178,
			GraphDriver: {
				Data: {
					DeviceId: 'deadbeef',
					DeviceName: 'dummy',
					DeviceSize: '10m',
				},
				Name: 'aufs',

				...GraphDriver,
			} as dockerode.ImageInspectInfo['GraphDriver'],
			RootFS: {
				Type: 'layers',
				Layers: [
					'sha256:7c30ac6ce381873d5388b7d23b346af7d1e5f6af000a84b97e6203ed9e6dcab2',
					'sha256:450b73019ae79e6a99774fcd37c18769f95065c8b271be936dfb3f93afadc4a8',
					'sha256:6ab67aaf666bfb7001ab93deffe785f24775f4e0da3d6d421ad6096ba869fd0d',
				],

				...RootFS,
			} as dockerode.ImageInspectInfo['RootFS'],

			// any remaining top-level fields from the partial object
			...Info,
		};
	};

	// Derive the ImageInfo summary (as returned by listImages) from the
	// full inspect info
	const createImageInfo = (imageInspectInfo: dockerode.ImageInspectInfo) => {
		const {
			Id,
			Parent: ParentId,
			RepoTags,
			RepoDigests,
			Config,
		} = imageInspectInfo;

		const { Labels } = Config;

		return {
			Id,
			ParentId,
			RepoTags,
			RepoDigests,
			Created: 1474925151,
			Size: 103579269,
			VirtualSize: 103579269,
			SharedSize: 0,
			Labels,
			Containers: 0,
		};
	};

	const references = References.map((uri) => parseReference(uri));

	// Generate image repo tags and digests for inspect
	const { tags, digests } = references.reduce(
		(pairs, ref) => {
			if (ref.tag) {
				pairs.tags.push([ref.repository, ref.tag].join(':'));
			}

			if (ref.digest) {
				pairs.digests.push([ref.repository, ref.digest].join('@'));
			}

			return pairs;
		},
		{ tags: [] as string[], digests: [] as string[] },
	);

	const inspectInfo = createImageInspectInfo({
		...image,

		// Use references to fill RepoTags and RepoDigests ignoring
		// those from the partial inspect info
		RepoTags: [...new Set([...tags])],
		RepoDigests: [...new Set([...digests])],
	});
	const info = createImageInfo(inspectInfo);
	const { Id: id } = inspectInfo;

	const fakeImage = createFake(dockerode.Image.prototype);

	return {
		...fakeImage, // by default all methods fail unless overriden
		id,
		info,
		inspectInfo,
		references,
		inspect: () => Promise.resolve(inspectInfo),
		remove: (_opts: any): Promise<boolean> =>
			Promise.reject('Mock image not attached to an engine'),
	};
}
|
||||
|
||||
// Mock image object as returned by createImage
export type MockImage = ReturnType<typeof createImage>;
|
||||
|
||||
export function createVolume(volume: PartialVolumeInspectInfo) {
|
||||
const { Name, Labels, ...partialVolumeInfo } = volume;
|
||||
|
||||
const inspectInfo: dockerode.VolumeInspectInfo = {
|
||||
Name,
|
||||
Driver: 'local',
|
||||
Mountpoint: '/var/lib/docker/volumes/resin-data',
|
||||
Labels: {
|
||||
...Labels,
|
||||
} as dockerode.VolumeInspectInfo['Labels'],
|
||||
Scope: 'local',
|
||||
Options: {},
|
||||
|
||||
...partialVolumeInfo,
|
||||
};
|
||||
|
||||
const fakeVolume = createFake(dockerode.Volume.prototype);
|
||||
return {
|
||||
...fakeVolume, // by default all methods fail unless overriden
|
||||
name: Name,
|
||||
inspectInfo,
|
||||
inspect: () => Promise.resolve(inspectInfo),
|
||||
remove: (): Promise<boolean> =>
|
||||
Promise.reject('Mock volume not attached to an engine'),
|
||||
};
|
||||
}
|
||||
|
||||
// Mock volume object as returned by createVolume
export type MockVolume = ReturnType<typeof createVolume>;
|
||||
|
||||
// Initial engine state accepted by MockEngine and withMockerode.
// Entries are keyed by id (by name for volumes) once loaded into the engine.
export type MockEngineState = {
	containers?: MockContainer[];
	networks?: MockNetwork[];
	volumes?: MockVolume[];
	images?: MockImage[];
};
|
||||
|
||||
// Good enough function go generate ids for mock engine
|
||||
// source: https://stackoverflow.com/a/2117523
|
||||
function uuidv4() {
|
||||
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function (c) {
|
||||
// tslint:disable
|
||||
const r = (Math.random() * 16) | 0,
|
||||
v = c == 'x' ? r : (r & 0x3) | 0x8;
|
||||
return v.toString(16);
|
||||
});
|
||||
}
|
||||
|
||||
// Maps every method of T to a sinon stub with matching argument and return
// types; non-function members map to never
type Stubs<T> = {
	[K in keyof T]: T[K] extends (...args: infer TArgs) => infer TReturnValue
		? sinon.SinonStub<TArgs, TReturnValue>
		: never;
};
|
||||
|
||||
/**
 * In-memory implementation of the subset of the docker engine API used by
 * the supervisor compose modules.
 *
 * State is held in dictionaries keyed by id (by name for volumes). Objects
 * loaded on construction get their remove() methods rewired to mutate the
 * engine state, so removals performed through the dockerode interface are
 * observable in tests.
 */
export class MockEngine {
	networks: Dictionary<MockNetwork> = {};
	containers: Dictionary<MockContainer> = {};
	images: Dictionary<MockImage> = {};
	volumes: Dictionary<MockVolume> = {};

	constructor({
		networks = [],
		containers = [],
		images = [],
		volumes = [],
	}: MockEngineState) {
		// Key networks by id
		this.networks = networks.reduce((networkMap, network) => {
			const { id } = network;

			return {
				...networkMap,
				[id]: { ...network, remove: () => this.removeNetwork(id) },
			};
		}, {});

		// Key containers by id
		this.containers = containers.reduce((containerMap, container) => {
			const { id } = container;
			return {
				...containerMap,
				[id]: {
					...container,
					remove: () => this.removeContainer(id),
				},
			};
		}, {});

		// Key images by id
		this.images = images.reduce((imageMap, image) => {
			const { id } = image;

			return {
				...imageMap,
				[id]: {
					...image,
					remove: (options: any) => this.removeImage(id, options),
				},
			};
		}, {});

		// Key volumes by name
		this.volumes = volumes.reduce((volumeMap, volume) => {
			const { name } = volume;

			return {
				...volumeMap,
				[name]: {
					...volume,
					remove: () => this.removeVolume(name),
				},
			};
		}, {});
	}

	// Look up a network by id or name. If not found, returns a stub whose
	// inspect/remove reject with 404, mirroring dockerode's lazy lookup.
	getNetwork(id: string) {
		const network = Object.values(this.networks).find(
			(n) => n.inspectInfo.Id === id || n.inspectInfo.Name === id,
		);

		if (!network) {
			return {
				id,
				inspect: () =>
					Promise.reject({ statusCode: 404, message: `No such network ${id}` }),
				remove: () =>
					Promise.reject({ statusCode: 404, message: `No such network ${id}` }),
			} as MockNetwork;
		}

		return network;
	}

	// Returns the inspect info of every network, matching dockerode's
	// listNetworks shape
	listNetworks() {
		return Promise.resolve(
			Object.values(this.networks).map((network) => network.inspectInfo),
		);
	}

	// Delete a network by id or name; rejects with 404 if missing
	removeNetwork(id: string) {
		const network = Object.values(this.networks).find(
			(n) => n.inspectInfo.Id === id || n.inspectInfo.Name === id,
		);

		// Throw an error if the network does not exist
		// this should never happen
		if (!network) {
			return Promise.reject({
				statusCode: 404,
				message: `No such network ${id}`,
			});
		}

		return Promise.resolve(delete this.networks[network.id]);
	}

	// Create a network with a random id and register it.
	// NOTE(review): dockerode's createNetwork resolves with the created
	// network; this mock returns void — confirm callers ignore the result.
	createNetwork(options: dockerode.NetworkCreateOptions) {
		const Id = uuidv4();
		const network = createNetwork({ Id, ...options });

		// Add to the list
		this.networks[Id] = network;
	}

	listContainers() {
		return Promise.resolve(
			// List containers returns ContainerInfo objects so we return summaries
			Object.values(this.containers).map((container) => container.info),
		);
	}

	// Create a container with a random id in 'created' state and register it.
	// NOTE(review): returns void rather than the created container — confirm
	// callers ignore the result.
	createContainer(options: dockerode.ContainerCreateOptions) {
		const Id = uuidv4();

		const { name: Name, HostConfig, NetworkingConfig, ...Config } = options;

		const container = createContainer({
			Id,
			Name,
			HostConfig,
			Config,
			NetworkSettings: { Networks: NetworkingConfig?.EndpointsConfig ?? {} },
			State: { Status: 'created' },
		});

		// Add to the list
		this.containers[Id] = {
			...container,
			remove: () => this.removeContainer(Id),
		};
	}

	// Look up a container by id or name; returns a 404-rejecting stub if
	// missing (the stub has no remove method, unlike getNetwork's)
	getContainer(id: string) {
		const container = Object.values(this.containers).find(
			(c) => c.inspectInfo.Id === id || c.inspectInfo.Name === id,
		);

		if (!container) {
			return {
				id,
				inspect: () =>
					Promise.reject({
						statusCode: 404,
						message: `No such container ${id}`,
					}),
			} as MockContainer;
		}

		return { ...container, remove: () => this.removeContainer(id) };
	}

	// Delete a container by id or name; rejects with 404 if missing
	removeContainer(id: string) {
		const container = Object.values(this.containers).find(
			(c) => c.inspectInfo.Id === id || c.inspectInfo.Name === id,
		);

		// Throw an error if the container does not exist
		// this should never happen
		if (!container) {
			return Promise.reject({
				statusCode: 404,
				message: `No such container ${id}`,
			});
		}

		return Promise.resolve(delete this.containers[container.id]);
	}

	// Returns ImageInfo summaries, matching dockerode's listImages shape
	listImages() {
		return Promise.resolve(
			Object.values(this.images).map((image) => image.info),
		);
	}

	// Look up a volume by name; returns a 404-rejecting stub if missing
	getVolume(name: string) {
		const volume = Object.values(this.volumes).find((v) => v.name === name);

		if (!volume) {
			return {
				name,
				inspect: () =>
					Promise.reject({
						statusCode: 404,
						message: `No such volume ${name}`,
					}),
				remove: () =>
					Promise.reject({
						statusCode: 404,
						message: `No such volume ${name}`,
					}),
			};
		}

		return volume;
	}

	// Matches dockerode's listVolumes response shape ({ Volumes, Warnings })
	listVolumes() {
		return Promise.resolve({
			Volumes: Object.values(this.volumes).map((volume) => volume.inspectInfo),
			Warnings: [] as string[],
		});
	}

	// Delete a volume by name.
	// NOTE(review): unlike removeNetwork/removeContainer this does not 404
	// on a missing volume — confirm whether that asymmetry is intentional.
	removeVolume(name: string) {
		return Promise.resolve(delete this.volumes[name]);
	}

	// Create a volume and register it by name.
	// NOTE(review): returns void rather than the created volume — confirm
	// callers ignore the result.
	createVolume(options: PartialVolumeInspectInfo) {
		const { Name } = options;
		const volume = createVolume(options);

		// Add to the list
		this.volumes[Name] = volume;
	}

	// NOT a dockerode method
	// Implements this https://docs.docker.com/engine/reference/commandline/rmi/
	// Removes (and un-tags) one or more images from the host node.
	// If an image has multiple tags, using this command with the tag as
	// a parameter only removes the tag. If the tag is the only one for the image,
	// both the image and the tag are removed.
	// Does not remove images from a registry.
	// You cannot remove an image of a running container unless you use the
	// -f option. To see all images on a host use the docker image ls command.
	//
	// You can remove an image using its short or long ID, its tag, or its digest.
	// If an image has one or more tags referencing it, you must remove all of them
	// before the image is removed. Digest references are removed automatically when
	// an image is removed by tag.
	removeImage(name: string, { force } = { force: false }) {
		const image = this.findImage(name);

		// Throw an error if the image does not exist
		if (!image) {
			return Promise.reject({
				statusCode: 404,
				message: `No such image ${name}`,
			});
		}

		// Get the id of the image
		const { Id: id } = image.inspectInfo;

		// If the given identifier is an id
		if (id === name) {
			if (!force && image.references.length > 1) {
				// If the name is an id and there are multiple tags
				// or digests referencing the image, don't delete unless the force option
				return Promise.reject({
					statusCode: 409,
					message: `Unable to delete image ${name} with multiple references`,
				});
			}

			return Promise.resolve(delete this.images[id]);
		}

		// If the name is not an id, then it must be a reference
		const ref = parseReference(name);

		// Keep only the references that do NOT match the one being removed
		const References = image.references
			.filter(
				(r) =>
					r.repository !== ref.repository ||
					(r.digest !== ref.digest && r.tag !== ref.tag),
			)
			.map((r) => r.toString());

		if (References.length > 0) {
			// If there are still digests or tags, just update the stored image
			this.images[id] = {
				...createImage(image.inspectInfo, { References }),
				remove: (options: any) => this.removeImage(id, options),
			};

			return Promise.resolve(true);
		}

		// Remove the original image
		return Promise.resolve(delete this.images[id]);
	}

	// Resolve an image by id first, then by tag/digest reference;
	// returns undefined if no match
	private findImage(name: string) {
		if (this.images[name]) {
			return this.images[name];
		}

		// If the identifier is not an id it must be a reference
		const ref = parseReference(name);

		return Object.values(this.images).find((img) =>
			img.references.some(
				(r) =>
					// Find an image that has a reference with matching digest or tag
					r.repository === ref.repository &&
					(r.digest === ref.digest || r.tag === ref.tag),
			),
		);
	}

	// Look up an image by id or reference; returns a 404-rejecting stub if
	// missing
	getImage(name: string) {
		const image = this.findImage(name);

		// If the image doesn't exist return an empty object
		if (!image) {
			return {
				id: name,
				inspect: () =>
					Promise.reject({ statusCode: 404, message: `No such image ${name}` }),
				remove: () =>
					Promise.reject({ statusCode: 404, message: `No such image ${name}` }),
			};
		}

		return {
			...image,
			remove: (options: any) => this.removeImage(name, options),
		};
	}

	// Minimal getEvents stand-in: resolves with a no-op stream-like object
	getEvents() {
		console.log(`Calling mockerode.getEvents 🐳`);
		return Promise.resolve({ on: () => void 0, pipe: () => void 0 });
	}
}
|
||||
|
||||
/**
 * Stub every method on dockerode.prototype with sinon so any Dockerode
 * instance created by code under test is backed by the given MockEngine.
 *
 * Methods the engine implements are forwarded to it; all others throw so
 * tests fail loudly instead of silently succeeding on unimplemented calls.
 * Also spies on the engine's own remove* methods so tests can assert on
 * them. Callers must invoke restore() when done (withMockerode does this).
 */
export function createMockerode(engine: MockEngine) {
	const dockerodeStubs: Stubs<dockerode> = (Object.getOwnPropertyNames(
		dockerode.prototype,
	) as (keyof dockerode)[])
		.filter((fn) => typeof dockerode.prototype[fn] === 'function')
		.reduce((stubMap, fn) => {
			const stub = sinon.stub(dockerode.prototype, fn);

			if (MockEngine.prototype.hasOwnProperty(fn)) {
				// Forward to the engine implementation, bound to this instance
				stub.callsFake((MockEngine.prototype as any)[fn].bind(engine));
			} else {
				// By default all unimplemented methods will throw to avoid the tests
				// silently failing
				stub.throws(`Not implemented: Dockerode.${fn}`);
			}

			return { ...stubMap, [fn]: stub };
		}, {} as Stubs<dockerode>);

	// Capture the original methods before sinon replaces them below
	const { removeImage, removeNetwork, removeVolume, removeContainer } = engine;

	// Add stubs to additional engine methods we want to
	// be able to check
	const mockEngineStubs = {
		removeImage: sinon
			.stub(engine, 'removeImage')
			.callsFake(removeImage.bind(engine)),
		removeNetwork: sinon
			.stub(engine, 'removeNetwork')
			.callsFake(removeNetwork.bind(engine)),
		removeVolume: sinon
			.stub(engine, 'removeVolume')
			.callsFake(removeVolume.bind(engine)),
		removeContainer: sinon
			.stub(engine, 'removeContainer')
			.callsFake(removeContainer.bind(engine)),
	};

	return {
		...dockerodeStubs,
		...mockEngineStubs,
		// Remove all stubs, restoring the original prototype methods
		restore: () => {
			Object.values(dockerodeStubs).forEach((stub) => stub.restore());
			Object.values(mockEngineStubs).forEach((spy) => spy.restore());
		},
		// Reset call counts/args without removing the stubs
		resetHistory: () => {
			Object.values(dockerodeStubs).forEach((stub) => stub.resetHistory());
			Object.values(mockEngineStubs).forEach((spy) => spy.resetHistory());
		},
	};
}
|
||||
|
||||
// Stubbed dockerode plus engine spies, as returned by createMockerode
export type Mockerode = ReturnType<typeof createMockerode>;
|
||||
|
||||
export async function withMockerode(
|
||||
test: (dockerode: Mockerode) => Promise<any>,
|
||||
initialState: MockEngineState = {
|
||||
containers: [],
|
||||
networks: [],
|
||||
volumes: [],
|
||||
images: [],
|
||||
},
|
||||
) {
|
||||
const mockEngine = new MockEngine(initialState);
|
||||
const mockerode = createMockerode(mockEngine);
|
||||
|
||||
try {
|
||||
// run the tests
|
||||
await test(mockerode);
|
||||
} finally {
|
||||
// restore stubs always
|
||||
mockerode.restore();
|
||||
}
|
||||
}
|
1368
test/src/compose/app.spec.ts
Normal file
1368
test/src/compose/app.spec.ts
Normal file
File diff suppressed because it is too large
Load Diff
1063
test/src/compose/application-manager.spec.ts
Normal file
1063
test/src/compose/application-manager.spec.ts
Normal file
File diff suppressed because it is too large
Load Diff
481
test/src/compose/images.spec.ts
Normal file
481
test/src/compose/images.spec.ts
Normal file
@ -0,0 +1,481 @@
|
||||
import { expect } from 'chai';
|
||||
|
||||
import * as imageManager from '../../../src/compose/images';
|
||||
import * as dbHelper from '../../lib/db-helper';
|
||||
import { createImage, withMockerode } from '../../lib/mockerode';
|
||||
import * as sinon from 'sinon';
|
||||
|
||||
import log from '../../../src/lib/supervisor-console';
|
||||
|
||||
describe('compose/images', () => {
|
||||
let testDb: dbHelper.TestDatabase;
|
||||
before(async () => {
|
||||
testDb = await dbHelper.createDB();
|
||||
|
||||
// disable log output during testing
|
||||
sinon.stub(log, 'debug');
|
||||
sinon.stub(log, 'warn');
|
||||
sinon.stub(log, 'info');
|
||||
sinon.stub(log, 'event');
|
||||
sinon.stub(log, 'success');
|
||||
});
|
||||
|
||||
after(async () => {
|
||||
try {
|
||||
await testDb.destroy();
|
||||
} catch (e) {
|
||||
/* noop */
|
||||
}
|
||||
|
||||
// Restore stubbed methods
|
||||
sinon.restore();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
testDb.reset();
|
||||
});
|
||||
|
||||
it('finds images by the dockerImageId in the database if looking by name does not succeed', async () => {
|
||||
const dbImage = {
|
||||
id: 246,
|
||||
name:
|
||||
'registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
appId: 1658654,
|
||||
serviceId: 650325,
|
||||
serviceName: 'app_1',
|
||||
imageId: 2693229,
|
||||
releaseId: 1524186,
|
||||
dependent: 0,
|
||||
dockerImageId:
|
||||
'sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7',
|
||||
};
|
||||
await testDb.models('image').insert([dbImage]);
|
||||
|
||||
const images = [
|
||||
createImage(
|
||||
{
|
||||
Id:
|
||||
'sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7',
|
||||
Config: {
|
||||
Labels: {
|
||||
'io.balena.some-label': 'this is my label',
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
References: [
|
||||
// Delta digest doesn't match image.name digest
|
||||
'registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a:delta-ada9fbb57d90e61e:@sha256:12345a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
],
|
||||
},
|
||||
),
|
||||
];
|
||||
|
||||
await withMockerode(
|
||||
async (mockerode) => {
|
||||
// Looking by name should fail, if not, this is a mockerode issue
|
||||
await expect(mockerode.getImage(dbImage.name).inspect()).to.be.rejected;
|
||||
|
||||
// Looking up the image by id should succeed
|
||||
await expect(mockerode.getImage(dbImage.dockerImageId).inspect()).to.not
|
||||
.be.rejected;
|
||||
|
||||
const img = await imageManager.inspectByName(dbImage.name);
|
||||
|
||||
expect(mockerode.getImage).to.have.been.calledWith(
|
||||
'sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7',
|
||||
);
|
||||
|
||||
// Check that the found image has the proper labels
|
||||
expect(img.Config.Labels).to.deep.equal({
|
||||
'io.balena.some-label': 'this is my label',
|
||||
});
|
||||
},
|
||||
{ images },
|
||||
);
|
||||
});
|
||||
|
||||
it('removes a single legacy db images without dockerImageId', async () => {
|
||||
// Legacy images don't have a dockerImageId so they are queried by name
|
||||
const imageToRemove = {
|
||||
id: 246,
|
||||
name:
|
||||
'registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
appId: 1658654,
|
||||
serviceId: 650325,
|
||||
serviceName: 'app_1',
|
||||
imageId: 2693229,
|
||||
releaseId: 1524186,
|
||||
dependent: 0,
|
||||
};
|
||||
|
||||
await testDb.models('image').insert([imageToRemove]);
|
||||
|
||||
// Engine image state
|
||||
const images = [
|
||||
createImage(
|
||||
{
|
||||
Id: 'deadbeef',
|
||||
},
|
||||
{
|
||||
// Image references
|
||||
References: [
|
||||
'registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a:delta-ada9fbb57d90e61e@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
],
|
||||
},
|
||||
),
|
||||
createImage(
|
||||
{
|
||||
Id: 'deadca1f',
|
||||
},
|
||||
{
|
||||
References: ['balena/aarch64-supervisor:11.11.11'],
|
||||
},
|
||||
),
|
||||
];
|
||||
|
||||
// Perform the test with our specially crafted data
|
||||
await withMockerode(
|
||||
async (mockerode) => {
|
||||
// Check that our legacy image exists
|
||||
// failsafe to check for mockerode problems
|
||||
await expect(
|
||||
mockerode.getImage(imageToRemove.name).inspect(),
|
||||
'image exists on the engine before test',
|
||||
).to.not.be.rejected;
|
||||
|
||||
// Check that the image exists on the db
|
||||
expect(
|
||||
await testDb.models('image').select().where(imageToRemove),
|
||||
).to.have.lengthOf(1);
|
||||
|
||||
// Now remove this image...
|
||||
await imageManager.remove(imageToRemove);
|
||||
|
||||
// This checks that the remove method was ultimately called
|
||||
expect(mockerode.removeImage).to.have.been.calledOnceWith(
|
||||
imageToRemove.name,
|
||||
);
|
||||
|
||||
// Check that the image was removed from the db
|
||||
expect(await testDb.models('image').select().where(imageToRemove)).to.be
|
||||
.empty;
|
||||
},
|
||||
{ images },
|
||||
);
|
||||
});
|
||||
|
||||
it('removes image from DB and engine when there is a single DB image with matching dockerImageId', async () => {
|
||||
// Newer image
|
||||
const imageToRemove = {
|
||||
id: 246,
|
||||
name:
|
||||
'registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
appId: 1658654,
|
||||
serviceId: 650325,
|
||||
serviceName: 'app_1',
|
||||
imageId: 2693229,
|
||||
releaseId: 1524186,
|
||||
dependent: 0,
|
||||
dockerImageId:
|
||||
'sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7',
|
||||
};
|
||||
|
||||
// Insert images into the db
|
||||
await testDb.models('image').insert([
|
||||
imageToRemove,
|
||||
{
|
||||
id: 247,
|
||||
name:
|
||||
'registry2.balena-cloud.com/v2/902cf44eb0ed51675a0bf95a7bbf0c91@sha256:12345a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
appId: 1658654,
|
||||
serviceId: 650331,
|
||||
serviceName: 'app_2',
|
||||
imageId: 2693230,
|
||||
releaseId: 1524186,
|
||||
dependent: 0,
|
||||
dockerImageId:
|
||||
'sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc901234',
|
||||
},
|
||||
]);
|
||||
|
||||
// Engine image state
|
||||
const images = [
|
||||
// The image to remove
|
||||
createImage(
|
||||
{
|
||||
Id:
|
||||
'sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7',
|
||||
},
|
||||
{
|
||||
// The target digest matches the image name
|
||||
References: [
|
||||
'registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a:delta-ada9fbb57d90e61e@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
],
|
||||
},
|
||||
),
|
||||
// Other images to test
|
||||
createImage(
|
||||
{
|
||||
Id: 'aaa',
|
||||
},
|
||||
{
|
||||
References: ['balena/aarch64-supervisor:11.11.11'],
|
||||
},
|
||||
),
|
||||
// The other image on the database
|
||||
createImage(
|
||||
{
|
||||
Id:
|
||||
'sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc901234',
|
||||
},
|
||||
{
|
||||
References: [
|
||||
'registry2.balena-cloud.com/v2/902cf44eb0ed51675a0bf95a7bbf0c91:delta-80ed841a1d3fefa9@sha256:12345a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
],
|
||||
},
|
||||
),
|
||||
];
|
||||
|
||||
// Perform the test with our specially crafted data
|
||||
await withMockerode(
|
||||
async (mockerode) => {
|
||||
// Check that the image exists
|
||||
// this is really checking that mockerode works ok
|
||||
await expect(
|
||||
mockerode.getImage(imageToRemove.name).inspect(),
|
||||
'image exists on the engine before test',
|
||||
).to.not.be.rejected;
|
||||
|
||||
// Check that only one image with this dockerImageId exists in the db
|
||||
// in memory db is a bit flaky sometimes, this checks for issues
|
||||
expect(
|
||||
await testDb.models('image').where(imageToRemove).select(),
|
||||
'image exists on db before the test',
|
||||
).to.have.lengthOf(1);
|
||||
|
||||
// Check that only one image with this dockerImageId exists in the db
|
||||
expect(
|
||||
await testDb
|
||||
.models('image')
|
||||
.where({ dockerImageId: imageToRemove.dockerImageId })
|
||||
.select(),
|
||||
).to.have.lengthOf(1);
|
||||
|
||||
// Now remove this image...
|
||||
await imageManager.remove(imageToRemove);
|
||||
|
||||
// Check that the remove method was only called once
|
||||
expect(mockerode.removeImage).to.have.been.calledOnceWith(
|
||||
imageToRemove.dockerImageId,
|
||||
);
|
||||
|
||||
// Check that the database no longer has this image
|
||||
expect(await testDb.models('image').select().where(imageToRemove)).to.be
|
||||
.empty;
|
||||
|
||||
// Expect 1 entry left on the database
|
||||
expect(await testDb.models('image').select()).to.have.lengthOf(1);
|
||||
},
|
||||
{ images },
|
||||
);
|
||||
});
|
||||
|
||||
it('removes image from DB by name where there are multiple db images with same docker id', async () => {
|
||||
const imageToRemove = {
|
||||
id: 246,
|
||||
name:
|
||||
'registry2.balena-cloud.com/v2/793f9296017bbfe026334820ab56bb3a@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
appId: 1658654,
|
||||
serviceId: 650325,
|
||||
serviceName: 'app_1',
|
||||
imageId: 2693229,
|
||||
releaseId: 1524186,
|
||||
dependent: 0,
|
||||
dockerImageId:
|
||||
'sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7',
|
||||
};
|
||||
|
||||
const imageWithSameDockerImageId = {
|
||||
id: 247,
|
||||
name:
|
||||
'registry2.balena-cloud.com/v2/902cf44eb0ed51675a0bf95a7bbf0c91@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
appId: 1658654,
|
||||
serviceId: 650331,
|
||||
serviceName: 'app_2',
|
||||
imageId: 2693230,
|
||||
releaseId: 1524186,
|
||||
dependent: 0,
|
||||
|
||||
// Same imageId
|
||||
dockerImageId: imageToRemove.dockerImageId,
|
||||
};
|
||||
|
||||
// Insert images into the db
|
||||
await testDb.models('image').insert([
|
||||
imageToRemove,
|
||||
// Another image from the same app
|
||||
imageWithSameDockerImageId,
|
||||
]);
|
||||
|
||||
// Engine image state
|
||||
const images = [
|
||||
// The image to remove
|
||||
createImage(
|
||||
{
|
||||
Id: imageToRemove.dockerImageId,
|
||||
},
|
||||
{
|
||||
References: [imageToRemove.name, imageWithSameDockerImageId.name],
|
||||
},
|
||||
),
|
||||
// Other images to test
|
||||
createImage(
|
||||
{
|
||||
Id: 'aaa',
|
||||
},
|
||||
{
|
||||
References: ['balena/aarch64-supervisor:11.11.11'],
|
||||
},
|
||||
),
|
||||
];
|
||||
|
||||
// Perform the test with our specially crafted data
|
||||
await withMockerode(
|
||||
async (mockerode) => {
|
||||
// Check that the image is on the engine
|
||||
// really checking mockerode behavior
|
||||
await expect(
|
||||
mockerode.getImage(imageToRemove.dockerImageId).inspect(),
|
||||
'image exists on the engine before the test',
|
||||
).to.not.be.rejected;
|
||||
|
||||
// Check that multiple images with the same dockerImageId are returned
|
||||
expect(
|
||||
await testDb
|
||||
.models('image')
|
||||
.where({ dockerImageId: imageToRemove.dockerImageId })
|
||||
.select(),
|
||||
).to.have.lengthOf(2);
|
||||
|
||||
// Now remove these images
|
||||
await imageManager.remove(imageToRemove);
|
||||
|
||||
// Check that only the image with the right name was removed
|
||||
expect(mockerode.removeImage).to.have.been.calledOnceWith(
|
||||
imageToRemove.name,
|
||||
);
|
||||
|
||||
// Check that the database no longer has this image
|
||||
expect(await testDb.models('image').select().where(imageToRemove)).to.be
|
||||
.empty;
|
||||
|
||||
// Check that the image with the same dockerImageId is still on the database
|
||||
expect(
|
||||
await testDb
|
||||
.models('image')
|
||||
.select()
|
||||
.where({ dockerImageId: imageWithSameDockerImageId.dockerImageId }),
|
||||
).to.have.lengthOf(1);
|
||||
},
|
||||
{ images },
|
||||
);
|
||||
});
|
||||
|
||||
it('removes image from DB by tag where there are multiple db images with same docker id and deltas are being used', async () => {
|
||||
const imageToRemove = {
|
||||
id: 246,
|
||||
name:
|
||||
'registry2.balena-cloud.com/v2/aaaa@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
appId: 1658654,
|
||||
serviceId: 650325,
|
||||
serviceName: 'app_1',
|
||||
imageId: 2693229,
|
||||
releaseId: 1524186,
|
||||
dependent: 0,
|
||||
dockerImageId: 'sha256:deadbeef',
|
||||
};
|
||||
|
||||
const imageWithSameDockerImageId = {
|
||||
id: 247,
|
||||
name:
|
||||
'registry2.balena-cloud.com/v2/bbbb@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
appId: 1658654,
|
||||
serviceId: 650331,
|
||||
serviceName: 'app_2',
|
||||
imageId: 2693230,
|
||||
releaseId: 1524186,
|
||||
dependent: 0,
|
||||
dockerImageId: imageToRemove.dockerImageId,
|
||||
};
|
||||
|
||||
// Insert images into the db
|
||||
await testDb.models('image').insert([
|
||||
imageToRemove,
|
||||
// Another image from the same app
|
||||
imageWithSameDockerImageId,
|
||||
]);
|
||||
|
||||
// Engine image state
|
||||
const images = [
|
||||
// The image to remove
|
||||
createImage(
|
||||
{
|
||||
Id: imageToRemove.dockerImageId,
|
||||
},
|
||||
{
|
||||
References: [
|
||||
// The image has two deltas with different digests than those in image.name
|
||||
'registry2.balena-cloud.com/v2/aaaa:delta-123@sha256:6eb712fc797ff68f258d9032cf292c266cb9bd8be4cbdaaafeb5a8824bb104fd',
|
||||
'registry2.balena-cloud.com/v2/bbbb:delta-456@sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc901234',
|
||||
],
|
||||
},
|
||||
),
|
||||
];
|
||||
|
||||
// Perform the test with our specially crafted data
|
||||
await withMockerode(
|
||||
async (mockerode) => {
|
||||
// Check that the image is on the engine
|
||||
await expect(
|
||||
mockerode.getImage(imageToRemove.dockerImageId).inspect(),
|
||||
'image can be found by id before the test',
|
||||
).to.not.be.rejected;
|
||||
|
||||
// Check that a single image is returned when given entire object
|
||||
expect(
|
||||
await testDb.models('image').select().where(imageToRemove),
|
||||
).to.have.lengthOf(1);
|
||||
|
||||
// Check that multiple images with the same dockerImageId are returned
|
||||
expect(
|
||||
await testDb
|
||||
.models('image')
|
||||
.where({ dockerImageId: imageToRemove.dockerImageId })
|
||||
.select(),
|
||||
).to.have.lengthOf(2);
|
||||
|
||||
// Now remove these images
|
||||
await imageManager.remove(imageToRemove);
|
||||
|
||||
// This tests the behavior
|
||||
expect(mockerode.removeImage).to.have.been.calledOnceWith(
|
||||
'registry2.balena-cloud.com/v2/aaaa:delta-123',
|
||||
);
|
||||
|
||||
// Check that the database no longer has this image
|
||||
expect(await testDb.models('image').select().where(imageToRemove)).to.be
|
||||
.empty;
|
||||
|
||||
// Check that the image with the same dockerImageId is still on the database
|
||||
expect(
|
||||
await testDb
|
||||
.models('image')
|
||||
.select()
|
||||
.where(imageWithSameDockerImageId),
|
||||
).to.have.lengthOf(1);
|
||||
},
|
||||
{ images },
|
||||
);
|
||||
});
|
||||
});
|
@ -3,6 +3,7 @@ import * as sinon from 'sinon';
|
||||
|
||||
import { Network } from '../../../src/compose/network';
|
||||
import { NetworkInspectInfo } from 'dockerode';
|
||||
import { createNetwork, withMockerode } from '../../lib/mockerode';
|
||||
|
||||
import { log } from '../../../src/lib/supervisor-console';
|
||||
|
||||
@ -95,8 +96,8 @@ describe('compose/network', () => {
|
||||
},
|
||||
});
|
||||
|
||||
expect(logSpy).to.be.called.calledOnce;
|
||||
expect(logSpy).to.be.called.calledWithMatch(
|
||||
expect(logSpy).to.have.been.calledOnce;
|
||||
expect(logSpy).to.have.been.calledWithMatch(
|
||||
'Network IPAM config entries must have both a subnet and gateway',
|
||||
);
|
||||
|
||||
@ -114,8 +115,8 @@ describe('compose/network', () => {
|
||||
},
|
||||
});
|
||||
|
||||
expect(logSpy).to.be.called.calledOnce;
|
||||
expect(logSpy).to.be.called.calledWithMatch(
|
||||
expect(logSpy).to.have.been.calledOnce;
|
||||
expect(logSpy).to.have.been.calledWithMatch(
|
||||
'Network IPAM config entries must have both a subnet and gateway',
|
||||
);
|
||||
|
||||
@ -395,4 +396,153 @@ describe('compose/network', () => {
|
||||
).to.be.false;
|
||||
});
|
||||
});
|
||||
|
||||
describe('creating networks', () => {
|
||||
it('creates a new network on the engine with the given data', async () => {
|
||||
await withMockerode(async (mockerode) => {
|
||||
const network = Network.fromComposeObject('default', 12345, {
|
||||
ipam: {
|
||||
driver: 'default',
|
||||
config: [
|
||||
{
|
||||
subnet: '172.20.0.0/16',
|
||||
ip_range: '172.20.10.0/24',
|
||||
gateway: '172.20.0.1',
|
||||
},
|
||||
],
|
||||
options: {},
|
||||
},
|
||||
});
|
||||
|
||||
// Create the network
|
||||
await network.create();
|
||||
|
||||
// Check that the create function was called with proper arguments
|
||||
expect(mockerode.createNetwork).to.have.been.calledOnceWith({
|
||||
Name: '12345_default',
|
||||
Driver: 'bridge',
|
||||
CheckDuplicate: true,
|
||||
IPAM: {
|
||||
Driver: 'default',
|
||||
Config: [
|
||||
{
|
||||
Subnet: '172.20.0.0/16',
|
||||
IPRange: '172.20.10.0/24',
|
||||
Gateway: '172.20.0.1',
|
||||
},
|
||||
],
|
||||
Options: {},
|
||||
},
|
||||
EnableIPv6: false,
|
||||
Internal: false,
|
||||
Labels: {
|
||||
'io.balena.supervised': 'true',
|
||||
},
|
||||
Options: {},
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('throws the error if there is a problem while creating the network', async () => {
|
||||
await withMockerode(async (mockerode) => {
|
||||
const network = Network.fromComposeObject('default', 12345, {
|
||||
ipam: {
|
||||
driver: 'default',
|
||||
config: [
|
||||
{
|
||||
subnet: '172.20.0.0/16',
|
||||
ip_range: '172.20.10.0/24',
|
||||
gateway: '172.20.0.1',
|
||||
},
|
||||
],
|
||||
options: {},
|
||||
},
|
||||
});
|
||||
|
||||
// Re-define the dockerode.createNetwork to throw
|
||||
mockerode.createNetwork.rejects('Unknown engine error');
|
||||
|
||||
// Creating the network should fail
|
||||
return expect(network.create()).to.be.rejected.then((error) =>
|
||||
expect(error).to.have.property('name', 'Unknown engine error'),
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('removing a network', () => {
|
||||
it('removes the network from the engine if it exists', async () => {
|
||||
// Create a mock network to add to the mock engine
|
||||
const dockerNetwork = createNetwork({
|
||||
Id: 'deadbeef',
|
||||
Name: '12345_default',
|
||||
});
|
||||
|
||||
await withMockerode(
|
||||
async (mockerode) => {
|
||||
// Check that the engine has the network
|
||||
expect(await mockerode.listNetworks()).to.have.lengthOf(1);
|
||||
|
||||
// Create a dummy network object
|
||||
const network = Network.fromComposeObject('default', 12345, {});
|
||||
|
||||
// Perform the operation
|
||||
await network.remove();
|
||||
|
||||
// The removal step should delete the object from the engine data
|
||||
expect(mockerode.removeNetwork).to.have.been.calledOnceWith(
|
||||
'deadbeef',
|
||||
);
|
||||
},
|
||||
{ networks: [dockerNetwork] },
|
||||
);
|
||||
});
|
||||
|
||||
it('ignores the request if the given network does not exist on the engine', async () => {
|
||||
// Create a mock network to add to the mock engine
|
||||
const mockNetwork = createNetwork({
|
||||
Id: 'deadbeef',
|
||||
Name: 'some_network',
|
||||
});
|
||||
|
||||
await withMockerode(
|
||||
async (mockerode) => {
|
||||
// Check that the engine has the network
|
||||
expect(await mockerode.listNetworks()).to.have.lengthOf(1);
|
||||
|
||||
// Create a dummy network object
|
||||
const network = Network.fromComposeObject('default', 12345, {});
|
||||
|
||||
// This should not fail
|
||||
await expect(network.remove()).to.not.be.rejected;
|
||||
|
||||
// We expect the network state to remain constant
|
||||
expect(await mockerode.listNetworks()).to.have.lengthOf(1);
|
||||
},
|
||||
{ networks: [mockNetwork] },
|
||||
);
|
||||
});
|
||||
|
||||
it('throws the error if there is a problem while removing the network', async () => {
|
||||
// Create a mock network to add to the mock engine
|
||||
const mockNetwork = createNetwork({
|
||||
Id: 'deadbeef',
|
||||
Name: '12345_default',
|
||||
});
|
||||
|
||||
await withMockerode(
|
||||
async (mockerode) => {
|
||||
// We can change the return value of the mockerode removeNetwork
|
||||
// to have the remove operation fail
|
||||
mockerode.removeNetwork.throws('Failed to remove the network');
|
||||
|
||||
// Create a dummy network object
|
||||
const network = Network.fromComposeObject('default', 12345, {});
|
||||
|
||||
await expect(network.remove()).to.be.rejected;
|
||||
},
|
||||
{ networks: [mockNetwork] },
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
1034
test/src/compose/service.spec.ts
Normal file
1034
test/src/compose/service.spec.ts
Normal file
File diff suppressed because it is too large
Load Diff
352
test/src/compose/volume-manager.spec.ts
Normal file
352
test/src/compose/volume-manager.spec.ts
Normal file
@ -0,0 +1,352 @@
|
||||
import { expect } from 'chai';
|
||||
|
||||
import * as sinon from 'sinon';
|
||||
import {
|
||||
createVolume,
|
||||
createContainer,
|
||||
withMockerode,
|
||||
} from '../../lib/mockerode';
|
||||
import * as volumeManager from '../../../src/compose/volume-manager';
|
||||
import log from '../../../src/lib/supervisor-console';
|
||||
import Volume from '../../../src/compose/volume';
|
||||
|
||||
describe('compose/volume-manager', () => {
|
||||
describe('Retrieving volumes from the engine', () => {
|
||||
let logDebug: sinon.SinonStub;
|
||||
before(() => {
|
||||
logDebug = sinon.stub(log, 'debug');
|
||||
});
|
||||
after(() => {
|
||||
logDebug.restore();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
logDebug.reset();
|
||||
});
|
||||
|
||||
it('gets all supervised Volumes', async () => {
|
||||
// Setup volume data
|
||||
const volumeData = [
|
||||
createVolume({
|
||||
Name: Volume.generateDockerName(1, 'redis'),
|
||||
// Recently created volumes contain io.balena.supervised label
|
||||
Labels: { 'io.balena.supervised': '1' },
|
||||
}),
|
||||
createVolume({
|
||||
Name: Volume.generateDockerName(1, 'mysql'),
|
||||
// Recently created volumes contain io.balena.supervised label
|
||||
Labels: { 'io.balena.supervised': '1' },
|
||||
}),
|
||||
createVolume({
|
||||
Name: Volume.generateDockerName(1, 'backend'),
|
||||
// Old Volumes will not have labels
|
||||
}),
|
||||
// Volume not created by the Supervisor
|
||||
createVolume({ Name: 'user_created_volume' }),
|
||||
createVolume({
|
||||
Name: 'decoy',
|
||||
// Added decoy to really test the inference (should not return)
|
||||
Labels: { 'io.balena.supervised': '1' },
|
||||
}),
|
||||
];
|
||||
|
||||
// Perform test
|
||||
await withMockerode(
|
||||
async () => {
|
||||
await expect(volumeManager.getAll()).to.eventually.deep.equal([
|
||||
{
|
||||
appId: 1,
|
||||
config: {
|
||||
driver: 'local',
|
||||
driverOpts: {},
|
||||
labels: {
|
||||
'io.balena.supervised': '1',
|
||||
},
|
||||
},
|
||||
name: 'redis',
|
||||
},
|
||||
{
|
||||
appId: 1,
|
||||
config: {
|
||||
driver: 'local',
|
||||
driverOpts: {},
|
||||
labels: {
|
||||
'io.balena.supervised': '1',
|
||||
},
|
||||
},
|
||||
name: 'mysql',
|
||||
},
|
||||
{
|
||||
appId: 1,
|
||||
config: {
|
||||
driver: 'local',
|
||||
driverOpts: {},
|
||||
labels: {},
|
||||
},
|
||||
name: 'backend',
|
||||
},
|
||||
]);
|
||||
// Check that debug message was logged saying we found a Volume not created by us
|
||||
expect(logDebug.lastCall.lastArg).to.equal(
|
||||
'Found unmanaged Volume: decoy',
|
||||
);
|
||||
},
|
||||
{ volumes: volumeData },
|
||||
);
|
||||
});
|
||||
|
||||
it('can parse null Volumes', async () => {
|
||||
// Perform test with no volumes
|
||||
await withMockerode(async () => {
|
||||
await expect(volumeManager.getAll()).to.eventually.deep.equal([]);
|
||||
});
|
||||
});
|
||||
|
||||
it('gets the volume for specific application', async () => {
|
||||
// Setup volume data
|
||||
const volumes = [
|
||||
createVolume({
|
||||
Name: Volume.generateDockerName(111, 'app'),
|
||||
Labels: {
|
||||
'io.balena.supervised': '1',
|
||||
},
|
||||
}),
|
||||
createVolume({
|
||||
Name: Volume.generateDockerName(222, 'otherApp'),
|
||||
Labels: {
|
||||
'io.balena.supervised': '1',
|
||||
},
|
||||
}),
|
||||
];
|
||||
// Perform test
|
||||
await withMockerode(
|
||||
async () => {
|
||||
await expect(
|
||||
volumeManager.getAllByAppId(111),
|
||||
).to.eventually.deep.equal([
|
||||
{
|
||||
appId: 111,
|
||||
config: {
|
||||
driver: 'local',
|
||||
driverOpts: {},
|
||||
labels: {
|
||||
'io.balena.supervised': '1',
|
||||
},
|
||||
},
|
||||
name: 'app',
|
||||
},
|
||||
]);
|
||||
},
|
||||
{ volumes },
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Creating volumes', () => {
|
||||
it('creates a volume if it does not exist', async () => {
|
||||
// Perform test
|
||||
await withMockerode(async (mockerode) => {
|
||||
// The volume does not exist on the engine before
|
||||
expect(
|
||||
mockerode.getVolume(Volume.generateDockerName(111, 'main')).inspect(),
|
||||
).to.be.rejected;
|
||||
|
||||
// Volume to create
|
||||
const volume = Volume.fromComposeObject('main', 111, {});
|
||||
sinon.spy(volume, 'create');
|
||||
|
||||
// Create volume
|
||||
await volumeManager.create(volume);
|
||||
|
||||
// Check that the creation function was called
|
||||
expect(volume.create).to.have.been.calledOnce;
|
||||
});
|
||||
});
|
||||
|
||||
it('does not try to create a volume that already exists', async () => {
|
||||
// Setup volume data
|
||||
const volumes = [
|
||||
createVolume({
|
||||
Name: Volume.generateDockerName(111, 'main'),
|
||||
Labels: {
|
||||
'io.balena.supervised': '1',
|
||||
},
|
||||
}),
|
||||
];
|
||||
// Perform test
|
||||
await withMockerode(
|
||||
async () => {
|
||||
// Create compose object for volume already set up in mock engine
|
||||
const volume = Volume.fromComposeObject('main', 111, {});
|
||||
sinon.spy(volume, 'create');
|
||||
|
||||
// Create volume
|
||||
await volumeManager.create(volume);
|
||||
|
||||
// Check volume was not created
|
||||
expect(volume.create).to.not.have.been.called;
|
||||
},
|
||||
{ volumes },
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Removing volumes', () => {
|
||||
it('removes a volume if it exists', async () => {
|
||||
// Setup volume data
|
||||
const volumes = [
|
||||
createVolume({
|
||||
Name: Volume.generateDockerName(111, 'main'),
|
||||
Labels: {
|
||||
'io.balena.supervised': '1',
|
||||
},
|
||||
}),
|
||||
];
|
||||
// Perform test
|
||||
await withMockerode(
|
||||
async (mockerode) => {
|
||||
// Volume to remove
|
||||
const volume = Volume.fromComposeObject('main', 111, {});
|
||||
sinon.spy(volume, 'remove');
|
||||
|
||||
// Remove volume
|
||||
await volumeManager.remove(volume);
|
||||
|
||||
// Check volume was removed
|
||||
expect(volume.remove).to.be.calledOnce;
|
||||
expect(mockerode.removeVolume).to.have.been.calledOnceWith(
|
||||
Volume.generateDockerName(111, 'main'),
|
||||
);
|
||||
},
|
||||
{ volumes },
|
||||
);
|
||||
});
|
||||
|
||||
it('does nothing on removal if the volume does not exist', async () => {
|
||||
// Setup volume data
|
||||
const volumes = [
|
||||
createVolume({
|
||||
Name: 'decoy-volume',
|
||||
}),
|
||||
];
|
||||
|
||||
// Perform test
|
||||
await withMockerode(
|
||||
async (mockerode) => {
|
||||
// Volume to remove
|
||||
const volume = Volume.fromComposeObject('main', 111, {});
|
||||
sinon.spy(volume, 'remove');
|
||||
|
||||
// Remove volume
|
||||
await expect(volumeManager.remove(volume)).to.not.be.rejected;
|
||||
expect(mockerode.removeVolume).to.not.have.been.called;
|
||||
},
|
||||
{ volumes },
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Removing orphaned volumes', () => {
|
||||
it('removes any remaining unreferenced volumes after services have been deleted', async () => {
|
||||
// Setup volume data
|
||||
const volumes = [
|
||||
createVolume({
|
||||
Name: 'some-volume',
|
||||
}),
|
||||
createVolume({
|
||||
Name: Volume.generateDockerName(111, 'main'),
|
||||
Labels: {
|
||||
'io.balena.supervised': '1',
|
||||
},
|
||||
}),
|
||||
];
|
||||
|
||||
await withMockerode(
|
||||
async (mockerode) => {
|
||||
await volumeManager.removeOrphanedVolumes([]);
|
||||
|
||||
expect(mockerode.removeVolume).to.have.been.calledTwice;
|
||||
expect(mockerode.removeVolume).to.have.been.calledWith('some-volume');
|
||||
expect(mockerode.removeVolume).to.have.been.calledWith(
|
||||
Volume.generateDockerName(111, 'main'),
|
||||
);
|
||||
},
|
||||
{ volumes },
|
||||
);
|
||||
});
|
||||
|
||||
it('keeps volumes still referenced in target state', async () => {
|
||||
// Setup volume data
|
||||
const volumes = [
|
||||
createVolume({
|
||||
Name: 'some-volume',
|
||||
}),
|
||||
createVolume({
|
||||
Name: Volume.generateDockerName(111, 'main'),
|
||||
Labels: {
|
||||
'io.balena.supervised': '1',
|
||||
},
|
||||
}),
|
||||
createVolume({
|
||||
Name: Volume.generateDockerName(222, 'old'),
|
||||
Labels: {
|
||||
'io.balena.supervised': '1',
|
||||
},
|
||||
}),
|
||||
];
|
||||
|
||||
await withMockerode(
|
||||
async (mockerode) => {
|
||||
await volumeManager.removeOrphanedVolumes([
|
||||
Volume.generateDockerName(111, 'main'),
|
||||
]);
|
||||
|
||||
expect(mockerode.removeVolume).to.have.been.calledTwice;
|
||||
expect(mockerode.removeVolume).to.have.been.calledWith('some-volume');
|
||||
expect(mockerode.removeVolume).to.have.been.calledWith(
|
||||
Volume.generateDockerName(222, 'old'),
|
||||
);
|
||||
},
|
||||
{ volumes },
|
||||
);
|
||||
});
|
||||
|
||||
it('keeps volumes still referenced by a container', async () => {
|
||||
// Setup volume data
|
||||
const volumes = [
|
||||
createVolume({
|
||||
Name: 'some-volume',
|
||||
}),
|
||||
createVolume({
|
||||
Name: Volume.generateDockerName(111, 'main'),
|
||||
Labels: {
|
||||
'io.balena.supervised': '1',
|
||||
},
|
||||
}),
|
||||
];
|
||||
|
||||
const containers = [
|
||||
createContainer({
|
||||
Id: 'some-service',
|
||||
Mounts: [
|
||||
{
|
||||
Name: 'some-volume',
|
||||
},
|
||||
],
|
||||
}),
|
||||
];
|
||||
|
||||
await withMockerode(
|
||||
async (mockerode) => {
|
||||
await volumeManager.removeOrphanedVolumes([]);
|
||||
|
||||
// Container that has a volume should not be removed
|
||||
expect(mockerode.removeVolume).to.have.been.calledOnceWith(
|
||||
Volume.generateDockerName(111, 'main'),
|
||||
);
|
||||
},
|
||||
{ volumes, containers },
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
479
test/src/compose/volume.spec.ts
Normal file
479
test/src/compose/volume.spec.ts
Normal file
@ -0,0 +1,479 @@
|
||||
import { expect } from 'chai';
|
||||
import { SinonStub, stub } from 'sinon';
|
||||
import Volume from '../../../src/compose/volume';
|
||||
import * as logTypes from '../../../src/lib/log-types';
|
||||
import * as logger from '../../../src/logger';
|
||||
|
||||
import { createVolume, withMockerode } from '../../lib/mockerode';
|
||||
|
||||
describe('compose/volume', () => {
|
||||
describe('creating a volume from a compose object', () => {
|
||||
it('should use proper defaults when no compose configuration is provided', () => {
|
||||
const volume = Volume.fromComposeObject('my_volume', 1234, {});
|
||||
|
||||
expect(volume.name).to.equal('my_volume');
|
||||
expect(volume.appId).to.equal(1234);
|
||||
expect(volume.config).to.deep.equal({
|
||||
driver: 'local',
|
||||
driverOpts: {},
|
||||
labels: {
|
||||
'io.balena.supervised': 'true',
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
it('should correctly parse compose volumes without an explicit driver', () => {
|
||||
const volume = Volume.fromComposeObject('one_volume', 1032480, {
|
||||
driver_opts: {
|
||||
opt1: 'test',
|
||||
},
|
||||
labels: {
|
||||
'my-label': 'test-label',
|
||||
},
|
||||
});
|
||||
|
||||
expect(volume).to.have.property('appId').that.equals(1032480);
|
||||
expect(volume).to.have.property('name').that.equals('one_volume');
|
||||
expect(volume)
|
||||
.to.have.property('config')
|
||||
.that.has.property('labels')
|
||||
.that.deep.equals({
|
||||
'io.balena.supervised': 'true',
|
||||
'my-label': 'test-label',
|
||||
});
|
||||
expect(volume)
|
||||
.to.have.property('config')
|
||||
.that.has.property('driverOpts')
|
||||
.that.deep.equals({
|
||||
opt1: 'test',
|
||||
});
|
||||
expect(volume)
|
||||
.to.have.property('config')
|
||||
.that.has.property('driver')
|
||||
.that.equals('local');
|
||||
});
|
||||
|
||||
it('should correctly parse compose volumes with an explicit driver', () => {
|
||||
const volume = Volume.fromComposeObject('one_volume', 1032480, {
|
||||
driver: 'other',
|
||||
driver_opts: {
|
||||
opt1: 'test',
|
||||
},
|
||||
labels: {
|
||||
'my-label': 'test-label',
|
||||
},
|
||||
});
|
||||
|
||||
expect(volume).to.have.property('appId').that.equals(1032480);
|
||||
expect(volume).to.have.property('name').that.equals('one_volume');
|
||||
expect(volume)
|
||||
.to.have.property('config')
|
||||
.that.has.property('labels')
|
||||
.that.deep.equals({
|
||||
'io.balena.supervised': 'true',
|
||||
'my-label': 'test-label',
|
||||
});
|
||||
expect(volume)
|
||||
.to.have.property('config')
|
||||
.that.has.property('driverOpts')
|
||||
.that.deep.equals({
|
||||
opt1: 'test',
|
||||
});
|
||||
expect(volume)
|
||||
.to.have.property('config')
|
||||
.that.has.property('driver')
|
||||
.that.equals('other');
|
||||
});
|
||||
});
|
||||
|
||||
describe('creating a volume instance from a docker volume', () => {
|
||||
it('should correctly parse app id from volume name', () => {
|
||||
const volume = Volume.fromDockerVolume({
|
||||
Driver: 'local',
|
||||
Name: '1234_my_volume',
|
||||
Mountpoint: '/var/lib/docker/volumes/1032480_one_volume/_data',
|
||||
Labels: {},
|
||||
Options: {},
|
||||
Scope: 'local',
|
||||
});
|
||||
|
||||
expect(volume.name).to.equal('my_volume');
|
||||
expect(volume.appId).to.equal(1234);
|
||||
});
|
||||
|
||||
it('should fail if volume name is not properly formatted', () => {
|
||||
expect(() =>
|
||||
Volume.fromDockerVolume({
|
||||
Driver: 'local',
|
||||
Name: 'non_supervised_volume',
|
||||
Mountpoint: '/var/lib/docker/volumes/1032480_one_volume/_data',
|
||||
Labels: {},
|
||||
Options: {},
|
||||
Scope: 'local',
|
||||
}),
|
||||
).to.throw;
|
||||
});
|
||||
|
||||
it('should correctly parse docker volumes', () => {
|
||||
const volume = Volume.fromDockerVolume({
|
||||
Driver: 'local',
|
||||
Labels: {
|
||||
'io.balena.supervised': 'true',
|
||||
},
|
||||
Mountpoint: '/var/lib/docker/volumes/1032480_one_volume/_data',
|
||||
Name: '1032480_one_volume',
|
||||
Options: {},
|
||||
Scope: 'local',
|
||||
});
|
||||
|
||||
expect(volume).to.have.property('appId').that.equals(1032480);
|
||||
expect(volume).to.have.property('name').that.equals('one_volume');
|
||||
expect(volume)
|
||||
.to.have.property('config')
|
||||
.that.has.property('labels')
|
||||
.that.deep.equals({
|
||||
'io.balena.supervised': 'true',
|
||||
});
|
||||
expect(volume)
|
||||
.to.have.property('config')
|
||||
.that.has.property('driverOpts')
|
||||
.that.deep.equals({});
|
||||
expect(volume)
|
||||
.to.have.property('config')
|
||||
.that.has.property('driver')
|
||||
.that.equals('local');
|
||||
});
|
||||
});
|
||||
|
||||
describe('creating a docker volume from options', () => {
|
||||
before(() => {
|
||||
stub(logger, 'logSystemEvent');
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
(logger.logSystemEvent as SinonStub).reset();
|
||||
});
|
||||
|
||||
after(() => {
|
||||
(logger.logSystemEvent as SinonStub).restore();
|
||||
});
|
||||
|
||||
it('should use defaults to create the volume when no options are given', async () => {
|
||||
await withMockerode(async (mockerode) => {
|
||||
const volume = Volume.fromComposeObject('one_volume', 1032480, {});
|
||||
|
||||
await volume.create();
|
||||
|
||||
expect(mockerode.createVolume).to.have.been.calledOnceWith({
|
||||
Name: '1032480_one_volume',
|
||||
Driver: 'local',
|
||||
Labels: {
|
||||
'io.balena.supervised': 'true',
|
||||
},
|
||||
DriverOpts: {},
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('should pass configuration options to the engine', async () => {
|
||||
await withMockerode(async (mockerode) => {
|
||||
const volume = Volume.fromComposeObject('one_volume', 1032480, {
|
||||
driver_opts: {
|
||||
opt1: 'test',
|
||||
},
|
||||
labels: {
|
||||
'my-label': 'test-label',
|
||||
},
|
||||
});
|
||||
|
||||
await volume.create();
|
||||
|
||||
expect(mockerode.createVolume).to.have.been.calledOnceWith({
|
||||
Name: '1032480_one_volume',
|
||||
Driver: 'local',
|
||||
Labels: {
|
||||
'my-label': 'test-label',
|
||||
'io.balena.supervised': 'true',
|
||||
},
|
||||
DriverOpts: {
|
||||
opt1: 'test',
|
||||
},
|
||||
});
|
||||
|
||||
expect(logger.logSystemEvent).to.have.been.calledOnceWith(
|
||||
logTypes.createVolume,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
it('should log successful volume creation to the cloud', async () => {
|
||||
await withMockerode(async (mockerode) => {
|
||||
const volume = Volume.fromComposeObject('one_volume', 1032480, {});
|
||||
|
||||
await volume.create();
|
||||
|
||||
expect(mockerode.createVolume).to.have.been.calledOnce;
|
||||
expect(logger.logSystemEvent).to.have.been.calledOnceWith(
|
||||
logTypes.createVolume,
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('comparing volume configuration', () => {
|
||||
it('should ignore name and supervisor labels in the comparison', () => {
|
||||
expect(
|
||||
Volume.fromComposeObject('aaa', 1234, {}).isEqualConfig(
|
||||
Volume.fromComposeObject('bbb', 4567, {
|
||||
driver: 'local',
|
||||
driver_opts: {},
|
||||
}),
|
||||
),
|
||||
).to.be.true;
|
||||
|
||||
expect(
|
||||
Volume.fromComposeObject('aaa', 1234, {}).isEqualConfig(
|
||||
Volume.fromComposeObject('bbb', 4567, {}),
|
||||
),
|
||||
).to.be.true;
|
||||
|
||||
expect(
|
||||
Volume.fromComposeObject('aaa', 1234, {}).isEqualConfig(
|
||||
Volume.fromDockerVolume({
|
||||
Name: '1234_aaa',
|
||||
Driver: 'local',
|
||||
Labels: {
|
||||
'io.balena.supervised': 'true',
|
||||
},
|
||||
Options: {},
|
||||
Mountpoint: '/var/lib/docker/volumes/1032480_one_volume/_data',
|
||||
Scope: 'local',
|
||||
}),
|
||||
),
|
||||
).to.be.true;
|
||||
|
||||
expect(
|
||||
Volume.fromComposeObject('aaa', 1234, {}).isEqualConfig(
|
||||
Volume.fromDockerVolume({
|
||||
Name: '4567_bbb',
|
||||
Driver: 'local',
|
||||
Labels: {
|
||||
'io.balena.supervised': 'true',
|
||||
},
|
||||
Options: {},
|
||||
Mountpoint: '/var/lib/docker/volumes/1032480_one_volume/_data',
|
||||
Scope: 'local',
|
||||
}),
|
||||
),
|
||||
).to.be.true;
|
||||
|
||||
expect(
|
||||
Volume.fromComposeObject('aaa', 1234, {}).isEqualConfig(
|
||||
Volume.fromDockerVolume({
|
||||
Name: '1234_aaa',
|
||||
Driver: 'local',
|
||||
Labels: {
|
||||
'some.other.label': '123',
|
||||
'io.balena.supervised': 'true',
|
||||
},
|
||||
Options: {},
|
||||
Mountpoint: '/var/lib/docker/volumes/1032480_one_volume/_data',
|
||||
Scope: 'local',
|
||||
}),
|
||||
),
|
||||
).to.be.false;
|
||||
});
|
||||
|
||||
it('should compare based on driver configuration and options', () => {
|
||||
expect(
|
||||
Volume.fromComposeObject('aaa', 1234, {}).isEqualConfig(
|
||||
Volume.fromComposeObject('aaa', 1234, {
|
||||
driver: 'other',
|
||||
driver_opts: {},
|
||||
}),
|
||||
),
|
||||
).to.be.false;
|
||||
|
||||
expect(
|
||||
Volume.fromComposeObject('aaa', 1234, {
|
||||
driver: 'other',
|
||||
}).isEqualConfig(
|
||||
Volume.fromComposeObject('aaa', 1234, {
|
||||
driver: 'other',
|
||||
driver_opts: {},
|
||||
}),
|
||||
),
|
||||
).to.be.true;
|
||||
|
||||
expect(
|
||||
Volume.fromComposeObject('aaa', 1234, {}).isEqualConfig(
|
||||
Volume.fromComposeObject('aaa', 1234, {
|
||||
driver_opts: { opt: '123' },
|
||||
}),
|
||||
),
|
||||
).to.be.false;
|
||||
|
||||
expect(
|
||||
Volume.fromComposeObject('aaa', 1234, {
|
||||
driver: 'other',
|
||||
labels: { 'some.other.label': '123' },
|
||||
driver_opts: { 'some-opt': '123' },
|
||||
}).isEqualConfig(
|
||||
Volume.fromDockerVolume({
|
||||
Name: '1234_aaa',
|
||||
Driver: 'other',
|
||||
Labels: {
|
||||
'some.other.label': '123',
|
||||
'io.balena.supervised': 'true',
|
||||
},
|
||||
Options: {},
|
||||
Mountpoint: '/var/lib/docker/volumes/1032480_one_volume/_data',
|
||||
Scope: 'local',
|
||||
}),
|
||||
),
|
||||
).to.be.false;
|
||||
|
||||
expect(
|
||||
Volume.fromComposeObject('aaa', 1234, {
|
||||
driver: 'other',
|
||||
labels: { 'some.other.label': '123' },
|
||||
driver_opts: { 'some-opt': '123' },
|
||||
}).isEqualConfig(
|
||||
Volume.fromDockerVolume({
|
||||
Name: '1234_aaa',
|
||||
Driver: 'other',
|
||||
Labels: {
|
||||
'some.other.label': '123',
|
||||
'io.balena.supervised': 'true',
|
||||
},
|
||||
Options: { 'some-opt': '123' },
|
||||
Mountpoint: '/var/lib/docker/volumes/1032480_one_volume/_data',
|
||||
Scope: 'local',
|
||||
}),
|
||||
),
|
||||
).to.be.true;
|
||||
});
|
||||
});
|
||||
|
||||
describe('removing volumes', () => {
|
||||
before(() => {
|
||||
stub(logger, 'logSystemEvent');
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
(logger.logSystemEvent as SinonStub).reset();
|
||||
});
|
||||
|
||||
after(() => {
|
||||
(logger.logSystemEvent as SinonStub).restore();
|
||||
});
|
||||
|
||||
it('should remove the volume from the engine if it exists', async () => {
|
||||
const dockerVolume = createVolume({
|
||||
Name: '1234_aaa',
|
||||
});
|
||||
|
||||
await withMockerode(
|
||||
async (mockerode) => {
|
||||
const volume = Volume.fromComposeObject('aaa', 1234, {});
|
||||
|
||||
// Check engine state before (this is really to test that mockerode is doing its job)
|
||||
expect((await mockerode.listVolumes()).Volumes).to.have.lengthOf(1);
|
||||
expect(await mockerode.getVolume('1234_aaa').inspect()).to.deep.equal(
|
||||
dockerVolume.inspectInfo,
|
||||
);
|
||||
|
||||
// Remove the volume
|
||||
await volume.remove();
|
||||
|
||||
// Check that the remove method was called
|
||||
expect(mockerode.removeVolume).to.have.been.calledOnceWith(
|
||||
'1234_aaa',
|
||||
);
|
||||
},
|
||||
{ volumes: [dockerVolume] },
|
||||
);
|
||||
});
|
||||
|
||||
it('should report the volume removal as a system event', async () => {
|
||||
const dockerVolume = createVolume({
|
||||
Name: '1234_aaa',
|
||||
});
|
||||
|
||||
await withMockerode(
|
||||
async (mockerode) => {
|
||||
const volume = Volume.fromComposeObject('aaa', 1234, {});
|
||||
|
||||
// Check engine state before
|
||||
expect((await mockerode.listVolumes()).Volumes).to.have.lengthOf(1);
|
||||
|
||||
// Remove the volume
|
||||
await volume.remove();
|
||||
|
||||
// Check that the remove method was called
|
||||
expect(mockerode.removeVolume).to.have.been.calledOnceWith(
|
||||
'1234_aaa',
|
||||
);
|
||||
|
||||
// Check that log entry was generated
|
||||
expect(logger.logSystemEvent).to.have.been.calledOnceWith(
|
||||
logTypes.removeVolume,
|
||||
);
|
||||
},
|
||||
{ volumes: [dockerVolume] },
|
||||
);
|
||||
});
|
||||
|
||||
it('should report an error if the volume does not exist', async () => {
|
||||
const dockerVolume = createVolume({
|
||||
Name: '4567_bbb',
|
||||
});
|
||||
await withMockerode(
|
||||
async (mockerode) => {
|
||||
const volume = Volume.fromComposeObject('aaa', 1234, {});
|
||||
|
||||
// Check engine state before
|
||||
expect((await mockerode.listVolumes()).Volumes).to.have.lengthOf(1);
|
||||
|
||||
// Remove the volume, this should not throw
|
||||
await expect(volume.remove()).to.not.be.rejected;
|
||||
|
||||
// Check that the remove method was called
|
||||
expect(mockerode.removeVolume).to.not.have.been.called;
|
||||
|
||||
// Check that log entry was generated
|
||||
expect(logger.logSystemEvent).to.have.been.calledWith(
|
||||
logTypes.removeVolumeError,
|
||||
);
|
||||
},
|
||||
{ volumes: [dockerVolume] },
|
||||
);
|
||||
});
|
||||
|
||||
it('should report an error if a problem happens while removing the volume', async () => {
|
||||
const dockerVolume = createVolume({
|
||||
Name: '1234_aaa',
|
||||
});
|
||||
await withMockerode(
|
||||
async (mockerode) => {
|
||||
const volume = Volume.fromComposeObject('aaa', 1234, {});
|
||||
|
||||
// Stub the mockerode method to fail
|
||||
mockerode.removeVolume.rejects('Something bad happened');
|
||||
|
||||
// Check engine state before
|
||||
expect((await mockerode.listVolumes()).Volumes).to.have.lengthOf(1);
|
||||
|
||||
// Remove the volume, this should not throw
|
||||
await expect(volume.remove()).to.not.be.rejected;
|
||||
|
||||
// Check that log entry was generated
|
||||
expect(logger.logSystemEvent).to.have.been.calledWith(
|
||||
logTypes.removeVolumeError,
|
||||
);
|
||||
},
|
||||
{ volumes: [dockerVolume] },
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
Loading…
Reference in New Issue
Block a user