Add tests for doPurge action and v1/v2 app purge routes

Signed-off-by: Christina Ying Wang <christina@balena.io>
Christina Ying Wang 2022-11-28 14:40:20 -08:00
parent a24d5acf7f
commit fcd28591c6
9 changed files with 460 additions and 262 deletions

View File

@@ -1,15 +1,21 @@
import * as Bluebird from 'bluebird';
import * as _ from 'lodash';
import { getGlobalApiKey, refreshKey } from '.';
import * as eventTracker from '../event-tracker';
import * as deviceState from '../device-state';
import * as logger from '../logger';
import { App } from '../compose/app';
import * as applicationManager from '../compose/application-manager';
import * as serviceManager from '../compose/service-manager';
import * as volumeManager from '../compose/volume-manager';
import log from '../lib/supervisor-console';
import blink = require('../lib/blink');
import { lock } from '../lib/update-lock';
import { InternalInconsistencyError } from '../lib/errors';
import type { InstancedDeviceState } from '../types';
/**
* Run an array of healthchecks, outputting whether all passed or not
* Used by:
@@ -95,3 +101,167 @@ export const doRestart = async (appId: number, force: boolean = false) => {
});
});
};
/**
* This doesn't truly return an InstancedDeviceState, but it's close enough to mostly work where it's used
*/
export function safeStateClone(
targetState: InstancedDeviceState,
): InstancedDeviceState {
// We avoid using cloneDeep here, as the class
// instances can cause a maximum call stack exceeded
// error
// TODO: This should really return the config as it
// is returned from the api, but currently that's not
// the easiest thing due to the way they are stored and
// retrieved from the db - when all of the application
// manager is strongly typed, revisit this. The best
// thing to do would be to represent the input with
// io-ts and make sure the below conforms to it
const cloned: DeepPartial<InstancedDeviceState> = {
local: {
config: {},
},
dependent: {
config: {},
},
};
if (targetState.local != null) {
cloned.local = {
name: targetState.local.name,
config: _.cloneDeep(targetState.local.config),
apps: _.mapValues(targetState.local.apps, safeAppClone),
};
}
if (targetState.dependent != null) {
cloned.dependent = _.cloneDeep(targetState.dependent);
}
return cloned as InstancedDeviceState;
}
export function safeAppClone(app: App): App {
const containerIdForService = _.fromPairs(
_.map(app.services, (svc) => [
svc.serviceName,
svc.containerId != null ? svc.containerId.substring(0, 12) : '',
]),
);
return new App(
{
appId: app.appId,
appUuid: app.appUuid,
appName: app.appName,
commit: app.commit,
source: app.source,
services: app.services.map((svc) => {
// This is a bit of a hack, but when applying the target state as if it's
// the current state, this will include the previous containerId as a
// network alias. The container ID will be there as Docker adds it
// implicitly when creating a container. Here, we remove any previous
// container IDs before passing it back as target state. We have to do this
// here as when passing it back as target state, the service class cannot
// know that the alias being given is not in fact a user given one.
// TODO: Make the process of moving from a current state to a target state
// well-defined (and implemented in a separate module)
const svcCopy = _.cloneDeep(svc);
_.each(svcCopy.config.networks, (net) => {
if (Array.isArray(net.aliases)) {
net.aliases = net.aliases.filter(
(alias) => alias !== containerIdForService[svcCopy.serviceName],
);
}
});
return svcCopy;
}),
volumes: _.cloneDeep(app.volumes),
networks: _.cloneDeep(app.networks),
isHost: app.isHost,
},
true,
);
}
/**
* Purges volumes for an application.
* Used by:
* - POST /v1/purge
* - POST /v2/applications/:appId/purge
*/
export const doPurge = async (appId: number, force: boolean = false) => {
await deviceState.initialized();
logger.logSystemMessage(
`Purging data for app ${appId}`,
{ appId },
'Purge data',
);
return await lock(appId, { force }, async () => {
const currentState = await deviceState.getCurrentState();
if (currentState.local.apps?.[appId] == null) {
throw new InternalInconsistencyError(
`Application with ID ${appId} is not in the current state`,
);
}
const app = currentState.local.apps?.[appId];
/**
* With multi-container, Docker adds an invalid network alias equal to the current containerId
* to that service's network configs when starting a service. Thus when reapplying intermediateState
* after purging, use a cloned state instance which automatically filters out invalid network aliases.
* This will prevent error logs like the following:
* https://gist.github.com/cywang117/84f9cd4e6a9641dbed530c94e1172f1d#file-logs-sh-L58
*
* When networks do not match because of their aliases, services are killed and recreated
* an additional time which is unnecessary. Filtering prevents this additional restart BUT
* it is a stopgap measure until we can keep containerId network aliases from being stored
* in state's service config objects (TODO)
*/
const clonedState = safeStateClone(currentState);
// Set services & volumes as empty to be applied as intermediate state
app.services = [];
app.volumes = {};
applicationManager.setIsApplyingIntermediate(true);
return deviceState
.pausingApply(() =>
deviceState
.applyIntermediateTarget(currentState, { skipLock: true })
.then(() => {
// Explicitly remove volumes because application-manager won't
// remove any volumes that are part of an active application.
return Bluebird.each(volumeManager.getAllByAppId(appId), (vol) =>
vol.remove(),
);
})
.then(() => {
return deviceState.applyIntermediateTarget(clonedState, {
skipLock: true,
});
}),
)
.finally(() => {
applicationManager.setIsApplyingIntermediate(false);
deviceState.triggerApplyTarget();
});
})
.then(() =>
logger.logSystemMessage('Purged data', { appId }, 'Purge data success'),
)
.catch((err) => {
applicationManager.setIsApplyingIntermediate(false);
logger.logSystemMessage(
`Error purging data: ${err}`,
{ appId, error: err },
'Purge data error',
);
throw err;
});
};
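The alias handling in safeAppClone and the comment inside doPurge both hinge on the same detail: Docker implicitly adds the short, 12-character container id as a network alias when it starts a container, and that alias must not be echoed back as if it were user-configured. A minimal standalone sketch of that filtering step, using made-up ids and aliases rather than values from this commit:

// Hypothetical values for illustration only; real ids come from Docker.
const containerId = '4f5e6d7c8b9a1c2d3e4f5a6b7c8d9e0f';
const userAliases = ['main', 'frontend'];

// Docker adds the 12-char short id as an implicit alias at container start.
const runtimeAliases = [...userAliases, containerId.substring(0, 12)];

// safeAppClone strips exactly that implicit alias, so the cloned state only
// carries user-configured aliases when it is re-applied as a target.
const filteredAliases = runtimeAliases.filter(
  (alias) => alias !== containerId.substring(0, 12),
);
// filteredAliases === ['main', 'frontend']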

View File

@@ -1,177 +0,0 @@
import * as Bluebird from 'bluebird';
import * as _ from 'lodash';
import * as logger from '../logger';
import * as deviceState from '../device-state';
import * as applicationManager from '../compose/application-manager';
import * as volumeManager from '../compose/volume-manager';
import { App } from '../compose/app';
import { lock } from '../lib/update-lock';
import { appNotFoundMessage } from './messages';
import type { InstancedDeviceState } from '../types';
export async function doPurge(appId: number, force: boolean) {
await deviceState.initialized();
await applicationManager.initialized();
logger.logSystemMessage(
`Purging data for app ${appId}`,
{ appId },
'Purge data',
);
return lock(appId, { force }, () =>
deviceState.getCurrentState().then(function (currentState) {
const allApps = currentState.local.apps;
if (allApps?.[appId] == null) {
throw new Error(appNotFoundMessage);
}
const clonedState = safeStateClone(currentState);
/**
* With multi-container, Docker adds an invalid network alias equal to the current containerId
* to that service's network configs when starting a service. Thus when reapplying intermediateState
* after purging, use a cloned state instance which automatically filters out invalid network aliases.
*
* This will prevent error logs like the following:
* https://gist.github.com/cywang117/84f9cd4e6a9641dbed530c94e1172f1d#file-logs-sh-L58
*
* When networks do not match because of their aliases, services are killed and recreated
* an additional time which is unnecessary. Filtering prevents this additional restart BUT
* it is a stopgap measure until we can keep containerId network aliases from being stored
* in state's service config objects (TODO)
*
* See https://github.com/balena-os/balena-supervisor/blob/master/src/device-api/common.js#L160-L180
* for a more in-depth explanation of why aliases need to be filtered out.
*/
// After cloning, set services & volumes as empty to be applied as intermediateTargetState
allApps[appId].services = [];
allApps[appId].volumes = {};
applicationManager.setIsApplyingIntermediate(true);
return deviceState
.pausingApply(() =>
deviceState
.applyIntermediateTarget(currentState, { skipLock: true })
.then(() => {
// Now that we're not running anything, explicitly
// remove the volumes, we must do this here, as the
// application-manager will not remove any volumes
// which are part of an active application
return Bluebird.each(volumeManager.getAllByAppId(appId), (vol) =>
vol.remove(),
);
})
.then(() => {
return deviceState.applyIntermediateTarget(clonedState, {
skipLock: true,
});
}),
)
.finally(() => {
applicationManager.setIsApplyingIntermediate(false);
deviceState.triggerApplyTarget();
});
}),
)
.then(() =>
logger.logSystemMessage('Purged data', { appId }, 'Purge data success'),
)
.catch((err) => {
applicationManager.setIsApplyingIntermediate(false);
logger.logSystemMessage(
`Error purging data: ${err}`,
{ appId, error: err },
'Purge data error',
);
throw err;
});
}
/**
* This doesn't truly return an InstancedDeviceState, but it's close enough to mostly work where it's used
*/
export function safeStateClone(
targetState: InstancedDeviceState,
): InstancedDeviceState {
// We avoid using cloneDeep here, as the class
// instances can cause a maximum call stack exceeded
// error
// TODO: This should really return the config as it
// is returned from the api, but currently that's not
// the easiest thing due to the way they are stored and
// retrieved from the db - when all of the application
// manager is strongly typed, revisit this. The best
// thing to do would be to represent the input with
// io-ts and make sure the below conforms to it
const cloned: DeepPartial<InstancedDeviceState> = {
local: {
config: {},
},
dependent: {
config: {},
},
};
if (targetState.local != null) {
cloned.local = {
name: targetState.local.name,
config: _.cloneDeep(targetState.local.config),
apps: _.mapValues(targetState.local.apps, safeAppClone),
};
}
if (targetState.dependent != null) {
cloned.dependent = _.cloneDeep(targetState.dependent);
}
return cloned as InstancedDeviceState;
}
export function safeAppClone(app: App): App {
const containerIdForService = _.fromPairs(
_.map(app.services, (svc) => [
svc.serviceName,
svc.containerId != null ? svc.containerId.substring(0, 12) : '',
]),
);
return new App(
{
appId: app.appId,
appUuid: app.appUuid,
appName: app.appName,
commit: app.commit,
source: app.source,
services: _.map(app.services, (svc) => {
// This is a bit of a hack, but when applying the target state as if it's
// the current state, this will include the previous containerId as a
// network alias. The container ID will be there as Docker adds it
// implicitly when creating a container. Here, we remove any previous
// container IDs before passing it back as target state. We have to do this
// here as when passing it back as target state, the service class cannot
// know that the alias being given is not in fact a user given one.
// TODO: Make the process of moving from a current state to a target state
// well-defined (and implemented in a separate module)
const svcCopy = _.cloneDeep(svc);
_.each(svcCopy.config.networks, (net) => {
if (Array.isArray(net.aliases)) {
net.aliases = net.aliases.filter(
(alias) => alias !== containerIdForService[svcCopy.serviceName],
);
}
});
return svcCopy;
}),
volumes: _.cloneDeep(app.volumes),
networks: _.cloneDeep(app.networks),
isHost: app.isHost,
},
true,
);
}

View File

@@ -2,7 +2,6 @@ import * as express from 'express';
import * as _ from 'lodash';
import * as actions from './actions';
import { doPurge } from './common';
import { AuthorizedRequest } from './api-keys';
import * as eventTracker from '../event-tracker';
import { isReadyForUpdates } from '../api-binder';
@@ -217,7 +216,8 @@ router.post('/v1/purge', (req: AuthorizedRequest, res, next) => {
return;
}
return doPurge(appId, force)
return actions
.doPurge(appId, force)
.then(() => res.status(200).json({ Data: 'OK', Error: '' }))
.catch(next);
});
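Taken together with the new unit tests further down, the contract for this endpoint is: 400 for a missing or invalid appId, force defaulting to false, 401 when the caller's API key is not scoped to the app (handled by the API-key middleware), 200 on success, 423 when an update lock blocks the purge, and 503 for any other error. The sketch below illustrates that contract only and is not the committed handler, which delegates error mapping to shared middleware via `.catch(next)`:

// Sketch only (assumed shape, not the actual route implementation).
import * as express from 'express';
import * as actions from './actions';
import { UpdatesLockedError } from '../lib/errors';

const sketchRouter = express.Router();

sketchRouter.post('/v1/purge', async (req, res) => {
  const appId = parseInt(req.body.appId, 10);
  const force = req.body.force === true; // defaults to false when absent
  if (isNaN(appId)) {
    return res.status(400).json({ Data: '', Error: 'Missing app id' });
  }
  try {
    await actions.doPurge(appId, force);
    return res.status(200).json({ Data: 'OK', Error: '' });
  } catch (e) {
    // Update locks surface as 423; anything else as 503
    const status = e instanceof UpdatesLockedError ? 423 : 503;
    return res
      .status(status)
      .json({ Data: '', Error: e instanceof Error ? e.message : 'Unknown error' });
  }
});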

View File

@@ -32,7 +32,6 @@ import supervisorVersion = require('../lib/supervisor-version');
import { checkInt, checkTruthy } from '../lib/validation';
import { isVPNActive } from '../network';
import * as actions from './actions';
import { doPurge, safeStateClone } from './common';
import { AuthorizedRequest } from './api-keys';
import { fromV2TargetState } from '../lib/legacy';
@@ -118,8 +117,8 @@ const createServiceActionHandler = (action: string) =>
router.post(
'/v2/applications/:appId/purge',
(req: AuthorizedRequest, res: Response, next: NextFunction) => {
const { force } = req.body;
const appId = checkInt(req.params.appId);
const force = checkTruthy(req.body.force);
if (!appId) {
return res.status(400).json({
status: 'failed',
@@ -135,7 +134,8 @@ router.post(
});
}
return doPurge(appId, force)
return actions
.doPurge(appId, force)
.then(() => {
res.status(200).send('OK');
})
@@ -329,7 +329,7 @@ router.get(
router.get('/v2/local/target-state', async (_req, res) => {
const targetState = await deviceState.getTarget();
const target = safeStateClone(targetState);
const target = actions.safeStateClone(targetState);
res.status(200).json({
status: 'success',
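The change from reading req.body.force directly to checkTruthy(req.body.force) is what backs the "defaults to force: false" case in the new tests: the raw value is coerced to a strict boolean instead of being passed through as undefined. A hypothetical stand-in with the same intent (the real helper lives in lib/validation and may accept more spellings):

// Hypothetical stand-in for illustration; not the actual checkTruthy source.
const toBooleanFlag = (value: unknown): boolean =>
  value === true || value === 'true' || value === '1' || value === 1 || value === 'on';

toBooleanFlag(undefined); // false -> doPurge(appId, false)
toBooleanFlag('true');    // true  -> doPurge(appId, true)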

View File

@@ -180,9 +180,6 @@ describe('manages application lifecycle', () => {
}
await setTimeout(500);
}
// Make sure Supervisor doesn't have any apps running before assertions
await setSupervisorTarget(await generateTarget({ serviceCount: 0 }));
});
after(async () => {
@@ -204,13 +201,10 @@
serviceCount,
serviceNames,
});
// Create a single-container application in local mode
await setSupervisorTarget(targetState);
});
afterEach(async () => {
// Make sure target state has reset to single-container app between assertions
beforeEach(async () => {
// Create a single-container application in local mode
await setSupervisorTarget(targetState);
});
@@ -247,6 +241,61 @@
containers.map((ctn) => ctn.Id),
);
});
// This test should be ordered last in this `describe` block, because it compares
// the `CreatedAt` timestamps of volumes to determine whether the purge succeeded.
// Ordering the assertion last ensures enough time has passed between the original
// volume's `CreatedAt` and the recreated volume's `CreatedAt` for the comparison to detect the change.
it('should purge an application by removing services then removing volumes', async () => {
containers = await waitForSetup(targetState);
const isRestartSuccessful = startTimesChanged(
containers.map((ctn) => ctn.State.StartedAt),
);
// Get volume metadata. As the name stays the same, we just need to check that the volume
// has been deleted & recreated. We can use the CreatedAt timestamp to determine this.
const volume = (await docker.listVolumes()).Volumes.find((vol) =>
/data/.test(vol.Name),
);
if (!volume) {
expect.fail('Expected initial volume with name matching "data"');
}
// CreatedAt is a valid key but isn't typed properly
const createdAt = (volume as any).CreatedAt;
// Calling actions.doPurge won't work as intended because purge relies on
// setting and applying intermediate state before applying target state again,
// but target state is set in the balena-supervisor container rather than in the sut container.
// NOTE: if running ONLY this test, it has a chance of failing, since the first and
// second volume creations happen in quick succession (sometimes within the same second).
await request(BALENA_SUPERVISOR_ADDRESS)
.post('/v1/purge')
.set('Content-Type', 'application/json')
.send(JSON.stringify({ appId: 1 }));
const restartedContainers = await waitForSetup(
targetState,
isRestartSuccessful,
);
// Technically the wait function above should already verify that the
// containers have been restarted, but verify explicitly with an assertion
expect(isRestartSuccessful(restartedContainers)).to.be.true;
// Containers should have different Ids since they're recreated
expect(restartedContainers.map(({ Id }) => Id)).to.not.have.members(
containers.map((ctn) => ctn.Id),
);
// Volume should be recreated
const newVolume = (await docker.listVolumes()).Volumes.find((vol) =>
/data/.test(vol.Name),
);
if (!newVolume) {
expect.fail('Expected recreated volume with name matching "data"');
}
expect((newVolume as any).CreatedAt).to.not.equal(createdAt);
});
});
describe('manages multi-container application lifecycle', () => {
@@ -260,13 +309,10 @@
serviceCount,
serviceNames,
});
// Create a single-container application in local mode
await setSupervisorTarget(targetState);
});
afterEach(async () => {
// Make sure target state has reset to single-container app between assertions
beforeEach(async () => {
// Create a multi-container application in local mode
await setSupervisorTarget(targetState);
});
@@ -303,5 +349,60 @@
containers.map((ctn) => ctn.Id),
);
});
// This test should be ordered last in this `describe` block, because it compares
// the `CreatedAt` timestamps of volumes to determine whether the purge succeeded.
// Ordering the assertion last ensures enough time has passed between the original
// volume's `CreatedAt` and the recreated volume's `CreatedAt` for the comparison to detect the change.
it('should purge an application by removing services then removing volumes', async () => {
containers = await waitForSetup(targetState);
const isRestartSuccessful = startTimesChanged(
containers.map((ctn) => ctn.State.StartedAt),
);
// Get volume metadata. As the name stays the same, we just need to check that the volume
// has been deleted & recreated. We can use the CreatedAt timestamp to determine this.
const volume = (await docker.listVolumes()).Volumes.find((vol) =>
/data/.test(vol.Name),
);
if (!volume) {
expect.fail('Expected initial volume with name matching "data"');
}
// CreatedAt is a valid key but isn't typed properly
const createdAt = (volume as any).CreatedAt;
// Calling actions.doPurge won't work as intended because purge relies on
// setting and applying intermediate state before applying target state again,
// but target state is set in the balena-supervisor container rather than in the sut container.
// NOTE: if running ONLY this test, it has a chance of failing, since the first and
// second volume creations happen in quick succession (sometimes within the same second).
await request(BALENA_SUPERVISOR_ADDRESS)
.post('/v1/purge')
.set('Content-Type', 'application/json')
.send(JSON.stringify({ appId: 1 }));
const restartedContainers = await waitForSetup(
targetState,
isRestartSuccessful,
);
// Technically the wait function above should already verify that the two
// containers have been restarted, but verify explicitly with an assertion
expect(isRestartSuccessful(restartedContainers)).to.be.true;
// Containers should have different Ids since they're recreated
expect(restartedContainers.map(({ Id }) => Id)).to.not.have.members(
containers.map((ctn) => ctn.Id),
);
// Volume should be recreated
const newVolume = (await docker.listVolumes()).Volumes.find((vol) =>
/data/.test(vol.Name),
);
if (!newVolume) {
expect.fail('Expected recreated volume with name matching "data"');
}
expect((newVolume as any).CreatedAt).to.not.equal(createdAt);
});
});
});
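The "ordered last" comments and the NOTE about running these tests in isolation both come down to timestamp resolution: Docker reports a volume's CreatedAt with one-second granularity, so a volume deleted and recreated within the same second is indistinguishable by that field. A made-up illustration of the failure mode:

// Illustrative timestamps only; real values come from docker.listVolumes().
const originalCreatedAt = '2022-11-28T14:40:20Z';
const recreatedCreatedAt = '2022-11-28T14:40:20Z'; // recreated < 1s later

// The purge test compares these strings, so equal values would fail the
// "volume should be recreated" expectation even though the volume was
// in fact removed and recreated.
console.log(originalCreatedAt !== recreatedCreatedAt); // false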

View File

@@ -181,4 +181,86 @@ describe('device-api/v1', () => {
.expect(503);
});
});
describe('POST /v1/purge', () => {
let doPurgeStub: SinonStub;
beforeEach(() => {
doPurgeStub = stub(actions, 'doPurge').resolves();
});
afterEach(async () => {
doPurgeStub.restore();
// Remove all scoped API keys between tests
await db.models('apiSecret').whereNot({ appId: 0 }).del();
});
it('validates data from request body', async () => {
// Parses force: false
await request(api)
.post('/v1/purge')
.send({ appId: 1234567, force: false })
.set('Authorization', `Bearer ${await deviceApi.getGlobalApiKey()}`)
.expect(200);
expect(doPurgeStub).to.have.been.calledWith(1234567, false);
doPurgeStub.resetHistory();
// Parses force: true
await request(api)
.post('/v1/purge')
.send({ appId: 7654321, force: true })
.set('Authorization', `Bearer ${await deviceApi.getGlobalApiKey()}`)
.expect(200);
expect(doPurgeStub).to.have.been.calledWith(7654321, true);
doPurgeStub.resetHistory();
// Defaults to force: false
await request(api)
.post('/v1/purge')
.send({ appId: 7654321 })
.set('Authorization', `Bearer ${await deviceApi.getGlobalApiKey()}`)
.expect(200);
expect(doPurgeStub).to.have.been.calledWith(7654321, false);
});
it('responds with 400 if appId is missing', async () => {
await request(api)
.post('/v1/purge')
.set('Authorization', `Bearer ${await deviceApi.getGlobalApiKey()}`)
.expect(400);
});
it("responds with 401 if caller's API key is not in scope of appId", async () => {
const scopedKey = await deviceApi.generateScopedKey(1234567, 'main');
await request(api)
.post('/v1/purge')
.send({ appId: 7654321 })
.set('Authorization', `Bearer ${scopedKey}`)
.expect(401);
});
it('responds with 200 if purge succeeded', async () => {
await request(api)
.post('/v1/purge')
.send({ appId: 1234567 })
.set('Authorization', `Bearer ${await deviceApi.getGlobalApiKey()}`)
.expect(200);
});
it('responds with 423 if there are update locks', async () => {
doPurgeStub.throws(new UpdatesLockedError());
await request(api)
.post('/v1/purge')
.send({ appId: 1234567 })
.set('Authorization', `Bearer ${await deviceApi.getGlobalApiKey()}`)
.expect(423);
});
it('responds with 503 for other errors that occur during purge', async () => {
doPurgeStub.throws(new Error());
await request(api)
.post('/v1/purge')
.send({ appId: 1234567 })
.set('Authorization', `Bearer ${await deviceApi.getGlobalApiKey()}`)
.expect(503);
});
});
});

View File

@@ -105,4 +105,82 @@ describe('device-api/v2', () => {
.expect(503);
});
});
describe('POST /v2/applications/:appId/purge', () => {
// Actions are tested elsewhere so we can stub the dependency here
let doPurgeStub: SinonStub;
beforeEach(() => {
doPurgeStub = stub(actions, 'doPurge').resolves();
});
afterEach(async () => {
doPurgeStub.restore();
// Remove all scoped API keys between tests
await db.models('apiSecret').whereNot({ appId: 0 }).del();
});
it('validates data from request body', async () => {
// Parses force: false
await request(api)
.post('/v2/applications/1234567/purge')
.send({ force: false })
.set('Authorization', `Bearer ${await deviceApi.getGlobalApiKey()}`)
.expect(200);
expect(doPurgeStub).to.have.been.calledWith(1234567, false);
doPurgeStub.resetHistory();
// Parses force: true
await request(api)
.post('/v2/applications/7654321/purge')
.send({ force: true })
.set('Authorization', `Bearer ${await deviceApi.getGlobalApiKey()}`)
.expect(200);
expect(doPurgeStub).to.have.been.calledWith(7654321, true);
doPurgeStub.resetHistory();
// Defaults to force: false
await request(api)
.post('/v2/applications/7654321/purge')
.set('Authorization', `Bearer ${await deviceApi.getGlobalApiKey()}`)
.expect(200);
expect(doPurgeStub).to.have.been.calledWith(7654321, false);
});
it('responds with 400 if appId is missing', async () => {
await request(api)
.post('/v2/applications/badAppId/purge')
.set('Authorization', `Bearer ${await deviceApi.getGlobalApiKey()}`)
.expect(400);
});
it("responds with 401 if caller's API key is not in scope of appId", async () => {
const scopedKey = await deviceApi.generateScopedKey(1234567, 'main');
await request(api)
.post('/v2/applications/7654321/purge')
.set('Authorization', `Bearer ${scopedKey}`)
.expect(401);
});
it('responds with 200 if purge succeeded', async () => {
await request(api)
.post('/v2/applications/1234567/purge')
.set('Authorization', `Bearer ${await deviceApi.getGlobalApiKey()}`)
.expect(200);
});
it('responds with 423 if there are update locks', async () => {
doPurgeStub.throws(new UpdatesLockedError());
await request(api)
.post('/v2/applications/1234567/purge')
.set('Authorization', `Bearer ${await deviceApi.getGlobalApiKey()}`)
.expect(423);
});
it('responds with 503 for other errors that occur during purge', async () => {
doPurgeStub.throws(new Error());
await request(api)
.post('/v2/applications/7654321/purge')
.set('Authorization', `Bearer ${await deviceApi.getGlobalApiKey()}`)
.expect(503);
});
});
});

View File

@@ -20,7 +20,6 @@ import * as updateLock from '~/lib/update-lock';
import * as TargetState from '~/src/device-state/target-state';
import * as targetStateCache from '~/src/device-state/target-state-cache';
import constants = require('~/lib/constants');
import * as deviceAPIActions from '~/src/device-api/common';
import { UpdatesLockedError } from '~/lib/errors';
import { SchemaTypeKey } from '~/src/config/schema-type';
import log from '~/lib/supervisor-console';
@@ -1227,69 +1226,4 @@ describe('SupervisorAPI [V1 Endpoints]', () => {
});
});
});
describe('POST /v1/purge', () => {
it('errors if no appId found in request body', async () => {
await request
.post('/v1/purge')
.send({})
.set('Accept', 'application/json')
.set('Authorization', `Bearer ${await deviceApi.getGlobalApiKey()}`)
.expect(
sampleResponses.V1.POST['/purge [400 Invalid/missing appId]']
.statusCode,
)
.then((response) => {
expect(response.text).to.equal(
sampleResponses.V1.POST['/purge [400 Invalid/missing appId]'].text,
);
});
});
it('purges the /data directory with valid appId', async () => {
const doPurgeStub: SinonStub = stub(
deviceAPIActions,
'doPurge',
).resolves();
await mockedDockerode.testWithData({ containers, images }, async () => {
await request
.post('/v1/purge')
.send({ appId: 2 })
.set('Accept', 'application/json')
.set('Authorization', `Bearer ${await deviceApi.getGlobalApiKey()}`)
.expect(sampleResponses.V1.POST['/purge [200]'].statusCode)
.then((response) => {
expect(response.body).to.deep.equal(
sampleResponses.V1.POST['/purge [200]'].body,
);
});
});
expect(doPurgeStub.callCount).to.equal(1);
doPurgeStub.restore();
});
it('errors if appId is out of scope (application not available)', async () => {
// Generate a new scoped key to call the endpoint, as mocked
// appId = 2 services are all in the global scope and thus
// resolve to true for any isScoped check
const scopedKey = await deviceApi.generateScopedKey(
2,
containers[0].serviceName,
);
await request
.post('/v1/purge')
.send({ appId: 3 })
.set('Accept', 'application/json')
.set('Authorization', `Bearer ${scopedKey}`)
.expect(sampleResponses.V1.POST['/purge [401 Out of scope]'].statusCode)
.then((response) => {
expect(response.body).to.deep.equal(
sampleResponses.V1.POST['/purge [401 Out of scope]'].body,
);
});
});
});
});

View File

@@ -1,6 +1,7 @@
import * as Docker from 'dockerode';
import * as tar from 'tar-stream';
import { strict as assert } from 'assert';
import { setTimeout } from 'timers/promises';
import { isStatusError } from '~/lib/errors';
@@ -62,6 +63,15 @@ export const cleanupDocker = async (docker = new Docker()) => {
}
}
// Wait until containers are all removed
while ((await docker.listContainers({ all: true })).length > 0) {
await setTimeout(100);
}
// Remove all networks except defaults
const networks = await docker.listNetworks();
await Promise.all(