mirror of
https://github.com/balena-os/balena-supervisor.git
synced 2024-12-18 21:27:54 +00:00
Merge pull request #2020 from balena-os/update-typescript
Update to typescript 4.8.2
This commit is contained in:
commit
03f65653c2
1058
package-lock.json
generated
1058
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@ -10,7 +10,7 @@
|
||||
"scripts": {
|
||||
"start": "./entry.sh",
|
||||
"build": "npm run clean && npm run release && webpack",
|
||||
"lint": "balena-lint -e ts -e js --typescript src/ test/ typings/ build-utils/ webpack.config.js",
|
||||
"lint": "balena-lint -e ts -e js src/ test/ typings/ build-utils/ webpack.config.js",
|
||||
"test:build": "tsc --noEmit && tsc --noEmit --project tsconfig.js.json",
|
||||
"test:unit": "mocha --config test/unit/.mocharc.js",
|
||||
"test:integration": "mocha --config test/integration/.mocharc.js",
|
||||
@ -20,7 +20,7 @@
|
||||
"test:compose": "docker-compose -f docker-compose.yml -f docker-compose.test.yml up --build --remove-orphans --exit-code-from=sut ; npm run compose:down",
|
||||
"test": "npm run lint && npm run test:build && npm run test:unit && npm run test:legacy",
|
||||
"compose:down": "docker-compose -f docker-compose.test.yml down",
|
||||
"prettify": "balena-lint -e ts -e js --typescript --fix src/ test/ typings/ build-utils/ webpack.config.js",
|
||||
"prettify": "balena-lint -e ts -e js --fix src/ test/ typings/ build-utils/ webpack.config.js",
|
||||
"release": "tsc --project tsconfig.release.json && mv build/src/* build",
|
||||
"sync": "ts-node --files sync/sync.ts",
|
||||
"clean": "rimraf build",
|
||||
@ -42,7 +42,7 @@
|
||||
"devDependencies": {
|
||||
"@balena/contrato": "^0.6.0",
|
||||
"@balena/es-version": "^1.0.1",
|
||||
"@balena/lint": "^5.1.0",
|
||||
"@balena/lint": "^6.2.0",
|
||||
"@types/bluebird": "^3.5.32",
|
||||
"@types/chai": "^4.2.16",
|
||||
"@types/chai-as-promised": "^7.1.3",
|
||||
@ -129,7 +129,7 @@
|
||||
"ts-node": "^8.10.2",
|
||||
"tsconfig-paths": "^4.1.0",
|
||||
"typed-error": "^3.2.1",
|
||||
"typescript": "^4.2.4",
|
||||
"typescript": "^4.8.3",
|
||||
"webpack": "^4.44.1",
|
||||
"webpack-cli": "^3.3.12",
|
||||
"winston": "^3.3.3",
|
||||
|
@ -46,15 +46,12 @@ interface DeviceTag {
|
||||
let readyForUpdates = false;
|
||||
|
||||
export async function healthcheck() {
|
||||
const {
|
||||
appUpdatePollInterval,
|
||||
unmanaged,
|
||||
connectivityCheckEnabled,
|
||||
} = await config.getMany([
|
||||
'appUpdatePollInterval',
|
||||
'unmanaged',
|
||||
'connectivityCheckEnabled',
|
||||
]);
|
||||
const { appUpdatePollInterval, unmanaged, connectivityCheckEnabled } =
|
||||
await config.getMany([
|
||||
'appUpdatePollInterval',
|
||||
'unmanaged',
|
||||
'connectivityCheckEnabled',
|
||||
]);
|
||||
|
||||
// Don't have to perform checks for unmanaged
|
||||
if (unmanaged) {
|
||||
@ -535,7 +532,7 @@ async function reportInitialName(
|
||||
device_name: name,
|
||||
},
|
||||
});
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
log.error('Unable to report initial device name to API');
|
||||
logger.logSystemMessage(
|
||||
'Unable to report initial device name to API',
|
||||
|
@ -57,9 +57,9 @@ async function report({ body, opts }: StateReport) {
|
||||
body,
|
||||
};
|
||||
|
||||
const [
|
||||
{ statusCode, body: statusMessage, headers },
|
||||
] = await request.patchAsync(endpoint, params).timeout(apiTimeout);
|
||||
const [{ statusCode, body: statusMessage, headers }] = await request
|
||||
.patchAsync(endpoint, params)
|
||||
.timeout(apiTimeout);
|
||||
|
||||
if (statusCode < 200 || statusCode >= 300) {
|
||||
throw new StatusError(
|
||||
|
@ -750,25 +750,21 @@ export class App {
|
||||
},
|
||||
);
|
||||
|
||||
const [
|
||||
opts,
|
||||
supervisorApiHost,
|
||||
hostPathExists,
|
||||
hostname,
|
||||
] = await Promise.all([
|
||||
config.get('extendedEnvOptions'),
|
||||
dockerUtils
|
||||
.getNetworkGateway(constants.supervisorNetworkInterface)
|
||||
.catch(() => '127.0.0.1'),
|
||||
(async () => ({
|
||||
firmware: await pathExistsOnHost('/lib/firmware'),
|
||||
modules: await pathExistsOnHost('/lib/modules'),
|
||||
}))(),
|
||||
(
|
||||
(await config.get('hostname')) ??
|
||||
(await fs.readFile('/etc/hostname', 'utf-8'))
|
||||
).trim(),
|
||||
]);
|
||||
const [opts, supervisorApiHost, hostPathExists, hostname] =
|
||||
await Promise.all([
|
||||
config.get('extendedEnvOptions'),
|
||||
dockerUtils
|
||||
.getNetworkGateway(constants.supervisorNetworkInterface)
|
||||
.catch(() => '127.0.0.1'),
|
||||
(async () => ({
|
||||
firmware: await pathExistsOnHost('/lib/firmware'),
|
||||
modules: await pathExistsOnHost('/lib/modules'),
|
||||
}))(),
|
||||
(
|
||||
(await config.get('hostname')) ??
|
||||
(await fs.readFile('/etc/hostname', 'utf-8'))
|
||||
).trim(),
|
||||
]);
|
||||
|
||||
const svcOpts = {
|
||||
appName: app.name,
|
||||
@ -809,7 +805,7 @@ export class App {
|
||||
let imageInfo: ImageInspectInfo | undefined;
|
||||
try {
|
||||
imageInfo = await imageManager.inspectByName(svc.image);
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (!NotFoundError(e)) {
|
||||
throw e;
|
||||
}
|
||||
@ -824,7 +820,7 @@ export class App {
|
||||
// FIXME: Typings for DeviceMetadata
|
||||
return await Service.fromComposeObject(
|
||||
svc,
|
||||
(thisSvcOpts as unknown) as DeviceMetadata,
|
||||
thisSvcOpts as unknown as DeviceMetadata,
|
||||
);
|
||||
}),
|
||||
);
|
||||
|
@ -51,12 +51,10 @@ type ApplicationManagerEventEmitter = StrictEventEmitter<
|
||||
const events: ApplicationManagerEventEmitter = new EventEmitter();
|
||||
export const on: typeof events['on'] = events.on.bind(events);
|
||||
export const once: typeof events['once'] = events.once.bind(events);
|
||||
export const removeListener: typeof events['removeListener'] = events.removeListener.bind(
|
||||
events,
|
||||
);
|
||||
export const removeAllListeners: typeof events['removeAllListeners'] = events.removeAllListeners.bind(
|
||||
events,
|
||||
);
|
||||
export const removeListener: typeof events['removeListener'] =
|
||||
events.removeListener.bind(events);
|
||||
export const removeAllListeners: typeof events['removeAllListeners'] =
|
||||
events.removeAllListeners.bind(events);
|
||||
|
||||
const proxyvisor = new Proxyvisor();
|
||||
const localModeManager = new LocalModeManager();
|
||||
|
@ -70,17 +70,14 @@ const events = new ImageEventEmitter();
|
||||
|
||||
export const on: typeof events['on'] = events.on.bind(events);
|
||||
export const once: typeof events['once'] = events.once.bind(events);
|
||||
export const removeListener: typeof events['removeListener'] = events.removeListener.bind(
|
||||
events,
|
||||
);
|
||||
export const removeAllListeners: typeof events['removeAllListeners'] = events.removeAllListeners.bind(
|
||||
events,
|
||||
);
|
||||
export const removeListener: typeof events['removeListener'] =
|
||||
events.removeListener.bind(events);
|
||||
export const removeAllListeners: typeof events['removeAllListeners'] =
|
||||
events.removeAllListeners.bind(events);
|
||||
|
||||
const imageFetchFailures: Dictionary<number> = {};
|
||||
const imageFetchLastFailureTime: Dictionary<ReturnType<
|
||||
typeof process.hrtime
|
||||
>> = {};
|
||||
const imageFetchLastFailureTime: Dictionary<ReturnType<typeof process.hrtime>> =
|
||||
{};
|
||||
const imageCleanupFailures: Dictionary<number> = {};
|
||||
|
||||
type ImageState = Pick<Image, 'status' | 'downloadProgress'>;
|
||||
@ -239,7 +236,7 @@ export async function triggerFetch(
|
||||
await markAsSupervised({ ...image, dockerImageId: img.Id });
|
||||
|
||||
success = true;
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (!NotFoundError(e)) {
|
||||
if (!(e instanceof ImageDownloadBackoffError)) {
|
||||
addImageFailure(image.name);
|
||||
@ -538,9 +535,11 @@ async function getImagesForCleanup(): Promise<string[]> {
|
||||
// for images with deltas this should return unless there is some inconsistency
|
||||
// and the tag was deleted.
|
||||
const inspectByReference = async (imageName: string) => {
|
||||
const { registry, imageName: name, tagName } = dockerUtils.getRegistryAndName(
|
||||
imageName,
|
||||
);
|
||||
const {
|
||||
registry,
|
||||
imageName: name,
|
||||
tagName,
|
||||
} = dockerUtils.getRegistryAndName(imageName);
|
||||
|
||||
const repo = [registry, name].filter((s) => !!s).join('/');
|
||||
const reference = [repo, tagName].filter((s) => !!s).join(':');
|
||||
@ -618,7 +617,7 @@ export async function cleanup() {
|
||||
try {
|
||||
await docker.getImage(image).remove({ force: true });
|
||||
delete imageCleanupFailures[image];
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
logger.logSystemMessage(
|
||||
`Error cleaning up ${image}: ${e.message} - will ignore for 1 hour`,
|
||||
{ error: e },
|
||||
@ -730,7 +729,7 @@ async function removeImageIfNotNeeded(image: Image): Promise<void> {
|
||||
|
||||
// Mark the image as removed
|
||||
removed = true;
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (NotFoundError(e)) {
|
||||
removed = false;
|
||||
} else {
|
||||
@ -781,7 +780,7 @@ async function fetchDelta(
|
||||
): Promise<string> {
|
||||
logger.logSystemEvent(LogTypes.downloadImageDelta, { image });
|
||||
|
||||
const deltaOpts = (opts as unknown) as DeltaFetchOptions;
|
||||
const deltaOpts = opts as unknown as DeltaFetchOptions;
|
||||
const srcImage = await inspectByName(deltaOpts.deltaSource);
|
||||
|
||||
deltaOpts.deltaSourceId = srcImage.Id;
|
||||
|
@ -45,7 +45,7 @@ export async function create(network: Network) {
|
||||
|
||||
// We have a network with the same config and name
|
||||
// already created, we can skip this
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (!NotFoundError(e)) {
|
||||
logger.logSystemEvent(logTypes.createNetworkError, {
|
||||
network: { name: network.name, appUuid: network.appUuid },
|
||||
|
@ -20,9 +20,11 @@ export class Network {
|
||||
|
||||
private constructor() {}
|
||||
|
||||
private static deconstructDockerName(
|
||||
name: string,
|
||||
): { name: string; appId?: number; appUuid?: string } {
|
||||
private static deconstructDockerName(name: string): {
|
||||
name: string;
|
||||
appId?: number;
|
||||
appUuid?: string;
|
||||
} {
|
||||
const matchWithAppId = name.match(/^(\d+)_(\S+)/);
|
||||
if (matchWithAppId == null) {
|
||||
const matchWithAppUuid = name.match(/^([0-9a-f-A-F]{32,})_(\S+)/);
|
||||
|
@ -2,7 +2,8 @@ import * as _ from 'lodash';
|
||||
import { TypedError } from 'typed-error';
|
||||
|
||||
// Adapted from https://github.com/docker/docker-py/blob/master/docker/utils/ports.py#L3
|
||||
const PORTS_REGEX = /^(?:(?:([a-fA-F\d.:]+):)?([\d]*)(?:-([\d]+))?:)?([\d]+)(?:-([\d]+))?(?:\/(udp|tcp))?$/;
|
||||
const PORTS_REGEX =
|
||||
/^(?:(?:([a-fA-F\d.:]+):)?([\d]*)(?:-([\d]+))?:)?([\d]+)(?:-([\d]+))?(?:\/(udp|tcp))?$/;
|
||||
|
||||
// A regex to extract the protocol and internal port of the incoming Docker options
|
||||
const DOCKER_OPTS_PORTS_REGEX = /(\d+)(?:\/?([a-z]+))?/i;
|
||||
|
@ -43,12 +43,10 @@ interface KillOpts {
|
||||
|
||||
export const on: typeof events['on'] = events.on.bind(events);
|
||||
export const once: typeof events['once'] = events.once.bind(events);
|
||||
export const removeListener: typeof events['removeListener'] = events.removeListener.bind(
|
||||
events,
|
||||
);
|
||||
export const removeAllListeners: typeof events['removeAllListeners'] = events.removeAllListeners.bind(
|
||||
events,
|
||||
);
|
||||
export const removeListener: typeof events['removeListener'] =
|
||||
events.removeListener.bind(events);
|
||||
export const removeAllListeners: typeof events['removeAllListeners'] =
|
||||
events.removeAllListeners.bind(events);
|
||||
|
||||
// Whether a container has died, indexed by ID
|
||||
const containerHasDied: Dictionary<boolean> = {};
|
||||
@ -74,7 +72,7 @@ export const getAll = async (
|
||||
service.status = vState.status;
|
||||
}
|
||||
return service;
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (NotFoundError(e)) {
|
||||
return null;
|
||||
}
|
||||
@ -90,10 +88,8 @@ async function get(service: Service) {
|
||||
const containerIds = await getContainerIdMap(
|
||||
service.appUuid || service.appId,
|
||||
);
|
||||
const services = (
|
||||
await getAll(`service-name=${service.serviceName}`)
|
||||
).filter((currentService) =>
|
||||
currentService.isEqualConfig(service, containerIds),
|
||||
const services = (await getAll(`service-name=${service.serviceName}`)).filter(
|
||||
(currentService) => currentService.isEqualConfig(service, containerIds),
|
||||
);
|
||||
|
||||
if (services.length === 0) {
|
||||
@ -210,7 +206,7 @@ export async function remove(service: Service) {
|
||||
|
||||
try {
|
||||
await docker.getContainer(existingService.containerId).remove({ v: true });
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (!NotFoundError(e)) {
|
||||
logger.logSystemEvent(LogTypes.removeDeadServiceError, {
|
||||
service,
|
||||
@ -231,7 +227,7 @@ async function create(service: Service) {
|
||||
);
|
||||
}
|
||||
return docker.getContainer(existing.containerId);
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (!NotFoundError(e)) {
|
||||
logger.logSystemEvent(LogTypes.installServiceError, {
|
||||
service,
|
||||
@ -387,7 +383,7 @@ export function listenToEvents() {
|
||||
let service: Service | null = null;
|
||||
try {
|
||||
service = await getByDockerContainerId(data.id);
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (!NotFoundError(e)) {
|
||||
throw e;
|
||||
}
|
||||
@ -418,7 +414,7 @@ export function listenToEvents() {
|
||||
await logMonitor.detach(data.id);
|
||||
}
|
||||
}
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
log.error('Error on docker event:', e, e.stack);
|
||||
}
|
||||
}
|
||||
|
@ -92,9 +92,8 @@ export class Service {
|
||||
'dns',
|
||||
'dnsSearch',
|
||||
];
|
||||
public static allConfigArrayFields: ServiceConfigArrayField[] = Service.configArrayFields.concat(
|
||||
Service.orderedConfigArrayFields,
|
||||
);
|
||||
public static allConfigArrayFields: ServiceConfigArrayField[] =
|
||||
Service.configArrayFields.concat(Service.orderedConfigArrayFields);
|
||||
|
||||
// A list of fields to ignore when comparing container configuration
|
||||
private static omitFields = [
|
||||
@ -724,9 +723,8 @@ export class Service {
|
||||
ExposedPorts: exposedPorts,
|
||||
Image: this.config.image,
|
||||
Labels: this.config.labels,
|
||||
NetworkingConfig: ComposeUtils.serviceNetworksToDockerNetworks(
|
||||
mainNetwork,
|
||||
),
|
||||
NetworkingConfig:
|
||||
ComposeUtils.serviceNetworksToDockerNetworks(mainNetwork),
|
||||
StopSignal: this.config.stopSignal,
|
||||
Domainname: this.config.domainname,
|
||||
Hostname: this.config.hostname,
|
||||
@ -821,8 +819,8 @@ export class Service {
|
||||
// Service.orderedConfigArrayFields are defined as
|
||||
// fields inside of Service.config
|
||||
const arrayEq = ComposeUtils.compareArrayFields(
|
||||
(this.config as unknown) as Dictionary<unknown>,
|
||||
(service.config as unknown) as Dictionary<unknown>,
|
||||
this.config as unknown as Dictionary<unknown>,
|
||||
service.config as unknown as Dictionary<unknown>,
|
||||
Service.configArrayFields,
|
||||
Service.orderedConfigArrayFields,
|
||||
);
|
||||
|
@ -249,9 +249,8 @@ export function getHealthcheck(
|
||||
const imageServiceHealthcheck = dockerHealthcheckToServiceHealthcheck(
|
||||
_.get(imageInfo, 'Config.Healthcheck', null),
|
||||
);
|
||||
const composeServiceHealthcheck = composeHealthcheckToServiceHealthcheck(
|
||||
composeHealthcheck,
|
||||
);
|
||||
const composeServiceHealthcheck =
|
||||
composeHealthcheckToServiceHealthcheck(composeHealthcheck);
|
||||
|
||||
// Overlay any compose healthcheck fields on the image healthchecks
|
||||
return _.assign(
|
||||
@ -277,9 +276,8 @@ export function getWorkingDir(
|
||||
workingDir: string | null | undefined,
|
||||
imageInfo?: Dockerode.ImageInspectInfo,
|
||||
): string {
|
||||
return (workingDir != null
|
||||
? workingDir
|
||||
: _.get(imageInfo, 'Config.WorkingDir', '')
|
||||
return (
|
||||
workingDir != null ? workingDir : _.get(imageInfo, 'Config.WorkingDir', '')
|
||||
).replace(/(^.+)\/$/, '$1');
|
||||
}
|
||||
|
||||
@ -448,9 +446,10 @@ export function serviceUlimitsToDockerUlimits(
|
||||
return ret;
|
||||
}
|
||||
|
||||
export function serviceRestartToDockerRestartPolicy(
|
||||
restart: string,
|
||||
): { Name: string; MaximumRetryCount: number } {
|
||||
export function serviceRestartToDockerRestartPolicy(restart: string): {
|
||||
Name: string;
|
||||
MaximumRetryCount: number;
|
||||
} {
|
||||
return {
|
||||
Name: restart,
|
||||
MaximumRetryCount: 0,
|
||||
@ -535,9 +534,9 @@ export function normalizeNullValues(obj: Dictionary<any>): void {
|
||||
});
|
||||
}
|
||||
|
||||
export function normalizeLabels(labels: {
|
||||
export function normalizeLabels(labels: { [key: string]: string }): {
|
||||
[key: string]: string;
|
||||
}): { [key: string]: string } {
|
||||
} {
|
||||
const legacyLabels = _.mapKeys(
|
||||
_.pickBy(labels, (_v, k) => _.startsWith(k, 'io.resin.')),
|
||||
(_v, k) => {
|
||||
|
@ -58,7 +58,7 @@ export async function create(volume: Volume): Promise<void> {
|
||||
if (!volume.isEqualConfig(existing)) {
|
||||
throw new ResourceRecreationAttemptError('volume', volume.name);
|
||||
}
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (!NotFoundError(e)) {
|
||||
logger.logSystemEvent(LogTypes.createVolumeError, {
|
||||
volume: { name: volume.name },
|
||||
|
@ -122,9 +122,10 @@ export class Volume {
|
||||
return `${appId}_${name}`;
|
||||
}
|
||||
|
||||
private static deconstructDockerName(
|
||||
name: string,
|
||||
): { name: string; appId: number } {
|
||||
private static deconstructDockerName(name: string): {
|
||||
name: string;
|
||||
appId: number;
|
||||
} {
|
||||
const match = name.match(/(\d+)_(\S+)/);
|
||||
if (match == null) {
|
||||
throw new InternalInconsistencyError(
|
||||
|
@ -225,10 +225,8 @@ export class ExtraUEnv extends ConfigBackend {
|
||||
// Reduce ConfigOptions into a Map that joins collections
|
||||
return Object.entries(configs).reduce(
|
||||
(configMap: Map<string, string>, [configKey, configValue]) => {
|
||||
const {
|
||||
key: ENTRY_KEY,
|
||||
collection: ENTRY_IS_COLLECTION,
|
||||
} = ExtraUEnv.supportedConfigs[configKey];
|
||||
const { key: ENTRY_KEY, collection: ENTRY_IS_COLLECTION } =
|
||||
ExtraUEnv.supportedConfigs[configKey];
|
||||
// Check if we have to build the value for the entry
|
||||
if (ENTRY_IS_COLLECTION) {
|
||||
return configMap.set(
|
||||
|
@ -20,12 +20,9 @@ export class Odmdata extends ConfigBackend {
|
||||
private static supportedConfigs = ['configuration'];
|
||||
private BYTE_OFFSETS = [1659, 5243, 18043];
|
||||
private CONFIG_BYTES = [
|
||||
0x0 /* Config Option #1 */,
|
||||
0x1 /* Config Option #2 */,
|
||||
0x6 /* Config Option #3 */,
|
||||
0x7 /* Config Option #4 */,
|
||||
0x2 /* Config Option #5 */,
|
||||
0x3 /* Config Option #6 */,
|
||||
0x0 /* Config Option #1 */, 0x1 /* Config Option #2 */,
|
||||
0x6 /* Config Option #3 */, 0x7 /* Config Option #4 */,
|
||||
0x2 /* Config Option #5 */, 0x3 /* Config Option #6 */,
|
||||
];
|
||||
private CONFIG_BUFFER = Buffer.from(this.CONFIG_BYTES);
|
||||
|
||||
@ -186,7 +183,7 @@ export class Odmdata extends ConfigBackend {
|
||||
): Promise<fs.FileHandle> {
|
||||
try {
|
||||
return await fs.open(file, flags);
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
switch (e.code) {
|
||||
case 'ENOENT':
|
||||
log.error(`File not found at: ${file}`);
|
||||
|
@ -35,9 +35,9 @@ export default class ConfigJsonConfigBackend {
|
||||
readLock('config.json').disposer((release) => release());
|
||||
}
|
||||
|
||||
public async set<T extends Schema.SchemaKey>(
|
||||
keyVals: { [key in T]: unknown },
|
||||
) {
|
||||
public async set<T extends Schema.SchemaKey>(keyVals: {
|
||||
[key in T]: unknown;
|
||||
}) {
|
||||
await this.init();
|
||||
await Bluebird.using(this.writeLockConfigJson(), async () => {
|
||||
let changed = false;
|
||||
|
@ -35,9 +35,8 @@ interface ConfigEventTypes {
|
||||
change: ConfigChangeMap<SchemaTypeKey>;
|
||||
}
|
||||
|
||||
export const configJsonBackend: ConfigJsonConfigBackend = new ConfigJsonConfigBackend(
|
||||
Schema.schema,
|
||||
);
|
||||
export const configJsonBackend: ConfigJsonConfigBackend =
|
||||
new ConfigJsonConfigBackend(Schema.schema);
|
||||
|
||||
type ConfigEventEmitter = StrictEventEmitter<EventEmitter, ConfigEventTypes>;
|
||||
class ConfigEvents extends (EventEmitter as new () => ConfigEventEmitter) {}
|
||||
@ -46,9 +45,8 @@ const events = new ConfigEvents();
|
||||
// Expose methods which make this module act as an EventEmitter
|
||||
export const on: typeof events['on'] = events.on.bind(events);
|
||||
export const once: typeof events['once'] = events.once.bind(events);
|
||||
export const removeListener: typeof events['removeListener'] = events.removeListener.bind(
|
||||
events,
|
||||
);
|
||||
export const removeListener: typeof events['removeListener'] =
|
||||
events.removeListener.bind(events);
|
||||
|
||||
export async function get<T extends SchemaTypeKey>(
|
||||
key: T,
|
||||
@ -120,9 +118,9 @@ export async function getMany<T extends SchemaTypeKey>(
|
||||
trx?: Transaction,
|
||||
): Promise<{ [key in T]: SchemaReturn<key> }> {
|
||||
const values = await Promise.all(keys.map((k) => get(k, trx)));
|
||||
return (_.zipObject(keys, values) as unknown) as Promise<
|
||||
{ [key in T]: SchemaReturn<key> }
|
||||
>;
|
||||
return _.zipObject(keys, values) as unknown as Promise<{
|
||||
[key in T]: SchemaReturn<key>;
|
||||
}>;
|
||||
}
|
||||
|
||||
export async function set<T extends SchemaTypeKey>(
|
||||
@ -332,11 +330,11 @@ function valueToString(value: unknown, name: string) {
|
||||
}
|
||||
}
|
||||
|
||||
function checkValueDecode(
|
||||
decoded: Either<t.Errors, unknown>,
|
||||
function checkValueDecode<T>(
|
||||
decoded: Either<t.Errors, T>,
|
||||
key: string,
|
||||
value: unknown,
|
||||
): decoded is Right<unknown> {
|
||||
value: T,
|
||||
): decoded is Right<T> {
|
||||
if (isLeft(decoded)) {
|
||||
throw new ConfigurationValidationError(key, value);
|
||||
}
|
||||
|
@ -286,7 +286,7 @@ export function createV2Api(router: Router) {
|
||||
let apps: any;
|
||||
try {
|
||||
apps = await applicationManager.getLegacyState();
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
log.error(e.message);
|
||||
return res.status(500).json({
|
||||
status: 'failed',
|
||||
@ -358,7 +358,7 @@ export function createV2Api(router: Router) {
|
||||
status: 'success',
|
||||
message: 'OK',
|
||||
});
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
res.status(400).json({
|
||||
status: 'failed',
|
||||
message: e.message,
|
||||
@ -380,7 +380,7 @@ export function createV2Api(router: Router) {
|
||||
deviceType,
|
||||
},
|
||||
});
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
res.status(500).json({
|
||||
status: 'failed',
|
||||
message: e.message,
|
||||
@ -536,7 +536,7 @@ export function createV2Api(router: Router) {
|
||||
status: 'success',
|
||||
tags,
|
||||
});
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
log.error(e);
|
||||
res.status(500).json({
|
||||
status: 'failed',
|
||||
|
@ -79,7 +79,7 @@ const actionExecutors: DeviceActionExecutors = {
|
||||
success: true,
|
||||
});
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
if (step.humanReadableTarget) {
|
||||
logger.logConfigChange(step.humanReadableTarget, {
|
||||
err,
|
||||
@ -102,7 +102,7 @@ const actionExecutors: DeviceActionExecutors = {
|
||||
if (!initial) {
|
||||
logger.logConfigChange(logValue, { success: true });
|
||||
}
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
logger.logConfigChange(logValue, { err });
|
||||
throw err;
|
||||
}
|
||||
@ -271,7 +271,7 @@ export async function getTarget({
|
||||
let conf: Dictionary<string>;
|
||||
try {
|
||||
conf = JSON.parse(devConfig.targetValues);
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
throw new Error(`Corrupted supervisor database! Error: ${e.message}`);
|
||||
}
|
||||
if (initial || conf.SUPERVISOR_VPN_CONTROL == null) {
|
||||
@ -697,7 +697,7 @@ async function isVPNEnabled(): Promise<boolean> {
|
||||
try {
|
||||
const activeState = await dbus.serviceActiveState(vpnServiceName);
|
||||
return !_.includes(['inactive', 'deactivating'], activeState);
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (UnitNotLoadedError(e)) {
|
||||
return false;
|
||||
}
|
||||
|
@ -78,7 +78,7 @@ function createDeviceStateRouter() {
|
||||
try {
|
||||
const response = await executeStepAction({ action }, { force });
|
||||
res.status(202).json(response);
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
const status = e instanceof UpdatesLockedError ? 423 : 500;
|
||||
res.status(status).json({
|
||||
Data: '',
|
||||
@ -155,7 +155,7 @@ function createDeviceStateRouter() {
|
||||
validation.checkTruthy(req.body.force) || lockOverride,
|
||||
);
|
||||
res.status(200).send('OK');
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
// TODO: We should be able to throw err if it's UpdatesLockedError
|
||||
// and the error middleware will handle it, but this doesn't work in
|
||||
// the test environment. Fix this when fixing API tests.
|
||||
@ -194,7 +194,7 @@ function createDeviceStateRouter() {
|
||||
stateToSend.download_progress = service.download_progress;
|
||||
}
|
||||
res.json(stateToSend);
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
res.status(500).json({
|
||||
Data: '',
|
||||
Error: (e != null ? e.message : undefined) || e || 'Unknown error',
|
||||
@ -231,12 +231,10 @@ type DeviceStateEventEmitter = StrictEventEmitter<
|
||||
const events = new EventEmitter() as DeviceStateEventEmitter;
|
||||
export const on: typeof events['on'] = events.on.bind(events);
|
||||
export const once: typeof events['once'] = events.once.bind(events);
|
||||
export const removeListener: typeof events['removeListener'] = events.removeListener.bind(
|
||||
events,
|
||||
);
|
||||
export const removeAllListeners: typeof events['removeAllListeners'] = events.removeAllListeners.bind(
|
||||
events,
|
||||
);
|
||||
export const removeListener: typeof events['removeListener'] =
|
||||
events.removeListener.bind(events);
|
||||
export const removeAllListeners: typeof events['removeAllListeners'] =
|
||||
events.removeAllListeners.bind(events);
|
||||
|
||||
type DeviceStateStepTarget = 'reboot' | 'shutdown' | 'noop';
|
||||
|
||||
@ -509,9 +507,10 @@ export async function setTarget(target: TargetState, localSource?: boolean) {
|
||||
export function getTarget({
|
||||
initial = false,
|
||||
intermediate = false,
|
||||
}: { initial?: boolean; intermediate?: boolean } = {}): Bluebird<
|
||||
InstancedDeviceState
|
||||
> {
|
||||
}: {
|
||||
initial?: boolean;
|
||||
intermediate?: boolean;
|
||||
} = {}): Bluebird<InstancedDeviceState> {
|
||||
return usingReadLockTarget(async () => {
|
||||
if (intermediate) {
|
||||
return intermediateTarget!;
|
||||
@ -772,7 +771,7 @@ export async function applyStep<T extends PossibleStepTargets>(
|
||||
skipLock,
|
||||
});
|
||||
emitAsync('step-completed', null, step, stepResult || undefined);
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
emitAsync('step-error', e, step);
|
||||
throw e;
|
||||
}
|
||||
@ -918,7 +917,7 @@ export const applyTarget = async ({
|
||||
nextDelay,
|
||||
retryCount,
|
||||
});
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (e instanceof UpdatesLockedError) {
|
||||
// Forward the UpdatesLockedError directly
|
||||
throw e;
|
||||
|
@ -53,9 +53,11 @@ export async function setApps(
|
||||
|
||||
const services = Object.keys(release.services ?? {}).map((serviceName) => {
|
||||
const { id: releaseId } = release;
|
||||
const { id: serviceId, image_id: imageId, ...service } = release.services[
|
||||
serviceName
|
||||
];
|
||||
const {
|
||||
id: serviceId,
|
||||
image_id: imageId,
|
||||
...service
|
||||
} = release.services[serviceName];
|
||||
|
||||
return {
|
||||
...service,
|
||||
@ -94,53 +96,62 @@ export async function getTargetJson(): Promise<TargetApps> {
|
||||
const dbApps = await getDBEntry();
|
||||
|
||||
return dbApps
|
||||
.map(({ source, uuid, releaseId, commit: releaseUuid, ...app }): [
|
||||
string,
|
||||
TargetApp,
|
||||
] => {
|
||||
const services = (JSON.parse(app.services) as DatabaseService[])
|
||||
.map(({ serviceName, serviceId, imageId, ...service }): [
|
||||
string,
|
||||
TargetService,
|
||||
] => [
|
||||
serviceName,
|
||||
{
|
||||
id: serviceId,
|
||||
image_id: imageId,
|
||||
..._.omit(service, ['appId', 'appUuid', 'commit', 'releaseId']),
|
||||
} as TargetService,
|
||||
])
|
||||
// Map by serviceName
|
||||
.reduce(
|
||||
(svcs, [serviceName, s]) => ({
|
||||
...svcs,
|
||||
[serviceName]: s,
|
||||
}),
|
||||
{},
|
||||
);
|
||||
|
||||
const releases = releaseUuid
|
||||
? {
|
||||
[releaseUuid]: {
|
||||
id: releaseId,
|
||||
services,
|
||||
networks: JSON.parse(app.networks),
|
||||
volumes: JSON.parse(app.volumes),
|
||||
} as TargetRelease,
|
||||
}
|
||||
: {};
|
||||
|
||||
return [
|
||||
.map(
|
||||
({
|
||||
source,
|
||||
uuid,
|
||||
{
|
||||
id: app.appId,
|
||||
name: app.name,
|
||||
class: app.class,
|
||||
is_host: !!app.isHost,
|
||||
releases,
|
||||
},
|
||||
];
|
||||
})
|
||||
releaseId,
|
||||
commit: releaseUuid,
|
||||
...app
|
||||
}): [string, TargetApp] => {
|
||||
const services = (JSON.parse(app.services) as DatabaseService[])
|
||||
.map(
|
||||
({
|
||||
serviceName,
|
||||
serviceId,
|
||||
imageId,
|
||||
...service
|
||||
}): [string, TargetService] => [
|
||||
serviceName,
|
||||
{
|
||||
id: serviceId,
|
||||
image_id: imageId,
|
||||
..._.omit(service, ['appId', 'appUuid', 'commit', 'releaseId']),
|
||||
} as TargetService,
|
||||
],
|
||||
)
|
||||
// Map by serviceName
|
||||
.reduce(
|
||||
(svcs, [serviceName, s]) => ({
|
||||
...svcs,
|
||||
[serviceName]: s,
|
||||
}),
|
||||
{},
|
||||
);
|
||||
|
||||
const releases = releaseUuid
|
||||
? {
|
||||
[releaseUuid]: {
|
||||
id: releaseId,
|
||||
services,
|
||||
networks: JSON.parse(app.networks),
|
||||
volumes: JSON.parse(app.volumes),
|
||||
} as TargetRelease,
|
||||
}
|
||||
: {};
|
||||
|
||||
return [
|
||||
uuid,
|
||||
{
|
||||
id: app.appId,
|
||||
name: app.name,
|
||||
class: app.class,
|
||||
is_host: !!app.isHost,
|
||||
releases,
|
||||
},
|
||||
];
|
||||
},
|
||||
)
|
||||
.reduce((apps, [uuid, app]) => ({ ...apps, [uuid]: app }), {});
|
||||
}
|
||||
|
||||
|
@ -57,7 +57,7 @@ export async function loadTargetFromFile(appsPath: string): Promise<boolean> {
|
||||
let stateFromFile: AppsJsonFormat | any[];
|
||||
try {
|
||||
stateFromFile = JSON.parse(content);
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
throw new AppsJsonParseError(e);
|
||||
}
|
||||
|
||||
@ -157,7 +157,7 @@ export async function loadTargetFromFile(appsPath: string): Promise<boolean> {
|
||||
}
|
||||
}
|
||||
return true;
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
// Ensure that this is actually a file, and not an empty path
|
||||
// It can be an empty path because if the file does not exist
|
||||
// on host, the docker daemon creates an empty directory when
|
||||
|
@ -24,10 +24,8 @@ interface TargetStateEvents {
|
||||
) => void;
|
||||
'target-state-apply': (force: boolean, isFromApi: boolean) => void;
|
||||
}
|
||||
export const emitter: StrictEventEmitter<
|
||||
EventEmitter,
|
||||
TargetStateEvents
|
||||
> = new EventEmitter();
|
||||
export const emitter: StrictEventEmitter<EventEmitter, TargetStateEvents> =
|
||||
new EventEmitter();
|
||||
|
||||
const lockGetTarget = () =>
|
||||
writeLock('getTarget').disposer((release) => release());
|
||||
@ -105,17 +103,13 @@ export const update = async (
|
||||
): Promise<void> => {
|
||||
await config.initialized();
|
||||
return Bluebird.using(lockGetTarget(), async () => {
|
||||
const {
|
||||
uuid,
|
||||
apiEndpoint,
|
||||
apiTimeout,
|
||||
deviceApiKey,
|
||||
} = await config.getMany([
|
||||
'uuid',
|
||||
'apiEndpoint',
|
||||
'apiTimeout',
|
||||
'deviceApiKey',
|
||||
]);
|
||||
const { uuid, apiEndpoint, apiTimeout, deviceApiKey } =
|
||||
await config.getMany([
|
||||
'uuid',
|
||||
'apiEndpoint',
|
||||
'apiTimeout',
|
||||
'deviceApiKey',
|
||||
]);
|
||||
|
||||
if (typeof apiEndpoint !== 'string') {
|
||||
throw new InternalInconsistencyError(
|
||||
@ -188,7 +182,7 @@ const poll = async (
|
||||
await update();
|
||||
// Reset fetchErrors because we successfuly updated
|
||||
fetchErrors = 0;
|
||||
} catch (e) {
|
||||
} catch {
|
||||
// Exponential back off if request fails
|
||||
pollInterval = Math.min(appUpdatePollInterval, 15000 * 2 ** fetchErrors);
|
||||
++fetchErrors;
|
||||
@ -228,10 +222,8 @@ export const startPoll = async (): Promise<void> => {
|
||||
});
|
||||
|
||||
// Query and set config values we need to avoid multiple db hits
|
||||
const {
|
||||
instantUpdates: updates,
|
||||
appUpdatePollInterval: interval,
|
||||
} = await config.getMany(['instantUpdates', 'appUpdatePollInterval']);
|
||||
const { instantUpdates: updates, appUpdatePollInterval: interval } =
|
||||
await config.getMany(['instantUpdates', 'appUpdatePollInterval']);
|
||||
instantUpdates = updates;
|
||||
appUpdatePollInterval = interval;
|
||||
} catch {
|
||||
|
@ -32,17 +32,9 @@ export let client: mixpanel.Mixpanel | null = null;
|
||||
export const initialized = _.once(async () => {
|
||||
await config.initialized();
|
||||
|
||||
const {
|
||||
unmanaged,
|
||||
mixpanelHost,
|
||||
mixpanelToken,
|
||||
uuid,
|
||||
} = await config.getMany([
|
||||
'unmanaged',
|
||||
'mixpanelHost',
|
||||
'mixpanelToken',
|
||||
'uuid',
|
||||
]);
|
||||
const { unmanaged, mixpanelHost, mixpanelToken, uuid } = await config.getMany(
|
||||
['unmanaged', 'mixpanelHost', 'mixpanelToken', 'uuid'],
|
||||
);
|
||||
|
||||
defaultProperties = {
|
||||
distinct_id: uuid,
|
||||
|
@ -67,7 +67,7 @@ async function readProxy(): Promise<ProxyConfig | undefined> {
|
||||
let redsocksConf: string;
|
||||
try {
|
||||
redsocksConf = await fs.readFile(redsocksConfPath, 'utf-8');
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (!ENOENT(e)) {
|
||||
throw e;
|
||||
}
|
||||
@ -99,7 +99,7 @@ async function readProxy(): Promise<ProxyConfig | undefined> {
|
||||
if (noProxy.length) {
|
||||
conf.noProxy = noProxy;
|
||||
}
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (!ENOENT(e)) {
|
||||
throw e;
|
||||
}
|
||||
@ -141,7 +141,7 @@ async function setProxy(maybeConf: ProxyConfig | null): Promise<void> {
|
||||
let currentConf: ProxyConfig | undefined;
|
||||
try {
|
||||
currentConf = await readProxy();
|
||||
} catch (err) {
|
||||
} catch {
|
||||
// Noop - current redsocks.conf does not exist
|
||||
}
|
||||
|
||||
|
@ -59,7 +59,7 @@ export const fetchDevice = async (
|
||||
}
|
||||
|
||||
return device;
|
||||
} catch (e) {
|
||||
} catch {
|
||||
throw new DeviceNotFoundError();
|
||||
}
|
||||
};
|
||||
@ -123,12 +123,14 @@ export const exchangeKeyAndGetDevice = async (
|
||||
opts.provisioningApiKey,
|
||||
apiTimeout,
|
||||
);
|
||||
} catch (err) {
|
||||
} catch {
|
||||
throw new ExchangeKeyError(`Couldn't fetch device with provisioning key`);
|
||||
}
|
||||
|
||||
// We found the device so we can try to register a working device key for it
|
||||
const [res] = await (await request.getRequestInstance())
|
||||
const [res] = await (
|
||||
await request.getRequestInstance()
|
||||
)
|
||||
.postAsync(`${opts.apiEndpoint}/api-key/device/${device.id}/device-key`, {
|
||||
json: true,
|
||||
body: {
|
||||
|
@ -72,7 +72,7 @@ export const isScoped = (
|
||||
scopes: Scope[],
|
||||
) =>
|
||||
scopes.some((scope) =>
|
||||
scopeChecks[scope.type](resources, (scope as unknown) as any),
|
||||
scopeChecks[scope.type](resources, scope as unknown as any),
|
||||
);
|
||||
|
||||
export type AuthorizedRequest = express.Request & {
|
||||
|
@ -219,7 +219,7 @@ export function validateTargetContracts(
|
||||
service.labels?.['io.balena.features.optional'],
|
||||
),
|
||||
};
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
throw new ContractValidationError(serviceName, e.message);
|
||||
}
|
||||
}
|
||||
|
@ -170,10 +170,9 @@ export async function fetchDeltaWithProgress(
|
||||
|
||||
const url = `${deltaOpts.deltaEndpoint}/api/v${deltaOpts.deltaVersion}/delta?src=${deltaOpts.deltaSource}&dest=${imgDest}`;
|
||||
|
||||
const [res, data] = await (await request.getRequestInstance()).getAsync(
|
||||
url,
|
||||
opts,
|
||||
);
|
||||
const [res, data] = await (
|
||||
await request.getRequestInstance()
|
||||
).getAsync(url, opts);
|
||||
if (res.statusCode === 502 || res.statusCode === 504) {
|
||||
throw new DeltaStillProcessingError();
|
||||
}
|
||||
|
@ -30,7 +30,7 @@ export function equals<T>(value: T, other: T): boolean {
|
||||
* Returns true if the the object equals `{}` or is an empty
|
||||
* array
|
||||
*/
|
||||
export function empty<T>(value: T): boolean {
|
||||
export function empty<T extends {}>(value: T): boolean {
|
||||
return (Array.isArray(value) && value.length === 0) || equals(value, {});
|
||||
}
|
||||
|
||||
|
@ -48,7 +48,7 @@ async function createVolumeFromLegacyData(
|
||||
{},
|
||||
legacyPath,
|
||||
);
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
logger.logSystemMessage(
|
||||
`Warning: could not migrate legacy /data volume: ${e.message}`,
|
||||
{ error: e },
|
||||
@ -85,7 +85,7 @@ export async function normaliseLegacyDatabase() {
|
||||
|
||||
try {
|
||||
services = JSON.parse(app.services);
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
throw new DatabaseParseError(e);
|
||||
}
|
||||
|
||||
@ -304,76 +304,74 @@ export async function fromV2TargetApps(
|
||||
return (
|
||||
(
|
||||
await Promise.all(
|
||||
Object.keys(apps).map(
|
||||
async (id): Promise<[string, TargetApp]> => {
|
||||
const appId = parseInt(id, 10);
|
||||
const app = apps[appId];
|
||||
Object.keys(apps).map(async (id): Promise<[string, TargetApp]> => {
|
||||
const appId = parseInt(id, 10);
|
||||
const app = apps[appId];
|
||||
|
||||
// If local mode or connectivity is not available just use id as uuid
|
||||
const uuid = local
|
||||
? id
|
||||
: await getUUIDFromAPI(appId).catch(() => {
|
||||
throw new Error(
|
||||
'Cannot migrate from v2 apps.json without Internet connectivity. Please use balenaCLI v13.5.1+ for offline preload support.',
|
||||
);
|
||||
});
|
||||
// If local mode or connectivity is not available just use id as uuid
|
||||
const uuid = local
|
||||
? id
|
||||
: await getUUIDFromAPI(appId).catch(() => {
|
||||
throw new Error(
|
||||
'Cannot migrate from v2 apps.json without Internet connectivity. Please use balenaCLI v13.5.1+ for offline preload support.',
|
||||
);
|
||||
});
|
||||
|
||||
const releases = app.commit
|
||||
? {
|
||||
[app.commit]: {
|
||||
id: app.releaseId,
|
||||
services: Object.keys(app.services ?? {})
|
||||
.map((serviceId) => {
|
||||
const {
|
||||
imageId,
|
||||
serviceName,
|
||||
const releases = app.commit
|
||||
? {
|
||||
[app.commit]: {
|
||||
id: app.releaseId,
|
||||
services: Object.keys(app.services ?? {})
|
||||
.map((serviceId) => {
|
||||
const {
|
||||
imageId,
|
||||
serviceName,
|
||||
image,
|
||||
environment,
|
||||
labels,
|
||||
running,
|
||||
serviceId: _serviceId,
|
||||
contract,
|
||||
...composition
|
||||
} = app.services[serviceId];
|
||||
|
||||
return [
|
||||
serviceName,
|
||||
{
|
||||
id: serviceId,
|
||||
image_id: imageId,
|
||||
image,
|
||||
environment,
|
||||
labels,
|
||||
running,
|
||||
serviceId: _serviceId,
|
||||
contract,
|
||||
...composition
|
||||
} = app.services[serviceId];
|
||||
composition,
|
||||
},
|
||||
];
|
||||
})
|
||||
.reduce(
|
||||
(res, [serviceName, svc]) => ({
|
||||
...res,
|
||||
[serviceName]: svc,
|
||||
}),
|
||||
{},
|
||||
),
|
||||
volumes: app.volumes ?? {},
|
||||
networks: app.networks ?? {},
|
||||
},
|
||||
}
|
||||
: {};
|
||||
|
||||
return [
|
||||
serviceName,
|
||||
{
|
||||
id: serviceId,
|
||||
image_id: imageId,
|
||||
image,
|
||||
environment,
|
||||
labels,
|
||||
running,
|
||||
contract,
|
||||
composition,
|
||||
},
|
||||
];
|
||||
})
|
||||
.reduce(
|
||||
(res, [serviceName, svc]) => ({
|
||||
...res,
|
||||
[serviceName]: svc,
|
||||
}),
|
||||
{},
|
||||
),
|
||||
volumes: app.volumes ?? {},
|
||||
networks: app.networks ?? {},
|
||||
},
|
||||
}
|
||||
: {};
|
||||
|
||||
return [
|
||||
uuid,
|
||||
{
|
||||
id: appId,
|
||||
name: app.name,
|
||||
class: 'fleet',
|
||||
releases,
|
||||
} as TargetApp,
|
||||
];
|
||||
},
|
||||
),
|
||||
return [
|
||||
uuid,
|
||||
{
|
||||
id: appId,
|
||||
name: app.name,
|
||||
class: 'fleet',
|
||||
releases,
|
||||
} as TargetApp,
|
||||
];
|
||||
}),
|
||||
)
|
||||
)
|
||||
// Key by uuid
|
||||
|
@ -126,7 +126,7 @@ export async function unlock(path: string): Promise<void> {
|
||||
export function unlockSync(path: string) {
|
||||
try {
|
||||
return unlinkSync(path);
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (e.code === 'EPERM' || e.code === 'EISDIR') {
|
||||
return rmdirSync(path);
|
||||
}
|
||||
|
@ -21,7 +21,7 @@ const getOSReleaseData = _.memoize(
|
||||
const value = _.trim(values.join('=')).replace(/^"(.+(?="$))"$/, '$1');
|
||||
releaseItems[_.trim(key)] = value;
|
||||
}
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
throw new InternalInconsistencyError(
|
||||
`Unable to read file at ${path}: ${e.message} ${e.stack}`,
|
||||
);
|
||||
|
@ -42,7 +42,7 @@ const formatter = winston.format.printf((args) => {
|
||||
)}${message}`;
|
||||
});
|
||||
|
||||
export const winstonLog = (winston.createLogger({
|
||||
export const winstonLog = winston.createLogger({
|
||||
format: winston.format.combine(winston.format.colorize(), formatter),
|
||||
transports: [new winston.transports.Console()],
|
||||
// In the future we can reduce this logging level in
|
||||
@ -57,7 +57,7 @@ export const winstonLog = (winston.createLogger({
|
||||
// below, we first cast to unknown so we can do what we
|
||||
// like, and then assign every log level a function (which
|
||||
// is what happens internally in winston)
|
||||
}) as unknown) as { [key in logLevel]: (message: string) => void };
|
||||
}) as unknown as { [key in logLevel]: (message: string) => void };
|
||||
|
||||
winston.addColors(colors);
|
||||
|
||||
|
@ -46,11 +46,9 @@ export function abortIfHUPInProgress({
|
||||
force: boolean | undefined;
|
||||
}): Promise<boolean | never> {
|
||||
return Promise.all(
|
||||
[
|
||||
'rollback-health-breadcrumb',
|
||||
'rollback-altboot-breadcrumb',
|
||||
].map((filename) =>
|
||||
pathExistsOnHost(path.join(constants.stateMountPoint, filename)),
|
||||
['rollback-health-breadcrumb', 'rollback-altboot-breadcrumb'].map(
|
||||
(filename) =>
|
||||
pathExistsOnHost(path.join(constants.stateMountPoint, filename)),
|
||||
),
|
||||
).then((existsArray) => {
|
||||
const anyExists = existsArray.some((exists) => exists);
|
||||
@ -112,7 +110,7 @@ export async function lock<T extends unknown>(
|
||||
let lockOverride: boolean;
|
||||
try {
|
||||
lockOverride = await config.get('lockOverride');
|
||||
} catch (err) {
|
||||
} catch (err: any) {
|
||||
throw new InternalInconsistencyError(
|
||||
`Error getting lockOverride config value: ${err?.message ?? err}`,
|
||||
);
|
||||
|
@ -161,7 +161,7 @@ export class LocalModeManager {
|
||||
return this.collectContainerResources(
|
||||
this.containerId || SUPERVISOR_CONTAINER_NAME_FALLBACK,
|
||||
);
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (this.containerId !== undefined) {
|
||||
try {
|
||||
// Inspect operation fails (container ID is out of sync?).
|
||||
@ -172,7 +172,7 @@ export class LocalModeManager {
|
||||
e.message,
|
||||
);
|
||||
return this.collectContainerResources(fallback);
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
// Inspect operation fails (using legacy container name?).
|
||||
const fallback = SUPERVISOR_LEGACY_CONTAINER_NAME_FALLBACK;
|
||||
log.warn(
|
||||
@ -230,7 +230,7 @@ export class LocalModeManager {
|
||||
EngineSnapshot.fromJSON(r.snapshot),
|
||||
LocalModeManager.parseTimestamp(r.timestamp),
|
||||
);
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
// Some parsing error happened. Ensure we add data details to the error description.
|
||||
throw new Error(
|
||||
`Cannot parse snapshot data ${JSON.stringify(r)}.` +
|
||||
|
@ -101,9 +101,8 @@ class LogMonitor {
|
||||
timestamp: Date.now(),
|
||||
writeRequired: false,
|
||||
};
|
||||
this.containers[
|
||||
containerId
|
||||
].timestamp = await this.getContainerSentTimestamp(containerId);
|
||||
this.containers[containerId].timestamp =
|
||||
await this.getContainerSentTimestamp(containerId);
|
||||
this.backfill(containerId, this.containers[containerId].timestamp);
|
||||
}
|
||||
}
|
||||
|
@ -4,7 +4,7 @@ const _ = require('lodash');
|
||||
var tryParse = function (obj) {
|
||||
try {
|
||||
return JSON.parse(obj);
|
||||
} catch (e) {
|
||||
} catch {
|
||||
return {};
|
||||
}
|
||||
};
|
||||
|
@ -22,7 +22,7 @@ exports.up = function (knex) {
|
||||
try {
|
||||
const parsed = JSON.parse(data.toString());
|
||||
resolve(parsed);
|
||||
} catch (e) {
|
||||
} catch {
|
||||
console.log(
|
||||
'Failed to parse config.json! Things may fail unexpectedly!',
|
||||
);
|
||||
|
@ -25,7 +25,7 @@ exports.up = function (knex) {
|
||||
return resolve(checkTruthy(parsed.localMode));
|
||||
}
|
||||
return resolve(false);
|
||||
} catch (e) {
|
||||
} catch {
|
||||
console.log(
|
||||
'Failed to parse config.json! Things may fail unexpectedly!',
|
||||
);
|
||||
|
@ -46,7 +46,7 @@ export async function isVPNActive(): Promise<boolean> {
|
||||
let active: boolean = true;
|
||||
try {
|
||||
await fs.lstat(`${constants.vpnStatusPath}/active`);
|
||||
} catch (e) {
|
||||
} catch {
|
||||
active = false;
|
||||
}
|
||||
log.info(`VPN connection is ${active ? 'active' : 'not active'}.`);
|
||||
@ -116,7 +116,8 @@ export const connectivityCheckEnabled = Bluebird.method(
|
||||
() => isConnectivityCheckEnabled,
|
||||
);
|
||||
|
||||
const IP_REGEX = /^(?:(?:balena|docker|rce|tun)[0-9]+|tun[0-9]+|resin-vpn|lo|resin-dns|supervisor0|balena-redsocks|resin-redsocks|br-[0-9a-f]{12})$/;
|
||||
const IP_REGEX =
|
||||
/^(?:(?:balena|docker|rce|tun)[0-9]+|tun[0-9]+|resin-vpn|lo|resin-dns|supervisor0|balena-redsocks|resin-redsocks|br-[0-9a-f]{12})$/;
|
||||
|
||||
export const shouldReportInterface = (intf: string) => !IP_REGEX.test(intf);
|
||||
|
||||
|
@ -96,7 +96,7 @@ const createProxyvisorRouter = function (proxyvisor) {
|
||||
const fields = await db.models('dependentDevice').select();
|
||||
const devices = fields.map(parseDeviceFields);
|
||||
res.json(devices);
|
||||
} catch (err) {
|
||||
} catch (/** @type {any} */ err) {
|
||||
res.status(503).send(err?.message || err || 'Unknown error');
|
||||
}
|
||||
});
|
||||
@ -320,7 +320,7 @@ const createProxyvisorRouter = function (proxyvisor) {
|
||||
);
|
||||
}
|
||||
res.sendFile(dest);
|
||||
} catch (err) {
|
||||
} catch (/** @type {any} */ err) {
|
||||
log.error(`Error on ${req.method} ${url.parse(req.url).pathname}`, err);
|
||||
return res.status(503).send(err?.message || err || 'Unknown error');
|
||||
}
|
||||
@ -337,7 +337,7 @@ const createProxyvisorRouter = function (proxyvisor) {
|
||||
config: JSON.parse(app.config ?? '{}'),
|
||||
}));
|
||||
res.json($apps);
|
||||
} catch (err) {
|
||||
} catch (/** @type {any} */ err) {
|
||||
log.error(`Error on ${req.method} ${url.parse(req.url).pathname}`, err);
|
||||
return res.status(503).send(err?.message || err || 'Unknown error');
|
||||
}
|
||||
@ -350,9 +350,8 @@ export class Proxyvisor {
|
||||
constructor() {
|
||||
this.executeStepAction = this.executeStepAction.bind(this);
|
||||
this.getCurrentStates = this.getCurrentStates.bind(this);
|
||||
this.normaliseDependentAppForDB = this.normaliseDependentAppForDB.bind(
|
||||
this,
|
||||
);
|
||||
this.normaliseDependentAppForDB =
|
||||
this.normaliseDependentAppForDB.bind(this);
|
||||
this.setTargetInTransaction = this.setTargetInTransaction.bind(this);
|
||||
this.getTarget = this.getTarget.bind(this);
|
||||
this._getHookStep = this._getHookStep.bind(this);
|
||||
|
@ -58,7 +58,7 @@ export class SupervisorAPI {
|
||||
return res.status(500).send('Unhealthy');
|
||||
}
|
||||
return res.sendStatus(200);
|
||||
} catch (_e) {
|
||||
} catch {
|
||||
log.error('Healthcheck failed');
|
||||
return res.status(500).send('Unhealthy');
|
||||
}
|
||||
|
@ -191,7 +191,7 @@ export type DeviceName = t.TypeOf<typeof DeviceName>;
|
||||
const restrictedRecord = <
|
||||
K extends t.Mixed,
|
||||
V extends t.Mixed,
|
||||
R extends { [key in t.TypeOf<K>]: t.TypeOf<V> }
|
||||
R extends { [key in t.TypeOf<K>]: t.TypeOf<V> },
|
||||
>(
|
||||
k: K,
|
||||
v: V,
|
||||
|
@ -109,7 +109,7 @@ export async function initDevice(opts: Opts) {
|
||||
opts.docker,
|
||||
true,
|
||||
);
|
||||
} catch (e) {
|
||||
} catch {
|
||||
await Bluebird.delay(500);
|
||||
}
|
||||
}
|
||||
|
@ -174,7 +174,7 @@ describe('lib/lockfile', () => {
|
||||
await expect(lockfile.lock(lockOne)).to.not.be.rejected;
|
||||
await expect(lockfile.lock(lockTwo, NOBODY_UID)).to.not.be.rejected;
|
||||
|
||||
// @ts-ignore
|
||||
// @ts-expect-error
|
||||
process.emit('exit');
|
||||
|
||||
// Verify lockfile removal regardless of appId / appUuid
|
||||
|
@ -47,14 +47,14 @@ describe('Database Migrations', () => {
|
||||
});
|
||||
|
||||
after(() => {
|
||||
// @ts-ignore
|
||||
// @ts-expect-error
|
||||
constants.databasePath = process.env.DATABASE_PATH;
|
||||
delete require.cache[require.resolve('~/src/db')];
|
||||
});
|
||||
|
||||
it('creates a database at the path passed on creation', async () => {
|
||||
const databasePath = process.env.DATABASE_PATH_2!;
|
||||
// @ts-ignore
|
||||
// @ts-expect-error
|
||||
constants.databasePath = databasePath;
|
||||
delete require.cache[require.resolve('~/src/db')];
|
||||
|
||||
@ -67,7 +67,7 @@ describe('Database Migrations', () => {
|
||||
const databasePath = process.env.DATABASE_PATH_3!;
|
||||
|
||||
const knexForDB = await createOldDatabase(databasePath);
|
||||
// @ts-ignore
|
||||
// @ts-expect-error
|
||||
constants.databasePath = databasePath;
|
||||
delete require.cache[require.resolve('~/src/db')];
|
||||
const testDb = await import('~/src/db');
|
||||
|
@ -113,7 +113,7 @@ describe('device-state', () => {
|
||||
|
||||
try {
|
||||
await testDb.destroy();
|
||||
} catch (e) {
|
||||
} catch {
|
||||
/* noop */
|
||||
}
|
||||
sinon.restore();
|
||||
|
@ -91,9 +91,9 @@ describe('EventTracker', () => {
|
||||
it('initializes a mixpanel client when not in unmanaged mode', () => {
|
||||
expect(eventTracker.initialized()).to.be.fulfilled.then(() => {
|
||||
expect(mixpanel.init).to.have.been.calledWith('someToken');
|
||||
// @ts-ignore
|
||||
// @ts-expect-error
|
||||
expect(eventTracker.client.token).to.equal('someToken');
|
||||
// @ts-ignore
|
||||
// @ts-expect-error
|
||||
expect(eventTracker.client.track).to.be.a('function');
|
||||
});
|
||||
});
|
||||
@ -138,7 +138,7 @@ describe('EventTracker', () => {
|
||||
'Test event 2',
|
||||
JSON.stringify({ appId: 'someOtherValue' }),
|
||||
);
|
||||
// @ts-ignore
|
||||
// @ts-expect-error
|
||||
expect(eventTracker.client.track).to.be.calledWith('Test event 2', {
|
||||
appId: 'someOtherValue',
|
||||
uuid: 'barbaz',
|
||||
@ -150,7 +150,7 @@ describe('EventTracker', () => {
|
||||
it('can be passed an Error and it is added to the event properties', async () => {
|
||||
const theError = new Error('something went wrong');
|
||||
await eventTracker.track('Error event', theError);
|
||||
// @ts-ignore
|
||||
// @ts-expect-error
|
||||
expect(eventTracker.client.track).to.be.calledWith('Error event', {
|
||||
error: {
|
||||
message: theError.message,
|
||||
@ -174,7 +174,7 @@ describe('EventTracker', () => {
|
||||
},
|
||||
};
|
||||
await eventTracker.track('Some app event', props);
|
||||
// @ts-ignore
|
||||
// @ts-expect-error
|
||||
expect(eventTracker.client.track).to.be.calledWith('Some app event', {
|
||||
service: { appId: '1' },
|
||||
uuid: 'barbaz',
|
||||
|
@ -83,7 +83,7 @@ describe('network', () => {
|
||||
} as any),
|
||||
);
|
||||
|
||||
// @ts-ignore
|
||||
// @ts-expect-error
|
||||
after(() => os.networkInterfaces.restore());
|
||||
|
||||
it('returns only the relevant IP addresses', () =>
|
||||
|
@ -418,9 +418,7 @@ describe('ApiBinder', () => {
|
||||
|
||||
// Copy previous values to restore later
|
||||
const previousStateReportErrors = currentState.stateReportErrors;
|
||||
const previousDeviceStateConnected =
|
||||
// @ts-ignore
|
||||
components.deviceState.connected;
|
||||
const previousDeviceStateConnected = components.deviceState.connected;
|
||||
|
||||
// Set additional conditions not in configStub to cause a fail
|
||||
try {
|
||||
|
@ -25,7 +25,7 @@ describe('Logger', function () {
|
||||
this.requestStub = sinon.stub(https, 'request').returns(this._req);
|
||||
|
||||
configStub = sinon.stub(config, 'getMany').returns(
|
||||
// @ts-ignore this should actually work but the type system doesnt like it
|
||||
// @ts-expect-error this should actually work but the type system doesnt like it
|
||||
Promise.resolve({
|
||||
apiEndpoint: 'https://example.com',
|
||||
uuid: 'deadbeef',
|
||||
@ -134,7 +134,7 @@ describe('Logger', function () {
|
||||
'\u0001\u0000\u0000\u0000\u0000\u0000\u0000?2018-09-21T12:37:09.819134000Z this is the message';
|
||||
const buffer = Buffer.from(message);
|
||||
|
||||
// @ts-ignore accessing a private function
|
||||
// @ts-expect-error accessing a private function
|
||||
expect(ContainerLogs.extractMessage(buffer)).to.deep.equal({
|
||||
message: 'this is the message',
|
||||
timestamp: 1537533429819,
|
||||
|
@ -158,7 +158,6 @@ describe('device-config', () => {
|
||||
it('correctly parses a config.txt file', async () => {
|
||||
// Will try to parse /test/data/mnt/boot/config.txt
|
||||
await expect(
|
||||
// @ts-ignore accessing private value
|
||||
deviceConfig.getBootConfig(configTxtBackend),
|
||||
).to.eventually.deep.equal({
|
||||
HOST_CONFIG_dtparam: '"i2c_arm=on","spi=on","audio=on"',
|
||||
@ -181,7 +180,6 @@ describe('device-config', () => {
|
||||
);
|
||||
|
||||
await expect(
|
||||
// @ts-ignore accessing private value
|
||||
deviceConfig.getBootConfig(configTxtBackend),
|
||||
).to.eventually.deep.equal({
|
||||
HOST_CONFIG_initramfs: 'initramf.gz 0x00800000',
|
||||
@ -207,7 +205,7 @@ describe('device-config', () => {
|
||||
};
|
||||
|
||||
expect(() =>
|
||||
// @ts-ignore accessing private value
|
||||
// @ts-expect-error accessing private value
|
||||
deviceConfig.bootConfigChangeRequired(
|
||||
configTxtBackend,
|
||||
current,
|
||||
@ -243,7 +241,7 @@ describe('device-config', () => {
|
||||
};
|
||||
|
||||
expect(
|
||||
// @ts-ignore accessing private value
|
||||
// @ts-expect-error accessing private value
|
||||
deviceConfig.bootConfigChangeRequired(
|
||||
configTxtBackend,
|
||||
current,
|
||||
@ -311,7 +309,7 @@ describe('device-config', () => {
|
||||
};
|
||||
|
||||
expect(
|
||||
// @ts-ignore accessing private value
|
||||
// @ts-expect-error accessing private value
|
||||
deviceConfig.bootConfigChangeRequired(
|
||||
configTxtBackend,
|
||||
current,
|
||||
@ -386,11 +384,10 @@ describe('device-config', () => {
|
||||
};
|
||||
|
||||
expect(
|
||||
// @ts-ignore accessing private value
|
||||
// @ts-expect-error accessing private value
|
||||
deviceConfig.bootConfigChangeRequired(extlinuxBackend, current, target),
|
||||
).to.equal(true);
|
||||
|
||||
// @ts-ignore accessing private value
|
||||
await deviceConfig.setBootConfig(extlinuxBackend, target);
|
||||
expect(logSpy).to.be.calledTwice;
|
||||
expect(logSpy.getCall(1).args[2]).to.equal('Apply boot config success');
|
||||
|
@ -30,12 +30,12 @@ describe('conversions', function () {
|
||||
}));
|
||||
|
||||
it('should return an empty object with an empty input', function () {
|
||||
// @ts-ignore passing invalid value to test
|
||||
// @ts-expect-error passing invalid value to test
|
||||
expect(conversion.envArrayToObject(null)).to.deep.equal({});
|
||||
// @ts-ignore passing invalid value to test
|
||||
// @ts-expect-error passing invalid value to test
|
||||
expect(conversion.envArrayToObject('')).to.deep.equal({});
|
||||
expect(conversion.envArrayToObject([])).to.deep.equal({});
|
||||
// @ts-ignore passing invalid value to test
|
||||
// @ts-expect-error passing invalid value to test
|
||||
expect(conversion.envArrayToObject(1)).to.deep.equal({});
|
||||
});
|
||||
});
|
||||
|
@ -2,7 +2,7 @@ import { PortMap, PortRange } from '~/src/compose/ports';
import { expect } from 'chai';

// Force cast `PortMap` as a public version so we can test it
const PortMapPublic = (PortMap as any) as new (
const PortMapPublic = PortMap as any as new (
portStrOrObj: string | PortRange,
) => PortMap;

@ -39,7 +39,7 @@ describe('SupervisorAPI', () => {
after(async () => {
try {
await api.stop();
} catch (e) {
} catch (e: any) {
if (e.message !== 'Server is not running.') {
throw e;
}
@ -170,7 +170,7 @@ describe('SupervisorAPI', () => {
// Start each case with API stopped
try {
await api.stop();
} catch (e) {
} catch (e: any) {
if (e.message !== 'Server is not running.') {
throw e;
}
@ -178,9 +178,9 @@ describe('SupervisorAPI', () => {
});

after(async () => {
// @ts-ignore
// @ts-expect-error
Log.info.restore();
// @ts-ignore
// @ts-expect-error
Log.error.restore();
// Resume API for other test suites
return api.listen(mockedOptions.listenPort, mockedOptions.timeout);
@ -190,7 +190,7 @@ describe('SupervisorAPI', () => {
// Start API
await api.listen(mockedOptions.listenPort, mockedOptions.timeout);
// Check if success start was logged
// @ts-ignore
// @ts-expect-error
expect(Log.info.lastCall?.lastArg).to.equal(
`Supervisor API successfully started on port ${mockedOptions.listenPort}`,
);
@ -202,7 +202,7 @@ describe('SupervisorAPI', () => {
// Stop API
await api.stop();
// Check if stopped with info was logged
// @ts-ignore
// @ts-expect-error
expect(Log.info.lastCall?.lastArg).to.equal('Stopped Supervisor API');
});

@ -212,7 +212,7 @@ describe('SupervisorAPI', () => {
// Stop API with error
await api.stop({ errored: true });
// Check if stopped with error was logged
// @ts-ignore
// @ts-expect-error
expect(Log.error.lastCall?.lastArg).to.equal('Stopped Supervisor API');
});
});
@ -166,9 +166,8 @@ describe('LocalModeManager', () => {
const stubEngineObjectMethods = (
removeThrows: boolean,
): Array<sinon.SinonStubbedInstance<EngineStubbedObject>> => {
const resArray: Array<sinon.SinonStubbedInstance<
EngineStubbedObject
>> = [];
const resArray: Array<sinon.SinonStubbedInstance<EngineStubbedObject>> =
[];

const stub = <T>(
c: sinon.StubbableType<EngineStubbedObject>,
@ -186,7 +185,7 @@ describe('LocalModeManager', () => {
}

resArray.push(res);
return (res as unknown) as T;
return res as unknown as T;
};
dockerStub.getImage.returns(stub(Docker.Image, 'image'));
dockerStub.getContainer.returns(stub(Docker.Container, 'container'));
@ -400,7 +399,7 @@ describe('LocalModeManager', () => {
try {
const result = await localMode.retrieveLatestSnapshot();
expect(result).to.not.exist;
} catch (e) {
} catch (e: any) {
expect(e.message).to.match(/Cannot parse snapshot data.*"bad json"/);
}
});
@ -416,7 +415,7 @@ describe('LocalModeManager', () => {
try {
const result = await localMode.retrieveLatestSnapshot();
expect(result).to.not.exist;
} catch (e) {
} catch (e: any) {
expect(e.message).to.match(
/Cannot parse snapshot data.*"bad timestamp"/,
);
@ -341,57 +341,51 @@ describe('Container contracts', () => {

describe('Optional containers', () => {
it('should correctly run passing optional containers', async () => {
const {
valid,
unmetServices,
fulfilledServices,
} = containerContractsFulfilled({
service1: {
contract: {
type: 'sw.container',
slug: 'service1',
requires: [
{
type: 'sw.supervisor',
version: `<${supervisorVersionGreater}`,
},
],
const { valid, unmetServices, fulfilledServices } =
containerContractsFulfilled({
service1: {
contract: {
type: 'sw.container',
slug: 'service1',
requires: [
{
type: 'sw.supervisor',
version: `<${supervisorVersionGreater}`,
},
],
},
optional: true,
},
optional: true,
},
});
});
expect(valid).to.equal(true);
expect(unmetServices).to.deep.equal([]);
expect(fulfilledServices).to.deep.equal(['service1']);
});

it('should corrrectly omit failing optional containers', async () => {
const {
valid,
unmetServices,
fulfilledServices,
} = containerContractsFulfilled({
service1: {
contract: {
type: 'sw.container',
slug: 'service1',
requires: [
{
type: 'sw.supervisor',
version: `>${supervisorVersionGreater}`,
},
],
const { valid, unmetServices, fulfilledServices } =
containerContractsFulfilled({
service1: {
contract: {
type: 'sw.container',
slug: 'service1',
requires: [
{
type: 'sw.supervisor',
version: `>${supervisorVersionGreater}`,
},
],
},
optional: true,
},
optional: true,
},
service2: {
contract: {
type: 'sw.container',
slug: 'service2',
service2: {
contract: {
type: 'sw.container',
slug: 'service2',
},
optional: false,
},
optional: false,
},
});
});
expect(valid).to.equal(true);
expect(unmetServices).to.deep.equal(['service1']);
expect(fulfilledServices).to.deep.equal(['service2']);
@ -8,8 +8,7 @@ describe('Deltas', () => {
const imageStub = stub(dockerUtils.docker, 'getImage').returns({
inspect: () => {
return Promise.resolve({
Id:
'sha256:34ec91fe6e08cb0f867bbc069c5f499d39297eb8e874bb8ce9707537d983bcbc',
Id: 'sha256:34ec91fe6e08cb0f867bbc069c5f499d39297eb8e874bb8ce9707537d983bcbc',
RepoTags: [],
RepoDigests: [],
Parent: '',
@ -23,7 +23,7 @@ describe('Extlinux Configuration', () => {
APPEND \${cbootargs} \${resin_kernel_root} ro rootwait\
`;

// @ts-ignore accessing private method
// @ts-expect-error accessing private method
const parsed = Extlinux.parseExtlinuxFile(text);
expect(parsed.globals).to.have.property('DEFAULT').that.equals('primary');
expect(parsed.globals).to.have.property('TIMEOUT').that.equals('30');
@ -60,7 +60,7 @@ describe('Extlinux Configuration', () => {
APPEND test4\
`;

// @ts-ignore accessing private method
// @ts-expect-error accessing private method
const parsed = Extlinux.parseExtlinuxFile(text);
expect(parsed.labels).to.have.property('primary').that.deep.equals({
LINUX: 'test1',
@ -147,7 +147,7 @@ describe('Extlinux Configuration', () => {
// Expect correct rejection from the given bad config
try {
await backend.getBootConfig();
} catch (e) {
} catch (e: any) {
expect(e.message).to.equal(badConfig.reason);
}
// Restore stub
@ -248,12 +248,11 @@ describe('Extlinux Configuration', () => {
});

it('normalizes variable value', () => {
[
{ input: { key: 'key', value: 'value' }, output: 'value' },
].forEach(({ input, output }) =>
expect(backend.processConfigVarValue(input.key, input.value)).to.equal(
output,
),
[{ input: { key: 'key', value: 'value' }, output: 'value' }].forEach(
({ input, output }) =>
expect(backend.processConfigVarValue(input.key, input.value)).to.equal(
output,
),
);
});

@ -43,7 +43,7 @@ describe('db-format', () => {
after(async () => {
try {
await testDb.destroy();
} catch (e) {
} catch {
/* noop */
}
sinon.restore();
@ -6,7 +6,7 @@ describe('FDT directive', () => {

it('parses valid FDT value', () => {
VALID_VALUES.forEach(({ input, output }) =>
// @ts-ignore input with no FDT can still be parsed
// @ts-expect-error input with no FDT can still be parsed
expect(directive.parse(input)).to.deep.equal(output),
);
});
@ -24,7 +24,7 @@ describe('extra_uEnv Configuration', () => {
custom_fdt_file=mycustom.dtb
extra_os_cmdline=isolcpus=3,4 splash console=tty0
`;
// @ts-ignore accessing private method
// @ts-expect-error accessing private method
const parsed = ExtraUEnv.parseOptions(fileContents);
expect(parsed).to.deep.equal({
fdt: 'mycustom.dtb',
@ -100,10 +100,10 @@ describe('extra_uEnv Configuration', () => {
readFileStub.resolves(badConfig.contents);
// Expect warning log from the given bad config
await backend.getBootConfig();
// @ts-ignore
// @ts-expect-error
expect(Log.warn.lastCall?.lastArg).to.equal(badConfig.reason);
}
// @ts-ignore
// @ts-expect-error
Log.warn.restore();
});

@ -144,10 +144,10 @@ describe('extra_uEnv Configuration', () => {
stub(fsUtils, 'writeAndSyncFile').resolves();
const logWarningStub = spy(Log, 'warn');

// @ts-ignore accessing private value
// @ts-expect-error accessing private value
const previousSupportedConfigs = ExtraUEnv.supportedConfigs;
// Stub isSupportedConfig so we can confirm collections work
// @ts-ignore accessing private value
// @ts-expect-error accessing private value
ExtraUEnv.supportedConfigs = {
fdt: { key: 'custom_fdt_file', collection: false },
isolcpus: { key: 'extra_os_cmdline', collection: true },
@ -171,7 +171,7 @@ describe('extra_uEnv Configuration', () => {
// Restore stubs
(fsUtils.writeAndSyncFile as SinonStub).restore();
logWarningStub.restore();
// @ts-ignore accessing private value
// @ts-expect-error accessing private value
ExtraUEnv.supportedConfigs = previousSupportedConfigs;
});

@ -212,12 +212,11 @@ describe('extra_uEnv Configuration', () => {
});

it('normalizes variable value', () => {
[
{ input: { key: 'key', value: 'value' }, output: 'value' },
].forEach(({ input, output }) =>
expect(backend.processConfigVarValue(input.key, input.value)).to.equal(
output,
),
[{ input: { key: 'key', value: 'value' }, output: 'value' }].forEach(
({ input, output }) =>
expect(backend.processConfigVarValue(input.key, input.value)).to.equal(
output,
),
);
});

@ -10,17 +10,17 @@ describe('ODMDATA Configuration', () => {
|
||||
const backend = new Odmdata();
|
||||
let logWarningStub: SinonStub;
|
||||
let logErrorStub: SinonStub;
|
||||
// @ts-ignore accessing private vluae
|
||||
// @ts-expect-error accessing private vluae
|
||||
const previousConfigPath = Odmdata.bootConfigPath;
|
||||
const testConfigPath = resolve(process.cwd(), 'test/data/boot0.img');
|
||||
|
||||
before(() => {
|
||||
// @ts-ignore setting value of private variable
|
||||
// @ts-expect-error setting value of private variable
|
||||
Odmdata.bootConfigPath = testConfigPath;
|
||||
});
|
||||
|
||||
after(() => {
|
||||
// @ts-ignore setting value of private variable
|
||||
// @ts-expect-error setting value of private variable
|
||||
Odmdata.bootConfigPath = previousConfigPath;
|
||||
});
|
||||
|
||||
@ -60,9 +60,9 @@ describe('ODMDATA Configuration', () => {
|
||||
// Stub openFileStub with specific error
|
||||
openFileStub.rejects(log.error);
|
||||
try {
|
||||
// @ts-ignore accessing private value
|
||||
// @ts-expect-error accessing private value
|
||||
await backend.getFileHandle(testConfigPath);
|
||||
} catch (e) {
|
||||
} catch {
|
||||
// noop
|
||||
}
|
||||
// Check that correct message was logged
|
||||
@ -80,7 +80,7 @@ describe('ODMDATA Configuration', () => {
|
||||
|
||||
it('correctly parses configuration mode', async () => {
|
||||
for (const config of CONFIG_MODES) {
|
||||
// @ts-ignore accessing private value
|
||||
// @ts-expect-error accessing private value
|
||||
expect(backend.parseOptions(config.buffer)).to.deep.equal({
|
||||
configuration: config.mode,
|
||||
});
|
||||
@ -90,7 +90,7 @@ describe('ODMDATA Configuration', () => {
|
||||
it('logs error for malformed configuration mode', async () => {
|
||||
// Logs when configuration mode is unknown
|
||||
try {
|
||||
// @ts-ignore accessing private value
|
||||
// @ts-expect-error accessing private value
|
||||
backend.parseOptions(Buffer.from([0x9, 0x9, 0x9]));
|
||||
} catch (e) {
|
||||
// noop
|
||||
@ -102,9 +102,9 @@ describe('ODMDATA Configuration', () => {
|
||||
|
||||
// Logs when bytes don't match
|
||||
try {
|
||||
// @ts-ignore accessing private value
|
||||
// @ts-expect-error accessing private value
|
||||
backend.parseOptions(Buffer.from([0x1, 0x0, 0x0]));
|
||||
} catch (e) {
|
||||
} catch {
|
||||
// noop
|
||||
}
|
||||
// Check that correct message was logged
|
||||
@ -115,7 +115,7 @@ describe('ODMDATA Configuration', () => {
|
||||
|
||||
it('unlock/lock bootConfigPath RO access', async () => {
|
||||
const writeSpy = stub().resolves();
|
||||
// @ts-ignore accessing private value
|
||||
// @ts-expect-error accessing private value
|
||||
const handleStub = stub(backend, 'getFileHandle').resolves({
|
||||
write: writeSpy,
|
||||
close: async (): Promise<void> => {
|
||||
@ -123,11 +123,11 @@ describe('ODMDATA Configuration', () => {
|
||||
},
|
||||
});
|
||||
|
||||
// @ts-ignore accessing private value
|
||||
// @ts-expect-error accessing private value
|
||||
await backend.setReadOnly(false); // Try to unlock
|
||||
expect(writeSpy).to.be.calledWith('0');
|
||||
|
||||
// @ts-ignore accessing private value
|
||||
// @ts-expect-error accessing private value
|
||||
await backend.setReadOnly(true); // Try to lock
|
||||
expect(writeSpy).to.be.calledWith('1');
|
||||
|
||||
@ -135,7 +135,7 @@ describe('ODMDATA Configuration', () => {
|
||||
});
|
||||
|
||||
it('sets new config values', async () => {
|
||||
// @ts-ignore accessing private value
|
||||
// @ts-expect-error accessing private value
|
||||
const setROStub = stub(backend, 'setReadOnly');
|
||||
setROStub.resolves();
|
||||
// Get current config
|
||||
@ -189,12 +189,11 @@ describe('ODMDATA Configuration', () => {
|
||||
});
|
||||
|
||||
it('normalizes variable value', () => {
|
||||
[
|
||||
{ input: { key: 'key', value: 'value' }, output: 'value' },
|
||||
].forEach(({ input, output }) =>
|
||||
expect(backend.processConfigVarValue(input.key, input.value)).to.equal(
|
||||
output,
|
||||
),
|
||||
[{ input: { key: 'key', value: 'value' }, output: 'value' }].forEach(
|
||||
({ input, output }) =>
|
||||
expect(backend.processConfigVarValue(input.key, input.value)).to.equal(
|
||||
output,
|
||||
),
|
||||
);
|
||||
});
|
||||
|
||||
|
@ -118,7 +118,7 @@ describe('SupervisorAPI [V1 Endpoints]', () => {
|
||||
after(async () => {
|
||||
try {
|
||||
await api.stop();
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (e.message !== 'Server is not running.') {
|
||||
throw e;
|
||||
}
|
||||
@ -1065,11 +1065,9 @@ describe('SupervisorAPI [V1 Endpoints]', () => {
|
||||
|
||||
it('skips restarting hostname services if they are part of config-json.target', async () => {
|
||||
// stub servicePartOf to return the config-json.target we are looking for
|
||||
stub(dbus, 'servicePartOf').callsFake(
|
||||
async (): Promise<string> => {
|
||||
return 'config-json.target';
|
||||
},
|
||||
);
|
||||
stub(dbus, 'servicePartOf').callsFake(async (): Promise<string> => {
|
||||
return 'config-json.target';
|
||||
});
|
||||
|
||||
await unlinkAll(redsocksPath, noProxyPath);
|
||||
|
||||
@ -1223,11 +1221,9 @@ describe('SupervisorAPI [V1 Endpoints]', () => {
|
||||
|
||||
it('skips restarting proxy services when part of redsocks-conf.target', async () => {
|
||||
// stub servicePartOf to return the redsocks-conf.target we are looking for
|
||||
stub(dbus, 'servicePartOf').callsFake(
|
||||
async (): Promise<string> => {
|
||||
return 'redsocks-conf.target';
|
||||
},
|
||||
);
|
||||
stub(dbus, 'servicePartOf').callsFake(async (): Promise<string> => {
|
||||
return 'redsocks-conf.target';
|
||||
});
|
||||
// Test each proxy patch sequentially to prevent conflicts when writing to fs
|
||||
for (const key of Object.keys(validProxyReqs)) {
|
||||
const patchBodyValuesforKey: string[] | number[] =
|
||||
|
@ -62,7 +62,7 @@ describe('SupervisorAPI [V2 Endpoints]', () => {
|
||||
after(async () => {
|
||||
try {
|
||||
await api.stop();
|
||||
} catch (e) {
|
||||
} catch (e: any) {
|
||||
if (e.message !== 'Server is not running.') {
|
||||
throw e;
|
||||
}
|
||||
|
@ -396,10 +396,11 @@ describe('compose/app', () => {
|
||||
volumes: [volume],
|
||||
isTarget: true,
|
||||
});
|
||||
const recreateVolumeSteps = currentWithVolumesRemoved.nextStepsForAppUpdate(
|
||||
contextWithImages,
|
||||
target,
|
||||
);
|
||||
const recreateVolumeSteps =
|
||||
currentWithVolumesRemoved.nextStepsForAppUpdate(
|
||||
contextWithImages,
|
||||
target,
|
||||
);
|
||||
|
||||
expect(recreateVolumeSteps).to.have.length(1);
|
||||
expectSteps('createVolume', recreateVolumeSteps);
|
||||
@ -411,10 +412,11 @@ describe('compose/app', () => {
|
||||
volumes: [volume],
|
||||
});
|
||||
|
||||
const createServiceSteps = currentWithVolumeRecreated.nextStepsForAppUpdate(
|
||||
contextWithImages,
|
||||
target,
|
||||
);
|
||||
const createServiceSteps =
|
||||
currentWithVolumeRecreated.nextStepsForAppUpdate(
|
||||
contextWithImages,
|
||||
target,
|
||||
);
|
||||
expectSteps('start', createServiceSteps);
|
||||
});
|
||||
});
|
||||
|
@ -195,7 +195,7 @@ describe('compose/application-manager', () => {
|
||||
after(async () => {
|
||||
try {
|
||||
await testDb.destroy();
|
||||
} catch (e) {
|
||||
} catch {
|
||||
/* noop */
|
||||
}
|
||||
// Restore stubbed methods
|
||||
@ -216,15 +216,11 @@ describe('compose/application-manager', () => {
|
||||
},
|
||||
true,
|
||||
);
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [await createService({ running: false, appId: 1 })],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [await createService({ running: false, appId: 1 })],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
});
|
||||
|
||||
const steps = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -248,15 +244,11 @@ describe('compose/application-manager', () => {
|
||||
},
|
||||
true,
|
||||
);
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [await createService()],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [await createService()],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
});
|
||||
|
||||
const [killStep] = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -282,16 +274,12 @@ describe('compose/application-manager', () => {
|
||||
},
|
||||
true,
|
||||
);
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [await createService({ appId: 1 })],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
images: [],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [await createService({ appId: 1 })],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
images: [],
|
||||
});
|
||||
|
||||
const [fetchStep] = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -317,16 +305,12 @@ describe('compose/application-manager', () => {
|
||||
},
|
||||
true,
|
||||
);
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [await createService({ appId: 1 })],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
downloading: ['image-new'],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [await createService({ appId: 1 })],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
downloading: ['image-new'],
|
||||
});
|
||||
|
||||
const [noopStep, ...nextSteps] = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -360,25 +344,21 @@ describe('compose/application-manager', () => {
|
||||
},
|
||||
true,
|
||||
);
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [
|
||||
await createService(
|
||||
{
|
||||
image: 'image-old',
|
||||
labels,
|
||||
appId: 1,
|
||||
commit: 'old-release',
|
||||
},
|
||||
{ options: { imageInfo: { Id: 'sha256:image-old-id' } } },
|
||||
),
|
||||
],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [
|
||||
await createService(
|
||||
{
|
||||
image: 'image-old',
|
||||
labels,
|
||||
appId: 1,
|
||||
commit: 'old-release',
|
||||
},
|
||||
{ options: { imageInfo: { Id: 'sha256:image-old-id' } } },
|
||||
),
|
||||
],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
});
|
||||
|
||||
const [killStep] = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -414,25 +394,21 @@ describe('compose/application-manager', () => {
|
||||
},
|
||||
true,
|
||||
);
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [
|
||||
await createService(
|
||||
{
|
||||
image: 'image-old',
|
||||
labels,
|
||||
appId: 1,
|
||||
commit: 'old-release',
|
||||
},
|
||||
{ options: { imageInfo: { Id: 'sha256:image-old-id' } } },
|
||||
),
|
||||
],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [
|
||||
await createService(
|
||||
{
|
||||
image: 'image-old',
|
||||
labels,
|
||||
appId: 1,
|
||||
commit: 'old-release',
|
||||
},
|
||||
{ options: { imageInfo: { Id: 'sha256:image-old-id' } } },
|
||||
),
|
||||
],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
});
|
||||
|
||||
const [killStep] = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -468,23 +444,19 @@ describe('compose/application-manager', () => {
|
||||
},
|
||||
true,
|
||||
);
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [],
|
||||
images: [
|
||||
createImage({
|
||||
appId: 1,
|
||||
name: 'image-old',
|
||||
serviceName: 'main',
|
||||
dockerImageId: 'image-old-id',
|
||||
}),
|
||||
],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [],
|
||||
images: [
|
||||
createImage({
|
||||
appId: 1,
|
||||
name: 'image-old',
|
||||
serviceName: 'main',
|
||||
dockerImageId: 'image-old-id',
|
||||
}),
|
||||
],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
});
|
||||
|
||||
const [removeImage] = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -527,38 +499,34 @@ describe('compose/application-manager', () => {
|
||||
},
|
||||
true,
|
||||
);
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [
|
||||
await createService({
|
||||
appId: 1,
|
||||
commit: 'old-release',
|
||||
serviceName: 'main',
|
||||
composition: {
|
||||
depends_on: ['dep'],
|
||||
},
|
||||
}),
|
||||
await createService({
|
||||
appId: 1,
|
||||
commit: 'old-release',
|
||||
serviceName: 'dep',
|
||||
}),
|
||||
],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
downloading: ['dep-image'], // dep-image is still being downloaded
|
||||
images: [
|
||||
// main-image was already downloaded
|
||||
createImage({
|
||||
appId: 1,
|
||||
name: 'main-image',
|
||||
serviceName: 'main',
|
||||
}),
|
||||
],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [
|
||||
await createService({
|
||||
appId: 1,
|
||||
commit: 'old-release',
|
||||
serviceName: 'main',
|
||||
composition: {
|
||||
depends_on: ['dep'],
|
||||
},
|
||||
}),
|
||||
await createService({
|
||||
appId: 1,
|
||||
commit: 'old-release',
|
||||
serviceName: 'dep',
|
||||
}),
|
||||
],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
downloading: ['dep-image'], // dep-image is still being downloaded
|
||||
images: [
|
||||
// main-image was already downloaded
|
||||
createImage({
|
||||
appId: 1,
|
||||
name: 'main-image',
|
||||
serviceName: 'main',
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
const steps = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -601,48 +569,44 @@ describe('compose/application-manager', () => {
|
||||
true,
|
||||
);
|
||||
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [
|
||||
await createService({
|
||||
appId: 1,
|
||||
appUuid: 'appuuid',
|
||||
commit: 'old-release',
|
||||
serviceName: 'main',
|
||||
composition: {
|
||||
depends_on: ['dep'],
|
||||
},
|
||||
}),
|
||||
await createService({
|
||||
appId: 1,
|
||||
appUuid: 'appuuid',
|
||||
commit: 'old-release',
|
||||
serviceName: 'dep',
|
||||
}),
|
||||
],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
images: [
|
||||
// Both images have been downloaded
|
||||
createImage({
|
||||
appId: 1,
|
||||
appUuid: 'appuuid',
|
||||
name: 'main-image',
|
||||
serviceName: 'main',
|
||||
commit: 'new-release',
|
||||
}),
|
||||
createImage({
|
||||
appId: 1,
|
||||
appUuid: 'appuuid',
|
||||
name: 'dep-image',
|
||||
serviceName: 'dep',
|
||||
commit: 'new-release',
|
||||
}),
|
||||
],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [
|
||||
await createService({
|
||||
appId: 1,
|
||||
appUuid: 'appuuid',
|
||||
commit: 'old-release',
|
||||
serviceName: 'main',
|
||||
composition: {
|
||||
depends_on: ['dep'],
|
||||
},
|
||||
}),
|
||||
await createService({
|
||||
appId: 1,
|
||||
appUuid: 'appuuid',
|
||||
commit: 'old-release',
|
||||
serviceName: 'dep',
|
||||
}),
|
||||
],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
images: [
|
||||
// Both images have been downloaded
|
||||
createImage({
|
||||
appId: 1,
|
||||
appUuid: 'appuuid',
|
||||
name: 'main-image',
|
||||
serviceName: 'main',
|
||||
commit: 'new-release',
|
||||
}),
|
||||
createImage({
|
||||
appId: 1,
|
||||
appUuid: 'appuuid',
|
||||
name: 'dep-image',
|
||||
serviceName: 'dep',
|
||||
commit: 'new-release',
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
const steps = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -690,28 +654,24 @@ describe('compose/application-manager', () => {
|
||||
true,
|
||||
);
|
||||
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
images: [
|
||||
// Both images have been downloaded
|
||||
createImage({
|
||||
name: 'main-image',
|
||||
serviceName: 'main',
|
||||
commit: 'new-release',
|
||||
}),
|
||||
createImage({
|
||||
name: 'dep-image',
|
||||
serviceName: 'dep',
|
||||
commit: 'new-release',
|
||||
}),
|
||||
],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
images: [
|
||||
// Both images have been downloaded
|
||||
createImage({
|
||||
name: 'main-image',
|
||||
serviceName: 'main',
|
||||
commit: 'new-release',
|
||||
}),
|
||||
createImage({
|
||||
name: 'dep-image',
|
||||
serviceName: 'dep',
|
||||
commit: 'new-release',
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
const [startStep, ...nextSteps] = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -756,34 +716,30 @@ describe('compose/application-manager', () => {
|
||||
true,
|
||||
);
|
||||
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [
|
||||
await createService({
|
||||
image: 'dep-image',
|
||||
serviceName: 'dep',
|
||||
commit: 'new-release',
|
||||
}),
|
||||
],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
images: [
|
||||
// Both images have been downloaded
|
||||
createImage({
|
||||
name: 'main-image',
|
||||
serviceName: 'main',
|
||||
commit: 'new-release',
|
||||
}),
|
||||
createImage({
|
||||
name: 'dep-image',
|
||||
serviceName: 'dep',
|
||||
commit: 'new-release',
|
||||
}),
|
||||
],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [
|
||||
await createService({
|
||||
image: 'dep-image',
|
||||
serviceName: 'dep',
|
||||
commit: 'new-release',
|
||||
}),
|
||||
],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
images: [
|
||||
// Both images have been downloaded
|
||||
createImage({
|
||||
name: 'main-image',
|
||||
serviceName: 'main',
|
||||
commit: 'new-release',
|
||||
}),
|
||||
createImage({
|
||||
name: 'dep-image',
|
||||
serviceName: 'dep',
|
||||
commit: 'new-release',
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
const [startStep, ...nextSteps] = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -812,22 +768,20 @@ describe('compose/application-manager', () => {
|
||||
},
|
||||
true,
|
||||
);
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [await createService({ appId: 5, serviceName: 'old-service' })],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
images: [
|
||||
// Image has been downloaded
|
||||
createImage({
|
||||
name: 'main-image',
|
||||
serviceName: 'main',
|
||||
}),
|
||||
],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [
|
||||
await createService({ appId: 5, serviceName: 'old-service' }),
|
||||
],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
images: [
|
||||
// Image has been downloaded
|
||||
createImage({
|
||||
name: 'main-image',
|
||||
serviceName: 'main',
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
const steps = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -857,16 +811,12 @@ describe('compose/application-manager', () => {
|
||||
|
||||
it('should not remove an app volumes when they are no longer referenced', async () => {
|
||||
const targetApps = createApps({ networks: [DEFAULT_NETWORK] }, true);
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
volumes: [Volume.fromComposeObject('test-volume', 1, 'deadbeef')],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
volumes: [Volume.fromComposeObject('test-volume', 1, 'deadbeef')],
|
||||
});
|
||||
|
||||
const steps = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -883,17 +833,13 @@ describe('compose/application-manager', () => {
|
||||
|
||||
it('should remove volumes from previous applications', async () => {
|
||||
const targetApps = createApps({ networks: [DEFAULT_NETWORK] }, true);
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [],
|
||||
networks: [],
|
||||
// Volume with different id
|
||||
volumes: [Volume.fromComposeObject('test-volume', 2, 'deadbeef')],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [],
|
||||
networks: [],
|
||||
// Volume with different id
|
||||
volumes: [Volume.fromComposeObject('test-volume', 2, 'deadbeef')],
|
||||
});
|
||||
|
||||
const steps = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -916,24 +862,18 @@ describe('compose/application-manager', () => {
|
||||
{ services: [await createService()], networks: [DEFAULT_NETWORK] },
|
||||
true,
|
||||
);
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
});
|
||||
|
||||
const [
|
||||
ensureNetworkStep,
|
||||
...nextSteps
|
||||
] = await applicationManager.inferNextSteps(currentApps, targetApps, {
|
||||
downloading,
|
||||
availableImages,
|
||||
containerIdsByAppId,
|
||||
});
|
||||
const [ensureNetworkStep, ...nextSteps] =
|
||||
await applicationManager.inferNextSteps(currentApps, targetApps, {
|
||||
downloading,
|
||||
availableImages,
|
||||
containerIdsByAppId,
|
||||
});
|
||||
expect(ensureNetworkStep).to.deep.include({
|
||||
action: 'ensureSupervisorNetwork',
|
||||
});
|
||||
@ -955,17 +895,13 @@ describe('compose/application-manager', () => {
|
||||
},
|
||||
true,
|
||||
);
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [
|
||||
await createService({ labels }, { options: { listenPort: '48484' } }),
|
||||
],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [
|
||||
await createService({ labels }, { options: { listenPort: '48484' } }),
|
||||
],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
});
|
||||
|
||||
const [killStep] = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -995,15 +931,11 @@ describe('compose/application-manager', () => {
|
||||
},
|
||||
true,
|
||||
);
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [await createService()],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [await createService()],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
});
|
||||
|
||||
const [cleanupStep, ...nextSteps] = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -1036,30 +968,26 @@ describe('compose/application-manager', () => {
|
||||
},
|
||||
true,
|
||||
);
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
images: [
|
||||
// An image for a service that no longer exists
|
||||
createImage({
|
||||
name: 'old-image',
|
||||
appId: 5,
|
||||
serviceName: 'old-service',
|
||||
dockerImageId: 'sha256:aaaa',
|
||||
}),
|
||||
createImage({
|
||||
name: 'main-image',
|
||||
appId: 1,
|
||||
serviceName: 'main',
|
||||
dockerImageId: 'sha256:bbbb',
|
||||
}),
|
||||
],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
images: [
|
||||
// An image for a service that no longer exists
|
||||
createImage({
|
||||
name: 'old-image',
|
||||
appId: 5,
|
||||
serviceName: 'old-service',
|
||||
dockerImageId: 'sha256:aaaa',
|
||||
}),
|
||||
createImage({
|
||||
name: 'main-image',
|
||||
appId: 1,
|
||||
serviceName: 'main',
|
||||
dockerImageId: 'sha256:bbbb',
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
const [removeImageStep] = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -1088,36 +1016,32 @@ describe('compose/application-manager', () => {
|
||||
},
|
||||
true,
|
||||
);
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [
|
||||
await createService(
|
||||
{ image: 'main-image' },
|
||||
// Target has a matching image already
|
||||
{ options: { imageInfo: { Id: 'sha256:bbbb' } } },
|
||||
),
|
||||
],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
images: [
|
||||
// An image for a service that no longer exists
|
||||
createImage({
|
||||
name: 'old-image',
|
||||
appId: 5,
|
||||
serviceName: 'old-service',
|
||||
dockerImageId: 'sha256:aaaa',
|
||||
}),
|
||||
createImage({
|
||||
name: 'main-image',
|
||||
appId: 1,
|
||||
serviceName: 'main',
|
||||
dockerImageId: 'sha256:bbbb',
|
||||
}),
|
||||
],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [
|
||||
await createService(
|
||||
{ image: 'main-image' },
|
||||
// Target has a matching image already
|
||||
{ options: { imageInfo: { Id: 'sha256:bbbb' } } },
|
||||
),
|
||||
],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
images: [
|
||||
// An image for a service that no longer exists
|
||||
createImage({
|
||||
name: 'old-image',
|
||||
appId: 5,
|
||||
serviceName: 'old-service',
|
||||
dockerImageId: 'sha256:aaaa',
|
||||
}),
|
||||
createImage({
|
||||
name: 'main-image',
|
||||
appId: 1,
|
||||
serviceName: 'main',
|
||||
dockerImageId: 'sha256:bbbb',
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
const [removeImageStep] = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -1152,16 +1076,12 @@ describe('compose/application-manager', () => {
|
||||
},
|
||||
true,
|
||||
);
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
images: [], // no available images exist
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [],
|
||||
networks: [DEFAULT_NETWORK],
|
||||
images: [], // no available images exist
|
||||
});
|
||||
|
||||
const [saveImageStep] = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
@ -1207,35 +1127,31 @@ describe('compose/application-manager', () => {
|
||||
},
|
||||
true,
|
||||
);
|
||||
const {
|
||||
currentApps,
|
||||
availableImages,
|
||||
downloading,
|
||||
containerIdsByAppId,
|
||||
} = createCurrentState({
|
||||
services: [],
|
||||
networks: [
|
||||
// Default networks for two apps
|
||||
Network.fromComposeObject('default', 1, 'app-one', {}),
|
||||
Network.fromComposeObject('default', 2, 'app-two', {}),
|
||||
],
|
||||
images: [
|
||||
createImage({
|
||||
name: 'main-image-1',
|
||||
appId: 1,
|
||||
appUuid: 'app-one',
|
||||
serviceName: 'main',
|
||||
commit: 'commit-for-app-1',
|
||||
}),
|
||||
createImage({
|
||||
name: 'main-image-2',
|
||||
appId: 2,
|
||||
appUuid: 'app-two',
|
||||
serviceName: 'main',
|
||||
commit: 'commit-for-app-2',
|
||||
}),
|
||||
],
|
||||
});
|
||||
const { currentApps, availableImages, downloading, containerIdsByAppId } =
|
||||
createCurrentState({
|
||||
services: [],
|
||||
networks: [
|
||||
// Default networks for two apps
|
||||
Network.fromComposeObject('default', 1, 'app-one', {}),
|
||||
Network.fromComposeObject('default', 2, 'app-two', {}),
|
||||
],
|
||||
images: [
|
||||
createImage({
|
||||
name: 'main-image-1',
|
||||
appId: 1,
|
||||
appUuid: 'app-one',
|
||||
serviceName: 'main',
|
||||
commit: 'commit-for-app-1',
|
||||
}),
|
||||
createImage({
|
||||
name: 'main-image-2',
|
||||
appId: 2,
|
||||
appUuid: 'app-two',
|
||||
serviceName: 'main',
|
||||
commit: 'commit-for-app-2',
|
||||
}),
|
||||
],
|
||||
});
|
||||
|
||||
const steps = await applicationManager.inferNextSteps(
|
||||
currentApps,
|
||||
|
@ -57,8 +57,7 @@ describe('compose/images', () => {
|
||||
|
||||
it('finds image by matching digest on the database', async () => {
|
||||
const dbImage = createDBImage({
|
||||
name:
|
||||
'registry2.balena-cloud.com/v2/aaaaa@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
name: 'registry2.balena-cloud.com/v2/aaaaa@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
dockerImageId:
|
||||
'sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7',
|
||||
});
|
||||
@ -67,8 +66,7 @@ describe('compose/images', () => {
|
||||
const images = [
|
||||
createImage(
|
||||
{
|
||||
Id:
|
||||
'sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7',
|
||||
Id: 'sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7',
|
||||
},
|
||||
{
|
||||
References: [
|
||||
@ -108,8 +106,7 @@ describe('compose/images', () => {
|
||||
const images = [
|
||||
createImage(
|
||||
{
|
||||
Id:
|
||||
'sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7',
|
||||
Id: 'sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7',
|
||||
},
|
||||
{
|
||||
References: ['some-image:some-tag'],
|
||||
@ -149,8 +146,7 @@ describe('compose/images', () => {
|
||||
const images = [
|
||||
createImage(
|
||||
{
|
||||
Id:
|
||||
'sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7',
|
||||
Id: 'sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7',
|
||||
},
|
||||
{
|
||||
References: [
|
||||
@ -186,8 +182,7 @@ describe('compose/images', () => {
|
||||
const images = [
|
||||
createImage(
|
||||
{
|
||||
Id:
|
||||
'sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7',
|
||||
Id: 'sha256:f1154d76c731f04711e5856b6e6858730e3023d9113124900ac65c2ccc90e8e7',
|
||||
},
|
||||
{
|
||||
References: [
|
||||
@ -262,8 +257,7 @@ describe('compose/images', () => {
|
||||
dockerImageId: 'sha256:second-image-id',
|
||||
}),
|
||||
createDBImage({
|
||||
name:
|
||||
'registry2.balena-cloud.com/v2/three@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf558',
|
||||
name: 'registry2.balena-cloud.com/v2/three@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf558',
|
||||
serviceName: 'app_3',
|
||||
// Third image has different name but same docker id
|
||||
dockerImageId: 'sha256:second-image-id',
|
||||
@ -381,8 +375,7 @@ describe('compose/images', () => {
|
||||
it('removes image from DB and engine when there is a single DB image with matching name', async () => {
|
||||
// Newer image
|
||||
const imageToRemove = createDBImage({
|
||||
name:
|
||||
'registry2.balena-cloud.com/v2/one@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
name: 'registry2.balena-cloud.com/v2/one@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
dockerImageId: 'sha256:image-id-one',
|
||||
});
|
||||
|
||||
@ -390,8 +383,7 @@ describe('compose/images', () => {
|
||||
await testDb.models('image').insert([
|
||||
imageToRemove,
|
||||
createDBImage({
|
||||
name:
|
||||
'registry2.balena-cloud.com/v2/two@sha256:12345a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
name: 'registry2.balena-cloud.com/v2/two@sha256:12345a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
dockerImageId: 'sha256:image-id-two',
|
||||
}),
|
||||
]);
|
||||
@ -469,14 +461,12 @@ describe('compose/images', () => {
|
||||
|
||||
it('removes the requested image even when there are multiple DB images with same docker ID', async () => {
|
||||
const imageToRemove = createDBImage({
|
||||
name:
|
||||
'registry2.balena-cloud.com/v2/one@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
name: 'registry2.balena-cloud.com/v2/one@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
dockerImageId: 'sha256:image-id-one',
|
||||
});
|
||||
|
||||
const imageWithSameDockerImageId = createDBImage({
|
||||
name:
|
||||
'registry2.balena-cloud.com/v2/two@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
name: 'registry2.balena-cloud.com/v2/two@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
// Same imageId
|
||||
dockerImageId: 'sha256:image-id-one',
|
||||
});
|
||||
@ -554,14 +544,12 @@ describe('compose/images', () => {
|
||||
|
||||
it('removes image from DB by tag when deltas are being used', async () => {
|
||||
const imageToRemove = createDBImage({
|
||||
name:
|
||||
'registry2.balena-cloud.com/v2/one@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
name: 'registry2.balena-cloud.com/v2/one@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
dockerImageId: 'sha256:image-one-id',
|
||||
});
|
||||
|
||||
const imageWithSameDockerImageId = createDBImage({
|
||||
name:
|
||||
'registry2.balena-cloud.com/v2/two@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
name: 'registry2.balena-cloud.com/v2/two@sha256:2c969a1ba1c6bc10df53481f48c6a74dbd562cfb41ba58f81beabd03facf5582',
|
||||
// Same docker id
|
||||
dockerImageId: 'sha256:image-one-id',
|
||||
});
|
||||
|
@ -40,12 +40,12 @@ describe('lib/update-lock', () => {
|
||||
|
||||
// TODO: Remove these hooks when we don't need './test/data' as test process's rootMountPoint
|
||||
before(() => {
|
||||
// @ts-ignore // Set rootMountPoint for mockFs
|
||||
// @ts-expect-error // Set rootMountPoint for mockFs
|
||||
constants.rootMountPoint = '/mnt/root';
|
||||
});
|
||||
|
||||
after(() => {
|
||||
// @ts-ignore
|
||||
// @ts-expect-error
|
||||
constants.rootMountPoint = process.env.ROOT_MOUNTPOINT;
|
||||
});
|
||||
|
||||
@ -125,7 +125,7 @@ describe('lib/update-lock', () => {
|
||||
lockSpy = spy(lockfile, 'lock');
|
||||
// lockfile.lock calls exec to interface with the lockfile binary,
|
||||
// so mock it here as we don't have access to the binary in the test env
|
||||
// @ts-ignore
|
||||
// @ts-expect-error
|
||||
execStub = stub(fsUtils, 'exec').callsFake(async (command, opts) => {
|
||||
// Sanity check for the command call
|
||||
expect(command.trim().startsWith('lockfile')).to.be.true;
|
||||
|
@ -9,7 +9,7 @@ export async function createDB() {
|
||||
// for testing we use an in memory database
|
||||
process.env.DATABASE_PATH = ':memory:';
|
||||
|
||||
// @ts-ignore
|
||||
// @ts-expect-error
|
||||
constants.databasePath = process.env.DATABASE_PATH;
|
||||
|
||||
// Cleanup the module cache in order to have it reloaded in the local context
|
||||
@ -71,7 +71,7 @@ export async function createDB() {
|
||||
(db.upsertModel as sinon.SinonStub).restore();
|
||||
|
||||
// Restore the constants
|
||||
// @ts-ignore
|
||||
// @ts-expect-error
|
||||
constants.databasePath = process.env.DATABASE_PATH;
|
||||
|
||||
// Cleanup the module cache in order to have it reloaded
|
||||
|
@ -37,7 +37,7 @@ export function setImages(images: Image[]) {
|
||||
|
||||
function stubImages() {
|
||||
// Set the functions for this model (add them as you need for your test cases)
|
||||
MOCKED_MODELS['image'] = ({
|
||||
MOCKED_MODELS['image'] = {
|
||||
select: () => {
|
||||
return {
|
||||
where: async (condition: Partial<Image>) =>
|
||||
@ -64,5 +64,5 @@ function stubImages() {
|
||||
},
|
||||
};
|
||||
},
|
||||
} as unknown) as QueryBuilder;
|
||||
} as unknown as QueryBuilder;
|
||||
}
|
||||
|
@ -94,8 +94,7 @@ const mockService = (overrides?: Partial<Service>) => {
|
||||
const mockImage = (overrides?: Partial<Image>) => {
|
||||
return {
|
||||
...{
|
||||
name:
|
||||
'registry2.balena-cloud.com/v2/e2bf6410ffc30850e96f5071cdd1dca8@sha256:e2e87a8139b8fc14510095b210ad652d7d5badcc64fdc686cbf749d399fba15e',
|
||||
name: 'registry2.balena-cloud.com/v2/e2bf6410ffc30850e96f5071cdd1dca8@sha256:e2e87a8139b8fc14510095b210ad652d7d5badcc64fdc686cbf749d399fba15e',
|
||||
appId: 1658654,
|
||||
serviceName: 'main',
|
||||
imageId: 2885946,
|
||||
|
@ -75,7 +75,7 @@ registerOverride(
|
||||
export function registerOverride<
|
||||
T extends DockerodeFunction,
|
||||
P extends Parameters<dockerode[T]>,
|
||||
R extends ReturnType<dockerode[T]>
|
||||
R extends ReturnType<dockerode[T]>,
|
||||
>(name: T, fn: (...args: P) => R) {
|
||||
console.log(`Overriding ${name}...`);
|
||||
overrides[name] = fn;
|
||||
@ -207,7 +207,7 @@ function createMockedDockerode(data: TestData) {
|
||||
return mockedDockerode;
|
||||
}
|
||||
|
||||
type Prototype = Dictionary<(...args: any[]) => any>;
|
||||
type Prototype = { [key: string]: any };
|
||||
function clonePrototype(prototype: Prototype): Prototype {
|
||||
const clone: Prototype = {};
|
||||
Object.getOwnPropertyNames(prototype).forEach((fn) => {
|
||||
|
@ -11,29 +11,25 @@ type DeepPartial<T> = {
|
||||
};
|
||||
|
||||
// Partial container inspect info for receiving as testing data
|
||||
export type PartialContainerInspectInfo = DeepPartial<
|
||||
dockerode.ContainerInspectInfo
|
||||
> & {
|
||||
Id: string;
|
||||
};
|
||||
export type PartialContainerInspectInfo =
|
||||
DeepPartial<dockerode.ContainerInspectInfo> & {
|
||||
Id: string;
|
||||
};
|
||||
|
||||
export type PartialNetworkInspectInfo = DeepPartial<
|
||||
dockerode.NetworkInspectInfo
|
||||
> & {
|
||||
Id: string;
|
||||
};
|
||||
export type PartialNetworkInspectInfo =
|
||||
DeepPartial<dockerode.NetworkInspectInfo> & {
|
||||
Id: string;
|
||||
};
|
||||
|
||||
export type PartialVolumeInspectInfo = DeepPartial<
|
||||
dockerode.VolumeInspectInfo
|
||||
> & {
|
||||
Name: string;
|
||||
};
|
||||
export type PartialVolumeInspectInfo =
|
||||
DeepPartial<dockerode.VolumeInspectInfo> & {
|
||||
Name: string;
|
||||
};
|
||||
|
||||
export type PartialImageInspectInfo = DeepPartial<
|
||||
dockerode.ImageInspectInfo
|
||||
> & {
|
||||
Id: string;
|
||||
};
|
||||
export type PartialImageInspectInfo =
|
||||
DeepPartial<dockerode.ImageInspectInfo> & {
|
||||
Id: string;
|
||||
};
|
||||
|
||||
type Methods<T> = {
|
||||
[K in keyof T]: T[K] extends (...args: any) => any ? T[K] : never;
|
||||
@ -47,7 +43,9 @@ function createFake<Prototype extends object>(prototype: Prototype) {
|
||||
...res,
|
||||
[fn]: () => {
|
||||
throw Error(
|
||||
`Fake method not implemented: ${prototype.constructor.name}.${fn}()`,
|
||||
`Fake method not implemented: ${
|
||||
prototype.constructor.name
|
||||
}.${fn.toString()}()`,
|
||||
);
|
||||
},
|
||||
}),
|
||||
@ -318,14 +316,8 @@ export function createImage(
|
||||
const createImageInspectInfo = (
|
||||
partialImage: PartialImageInspectInfo,
|
||||
): dockerode.ImageInspectInfo => {
|
||||
const {
|
||||
Id,
|
||||
ContainerConfig,
|
||||
Config,
|
||||
GraphDriver,
|
||||
RootFS,
|
||||
...Info
|
||||
} = partialImage;
|
||||
const { Id, ContainerConfig, Config, GraphDriver, RootFS, ...Info } =
|
||||
partialImage;
|
||||
|
||||
return {
|
||||
Id,
|
||||
@ -903,9 +895,9 @@ export class MockEngine {
|
||||
}
|
||||
|
||||
export function createMockerode(engine: MockEngine) {
|
||||
const dockerodeStubs: Stubs<dockerode> = (Object.getOwnPropertyNames(
|
||||
dockerode.prototype,
|
||||
) as (keyof dockerode)[])
|
||||
const dockerodeStubs: Stubs<dockerode> = (
|
||||
Object.getOwnPropertyNames(dockerode.prototype) as (keyof dockerode)[]
|
||||
)
|
||||
.filter((fn) => typeof dockerode.prototype[fn] === 'function')
|
||||
.reduce((stubMap, fn) => {
|
||||
const stub = sinon.stub(dockerode.prototype, fn);
|
||||
|
@ -24,13 +24,13 @@ export = async function () {
|
||||
|
||||
try {
|
||||
fs.unlinkSync(process.env.DATABASE_PATH_2!);
|
||||
} catch (e) {
|
||||
} catch {
|
||||
/* ignore /*/
|
||||
}
|
||||
|
||||
try {
|
||||
fs.unlinkSync(process.env.DATABASE_PATH_3!);
|
||||
} catch (e) {
|
||||
} catch {
|
||||
/* ignore /*/
|
||||
}
|
||||
|
||||
@ -58,7 +58,7 @@ export = async function () {
|
||||
'./test/data/config-apibinder-offline2.json',
|
||||
fs.readFileSync('./test/data/testconfig-apibinder-offline2.json'),
|
||||
);
|
||||
} catch (e) {
|
||||
} catch {
|
||||
/* ignore /*/
|
||||
}
|
||||
|
||||
|
@ -155,9 +155,9 @@ describe('lib/fs-utils', () => {
|
||||
|
||||
it("should return the paths of one or more files as they exist on host OS's root", async () => {
|
||||
expect(fsUtils.getPathOnHost(testFileName1)).to.deep.equal(testFile1);
|
||||
expect(
|
||||
fsUtils.getPathOnHost(testFileName1, testFileName2),
|
||||
).to.deep.equal([testFile1, testFile2]);
|
||||
expect(fsUtils.getPathOnHost(testFileName1, testFileName2)).to.deep.equal(
|
||||
[testFile1, testFile2],
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
|
@ -12,7 +12,6 @@ describe('System information', () => {
stub(systeminformation, 'mem').resolves(mockMemory);
stub(systeminformation, 'currentLoad').resolves(mockCPU.load);
stub(systeminformation, 'cpuTemperature').resolves(mockCPU.temp);
// @ts-ignore TS thinks we can't return a buffer...
stub(fs, 'readFile').resolves(mockCPU.idBuffer);
stub(fsUtils, 'exec');
});
@ -38,7 +38,7 @@ const lookForOptionalDeps = function (sourceDir) {
packageJson = JSON.parse(
fs.readFileSync(path.join(sourceDir, dir, '/package.json'), 'utf8'),
);
} catch (e) {
} catch {
continue;
}
if (packageJson.optionalDependencies != null) {