Merge pull request #1325 from balena-io/update-balena-lint

Update to @balena/lint 5.x
This commit is contained in:
Page- 2020-05-15 13:02:46 +01:00 committed by GitHub
commit 4cf242c976
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
76 changed files with 1379 additions and 2634 deletions

View File

@ -4,6 +4,6 @@
// Also, JSONStream starts a pipe between stdin and stdout if module.parent is undefined.
// This pipe can fail throwing an uncaught exception, so we fake a module.parent to prevent this.
// See https://github.com/dominictarr/JSONStream/issues/129
module.exports = function(source) {
module.exports = function (source) {
return 'module.parent = {};\n' + source.toString().replace(/^#! .*\n/, '');
};

View File

@ -1,7 +1,7 @@
// knex migrations use dynamic requires which break with webpack.
// This hack makes the migrations directory a constant so that at least we can use webpack contexts for the
// require.
module.exports = function(source) {
module.exports = function (source) {
return source
.toString()
.replace(

2783
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@ -17,7 +17,7 @@
"test:build": "npm run typescript:test-build && npm run coffeescript:test && npm run testitems:copy && npm run packagejson:copy",
"test:fast": "TEST=1 mocha --opts test/fast-mocha.opts",
"test:debug": "npm run test:build && TEST=1 mocha --inspect-brk",
"prettify": "balena-lint -e ts -e js --typescript --fix src/ test/ typings/",
"prettify": "balena-lint -e ts -e js --typescript --fix src/ test/ typings/ build-utils/",
"typescript:test-build": "tsc --project tsconfig.json",
"typescript:release": "tsc --project tsconfig.release.json && cp -r build/src/* build && rm -rf build/src",
"coffeescript:test": "coffee -m -c -o build .",
@ -38,7 +38,7 @@
},
"devDependencies": {
"@balena/contrato": "^0.2.1",
"@balena/lint": "^4.1.0",
"@balena/lint": "^5.1.0",
"@types/bluebird": "^3.5.30",
"@types/chai": "^4.2.11",
"@types/chai-as-promised": "^7.1.2",

View File

@ -206,7 +206,7 @@ export class APIBinder {
// When we've provisioned, try to load the backup. We
// must wait for the provisioning because we need a
// target state on which to apply the backup
globalEventBus.getInstance().once('targetStateChanged', async state => {
globalEventBus.getInstance().once('targetStateChanged', async (state) => {
await loadBackupFromMigration(
this.deviceState,
state,
@ -394,7 +394,7 @@ export class APIBinder {
},
})) as Array<Dictionary<unknown>>;
return tags.map(tag => {
return tags.map((tag) => {
// Do some type safe decoding and throw if we get an unexpected value
const id = t.number.decode(tag.id);
const name = t.string.decode(tag.tag_key);
@ -577,7 +577,7 @@ export class APIBinder {
this.deviceState.triggerApplyTarget({ force, isFromApi });
}
})
.tapCatch(ContractValidationError, ContractViolationError, e => {
.tapCatch(ContractValidationError, ContractViolationError, (e) => {
log.error(`Could not store target state for device: ${e}`);
// the dashboard does not display lines correctly,
// split them explicitly here
@ -593,7 +593,7 @@ export class APIBinder {
e instanceof ContractValidationError ||
e instanceof ContractViolationError
),
err => {
(err) => {
log.error(`Failed to get target state for device: ${err}`);
},
)
@ -944,7 +944,7 @@ export class APIBinder {
}
private lockGetTarget() {
return writeLock('getTarget').disposer(release => {
return writeLock('getTarget').disposer((release) => {
release();
});
}
@ -960,7 +960,7 @@ export class APIBinder {
if (apiBinder.readyForUpdates) {
this.config
.get('instantUpdates')
.then(instantUpdates => {
.then((instantUpdates) => {
if (instantUpdates) {
apiBinder
.getAndSetTargetState(req.body.force, true)

View File

@ -42,7 +42,7 @@ import { serviceAction } from './device-api/common';
const readFileAsync = Promise.promisify(fs.readFile);
// TODO: move this to an Image class?
const imageForService = service => ({
const imageForService = (service) => ({
name: service.imageName,
appId: service.appId,
serviceId: service.serviceId,
@ -52,7 +52,7 @@ const imageForService = service => ({
dependent: 0,
});
const fetchAction = service => ({
const fetchAction = (service) => ({
action: 'fetch',
image: imageForService(service),
serviceId: service.serviceId,
@ -61,7 +61,7 @@ const fetchAction = service => ({
// TODO: implement additional v2 endpoints
// Some v1 endpoints only work for single-container apps as they assume the app has a single service.
const createApplicationManagerRouter = function(applications) {
const createApplicationManagerRouter = function (applications) {
const router = express.Router();
router.use(bodyParser.urlencoded({ extended: true, limit: '10mb' }));
router.use(bodyParser.json({ limit: '10mb' }));
@ -216,7 +216,7 @@ export class ApplicationManager extends EventEmitter {
this.db,
);
this.config.on('change', changedConfig => {
this.config.on('change', (changedConfig) => {
if (changedConfig.appUpdatePollInterval) {
this.images.appUpdatePollInterval = changedConfig.appUpdatePollInterval;
}
@ -231,10 +231,10 @@ export class ApplicationManager extends EventEmitter {
images: this.images,
config: this.config,
callbacks: {
containerStarted: id => {
containerStarted: (id) => {
this._containerStarted[id] = true;
},
containerKilled: id => {
containerKilled: (id) => {
delete this._containerStarted[id];
},
fetchStart: () => {
@ -243,10 +243,10 @@ export class ApplicationManager extends EventEmitter {
fetchEnd: () => {
this.fetchesInProgress -= 1;
},
fetchTime: time => {
fetchTime: (time) => {
this.timeSpentFetching += time;
},
stateReport: state => this.reportCurrentState(state),
stateReport: (state) => this.reportCurrentState(state),
bestDeltaSource: this.bestDeltaSource,
},
});
@ -265,15 +265,17 @@ export class ApplicationManager extends EventEmitter {
init() {
return this.config
.get('appUpdatePollInterval')
.then(interval => {
.then((interval) => {
this.images.appUpdatePollInterval = interval;
return this.images.cleanupDatabase();
})
.then(() => {
const cleanup = () => {
return this.docker.listContainers({ all: true }).then(containers => {
return this.logger.clearOutOfDateDBLogs(_.map(containers, 'Id'));
});
return this.docker
.listContainers({ all: true })
.then((containers) => {
return this.logger.clearOutOfDateDBLogs(_.map(containers, 'Id'));
});
};
// Rather than relying on removing out of date database entries when we're no
// longer using them, set a task that runs periodically to clear out the database
@ -302,7 +304,7 @@ export class ApplicationManager extends EventEmitter {
this.services.getStatus(),
this.images.getStatus(),
this.config.get('currentCommit'),
function(services, images, currentCommit) {
function (services, images, currentCommit) {
const apps = {};
const dependent = {};
let releaseId = null;
@ -422,7 +424,7 @@ export class ApplicationManager extends EventEmitter {
// multi-app warning!
// This is just wrong on every level
_.each(apps, app => {
_.each(apps, (app) => {
app.commit = currentCommit;
});
@ -450,7 +452,7 @@ export class ApplicationManager extends EventEmitter {
}
getTargetApp(appId) {
return this.targetStateWrapper.getTargetApp(appId).then(app => {
return this.targetStateWrapper.getTargetApp(appId).then((app) => {
if (app == null) {
return;
}
@ -524,7 +526,7 @@ export class ApplicationManager extends EventEmitter {
// Returns true if a service matches its target except it should be running and it is not, but we've
// already started it before. In this case it means it just exited so we don't want to start it again.
const alreadyStarted = serviceId => {
const alreadyStarted = (serviceId) => {
return (
currentServicesPerId[serviceId].isEqualExceptForRunningState(
targetServicesPerId[serviceId],
@ -537,7 +539,7 @@ export class ApplicationManager extends EventEmitter {
const needUpdate = _.filter(
toBeMaybeUpdated,
serviceId =>
(serviceId) =>
!currentServicesPerId[serviceId].isEqual(
targetServicesPerId[serviceId],
containerIds,
@ -572,7 +574,7 @@ export class ApplicationManager extends EventEmitter {
const toBeUpdated = _.filter(
_.intersection(targetNames, currentNames),
name => !current[name].isEqualConfig(target[name]),
(name) => !current[name].isEqualConfig(target[name]),
);
for (const name of toBeUpdated) {
outputPairs.push({
@ -605,16 +607,17 @@ export class ApplicationManager extends EventEmitter {
}
const hasNetwork = _.some(
networkPairs,
pair => `${service.appId}_${pair.current?.name}` === service.networkMode,
(pair) =>
`${service.appId}_${pair.current?.name}` === service.networkMode,
);
if (hasNetwork) {
return true;
}
const hasVolume = _.some(service.volumes, function(volume) {
const hasVolume = _.some(service.volumes, function (volume) {
const name = _.split(volume, ':')[0];
return _.some(
volumePairs,
pair => `${service.appId}_${pair.current?.name}` === name,
(pair) => `${service.appId}_${pair.current?.name}` === name,
);
});
return hasVolume;
@ -629,8 +632,8 @@ export class ApplicationManager extends EventEmitter {
pendingPairs,
) {
// for dependsOn, check no install or update pairs have that service
const dependencyUnmet = _.some(target.dependsOn, dependency =>
_.some(pendingPairs, pair => pair.target?.serviceName === dependency),
const dependencyUnmet = _.some(target.dependsOn, (dependency) =>
_.some(pendingPairs, (pair) => pair.target?.serviceName === dependency),
);
if (dependencyUnmet) {
return false;
@ -639,12 +642,12 @@ export class ApplicationManager extends EventEmitter {
if (
_.some(
networkPairs,
pair => `${target.appId}_${pair.target?.name}` === target.networkMode,
(pair) => `${target.appId}_${pair.target?.name}` === target.networkMode,
)
) {
return false;
}
const volumeUnmet = _.some(target.volumes, function(volumeDefinition) {
const volumeUnmet = _.some(target.volumes, function (volumeDefinition) {
const [sourceName, destName] = volumeDefinition.split(':');
if (destName == null) {
// If this is not a named volume, ignore it
@ -652,7 +655,7 @@ export class ApplicationManager extends EventEmitter {
}
return _.some(
volumePairs,
pair => `${target.appId}_${pair.target?.name}` === sourceName,
(pair) => `${target.appId}_${pair.target?.name}` === sourceName,
);
});
return !volumeUnmet;
@ -681,7 +684,7 @@ export class ApplicationManager extends EventEmitter {
if (
!_.some(
availableImages,
image =>
(image) =>
image.dockerImageId === dependencyService.image ||
Images.isSameImage(image, { name: dependencyService.imageName }),
)
@ -702,7 +705,7 @@ export class ApplicationManager extends EventEmitter {
) {
// Check none of the currentApp.services use this network or volume
if (current != null) {
const dependencies = _.filter(currentApp.services, service =>
const dependencies = _.filter(currentApp.services, (service) =>
dependencyComparisonFn(service, current),
);
if (_.isEmpty(dependencies)) {
@ -746,7 +749,7 @@ export class ApplicationManager extends EventEmitter {
_nextStepsForVolume({ current, target }, currentApp, changingPairs) {
// Check none of the currentApp.services use this network or volume
const dependencyComparisonFn = (service, curr) =>
_.some(service.config.volumes, function(volumeDefinition) {
_.some(service.config.volumes, function (volumeDefinition) {
const [sourceName, destName] = volumeDefinition.split(':');
return (
destName != null && sourceName === `${service.appId}_${curr?.name}`
@ -815,7 +818,7 @@ export class ApplicationManager extends EventEmitter {
if (!localMode) {
needsDownload = !_.some(
availableImages,
image =>
(image) =>
image.dockerImageId === target?.config.image ||
Images.isSameImage(image, { name: target.imageName }),
);
@ -1021,11 +1024,11 @@ export class ApplicationManager extends EventEmitter {
}
const appId = targetApp.appId ?? currentApp.appId;
return _.map(steps, step => _.assign({}, step, { appId }));
return _.map(steps, (step) => _.assign({}, step, { appId }));
}
normaliseAppForDB(app) {
const services = _.map(app.services, function(s, serviceId) {
const services = _.map(app.services, function (s, serviceId) {
const service = _.clone(s);
service.appId = app.appId;
service.releaseId = app.releaseId;
@ -1033,10 +1036,10 @@ export class ApplicationManager extends EventEmitter {
service.commit = app.commit;
return service;
});
return Promise.map(services, service => {
return Promise.map(services, (service) => {
service.image = this.images.normalise(service.image);
return Promise.props(service);
}).then(function($services) {
}).then(function ($services) {
const dbApp = {
appId: app.appId,
commit: app.commit,
@ -1056,7 +1059,7 @@ export class ApplicationManager extends EventEmitter {
// this in a bluebird promise until we convert this to typescript
return Promise.resolve(this.images.inspectByName(service.image))
.catchReturn(NotFoundError, undefined)
.then(function(imageInfo) {
.then(function (imageInfo) {
const serviceOpts = {
serviceName: service.serviceName,
imageInfo,
@ -1134,9 +1137,9 @@ export class ApplicationManager extends EventEmitter {
},
);
return Promise.map(JSON.parse(app.services), service =>
return Promise.map(JSON.parse(app.services), (service) =>
this.createTargetService(service, configOpts),
).then(services => {
).then((services) => {
// If a named volume is defined in a service but NOT in the volumes of the compose file, we add it app-wide so that we can track it and purge it
// !! DEPRECATED, WILL BE REMOVED IN NEXT MAJOR RELEASE !!
for (const s of services) {
@ -1167,14 +1170,14 @@ export class ApplicationManager extends EventEmitter {
setTarget(apps, dependent, source, maybeTrx) {
const setInTransaction = (filtered, trx) => {
return Promise.try(() => {
const appsArray = _.map(filtered, function(app, appId) {
const appsArray = _.map(filtered, function (app, appId) {
const appClone = _.clone(app);
appClone.appId = checkInt(appId);
appClone.source = source;
return appClone;
});
return Promise.map(appsArray, this.normaliseAppForDB)
.then(appsForDB => {
.then((appsForDB) => {
return this.targetStateWrapper.setTargetApps(appsForDB, trx);
})
.then(() =>
@ -1243,7 +1246,7 @@ export class ApplicationManager extends EventEmitter {
.then(() => {
this._targetVolatilePerImageId = {};
})
.finally(function() {
.finally(function () {
if (!_.isEmpty(contractViolators)) {
throw new ContractViolationError(contractViolators);
}
@ -1259,7 +1262,7 @@ export class ApplicationManager extends EventEmitter {
clearTargetVolatileForServices(imageIds) {
return imageIds.map(
imageId => (this._targetVolatilePerImageId[imageId] = {}),
(imageId) => (this._targetVolatilePerImageId[imageId] = {}),
);
}
@ -1268,9 +1271,9 @@ export class ApplicationManager extends EventEmitter {
this.targetStateWrapper.getTargetApps(),
this.normaliseAndExtendAppFromDB,
)
.map(app => {
.map((app) => {
if (!_.isEmpty(app.services)) {
app.services = _.map(app.services, service => {
app.services = _.map(app.services, (service) => {
if (this._targetVolatilePerImageId[service.imageId] != null) {
_.merge(service, this._targetVolatilePerImageId[service.imageId]);
}
@ -1279,7 +1282,7 @@ export class ApplicationManager extends EventEmitter {
}
return app;
})
.then(apps => _.keyBy(apps, 'appId'));
.then((apps) => _.keyBy(apps, 'appId'));
}
getDependentTargets() {
@ -1314,9 +1317,9 @@ export class ApplicationManager extends EventEmitter {
// - are locally available (i.e. an image with the same digest exists)
// - are not saved to the DB with all their metadata (serviceId, serviceName, etc)
_compareImages(current, target, available, localMode) {
const allImagesForTargetApp = app => _.map(app.services, imageForService);
const allImagesForCurrentApp = app =>
_.map(app.services, function(service) {
const allImagesForTargetApp = (app) => _.map(app.services, imageForService);
const allImagesForCurrentApp = (app) =>
_.map(app.services, function (service) {
const img =
_.find(available, {
dockerImageId: service.config.image,
@ -1324,13 +1327,13 @@ export class ApplicationManager extends EventEmitter {
}) ?? _.find(available, { dockerImageId: service.config.image });
return _.omit(img, ['dockerImageId', 'id']);
});
const allImageDockerIdsForTargetApp = app =>
const allImageDockerIdsForTargetApp = (app) =>
_(app.services)
.map(svc => [svc.imageName, svc.config.image])
.filter(img => img[1] != null)
.map((svc) => [svc.imageName, svc.config.image])
.filter((img) => img[1] != null)
.value();
const availableWithoutIds = _.map(available, image =>
const availableWithoutIds = _.map(available, (image) =>
_.omit(image, ['dockerImageId', 'id']),
);
const currentImages = _.flatMap(current.local.apps, allImagesForCurrentApp);
@ -1341,16 +1344,16 @@ export class ApplicationManager extends EventEmitter {
const availableAndUnused = _.filter(
availableWithoutIds,
image =>
!_.some(currentImages.concat(targetImages), imageInUse =>
(image) =>
!_.some(currentImages.concat(targetImages), (imageInUse) =>
_.isEqual(image, imageInUse),
),
);
const imagesToDownload = _.filter(
targetImages,
targetImage =>
!_.some(available, availableImage =>
(targetImage) =>
!_.some(available, (availableImage) =>
Images.isSameImage(availableImage, targetImage),
),
);
@ -1358,8 +1361,10 @@ export class ApplicationManager extends EventEmitter {
// Images that are available but we don't have them in the DB with the exact metadata:
let imagesToSave = [];
if (!localMode) {
imagesToSave = _.filter(targetImages, function(targetImage) {
const isActuallyAvailable = _.some(available, function(availableImage) {
imagesToSave = _.filter(targetImages, function (targetImage) {
const isActuallyAvailable = _.some(available, function (
availableImage,
) {
if (Images.isSameImage(availableImage, targetImage)) {
return true;
}
@ -1371,31 +1376,33 @@ export class ApplicationManager extends EventEmitter {
}
return false;
});
const isNotSaved = !_.some(availableWithoutIds, img =>
const isNotSaved = !_.some(availableWithoutIds, (img) =>
_.isEqual(img, targetImage),
);
return isActuallyAvailable && isNotSaved;
});
}
const deltaSources = _.map(imagesToDownload, image => {
const deltaSources = _.map(imagesToDownload, (image) => {
return this.bestDeltaSource(image, available);
});
const proxyvisorImages = this.proxyvisor.imagesInUse(current, target);
const potentialDeleteThenDownload = _.filter(
current.local.apps.services,
svc =>
(svc) =>
svc.config.labels['io.balena.update.strategy'] ===
'delete-then-download' && svc.status === 'Stopped',
);
const imagesToRemove = _.filter(
availableAndUnused.concat(potentialDeleteThenDownload),
function(image) {
function (image) {
const notUsedForDelta = !_.includes(deltaSources, image.name);
const notUsedByProxyvisor = !_.some(proxyvisorImages, proxyvisorImage =>
Images.isSameImage(image, { name: proxyvisorImage }),
const notUsedByProxyvisor = !_.some(
proxyvisorImages,
(proxyvisorImage) =>
Images.isSameImage(image, { name: proxyvisorImage }),
);
return notUsedForDelta && notUsedByProxyvisor;
},
@ -1437,8 +1444,10 @@ export class ApplicationManager extends EventEmitter {
// multi-app warning: this will break
let appsForVolumeRemoval;
if (!localMode) {
const currentAppIds = _.keys(current.local.apps).map(n => checkInt(n));
const targetAppIds = _.keys(target.local.apps).map(n => checkInt(n));
const currentAppIds = _.keys(current.local.apps).map((n) =>
checkInt(n),
);
const targetAppIds = _.keys(target.local.apps).map((n) => checkInt(n));
appsForVolumeRemoval = _.difference(currentAppIds, targetAppIds);
}
@ -1519,7 +1528,7 @@ export class ApplicationManager extends EventEmitter {
}
}
}
const newDownloads = nextSteps.filter(s => s.action === 'fetch').length;
const newDownloads = nextSteps.filter((s) => s.action === 'fetch').length;
if (!ignoreImages && delta && newDownloads > 0) {
// Check that this is not the first pull for an
@ -1530,7 +1539,7 @@ export class ApplicationManager extends EventEmitter {
let downloadsToBlock =
downloading.length + newDownloads - constants.maxDeltaDownloads;
nextSteps = nextSteps.filter(function(step) {
nextSteps = nextSteps.filter(function (step) {
if (step.action === 'fetch' && downloadsToBlock > 0) {
const imagesForThisApp = appImages[step.image.appId];
if (imagesForThisApp == null || imagesForThisApp.length === 0) {
@ -1551,8 +1560,8 @@ export class ApplicationManager extends EventEmitter {
nextSteps.push({ action: 'noop' });
}
return _.uniqWith(nextSteps, _.isEqual);
}).then(nextSteps =>
Promise.all(volumePromises).then(function(volSteps) {
}).then((nextSteps) =>
Promise.all(volumePromises).then(function (volSteps) {
nextSteps = nextSteps.concat(_.flatten(volSteps));
return nextSteps;
}),
@ -1561,7 +1570,7 @@ export class ApplicationManager extends EventEmitter {
stopAll({ force = false, skipLock = false } = {}) {
return Promise.resolve(this.services.getAll())
.map(service => {
.map((service) => {
return this._lockingIfNecessary(
service.appId,
{ force, skipLock },
@ -1583,8 +1592,8 @@ export class ApplicationManager extends EventEmitter {
}
return this.config
.get('lockOverride')
.then(lockOverride => lockOverride || force)
.then(lockOverridden =>
.then((lockOverride) => lockOverride || force)
.then((lockOverridden) =>
updateLock.lock(appId, { force: lockOverridden }, fn),
);
}
@ -1607,7 +1616,7 @@ export class ApplicationManager extends EventEmitter {
.keys()
.concat(_.keys(targetState.local.apps))
.uniq()
.each(id => {
.each((id) => {
const intId = checkInt(id);
if (intId == null) {
throw new Error(`Invalid id: ${id}`);
@ -1615,7 +1624,7 @@ export class ApplicationManager extends EventEmitter {
containerIdsByAppId[intId] = this.services.getContainerIdMap(intId);
});
return this.config.get('localMode').then(localMode => {
return this.config.get('localMode').then((localMode) => {
return Promise.props({
cleanupNeeded: this.images.isCleanupNeeded(),
availableImages: this.images.getAvailable(),
@ -1656,7 +1665,7 @@ export class ApplicationManager extends EventEmitter {
ignoreImages,
conf,
containerIds,
).then(nextSteps => {
).then((nextSteps) => {
if (ignoreImages && _.some(nextSteps, { action: 'fetch' })) {
throw new Error('Cannot fetch images while executing an API action');
}
@ -1668,12 +1677,12 @@ export class ApplicationManager extends EventEmitter {
targetState,
nextSteps,
)
.then(proxyvisorSteps => nextSteps.concat(proxyvisorSteps));
.then((proxyvisorSteps) => nextSteps.concat(proxyvisorSteps));
});
}
serviceNameFromId(serviceId) {
return this.getTargetApps().then(function(apps) {
return this.getTargetApps().then(function (apps) {
// Multi-app warning!
// We assume here that there will only be a single
// application
@ -1681,7 +1690,7 @@ export class ApplicationManager extends EventEmitter {
const app = apps[appId];
const service = _.find(
app.services,
svc => svc.serviceId === serviceId,
(svc) => svc.serviceId === serviceId,
);
if (service?.serviceName == null) {
throw new InternalInconsistencyError(
@ -1697,8 +1706,8 @@ export class ApplicationManager extends EventEmitter {
}
removeAllVolumesForApp(appId) {
return this.volumes.getAllByAppId(appId).then(volumes =>
volumes.map(v => ({
return this.volumes.getAllByAppId(appId).then((volumes) =>
volumes.map((v) => ({
action: 'removeVolume',
current: v,
})),

View File

@ -144,7 +144,7 @@ export function getExecutors(app: {
callbacks: CompositionCallbacks;
}) {
const executors: Executors<CompositionStepAction> = {
stop: step => {
stop: (step) => {
return app.lockFn(
step.current.appId,
{
@ -161,7 +161,7 @@ export function getExecutors(app: {
},
);
},
kill: step => {
kill: (step) => {
return app.lockFn(
step.current.appId,
{
@ -177,12 +177,12 @@ export function getExecutors(app: {
},
);
},
remove: async step => {
remove: async (step) => {
// Only called for dead containers, so no need to
// take locks
await app.services.remove(step.current);
},
updateMetadata: step => {
updateMetadata: (step) => {
const skipLock =
step.skipLock ||
checkTruthy(step.current.config.labels['io.balena.legacy-container']);
@ -197,7 +197,7 @@ export function getExecutors(app: {
},
);
},
restart: step => {
restart: (step) => {
return app.lockFn(
step.current.appId,
{
@ -212,20 +212,20 @@ export function getExecutors(app: {
},
);
},
stopAll: async step => {
stopAll: async (step) => {
await app.applications.stopAll({
force: step.force,
skipLock: step.skipLock,
});
},
start: async step => {
start: async (step) => {
const container = await app.services.start(step.target);
app.callbacks.containerStarted(container.id);
},
updateCommit: async step => {
updateCommit: async (step) => {
await app.config.set({ currentCommit: step.target });
},
handover: step => {
handover: (step) => {
return app.lockFn(
step.current.appId,
{
@ -237,7 +237,7 @@ export function getExecutors(app: {
},
);
},
fetch: async step => {
fetch: async (step) => {
const startTime = process.hrtime();
app.callbacks.fetchStart();
const [fetchOpts, availableImages] = await Promise.all([
@ -253,7 +253,7 @@ export function getExecutors(app: {
await app.images.triggerFetch(
step.image,
opts,
async success => {
async (success) => {
app.callbacks.fetchEnd();
const elapsed = process.hrtime(startTime);
const elapsedMs = elapsed[0] * 1000 + elapsed[1] / 1e6;
@ -269,10 +269,10 @@ export function getExecutors(app: {
step.serviceName,
);
},
removeImage: async step => {
removeImage: async (step) => {
await app.images.remove(step.image);
},
saveImage: async step => {
saveImage: async (step) => {
await app.images.save(step.image);
},
cleanup: async () => {
@ -281,16 +281,16 @@ export function getExecutors(app: {
await app.images.cleanup();
}
},
createNetwork: async step => {
createNetwork: async (step) => {
await app.networks.create(step.target);
},
createVolume: async step => {
createVolume: async (step) => {
await app.volumes.create(step.target);
},
removeNetwork: async step => {
removeNetwork: async (step) => {
await app.networks.remove(step.current);
},
removeVolume: async step => {
removeVolume: async (step) => {
await app.volumes.remove(step.current);
},
ensureSupervisorNetwork: async () => {

View File

@ -194,10 +194,7 @@ export class Images extends (EventEmitter as new () => ImageEventEmitter) {
}
public async getByDockerId(id: string): Promise<Image> {
return await this.db
.models('image')
.where({ dockerImageId: id })
.first();
return await this.db.models('image').where({ dockerImageId: id }).first();
}
public async removeByDockerId(id: string): Promise<void> {
@ -216,7 +213,7 @@ export class Images extends (EventEmitter as new () => ImageEventEmitter) {
cb: (dockerImages: NormalisedDockerImage[], composeImages: Image[]) => T,
) {
const [normalisedImages, dbImages] = await Promise.all([
Bluebird.map(this.docker.listImages({ digests: true }), async image => {
Bluebird.map(this.docker.listImages({ digests: true }), async (image) => {
const newImage = _.clone(image) as NormalisedDockerImage;
newImage.NormalisedRepoTags = await this.getNormalisedTags(image);
return newImage;
@ -240,7 +237,7 @@ export class Images extends (EventEmitter as new () => ImageEventEmitter) {
): boolean {
return (
_.includes(dockerImage.NormalisedRepoTags, image.name) ||
_.some(dockerImage.RepoDigests, digest =>
_.some(dockerImage.RepoDigests, (digest) =>
Images.hasSameDigest(image.name, digest),
)
);
@ -252,7 +249,7 @@ export class Images extends (EventEmitter as new () => ImageEventEmitter) {
): boolean {
return _.some(
dockerImages,
dockerImage =>
(dockerImage) =>
this.matchesTagOrDigest(image, dockerImage) ||
image.dockerImageId === dockerImage.Id,
);
@ -261,7 +258,7 @@ export class Images extends (EventEmitter as new () => ImageEventEmitter) {
public async getAvailable(): Promise<Image[]> {
const images = await this.withImagesFromDockerAndDB(
(dockerImages, supervisedImages) =>
_.filter(supervisedImages, image =>
_.filter(supervisedImages, (image) =>
this.isAvailableInDocker(image, dockerImages),
),
);
@ -288,7 +285,7 @@ export class Images extends (EventEmitter as new () => ImageEventEmitter) {
// some entries in the db might need to have the dockerImageId populated
if (supervisedImage.dockerImageId == null) {
const id = _.get(
_.find(dockerImages, dockerImage =>
_.find(dockerImages, (dockerImage) =>
this.matchesTagOrDigest(supervisedImage, dockerImage),
),
'Id',
@ -303,20 +300,14 @@ export class Images extends (EventEmitter as new () => ImageEventEmitter) {
}
}
}
return _.reject(supervisedImages, image =>
return _.reject(supervisedImages, (image) =>
this.isAvailableInDocker(image, dockerImages),
);
},
);
const ids = _(imagesToRemove)
.map('id')
.compact()
.value();
await this.db
.models('image')
.del()
.whereIn('id', ids);
const ids = _(imagesToRemove).map('id').compact().value();
await this.db.models('image').del().whereIn('id', ids);
}
public async getStatus() {
@ -362,7 +353,7 @@ export class Images extends (EventEmitter as new () => ImageEventEmitter) {
this.db
.models('image')
.select('dockerImageId')
.then(vals => vals.map((img: Image) => img.dockerImageId)),
.then((vals) => vals.map((img: Image) => img.dockerImageId)),
]);
const supervisorRepos = [supervisorImageInfo.imageName];
@ -381,7 +372,7 @@ export class Images extends (EventEmitter as new () => ImageEventEmitter) {
tagName: string;
}) => {
return (
_.some(supervisorRepos, repo => imageName === repo) &&
_.some(supervisorRepos, (repo) => imageName === repo) &&
tagName !== supervisorImageInfo.tagName
);
};
@ -408,7 +399,7 @@ export class Images extends (EventEmitter as new () => ImageEventEmitter) {
return _(images)
.uniq()
.filter(
image =>
(image) =>
this.imageCleanupFailures[image] == null ||
Date.now() - this.imageCleanupFailures[image] >
constants.imageCleanupErrorIgnoreTimeout,
@ -505,10 +496,7 @@ export class Images extends (EventEmitter as new () => ImageEventEmitter) {
// We first fetch the image from the DB to ensure it exists,
// and get the dockerImageId and any other missing fields
const images = await this.db
.models('image')
.select()
.where(image);
const images = await this.db.models('image').select().where(image);
if (images.length === 0) {
removed = false;
@ -547,7 +535,7 @@ export class Images extends (EventEmitter as new () => ImageEventEmitter) {
if (
dockerImage.RepoTags.length > 1 &&
_.includes(dockerImage.RepoTags, img.name) &&
_.some(dockerImage.RepoTags, t =>
_.some(dockerImage.RepoTags, (t) =>
_.some(differentTags, { name: t }),
)
) {
@ -568,10 +556,7 @@ export class Images extends (EventEmitter as new () => ImageEventEmitter) {
this.reportChange(image.imageId);
}
await this.db
.models('image')
.del()
.where({ id: img.id });
await this.db.models('image').del().where({ id: img.id });
if (removed) {
this.logger.logSystemEvent(LogTypes.deleteImageSuccess, { image });

View File

@ -26,7 +26,7 @@ export class NetworkManager {
return this.docker
.getNetwork(network.Name)
.inspect()
.then(net => {
.then((net) => {
return Network.fromDockerNetwork(
{
docker: this.docker,
@ -93,7 +93,7 @@ export class NetworkManager {
.getNetwork(constants.supervisorNetworkInterface)
.inspect();
})
.then(network => {
.then((network) => {
return (
network.Options['com.docker.network.bridge.name'] ===
constants.supervisorNetworkInterface &&
@ -119,7 +119,7 @@ export class NetworkManager {
return Bluebird.resolve(
this.docker.getNetwork(constants.supervisorNetworkInterface).inspect(),
)
.then(net => {
.then((net) => {
if (
net.Options['com.docker.network.bridge.name'] !==
constants.supervisorNetworkInterface ||

View File

@ -60,7 +60,7 @@ export class Network {
driver: network.Driver,
ipam: {
driver: network.IPAM.Driver,
config: _.map(network.IPAM.Config, conf => {
config: _.map(network.IPAM.Config, (conf) => {
const newConf: NetworkConfig['ipam']['config'][0] = {};
if (conf.Subnet != null) {
@ -155,7 +155,7 @@ export class Network {
CheckDuplicate: true,
IPAM: {
Driver: this.config.ipam.driver,
Config: _.map(this.config.ipam.config, conf => {
Config: _.map(this.config.ipam.config, (conf) => {
const ipamConf: DockerIPAMConfig = {};
if (conf.subnet != null) {
ipamConf.Subnet = conf.subnet;
@ -194,7 +194,7 @@ export class Network {
this.docker
.getNetwork(Network.generateDockerName(this.appId, this.name))
.remove(),
).tapCatch(error => {
).tapCatch((error) => {
this.logger.logSystemEvent(logTypes.removeNetworkError, {
network: { name: this.name, appId: this.appId },
error,

View File

@ -70,7 +70,7 @@ export class PortMap {
this.ports.internalStart,
this.ports.internalEnd,
);
return _.map(internalRange, internal => {
return _.map(internalRange, (internal) => {
return `${internal}/${this.ports.protocol}`;
});
}
@ -118,9 +118,9 @@ export class PortMap {
public static normalisePortMaps(portMaps: PortMap[]): PortMap[] {
// Fold any ranges into each other if possible
return _(portMaps)
.sortBy(p => p.ports.protocol)
.sortBy(p => p.ports.host)
.sortBy(p => p.ports.internalStart)
.sortBy((p) => p.ports.protocol)
.sortBy((p) => p.ports.host)
.sortBy((p) => p.ports.internalStart)
.reduce((res: PortMap[], p: PortMap) => {
const last = _.last(res);
@ -141,7 +141,7 @@ export class PortMap {
}
public static fromComposePorts(ports: string[]): PortMap[] {
return PortMap.normalisePortMaps(ports.map(p => new PortMap(p)));
return PortMap.normalisePortMaps(ports.map((p) => new PortMap(p)));
}
private parsePortString(portStr: string): void {

View File

@ -69,7 +69,7 @@ export class ServiceManager extends (EventEmitter as new () => ServiceManagerEve
const filterLabels = ['supervised'].concat(extraLabelFilters);
const containers = await this.listWithBothLabels(filterLabels);
const services = await Bluebird.map(containers, async container => {
const services = await Bluebird.map(containers, async (container) => {
try {
const serviceInspect = await this.docker
.getContainer(container.Id)
@ -90,7 +90,7 @@ export class ServiceManager extends (EventEmitter as new () => ServiceManagerEve
}
});
return services.filter(s => s != null) as Service[];
return services.filter((s) => s != null) as Service[];
}
public async get(service: Service) {
@ -98,7 +98,7 @@ export class ServiceManager extends (EventEmitter as new () => ServiceManagerEve
const containerIds = await this.getContainerIdMap(service.appId!);
const services = (
await this.getAll(`service-id=${service.serviceId}`)
).filter(currentService =>
).filter((currentService) =>
currentService.isEqualConfig(service, containerIds),
);
@ -399,7 +399,7 @@ export class ServiceManager extends (EventEmitter as new () => ServiceManagerEve
filters: { type: ['container'] } as any,
});
stream.on('error', e => {
stream.on('error', (e) => {
log.error(`Error on docker events stream:`, e);
});
const parser = JSONStream.parse();
@ -462,7 +462,7 @@ export class ServiceManager extends (EventEmitter as new () => ServiceManagerEve
};
Bluebird.resolve(listen())
.catch(e => {
.catch((e) => {
log.error('Error listening to events:', e, e.stack);
})
.finally(() => {
@ -554,7 +554,7 @@ export class ServiceManager extends (EventEmitter as new () => ServiceManagerEve
return containerObj.remove({ v: true });
}
})
.catch(e => {
.catch((e) => {
// Get the statusCode from the original cause and make sure it's
// definitely an int for comparison reasons
const maybeStatusCode = PermissiveNumber.decode(e.statusCode);
@ -585,7 +585,7 @@ export class ServiceManager extends (EventEmitter as new () => ServiceManagerEve
delete this.containerHasDied[containerId];
this.logger.logSystemEvent(LogTypes.stopServiceSuccess, { service });
})
.catch(e => {
.catch((e) => {
this.logger.logSystemEvent(LogTypes.stopServiceError, {
service,
error: e,
@ -611,7 +611,7 @@ export class ServiceManager extends (EventEmitter as new () => ServiceManagerEve
this.docker.listContainers({
all: true,
filters: {
label: _.map(labelList, v => `${prefix}${v}`),
label: _.map(labelList, (v) => `${prefix}${v}`),
},
});
@ -646,7 +646,7 @@ export class ServiceManager extends (EventEmitter as new () => ServiceManagerEve
const wait = (): Bluebird<void> =>
Bluebird.any(
handoverCompletePaths.map(file =>
handoverCompletePaths.map((file) =>
fs.stat(file).then(() => fs.unlink(file).catch(_.noop)),
),
).catch(async () => {

View File

@ -130,7 +130,7 @@ export class Service {
// First process the networks correctly
let networks: ServiceConfig['networks'] = {};
if (_.isArray(config.networks)) {
_.each(config.networks, name => {
_.each(config.networks, (name) => {
networks[name] = {};
});
} else if (_.isObject(config.networks)) {
@ -139,7 +139,7 @@ export class Service {
// Prefix the network entries with the app id
networks = _.mapKeys(networks, (_v, k) => `${service.appId}_${k}`);
// Ensure that we add an alias of the service name
networks = _.mapValues(networks, v => {
networks = _.mapValues(networks, (v) => {
if (v.aliases == null) {
v.aliases = [];
}
@ -305,7 +305,7 @@ export class Service {
);
expose = expose.concat(_.keys(imageExposedPorts));
// Also add any exposed ports which are implied from the portMaps
const exposedFromPortMappings = _.flatMap(portMaps, port =>
const exposedFromPortMappings = _.flatMap(portMaps, (port) =>
port.toExposedPortArray(),
);
expose = expose.concat(exposedFromPortMappings);
@ -326,11 +326,13 @@ export class Service {
}
if (_.isArray(config.sysctls)) {
config.sysctls = _.fromPairs(_.map(config.sysctls, v => _.split(v, '=')));
config.sysctls = _.fromPairs(
_.map(config.sysctls, (v) => _.split(v, '=')),
);
}
config.sysctls = _.mapValues(config.sysctls, String);
_.each(['cpuShares', 'cpuQuota', 'oomScoreAdj'], key => {
_.each(['cpuShares', 'cpuQuota', 'oomScoreAdj'], (key) => {
const numVal = checkInt(config[key]);
if (numVal) {
config[key] = numVal;
@ -458,7 +460,7 @@ export class Service {
const portMaps = PortMap.fromDockerOpts(container.HostConfig.PortBindings);
let expose = _.flatMap(
_.flatMap(portMaps, p => p.toDockerOpts().exposedPorts),
_.flatMap(portMaps, (p) => p.toDockerOpts().exposedPorts),
_.keys,
);
if (container.Config.ExposedPorts != null) {
@ -578,7 +580,7 @@ export class Service {
const { exposedPorts, portBindings } = this.generateExposeAndPorts();
const tmpFs: Dictionary<''> = {};
_.each(this.config.tmpfs, tmp => {
_.each(this.config.tmpfs, (tmp) => {
tmpFs[tmp] = '';
});
@ -814,7 +816,7 @@ export class Service {
this.appId || 0,
this.serviceName || '',
);
const validVolumes = _.map(this.config.volumes, volume => {
const validVolumes = _.map(this.config.volumes, (volume) => {
if (_.includes(defaults, volume) || !_.includes(volume, ':')) {
return null;
}
@ -855,7 +857,7 @@ export class Service {
} {
const binds: string[] = [];
const volumes: { [volName: string]: {} } = {};
_.each(this.config.volumes, volume => {
_.each(this.config.volumes, (volume) => {
if (_.includes(volume, ':')) {
binds.push(volume);
} else {
@ -870,7 +872,7 @@ export class Service {
const exposed: DockerPortOptions['exposedPorts'] = {};
const ports: DockerPortOptions['portBindings'] = {};
_.each(this.config.portMaps, pmap => {
_.each(this.config.portMaps, (pmap) => {
const { exposedPorts, portBindings } = pmap.toDockerOpts();
_.merge(exposed, exposedPorts);
_.mergeWith(ports, portBindings, (destVal, srcVal) => {
@ -884,7 +886,7 @@ export class Service {
// We also want to merge the compose and image exposedPorts
// into the list of exposedPorts
const composeExposed: DockerPortOptions['exposedPorts'] = {};
_.each(this.config.expose, port => {
_.each(this.config.expose, (port) => {
composeExposed[port] = {};
});
_.merge(exposed, composeExposed);
@ -948,9 +950,9 @@ export class Service {
const [currentAliases, targetAliases] = [
current.aliases,
target.aliases,
].map(aliases =>
].map((aliases) =>
_.sortBy(
aliases.filter(a => !_.startsWith(this.containerId || '', a)),
aliases.filter((a) => !_.startsWith(this.containerId || '', a)),
),
);
@ -1006,7 +1008,7 @@ export class Service {
): ServiceConfig['volumes'] {
let volumes: ServiceConfig['volumes'] = [];
_.each(composeVolumes, volume => {
_.each(composeVolumes, (volume) => {
const isBind = _.includes(volume, ':');
if (isBind) {
const [bindSource, bindDest, mode] = volume.split(':');

View File

@ -316,7 +316,7 @@ export function addFeaturesFromLabels(
service: Service,
options: DeviceMetadata,
): void {
const setEnvVariables = function(key: string, val: string) {
const setEnvVariables = function (key: string, val: string) {
service.config.environment[`RESIN_${key}`] = val;
service.config.environment[`BALENA_${key}`] = val;
};

View File

@ -41,7 +41,7 @@ export class VolumeManager {
public async getAll(): Promise<Volume[]> {
const volumeInspect = await this.listWithBothLabels();
return volumeInspect.map(inspect =>
return volumeInspect.map((inspect) =>
Volume.fromDockerVolume(
{ logger: this.logger, docker: this.docker },
inspect,
@ -144,11 +144,11 @@ export class VolumeManager {
]);
const containerVolumes = _(dockerContainers)
.flatMap(c => c.Mounts)
.filter(m => m.Type === 'volume')
.flatMap((c) => c.Mounts)
.filter((m) => m.Type === 'volume')
// We know that the name must be set, if the mount is
// a volume
.map(m => m.Name as string)
.map((m) => m.Name as string)
.uniq()
.value();
const volumeNames = _.map(dockerVolumes.Volumes, 'Name');
@ -161,7 +161,7 @@ export class VolumeManager {
referencedVolumes,
);
await Promise.all(
volumesToRemove.map(v => this.docker.getVolume(v).remove()),
volumesToRemove.map((v) => this.docker.getVolume(v).remove()),
);
}

View File

@ -175,7 +175,7 @@ export class RPiConfigBackend extends DeviceConfigBackend {
confStatements.push(`${key} ${value}`);
} else if (_.isArray(value)) {
confStatements = confStatements.concat(
_.map(value, entry => `${key}=${entry}`),
_.map(value, (entry) => `${key}=${entry}`),
);
} else {
confStatements.push(`${key}=${value}`);
@ -334,7 +334,7 @@ export class ExtlinuxConfigBackend extends DeviceConfigBackend {
);
}
const appendLine = _.filter(defaultEntry.APPEND.split(' '), entry => {
const appendLine = _.filter(defaultEntry.APPEND.split(' '), (entry) => {
const lhs = entry.split('=');
return !this.isSupportedConfig(lhs[0]);
});
@ -384,7 +384,7 @@ export class ExtlinuxConfigBackend extends DeviceConfigBackend {
// Firstly split by line and filter any comments and empty lines
let lines = confStr.split(/\r?\n/);
lines = _.filter(lines, l => {
lines = _.filter(lines, (l) => {
const trimmed = _.trimStart(l);
return trimmed !== '' && !_.startsWith(trimmed, '#');
});
@ -647,7 +647,7 @@ export class ConfigfsConfigBackend extends DeviceConfigBackend {
return [value];
} else {
// or, it could be parsable as the content of a JSON array; "value" | "value1","value2"
return value.split(',').map(v => v.replace('"', '').trim());
return value.split(',').map((v) => v.replace('"', '').trim());
}
default:
return value;

View File

@ -30,9 +30,9 @@ export default class ConfigJsonConfigBackend {
this.schema = schema;
this.writeLockConfigJson = () =>
writeLock('config.json').disposer(release => release());
writeLock('config.json').disposer((release) => release());
this.readLockConfigJson = () =>
readLock('config.json').disposer(release => release());
readLock('config.json').disposer((release) => release());
}
public async set<T extends Schema.SchemaKey>(
@ -94,12 +94,12 @@ export default class ConfigJsonConfigBackend {
private write(): Promise<void> {
let atomicWritePossible = true;
return this.pathOnHost()
.catch(err => {
.catch((err) => {
log.error('There was an error detecting the config.json path', err);
atomicWritePossible = false;
return constants.configJsonNonAtomicPath;
})
.then(configPath => {
.then((configPath) => {
if (atomicWritePossible) {
return writeFileAtomic(configPath, JSON.stringify(this.cache));
} else {

View File

@ -24,7 +24,7 @@ export const fnSchema = {
provisioned: (config: Config) => {
return config
.getMany(['uuid', 'apiEndpoint', 'registered_at', 'deviceId'])
.then(requiredValues => {
.then((requiredValues) => {
return _.every(_.values(requiredValues));
});
},
@ -64,7 +64,7 @@ export const fnSchema = {
'registered_at',
'deviceId',
])
.then(conf => {
.then((conf) => {
return {
uuid: conf.uuid,
applicationId: conf.applicationId,
@ -80,7 +80,7 @@ export const fnSchema = {
});
},
mixpanelHost: (config: Config) => {
return config.get('apiEndpoint').then(apiEndpoint => {
return config.get('apiEndpoint').then((apiEndpoint) => {
if (!apiEndpoint) {
return null;
}
@ -117,7 +117,7 @@ export const fnSchema = {
]);
},
unmanaged: (config: Config) => {
return config.get('apiEndpoint').then(apiEndpoint => {
return config.get('apiEndpoint').then((apiEndpoint) => {
return !apiEndpoint;
});
},

View File

@ -70,7 +70,7 @@ export class Config extends (EventEmitter as new () => ConfigEventEmitter) {
if (Schema.schema.hasOwnProperty(key)) {
const schemaKey = key as Schema.SchemaKey;
return this.getSchema(schemaKey, db).then(value => {
return this.getSchema(schemaKey, db).then((value) => {
if (value == null) {
const defaultValue = schemaTypes[key].default;
if (defaultValue instanceof t.Type) {
@ -118,7 +118,7 @@ export class Config extends (EventEmitter as new () => ConfigEventEmitter) {
keys: T[],
trx?: Transaction,
): Bluebird<{ [key in T]: SchemaReturn<key> }> {
return Bluebird.map(keys, (key: T) => this.get(key, trx)).then(values => {
return Bluebird.map(keys, (key: T) => this.get(key, trx)).then((values) => {
return _.zipObject(keys, values);
}) as Bluebird<{ [key in T]: SchemaReturn<key> }>;
}
@ -198,10 +198,7 @@ export class Config extends (EventEmitter as new () => ConfigEventEmitter) {
if (Schema.schema[key].source === 'config.json') {
return this.configJsonBackend.remove(key);
} else if (Schema.schema[key].source === 'db') {
await this.db
.models('config')
.del()
.where({ key });
await this.db.models('config').del().where({ key });
} else {
throw new Error(
`Unknown or unsupported config backend: ${Schema.schema[key].source}`,
@ -247,9 +244,7 @@ export class Config extends (EventEmitter as new () => ConfigEventEmitter) {
value = await this.configJsonBackend.get(key);
break;
case 'db':
const [conf] = await db('config')
.select('value')
.where({ key });
const [conf] = await db('config').select('value').where({ key });
if (conf != null) {
return conf.value;
}

View File

@ -18,7 +18,7 @@ export const PermissiveBoolean = new t.Type<boolean, t.TypeOf<PermissiveType>>(
'PermissiveBoolean',
_.isBoolean,
(m, c) =>
either.chain(permissiveValue.validate(m, c), v => {
either.chain(permissiveValue.validate(m, c), (v) => {
switch (typeof v) {
case 'string':
case 'boolean':
@ -51,7 +51,7 @@ export const PermissiveNumber = new t.Type<number, string | number>(
'PermissiveNumber',
_.isNumber,
(m, c) =>
either.chain(t.union([t.string, t.number]).validate(m, c), v => {
either.chain(t.union([t.string, t.number]).validate(m, c), (v) => {
switch (typeof v) {
case 'number':
return t.success(v);
@ -82,7 +82,7 @@ export class StringJSON<T> extends t.Type<T, string> {
(m, c) =>
// Accept either an object, or a string which represents the
// object
either.chain(t.union([t.string, type]).validate(m, c), v => {
either.chain(t.union([t.string, type]).validate(m, c), (v) => {
let obj: T;
if (typeof v === 'string') {
obj = JSON.parse(v);

View File

@ -28,7 +28,7 @@ export const initialiseConfigBackend = async (
};
function getConfigBackend(deviceType: string): DeviceConfigBackend | undefined {
return _.find(configBackends, backend => backend.matches(deviceType));
return _.find(configBackends, (backend) => backend.matches(deviceType));
}
export function envToBootConfig(
@ -54,7 +54,7 @@ export function bootConfigToEnv(
): EnvVarObject {
return _(config)
.mapKeys((_val, key) => configBackend.createConfigVarName(key))
.mapValues(val => {
.mapValues((val) => {
if (_.isArray(val)) {
return JSON.stringify(val).replace(/^\[(.*)\]$/, '$1');
}

View File

@ -49,9 +49,7 @@ export class DB {
): Promise<any> {
const knex = trx || this.knex;
const n = await knex(modelName)
.update(obj)
.where(id);
const n = await knex(modelName).update(obj).where(id);
if (n === 0) {
return knex(modelName).insert(obj);
}

View File

@ -6,7 +6,7 @@ export function doRestart(applications, appId, force) {
const { _lockingIfNecessary, deviceState } = applications;
return _lockingIfNecessary(appId, { force }, () =>
deviceState.getCurrentForComparison().then(function(currentState) {
deviceState.getCurrentForComparison().then(function (currentState) {
const app = currentState.local.apps[appId];
const imageIds = _.map(app.services, 'imageId');
applications.clearTargetVolatileForServices(imageIds);
@ -17,7 +17,7 @@ export function doRestart(applications, appId, force) {
.pausingApply(() =>
deviceState
.applyIntermediateTarget(currentState, { skipLock: true })
.then(function() {
.then(function () {
currentState.local.apps[appId] = app;
return deviceState.applyIntermediateTarget(currentState, {
skipLock: true,
@ -38,7 +38,7 @@ export function doPurge(applications, appId, force) {
'Purge data',
);
return _lockingIfNecessary(appId, { force }, () =>
deviceState.getCurrentForComparison().then(function(currentState) {
deviceState.getCurrentForComparison().then(function (currentState) {
const app = currentState.local.apps[appId];
if (app == null) {
throw new Error(appNotFoundMessage);
@ -56,11 +56,11 @@ export function doPurge(applications, appId, force) {
// remove the volumes, we must do this here, as the
// application-manager will not remove any volumes
// which are part of an active application
return Bluebird.each(volumes.getAllByAppId(appId), vol =>
return Bluebird.each(volumes.getAllByAppId(appId), (vol) =>
vol.remove(),
);
})
.then(function() {
.then(function () {
currentState.local.apps[appId] = app;
return deviceState.applyIntermediateTarget(currentState, {
skipLock: true,
@ -73,7 +73,7 @@ export function doPurge(applications, appId, force) {
.tap(() =>
logger.logSystemMessage('Purged data', { appId }, 'Purge data success'),
)
.tapCatch(err =>
.tapCatch((err) =>
logger.logSystemMessage(
`Error purging data: ${err}`,
{ appId, error: err },

View File

@ -4,10 +4,10 @@ import * as constants from '../lib/constants';
import { checkInt, checkTruthy } from '../lib/validation';
import { doRestart, doPurge, serviceAction } from './common';
export const createV1Api = function(router, applications) {
export const createV1Api = function (router, applications) {
const { eventTracker } = applications;
router.post('/v1/restart', function(req, res, next) {
router.post('/v1/restart', function (req, res, next) {
const appId = checkInt(req.body.appId);
const force = checkTruthy(req.body.force) ?? false;
eventTracker.track('Restart container (v1)', { appId });
@ -19,7 +19,7 @@ export const createV1Api = function(router, applications) {
.catch(next);
});
const v1StopOrStart = function(req, res, next, action) {
const v1StopOrStart = function (req, res, next, action) {
const appId = checkInt(req.params.appId);
const force = checkTruthy(req.body.force) ?? false;
if (appId == null) {
@ -27,7 +27,7 @@ export const createV1Api = function(router, applications) {
}
return applications
.getCurrentApp(appId)
.then(function(app) {
.then(function (app) {
let service = app?.app.services?.[0];
if (service == null) {
return res.status(400).send('App not found');
@ -49,12 +49,12 @@ export const createV1Api = function(router, applications) {
}),
{ force },
)
.then(function() {
.then(function () {
if (action === 'stop') {
return service;
}
// We refresh the container id in case we were starting an app with no container yet
return applications.getCurrentApp(appId).then(function(app2) {
return applications.getCurrentApp(appId).then(function (app2) {
service = app2?.services?.[0];
if (service == null) {
throw new Error('App not found after running action');
@ -62,20 +62,20 @@ export const createV1Api = function(router, applications) {
return service;
});
})
.then(service2 =>
.then((service2) =>
res.status(200).json({ containerId: service2.containerId }),
);
})
.catch(next);
};
const createV1StopOrStartHandler = action =>
const createV1StopOrStartHandler = (action) =>
_.partial(v1StopOrStart, _, _, _, action);
router.post('/v1/apps/:appId/stop', createV1StopOrStartHandler('stop'));
router.post('/v1/apps/:appId/start', createV1StopOrStartHandler('start'));
router.get('/v1/apps/:appId', function(req, res, next) {
router.get('/v1/apps/:appId', function (req, res, next) {
const appId = checkInt(req.params.appId);
eventTracker.track('GET app (v1)', { appId });
if (appId == null) {
@ -84,7 +84,7 @@ export const createV1Api = function(router, applications) {
return Promise.join(
applications.getCurrentApp(appId),
applications.getStatus(),
function(app, status) {
function (app, status) {
const service = app?.services?.[0];
if (service == null) {
return res.status(400).send('App not found');
@ -112,7 +112,7 @@ export const createV1Api = function(router, applications) {
).catch(next);
});
router.post('/v1/purge', function(req, res, next) {
router.post('/v1/purge', function (req, res, next) {
const appId = checkInt(req.body.appId);
const force = checkTruthy(req.body.force) ?? false;
if (appId == null) {

View File

@ -39,7 +39,7 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
return _lockingIfNecessary(appId, { force }, () => {
return applications
.getCurrentApp(appId)
.then(app => {
.then((app) => {
if (app == null) {
res.status(404).send(appNotFoundMessage);
return;
@ -54,11 +54,11 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
let service: Service | undefined;
if (imageId != null) {
service = _.find(app.services, svc => svc.imageId === imageId);
service = _.find(app.services, (svc) => svc.imageId === imageId);
} else {
service = _.find(
app.services,
svc => svc.serviceName === serviceName,
(svc) => svc.serviceName === serviceName,
);
}
if (service == null) {
@ -173,7 +173,7 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
const appNameById: { [id: number]: string } = {};
apps.forEach(app => {
apps.forEach((app) => {
const appId = parseInt(app.appId, 10);
response[app.name] = {
appId,
@ -184,7 +184,7 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
appNameById[appId] = app.name;
});
images.forEach(img => {
images.forEach((img) => {
const appName = appNameById[img.appId];
if (appName == null) {
log.warn(
@ -224,7 +224,7 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
// Get all services and their statuses, and return it
applications
.getStatus()
.then(apps => {
.then((apps) => {
res.status(200).json(apps);
})
.catch(next);
@ -258,14 +258,14 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
target.local = {
name: targetState.local.name,
config: _.cloneDeep(targetState.local.config),
apps: _.mapValues(targetState.local.apps, app => ({
apps: _.mapValues(targetState.local.apps, (app) => ({
appId: app.appId,
name: app.name,
commit: app.commit,
releaseId: app.releaseId,
services: _.map(app.services, s => s.toComposeObject()),
volumes: _.mapValues(app.volumes, v => v.toComposeObject()),
networks: _.mapValues(app.networks, n => n.toComposeObject()),
services: _.map(app.services, (s) => s.toComposeObject()),
volumes: _.mapValues(app.volumes, (v) => v.toComposeObject()),
networks: _.mapValues(app.networks, (n) => n.toComposeObject()),
})),
};
}
@ -369,7 +369,10 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
if (req.query.serviceName != null || req.query.service != null) {
const serviceName = req.query.serviceName || req.query.service;
const service = _.find(services, svc => svc.serviceName === serviceName);
const service = _.find(
services,
(svc) => svc.serviceName === serviceName,
);
if (service != null) {
res.status(200).json({
status: 'success',
@ -396,7 +399,7 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
const currentRelease = await applications.config.get('currentCommit');
const pending = applications.deviceState.applyInProgress;
const containerStates = (await applications.services.getAll()).map(svc =>
const containerStates = (await applications.services.getAll()).map((svc) =>
_.pick(
svc,
'status',
@ -411,7 +414,7 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
let downloadProgressTotal = 0;
let downloads = 0;
const imagesStates = (await applications.images.getStatus()).map(img => {
const imagesStates = (await applications.images.getStatus()).map((img) => {
if (img.downloadProgress != null) {
downloadProgressTotal += img.downloadProgress;
downloads += 1;
@ -483,8 +486,8 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
router.get('/v2/cleanup-volumes', async (_req, res) => {
const targetState = await applications.getTargetApps();
const referencedVolumes: string[] = [];
_.each(targetState, app => {
_.each(app.volumes, vol => {
_.each(targetState, (app) => {
_.each(app.volumes, (vol) => {
referencedVolumes.push(Volume.generateDockerName(vol.appId, vol.name));
});
});

View File

@ -156,7 +156,7 @@ export class DeviceConfig {
this.logger = logger;
this.actionExecutors = {
changeConfig: async step => {
changeConfig: async (step) => {
try {
if (step.humanReadableTarget) {
this.logger.logConfigChange(step.humanReadableTarget);
@ -203,7 +203,7 @@ export class DeviceConfig {
throw err;
}
},
setBootConfig: async step => {
setBootConfig: async (step) => {
const configBackend = await this.getConfigBackend();
if (!_.isObject(step.target)) {
throw new Error(
@ -330,7 +330,7 @@ export class DeviceConfig {
}
public resetRateLimits() {
_.each(this.rateLimits, action => {
_.each(this.rateLimits, (action) => {
action.lastAttempt = null;
});
}
@ -468,7 +468,7 @@ export class DeviceConfig {
}
const now = Date.now();
steps = _.map(steps, step => {
steps = _.map(steps, (step) => {
const action = step.action;
if (action in this.rateLimits) {
const lastAttempt = this.rateLimits[action].lastAttempt;
@ -640,7 +640,7 @@ export class DeviceConfig {
if (!_.includes(conf.dtoverlay, field)) {
conf.dtoverlay.push(field);
}
conf.dtoverlay = conf.dtoverlay.filter(s => !_.isEmpty(s));
conf.dtoverlay = conf.dtoverlay.filter((s) => !_.isEmpty(s));
return conf;
}

View File

@ -121,8 +121,8 @@ function createDeviceStateRouter(deviceState: DeviceState) {
router.get('/v1/device/host-config', (_req, res) =>
hostConfig
.get()
.then(conf => res.json(conf))
.catch(err =>
.then((conf) => res.json(conf))
.catch((err) =>
res.status(503).send(err?.message ?? err ?? 'Unknown error'),
),
);
@ -131,7 +131,7 @@ function createDeviceStateRouter(deviceState: DeviceState) {
hostConfig
.patch(req.body, deviceState.config)
.then(() => res.status(200).send('OK'))
.catch(err =>
.catch((err) =>
res.status(503).send(err?.message ?? err ?? 'Unknown error'),
),
);
@ -271,8 +271,8 @@ export class DeviceState extends (EventEmitter as new () => DeviceStateEventEmit
apiBinder,
});
this.on('error', err => log.error('deviceState error: ', err));
this.on('apply-target-state-end', function(err) {
this.on('error', (err) => log.error('deviceState error: ', err));
this.on('apply-target-state-end', function (err) {
if (err != null) {
if (!(err instanceof UpdatesLockedError)) {
return log.error('Device state apply error', err);
@ -285,7 +285,7 @@ export class DeviceState extends (EventEmitter as new () => DeviceStateEventEmit
return this.deviceConfig.resetRateLimits();
}
});
this.applications.on('change', d => this.reportCurrentState(d));
this.applications.on('change', (d) => this.reportCurrentState(d));
this.router = createDeviceStateRouter(this);
}
@ -307,7 +307,7 @@ export class DeviceState extends (EventEmitter as new () => DeviceStateEventEmit
}
public async init() {
this.config.on('change', changedConfig => {
this.config.on('change', (changedConfig) => {
if (changedConfig.loggingEnabled != null) {
this.logger.enable(changedConfig.loggingEnabled);
}
@ -389,11 +389,11 @@ export class DeviceState extends (EventEmitter as new () => DeviceStateEventEmit
network.startConnectivityCheck(
apiEndpoint,
connectivityCheckEnabled,
connected => {
(connected) => {
return (this.connected = connected);
},
);
this.config.on('change', function(changedConfig) {
this.config.on('change', function (changedConfig) {
if (changedConfig.connectivityCheckEnabled != null) {
return network.enableConnectivityCheck(
changedConfig.connectivityCheckEnabled,
@ -402,7 +402,7 @@ export class DeviceState extends (EventEmitter as new () => DeviceStateEventEmit
});
log.debug('Starting periodic check for IP addresses');
await network.startIPAddressUpdate()(async addresses => {
await network.startIPAddressUpdate()(async (addresses) => {
await this.reportCurrentState({
ip_address: addresses.join(' '),
});
@ -433,11 +433,11 @@ export class DeviceState extends (EventEmitter as new () => DeviceStateEventEmit
}
private readLockTarget = () =>
this.readLock('target').disposer(release => release());
this.readLock('target').disposer((release) => release());
private writeLockTarget = () =>
this.writeLock('target').disposer(release => release());
this.writeLock('target').disposer((release) => release());
private inferStepsLock = () =>
this.writeLock('inferSteps').disposer(release => release());
this.writeLock('inferSteps').disposer((release) => release());
private usingReadLockTarget(fn: () => any) {
return Bluebird.using(this.readLockTarget, () => fn());
}
@ -463,7 +463,7 @@ export class DeviceState extends (EventEmitter as new () => DeviceStateEventEmit
const apiEndpoint = await this.config.get('apiEndpoint');
await this.usingWriteLockTarget(async () => {
await this.db.transaction(async trx => {
await this.db.transaction(async (trx) => {
await this.config.set({ name: target.local.name }, trx);
await this.deviceConfig.setTarget(target.local.config, trx);
@ -762,7 +762,7 @@ export class DeviceState extends (EventEmitter as new () => DeviceStateEventEmit
if (!intermediate) {
this.reportCurrentState({ update_pending: true });
}
if (_.every(steps, step => step.action === 'noop')) {
if (_.every(steps, (step) => step.action === 'noop')) {
if (backoff) {
retryCount += 1;
// Backoff to a maximum of 10 minutes
@ -774,7 +774,7 @@ export class DeviceState extends (EventEmitter as new () => DeviceStateEventEmit
try {
await Promise.all(
steps.map(s => this.applyStep(s, { force, initial, skipLock })),
steps.map((s) => this.applyStep(s, { force, initial, skipLock })),
);
await Bluebird.delay(nextDelay);
@ -798,20 +798,20 @@ export class DeviceState extends (EventEmitter as new () => DeviceStateEventEmit
JSON.stringify(_.map(steps, 'action')),
);
}
}).catch(err => {
}).catch((err) => {
return this.applyError(err, { force, initial, intermediate });
});
}
public pausingApply(fn: () => any) {
const lock = () => {
return this.writeLock('pause').disposer(release => release());
return this.writeLock('pause').disposer((release) => release());
};
// TODO: This function is a bit of a mess
const pause = () => {
return Bluebird.try(() => {
let res;
this.applyBlocker = new Promise(resolve => {
this.applyBlocker = new Promise((resolve) => {
res = resolve;
});
return res;

View File

@ -92,7 +92,7 @@ export class EventTracker {
(event: string) => {
// Call this function at maximum once every minute
return _.throttle(
properties => {
(properties) => {
if (this.client != null) {
this.client.track(event, properties);
}

View File

@ -61,7 +61,7 @@ const memoizedAuthRegex = _.memoize(
);
const memoizedRegex = _.memoize(
proxyField => new RegExp(proxyField + '\\s*=\\s*([^;\\s]*)\\s*;'),
(proxyField) => new RegExp(proxyField + '\\s*=\\s*([^;\\s]*)\\s*;'),
);
async function readProxy(): Promise<ProxyConfig | undefined> {

View File

@ -63,10 +63,7 @@ function isValidRequirementType(
export function containerContractsFulfilled(
serviceContracts: ServiceContracts,
): ApplicationContractResult {
const containers = _(serviceContracts)
.map('contract')
.compact()
.value();
const containers = _(serviceContracts).map('contract').compact().value();
const blueprintMembership: Dictionary<number> = {};
for (const component of _.keys(contractRequirementVersions)) {
@ -91,7 +88,7 @@ export function containerContractsFulfilled(
[
...getContractsFromVersions(contractRequirementVersions),
...containers,
].map(c => new Contract(c)),
].map((c) => new Contract(c)),
);
const solution = blueprint.reproduce(universe);
@ -132,13 +129,13 @@ export function containerContractsFulfilled(
const [fulfilledServices, unfulfilledServices] = _.partition(
_.keys(serviceContracts),
serviceName => {
(serviceName) => {
const { contract } = serviceContracts[serviceName];
if (!contract) {
return true;
}
// Did we find the contract in the generated state?
return _.some(children, child =>
return _.some(children, (child) =>
_.isEqual((child as any).raw, contract),
);
},
@ -146,7 +143,7 @@ export function containerContractsFulfilled(
const [unmetAndRequired, unmetAndOptional] = _.partition(
unfulfilledServices,
serviceName => {
(serviceName) => {
return !serviceContracts[serviceName].optional;
},
);

View File

@ -281,7 +281,7 @@ export class DockerUtils extends DockerToolbelt {
.on('progress', onProgress)
.on('retry', onProgress)
.on('error', reject)
.on('response', res => {
.on('response', (res) => {
if (res.statusCode !== 200) {
reject(
new Error(
@ -297,8 +297,8 @@ export class DockerUtils extends DockerToolbelt {
});
res
.pipe(deltaStream)
.on('id', id => resolve(`sha256:${id}`))
.on('error', err => {
.on('id', (id) => resolve(`sha256:${id}`))
.on('error', (err) => {
logFn(`Delta stream emitted error: ${err}`);
req.abort();
reject(err);

View File

@ -5,7 +5,7 @@ import * as constants from './constants';
import { ENOENT } from './errors';
export function writeAndSyncFile(path: string, data: string): Bluebird<void> {
return Bluebird.resolve(fs.open(path, 'w')).then(fd => {
return Bluebird.resolve(fs.open(path, 'w')).then((fd) => {
fs.write(fd, data, 0, 'utf8')
.then(() => fs.fsync(fd))
.then(() => fs.close(fd));

View File

@ -11,8 +11,8 @@ function applyIptablesArgs(args: string): Bluebird<void> {
// We want to run both commands regardless, but also rethrow an error
// if one of them fails
return execAsync(`iptables ${args}`)
.catch(e => (err = e))
.then(() => execAsync(`ip6tables ${args}`).catch(e => (err = e)))
.catch((e) => (err = e))
.then(() => execAsync(`ip6tables ${args}`).catch((e) => (err = e)))
.then(() => {
if (err != null) {
throw err;
@ -42,7 +42,7 @@ export function rejectOnAllInterfacesExcept(
): Bluebird<void> {
// We delete each rule and create it again to ensure ordering (all ACCEPTs before the REJECT/DROP).
// This is especially important after a supervisor update.
return Bluebird.each(allowedInterfaces, iface =>
return Bluebird.each(allowedInterfaces, (iface) =>
clearAndInsertIptablesRule(
`INPUT -p tcp --dport ${port} -i ${iface} -j ACCEPT`,
),

View File

@ -166,10 +166,7 @@ export async function normaliseLegacyDatabase(
log.warn(
`No compatible releases found in API, removing ${app.appId} from target state`,
);
await db
.models('app')
.where({ appId: app.appId })
.del();
await db.models('app').where({ appId: app.appId }).del();
}
// We need to get the release.id, serviceId, image.id and updated imageUrl
@ -187,7 +184,7 @@ export async function normaliseLegacyDatabase(
const imageFromDocker = await application.docker
.getImage(service.image)
.inspect()
.catch(error => {
.catch((error) => {
if (error instanceof NotFoundError) {
return;
}
@ -203,9 +200,7 @@ export async function normaliseLegacyDatabase(
try {
if (imagesFromDatabase.length > 0) {
log.debug('Deleting existing image entry in db');
await trx('image')
.where({ name: service.image })
.del();
await trx('image').where({ name: service.image }).del();
} else {
log.debug('No image in db to delete');
}
@ -243,9 +238,7 @@ export async function normaliseLegacyDatabase(
log.debug('Updating app entry in db');
log.success('Successfully migrated legacy application');
await trx('app')
.update(app)
.where({ appId: app.appId });
await trx('app').update(app).where({ appId: app.appId });
}
});
}
@ -312,10 +305,10 @@ export async function loadBackupFromMigration(
// If the volume exists (from a previous incomplete run of this restoreBackup), we delete it first
await deviceState.applications.volumes
.get({ appId, name: volumeName })
.then(volume => {
.then((volume) => {
return volume.remove();
})
.catch(error => {
.catch((error) => {
if (error instanceof NotFoundError) {
return;
}

View File

@ -30,7 +30,7 @@ const maxLevelLength = _(levels)
const uncolorize = winston.format.uncolorize();
const formatter = winston.format.printf(args => {
const formatter = winston.format.printf((args) => {
const { level, message } = args;
const { level: strippedLevel } = uncolorize.transform(args, {
level: true,
@ -64,7 +64,7 @@ winston.addColors(colors);
const messageFormatter = (printFn: (message: string) => void) => {
return (...parts: any[]) => {
parts
.map(p => {
.map((p) => {
if (p instanceof Error) {
return p.stack;
}

View File

@ -28,7 +28,7 @@ export function lockPath(appId: number, serviceName: string): string {
}
function lockFilesOnHost(appId: number, serviceName: string): string[] {
return ['updates.lock', 'resin-updates.lock'].map(filename =>
return ['updates.lock', 'resin-updates.lock'].map((filename) =>
path.join(constants.rootMountPoint, lockPath(appId, serviceName), filename),
);
}
@ -56,7 +56,7 @@ export const readLock: LockFn = Bluebird.promisify(locker.async.readLock, {
});
function dispose(release: () => void): Bluebird<void> {
return Bluebird.map(_.keys(locksTaken), lockName => {
return Bluebird.map(_.keys(locksTaken), (lockName) => {
delete locksTaken[lockName];
return lockFile.unlockAsync(lockName);
})
@ -82,10 +82,10 @@ export function lock(
return Bluebird.resolve(fs.readdir(lockDir))
.catchReturn(ENOENT, [])
.mapSeries(serviceName => {
.mapSeries((serviceName) => {
return Bluebird.mapSeries(
lockFilesOnHost(appId, serviceName),
tmpLockName => {
(tmpLockName) => {
return Bluebird.try(() => {
if (force) {
return lockFile.unlockAsync(tmpLockName);
@ -97,7 +97,7 @@ export function lock(
})
.catchReturn(ENOENT, undefined);
},
).catch(err => {
).catch((err) => {
return dispose(release).throw(
new UpdatesLockedError(`Updates are locked: ${err.message}`),
);

View File

@ -479,7 +479,7 @@ export function isValidDependentDevicesObject(devices: any): boolean {
return _.every(
a as TargetState['dependent']['devices'][any]['apps'],
app => {
(app) => {
app = _.defaults(_.clone(app), {
config: undefined,
environment: undefined,

View File

@ -83,7 +83,7 @@ export class LocalModeManager {
public async init() {
// Setup a listener to catch state changes relating to local mode
this.config.on('change', changed => {
this.config.on('change', (changed) => {
if (changed.localMode != null) {
const local = changed.localMode || false;
@ -125,16 +125,16 @@ export class LocalModeManager {
public async collectEngineSnapshot(): Promise<EngineSnapshotRecord> {
const containersPromise = this.docker
.listContainers()
.then(resp => _.map(resp, 'Id'));
.then((resp) => _.map(resp, 'Id'));
const imagesPromise = this.docker
.listImages()
.then(resp => _.map(resp, 'Id'));
.then((resp) => _.map(resp, 'Id'));
const volumesPromise = this.docker
.listVolumes()
.then(resp => _.map(resp.Volumes, 'Name'));
.then((resp) => _.map(resp.Volumes, 'Name'));
const networksPromise = this.docker
.listNetworks()
.then(resp => _.map(resp, 'Id'));
.then((resp) => _.map(resp, 'Id'));
const [containers, images, volumes, networks] = await Bluebird.all([
containersPromise,
@ -155,8 +155,8 @@ export class LocalModeManager {
return new EngineSnapshot(
[inspectInfo.Id],
[inspectInfo.Image],
inspectInfo.Mounts.filter(m => m.Name != null).map(m => m.Name!),
_.map(inspectInfo.NetworkSettings.Networks, n => n.NetworkID),
inspectInfo.Mounts.filter((m) => m.Name != null).map((m) => m.Name!),
_.map(inspectInfo.NetworkSettings.Networks, (n) => n.NetworkID),
);
}
@ -237,29 +237,29 @@ export class LocalModeManager {
log.debug(`Going to delete the following objects: ${objects}`);
// Delete engine objects. We catch every deletion error, so that we can attempt other objects deletions.
await Bluebird.map(objects.containers, cId => {
await Bluebird.map(objects.containers, (cId) => {
return this.docker
.getContainer(cId)
.remove({ force: true })
.catch(e => log.error(`Unable to delete container ${cId}`, e));
.catch((e) => log.error(`Unable to delete container ${cId}`, e));
});
await Bluebird.map(objects.images, iId => {
await Bluebird.map(objects.images, (iId) => {
return this.docker
.getImage(iId)
.remove({ force: true })
.catch(e => log.error(`Unable to delete image ${iId}`, e));
.catch((e) => log.error(`Unable to delete image ${iId}`, e));
});
await Bluebird.map(objects.networks, nId => {
await Bluebird.map(objects.networks, (nId) => {
return this.docker
.getNetwork(nId)
.remove()
.catch(e => log.error(`Unable to delete network ${nId}`, e));
.catch((e) => log.error(`Unable to delete network ${nId}`, e));
});
await Bluebird.map(objects.volumes, vId => {
await Bluebird.map(objects.volumes, (vId) => {
return this.docker
.getVolume(vId)
.remove()
.catch(e => log.error(`Unable to delete volume ${vId}`, e));
.catch((e) => log.error(`Unable to delete volume ${vId}`, e));
});
// Remove any local mode state added to the database.
@ -267,7 +267,7 @@ export class LocalModeManager {
.models('app')
.del()
.where({ source: 'local' })
.catch(e =>
.catch((e) =>
log.error('Cannot delete local app entries in the database', e),
);
}

View File

@ -158,7 +158,7 @@ export class Logger {
}
public lock(containerId: string): Bluebird.Disposer<() => void> {
return writeLock(containerId).disposer(release => {
return writeLock(containerId).disposer((release) => {
release();
});
}
@ -177,11 +177,11 @@ export class Logger {
return Bluebird.using(this.lock(containerId), async () => {
const logs = new ContainerLogs(containerId, docker);
this.containerLogs[containerId] = logs;
logs.on('error', err => {
logs.on('error', (err) => {
log.error('Container log retrieval error', err);
delete this.containerLogs[containerId];
});
logs.on('log', async logMessage => {
logs.on('log', async (logMessage) => {
this.log(_.merge({}, serviceInfo, logMessage));
// Take the timestamp and set it in the database as the last

View File

@ -124,7 +124,7 @@ export class BalenaLogBackend extends LogBackend {
// Since we haven't sent the request body yet, and never will,the
// only reason for the server to prematurely respond is to
// communicate an error. So teardown the connection immediately
this.req.on('response', res => {
this.req.on('response', (res) => {
log.error(
'LogBackend: server responded with status code:',
res.statusCode,
@ -134,7 +134,7 @@ export class BalenaLogBackend extends LogBackend {
this.req.on('timeout', () => this.teardown());
this.req.on('close', () => this.teardown());
this.req.on('error', err => {
this.req.on('error', (err) => {
log.error('LogBackend: unexpected error:', err);
this.teardown();
});

View File

@ -43,7 +43,7 @@ export class ContainerLogs extends (EventEmitter as new () => LogsEventEmitter)
[stderrStream, false],
].forEach(([stream, isStdout]: [Stream.Readable, boolean]) => {
stream
.on('error', err => {
.on('error', (err) => {
this.emit(
'error',
new Error(`Error on container logs: ${err} ${err.stack}`),
@ -59,7 +59,7 @@ export class ContainerLogs extends (EventEmitter as new () => LogsEventEmitter)
this.emit('log', { isStdout, ...logMsg });
}
})
.on('error', err => {
.on('error', (err) => {
this.emit(
'error',
new Error(`Error on container logs: ${err} ${err.stack}`),
@ -76,7 +76,7 @@ export class ContainerLogs extends (EventEmitter as new () => LogsEventEmitter)
// https://docs.docker.com/engine/api/v1.30/#operation/ContainerAttach
if (
_.includes([0, 1, 2], msgBuf[0]) &&
_.every(msgBuf.slice(1, 7), c => c === 0)
_.every(msgBuf.slice(1, 7), (c) => c === 0)
) {
// Take the header from this message, and parse it as normal
msgBuf = msgBuf.slice(8);

View File

@ -44,12 +44,12 @@ export class LocalLogBackend extends LogBackend {
})
.then((msg: LogMessage | null) => {
if (msg != null) {
_.each(this.globalListeners, listener => {
_.each(this.globalListeners, (listener) => {
listener.push(`${JSON.stringify(msg)}\n`);
});
}
})
.catch(e => {
.catch((e) => {
log.error('Error streaming local log output:', e);
});
}

View File

@ -7,27 +7,31 @@
// a few dropColumn and dropTable calls to delete things that were removed throughout the supervisor's
// history without actually adding drop statements (mostly just becoming unused, but still there).
exports.up = function(knex) {
const addColumn = function(table, column, type) {
return knex.schema.hasColumn(table, column).then(exists => {
exports.up = function (knex) {
const addColumn = function (table, column, type) {
return knex.schema.hasColumn(table, column).then((exists) => {
if (!exists) {
return knex.schema.table(table, t => {
return knex.schema.table(table, (t) => {
return t[type](column);
});
}
});
};
const dropColumn = function(table, column) {
return knex.schema.hasColumn(table, column).then(exists => {
const dropColumn = function (table, column) {
return knex.schema.hasColumn(table, column).then((exists) => {
if (exists) {
return knex.schema.table(table, t => {
return knex.schema.table(table, (t) => {
return t.dropColumn(column);
});
}
});
};
const createTableOrRun = function(tableName, tableCreator, runIfTableExists) {
return knex.schema.hasTable(tableName).then(exists => {
const createTableOrRun = function (
tableName,
tableCreator,
runIfTableExists,
) {
return knex.schema.hasTable(tableName).then((exists) => {
if (!exists) {
return knex.schema.createTable(tableName, tableCreator);
} else if (runIfTableExists != null) {
@ -35,25 +39,25 @@ exports.up = function(knex) {
}
});
};
const dropTable = function(tableName) {
return knex.schema.hasTable(tableName).then(exists => {
const dropTable = function (tableName) {
return knex.schema.hasTable(tableName).then((exists) => {
if (exists) {
return knex.schema.dropTable(tableName);
}
});
};
return Promise.all([
createTableOrRun('config', t => {
createTableOrRun('config', (t) => {
t.string('key').primary();
t.string('value');
}),
createTableOrRun('deviceConfig', t => {
createTableOrRun('deviceConfig', (t) => {
t.json('values');
t.json('targetValues');
}).then(() => {
return knex('deviceConfig')
.select()
.then(deviceConfigs => {
.then((deviceConfigs) => {
if (deviceConfigs.length === 0) {
return knex('deviceConfig').insert({
values: '{}',
@ -64,7 +68,7 @@ exports.up = function(knex) {
}),
createTableOrRun(
'app',
t => {
(t) => {
t.increments('id').primary();
t.string('name');
t.string('containerName');
@ -99,7 +103,7 @@ exports.up = function(knex) {
),
createTableOrRun(
'dependentApp',
t => {
(t) => {
t.increments('id').primary();
t.string('appId');
t.string('parentAppId');
@ -115,7 +119,7 @@ exports.up = function(knex) {
),
createTableOrRun(
'dependentDevice',
t => {
(t) => {
t.increments('id').primary();
t.string('uuid');
t.string('appId');
@ -151,6 +155,6 @@ exports.up = function(knex) {
]);
};
exports.down = function() {
exports.down = function () {
return Promise.reject(new Error('Not implemented'));
};

View File

@ -1,7 +1,7 @@
const Bluebird = require('bluebird');
const _ = require('lodash');
var tryParse = function(obj) {
var tryParse = function (obj) {
try {
return JSON.parse(obj);
} catch (e) {
@ -9,7 +9,7 @@ var tryParse = function(obj) {
}
};
var singleToMulticontainerApp = function(app) {
var singleToMulticontainerApp = function (app) {
// From *very* old supervisors, env or config may be null
// so we ignore errors parsing them
const conf = tryParse(app.config);
@ -68,7 +68,7 @@ var singleToMulticontainerApp = function(app) {
return newApp;
};
var jsonifyAppFields = function(app) {
var jsonifyAppFields = function (app) {
const newApp = _.clone(app);
newApp.services = JSON.stringify(app.services);
newApp.networks = JSON.stringify(app.networks);
@ -76,7 +76,7 @@ var jsonifyAppFields = function(app) {
return newApp;
};
var imageForApp = function(app) {
var imageForApp = function (app) {
const service = app.services[0];
return {
name: service.image,
@ -89,7 +89,7 @@ var imageForApp = function(app) {
};
};
var imageForDependentApp = function(app) {
var imageForDependentApp = function (app) {
return {
name: app.image,
appId: app.appId,
@ -101,9 +101,9 @@ var imageForDependentApp = function(app) {
};
};
exports.up = function(knex) {
exports.up = function (knex) {
return Bluebird.resolve(
knex.schema.createTable('image', t => {
knex.schema.createTable('image', (t) => {
t.increments('id').primary();
t.string('name');
t.integer('appId');
@ -120,7 +120,7 @@ exports.up = function(knex) {
.whereNot({ markedForDeletion: true })
.orWhereNull('markedForDeletion'),
)
.tap(apps => {
.tap((apps) => {
if (apps.length > 0) {
return knex('config').insert({
key: 'legacyAppsPresent',
@ -132,7 +132,7 @@ exports.up = function(knex) {
// We're in a transaction, and it's easier to drop and recreate
// than to migrate each field...
return knex.schema.dropTable('app').then(() => {
return knex.schema.createTable('app', t => {
return knex.schema.createTable('app', (t) => {
t.increments('id').primary();
t.string('name');
t.integer('releaseId');
@ -144,7 +144,7 @@ exports.up = function(knex) {
});
});
})
.map(app => {
.map((app) => {
const migratedApp = singleToMulticontainerApp(app);
return knex('app')
.insert(jsonifyAppFields(migratedApp))
@ -157,7 +157,7 @@ exports.up = function(knex) {
// to the config table.
return knex('deviceConfig')
.select()
.then(deviceConf => {
.then((deviceConf) => {
return knex.schema.dropTable('deviceConfig').then(() => {
const values = JSON.parse(deviceConf[0].values);
const configKeys = {
@ -172,7 +172,7 @@ exports.up = function(knex) {
RESIN_SUPERVISOR_DELTA_RETRY_INTERVAL: 'deltaRequestTimeout',
RESIN_SUPERVISOR_OVERRIDE_LOCK: 'lockOverride',
};
return Bluebird.map(Object.keys(values), envVarName => {
return Bluebird.map(Object.keys(values), (envVarName) => {
if (configKeys[envVarName] != null) {
return knex('config').insert({
key: configKeys[envVarName],
@ -183,18 +183,18 @@ exports.up = function(knex) {
});
})
.then(() => {
return knex.schema.createTable('deviceConfig', t => {
return knex.schema.createTable('deviceConfig', (t) => {
t.json('targetValues');
});
})
.then(() => knex('deviceConfig').insert({ targetValues: '{}' }));
})
.then(() => knex('dependentApp').select())
.then(dependentApps => {
.then((dependentApps) => {
return knex.schema
.dropTable('dependentApp')
.then(() => {
return knex.schema.createTable('dependentApp', t => {
return knex.schema.createTable('dependentApp', (t) => {
t.increments('id').primary();
t.integer('appId');
t.integer('parentApp');
@ -208,7 +208,7 @@ exports.up = function(knex) {
});
})
.then(() => {
return knex.schema.createTable('dependentAppTarget', t => {
return knex.schema.createTable('dependentAppTarget', (t) => {
t.increments('id').primary();
t.integer('appId');
t.integer('parentApp');
@ -222,7 +222,7 @@ exports.up = function(knex) {
});
})
.then(() => {
return Bluebird.map(dependentApps, app => {
return Bluebird.map(dependentApps, (app) => {
const newApp = {
appId: parseInt(app.appId, 10),
parentApp: parseInt(app.parentAppId, 10),
@ -242,11 +242,11 @@ exports.up = function(knex) {
});
})
.then(() => knex('dependentDevice').select())
.then(dependentDevices => {
.then((dependentDevices) => {
return knex.schema
.dropTable('dependentDevice')
.then(() => {
return knex.schema.createTable('dependentDevice', t => {
return knex.schema.createTable('dependentDevice', (t) => {
t.increments('id').primary();
t.string('uuid');
t.integer('appId');
@ -270,7 +270,7 @@ exports.up = function(knex) {
});
})
.then(() => {
return knex.schema.createTable('dependentDeviceTarget', t => {
return knex.schema.createTable('dependentDeviceTarget', (t) => {
t.increments('id').primary();
t.string('uuid');
t.string('name');
@ -278,7 +278,7 @@ exports.up = function(knex) {
});
})
.then(() => {
return Bluebird.map(dependentDevices, device => {
return Bluebird.map(dependentDevices, (device) => {
const newDevice = _.clone(device);
newDevice.appId = parseInt(device.appId, 10);
newDevice.deviceId = parseInt(device.deviceId, 10);
@ -316,6 +316,6 @@ exports.up = function(knex) {
});
};
exports.down = function() {
exports.down = function () {
return Promise.reject(new Error('Not implemented'));
};

View File

@ -1,10 +1,10 @@
// Adds a dockerImageId column to the image table to identify images downloaded with deltas
exports.up = function(knex) {
return knex.schema.table('image', t => {
exports.up = function (knex) {
return knex.schema.table('image', (t) => {
t.string('dockerImageId');
});
};
exports.down = function() {
exports.down = function () {
return Promise.reject(new Error('Not implemented'));
};

View File

@ -1,8 +1,8 @@
const fs = require('fs');
const configJsonPath = process.env.CONFIG_MOUNT_POINT;
exports.up = function(knex) {
return new Promise(resolve => {
exports.up = function (knex) {
return new Promise((resolve) => {
if (!configJsonPath) {
console.log(
'Unable to locate config.json! Things may fail unexpectedly!',
@ -27,9 +27,9 @@ exports.up = function(knex) {
resolve({});
}
});
}).then(config => {
}).then((config) => {
return knex.schema
.table('app', t => {
.table('app', (t) => {
// Create a new column on the table and add the apiEndpoint config json
// field if it exists
t.string('source');
@ -40,6 +40,6 @@ exports.up = function(knex) {
});
};
exports.down = function() {
exports.down = function () {
return Promise.reject(new Error('Not Implemented'));
};

View File

@ -2,8 +2,8 @@ const Bluebird = require('bluebird');
const fs = require('fs');
const configJsonPath = process.env.CONFIG_MOUNT_POINT;
exports.up = function(knex) {
return new Bluebird(resolve => {
exports.up = function (knex) {
return new Bluebird((resolve) => {
if (!configJsonPath) {
console.log(
'Unable to locate config.json! Things may fail unexpectedly!',
@ -33,20 +33,20 @@ exports.up = function(knex) {
.tap(() => {
// take the logsChannelSecret, and the apiEndpoint config field,
// and store them in a new table
return knex.schema.hasTable('logsChannelSecret').then(exists => {
return knex.schema.hasTable('logsChannelSecret').then((exists) => {
if (!exists) {
return knex.schema.createTable('logsChannelSecret', t => {
return knex.schema.createTable('logsChannelSecret', (t) => {
t.string('backend');
t.string('secret');
});
}
});
})
.then(config => {
.then((config) => {
return knex('config')
.where({ key: 'logsChannelSecret' })
.select('value')
.then(results => {
.then((results) => {
if (results.length === 0) {
return { config, secret: null };
}
@ -60,12 +60,10 @@ exports.up = function(knex) {
});
})
.then(() => {
return knex('config')
.where('key', 'logsChannelSecret')
.del();
return knex('config').where('key', 'logsChannelSecret').del();
});
};
exports.down = function() {
exports.down = function () {
return Promise.reject(new Error('Not Implemented'));
};

View File

@ -1,5 +1,5 @@
exports.up = knex => {
return knex.schema.createTable('engineSnapshot', t => {
exports.up = (knex) => {
return knex.schema.createTable('engineSnapshot', (t) => {
t.string('snapshot'); // Engine snapshot encoded as JSON.
t.string('timestamp'); // When the snapshot was created.
});

View File

@ -3,10 +3,10 @@ const _ = require('lodash');
// We take legacy deviceConfig targets and store them without the RESIN_ prefix
// (we also strip the BALENA_ prefix for completeness, even though no supervisors
// using this prefix made it to production)
exports.up = function(knex) {
exports.up = function (knex) {
return knex('deviceConfig')
.select('targetValues')
.then(devConfigs => {
.then((devConfigs) => {
const devConfig = devConfigs[0];
const targetValues = JSON.parse(devConfig.targetValues);
const filteredTargetValues = _.mapKeys(targetValues, (_v, k) => {
@ -18,6 +18,6 @@ exports.up = function(knex) {
});
};
exports.down = function() {
exports.down = function () {
return Promise.reject(new Error('Not Implemented'));
};

View File

@ -3,11 +3,11 @@ const configJsonPath = process.env.CONFIG_MOUNT_POINT;
const { checkTruthy } = require('../lib/validation');
exports.up = function(knex) {
exports.up = function (knex) {
return knex('config')
.where({ key: 'localMode' })
.select('value')
.then(results => {
.then((results) => {
if (results.length === 0) {
// We don't need to do anything
return;
@ -16,7 +16,7 @@ exports.up = function(knex) {
let value = checkTruthy(results[0].value);
value = value != null ? value : false;
return new Promise(resolve => {
return new Promise((resolve) => {
if (!configJsonPath) {
console.log(
'Unable to locate config.json! Things may fail unexpectedly!',
@ -37,7 +37,7 @@ exports.up = function(knex) {
// Assign the local mode value
parsed.localMode = value;
fs.writeFile(configJsonPath, JSON.stringify(parsed), err2 => {
fs.writeFile(configJsonPath, JSON.stringify(parsed), (err2) => {
if (err2) {
console.log(
'Failed to write config.json! Things may fail unexpectedly!',
@ -54,13 +54,11 @@ exports.up = function(knex) {
}
});
}).then(() => {
return knex('config')
.where('key', 'localMode')
.del();
return knex('config').where('key', 'localMode').del();
});
});
};
exports.down = function() {
exports.down = function () {
return Promise.reject(new Error('Not Implemented'));
};

View File

@ -3,8 +3,8 @@ const configJsonPath = process.env.CONFIG_MOUNT_POINT;
const { checkTruthy } = require('../lib/validation');
exports.up = function(knex) {
return new Promise(resolve => {
exports.up = function (knex) {
return new Promise((resolve) => {
if (!configJsonPath) {
console.log(
'Unable to locate config.json! Things may fail unexpectedly!',
@ -32,7 +32,7 @@ exports.up = function(knex) {
return resolve(false);
}
});
}).then(localMode => {
}).then((localMode) => {
// We can be sure that this does not already exist in the db because of the previous
// migration
return knex('config').insert({
@ -42,6 +42,6 @@ exports.up = function(knex) {
});
};
exports.down = function() {
exports.down = function () {
return Promise.reject(new Error('Not Implemented'));
};

View File

@ -1,4 +1,4 @@
exports.up = function(knex) {
exports.up = function (knex) {
return knex('deviceConfig')
.select('targetValues')
.then(([target]) => {
@ -16,6 +16,6 @@ exports.up = function(knex) {
});
};
exports.down = function() {
exports.down = function () {
return Promise.reject(new Error('Not Implemented'));
};

View File

@ -1,10 +1,10 @@
exports.up = function(knex) {
return knex.schema.createTable('containerLogs', table => {
exports.up = function (knex) {
return knex.schema.createTable('containerLogs', (table) => {
table.string('containerId');
table.integer('lastSentTimestamp');
});
};
exports.down = function() {
exports.down = function () {
return Promise.reject(new Error('Not Implemented'));
};

View File

@ -89,7 +89,7 @@ export const startConnectivityCheck = _.once(
path: parsedUrl.path || '/',
interval: 10 * 1000,
},
connected => {
(connected) => {
onChangeCallback?.(connected);
if (connected) {
log.info('Internet Connectivity: OK');
@ -127,7 +127,7 @@ export function getIPAddresses(): string[] {
// - custom docker network bridges (br- + 12 hex characters)
return _(os.networkInterfaces())
.omitBy((_interfaceFields, interfaceName) => IP_REGEX.test(interfaceName))
.flatMap(validInterfaces => {
.flatMap((validInterfaces) => {
return _(validInterfaces)
.pickBy({ family: 'IPv4' })
.map('address')
@ -144,11 +144,7 @@ export function startIPAddressUpdate(): (
return (cb, interval) => {
const getAndReportIP = () => {
const ips = getIPAddresses();
if (
!_(ips)
.xor(lastIPValues)
.isEmpty()
) {
if (!_(ips).xor(lastIPValues).isEmpty()) {
lastIPValues = ips;
cb(ips);
}

View File

@ -20,7 +20,7 @@ const mkdirpAsync = Promise.promisify(mkdirp);
const isDefined = _.negate(_.isUndefined);
const parseDeviceFields = function(device) {
const parseDeviceFields = function (device) {
device.id = parseInt(device.deviceId, 10);
device.appId = parseInt(device.appId, 10);
device.config = JSON.parse(device.config ?? '{}');
@ -30,7 +30,7 @@ const parseDeviceFields = function(device) {
return _.omit(device, 'markedForDeletion', 'logs_channel');
};
const tarDirectory = appId => `/data/dependent-assets/${appId}`;
const tarDirectory = (appId) => `/data/dependent-assets/${appId}`;
const tarFilename = (appId, commit) => `${appId}-${commit}.tar`;
@ -46,7 +46,7 @@ const getTarArchive = (source, destination) =>
),
);
const cleanupTars = function(appId, commit) {
const cleanupTars = function (appId, commit) {
let fileToKeep;
if (commit != null) {
fileToKeep = tarFilename(appId, commit);
@ -57,29 +57,29 @@ const cleanupTars = function(appId, commit) {
return fs
.readdir(dir)
.catch(() => [])
.then(function(files) {
.then(function (files) {
if (fileToKeep != null) {
files = _.reject(files, fileToKeep);
}
return Promise.map(files, file => fs.unlink(path.join(dir, file)));
return Promise.map(files, (file) => fs.unlink(path.join(dir, file)));
});
};
const formatTargetAsState = device => ({
const formatTargetAsState = (device) => ({
appId: parseInt(device.appId, 10),
commit: device.targetCommit,
environment: device.targetEnvironment,
config: device.targetConfig,
});
const formatCurrentAsState = device => ({
const formatCurrentAsState = (device) => ({
appId: parseInt(device.appId, 10),
commit: device.commit,
environment: device.environment,
config: device.config,
});
const createProxyvisorRouter = function(proxyvisor) {
const createProxyvisorRouter = function (proxyvisor) {
const { db } = proxyvisor;
const router = express.Router();
router.use(bodyParser.urlencoded({ limit: '10mb', extended: true }));
@ -89,13 +89,13 @@ const createProxyvisorRouter = function(proxyvisor) {
.models('dependentDevice')
.select()
.map(parseDeviceFields)
.then(devices => res.json(devices))
.catch(err =>
.then((devices) => res.json(devices))
.catch((err) =>
res.status(503).send(err?.message || err || 'Unknown error'),
),
);
router.post('/v1/devices', function(req, res) {
router.post('/v1/devices', function (req, res) {
let { appId, device_type } = req.body;
if (
@ -115,7 +115,7 @@ const createProxyvisorRouter = function(proxyvisor) {
};
return proxyvisor.apiBinder
.provisionDependentDevice(d)
.then(function(dev) {
.then(function (dev) {
// If the response has id: null then something was wrong in the request
// but we don't know precisely what.
if (dev.id == null) {
@ -137,19 +137,19 @@ const createProxyvisorRouter = function(proxyvisor) {
.insert(deviceForDB)
.then(() => res.status(201).send(dev));
})
.catch(function(err) {
.catch(function (err) {
log.error(`Error on ${req.method} ${url.parse(req.url).pathname}`, err);
return res.status(503).send(err?.message || err || 'Unknown error');
});
});
router.get('/v1/devices/:uuid', function(req, res) {
router.get('/v1/devices/:uuid', function (req, res) {
const { uuid } = req.params;
return db
.models('dependentDevice')
.select()
.where({ uuid })
.then(function([device]) {
.then(function ([device]) {
if (device == null) {
return res.status(404).send('Device not found');
}
@ -158,13 +158,13 @@ const createProxyvisorRouter = function(proxyvisor) {
}
return res.json(parseDeviceFields(device));
})
.catch(function(err) {
.catch(function (err) {
log.error(`Error on ${req.method} ${url.parse(req.url).pathname}`, err);
return res.status(503).send(err?.message || err || 'Unknown error');
});
});
router.post('/v1/devices/:uuid/logs', function(req, res) {
router.post('/v1/devices/:uuid/logs', function (req, res) {
const { uuid } = req.params;
const m = {
message: req.body.message,
@ -178,7 +178,7 @@ const createProxyvisorRouter = function(proxyvisor) {
.models('dependentDevice')
.select()
.where({ uuid })
.then(function([device]) {
.then(function ([device]) {
if (device == null) {
return res.status(404).send('Device not found');
}
@ -188,13 +188,13 @@ const createProxyvisorRouter = function(proxyvisor) {
proxyvisor.logger.logDependent(m, uuid);
return res.status(202).send('OK');
})
.catch(function(err) {
.catch(function (err) {
log.error(`Error on ${req.method} ${url.parse(req.url).pathname}`, err);
return res.status(503).send(err?.message || err || 'Unknown error');
});
});
router.put('/v1/devices/:uuid', function(req, res) {
router.put('/v1/devices/:uuid', function (req, res) {
const { uuid } = req.params;
let {
status,
@ -204,7 +204,7 @@ const createProxyvisorRouter = function(proxyvisor) {
environment,
config,
} = req.body;
const validateDeviceFields = function() {
const validateDeviceFields = function () {
if (isDefined(is_online) && !_.isBoolean(is_online)) {
return 'is_online must be a boolean';
}
@ -262,7 +262,7 @@ const createProxyvisorRouter = function(proxyvisor) {
.models('dependentDevice')
.select()
.where({ uuid })
.then(function([device]) {
.then(function ([device]) {
if (device == null) {
return res.status(404).send('Device not found');
}
@ -272,7 +272,7 @@ const createProxyvisorRouter = function(proxyvisor) {
if (device.deviceId == null) {
throw new Error('Device is invalid');
}
return Promise.try(function() {
return Promise.try(function () {
if (!_.isEmpty(fieldsToUpdateOnAPI)) {
return proxyvisor.apiBinder.patchDevice(
device.deviceId,
@ -286,17 +286,12 @@ const createProxyvisorRouter = function(proxyvisor) {
.update(fieldsToUpdateOnDB)
.where({ uuid }),
)
.then(() =>
db
.models('dependentDevice')
.select()
.where({ uuid }),
)
.then(function([dbDevice]) {
.then(() => db.models('dependentDevice').select().where({ uuid }))
.then(function ([dbDevice]) {
return res.json(parseDeviceFields(dbDevice));
});
})
.catch(function(err) {
.catch(function (err) {
log.error(`Error on ${req.method} ${url.parse(req.url).pathname}`, err);
return res.status(503).send(err?.message || err || 'Unknown error');
});
@ -307,7 +302,7 @@ const createProxyvisorRouter = function(proxyvisor) {
.models('dependentApp')
.select()
.where(_.pick(req.params, 'appId', 'commit'))
.then(function([app]) {
.then(function ([app]) {
if (!app) {
return res.status(404).send('Not found');
}
@ -317,12 +312,12 @@ const createProxyvisorRouter = function(proxyvisor) {
.catch(() =>
Promise.using(
proxyvisor.docker.imageRootDirMounted(app.image),
rootDir => getTarArchive(rootDir + '/assets', dest),
(rootDir) => getTarArchive(rootDir + '/assets', dest),
),
)
.then(() => res.sendFile(dest));
})
.catch(function(err) {
.catch(function (err) {
log.error(`Error on ${req.method} ${url.parse(req.url).pathname}`, err);
return res.status(503).send(err?.message || err || 'Unknown error');
}),
@ -332,14 +327,14 @@ const createProxyvisorRouter = function(proxyvisor) {
db
.models('dependentApp')
.select()
.map(app => ({
.map((app) => ({
id: parseInt(app.appId, 10),
commit: app.commit,
name: app.name,
config: JSON.parse(app.config ?? '{}'),
}))
.then(apps => res.json(apps))
.catch(function(err) {
.then((apps) => res.json(apps))
.catch(function (err) {
log.error(`Error on ${req.method} ${url.parse(req.url).pathname}`, err);
return res.status(503).send(err?.message || err || 'Unknown error');
}),
@ -375,7 +370,7 @@ export class Proxyvisor {
this.lastRequestForDevice = {};
this.router = createProxyvisorRouter(this);
this.actionExecutors = {
updateDependentTargets: step => {
updateDependentTargets: (step) => {
return this.config
.getMany(['currentApiKey', 'apiTimeout'])
.then(({ currentApiKey, apiTimeout }) => {
@ -383,12 +378,10 @@ export class Proxyvisor {
// - if update returns 0, then use APIBinder to fetch the device, then store it to the db
// - set markedForDeletion: true for devices that are not in the step.devices list
// - update dependentApp with step.app
return Promise.map(step.devices, device => {
return Promise.map(step.devices, (device) => {
const { uuid } = device;
// Only consider one app per dependent device for now
const appId = _(device.apps)
.keys()
.head();
const appId = _(device.apps).keys().head();
if (appId == null) {
throw new Error(
'Could not find an app for the dependent device',
@ -409,7 +402,7 @@ export class Proxyvisor {
name: device.name,
})
.where({ uuid })
.then(n => {
.then((n) => {
if (n !== 0) {
return;
}
@ -417,7 +410,7 @@ export class Proxyvisor {
// so we need to fetch it.
return this.apiBinder
.fetchDevice(uuid, currentApiKey, apiTimeout)
.then(dev => {
.then((dev) => {
const deviceForDB = {
uuid,
appId,
@ -446,7 +439,7 @@ export class Proxyvisor {
.then(() => {
return this.normaliseDependentAppForDB(step.app);
})
.then(appForDB => {
.then((appForDB) => {
return this.db.upsertModel('dependentApp', appForDB, {
appId: step.appId,
});
@ -455,12 +448,12 @@ export class Proxyvisor {
});
},
sendDependentHooks: step => {
sendDependentHooks: (step) => {
return Promise.join(
this.config.get('apiTimeout'),
this.getHookEndpoint(step.appId),
(apiTimeout, endpoint) => {
return Promise.mapSeries(step.devices, device => {
return Promise.mapSeries(step.devices, (device) => {
return Promise.try(() => {
if (this.lastRequestForDevice[device.uuid] != null) {
const diff =
@ -482,17 +475,15 @@ export class Proxyvisor {
);
},
removeDependentApp: step => {
removeDependentApp: (step) => {
// find step.app and delete it from the DB
// find devices with step.appId and delete them from the DB
return this.db.transaction(trx =>
return this.db.transaction((trx) =>
trx('dependentApp')
.where({ appId: step.appId })
.del()
.then(() =>
trx('dependentDevice')
.where({ appId: step.appId })
.del(),
trx('dependentDevice').where({ appId: step.appId }).del(),
)
.then(() => cleanupTars(step.appId)),
);
@ -522,8 +513,8 @@ export class Proxyvisor {
this.normaliseDependentAppFromDB,
),
this.db.models('dependentDevice').select(),
function(apps, devicesFromDB) {
const devices = _.map(devicesFromDB, function(device) {
function (apps, devicesFromDB) {
const devices = _.map(devicesFromDB, function (device) {
const dev = {
uuid: device.uuid,
name: device.name,
@ -568,8 +559,8 @@ export class Proxyvisor {
}
normaliseDependentDeviceTargetForDB(device, appCommit) {
return Promise.try(function() {
const apps = _.mapValues(_.clone(device.apps ?? {}), function(app) {
return Promise.try(function () {
const apps = _.mapValues(_.clone(device.apps ?? {}), function (app) {
app.commit = appCommit || null;
if (app.config == null) {
app.config = {};
@ -591,14 +582,14 @@ export class Proxyvisor {
setTargetInTransaction(dependent, trx) {
return Promise.try(() => {
if (dependent?.apps != null) {
const appsArray = _.map(dependent.apps, function(app, appId) {
const appsArray = _.map(dependent.apps, function (app, appId) {
const appClone = _.clone(app);
appClone.appId = checkInt(appId);
return appClone;
});
return Promise.map(appsArray, this.normaliseDependentAppForDB)
.tap(appsForDB => {
return Promise.map(appsForDB, app => {
.tap((appsForDB) => {
return Promise.map(appsForDB, (app) => {
return this.db.upsertModel(
'dependentAppTarget',
app,
@ -607,7 +598,7 @@ export class Proxyvisor {
);
});
})
.then(appsForDB =>
.then((appsForDB) =>
trx('dependentAppTarget')
.whereNotIn('appId', _.map(appsForDB, 'appId'))
.del(),
@ -615,19 +606,19 @@ export class Proxyvisor {
}
}).then(() => {
if (dependent?.devices != null) {
const devicesArray = _.map(dependent.devices, function(dev, uuid) {
const devicesArray = _.map(dependent.devices, function (dev, uuid) {
const devClone = _.clone(dev);
devClone.uuid = uuid;
return devClone;
});
return Promise.map(devicesArray, device => {
return Promise.map(devicesArray, (device) => {
const appId = _.keys(device.apps)[0];
return this.normaliseDependentDeviceTargetForDB(
device,
dependent.apps[appId]?.commit,
);
}).then(devicesForDB => {
return Promise.map(devicesForDB, device => {
}).then((devicesForDB) => {
return Promise.map(devicesForDB, (device) => {
return this.db.upsertModel(
'dependentDeviceTarget',
device,
@ -645,7 +636,7 @@ export class Proxyvisor {
}
normaliseDependentAppFromDB(app) {
return Promise.try(function() {
return Promise.try(function () {
const outApp = {
appId: app.appId,
name: app.name,
@ -662,11 +653,11 @@ export class Proxyvisor {
}
normaliseDependentDeviceTargetFromDB(device) {
return Promise.try(function() {
return Promise.try(function () {
const outDevice = {
uuid: device.uuid,
name: device.name,
apps: _.mapValues(JSON.parse(device.apps), function(a) {
apps: _.mapValues(JSON.parse(device.apps), function (a) {
if (a.commit == null) {
a.commit = null;
}
@ -678,7 +669,7 @@ export class Proxyvisor {
}
normaliseDependentDeviceFromDB(device) {
return Promise.try(function() {
return Promise.try(function () {
const outDevice = _.clone(device);
for (const prop of [
'environment',
@ -766,7 +757,7 @@ export class Proxyvisor {
}
_compareDevices(currentDevices, targetDevices, appId) {
let currentDeviceTargets = _.map(currentDevices, function(dev) {
let currentDeviceTargets = _.map(currentDevices, function (dev) {
if (dev.markedForDeletion) {
return null;
}
@ -783,7 +774,7 @@ export class Proxyvisor {
});
currentDeviceTargets = _.filter(
currentDeviceTargets,
dev => !_.isNull(dev),
(dev) => !_.isNull(dev),
);
return !_.isEmpty(
_.xorWith(currentDeviceTargets, targetDevices, _.isEqual),
@ -819,7 +810,7 @@ export class Proxyvisor {
];
}
if (_.some(stepsInProgress, step => step.appId === target.parentApp)) {
if (_.some(stepsInProgress, (step) => step.appId === target.parentApp)) {
return [{ action: 'noop' }];
}
@ -886,8 +877,8 @@ export class Proxyvisor {
let steps = [];
for (const appId of allAppIds) {
const devicesForApp = devices =>
_.filter(devices, d => _.has(d.apps, appId));
const devicesForApp = (devices) =>
_.filter(devices, (d) => _.has(d.apps, appId));
const currentDevices = devicesForApp(current.dependent.devices);
const targetDevices = devicesForApp(target.dependent.devices);
@ -915,13 +906,13 @@ export class Proxyvisor {
.then(([{ parentApp }]) => {
return this.applications.getTargetApp(parentApp);
})
.then(parentApp => {
return Promise.map(parentApp?.services ?? [], service => {
.then((parentApp) => {
return Promise.map(parentApp?.services ?? [], (service) => {
return this.docker.getImageEnv(service.image);
}).then(function(imageEnvs) {
}).then(function (imageEnvs) {
const imageHookAddresses = _.map(
imageEnvs,
env =>
(env) =>
env.BALENA_DEPENDENT_DEVICES_HOOK_ADDRESS ??
env.RESIN_DEPENDENT_DEVICES_HOOK_ADDRESS,
);
@ -941,7 +932,7 @@ export class Proxyvisor {
sendUpdate(device, timeout, endpoint) {
return Promise.resolve(request.getRequestInstance())
.then(instance =>
.then((instance) =>
instance.putAsync(`${endpoint}${device.uuid}`, {
json: true,
body: device.target,
@ -958,42 +949,36 @@ export class Proxyvisor {
}
}
})
.catch(err => log.error(`Error updating device ${device.uuid}`, err));
.catch((err) => log.error(`Error updating device ${device.uuid}`, err));
}
sendDeleteHook({ uuid }, timeout, endpoint) {
return Promise.resolve(request.getRequestInstance())
.then(instance => instance.delAsync(`${endpoint}${uuid}`))
.then((instance) => instance.delAsync(`${endpoint}${uuid}`))
.timeout(timeout)
.spread((response, body) => {
if (response.statusCode === 200) {
return this.db
.models('dependentDevice')
.del()
.where({ uuid });
return this.db.models('dependentDevice').del().where({ uuid });
} else {
throw new Error(`Hook returned ${response.statusCode}: ${body}`);
}
})
.catch(err => log.error(`Error deleting device ${uuid}`, err));
.catch((err) => log.error(`Error deleting device ${uuid}`, err));
}
sendUpdates({ uuid }) {
return Promise.join(
this.db
.models('dependentDevice')
.where({ uuid })
.select(),
this.db.models('dependentDevice').where({ uuid }).select(),
this.config.get('apiTimeout'),
([dev], apiTimeout) => {
if (dev == null) {
log.warn(`Trying to send update to non-existent device ${uuid}`);
return;
}
return this.normaliseDependentDeviceFromDB(dev).then(device => {
return this.normaliseDependentDeviceFromDB(dev).then((device) => {
const currentState = formatCurrentAsState(device);
const targetState = formatTargetAsState(device);
return this.getHookEndpoint(device.appId).then(endpoint => {
return this.getHookEndpoint(device.appId).then((endpoint) => {
if (device.markedForDeletion) {
return this.sendDeleteHook(device, apiTimeout, endpoint);
} else if (

View File

@ -71,7 +71,7 @@ const expressLogger = morgan(
'ms',
].join(' '),
{
stream: { write: d => log.api(d.toString().trimRight()) },
stream: { write: (d) => log.api(d.toString().trimRight()) },
},
);
@ -119,7 +119,7 @@ export class SupervisorAPI {
this.api.get('/v1/healthy', async (_req, res) => {
try {
const healths = await Promise.all(this.healthchecks.map(fn => fn()));
const healths = await Promise.all(this.healthchecks.map((fn) => fn()));
if (!_.every(healths)) {
log.error('Healthcheck failed');
return res.status(500).send('Unhealthy');
@ -194,7 +194,7 @@ export class SupervisorAPI {
await this.applyRules(localMode || false, port, allowedInterfaces);
// Monitor the switching of local mode, and change which interfaces will
// be listened to based on that
this.config.on('change', changedConfig => {
this.config.on('change', (changedConfig) => {
if (changedConfig.localMode != null) {
this.applyRules(
changedConfig.localMode || false,
@ -204,7 +204,7 @@ export class SupervisorAPI {
}
});
return new Promise(resolve => {
return new Promise((resolve) => {
this.server = this.api.listen(port, () => {
log.info(`Supervisor API successfully started on port ${port}`);
if (this.server) {

View File

@ -31,7 +31,7 @@ export class TargetStateAccessor {
// If we switch backend, the target state also needs to
// be invalidated (this includes switching to and from
// local mode)
this.config.on('change', conf => {
this.config.on('change', (conf) => {
if (conf.apiEndpoint != null || conf.localMode != null) {
this.targetState = undefined;
}
@ -45,7 +45,7 @@ export class TargetStateAccessor {
await this.getTargetApps();
}
return _.find(this.targetState, app => app.appId === appId);
return _.find(this.targetState, (app) => app.appId === appId);
}
public async getTargetApps(): Promise<DatabaseApps> {
@ -70,7 +70,7 @@ export class TargetStateAccessor {
this.targetState = undefined;
await Promise.all(
apps.map(app =>
apps.map((app) =>
this.db.upsertModel('app', app, { appId: app.appId }, trx),
),
);

View File

@ -5,7 +5,7 @@ const { expect } = ChaiConfig;
import constants = require('../src/lib/constants');
describe('constants', function() {
describe('constants', function () {
before(() => prepare());
it('has the correct configJsonPathOnHost', () =>
expect(constants.configJsonPathOnHost).to.equal('/config.json'));

View File

@ -22,23 +22,25 @@ async function createOldDatabase(path: string) {
name: string,
fn: (trx: Knex.CreateTableBuilder) => void,
) =>
knex.schema.createTable(name, t => {
knex.schema.createTable(name, (t) => {
if (fn != null) {
return fn(t);
}
});
await createEmptyTable('app', t => {
await createEmptyTable('app', (t) => {
t.increments('id').primary();
t.boolean('privileged');
return t.string('containerId');
});
await createEmptyTable('config', t => {
await createEmptyTable('config', (t) => {
t.string('key');
return t.string('value');
});
await createEmptyTable('dependentApp', t => t.increments('id').primary());
await createEmptyTable('dependentDevice', t => t.increments('id').primary());
await createEmptyTable('dependentApp', (t) => t.increments('id').primary());
await createEmptyTable('dependentDevice', (t) =>
t.increments('id').primary(),
);
return knex;
}
@ -103,6 +105,6 @@ describe('DB', () => {
});
it('allows performing transactions', () => {
return db.transaction(trx => expect(trx.commit()).to.be.fulfilled);
return db.transaction((trx) => expect(trx.commit()).to.be.fulfilled);
});
});

View File

@ -111,8 +111,8 @@ describe('Config', () => {
expect(conf.get('unknownInvalidValue' as any)).to.be.rejected;
});
it('emits a change event when values are set', done => {
conf.on('change', val => {
it('emits a change event when values are set', (done) => {
conf.on('change', (val) => {
expect(val).to.deep.equal({ name: 'someValue' });
return done();
});

View File

@ -207,7 +207,7 @@ describe('compose/service', () => {
});
});
it('should correctly handle large port ranges', function() {
it('should correctly handle large port ranges', function () {
this.timeout(60000);
const s = Service.fromComposeObject(
{

View File

@ -223,7 +223,7 @@ describe('deviceState', () => {
track: console.log,
};
stub(Service as any, 'extendEnvVars').callsFake(env => {
stub(Service as any, 'extendEnvVars').callsFake((env) => {
env['ADDITIONAL_ENV_VAR'] = 'foo';
return env;
});
@ -308,12 +308,8 @@ describe('deviceState', () => {
(deviceState as any).deviceConfig.getCurrent.restore();
const pinned = await config.get('pinDevice');
expect(pinned)
.to.have.property('app')
.that.equals(1234);
expect(pinned)
.to.have.property('commit')
.that.equals('abcdef');
expect(pinned).to.have.property('app').that.equals(1234);
expect(pinned).to.have.property('commit').that.equals('abcdef');
});
it('emits a change event when a new state is reported', () => {
@ -349,7 +345,7 @@ describe('deviceState', () => {
expect(deviceState.setTarget(testTargetInvalid as any)).to.be.rejected;
});
it('allows triggering applying the target state', done => {
it('allows triggering applying the target state', (done) => {
stub(deviceState as any, 'applyTarget').returns(Promise.resolve());
deviceState.triggerApplyTarget({ force: true });
@ -365,7 +361,7 @@ describe('deviceState', () => {
}, 5);
});
it('cancels current promise applying the target state', done => {
it('cancels current promise applying the target state', (done) => {
(deviceState as any).scheduledApply = { force: false, delay: 100 };
(deviceState as any).applyInProgress = true;
(deviceState as any).applyCancelled = false;

View File

@ -14,7 +14,7 @@ describe('EventTracker', () => {
before(() => {
initStub = stub(mixpanel, 'init').callsFake(
token =>
(token) =>
(({
token,
track: stub().returns(undefined),

View File

@ -7,14 +7,14 @@ import * as sinon from 'sinon';
import { Logger } from '../src/logger';
import { ContainerLogs } from '../src/logging/container';
describe('Logger', function() {
beforeEach(function() {
describe('Logger', function () {
beforeEach(function () {
this._req = new stream.PassThrough();
this._req.flushHeaders = sinon.spy();
this._req.end = sinon.spy();
this._req.body = '';
this._req.pipe(zlib.createGunzip()).on('data', chunk => {
this._req.pipe(zlib.createGunzip()).on('data', (chunk) => {
this._req.body += chunk;
});
@ -36,11 +36,11 @@ describe('Logger', function() {
});
});
afterEach(function() {
afterEach(function () {
this.requestStub.restore();
});
it('waits the grace period before sending any logs', function() {
it('waits the grace period before sending any logs', function () {
const clock = sinon.useFakeTimers();
this.logger.log({ message: 'foobar', serviceId: 15 });
clock.tick(4999);
@ -51,7 +51,7 @@ describe('Logger', function() {
});
});
it('tears down the connection after inactivity', function() {
it('tears down the connection after inactivity', function () {
const clock = sinon.useFakeTimers();
this.logger.log({ message: 'foobar', serviceId: 15 });
clock.tick(61000);
@ -62,7 +62,7 @@ describe('Logger', function() {
});
});
it('sends logs as gzipped ndjson', function() {
it('sends logs as gzipped ndjson', function () {
const timestamp = Date.now();
this.logger.log({ message: 'foobar', serviceId: 15 });
this.logger.log({ timestamp: 1337, message: 'foobar', serviceId: 15 });
@ -87,15 +87,9 @@ describe('Logger', function() {
expect(lines[2]).to.equal('');
let msg = JSON.parse(lines[0]);
expect(msg)
.to.have.property('message')
.that.equals('foobar');
expect(msg)
.to.have.property('serviceId')
.that.equals(15);
expect(msg)
.to.have.property('timestamp')
.that.is.at.least(timestamp);
expect(msg).to.have.property('message').that.equals('foobar');
expect(msg).to.have.property('serviceId').that.equals(15);
expect(msg).to.have.property('timestamp').that.is.at.least(timestamp);
msg = JSON.parse(lines[1]);
expect(msg).to.deep.equal({
timestamp: 1337,
@ -105,7 +99,7 @@ describe('Logger', function() {
});
});
it('allows logging system messages which are also reported to the eventTracker', function() {
it('allows logging system messages which are also reported to the eventTracker', function () {
const timestamp = Date.now();
this.logger.logSystemMessage(
'Hello there!',
@ -122,19 +116,13 @@ describe('Logger', function() {
expect(lines[1]).to.equal('');
const msg = JSON.parse(lines[0]);
expect(msg)
.to.have.property('message')
.that.equals('Hello there!');
expect(msg)
.to.have.property('isSystem')
.that.equals(true);
expect(msg)
.to.have.property('timestamp')
.that.is.at.least(timestamp);
expect(msg).to.have.property('message').that.equals('Hello there!');
expect(msg).to.have.property('isSystem').that.equals(true);
expect(msg).to.have.property('timestamp').that.is.at.least(timestamp);
});
});
it('should support non-tty log lines', function() {
it('should support non-tty log lines', function () {
const message =
'\u0001\u0000\u0000\u0000\u0000\u0000\u0000?2018-09-21T12:37:09.819134000Z this is the message';
const buffer = Buffer.from(message);

View File

@ -1,8 +1,8 @@
import { expect } from './lib/chai-config';
import * as conversion from '../src/lib/conversions';
describe('conversions', function() {
describe('envArrayToObject', function() {
describe('conversions', function () {
describe('envArrayToObject', function () {
it('should convert an env array to an object', () =>
expect(
conversion.envArrayToObject([
@ -29,7 +29,7 @@ describe('conversions', function() {
key1: 'value1',
}));
it('should return an empty object with an empty input', function() {
it('should return an empty object with an empty input', function () {
// @ts-ignore passing invalid value to test
expect(conversion.envArrayToObject(null)).to.deep.equal({});
// @ts-ignore passing invalid value to test

View File

@ -6,9 +6,9 @@ const PortMapPublic = (PortMap as any) as new (
portStrOrObj: string | PortRange,
) => PortMap;
describe('Ports', function() {
describe('Port string parsing', function() {
it('should correctly parse a port string without a range', function() {
describe('Ports', function () {
describe('Port string parsing', function () {
it('should correctly parse a port string without a range', function () {
expect(new PortMapPublic('80')).to.deep.equal(
new PortMapPublic({
internalStart: 80,
@ -56,7 +56,7 @@ describe('Ports', function() {
}),
));
it('should correctly parse a protocol', function() {
it('should correctly parse a protocol', function () {
expect(new PortMapPublic('80/udp')).to.deep.equal(
new PortMapPublic({
internalStart: 80,
@ -95,7 +95,7 @@ describe('Ports', function() {
expect(() => new PortMapPublic('80-90:80-85')).to.throw);
});
describe('toDockerOpts', function() {
describe('toDockerOpts', function () {
it('should correctly generate docker options', () =>
expect(new PortMapPublic('80').toDockerOpts()).to.deep.equal({
exposedPorts: {
@ -127,7 +127,7 @@ describe('Ports', function() {
}));
});
describe('fromDockerOpts', function() {
describe('fromDockerOpts', function () {
it('should correctly detect a port range', () =>
expect(
PortMap.fromDockerOpts({
@ -297,7 +297,7 @@ describe('Ports', function() {
});
describe('Running container comparison', () =>
it('should not consider order when comparing current and target state', function() {
it('should not consider order when comparing current and target state', function () {
const portBindings = require('./data/ports/not-ascending/port-bindings.json');
const compose = require('./data/ports/not-ascending/compose.json');
const portMapsCurrent = PortMap.fromDockerOpts(portBindings);
@ -312,8 +312,8 @@ describe('Ports', function() {
PortMap.fromComposePorts(['80:80', '81:81', '82:82']),
).to.deep.equal([new PortMapPublic('80-82')])));
describe('normalisePortMaps', function() {
it('should correctly normalise PortMap lists', function() {
describe('normalisePortMaps', function () {
it('should correctly normalise PortMap lists', function () {
expect(
PortMap.normalisePortMaps([
new PortMapPublic('80:90'),
@ -340,7 +340,7 @@ describe('Ports', function() {
expect(PortMap.normalisePortMaps([])).to.deep.equal([]);
});
it('should correctly consider protocols', function() {
it('should correctly consider protocols', function () {
expect(
PortMap.normalisePortMaps([
new PortMapPublic('80:90'),
@ -376,7 +376,7 @@ describe('Ports', function() {
]);
});
it('should correctly consider hosts', function() {
it('should correctly consider hosts', function () {
expect(
PortMap.normalisePortMaps([
new PortMapPublic('127.0.0.1:80:80'),

View File

@ -8,9 +8,9 @@ const extlinuxBackend = new ExtlinuxConfigBackend();
const rpiBackend = new RPiConfigBackend();
describe('Config Utilities', () =>
describe('Boot config utilities', function() {
describe('Boot config utilities', function () {
describe('Env <-> Config', () =>
it('correctly transforms environments to boot config objects', function() {
it('correctly transforms environments to boot config objects', function () {
const bootConfig = configUtils.envToBootConfig(rpiBackend, {
HOST_CONFIG_initramfs: 'initramf.gz 0x00800000',
HOST_CONFIG_dtparam: '"i2c=on","audio=on"',
@ -26,8 +26,8 @@ describe('Config Utilities', () =>
});
}));
describe('TX2 boot config utilities', function() {
it('should parse a extlinux.conf file', function() {
describe('TX2 boot config utilities', function () {
it('should parse a extlinux.conf file', function () {
const text = `\
DEFAULT primary
# Comment
@ -45,9 +45,7 @@ APPEND \${cbootargs} \${resin_kernel_root} ro rootwait\
expect(parsed.globals)
.to.have.property('DEFAULT')
.that.equals('primary');
expect(parsed.globals)
.to.have.property('TIMEOUT')
.that.equals('30');
expect(parsed.globals).to.have.property('TIMEOUT').that.equals('30');
expect(parsed.globals)
.to.have.property('MENU TITLE')
.that.equals('Boot Options');
@ -57,15 +55,13 @@ APPEND \${cbootargs} \${resin_kernel_root} ro rootwait\
expect(primary)
.to.have.property('MENU LABEL')
.that.equals('primary Image');
expect(primary)
.to.have.property('LINUX')
.that.equals('/Image');
expect(primary).to.have.property('LINUX').that.equals('/Image');
expect(primary)
.to.have.property('APPEND')
.that.equals('${cbootargs} ${resin_kernel_root} ro rootwait');
});
it('should parse multiple service entries', function() {
it('should parse multiple service entries', function () {
const text = `\
DEFAULT primary
# Comment
@ -82,21 +78,17 @@ APPEND test4\
// @ts-ignore accessing private method
const parsed = ExtlinuxConfigBackend.parseExtlinuxFile(text);
expect(parsed.labels)
.to.have.property('primary')
.that.deep.equals({
LINUX: 'test1',
APPEND: 'test2',
});
expect(parsed.labels)
.to.have.property('secondary')
.that.deep.equals({
LINUX: 'test3',
APPEND: 'test4',
});
expect(parsed.labels).to.have.property('primary').that.deep.equals({
LINUX: 'test1',
APPEND: 'test2',
});
expect(parsed.labels).to.have.property('secondary').that.deep.equals({
LINUX: 'test3',
APPEND: 'test4',
});
});
it('should parse configuration options from an extlinux.conf file', function() {
it('should parse configuration options from an extlinux.conf file', function () {
let text = `\
DEFAULT primary
# Comment
@ -112,9 +104,7 @@ APPEND \${cbootargs} \${resin_kernel_root} ro rootwait isolcpus=3\
let readFileStub = stub(fs, 'readFile').resolves(text);
let parsed = extlinuxBackend.getBootConfig();
expect(parsed)
.to.eventually.have.property('isolcpus')
.that.equals('3');
expect(parsed).to.eventually.have.property('isolcpus').that.equals('3');
readFileStub.restore();
text = `\

View File

@ -1,9 +1,9 @@
import { expect } from './lib/chai-config';
import { Network } from '../src/compose/network';
describe('compose/network', function() {
describe('compose config -> internal config', function() {
it('should convert a compose configuration to an internal representation', function() {
describe('compose/network', function () {
describe('compose config -> internal config', function () {
it('should convert a compose configuration to an internal representation', function () {
const network = Network.fromComposeObject(
'test',
123,
@ -42,7 +42,7 @@ describe('compose/network', function() {
});
});
it('should handle an incomplete ipam configuration', function() {
it('should handle an incomplete ipam configuration', function () {
const network = Network.fromComposeObject(
'test',
123,
@ -81,7 +81,7 @@ describe('compose/network', function() {
});
describe('internal config -> docker config', () =>
it('should convert an internal representation to a docker representation', function() {
it('should convert an internal representation to a docker representation', function () {
const network = Network.fromComposeObject(
'test',
123,

View File

@ -2,7 +2,7 @@ import { expect } from './lib/chai-config';
import * as ComposeUtils from '../src/compose/utils';
describe('Composition utilities', () =>
it('Should correctly camel case the configuration', function() {
it('Should correctly camel case the configuration', function () {
const config = {
networks: ['test', 'test2'],
};

View File

@ -28,12 +28,8 @@ describe('Compose volumes', () => {
Scope: 'local',
});
expect(volume)
.to.have.property('appId')
.that.equals(1032480);
expect(volume)
.to.have.property('name')
.that.equals('one_volume');
expect(volume).to.have.property('appId').that.equals(1032480);
expect(volume).to.have.property('name').that.equals('one_volume');
expect(volume)
.to.have.property('config')
.that.has.property('labels')
@ -65,12 +61,8 @@ describe('Compose volumes', () => {
opts,
);
expect(volume)
.to.have.property('appId')
.that.equals(1032480);
expect(volume)
.to.have.property('name')
.that.equals('one_volume');
expect(volume).to.have.property('appId').that.equals(1032480);
expect(volume).to.have.property('name').that.equals('one_volume');
expect(volume)
.to.have.property('config')
.that.has.property('labels')
@ -106,12 +98,8 @@ describe('Compose volumes', () => {
opts,
);
expect(volume)
.to.have.property('appId')
.that.equals(1032480);
expect(volume)
.to.have.property('name')
.that.equals('one_volume');
expect(volume).to.have.property('appId').that.equals(1032480);
expect(volume).to.have.property('name').that.equals('one_volume');
expect(volume)
.to.have.property('config')
.that.has.property('labels')

View File

@ -44,10 +44,7 @@ describe('SupervisorAPI', () => {
describe('/ping', () => {
it('responds with OK (without auth)', async () => {
await request
.get('/ping')
.set('Accept', 'application/json')
.expect(200);
await request.get('/ping').set('Accept', 'application/json').expect(200);
});
it('responds with OK (with auth)', async () => {
await request
@ -71,7 +68,7 @@ describe('SupervisorAPI', () => {
.set('Authorization', `Bearer ${VALID_SECRET}`)
.expect('Content-Type', /json/)
.expect(sampleResponses.V2.GET['/device/vpn'].statusCode)
.then(response => {
.then((response) => {
expect(response.body).to.deep.equal(
sampleResponses.V2.GET['/device/vpn'].body,
);

View File

@ -27,7 +27,7 @@ describe('LocalModeManager', () => {
.models('engineSnapshot')
.count('* as cnt')
.first()
.then(r => r.cnt);
.then((r) => r.cnt);
// Cleanup the database (to make sure nothing is left since last tests).
beforeEach(async () => {
@ -235,7 +235,7 @@ describe('LocalModeManager', () => {
await localMode.handleLocalModeStateChange(false);
removeStubs.forEach(s => expect(s.remove.calledTwice).to.be.true);
removeStubs.forEach((s) => expect(s.remove.calledTwice).to.be.true);
});
it('keeps objects from the previous snapshot on local mode exit', async () => {
@ -255,7 +255,7 @@ describe('LocalModeManager', () => {
.true;
expect(dockerStub.getVolume.calledWithExactly('volume-2')).to.be.true;
expect(dockerStub.getNetwork.calledWithExactly('network-2')).to.be.true;
removeStubs.forEach(s => expect(s.remove.calledOnce).to.be.true);
removeStubs.forEach((s) => expect(s.remove.calledOnce).to.be.true);
});
it('logs but consumes cleanup errors on local mode exit', async () => {
@ -267,7 +267,7 @@ describe('LocalModeManager', () => {
await localMode.handleLocalModeStateChange(false);
// Even though remove method throws, we still attempt all removals.
removeStubs.forEach(s => expect(s.remove.calledTwice).to.be.true);
removeStubs.forEach((s) => expect(s.remove.calledTwice).to.be.true);
});
it('skips cleanup without previous snapshot on local mode exit', async () => {
@ -279,7 +279,7 @@ describe('LocalModeManager', () => {
expect(dockerStub.getContainer.notCalled).to.be.true;
expect(dockerStub.getVolume.notCalled).to.be.true;
expect(dockerStub.getNetwork.notCalled).to.be.true;
removeStubs.forEach(s => expect(s.remove.notCalled).to.be.true);
removeStubs.forEach((s) => expect(s.remove.notCalled).to.be.true);
});
it('can be awaited', async () => {
@ -292,7 +292,7 @@ describe('LocalModeManager', () => {
// Await like it's done by DeviceState.
await localMode.switchCompletion();
removeStubs.forEach(s => expect(s.remove.calledTwice).to.be.true);
removeStubs.forEach((s) => expect(s.remove.calledTwice).to.be.true);
});
it('cleans the last snapshot so that nothing is done on restart', async () => {
@ -312,7 +312,7 @@ describe('LocalModeManager', () => {
expect(dockerStub.getContainer.callCount).to.be.equal(3); // +1 for supervisor inspect call.
expect(dockerStub.getVolume.callCount).to.be.equal(2);
expect(dockerStub.getNetwork.callCount).to.be.equal(2);
removeStubs.forEach(s => expect(s.remove.callCount).to.be.equal(2));
removeStubs.forEach((s) => expect(s.remove.callCount).to.be.equal(2));
});
it('skips cleanup in case of data corruption', async () => {
@ -330,7 +330,7 @@ describe('LocalModeManager', () => {
expect(dockerStub.getContainer.notCalled).to.be.true;
expect(dockerStub.getVolume.notCalled).to.be.true;
expect(dockerStub.getNetwork.notCalled).to.be.true;
removeStubs.forEach(s => expect(s.remove.notCalled).to.be.true);
removeStubs.forEach((s) => expect(s.remove.notCalled).to.be.true);
});
describe('with supervisor being updated', () => {
@ -362,7 +362,7 @@ describe('LocalModeManager', () => {
// Current engine objects include 2 entities of each type.
// Container-1, network-1, image-1, and volume-1 are resources associated with currently running supervisor.
// Only xxx-2 objects must be deleted.
removeStubs.forEach(s => expect(s.remove.calledOnce).to.be.true);
removeStubs.forEach((s) => expect(s.remove.calledOnce).to.be.true);
});
});
});

View File

@ -82,10 +82,12 @@ describe('Container contracts', () => {
// package.json will, we generate values which are above
// and below the current value, and use these to reason
// about the contract engine results
const supervisorVersionGreater = `${semver.major(supervisorVersion)! +
1}.0.0`;
const supervisorVersionLesser = `${semver.major(supervisorVersion)! -
1}.0.0`;
const supervisorVersionGreater = `${
semver.major(supervisorVersion)! + 1
}.0.0`;
const supervisorVersionLesser = `${
semver.major(supervisorVersion)! - 1
}.0.0`;
before(async () => {
// We ensure that the versions we're using for testing
@ -275,9 +277,7 @@ describe('Container contracts', () => {
optional: false,
},
});
expect(fulfilled)
.to.have.property('valid')
.that.equals(false);
expect(fulfilled).to.have.property('valid').that.equals(false);
expect(fulfilled)
.to.have.property('unmetServices')
.that.deep.equals(['service']);
@ -298,9 +298,7 @@ describe('Container contracts', () => {
optional: false,
},
});
expect(fulfilled)
.to.have.property('valid')
.that.equals(false);
expect(fulfilled).to.have.property('valid').that.equals(false);
expect(fulfilled)
.to.have.property('unmetServices')
.that.deep.equals(['service2']);
@ -335,9 +333,7 @@ describe('Container contracts', () => {
optional: false,
},
});
expect(fulfilled)
.to.have.property('valid')
.that.equals(false);
expect(fulfilled).to.have.property('valid').that.equals(false);
expect(fulfilled)
.to.have.property('unmetServices')
.that.deep.equals(['service2']);

View File

@ -6,12 +6,12 @@ import { expect } from './lib/chai-config';
describe('journald', () => {
let spawn: SinonStub;
beforeEach(done => {
beforeEach((done) => {
spawn = stub(require('child_process'), 'spawn');
done();
});
afterEach(done => {
afterEach((done) => {
spawn.restore();
done();
});
@ -50,7 +50,7 @@ describe('journald', () => {
expect(actualCommand).deep.equal(expectedCommand);
expect(actualCoreArgs).deep.equal(expectedCoreArgs);
expectedOptionalArgs.forEach(arg => {
expectedOptionalArgs.forEach((arg) => {
expect(actualOptionalArgs).to.include(arg);
});
});

View File

@ -40,7 +40,7 @@ api.balenaBackend = {
: null;
if (uuid != null) {
return res.json({
d: _.filter(api.balenaBackend!.devices, dev => dev.uuid === uuid),
d: _.filter(api.balenaBackend!.devices, (dev) => dev.uuid === uuid),
});
} else {
return res.json({ d: [] });

View File

@ -7,7 +7,7 @@
*/
import * as fs from 'fs';
export = function() {
export = function () {
try {
fs.unlinkSync(process.env.DATABASE_PATH!);
} catch (e) {