Mirror of https://github.com/balena-os/balena-supervisor.git (synced 2024-12-24 15:56:40 +00:00)
Commit 3db260aa5c

.lintstagedrc (new file)
@@ -0,0 +1,10 @@
{
	"*.coffee": [
		"resin-lint"
	],
	"*.ts": [
		"prettier --config ./node_modules/resin-lint/config/.prettierrc --write",
		"resin-lint --typescript --no-prettier",
		"git add"
	],
}

package.json
@@ -10,7 +10,11 @@
"scripts": {
"start": "./entry.sh",
"build": "webpack",
"lint": "resin-lint --typescript src/ test/",
"precommit": "lint-staged",
"prettify": "prettier --config ./node_modules/resin-lint/config/.prettierrc --write \"{src,test,typings}/**/*.ts\"",
"lint:coffee": "resin-lint src/ test/",
"lint:typescript": "resin-lint --typescript src/ test/ typings/ && tsc --noEmit",
"lint": "npm run lint:coffee && npm run lint:typescript",
"test": "npm run lint && JUNIT_REPORT_PATH=report.xml mocha --exit -r ts-node/register -r coffee-script/register -r register-coffee-coverage test/*.{js,coffee} && npm run coverage",
"test:fast": "mocha --exit -r ts-node/register -r coffee-script/register test/*.{js,coffee}",
"test:build": "tsc && coffee -m -c -o build . && cp -r test/data build/test/ && cp -r src/migrations build/src && cp package.json build",
@@ -52,9 +56,11 @@
"duration-js": "^4.0.0",
"event-stream": "^3.0.20",
"express": "^4.0.0",
"husky": "^1.1.3",
"istanbul": "^0.4.5",
"json-mask": "^0.3.8",
"knex": "~0.12.3",
"lint-staged": "^8.0.4",
"lockfile": "^1.0.1",
"lodash": "^4.16.3",
"log-timestamp": "^0.1.2",
@@ -68,9 +74,10 @@
"node-loader": "^0.6.0",
"null-loader": "^0.1.1",
"pinejs-client": "^2.4.0",
"prettier": "^1.14.3",
"register-coffee-coverage": "0.0.1",
"request": "^2.51.0",
"resin-lint": "^1.5.7",
"resin-lint": "^2.0.1",
"resin-register-device": "^3.0.0",
"resin-sync": "^9.3.0",
"resumable-request": "^2.0.0",

@@ -810,9 +810,9 @@ module.exports = class ApplicationManager extends EventEmitter
img = _.find(available, { dockerImageId: service.config.image, imageId: service.imageId }) ? _.find(available, { dockerImageId: service.config.image })
return _.omit(img, [ 'dockerImageId', 'id' ])
allImageDockerIdsForTargetApp = (app) ->
_(app.services).map((svc) -> [ svc.imageName, svc.config.image ])
.filter((img) -> img[1]?)
.value()
_(app.services).map((svc) -> [ svc.imageName, svc.config.image ])
.filter((img) -> img[1]?)
.value()

availableWithoutIds = _.map(available, (image) -> _.omit(image, [ 'dockerImageId', 'id' ]))
currentImages = _.flatMap(current.local.apps, allImagesForCurrentApp)
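To make the lodash chain above concrete, here is a small sketch of the same mapping expressed in TypeScript (the original is CoffeeScript); the sample service objects are invented for illustration only.

// Sketch only: mirrors the map/filter above — build [imageName, dockerImageId]
// pairs and keep only services whose config.image is set.
import * as _ from 'lodash';

const services = [
	{ imageName: 'registry2/foo:1', config: { image: 'sha256:aaa' } },
	{ imageName: 'registry2/bar:1', config: { image: undefined } },
];

const pairs = _(services)
	.map(svc => [svc.imageName, svc.config.image])
	.filter(img => img[1] != null)
	.value();
// -> [['registry2/foo:1', 'sha256:aaa']]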

src/application-manager.d.ts (vendored)
@@ -25,7 +25,6 @@ declare interface Application {
// This is a non-exhaustive typing for ApplicationManager to avoid
// having to recode the entire class (and all requirements in TS).
export class ApplicationManager extends EventEmitter {

// These probably could be typed, but the types are so messy that we're
// best just waiting for the relevant module to be recoded in typescript.
// At least any types we can be sure of then.
@@ -46,12 +45,14 @@ export class ApplicationManager extends EventEmitter {
// TODO: This actually returns an object, but we don't need the values just yet
public setTargetVolatileForService(serviceId: number, opts: Options): void;

public executeStepAction(serviceAction: ServiceAction, opts: Options): Promise<void>;
public executeStepAction(
serviceAction: ServiceAction,
opts: Options,
): Promise<void>;

public getStatus(): Promise<DeviceApplicationState>;

public serviceNameFromId(serviceId: number): Promise<string>;

}

export default ApplicationManager;

@@ -10,15 +10,12 @@ export class InvalidNetworkNameError extends TypedError {
}

export class ResourceRecreationAttemptError extends TypedError {
public constructor(
public resource: string,
public name: string,
) {
public constructor(public resource: string, public name: string) {
super(
`Trying to create ${resource} with name: ${name}, but a ${resource} `+
'with that name and a different configuration already exists',
`Trying to create ${resource} with name: ${name}, but a ${resource} ` +
'with that name and a different configuration already exists',
);
}
}

export class InvalidNetworkConfigurationError extends TypedError { }
export class InvalidNetworkConfigurationError extends TypedError {}
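For context, a small hedged sketch of the message this constructor produces; the argument values below are invented, only the class and message text come from the hunk above.

// Illustrative only: constructing the error shown above.
const err = new ResourceRecreationAttemptError('network', 'backend');
// err.message reads: "Trying to create network with name: backend, but a network
// with that name and a different configuration already exists"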

@@ -18,72 +18,93 @@ export class NetworkManager {
}

public getAll(): Bluebird<Network[]> {
return this.getWithBothLabels()
.map((network: { Name: string }) => {
return this.docker.getNetwork(network.Name).inspect()
.then((net) => {
return Network.fromDockerNetwork({
return this.getWithBothLabels().map((network: { Name: string }) => {
return this.docker
.getNetwork(network.Name)
.inspect()
.then(net => {
return Network.fromDockerNetwork(
{
docker: this.docker,
logger: this.logger,
}, net);
});
});
},
net,
);
});
});
}

public getAllByAppId(appId: number): Bluebird<Network[]> {
return this.getAll()
.filter((network: Network) => network.appId === appId);
return this.getAll().filter((network: Network) => network.appId === appId);
}

public get(network: { name: string, appId: number }): Bluebird<Network> {
return Network.fromNameAndAppId({
logger: this.logger,
docker: this.docker,
}, network.name, network.appId);
public get(network: { name: string; appId: number }): Bluebird<Network> {
return Network.fromNameAndAppId(
{
logger: this.logger,
docker: this.docker,
},
network.name,
network.appId,
);
}

public supervisorNetworkReady(): Bluebird<boolean> {
return Bluebird.resolve(fs.stat(`/sys/class/net/${constants.supervisorNetworkInterface}`))
return Bluebird.resolve(
fs.stat(`/sys/class/net/${constants.supervisorNetworkInterface}`),
)
.then(() => {
return this.docker.getNetwork(constants.supervisorNetworkInterface).inspect();
return this.docker
.getNetwork(constants.supervisorNetworkInterface)
.inspect();
})
.then((network) => {
return network.Options['com.docker.network.bridge.name'] ===
constants.supervisorNetworkInterface;
.then(network => {
return (
network.Options['com.docker.network.bridge.name'] ===
constants.supervisorNetworkInterface
);
})
.catchReturn(NotFoundError, false)
.catchReturn(ENOENT, false);
}

public ensureSupervisorNetwork(): Bluebird<void> {

const removeIt = () => {
return Bluebird.resolve(this.docker.getNetwork(constants.supervisorNetworkInterface).remove())
.then(() => {
this.docker.getNetwork(constants.supervisorNetworkInterface).inspect();
});
return Bluebird.resolve(
this.docker.getNetwork(constants.supervisorNetworkInterface).remove(),
).then(() => {
this.docker.getNetwork(constants.supervisorNetworkInterface).inspect();
});
};

return Bluebird.resolve(this.docker.getNetwork(constants.supervisorNetworkInterface).inspect())
.then((net) => {
if (net.Options['com.docker.network.bridge.name'] !== constants.supervisorNetworkInterface) {
return Bluebird.resolve(
this.docker.getNetwork(constants.supervisorNetworkInterface).inspect(),
)
.then(net => {
if (
net.Options['com.docker.network.bridge.name'] !==
constants.supervisorNetworkInterface
) {
return removeIt();
} else {
return Bluebird.resolve(
fs.stat(`/sys/class/net/${constants.supervisorNetworkInterface}`),
)
.catch(ENOENT, removeIt)
.return();
.catch(ENOENT, removeIt)
.return();
}
})
.catch(NotFoundError, () => {
console.log(`Creating ${constants.supervisorNetworkInterface} network`);
return Bluebird.resolve(this.docker.createNetwork({
Name: constants.supervisorNetworkInterface,
Options: {
'com.docker.network.bridge.name': constants.supervisorNetworkInterface,
},
}));
return Bluebird.resolve(
this.docker.createNetwork({
Name: constants.supervisorNetworkInterface,
Options: {
'com.docker.network.bridge.name':
constants.supervisorNetworkInterface,
},
}),
);
});
}

@@ -91,12 +112,12 @@ export class NetworkManager {
return Bluebird.join(
this.docker.listNetworks({
filters: {
label: [ 'io.resin.supervised' ],
label: ['io.resin.supervised'],
},
}),
this.docker.listNetworks({
filters: {
label: [ 'io.balena.supervised' ],
label: ['io.balena.supervised'],
},
}),
(legacyNetworks, currentNetworks) => {
@@ -104,5 +125,4 @@ export class NetworkManager {
},
);
}

}
@ -2,10 +2,7 @@ import * as Bluebird from 'bluebird';
|
||||
import * as _ from 'lodash';
|
||||
|
||||
import Docker = require('../lib/docker-utils');
|
||||
import {
|
||||
InvalidAppIdError,
|
||||
NotFoundError,
|
||||
} from '../lib/errors';
|
||||
import { InvalidAppIdError, NotFoundError } from '../lib/errors';
|
||||
import logTypes = require('../lib/log-types');
|
||||
import { checkInt } from '../lib/validation';
|
||||
import { Logger } from '../logger';
|
||||
@ -30,7 +27,6 @@ export interface NetworkOptions {
|
||||
}
|
||||
|
||||
export class Network {
|
||||
|
||||
public appId: number;
|
||||
public name: string;
|
||||
public config: NetworkConfig;
|
||||
@ -66,7 +62,7 @@ export class Network {
|
||||
driver: network.Driver,
|
||||
ipam: {
|
||||
driver: network.IPAM.Driver,
|
||||
config: _.map(network.IPAM.Config, (conf) => {
|
||||
config: _.map(network.IPAM.Config, conf => {
|
||||
const newConf: NetworkConfig['ipam']['config'][0] = {
|
||||
subnet: conf.Subnet,
|
||||
gateway: conf.Gateway,
|
||||
@ -80,11 +76,13 @@ export class Network {
|
||||
}
|
||||
return newConf;
|
||||
}),
|
||||
options: network.IPAM.Options == null ? { } : network.IPAM.Options,
|
||||
options: network.IPAM.Options == null ? {} : network.IPAM.Options,
|
||||
},
|
||||
enableIPv6: network.EnableIPv6,
|
||||
internal: network.Internal,
|
||||
labels: _.omit(ComposeUtils.normalizeLabels(network.Labels), [ 'io.balena.supervised' ]),
|
||||
labels: _.omit(ComposeUtils.normalizeLabels(network.Labels), [
|
||||
'io.balena.supervised',
|
||||
]),
|
||||
options: network.Options,
|
||||
};
|
||||
|
||||
@ -119,12 +117,12 @@ export class Network {
|
||||
ipam: {
|
||||
driver: 'default',
|
||||
config: [],
|
||||
options: { },
|
||||
options: {},
|
||||
},
|
||||
enableIPv6: false,
|
||||
internal: false,
|
||||
labels: { },
|
||||
options: { },
|
||||
labels: {},
|
||||
options: {},
|
||||
});
|
||||
net.config.labels = ComposeUtils.normalizeLabels(net.config.labels);
|
||||
|
||||
@ -132,10 +130,12 @@ export class Network {
|
||||
}
|
||||
|
||||
public create(): Bluebird<void> {
|
||||
this.logger.logSystemEvent(logTypes.createNetwork, { network: { name: this.name } });
|
||||
this.logger.logSystemEvent(logTypes.createNetwork, {
|
||||
network: { name: this.name },
|
||||
});
|
||||
|
||||
return Network.fromNameAndAppId(this.networkOpts, this.name, this.appId)
|
||||
.then((current) => {
|
||||
.then(current => {
|
||||
if (!this.isEqualConfig(current)) {
|
||||
throw new ResourceRecreationAttemptError('network', this.name);
|
||||
}
|
||||
@ -146,7 +146,7 @@ export class Network {
|
||||
.catch(NotFoundError, () => {
|
||||
return this.docker.createNetwork(this.toDockerConfig());
|
||||
})
|
||||
.tapCatch((err) => {
|
||||
.tapCatch(err => {
|
||||
this.logger.logSystemEvent(logTypes.createNetworkError, {
|
||||
network: { name: this.name, appId: this.appId },
|
||||
error: err,
|
||||
@ -161,7 +161,7 @@ export class Network {
|
||||
CheckDuplicate: true,
|
||||
IPAM: {
|
||||
Driver: this.config.ipam.driver,
|
||||
Config: _.map(this.config.ipam.config, (conf) => {
|
||||
Config: _.map(this.config.ipam.config, conf => {
|
||||
const ipamConf: DockerIPAMConfig = {
|
||||
Subnet: conf.subnet,
|
||||
Gateway: conf.gateway,
|
||||
@ -178,30 +178,32 @@ export class Network {
|
||||
},
|
||||
EnableIPv6: this.config.enableIPv6,
|
||||
Internal: this.config.internal,
|
||||
Labels: _.merge({}, {
|
||||
'io.balena.supervised': 'true',
|
||||
}, this.config.labels),
|
||||
Labels: _.merge(
|
||||
{},
|
||||
{
|
||||
'io.balena.supervised': 'true',
|
||||
},
|
||||
this.config.labels,
|
||||
),
|
||||
};
|
||||
}
|
||||
|
||||
public remove(): Bluebird<void> {
|
||||
this.logger.logSystemEvent(
|
||||
logTypes.removeNetwork,
|
||||
{ network: { name: this.name, appId: this.appId } },
|
||||
);
|
||||
this.logger.logSystemEvent(logTypes.removeNetwork, {
|
||||
network: { name: this.name, appId: this.appId },
|
||||
});
|
||||
|
||||
return Bluebird.resolve(this.docker.getNetwork(this.getDockerName()).remove())
|
||||
.tapCatch((error) => {
|
||||
this.logger.logSystemEvent(
|
||||
logTypes.createNetworkError,
|
||||
{ network: { name: this.name, appId: this.appId }, error },
|
||||
);
|
||||
return Bluebird.resolve(
|
||||
this.docker.getNetwork(this.getDockerName()).remove(),
|
||||
).tapCatch(error => {
|
||||
this.logger.logSystemEvent(logTypes.createNetworkError, {
|
||||
network: { name: this.name, appId: this.appId },
|
||||
error,
|
||||
});
|
||||
|
||||
});
|
||||
}
|
||||
|
||||
public isEqualConfig(network: Network): boolean {
|
||||
|
||||
// don't compare the ipam.config if it's not present
|
||||
// in the target state (as it will be present in the
|
||||
// current state, due to docker populating it with
|
||||
@ -221,15 +223,12 @@ export class Network {
|
||||
|
||||
private static validateComposeConfig(config: NetworkConfig): void {
|
||||
// Check if every ipam config entry has both a subnet and a gateway
|
||||
_.each(
|
||||
_.get(config, 'config.ipam.config', []),
|
||||
({ subnet, gateway }) => {
|
||||
if (subnet == null || gateway == null) {
|
||||
throw new InvalidNetworkConfigurationError(
|
||||
'Network IPAM config entries must have both a subnet and gateway',
|
||||
);
|
||||
}
|
||||
},
|
||||
);
|
||||
_.each(_.get(config, 'config.ipam.config', []), ({ subnet, gateway }) => {
|
||||
if (subnet == null || gateway == null) {
|
||||
throw new InvalidNetworkConfigurationError(
|
||||
'Network IPAM config entries must have both a subnet and gateway',
|
||||
);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
@@ -2,16 +2,15 @@ import * as _ from 'lodash';
import TypedError = require('typed-error');

// Adapted from https://github.com/docker/docker-py/blob/master/docker/utils/ports.py#L3
const PORTS_REGEX =
/^(?:(?:([a-fA-F\d.:]+):)?([\d]*)(?:-([\d]+))?:)?([\d]+)(?:-([\d]+))?(?:\/(udp|tcp))?$/;
const PORTS_REGEX = /^(?:(?:([a-fA-F\d.:]+):)?([\d]*)(?:-([\d]+))?:)?([\d]+)(?:-([\d]+))?(?:\/(udp|tcp))?$/;

// A regex to extract the protocol and internal port of the incoming Docker options
const DOCKER_OPTS_PORTS_REGEX = /(\d+)(?:\/?([a-z]+))?/i;
const DOCKER_OPTS_PORTS_REGEX = /(\d+)(?:\/?([a-z]+))?/i;

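As a quick orientation to PORTS_REGEX above, here is a small sketch of what its capture groups yield for one long-form mapping; the sample string is invented for illustration.

// Sketch only: exercises the PORTS_REGEX defined above.
// Groups: [1] host IP, [2]/[3] external start/end, [4]/[5] internal start/end, [6] protocol.
const match = '127.0.0.1:8080-8081:80-81/udp'.match(PORTS_REGEX);
if (match != null) {
	const [, host, extStart, extEnd, intStart, intEnd, proto] = match;
	console.log(host, extStart, extEnd, intStart, intEnd, proto);
	// -> 127.0.0.1 8080 8081 80 81 udp
}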
export class InvalidPortDefinition extends TypedError { }
export class InvalidPortDefinition extends TypedError {}

export interface PortBindings {
[key: string]: Array<{ HostIp: string, HostPort: string }>;
[key: string]: Array<{ HostIp: string; HostPort: string }>;
}

export interface DockerPortOptions {
|
||||
@ -29,7 +28,6 @@ interface PortRange {
|
||||
}
|
||||
|
||||
export class PortMap {
|
||||
|
||||
private ports: PortRange;
|
||||
|
||||
public constructor(portStrOrObj: string | PortRange) {
|
||||
@ -41,8 +39,14 @@ export class PortMap {
|
||||
}
|
||||
|
||||
public toDockerOpts(): DockerPortOptions {
|
||||
const internalRange = this.generatePortRange(this.ports.internalStart, this.ports.internalEnd);
|
||||
const externalRange = this.generatePortRange(this.ports.externalStart, this.ports.externalEnd);
|
||||
const internalRange = this.generatePortRange(
|
||||
this.ports.internalStart,
|
||||
this.ports.internalEnd,
|
||||
);
|
||||
const externalRange = this.generatePortRange(
|
||||
this.ports.externalStart,
|
||||
this.ports.externalEnd,
|
||||
);
|
||||
|
||||
const exposedPorts: { [key: string]: {} } = {};
|
||||
const portBindings: PortBindings = {};
|
||||
@ -62,8 +66,11 @@ export class PortMap {
|
||||
}
|
||||
|
||||
public toExposedPortArray(): string[] {
|
||||
const internalRange = this.generatePortRange(this.ports.internalStart, this.ports.internalEnd);
|
||||
return _.map(internalRange, (internal) => {
|
||||
const internalRange = this.generatePortRange(
|
||||
this.ports.internalStart,
|
||||
this.ports.internalEnd,
|
||||
);
|
||||
return _.map(internalRange, internal => {
|
||||
return `${internal}/${this.ports.protocol}`;
|
||||
});
|
||||
}
|
||||
@ -80,14 +87,10 @@ export class PortMap {
|
||||
* and produces a list of PortMap objects, which can then be compared.
|
||||
*
|
||||
*/
|
||||
public static fromDockerOpts(
|
||||
portBindings: PortBindings,
|
||||
): PortMap[] {
|
||||
|
||||
public static fromDockerOpts(portBindings: PortBindings): PortMap[] {
|
||||
// Create a list of portBindings, rather than the map (which we can't
|
||||
// order)
|
||||
const portMaps = _.map(portBindings, (hostObj, internalStr) => {
|
||||
|
||||
const match = internalStr.match(DOCKER_OPTS_PORTS_REGEX);
|
||||
if (match == null) {
|
||||
throw new Error(`Could not parse docker port output: ${internalStr}`);
|
||||
@ -114,13 +117,14 @@ export class PortMap {
|
||||
public static normalisePortMaps(portMaps: PortMap[]): PortMap[] {
|
||||
// Fold any ranges into each other if possible
|
||||
return _(portMaps)
|
||||
.sortBy((p) => p.ports.protocol)
|
||||
.sortBy((p) => p.ports.host)
|
||||
.sortBy((p) => p.ports.internalStart)
|
||||
.sortBy(p => p.ports.protocol)
|
||||
.sortBy(p => p.ports.host)
|
||||
.sortBy(p => p.ports.internalStart)
|
||||
.reduce((res: PortMap[], p: PortMap) => {
|
||||
const last = _.last(res);
|
||||
|
||||
if (last != null &&
|
||||
if (
|
||||
last != null &&
|
||||
last.ports.internalEnd + 1 === p.ports.internalStart &&
|
||||
last.ports.externalEnd + 1 === p.ports.externalStart &&
|
||||
last.ports.protocol === p.ports.protocol &&
|
||||
@ -138,7 +142,9 @@ export class PortMap {
|
||||
private parsePortString(portStr: string): void {
|
||||
const match = portStr.match(PORTS_REGEX);
|
||||
if (match == null) {
|
||||
throw new InvalidPortDefinition(`Could not parse port definition: ${portStr}`);
|
||||
throw new InvalidPortDefinition(
|
||||
`Could not parse port definition: ${portStr}`,
|
||||
);
|
||||
}
|
||||
|
||||
// Ignore the first parameter (the complete match) and separate the matched
|
||||
@ -178,7 +184,10 @@ export class PortMap {
|
||||
};
|
||||
|
||||
// Ensure we have the same range
|
||||
if (this.ports.internalEnd - this.ports.internalStart !== this.ports.externalEnd - this.ports.externalStart) {
|
||||
if (
|
||||
this.ports.internalEnd - this.ports.internalStart !==
|
||||
this.ports.externalEnd - this.ports.externalStart
|
||||
) {
|
||||
throw new InvalidPortDefinition(
|
||||
`Range for internal and external ports does not match: ${portStr}`,
|
||||
);
|
||||
@ -187,7 +196,9 @@ export class PortMap {
|
||||
|
||||
private generatePortRange(start: number, end: number): number[] {
|
||||
if (start > end) {
|
||||
throw new Error('Incorrect port range! The end port cannot be larger than the start port!');
|
||||
throw new Error(
|
||||
'Incorrect port range! The end port cannot be larger than the start port!',
|
||||
);
|
||||
}
|
||||
|
||||
return _.range(start, end + 1);
|
||||
|

@@ -71,7 +71,11 @@ export function sanitiseComposeConfig(
}) as ServiceComposeConfig;

if (filtered.length > 0) {
console.log(`Warning: Ignoring unsupported or unknown compose fields: ${filtered.join(', ')}`);
console.log(
`Warning: Ignoring unsupported or unknown compose fields: ${filtered.join(
', ',
)}`,
);
}

return toReturn;

@@ -239,9 +239,9 @@ module.exports = class ServiceManager extends EventEmitter

wait = ->
Promise.any _.map handoverCompletePaths, (file) ->
fs.statAsync(file)
.then ->
fs.unlinkAsync(file).catch(_.noop)
fs.statAsync(file)
.then ->
fs.unlinkAsync(file).catch(_.noop)
.catch ->
if Date.now() < deadline
Promise.delay(pollInterval).then(wait)
|
||||
|
@ -22,7 +22,6 @@ import { sanitiseComposeConfig } from './sanitise';
|
||||
import * as constants from '../lib/constants';
|
||||
|
||||
export class Service {
|
||||
|
||||
public appId: number | null;
|
||||
public imageId: number | null;
|
||||
public config: ServiceConfig;
|
||||
@ -64,8 +63,7 @@ export class Service {
|
||||
'cpus',
|
||||
].concat(Service.configArrayFields);
|
||||
|
||||
private constructor() {
|
||||
}
|
||||
private constructor() {}
|
||||
|
||||
// The type here is actually ServiceComposeConfig, except that the
|
||||
// keys must be camelCase'd first
|
||||
@ -77,7 +75,9 @@ export class Service {
|
||||
|
||||
appConfig = ComposeUtils.camelCaseConfig(appConfig);
|
||||
|
||||
const intOrNull = (val: string | number | null | undefined): number | null => {
|
||||
const intOrNull = (
|
||||
val: string | number | null | undefined,
|
||||
): number | null => {
|
||||
return checkInt(val) || null;
|
||||
};
|
||||
|
||||
@ -112,13 +112,13 @@ export class Service {
|
||||
// For any types which do not change, we change config[value] directly
|
||||
|
||||
// First process the networks correctly
|
||||
let networks: ServiceConfig['networks'] = { };
|
||||
let networks: ServiceConfig['networks'] = {};
|
||||
if (_.isArray(config.networks)) {
|
||||
_.each(config.networks, (name) => {
|
||||
networks[name] = { };
|
||||
_.each(config.networks, name => {
|
||||
networks[name] = {};
|
||||
});
|
||||
} else if(_.isObject(config.networks)) {
|
||||
networks = config.networks || { };
|
||||
} else if (_.isObject(config.networks)) {
|
||||
networks = config.networks || {};
|
||||
}
|
||||
// Prefix the network entries with the app id
|
||||
networks = _.mapKeys(networks, (_v, k) => `${service.appId}_${k}`);
|
||||
@ -127,17 +127,24 @@ export class Service {
|
||||
// Check for unsupported networkMode entries
|
||||
if (config.networkMode != null) {
|
||||
if (/service:(\s*)?.+/.test(config.networkMode)) {
|
||||
console.log('Warning: A network_mode referencing a service is not yet supported. Ignoring.');
|
||||
console.log(
|
||||
'Warning: A network_mode referencing a service is not yet supported. Ignoring.',
|
||||
);
|
||||
delete config.networkMode;
|
||||
} else if (/container:(\s*)?.+/.test(config.networkMode)) {
|
||||
console.log('Warning: A network_mode referencing a container is not supported. Ignoring.');
|
||||
console.log(
|
||||
'Warning: A network_mode referencing a container is not supported. Ignoring.',
|
||||
);
|
||||
delete config.networkMode;
|
||||
}
|
||||
}
|
||||
|
||||
// memory strings
|
||||
const memLimit = ComposeUtils.parseMemoryNumber(config.memLimit, '0');
|
||||
const memReservation = ComposeUtils.parseMemoryNumber(config.memReservation, '0');
|
||||
const memReservation = ComposeUtils.parseMemoryNumber(
|
||||
config.memReservation,
|
||||
'0',
|
||||
);
|
||||
const shmSize = ComposeUtils.parseMemoryNumber(config.shmSize, '64m');
|
||||
delete config.memLimit;
|
||||
delete config.memReservation;
|
||||
@ -151,7 +158,7 @@ export class Service {
|
||||
delete config.stopGracePeriod;
|
||||
|
||||
// ulimits
|
||||
const ulimits: ServiceConfig['ulimits'] = { };
|
||||
const ulimits: ServiceConfig['ulimits'] = {};
|
||||
_.each(config.ulimits, (limit, name) => {
|
||||
if (_.isNumber(limit)) {
|
||||
ulimits[name] = { soft: limit, hard: limit };
|
||||
@ -163,11 +170,11 @@ export class Service {
|
||||
|
||||
// string or array of strings - normalise to an array
|
||||
if (_.isString(config.dns)) {
|
||||
config.dns = [ config.dns ];
|
||||
config.dns = [config.dns];
|
||||
}
|
||||
|
||||
if (_.isString(config.dnsSearch)) {
|
||||
config.dnsSearch = [ config.dnsSearch ];
|
||||
config.dnsSearch = [config.dnsSearch];
|
||||
}
|
||||
|
||||
// Assign network_mode to a default value if necessary
|
||||
@ -189,25 +196,27 @@ export class Service {
|
||||
// If we don't have any networks, we need to
|
||||
// create the default with some default options
|
||||
networks[config.networkMode] = {
|
||||
aliases: [ service.serviceName || '' ],
|
||||
aliases: [service.serviceName || ''],
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Add default environment variables and labels
|
||||
config.environment = Service.extendEnvVars(
|
||||
config.environment || { },
|
||||
config.environment || {},
|
||||
options,
|
||||
service.appId || 0,
|
||||
service.serviceName || '',
|
||||
);
|
||||
config.labels = ComposeUtils.normalizeLabels(Service.extendLabels(
|
||||
config.labels || { },
|
||||
options,
|
||||
service.appId || 0,
|
||||
service.serviceId || 0,
|
||||
service.serviceName || '',
|
||||
));
|
||||
config.labels = ComposeUtils.normalizeLabels(
|
||||
Service.extendLabels(
|
||||
config.labels || {},
|
||||
options,
|
||||
service.appId || 0,
|
||||
service.serviceId || 0,
|
||||
service.serviceName || '',
|
||||
),
|
||||
);
|
||||
|
||||
// Any other special case handling
|
||||
if (config.networkMode === 'host' && !config.hostname) {
|
||||
@ -215,12 +224,24 @@ export class Service {
|
||||
}
|
||||
config.restart = ComposeUtils.createRestartPolicy(config.restart);
|
||||
config.command = ComposeUtils.getCommand(config.command, options.imageInfo);
|
||||
config.entrypoint = ComposeUtils.getEntryPoint(config.entrypoint, options.imageInfo);
|
||||
config.stopSignal = ComposeUtils.getStopSignal(config.stopSignal, options.imageInfo);
|
||||
config.workingDir = ComposeUtils.getWorkingDir(config.workingDir, options.imageInfo);
|
||||
config.entrypoint = ComposeUtils.getEntryPoint(
|
||||
config.entrypoint,
|
||||
options.imageInfo,
|
||||
);
|
||||
config.stopSignal = ComposeUtils.getStopSignal(
|
||||
config.stopSignal,
|
||||
options.imageInfo,
|
||||
);
|
||||
config.workingDir = ComposeUtils.getWorkingDir(
|
||||
config.workingDir,
|
||||
options.imageInfo,
|
||||
);
|
||||
config.user = ComposeUtils.getUser(config.user, options.imageInfo);
|
||||
|
||||
const healthcheck = ComposeUtils.getHealthcheck(config.healthcheck, options.imageInfo);
|
||||
const healthcheck = ComposeUtils.getHealthcheck(
|
||||
config.healthcheck,
|
||||
options.imageInfo,
|
||||
);
|
||||
delete config.healthcheck;
|
||||
|
||||
config.volumes = Service.extendAndSanitiseVolumes(
|
||||
@ -232,7 +253,7 @@ export class Service {
|
||||
|
||||
let portMaps: PortMap[] = [];
|
||||
if (config.ports != null) {
|
||||
portMaps = _.map(config.ports, (p) => new PortMap(p));
|
||||
portMaps = _.map(config.ports, p => new PortMap(p));
|
||||
}
|
||||
delete config.ports;
|
||||
|
||||
@ -241,11 +262,17 @@ export class Service {
|
||||
if (config.expose != null) {
|
||||
expose = _.map(config.expose, ComposeUtils.sanitiseExposeFromCompose);
|
||||
}
|
||||
const imageExposedPorts = _.get(options.imageInfo, 'Config.ExposedPorts', { });
|
||||
const imageExposedPorts = _.get(
|
||||
options.imageInfo,
|
||||
'Config.ExposedPorts',
|
||||
{},
|
||||
);
|
||||
expose = expose.concat(_.keys(imageExposedPorts));
|
||||
expose = _.uniq(expose);
|
||||
// Also add any exposed ports which are implied from the portMaps
|
||||
const exposedFromPortMappings = _.flatMap(portMaps, (port) => port.toExposedPortArray());
|
||||
const exposedFromPortMappings = _.flatMap(portMaps, port =>
|
||||
port.toExposedPortArray(),
|
||||
);
|
||||
expose = expose.concat(exposedFromPortMappings);
|
||||
delete config.expose;
|
||||
|
||||
@ -263,11 +290,11 @@ export class Service {
|
||||
}
|
||||
|
||||
if (_.isArray(config.sysctls)) {
|
||||
config.sysctls = _.fromPairs(_.map(config.sysctls, (v) => _.split(v, '=')));
|
||||
config.sysctls = _.fromPairs(_.map(config.sysctls, v => _.split(v, '=')));
|
||||
}
|
||||
config.sysctls = _.mapValues(config.sysctls, String);
|
||||
|
||||
_.each([ 'cpuShares', 'cpuQuota', 'oomScoreAdj' ], (key)=> {
|
||||
_.each(['cpuShares', 'cpuQuota', 'oomScoreAdj'], key => {
|
||||
const numVal = checkInt(config[key]);
|
||||
if (numVal) {
|
||||
config[key] = numVal;
|
||||
@ -288,7 +315,7 @@ export class Service {
|
||||
let tmpfs: string[] = [];
|
||||
if (config.tmpfs != null) {
|
||||
if (_.isString(config.tmpfs)) {
|
||||
tmpfs = [ config.tmpfs ];
|
||||
tmpfs = [config.tmpfs];
|
||||
} else {
|
||||
tmpfs = config.tmpfs;
|
||||
}
|
||||
@ -300,33 +327,33 @@ export class Service {
|
||||
|
||||
service.config = _.defaults(config, {
|
||||
portMaps,
|
||||
capAdd: [ ],
|
||||
capDrop:[ ],
|
||||
command: [ ],
|
||||
capAdd: [],
|
||||
capDrop: [],
|
||||
command: [],
|
||||
cgroupParent: '',
|
||||
devices,
|
||||
dnsOpt: [ ],
|
||||
dnsOpt: [],
|
||||
entrypoint: '',
|
||||
extraHosts: [ ],
|
||||
extraHosts: [],
|
||||
expose,
|
||||
networks,
|
||||
dns: [ ],
|
||||
dnsSearch: [ ],
|
||||
environment: { },
|
||||
labels: { },
|
||||
dns: [],
|
||||
dnsSearch: [],
|
||||
environment: {},
|
||||
labels: {},
|
||||
networkMode: '',
|
||||
ulimits,
|
||||
groupAdd: [ ],
|
||||
groupAdd: [],
|
||||
healthcheck,
|
||||
pid: '',
|
||||
pidsLimit: 0,
|
||||
securityOpt: [ ],
|
||||
securityOpt: [],
|
||||
stopGracePeriod,
|
||||
stopSignal: '',
|
||||
sysctls: { },
|
||||
sysctls: {},
|
||||
tmpfs,
|
||||
usernsMode: '',
|
||||
volumes: [ ],
|
||||
volumes: [],
|
||||
restart: 'always',
|
||||
cpuShares: 0,
|
||||
cpuQuota: 0,
|
||||
@ -354,14 +381,16 @@ export class Service {
|
||||
return service;
|
||||
}
|
||||
|
||||
public static fromDockerContainer(container: Dockerode.ContainerInspectInfo): Service {
|
||||
public static fromDockerContainer(
|
||||
container: Dockerode.ContainerInspectInfo,
|
||||
): Service {
|
||||
const svc = new Service();
|
||||
|
||||
if (container.State.Running) {
|
||||
svc.status = 'Running';
|
||||
} else if(container.State.Status === 'created') {
|
||||
} else if (container.State.Status === 'created') {
|
||||
svc.status = 'Installed';
|
||||
} else if(container.State.Status === 'dead') {
|
||||
} else if (container.State.Status === 'dead') {
|
||||
svc.status = 'Dead';
|
||||
} else {
|
||||
svc.status = container.State.Status;
|
||||
@ -377,23 +406,27 @@ export class Service {
|
||||
hostname = '';
|
||||
}
|
||||
|
||||
let networks: ServiceConfig['networks'] = { };
|
||||
let networks: ServiceConfig['networks'] = {};
|
||||
if (_.get(container, 'NetworkSettings.Networks', null) != null) {
|
||||
networks = ComposeUtils.dockerNetworkToServiceNetwork(container.NetworkSettings.Networks);
|
||||
networks = ComposeUtils.dockerNetworkToServiceNetwork(
|
||||
container.NetworkSettings.Networks,
|
||||
);
|
||||
}
|
||||
|
||||
const ulimits: ServiceConfig['ulimits'] = { };
|
||||
const ulimits: ServiceConfig['ulimits'] = {};
|
||||
_.each(container.HostConfig.Ulimits, ({ Name, Soft, Hard }) => {
|
||||
ulimits[Name] = { soft: Soft, hard: Hard };
|
||||
});
|
||||
|
||||
const portMaps = PortMap.fromDockerOpts(container.HostConfig.PortBindings);
|
||||
let expose = _.flatMap(
|
||||
_.flatMap(portMaps, (p) => p.toDockerOpts().exposedPorts),
|
||||
_.flatMap(portMaps, p => p.toDockerOpts().exposedPorts),
|
||||
_.keys,
|
||||
);
|
||||
if (container.Config.ExposedPorts != null) {
|
||||
expose = expose.concat(_.map(container.Config.ExposedPorts, (_v, k) => k.toString()));
|
||||
expose = expose.concat(
|
||||
_.map(container.Config.ExposedPorts, (_v, k) => k.toString()),
|
||||
);
|
||||
}
|
||||
expose = _.uniq(expose);
|
||||
|
||||
@ -425,21 +458,24 @@ export class Service {
|
||||
hostname,
|
||||
command: container.Config.Cmd || '',
|
||||
entrypoint: container.Config.Entrypoint || '',
|
||||
volumes: _.concat(container.HostConfig.Binds || [], _.keys(container.Config.Volumes || { })),
|
||||
volumes: _.concat(
|
||||
container.HostConfig.Binds || [],
|
||||
_.keys(container.Config.Volumes || {}),
|
||||
),
|
||||
image: container.Config.Image,
|
||||
environment: _.omit(conversions.envArrayToObject(container.Config.Env || [ ]), [
|
||||
'RESIN_DEVICE_NAME_AT_INIT',
|
||||
'BALENA_DEVICE_NAME_AT_INIT',
|
||||
]),
|
||||
environment: _.omit(
|
||||
conversions.envArrayToObject(container.Config.Env || []),
|
||||
['RESIN_DEVICE_NAME_AT_INIT', 'BALENA_DEVICE_NAME_AT_INIT'],
|
||||
),
|
||||
privileged: container.HostConfig.Privileged || false,
|
||||
labels: ComposeUtils.normalizeLabels(container.Config.Labels || { }),
|
||||
labels: ComposeUtils.normalizeLabels(container.Config.Labels || {}),
|
||||
running: container.State.Running,
|
||||
restart,
|
||||
capAdd: container.HostConfig.CapAdd || [ ],
|
||||
capDrop: container.HostConfig.CapDrop || [ ],
|
||||
devices: container.HostConfig.Devices || [ ],
|
||||
capAdd: container.HostConfig.CapAdd || [],
|
||||
capDrop: container.HostConfig.CapDrop || [],
|
||||
devices: container.HostConfig.Devices || [],
|
||||
networks,
|
||||
memLimit: container.HostConfig.Memory || 0 ,
|
||||
memLimit: container.HostConfig.Memory || 0,
|
||||
memReservation: container.HostConfig.MemoryReservation || 0,
|
||||
shmSize: container.HostConfig.ShmSize || 0,
|
||||
cpuShares: container.HostConfig.CpuShares || 0,
|
||||
@ -450,24 +486,24 @@ export class Service {
|
||||
domainname: container.Config.Domainname || '',
|
||||
oomKillDisable: container.HostConfig.OomKillDisable || false,
|
||||
oomScoreAdj: container.HostConfig.OomScoreAdj || 0,
|
||||
dns: container.HostConfig.Dns || [ ],
|
||||
dnsSearch: container.HostConfig.DnsSearch || [ ],
|
||||
dnsOpt: container.HostConfig.DnsOptions || [ ],
|
||||
dns: container.HostConfig.Dns || [],
|
||||
dnsSearch: container.HostConfig.DnsSearch || [],
|
||||
dnsOpt: container.HostConfig.DnsOptions || [],
|
||||
tmpfs,
|
||||
extraHosts: container.HostConfig.ExtraHosts || [ ],
|
||||
extraHosts: container.HostConfig.ExtraHosts || [],
|
||||
ulimits,
|
||||
stopSignal: (container.Config as any).StopSignal || '',
|
||||
stopGracePeriod: (container.Config as any).StopTimeout || 0,
|
||||
healthcheck: ComposeUtils.dockerHealthcheckToServiceHealthcheck(
|
||||
(container.Config as any).Healthcheck || { },
|
||||
(container.Config as any).Healthcheck || {},
|
||||
),
|
||||
readOnly: container.HostConfig.ReadonlyRootfs || false,
|
||||
sysctls: container.HostConfig.Sysctls || { },
|
||||
sysctls: container.HostConfig.Sysctls || {},
|
||||
cgroupParent: container.HostConfig.CgroupParent || '',
|
||||
groupAdd: container.HostConfig.GroupAdd || [ ],
|
||||
groupAdd: container.HostConfig.GroupAdd || [],
|
||||
pid: container.HostConfig.PidMode || '',
|
||||
pidsLimit: container.HostConfig.PidsLimit || 0,
|
||||
securityOpt: container.HostConfig.SecurityOpt || [ ],
|
||||
securityOpt: container.HostConfig.SecurityOpt || [],
|
||||
usernsMode: container.HostConfig.UsernsMode || '',
|
||||
ipc: container.HostConfig.IpcMode || '',
|
||||
macAddress: (container.Config as any).MacAddress || '',
|
||||
@ -488,12 +524,14 @@ export class Service {
|
||||
return svc;
|
||||
}
|
||||
|
||||
public toDockerContainer(opts: { deviceName: string }): Dockerode.ContainerCreateOptions {
|
||||
public toDockerContainer(opts: {
|
||||
deviceName: string;
|
||||
}): Dockerode.ContainerCreateOptions {
|
||||
const { binds, volumes } = this.getBindsAndVolumes();
|
||||
const { exposedPorts, portBindings } = this.generateExposeAndPorts();
|
||||
|
||||
const tmpFs: Dictionary<''> = { };
|
||||
_.each(this.config.tmpfs, (tmp) => {
|
||||
const tmpFs: Dictionary<''> = {};
|
||||
_.each(this.config.tmpfs, tmp => {
|
||||
tmpFs[tmp] = '';
|
||||
});
|
||||
|
||||
@ -509,14 +547,21 @@ export class Service {
|
||||
Volumes: volumes,
|
||||
// Typings are wrong here, the docker daemon accepts a string or string[],
|
||||
Entrypoint: this.config.entrypoint as string,
|
||||
Env: conversions.envObjectToArray(_.assign({
|
||||
RESIN_DEVICE_NAME_AT_INIT: opts.deviceName,
|
||||
BALENA_DEVICE_NAME_AT_INIT: opts.deviceName,
|
||||
}, this.config.environment)),
|
||||
Env: conversions.envObjectToArray(
|
||||
_.assign(
|
||||
{
|
||||
RESIN_DEVICE_NAME_AT_INIT: opts.deviceName,
|
||||
BALENA_DEVICE_NAME_AT_INIT: opts.deviceName,
|
||||
},
|
||||
this.config.environment,
|
||||
),
|
||||
),
|
||||
ExposedPorts: exposedPorts,
|
||||
Image: this.config.image,
|
||||
Labels: this.config.labels,
|
||||
NetworkingConfig: ComposeUtils.serviceNetworksToDockerNetworks(mainNetwork),
|
||||
NetworkingConfig: ComposeUtils.serviceNetworksToDockerNetworks(
|
||||
mainNetwork,
|
||||
),
|
||||
StopSignal: this.config.stopSignal,
|
||||
Domainname: this.config.domainname,
|
||||
Hostname: this.config.hostname,
|
||||
@ -542,8 +587,12 @@ export class Service {
|
||||
PidsLimit: this.config.pidsLimit,
|
||||
SecurityOpt: this.config.securityOpt,
|
||||
Sysctls: this.config.sysctls,
|
||||
Ulimits: ComposeUtils.serviceUlimitsToDockerUlimits(this.config.ulimits),
|
||||
RestartPolicy: ComposeUtils.serviceRestartToDockerRestartPolicy(this.config.restart),
|
||||
Ulimits: ComposeUtils.serviceUlimitsToDockerUlimits(
|
||||
this.config.ulimits,
|
||||
),
|
||||
RestartPolicy: ComposeUtils.serviceRestartToDockerRestartPolicy(
|
||||
this.config.restart,
|
||||
),
|
||||
CpuShares: this.config.cpuShares,
|
||||
CpuQuota: this.config.cpuQuota,
|
||||
// Type missing, and HostConfig isn't defined as a seperate object
|
||||
@ -561,7 +610,9 @@ export class Service {
|
||||
NanoCpus: this.config.cpus,
|
||||
IpcMode: this.config.ipc,
|
||||
} as Dockerode.ContainerCreateOptions['HostConfig'],
|
||||
Healthcheck: ComposeUtils.serviceHealthcheckToDockerHealthcheck(this.config.healthcheck),
|
||||
Healthcheck: ComposeUtils.serviceHealthcheckToDockerHealthcheck(
|
||||
this.config.healthcheck,
|
||||
),
|
||||
StopTimeout: this.config.stopGracePeriod,
|
||||
};
|
||||
}
|
||||
@ -574,47 +625,54 @@ export class Service {
|
||||
sameNetworks = false;
|
||||
return;
|
||||
}
|
||||
sameNetworks = sameNetworks && this.isSameNetwork(this.config.networks[name], network);
|
||||
sameNetworks =
|
||||
sameNetworks && this.isSameNetwork(this.config.networks[name], network);
|
||||
});
|
||||
|
||||
// Check the configuration for any changes
|
||||
const thisOmitted = _.omit(this.config, Service.omitFields);
|
||||
const otherOmitted = _.omit(service.config, Service.omitFields);
|
||||
let sameConfig = _.isEqual(
|
||||
thisOmitted,
|
||||
otherOmitted,
|
||||
);
|
||||
let sameConfig = _.isEqual(thisOmitted, otherOmitted);
|
||||
const nonArrayEquals = sameConfig;
|
||||
|
||||
// Check for array fields which don't match
|
||||
const differentArrayFields: string[] = [];
|
||||
sameConfig = sameConfig && _.every(Service.configArrayFields, (field: ServiceConfigArrayField) => {
|
||||
return _.isEmpty(
|
||||
_.xorWith(
|
||||
// TODO: The typings here aren't accepted, even though we
|
||||
// know it's fine
|
||||
(this.config as any)[field],
|
||||
(service.config as any)[field],
|
||||
(a, b) => {
|
||||
const eq = _.isEqual(a, b);
|
||||
if (!eq) {
|
||||
differentArrayFields.push(field);
|
||||
}
|
||||
return eq;
|
||||
},
|
||||
),
|
||||
);
|
||||
});
|
||||
sameConfig =
|
||||
sameConfig &&
|
||||
_.every(Service.configArrayFields, (field: ServiceConfigArrayField) => {
|
||||
return _.isEmpty(
|
||||
_.xorWith(
|
||||
// TODO: The typings here aren't accepted, even though we
|
||||
// know it's fine
|
||||
(this.config as any)[field],
|
||||
(service.config as any)[field],
|
||||
(a, b) => {
|
||||
const eq = _.isEqual(a, b);
|
||||
if (!eq) {
|
||||
differentArrayFields.push(field);
|
||||
}
|
||||
return eq;
|
||||
},
|
||||
),
|
||||
);
|
||||
});
|
||||
|
||||
if (!(sameConfig && sameNetworks)) {
|
||||
// Add some console output for why a service is not matching
|
||||
// so that if we end up in a restart loop, we know exactly why
|
||||
console.log(`Replacing container for service ${this.serviceName} because of config changes:`);
|
||||
console.log(
|
||||
`Replacing container for service ${
|
||||
this.serviceName
|
||||
} because of config changes:`,
|
||||
);
|
||||
if (!nonArrayEquals) {
|
||||
// Try not to leak any sensitive information
|
||||
const diffObj = diff(thisOmitted, otherOmitted) as ServiceConfig;
|
||||
if (diffObj.environment != null) {
|
||||
diffObj.environment = _.mapValues(diffObj.environment, () => 'hidden');
|
||||
diffObj.environment = _.mapValues(
|
||||
diffObj.environment,
|
||||
() => 'hidden',
|
||||
);
|
||||
}
|
||||
console.log(' Non-array fields: ', JSON.stringify(diffObj));
|
||||
}
|
||||
@ -625,7 +683,6 @@ export class Service {
|
||||
if (!sameNetworks) {
|
||||
console.log(' Network changes detected');
|
||||
}
|
||||
|
||||
}
|
||||
return sameNetworks && sameConfig;
|
||||
}
|
||||
@ -635,19 +692,26 @@ export class Service {
|
||||
}
|
||||
|
||||
public isEqualExceptForRunningState(service: Service): boolean {
|
||||
return this.isEqualConfig(service) &&
|
||||
return (
|
||||
this.isEqualConfig(service) &&
|
||||
this.releaseId === service.releaseId &&
|
||||
this.imageId === service.imageId;
|
||||
this.imageId === service.imageId
|
||||
);
|
||||
}
|
||||
|
||||
public isEqual(service: Service): boolean {
|
||||
return this.isEqualExceptForRunningState(service) &&
|
||||
this.config.running === service.config.running;
|
||||
return (
|
||||
this.isEqualExceptForRunningState(service) &&
|
||||
this.config.running === service.config.running
|
||||
);
|
||||
}
|
||||
|
||||
public getNamedVolumes() {
|
||||
const defaults = Service.defaultBinds(this.appId || 0, this.serviceName || '');
|
||||
const validVolumes = _.map(this.config.volumes, (volume) => {
|
||||
const defaults = Service.defaultBinds(
|
||||
this.appId || 0,
|
||||
this.serviceName || '',
|
||||
);
|
||||
const validVolumes = _.map(this.config.volumes, volume => {
|
||||
if (_.includes(defaults, volume) || !_.includes(volume, ':')) {
|
||||
return null;
|
||||
}
|
||||
@ -655,7 +719,9 @@ export class Service {
|
||||
if (!path.isAbsolute(bindSource)) {
|
||||
const match = bindSource.match(/[0-9]+_(.+)/);
|
||||
if (match == null) {
|
||||
console.log('Error: There was an error parsing a volume bind source, ignoring.');
|
||||
console.log(
|
||||
'Error: There was an error parsing a volume bind source, ignoring.',
|
||||
);
|
||||
console.log(' bind source: ', bindSource);
|
||||
return null;
|
||||
}
|
||||
@ -675,20 +741,23 @@ export class Service {
|
||||
}
|
||||
|
||||
private handoverCompletePathOnHost(): string {
|
||||
return path.join(constants.rootMountPoint, updateLock.lockPath(this.appId || 0, this.serviceName || ''));
|
||||
return path.join(
|
||||
constants.rootMountPoint,
|
||||
updateLock.lockPath(this.appId || 0, this.serviceName || ''),
|
||||
);
|
||||
}
|
||||
|
||||
private getBindsAndVolumes(): {
|
||||
binds: string[],
|
||||
volumes: { [volName: string]: { } }
|
||||
binds: string[];
|
||||
volumes: { [volName: string]: {} };
|
||||
} {
|
||||
const binds: string[] = [ ];
|
||||
const volumes: { [volName: string]: { } } = { };
|
||||
_.each(this.config.volumes, (volume) => {
|
||||
const binds: string[] = [];
|
||||
const volumes: { [volName: string]: {} } = {};
|
||||
_.each(this.config.volumes, volume => {
|
||||
if (_.includes(volume, ':')) {
|
||||
binds.push(volume);
|
||||
} else {
|
||||
volumes[volume] = { };
|
||||
volumes[volume] = {};
|
||||
}
|
||||
});
|
||||
|
||||
@ -696,20 +765,20 @@ export class Service {
|
||||
}
|
||||
|
||||
private generateExposeAndPorts(): DockerPortOptions {
|
||||
const exposed: DockerPortOptions['exposedPorts'] = { };
|
||||
const ports: DockerPortOptions['portBindings'] = { };
|
||||
const exposed: DockerPortOptions['exposedPorts'] = {};
|
||||
const ports: DockerPortOptions['portBindings'] = {};
|
||||
|
||||
_.each(this.config.portMaps, (pmap) => {
|
||||
const { exposedPorts, portBindings } = pmap.toDockerOpts();
|
||||
_.each(this.config.portMaps, pmap => {
|
||||
const { exposedPorts, portBindings } = pmap.toDockerOpts();
|
||||
_.merge(exposed, exposedPorts);
|
||||
_.merge(ports, portBindings);
|
||||
});
|
||||
|
||||
// We also want to merge the compose and image exposedPorts
|
||||
// into the list of exposedPorts
|
||||
const composeExposed: DockerPortOptions['exposedPorts'] = { };
|
||||
_.each(this.config.expose, (port) => {
|
||||
composeExposed[port] = { };
|
||||
const composeExposed: DockerPortOptions['exposedPorts'] = {};
|
||||
_.each(this.config.expose, port => {
|
||||
composeExposed[port] = {};
|
||||
});
|
||||
_.merge(exposed, composeExposed);
|
||||
|
||||
@ -722,22 +791,29 @@ export class Service {
|
||||
appId: number,
|
||||
serviceName: string,
|
||||
): { [envVarName: string]: string } {
|
||||
let defaultEnv: { [ envVarName: string]: string } = {};
|
||||
for(let namespace of [ 'BALENA', 'RESIN' ]){
|
||||
_.assign(defaultEnv, _.mapKeys({
|
||||
APP_ID: appId.toString(),
|
||||
APP_NAME: options.appName,
|
||||
SERVICE_NAME: serviceName,
|
||||
DEVICE_UUID: options.uuid,
|
||||
DEVICE_TYPE: options.deviceType,
|
||||
HOST_OS_VERSION: options.osVersion,
|
||||
SUPERVISOR_VERSION: options.version,
|
||||
APP_LOCK_PATH: '/tmp/balena/updates.lock',
|
||||
}, (_val, key) => `${namespace}_${key}`));
|
||||
let defaultEnv: { [envVarName: string]: string } = {};
|
||||
for (let namespace of ['BALENA', 'RESIN']) {
|
||||
_.assign(
|
||||
defaultEnv,
|
||||
_.mapKeys(
|
||||
{
|
||||
APP_ID: appId.toString(),
|
||||
APP_NAME: options.appName,
|
||||
SERVICE_NAME: serviceName,
|
||||
DEVICE_UUID: options.uuid,
|
||||
DEVICE_TYPE: options.deviceType,
|
||||
HOST_OS_VERSION: options.osVersion,
|
||||
SUPERVISOR_VERSION: options.version,
|
||||
APP_LOCK_PATH: '/tmp/balena/updates.lock',
|
||||
},
|
||||
(_val, key) => `${namespace}_${key}`,
|
||||
),
|
||||
);
|
||||
defaultEnv[namespace] = '1';
|
||||
}
|
||||
defaultEnv['RESIN_SERVICE_KILL_ME_PATH'] = '/tmp/balena/handover-complete';
|
||||
defaultEnv['BALENA_SERVICE_HANDOVER_COMPLETE_PATH'] = '/tmp/balena/handover-complete';
|
||||
defaultEnv['BALENA_SERVICE_HANDOVER_COMPLETE_PATH'] =
|
||||
'/tmp/balena/handover-complete';
|
||||
defaultEnv['USER'] = 'root';
|
||||
|
||||
let env = _.defaults(environment, defaultEnv);
|
||||
@ -775,18 +851,23 @@ export class Service {
|
||||
.isEqual(targetAliases);
|
||||
} else {
|
||||
// Otherwise compare them literally
|
||||
sameNetwork = _.isEmpty(_.xorWith(currentAliases, targetAliases, _.isEqual));
|
||||
sameNetwork = _.isEmpty(
|
||||
_.xorWith(currentAliases, targetAliases, _.isEqual),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (target.ipv4Address != null) {
|
||||
sameNetwork = sameNetwork && _.isEqual(current.ipv4Address, target.ipv4Address);
|
||||
sameNetwork =
|
||||
sameNetwork && _.isEqual(current.ipv4Address, target.ipv4Address);
|
||||
}
|
||||
if (target.ipv6Address != null) {
|
||||
sameNetwork = sameNetwork && _.isEqual(current.ipv6Address, target.ipv6Address);
|
||||
sameNetwork =
|
||||
sameNetwork && _.isEqual(current.ipv6Address, target.ipv6Address);
|
||||
}
|
||||
if (target.linkLocalIps != null) {
|
||||
sameNetwork = sameNetwork && _.isEqual(current.linkLocalIps, target.linkLocalIps);
|
||||
sameNetwork =
|
||||
sameNetwork && _.isEqual(current.linkLocalIps, target.linkLocalIps);
|
||||
}
|
||||
return sameNetwork;
|
||||
}
|
||||
@ -805,7 +886,7 @@ export class Service {
|
||||
'io.balena.service-name': serviceName,
|
||||
});
|
||||
|
||||
const imageLabels = _.get(imageInfo, 'Config.Labels', { });
|
||||
const imageLabels = _.get(imageInfo, 'Config.Labels', {});
|
||||
newLabels = _.defaults(newLabels, imageLabels);
|
||||
return newLabels;
|
||||
}
|
||||
@ -818,10 +899,10 @@ export class Service {
|
||||
): ServiceConfig['volumes'] {
|
||||
let volumes: ServiceConfig['volumes'] = [];
|
||||
|
||||
_.each(composeVolumes, (volume) => {
|
||||
_.each(composeVolumes, volume => {
|
||||
const isBind = _.includes(volume, ':');
|
||||
if (isBind) {
|
||||
const [ bindSource, bindDest, mode ] = volume.split(':');
|
||||
const [bindSource, bindDest, mode] = volume.split(':');
|
||||
if (!path.isAbsolute(bindSource)) {
|
||||
// namespace our volumes by appId
|
||||
let volumeDef = `${appId}_${bindSource}:${bindDest}`;
|
||||
@ -850,5 +931,4 @@ export class Service {
|
||||
`${updateLock.lockPath(appId, serviceName)}:/tmp/balena`,
|
||||
];
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -38,7 +38,12 @@ export interface NetworkConfig {
|
||||
driver: string;
|
||||
ipam: {
|
||||
driver: string;
|
||||
config: Array<{ subnet: string, gateway: string, ipRange?: string, auxAddress?: string }>;
|
||||
config: Array<{
|
||||
subnet: string;
|
||||
gateway: string;
|
||||
ipRange?: string;
|
||||
auxAddress?: string;
|
||||
}>;
|
||||
options: { [optName: string]: string };
|
||||
};
|
||||
enableIPv6: boolean;
|
||||
|
@ -47,14 +47,16 @@ export interface ServiceComposeConfig {
|
||||
labels?: { [labelName: string]: string };
|
||||
running: boolean;
|
||||
networkMode?: string;
|
||||
networks?: string[] | {
|
||||
[networkName: string]: {
|
||||
aliases?: string[];
|
||||
ipv4Address?: string;
|
||||
ipv6Address?: string;
|
||||
linkLocalIps?: string[];
|
||||
}
|
||||
};
|
||||
networks?:
|
||||
| string[]
|
||||
| {
|
||||
[networkName: string]: {
|
||||
aliases?: string[];
|
||||
ipv4Address?: string;
|
||||
ipv6Address?: string;
|
||||
linkLocalIps?: string[];
|
||||
};
|
||||
};
|
||||
pid?: string;
|
||||
pidsLimit?: number;
|
||||
ports?: string[];
|
||||
@ -63,7 +65,7 @@ export interface ServiceComposeConfig {
|
||||
stopSignal?: string;
|
||||
sysctls?: { [name: string]: string };
|
||||
ulimits?: {
|
||||
[ulimitName: string]: number | { soft: number, hard: number };
|
||||
[ulimitName: string]: number | { soft: number; hard: number };
|
||||
};
|
||||
usernsMode?: string;
|
||||
volumes?: string[];
|
||||
@ -118,7 +120,7 @@ export interface ServiceConfig {
|
||||
ipv4Address?: string;
|
||||
ipv6Address?: string;
|
||||
linkLocalIps?: string[];
|
||||
}
|
||||
};
|
||||
};
|
||||
pid: string;
|
||||
pidsLimit: number;
|
||||
@ -127,7 +129,7 @@ export interface ServiceConfig {
|
||||
stopSignal: string;
|
||||
sysctls: { [name: string]: string };
|
||||
ulimits: {
|
||||
[ulimitName: string]: { soft: number, hard: number };
|
||||
[ulimitName: string]: { soft: number; hard: number };
|
||||
};
|
||||
usernsMode: string;
|
||||
volumes: string[];
|
||||
@ -152,19 +154,20 @@ export interface ServiceConfig {
|
||||
tty: boolean;
|
||||
}
|
||||
|
||||
export type ServiceConfigArrayField = 'volumes' |
|
||||
'devices' |
|
||||
'capAdd' |
|
||||
'capDrop' |
|
||||
'dns' |
|
||||
'dnsSearch' |
|
||||
'dnsOpt' |
|
||||
'expose' |
|
||||
'tmpfs' |
|
||||
'extraHosts' |
|
||||
'ulimitsArray' |
|
||||
'groupAdd' |
|
||||
'securityOpt';
|
||||
export type ServiceConfigArrayField =
|
||||
| 'volumes'
|
||||
| 'devices'
|
||||
| 'capAdd'
|
||||
| 'capDrop'
|
||||
| 'dns'
|
||||
| 'dnsSearch'
|
||||
| 'dnsOpt'
|
||||
| 'expose'
|
||||
| 'tmpfs'
|
||||
| 'extraHosts'
|
||||
| 'ulimitsArray'
|
||||
| 'groupAdd'
|
||||
| 'securityOpt';
|
||||
|
||||
// The config directly from the application manager, which contains
|
||||
// application information, plus the compose data
|
||||
@ -201,4 +204,3 @@ export interface DockerDevice {
|
||||
PathInContainer: string;
|
||||
CgroupPermissions: string;
|
||||
}
|
||||
|
||||
|
@ -16,7 +16,9 @@ import {
|
||||
ServiceHealthcheck,
|
||||
} from './types/service';
|
||||
|
||||
export function camelCaseConfig(literalConfig: ConfigMap): ServiceComposeConfig {
|
||||
export function camelCaseConfig(
|
||||
literalConfig: ConfigMap,
|
||||
): ServiceComposeConfig {
|
||||
const config = _.mapKeys(literalConfig, (_v, k) => _.camelCase(k));
|
||||
|
||||
// Networks can either be an object or array, but given _.isObject
|
||||
@ -31,7 +33,10 @@ export function camelCaseConfig(literalConfig: ConfigMap): ServiceComposeConfig
|
||||
return config as ServiceComposeConfig;
|
||||
}
|
||||
|
||||
export function parseMemoryNumber(valueAsString: string | null | undefined, defaultValue?: string): number {
|
||||
export function parseMemoryNumber(
|
||||
valueAsString: string | null | undefined,
|
||||
defaultValue?: string,
|
||||
): number {
|
||||
if (valueAsString == null) {
|
||||
if (defaultValue != null) {
|
||||
return parseMemoryNumber(defaultValue);
|
||||
@ -46,7 +51,17 @@ export function parseMemoryNumber(valueAsString: string | null | undefined, defa
|
||||
return 0;
|
||||
}
|
||||
const num = match[1];
|
||||
const pow: { [key: string]: number } = { '': 0, b: 0, B: 0, K: 1, k: 1, m: 2, M: 2, g: 3, G: 3 };
|
||||
const pow: { [key: string]: number } = {
|
||||
'': 0,
|
||||
b: 0,
|
||||
B: 0,
|
||||
K: 1,
|
||||
k: 1,
|
||||
m: 2,
|
||||
M: 2,
|
||||
g: 3,
|
||||
G: 3,
|
||||
};
|
||||
return parseInt(num, 10) * 1024 ** pow[match[2]];
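// Worked examples of the line above (invented inputs, assuming the unit
// capture group yields '', 'm' or 'G' as per the pow table):
//   '64m' -> 64  * 1024 ** 2 === 67108864
//   '1G'  -> 1   * 1024 ** 3 === 1073741824
//   '512' -> 512 * 1024 ** 0 === 512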
|
||||
}
|
||||
|
||||
@ -57,9 +72,7 @@ export const validRestartPolicies = [
|
||||
'unless-stopped',
|
||||
];
|
||||
|
||||
export function createRestartPolicy(
|
||||
name?: string,
|
||||
): string {
|
||||
export function createRestartPolicy(name?: string): string {
|
||||
if (name == null) {
|
||||
return 'always';
|
||||
}
|
||||
@ -67,12 +80,14 @@ export function createRestartPolicy(
|
||||
// Ensure that name is a string, otherwise the below could
|
||||
// throw
|
||||
if (!_.isString(name)) {
|
||||
console.log(`Warning: Non-string argument for restart field: ${name} - ignoring.`);
|
||||
console.log(
|
||||
`Warning: Non-string argument for restart field: ${name} - ignoring.`,
|
||||
);
|
||||
return 'always';
|
||||
}
|
||||
|
||||
name = name.toLowerCase().trim();
|
||||
if(!_.includes(validRestartPolicies, name)) {
|
||||
if (!_.includes(validRestartPolicies, name)) {
|
||||
return 'always';
|
||||
}
|
||||
|
||||
@ -87,7 +102,9 @@ function processCommandString(command: string): string {
|
||||
return command.replace(/(\$)/g, '\\$1');
|
||||
}
|
||||
|
||||
function processCommandParsedArrayElement(arg: string | { [key: string]: string}): string {
|
||||
function processCommandParsedArrayElement(
|
||||
arg: string | { [key: string]: string },
|
||||
): string {
|
||||
if (_.isString(arg)) {
|
||||
return arg;
|
||||
}
|
||||
@ -150,7 +167,7 @@ export function dockerHealthcheckToServiceHealthcheck(
|
||||
healthcheck?: Dockerode.DockerHealthcheck,
|
||||
): ServiceHealthcheck {
|
||||
if (healthcheck == null || _.isEmpty(healthcheck)) {
|
||||
return { test: [ 'NONE' ] };
|
||||
return { test: ['NONE'] };
|
||||
}
|
||||
const serviceHC: ServiceHealthcheck = {
|
||||
test: healthcheck.Test,
|
||||
@ -175,11 +192,9 @@ export function dockerHealthcheckToServiceHealthcheck(
|
||||
return serviceHC;
|
||||
}
|
||||
|
||||
function buildHealthcheckTest(
|
||||
test: string | string[],
|
||||
): string[] {
|
||||
function buildHealthcheckTest(test: string | string[]): string[] {
|
||||
if (_.isString(test)) {
|
||||
return [ 'CMD-SHELL', test];
|
||||
return ['CMD-SHELL', test];
|
||||
}
|
||||
return test;
|
||||
}
|
||||
@ -190,14 +205,13 @@ function getNanoseconds(timeStr: string): number {
|
||||
|
||||
export function composeHealthcheckToServiceHealthcheck(
|
||||
healthcheck: ComposeHealthcheck | null | undefined,
|
||||
): ServiceHealthcheck | { } {
|
||||
|
||||
): ServiceHealthcheck | {} {
|
||||
if (healthcheck == null) {
|
||||
return { };
|
||||
return {};
|
||||
}
|
||||
|
||||
if (healthcheck.disable) {
|
||||
return { test: [ 'NONE' ] };
|
||||
return { test: ['NONE'] };
|
||||
}
|
||||
|
||||
const serviceHC: ServiceHealthcheck = {
|
||||
@ -236,7 +250,11 @@ export function getHealthcheck(
|
||||
);
|
||||
|
||||
// Overlay any compose healthcheck fields on the image healthchecks
|
||||
return _.assign({ test: [ 'NONE' ] }, imageServiceHealthcheck, composeServiceHealthcheck);
|
||||
return _.assign(
|
||||
{ test: ['NONE'] },
|
||||
imageServiceHealthcheck,
|
||||
composeServiceHealthcheck,
|
||||
);
|
||||
}
|
||||
|
||||
export function serviceHealthcheckToDockerHealthcheck(
|
||||
@ -255,8 +273,10 @@ export function getWorkingDir(
|
||||
workingDir: string | null | undefined,
|
||||
imageInfo?: Dockerode.ImageInspectInfo,
|
||||
): string {
|
||||
return (workingDir != null ? workingDir : _.get(imageInfo, 'Config.WorkingDir', ''))
|
||||
.replace(/(^.+)\/$/, '$1');
|
||||
return (workingDir != null
|
||||
? workingDir
|
||||
: _.get(imageInfo, 'Config.WorkingDir', '')
|
||||
).replace(/(^.+)\/$/, '$1');
|
||||
}
|
||||
|
||||
export function getUser(
|
||||
@ -266,20 +286,16 @@ export function getUser(
|
||||
return user != null ? user : _.get(imageInfo, 'Config.User', '');
|
||||
}
|
||||
|
||||
export function sanitiseExposeFromCompose(
|
||||
portStr: string,
|
||||
): string {
|
||||
export function sanitiseExposeFromCompose(portStr: string): string {
|
||||
if (/^[0-9]*$/.test(portStr)) {
|
||||
return `${portStr}/tcp`;
|
||||
}
|
||||
return portStr;
|
||||
}
|
||||
|
||||
export function formatDevice(
|
||||
deviceStr: string,
|
||||
): DockerDevice {
|
||||
const [ pathOnHost, ...parts ] = deviceStr.split(':');
|
||||
let [ pathInContainer, cgroup ] = parts;
|
||||
export function formatDevice(deviceStr: string): DockerDevice {
|
||||
const [pathOnHost, ...parts] = deviceStr.split(':');
|
||||
let [pathInContainer, cgroup] = parts;
|
||||
if (pathInContainer == null) {
|
||||
pathInContainer = pathOnHost;
|
||||
}
|
||||
@ -300,7 +316,7 @@ export function addFeaturesFromLabels(
|
||||
service: Service,
|
||||
options: DeviceMetadata,
|
||||
): void {
|
||||
const setEnvVariables = function (key: string, val: string) {
|
||||
const setEnvVariables = function(key: string, val: string) {
|
||||
service.config.environment[`RESIN_${key}`] = val;
|
||||
service.config.environment[`BALENA_${key}`] = val;
|
||||
};
|
||||
@ -323,13 +339,19 @@ export function addFeaturesFromLabels(
|
||||
}
|
||||
|
||||
if (checkTruthy(service.config.labels['io.balena.features.balena-socket'])) {
|
||||
service.config.volumes.push(`${constants.dockerSocket}:${constants.dockerSocket}`);
|
||||
service.config.volumes.push(
|
||||
`${constants.dockerSocket}:${constants.dockerSocket}`,
|
||||
);
|
||||
if (service.config.environment['DOCKER_HOST'] == null) {
|
||||
service.config.environment['DOCKER_HOST'] = `unix://${constants.dockerSocket}`;
|
||||
service.config.environment['DOCKER_HOST'] = `unix://${
|
||||
constants.dockerSocket
|
||||
}`;
|
||||
}
|
||||
// We keep balena.sock for backwards compatibility
|
||||
if (constants.dockerSocket != '/var/run/balena.sock') {
|
||||
service.config.volumes.push(`${constants.dockerSocket}:/var/run/balena.sock`);
|
||||
service.config.volumes.push(
|
||||
`${constants.dockerSocket}:/var/run/balena.sock`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@ -342,72 +364,78 @@ export function addFeaturesFromLabels(
|
||||
setEnvVariables('SUPERVISOR_API_KEY', options.apiSecret);
|
||||
if (service.config.networkMode === 'host') {
|
||||
setEnvVariables('SUPERVISOR_HOST', '127.0.0.1');
|
||||
setEnvVariables('SUPERVISOR_ADDRESS', `http://127.0.0.1:${options.listenPort}`);
|
||||
setEnvVariables(
|
||||
'SUPERVISOR_ADDRESS',
|
||||
`http://127.0.0.1:${options.listenPort}`,
|
||||
);
|
||||
} else {
|
||||
setEnvVariables('SUPERVISOR_HOST', options.supervisorApiHost);
|
||||
setEnvVariables('SUPERVISOR_ADDRESS', `http://${options.supervisorApiHost}:${options.listenPort}`);
|
||||
service.config.networks[constants.supervisorNetworkInterface] = { };
|
||||
setEnvVariables(
|
||||
'SUPERVISOR_ADDRESS',
|
||||
`http://${options.supervisorApiHost}:${options.listenPort}`,
|
||||
);
|
||||
service.config.networks[constants.supervisorNetworkInterface] = {};
|
||||
}
|
||||
} else {
|
||||
// Ensure that the user hasn't added 'supervisor0' to the service's list
|
||||
// of networks
|
||||
delete service.config.networks[constants.supervisorNetworkInterface];
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
export function serviceUlimitsToDockerUlimits(
|
||||
ulimits: ServiceConfig['ulimits'] | null | undefined,
|
||||
): Array<{ Name: string, Soft: number, Hard: number }> {
|
||||
|
||||
const ret: Array<{ Name: string, Soft: number, Hard: number }> = [];
|
||||
): Array<{ Name: string; Soft: number; Hard: number }> {
|
||||
const ret: Array<{ Name: string; Soft: number; Hard: number }> = [];
|
||||
_.each(ulimits, ({ soft, hard }, name) => {
|
||||
ret.push({ Name: name, Soft: soft, Hard: hard });
|
||||
});
|
||||
return ret;
|
||||
}
|
||||
|
||||
export function serviceRestartToDockerRestartPolicy(restart: string): { Name: string, MaximumRetryCount: number } {
|
||||
export function serviceRestartToDockerRestartPolicy(
|
||||
restart: string,
|
||||
): { Name: string; MaximumRetryCount: number } {
|
||||
return {
|
||||
Name: restart,
|
||||
MaximumRetryCount: 0,
|
||||
};
|
||||
}
|
||||
|
||||
export function serviceNetworksToDockerNetworks(networks: ServiceConfig['networks'])
|
||||
: Dockerode.ContainerCreateOptions['NetworkingConfig'] {
|
||||
export function serviceNetworksToDockerNetworks(
|
||||
networks: ServiceConfig['networks'],
|
||||
): Dockerode.ContainerCreateOptions['NetworkingConfig'] {
|
||||
const dockerNetworks: Dockerode.ContainerCreateOptions['NetworkingConfig'] = {
|
||||
EndpointsConfig: {},
|
||||
};
|
||||
|
||||
const dockerNetworks: Dockerode.ContainerCreateOptions['NetworkingConfig'] = {
|
||||
EndpointsConfig: { },
|
||||
};
|
||||
_.each(networks, (net, name) => {
|
||||
// WHY??? This shouldn't be necessary, as we define it above...
|
||||
if (dockerNetworks.EndpointsConfig != null) {
|
||||
dockerNetworks.EndpointsConfig[name] = {};
|
||||
const conf = dockerNetworks.EndpointsConfig[name];
|
||||
conf.IPAMConfig = {};
|
||||
conf.Aliases = [];
|
||||
_.each(net, (v, k) => {
|
||||
switch (k) {
|
||||
case 'ipv4Address':
|
||||
conf.IPAMConfig.IPV4Address = v;
|
||||
break;
|
||||
case 'ipv6Address':
|
||||
conf.IPAMConfig.IPV6Address = v;
|
||||
break;
|
||||
case 'linkLocalIps':
|
||||
conf.IPAMConfig.LinkLocalIps = v;
|
||||
break;
|
||||
case 'aliases':
|
||||
conf.Aliases = v;
|
||||
break;
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
_.each(networks, (net, name) => {
|
||||
// WHY??? This shouldn't be necessary, as we define it above...
|
||||
if (dockerNetworks.EndpointsConfig != null) {
|
||||
dockerNetworks.EndpointsConfig[name] = { };
|
||||
const conf = dockerNetworks.EndpointsConfig[name];
|
||||
conf.IPAMConfig = { };
|
||||
conf.Aliases = [ ];
|
||||
_.each(net, (v, k) => {
|
||||
switch(k) {
|
||||
case 'ipv4Address':
|
||||
conf.IPAMConfig.IPV4Address = v;
|
||||
break;
|
||||
case 'ipv6Address':
|
||||
conf.IPAMConfig.IPV6Address = v;
|
||||
break;
|
||||
case 'linkLocalIps':
|
||||
conf.IPAMConfig.LinkLocalIps = v;
|
||||
break;
|
||||
case 'aliases':
|
||||
conf.Aliases = v;
|
||||
break;
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
return dockerNetworks;
|
||||
return dockerNetworks;
|
||||
}
export function dockerNetworkToServiceNetwork(
|
||||
@ -415,10 +443,10 @@ export function dockerNetworkToServiceNetwork(
|
||||
): ServiceConfig['networks'] {
|
||||
// Take the input network object, filter out any nullish fields, extract things to
|
||||
// the correct level and return
|
||||
const networks: ServiceConfig['networks'] = { };
|
||||
const networks: ServiceConfig['networks'] = {};
|
||||
|
||||
_.each(dockerNetworks, (net, name) => {
|
||||
networks[name] = { };
|
||||
networks[name] = {};
|
||||
if (net.Aliases != null && !_.isEmpty(net.Aliases)) {
|
||||
networks[name].aliases = net.Aliases;
|
||||
}
|
||||
@ -444,19 +472,29 @@ export function normalizeNullValues(obj: Dictionary<any>): void {
|
||||
_.each(obj, (v, k) => {
|
||||
if (v == null) {
|
||||
obj[k] = undefined;
|
||||
} else if(_.isObject(v)) {
|
||||
} else if (_.isObject(v)) {
|
||||
normalizeNullValues(v);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
export function normalizeLabels(
labels: { [key: string]: string },
): { [key: string]: string } {
const legacyLabels = _.mapKeys(_.pickBy(labels, (_v, k) => _.startsWith(k, 'io.resin.')), (_v, k) => {
return k.replace(/resin/g, 'balena'); // e.g. io.resin.features.resin-api -> io.balena.features.balena-api
});
const balenaLabels = _.pickBy(labels, (_v, k) => _.startsWith(k, 'io.balena.'));
const otherLabels = _.pickBy(labels, (_v, k) => !(_.startsWith(k, 'io.balena.') || _.startsWith(k, 'io.resin.')));
return _.assign({}, otherLabels, legacyLabels, balenaLabels) as { [key: string]: string };
export function normalizeLabels(labels: {
[key: string]: string;
}): { [key: string]: string } {
const legacyLabels = _.mapKeys(
_.pickBy(labels, (_v, k) => _.startsWith(k, 'io.resin.')),
(_v, k) => {
return k.replace(/resin/g, 'balena'); // e.g. io.resin.features.resin-api -> io.balena.features.balena-api
},
);
const balenaLabels = _.pickBy(labels, (_v, k) =>
_.startsWith(k, 'io.balena.'),
);
const otherLabels = _.pickBy(
labels,
(_v, k) => !(_.startsWith(k, 'io.balena.') || _.startsWith(k, 'io.resin.')),
);
return _.assign({}, otherLabels, legacyLabels, balenaLabels) as {
[key: string]: string;
};
}
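A minimal usage sketch for the helpers reformatted above (illustrative only: the import path and values are assumptions, not part of this commit; the helpers are assumed to live in src/compose/utils.ts):

import { createRestartPolicy, parseMemoryNumber } from './compose/utils';

// '512m' parses to 512 * 1024 ** 2 bytes; the second argument is the string fallback
const memoryLimit = parseMemoryNumber('512m', '0');

// Unrecognised or non-string values fall back to 'always'
const restartPolicy = createRestartPolicy('on-failure');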
186
src/config.ts
186
src/config.ts
@ -6,7 +6,10 @@ import { generateUniqueKey } from 'resin-register-device';
|
||||
|
||||
import ConfigJsonConfigBackend from './config/configJson';
|
||||
|
||||
import { ConfigProviderFunctions, createProviderFunctions } from './config/functions';
|
||||
import {
|
||||
ConfigProviderFunctions,
|
||||
createProviderFunctions,
|
||||
} from './config/functions';
|
||||
import * as constants from './lib/constants';
|
||||
import { ConfigMap, ConfigSchema, ConfigValue } from './lib/types';
|
||||
|
||||
@ -18,7 +21,6 @@ interface ConfigOpts {
|
||||
}
|
||||
|
||||
class Config extends EventEmitter {
|
||||
|
||||
private db: DB;
|
||||
private configJsonBackend: ConfigJsonConfigBackend;
|
||||
private providerFunctions: ConfigProviderFunctions;
|
||||
@ -37,8 +39,15 @@ class Config extends EventEmitter {
|
||||
deviceId: { source: 'config.json', mutable: true },
|
||||
registered_at: { source: 'config.json', mutable: true },
|
||||
applicationId: { source: 'config.json' },
|
||||
appUpdatePollInterval: { source: 'config.json', mutable: true, default: 60000 },
|
||||
mixpanelToken: { source: 'config.json', default: constants.defaultMixpanelToken },
|
||||
appUpdatePollInterval: {
|
||||
source: 'config.json',
|
||||
mutable: true,
|
||||
default: 60000,
|
||||
},
|
||||
mixpanelToken: {
|
||||
source: 'config.json',
|
||||
default: constants.defaultMixpanelToken,
|
||||
},
|
||||
bootstrapRetryDelay: { source: 'config.json', default: 30000 },
|
||||
supervisorOfflineMode: { source: 'config.json', default: false },
|
||||
hostname: { source: 'config.json', mutable: true },
|
||||
@ -81,15 +90,17 @@ class Config extends EventEmitter {
|
||||
public constructor({ db, configPath }: ConfigOpts) {
|
||||
super();
|
||||
this.db = db;
|
||||
this.configJsonBackend = new ConfigJsonConfigBackend(this.schema, configPath);
|
||||
this.configJsonBackend = new ConfigJsonConfigBackend(
|
||||
this.schema,
|
||||
configPath,
|
||||
);
|
||||
this.providerFunctions = createProviderFunctions(this);
|
||||
}
|
||||
|
||||
public init(): Bluebird<void> {
|
||||
return this.configJsonBackend.init()
|
||||
.then(() => {
|
||||
return this.generateRequiredFields();
|
||||
});
|
||||
return this.configJsonBackend.init().then(() => {
|
||||
return this.generateRequiredFields();
|
||||
});
|
||||
}
|
||||
|
||||
public get(key: string, trx?: Transaction): Bluebird<ConfigValue> {
|
||||
@ -99,71 +110,90 @@ class Config extends EventEmitter {
|
||||
if (this.schema[key] == null) {
|
||||
throw new Error(`Unknown config value ${key}`);
|
||||
}
|
||||
switch(this.schema[key].source) {
|
||||
switch (this.schema[key].source) {
|
||||
case 'func':
|
||||
return this.providerFunctions[key].get()
|
||||
.catch((e) => {
|
||||
console.error(`Error getting config value for ${key}`, e, e.stack);
|
||||
return null;
|
||||
});
|
||||
return this.providerFunctions[key].get().catch(e => {
|
||||
console.error(`Error getting config value for ${key}`, e, e.stack);
|
||||
return null;
|
||||
});
|
||||
case 'config.json':
|
||||
return this.configJsonBackend.get(key);
|
||||
case 'db':
|
||||
return db('config').select('value').where({ key })
|
||||
.then(([ conf ]: [{ value: string }]) => {
|
||||
return db('config')
|
||||
.select('value')
|
||||
.where({ key })
|
||||
.then(([conf]: [{ value: string }]) => {
|
||||
if (conf != null) {
|
||||
return conf.value;
|
||||
}
|
||||
return;
|
||||
});
|
||||
}
|
||||
})
|
||||
.then((value) => {
|
||||
const schemaEntry = this.schema[key];
|
||||
if (value == null && schemaEntry != null && schemaEntry.default != null) {
|
||||
return schemaEntry.default;
|
||||
}
|
||||
return value;
|
||||
});
|
||||
}).then(value => {
|
||||
const schemaEntry = this.schema[key];
|
||||
if (value == null && schemaEntry != null && schemaEntry.default != null) {
|
||||
return schemaEntry.default;
|
||||
}
|
||||
return value;
|
||||
});
|
||||
}
|
||||
|
||||
public getMany(keys: string[], trx?: Transaction): Bluebird<ConfigMap> {
|
||||
return Bluebird.map(keys, (key: string) => this.get(key, trx))
|
||||
.then((values) => {
|
||||
return Bluebird.map(keys, (key: string) => this.get(key, trx)).then(
|
||||
values => {
|
||||
return _.zipObject(keys, values);
|
||||
});
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
public set(keyValues: ConfigMap, trx?: Transaction): Bluebird<void> {
|
||||
return Bluebird.try(() => {
|
||||
|
||||
// Split the values based on which storage backend they use
|
||||
type SplitConfigBackend = { configJsonVals: ConfigMap, dbVals: ConfigMap, fnVals: ConfigMap };
|
||||
const { configJsonVals, dbVals, fnVals }: SplitConfigBackend = _.reduce(keyValues, (acc: SplitConfigBackend, val, key) => {
|
||||
if (this.schema[key] == null || !this.schema[key].mutable) {
|
||||
throw new Error(`Config field ${key} not found or is immutable in config.set`);
|
||||
}
|
||||
if (this.schema[key].source === 'config.json') {
|
||||
acc.configJsonVals[key] = val;
|
||||
} else if (this.schema[key].source === 'db') {
|
||||
acc.dbVals[key] = val;
|
||||
} else if (this.schema[key].source === 'func') {
|
||||
acc.fnVals[key] = val;
|
||||
} else {
|
||||
throw new Error(`Unknown config backend for key: ${key}, backend: ${this.schema[key].source}`);
|
||||
}
|
||||
return acc;
|
||||
}, { configJsonVals: { }, dbVals: { }, fnVals: { } });
|
||||
type SplitConfigBackend = {
|
||||
configJsonVals: ConfigMap;
|
||||
dbVals: ConfigMap;
|
||||
fnVals: ConfigMap;
|
||||
};
|
||||
const { configJsonVals, dbVals, fnVals }: SplitConfigBackend = _.reduce(
|
||||
keyValues,
|
||||
(acc: SplitConfigBackend, val, key) => {
|
||||
if (this.schema[key] == null || !this.schema[key].mutable) {
|
||||
throw new Error(
|
||||
`Config field ${key} not found or is immutable in config.set`,
|
||||
);
|
||||
}
|
||||
if (this.schema[key].source === 'config.json') {
|
||||
acc.configJsonVals[key] = val;
|
||||
} else if (this.schema[key].source === 'db') {
|
||||
acc.dbVals[key] = val;
|
||||
} else if (this.schema[key].source === 'func') {
|
||||
acc.fnVals[key] = val;
|
||||
} else {
|
||||
throw new Error(
|
||||
`Unknown config backend for key: ${key}, backend: ${
|
||||
this.schema[key].source
|
||||
}`,
|
||||
);
|
||||
}
|
||||
return acc;
|
||||
},
|
||||
{ configJsonVals: {}, dbVals: {}, fnVals: {} },
|
||||
);
|
||||
|
||||
// Set these values, taking into account the knex transaction
|
||||
const setValuesInTransaction = (tx: Transaction): Bluebird<void> => {
|
||||
const dbKeys = _.keys(dbVals);
|
||||
return this.getMany(dbKeys, tx)
|
||||
.then((oldValues) => {
|
||||
.then(oldValues => {
|
||||
return Bluebird.map(dbKeys, (key: string) => {
|
||||
const value = dbVals[key];
|
||||
if (oldValues[key] !== value) {
|
||||
return this.db.upsertModel('config', { key, value }, { key }, tx);
|
||||
return this.db.upsertModel(
|
||||
'config',
|
||||
{ key, value },
|
||||
{ key },
|
||||
tx,
|
||||
);
|
||||
}
|
||||
});
|
||||
})
|
||||
@ -171,7 +201,9 @@ class Config extends EventEmitter {
|
||||
return Bluebird.map(_.toPairs(fnVals), ([key, value]) => {
|
||||
const fn = this.providerFunctions[key];
|
||||
if (fn.set == null) {
|
||||
throw new Error(`Attempting to set provider function without set() method implemented - key: ${key}`);
|
||||
throw new Error(
|
||||
`Attempting to set provider function without set() method implemented - key: ${key}`,
|
||||
);
|
||||
}
|
||||
return fn.set(value, tx);
|
||||
});
|
||||
@ -186,11 +218,12 @@ class Config extends EventEmitter {
|
||||
if (trx != null) {
|
||||
return setValuesInTransaction(trx).return();
|
||||
} else {
|
||||
return this.db.transaction((tx) => {
|
||||
return setValuesInTransaction(tx);
|
||||
}).return();
|
||||
return this.db
|
||||
.transaction(tx => {
|
||||
return setValuesInTransaction(tx);
|
||||
})
|
||||
.return();
|
||||
}
|
||||
|
||||
})
|
||||
.then(() => {
|
||||
return setImmediate(() => {
|
||||
@ -203,23 +236,34 @@ class Config extends EventEmitter {
|
||||
public remove(key: string): Bluebird<void> {
|
||||
return Bluebird.try(() => {
|
||||
if (this.schema[key] == null || !this.schema[key].mutable) {
|
||||
throw new Error(`Attempt to delete non-existent or immutable key ${key}`);
|
||||
throw new Error(
|
||||
`Attempt to delete non-existent or immutable key ${key}`,
|
||||
);
|
||||
}
|
||||
if (this.schema[key].source === 'config.json') {
|
||||
return this.configJsonBackend.remove(key);
|
||||
} else if (this.schema[key].source === 'db') {
|
||||
return this.db.models('config').del().where({ key });
|
||||
return this.db
|
||||
.models('config')
|
||||
.del()
|
||||
.where({ key });
|
||||
} else if (this.schema[key].source === 'func') {
|
||||
const mutFn = this.providerFunctions[key];
|
||||
if (mutFn == null) {
|
||||
throw new Error(`Could not find provider function for config ${key}!`);
|
||||
throw new Error(
|
||||
`Could not find provider function for config ${key}!`,
|
||||
);
|
||||
}
|
||||
if (mutFn.remove == null) {
|
||||
throw new Error(`Could not find removal provider function for config ${key}`);
|
||||
throw new Error(
|
||||
`Could not find removal provider function for config ${key}`,
|
||||
);
|
||||
}
|
||||
return mutFn.remove();
|
||||
} else {
|
||||
throw new Error(`Unknown or unsupported config backend: ${this.schema[key].source}`);
|
||||
throw new Error(
|
||||
`Unknown or unsupported config backend: ${this.schema[key].source}`,
|
||||
);
|
||||
}
|
||||
});
|
||||
}
|
||||
@ -241,23 +285,21 @@ class Config extends EventEmitter {
|
||||
'deviceApiKey',
|
||||
'apiSecret',
|
||||
'offlineMode',
|
||||
])
|
||||
.then(({ uuid, deviceApiKey, apiSecret, offlineMode }) => {
|
||||
// These fields need to be set regardless
|
||||
if (uuid == null || apiSecret == null) {
|
||||
uuid = uuid || this.newUniqueKey();
|
||||
apiSecret = apiSecret || this.newUniqueKey();
|
||||
]).then(({ uuid, deviceApiKey, apiSecret, offlineMode }) => {
|
||||
// These fields need to be set regardless
|
||||
if (uuid == null || apiSecret == null) {
|
||||
uuid = uuid || this.newUniqueKey();
|
||||
apiSecret = apiSecret || this.newUniqueKey();
|
||||
}
|
||||
return this.set({ uuid, apiSecret }).then(() => {
|
||||
if (offlineMode) {
|
||||
return;
|
||||
}
|
||||
if (deviceApiKey == null) {
|
||||
return this.set({ deviceApiKey: this.newUniqueKey() });
|
||||
}
|
||||
return this.set({ uuid, apiSecret })
|
||||
.then(() => {
|
||||
if (offlineMode) {
|
||||
return;
|
||||
}
|
||||
if (deviceApiKey == null) {
|
||||
return this.set({ deviceApiKey: this.newUniqueKey() });
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -26,7 +26,13 @@ const bootMountPoint = `${constants.rootMountPoint}${constants.bootMountPoint}`;
function remountAndWriteAtomic(file: string, data: string): Promise<void> {
// TODO: Find out why the below Promise.resolve() is required
// Here's the dangerous part:
return Promise.resolve(childProcess.execAsync(`mount -t vfat -o remount,rw ${constants.bootBlockDevice} ${bootMountPoint}`))
return Promise.resolve(
childProcess.execAsync(
`mount -t vfat -o remount,rw ${
constants.bootBlockDevice
} ${bootMountPoint}`,
),
)
.then(() => {
return fsUtils.writeFileAtomic(file, data);
})
@ -34,7 +40,6 @@ function remountAndWriteAtomic(file: string, data: string): Promise<void> {
}

export abstract class DeviceConfigBackend {

// Does this config backend support the given device type?
public abstract matches(deviceType: string): boolean;

@ -59,17 +64,24 @@ export abstract class DeviceConfigBackend {
|
||||
|
||||
// Process the value if the environment variable, ready to be written to
|
||||
// the backend
|
||||
public abstract processConfigVarValue(key: string, value: string): string | string[];
|
||||
public abstract processConfigVarValue(
|
||||
key: string,
|
||||
value: string,
|
||||
): string | string[];
|
||||
|
||||
// Return the env var name for this config option
|
||||
public abstract createConfigVarName(configName: string): string;
|
||||
}
|
||||
|
||||
export class RPiConfigBackend extends DeviceConfigBackend {
|
||||
private static bootConfigVarPrefix = `${constants.hostConfigVarPrefix}CONFIG_`;
|
||||
private static bootConfigVarPrefix = `${
|
||||
constants.hostConfigVarPrefix
|
||||
}CONFIG_`;
|
||||
private static bootConfigPath = `${bootMountPoint}/config.txt`;
|
||||
|
||||
public static bootConfigVarRegex = new RegExp('(' + _.escapeRegExp(RPiConfigBackend.bootConfigVarPrefix) + ')(.+)');
|
||||
public static bootConfigVarRegex = new RegExp(
|
||||
'(' + _.escapeRegExp(RPiConfigBackend.bootConfigVarPrefix) + ')(.+)',
|
||||
);
|
||||
|
||||
private static arrayConfigKeys = [
|
||||
'dtparam',
|
||||
@ -97,45 +109,46 @@ export class RPiConfigBackend extends DeviceConfigBackend {
|
||||
}
|
||||
|
||||
public getBootConfig(): Promise<ConfigOptions> {
|
||||
return Promise.resolve(fs.readFile(RPiConfigBackend.bootConfigPath, 'utf-8'))
|
||||
.then((confStr) => {
|
||||
return Promise.resolve(
|
||||
fs.readFile(RPiConfigBackend.bootConfigPath, 'utf-8'),
|
||||
).then(confStr => {
|
||||
const conf: ConfigOptions = {};
|
||||
const configStatements = confStr.split(/\r?\n/);
|
||||
|
||||
const conf: ConfigOptions = { };
|
||||
const configStatements = confStr.split(/\r?\n/);
|
||||
|
||||
for (const configStr of configStatements) {
|
||||
// Don't show warnings for comments and empty lines
|
||||
const trimmed = _.trimStart(configStr);
|
||||
if (_.startsWith(trimmed, '#') || trimmed === '') {
|
||||
continue;
|
||||
}
|
||||
let keyValue = /^([^=]+)=(.*)$/.exec(configStr);
|
||||
if (keyValue != null) {
|
||||
const [ , key, value ] = keyValue;
|
||||
if (!_.includes(RPiConfigBackend.arrayConfigKeys, key)) {
|
||||
conf[key] = value;
|
||||
} else {
|
||||
if (conf[key] == null) {
|
||||
conf[key] = [];
|
||||
}
|
||||
(conf[key] as string[]).push(value);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// Try the next regex instead
|
||||
keyValue = /^(initramfs) (.+)/.exec(configStr);
|
||||
if (keyValue != null) {
|
||||
const [ , key, value ] = keyValue;
|
||||
for (const configStr of configStatements) {
|
||||
// Don't show warnings for comments and empty lines
|
||||
const trimmed = _.trimStart(configStr);
|
||||
if (_.startsWith(trimmed, '#') || trimmed === '') {
|
||||
continue;
|
||||
}
|
||||
let keyValue = /^([^=]+)=(.*)$/.exec(configStr);
|
||||
if (keyValue != null) {
|
||||
const [, key, value] = keyValue;
|
||||
if (!_.includes(RPiConfigBackend.arrayConfigKeys, key)) {
|
||||
conf[key] = value;
|
||||
} else {
|
||||
console.log(`Warning - Could not parse config.txt entry: ${configStr}. Ignoring.`);
|
||||
if (conf[key] == null) {
|
||||
conf[key] = [];
|
||||
}
|
||||
(conf[key] as string[]).push(value);
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
return conf;
|
||||
});
|
||||
// Try the next regex instead
|
||||
keyValue = /^(initramfs) (.+)/.exec(configStr);
|
||||
if (keyValue != null) {
|
||||
const [, key, value] = keyValue;
|
||||
conf[key] = value;
|
||||
} else {
|
||||
console.log(
|
||||
`Warning - Could not parse config.txt entry: ${configStr}. Ignoring.`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
return conf;
|
||||
});
|
||||
}
|
||||
|
||||
public setBootConfig(opts: ConfigOptions): Promise<void> {
|
||||
@ -144,8 +157,10 @@ export class RPiConfigBackend extends DeviceConfigBackend {
|
||||
_.each(opts, (value, key) => {
|
||||
if (key === 'initramfs') {
|
||||
confStatements.push(`${key} ${value}`);
|
||||
} else if(_.isArray(value)) {
|
||||
confStatements = confStatements.concat(_.map(value, (entry) => `${key}=${entry}`));
|
||||
} else if (_.isArray(value)) {
|
||||
confStatements = confStatements.concat(
|
||||
_.map(value, entry => `${key}=${entry}`),
|
||||
);
|
||||
} else {
|
||||
confStatements.push(`${key}=${value}`);
|
||||
}
|
||||
@ -171,7 +186,7 @@ export class RPiConfigBackend extends DeviceConfigBackend {
|
||||
public processConfigVarValue(key: string, value: string): string | string[] {
|
||||
if (_.includes(RPiConfigBackend.arrayConfigKeys, key)) {
|
||||
if (!_.startsWith(value, '"')) {
|
||||
return [ value ];
|
||||
return [value];
|
||||
} else {
|
||||
return JSON.parse(`[${value}]`);
|
||||
}
|
||||
@ -185,99 +200,122 @@ export class RPiConfigBackend extends DeviceConfigBackend {
|
||||
}
|
||||
|
||||
export class ExtlinuxConfigBackend extends DeviceConfigBackend {
|
||||
private static bootConfigVarPrefix = `${constants.hostConfigVarPrefix}EXTLINUX_`;
|
||||
private static bootConfigVarPrefix = `${
|
||||
constants.hostConfigVarPrefix
|
||||
}EXTLINUX_`;
|
||||
private static bootConfigPath = `${bootMountPoint}/extlinux/extlinux.conf`;
|
||||
|
||||
public static bootConfigVarRegex = new RegExp('(' + _.escapeRegExp(ExtlinuxConfigBackend.bootConfigVarPrefix) + ')(.+)');
|
||||
public static bootConfigVarRegex = new RegExp(
|
||||
'(' + _.escapeRegExp(ExtlinuxConfigBackend.bootConfigVarPrefix) + ')(.+)',
|
||||
);
|
||||
|
||||
private static suppportedConfigKeys = [
|
||||
'isolcpus',
|
||||
];
|
||||
private static suppportedConfigKeys = ['isolcpus'];
|
||||
|
||||
public matches(deviceType: string): boolean {
|
||||
return _.startsWith(deviceType, 'jetson-tx');
|
||||
}
|
||||
|
||||
public getBootConfig(): Promise<ConfigOptions> {
|
||||
return Promise.resolve(fs.readFile(ExtlinuxConfigBackend.bootConfigPath, 'utf-8'))
|
||||
.then((confStr) => {
|
||||
const parsedBootFile = ExtlinuxConfigBackend.parseExtlinuxFile(confStr);
|
||||
return Promise.resolve(
|
||||
fs.readFile(ExtlinuxConfigBackend.bootConfigPath, 'utf-8'),
|
||||
).then(confStr => {
|
||||
const parsedBootFile = ExtlinuxConfigBackend.parseExtlinuxFile(confStr);
|
||||
|
||||
// First find the default label name
|
||||
const defaultLabel = _.find(parsedBootFile.globals, (_v, l) => {
|
||||
if (l === 'DEFAULT') {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
});
|
||||
|
||||
if (defaultLabel == null) {
|
||||
throw new Error('Could not find default entry for extlinux.conf file');
|
||||
// First find the default label name
|
||||
const defaultLabel = _.find(parsedBootFile.globals, (_v, l) => {
|
||||
if (l === 'DEFAULT') {
|
||||
return true;
|
||||
}
|
||||
|
||||
const labelEntry = parsedBootFile.labels[defaultLabel];
|
||||
|
||||
if (labelEntry == null) {
|
||||
throw new Error(`Cannot find default label entry (label: ${defaultLabel}) for extlinux.conf file`);
|
||||
}
|
||||
|
||||
// All configuration options come from the `APPEND` directive in the default label entry
|
||||
const appendEntry = labelEntry.APPEND;
|
||||
|
||||
if (appendEntry == null) {
|
||||
throw new Error('Could not find APPEND directive in default extlinux.conf boot entry');
|
||||
}
|
||||
|
||||
const conf: ConfigOptions = { };
|
||||
const values = appendEntry.split(' ');
|
||||
for(const value of values) {
|
||||
const parts = value.split('=');
|
||||
if (this.isSupportedConfig(parts[0])) {
|
||||
if (parts.length !== 2) {
|
||||
throw new Error(`Could not parse extlinux configuration entry: ${values} [value with error: ${value}]`);
|
||||
}
|
||||
conf[parts[0]] = parts[1];
|
||||
}
|
||||
}
|
||||
|
||||
return conf;
|
||||
return false;
|
||||
});
|
||||
|
||||
if (defaultLabel == null) {
|
||||
throw new Error('Could not find default entry for extlinux.conf file');
|
||||
}
|
||||
|
||||
const labelEntry = parsedBootFile.labels[defaultLabel];
|
||||
|
||||
if (labelEntry == null) {
|
||||
throw new Error(
|
||||
`Cannot find default label entry (label: ${defaultLabel}) for extlinux.conf file`,
|
||||
);
|
||||
}
|
||||
|
||||
// All configuration options come from the `APPEND` directive in the default label entry
|
||||
const appendEntry = labelEntry.APPEND;
|
||||
|
||||
if (appendEntry == null) {
|
||||
throw new Error(
|
||||
'Could not find APPEND directive in default extlinux.conf boot entry',
|
||||
);
|
||||
}
|
||||
|
||||
const conf: ConfigOptions = {};
|
||||
const values = appendEntry.split(' ');
|
||||
for (const value of values) {
|
||||
const parts = value.split('=');
|
||||
if (this.isSupportedConfig(parts[0])) {
|
||||
if (parts.length !== 2) {
|
||||
throw new Error(
|
||||
`Could not parse extlinux configuration entry: ${values} [value with error: ${value}]`,
|
||||
);
|
||||
}
|
||||
conf[parts[0]] = parts[1];
|
||||
}
|
||||
}
|
||||
|
||||
return conf;
|
||||
});
|
||||
}
|
||||
|
||||
public setBootConfig(opts: ConfigOptions): Promise<void> {
|
||||
// First get a representation of the configuration file, with all balena-supported configuration removed
|
||||
return Promise.resolve(fs.readFile(ExtlinuxConfigBackend.bootConfigPath))
|
||||
.then((data) => {
|
||||
const extlinuxFile = ExtlinuxConfigBackend.parseExtlinuxFile(data.toString());
|
||||
const defaultLabel = extlinuxFile.globals.DEFAULT;
|
||||
if (defaultLabel == null) {
|
||||
throw new Error('Could not find DEFAULT directive entry in extlinux.conf');
|
||||
}
|
||||
const defaultEntry = extlinuxFile.labels[defaultLabel];
|
||||
if (defaultEntry == null) {
|
||||
throw new Error(`Could not find default extlinux.conf entry: ${defaultLabel}`);
|
||||
}
|
||||
return Promise.resolve(
|
||||
fs.readFile(ExtlinuxConfigBackend.bootConfigPath),
|
||||
).then(data => {
|
||||
const extlinuxFile = ExtlinuxConfigBackend.parseExtlinuxFile(
|
||||
data.toString(),
|
||||
);
|
||||
const defaultLabel = extlinuxFile.globals.DEFAULT;
|
||||
if (defaultLabel == null) {
|
||||
throw new Error(
|
||||
'Could not find DEFAULT directive entry in extlinux.conf',
|
||||
);
|
||||
}
|
||||
const defaultEntry = extlinuxFile.labels[defaultLabel];
|
||||
if (defaultEntry == null) {
|
||||
throw new Error(
|
||||
`Could not find default extlinux.conf entry: ${defaultLabel}`,
|
||||
);
|
||||
}
|
||||
|
||||
if (defaultEntry.APPEND == null) {
|
||||
throw new Error(`extlinux.conf APPEND directive not found for default entry: ${defaultLabel}, not sure how to proceed!`);
|
||||
}
|
||||
if (defaultEntry.APPEND == null) {
|
||||
throw new Error(
|
||||
`extlinux.conf APPEND directive not found for default entry: ${defaultLabel}, not sure how to proceed!`,
|
||||
);
|
||||
}
|
||||
|
||||
const appendLine = _.filter(defaultEntry.APPEND.split(' '), (entry) => {
|
||||
const lhs = entry.split('=');
|
||||
return !this.isSupportedConfig(lhs[0]);
|
||||
});
|
||||
|
||||
// Apply the new configuration to the "plain" append line above
|
||||
|
||||
_.each(opts, (value, key) => {
|
||||
appendLine.push(`${key}=${value}`);
|
||||
});
|
||||
|
||||
defaultEntry.APPEND = appendLine.join(' ');
|
||||
const extlinuxString = ExtlinuxConfigBackend.extlinuxFileToString(extlinuxFile);
|
||||
|
||||
return remountAndWriteAtomic(ExtlinuxConfigBackend.bootConfigPath, extlinuxString);
|
||||
const appendLine = _.filter(defaultEntry.APPEND.split(' '), entry => {
|
||||
const lhs = entry.split('=');
|
||||
return !this.isSupportedConfig(lhs[0]);
|
||||
});
|
||||
|
||||
// Apply the new configuration to the "plain" append line above
|
||||
|
||||
_.each(opts, (value, key) => {
|
||||
appendLine.push(`${key}=${value}`);
|
||||
});
|
||||
|
||||
defaultEntry.APPEND = appendLine.join(' ');
|
||||
const extlinuxString = ExtlinuxConfigBackend.extlinuxFileToString(
|
||||
extlinuxFile,
|
||||
);
|
||||
|
||||
return remountAndWriteAtomic(
|
||||
ExtlinuxConfigBackend.bootConfigPath,
|
||||
extlinuxString,
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
public isSupportedConfig(configName: string): boolean {
|
||||
@ -301,15 +339,14 @@ export class ExtlinuxConfigBackend extends DeviceConfigBackend {
|
||||
}
|
||||
|
||||
private static parseExtlinuxFile(confStr: string): ExtlinuxFile {
|
||||
|
||||
const file: ExtlinuxFile = {
|
||||
globals: { },
|
||||
labels: { },
|
||||
globals: {},
|
||||
labels: {},
|
||||
};
|
||||
|
||||
// Firstly split by line and filter any comments and empty lines
|
||||
let lines = confStr.split(/\r?\n/);
|
||||
lines = _.filter(lines, (l) => {
|
||||
lines = _.filter(lines, l => {
|
||||
const trimmed = _.trimStart(l);
|
||||
return trimmed !== '' && !_.startsWith(trimmed, '#');
|
||||
});
|
||||
@ -342,9 +379,8 @@ export class ExtlinuxConfigBackend extends DeviceConfigBackend {
|
||||
}
|
||||
} else {
|
||||
lastLabel = value;
|
||||
file.labels[lastLabel] = { };
|
||||
file.labels[lastLabel] = {};
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return file;
|
||||
@ -363,5 +399,4 @@ export class ExtlinuxConfigBackend extends DeviceConfigBackend {
|
||||
});
|
||||
return ret;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -13,58 +13,60 @@ import * as osRelease from '../lib/os-release';
|
||||
type LockCallback = (file: string) => Promise<() => void>;
|
||||
|
||||
export default class ConfigJsonConfigBackend {
|
||||
|
||||
private lock: Lock;
|
||||
private readLockConfigJson: () => Promise.Disposer<() => void>;
|
||||
private writeLockConfigJson: () => Promise.Disposer<() => void>;
|
||||
|
||||
private configPath?: string;
|
||||
private cache: { [key: string]: ConfigValue } = { };
|
||||
private cache: { [key: string]: ConfigValue } = {};
|
||||
|
||||
private schema: ConfigSchema;
|
||||
|
||||
public constructor(schema: ConfigSchema, configPath?: string) {
|
||||
|
||||
this.configPath = configPath;
|
||||
this.schema = schema;
|
||||
this.lock = new Lock();
|
||||
|
||||
const writeLock: LockCallback = Promise.promisify(this.lock.async.writeLock);
|
||||
const readLock: LockCallback = Promise.promisify(this.lock.async.readLock);
|
||||
this.writeLockConfigJson = () => writeLock('config.json').disposer((release) => release());
|
||||
this.readLockConfigJson = () => readLock('config.json').disposer((release) => release());
|
||||
const writeLock: LockCallback = Promise.promisify(
|
||||
this.lock.async.writeLock,
|
||||
);
|
||||
const readLock: LockCallback = Promise.promisify(this.lock.async.readLock);
|
||||
this.writeLockConfigJson = () =>
|
||||
writeLock('config.json').disposer(release => release());
|
||||
this.readLockConfigJson = () =>
|
||||
readLock('config.json').disposer(release => release());
|
||||
}
|
||||
|
||||
public init(): Promise<void> {
|
||||
return this.read()
|
||||
.then((configJson) => {
|
||||
_.assign(this.cache, configJson);
|
||||
});
|
||||
return this.read().then(configJson => {
|
||||
_.assign(this.cache, configJson);
|
||||
});
|
||||
}
|
||||
|
||||
public set(keyVals: { [key: string]: ConfigValue }): Promise<void> {
|
||||
let changed = false;
|
||||
return Promise.using(this.writeLockConfigJson(), () => {
|
||||
|
||||
return Promise.mapSeries(_.keys(keyVals), (key: string) => {
|
||||
|
||||
const value = keyVals[key];
|
||||
|
||||
if (this.cache[key] !== value) {
|
||||
this.cache[key] = value;
|
||||
|
||||
if (value == null && this.schema[key] != null && this.schema[key].removeIfNull) {
|
||||
if (
|
||||
value == null &&
|
||||
this.schema[key] != null &&
|
||||
this.schema[key].removeIfNull
|
||||
) {
|
||||
delete this.cache[key];
|
||||
}
|
||||
|
||||
changed = true;
|
||||
}
|
||||
})
|
||||
.then(() => {
|
||||
if (changed) {
|
||||
return this.write();
|
||||
}
|
||||
});
|
||||
}).then(() => {
|
||||
if (changed) {
|
||||
return this.write();
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@ -91,22 +93,21 @@ export default class ConfigJsonConfigBackend {
|
||||
}
|
||||
|
||||
public path(): Promise<string> {
|
||||
return this.pathOnHost()
|
||||
.catch((err) => {
|
||||
console.error(err.message);
|
||||
return constants.configJsonNonAtomicPath;
|
||||
});
|
||||
return this.pathOnHost().catch(err => {
|
||||
console.error(err.message);
|
||||
return constants.configJsonNonAtomicPath;
|
||||
});
|
||||
}
|
||||
|
||||
private write(): Promise<void> {
|
||||
let atomicWritePossible = true;
|
||||
return this.pathOnHost()
|
||||
.catch((err) => {
|
||||
.catch(err => {
|
||||
console.error(err.message);
|
||||
atomicWritePossible = false;
|
||||
return constants.configJsonNonAtomicPath;
|
||||
})
|
||||
.then((configPath) => {
|
||||
.then(configPath => {
|
||||
if (atomicWritePossible) {
|
||||
return writeFileAtomic(configPath, JSON.stringify(this.cache));
|
||||
} else {
|
||||
@ -117,7 +118,7 @@ export default class ConfigJsonConfigBackend {
|
||||
|
||||
private read(): Promise<string> {
|
||||
return this.path()
|
||||
.then((filename) => {
|
||||
.then(filename => {
|
||||
return fs.readFile(filename, 'utf-8');
|
||||
})
|
||||
.then(JSON.parse);
|
||||
@ -130,9 +131,9 @@ export default class ConfigJsonConfigBackend {
|
||||
if (constants.configJsonPathOnHost != null) {
|
||||
return constants.configJsonPathOnHost;
|
||||
}
|
||||
return osRelease.getOSVersion(constants.hostOSVersionPath)
|
||||
.then((osVersion) => {
|
||||
|
||||
return osRelease
|
||||
.getOSVersion(constants.hostOSVersionPath)
|
||||
.then(osVersion => {
|
||||
if (osVersion == null) {
|
||||
throw new Error('Failed to detect OS version!');
|
||||
}
|
||||
@ -147,13 +148,13 @@ export default class ConfigJsonConfigBackend {
|
||||
// In non-resinOS hosts (or older than 1.0.0), if CONFIG_JSON_PATH wasn't passed
|
||||
// then we can't do atomic changes (only access to config.json we have is in /boot,
|
||||
// which is assumed to be a file bind mount where rename is impossible)
|
||||
throw new Error('Could not determine config.json path on host, atomic write will not be possible');
|
||||
throw new Error(
|
||||
'Could not determine config.json path on host, atomic write will not be possible',
|
||||
);
|
||||
}
|
||||
});
|
||||
})
|
||||
.then((file) => {
|
||||
return path.join(constants.rootMountPoint, file);
|
||||
});
|
||||
}).then(file => {
|
||||
return path.join(constants.rootMountPoint, file);
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -11,7 +11,10 @@ import { ConfigValue } from '../lib/types';
|
||||
|
||||
// A provider for schema entries with source 'func'
|
||||
type ConfigProviderFunctionGetter = () => Bluebird<any>;
|
||||
type ConfigProviderFunctionSetter = (value: ConfigValue, tx?: Transaction) => Bluebird<void>;
|
||||
type ConfigProviderFunctionSetter = (
|
||||
value: ConfigValue,
|
||||
tx?: Transaction,
|
||||
) => Bluebird<void>;
|
||||
type ConfigProviderFunctionRemover = () => Bluebird<void>;
|
||||
|
||||
interface ConfigProviderFunction {
|
||||
@ -24,7 +27,9 @@ export interface ConfigProviderFunctions {
|
||||
[key: string]: ConfigProviderFunction;
|
||||
}
|
||||
|
||||
export function createProviderFunctions(config: Config): ConfigProviderFunctions {
|
||||
export function createProviderFunctions(
|
||||
config: Config,
|
||||
): ConfigProviderFunctions {
|
||||
return {
|
||||
version: {
|
||||
get: () => {
|
||||
@ -33,7 +38,8 @@ export function createProviderFunctions(config: Config): ConfigProviderFunctions
|
||||
},
|
||||
currentApiKey: {
|
||||
get: () => {
|
||||
return config.getMany([ 'apiKey', 'deviceApiKey' ])
|
||||
return config
|
||||
.getMany(['apiKey', 'deviceApiKey'])
|
||||
.then(({ apiKey, deviceApiKey }) => {
|
||||
return apiKey || deviceApiKey;
|
||||
});
|
||||
@ -41,7 +47,8 @@ export function createProviderFunctions(config: Config): ConfigProviderFunctions
|
||||
},
|
||||
offlineMode: {
|
||||
get: () => {
|
||||
return config.getMany([ 'apiEndpoint', 'supervisorOfflineMode' ])
|
||||
return config
|
||||
.getMany(['apiEndpoint', 'supervisorOfflineMode'])
|
||||
.then(({ apiEndpoint, supervisorOfflineMode }) => {
|
||||
return Boolean(supervisorOfflineMode) || !Boolean(apiEndpoint);
|
||||
});
|
||||
@ -49,13 +56,9 @@ export function createProviderFunctions(config: Config): ConfigProviderFunctions
|
||||
},
|
||||
provisioned: {
|
||||
get: () => {
|
||||
return config.getMany([
|
||||
'uuid',
|
||||
'apiEndpoint',
|
||||
'registered_at',
|
||||
'deviceId',
|
||||
])
|
||||
.then((requiredValues) => {
|
||||
return config
|
||||
.getMany(['uuid', 'apiEndpoint', 'registered_at', 'deviceId'])
|
||||
.then(requiredValues => {
|
||||
return _.every(_.values(requiredValues), Boolean);
|
||||
});
|
||||
},
|
||||
@ -72,39 +75,40 @@ export function createProviderFunctions(config: Config): ConfigProviderFunctions
|
||||
},
|
||||
provisioningOptions: {
|
||||
get: () => {
|
||||
return config.getMany([
|
||||
'uuid',
|
||||
'userId',
|
||||
'applicationId',
|
||||
'apiKey',
|
||||
'deviceApiKey',
|
||||
'deviceType',
|
||||
'apiEndpoint',
|
||||
'apiTimeout',
|
||||
'registered_at',
|
||||
'deviceId',
|
||||
]).then((conf) => {
|
||||
return {
|
||||
uuid: conf.uuid,
|
||||
applicationId: conf.applicationId,
|
||||
userId: conf.userId,
|
||||
deviceType: conf.deviceType,
|
||||
provisioningApiKey: conf.apiKey,
|
||||
deviceApiKey: conf.deviceApiKey,
|
||||
apiEndpoint: conf.apiEndpoint,
|
||||
apiTimeout: conf.apiTimeout,
|
||||
registered_at: conf.registered_at,
|
||||
deviceId: conf.deviceId,
|
||||
};
|
||||
});
|
||||
return config
|
||||
.getMany([
|
||||
'uuid',
|
||||
'userId',
|
||||
'applicationId',
|
||||
'apiKey',
|
||||
'deviceApiKey',
|
||||
'deviceType',
|
||||
'apiEndpoint',
|
||||
'apiTimeout',
|
||||
'registered_at',
|
||||
'deviceId',
|
||||
])
|
||||
.then(conf => {
|
||||
return {
|
||||
uuid: conf.uuid,
|
||||
applicationId: conf.applicationId,
|
||||
userId: conf.userId,
|
||||
deviceType: conf.deviceType,
|
||||
provisioningApiKey: conf.apiKey,
|
||||
deviceApiKey: conf.deviceApiKey,
|
||||
apiEndpoint: conf.apiEndpoint,
|
||||
apiTimeout: conf.apiTimeout,
|
||||
registered_at: conf.registered_at,
|
||||
deviceId: conf.deviceId,
|
||||
};
|
||||
});
|
||||
},
|
||||
},
|
||||
mixpanelHost: {
|
||||
get: () => {
|
||||
return config.get('apiEndpoint')
|
||||
.then((apiEndpoint) => {
|
||||
return `${apiEndpoint}/mixpanel`;
|
||||
});
|
||||
return config.get('apiEndpoint').then(apiEndpoint => {
|
||||
return `${apiEndpoint}/mixpanel`;
|
||||
});
|
||||
},
|
||||
},
|
||||
extendedEnvOptions: {
|
||||
|
@ -8,33 +8,32 @@ import {
|
||||
RPiConfigBackend,
|
||||
} from './backend';
|
||||
|
||||
|
||||
const configBackends = [
|
||||
new ExtlinuxConfigBackend(),
|
||||
new RPiConfigBackend(),
|
||||
];
|
||||
const configBackends = [new ExtlinuxConfigBackend(), new RPiConfigBackend()];
|
||||
|
||||
export function isConfigDeviceType(deviceType: string): boolean {
|
||||
return getConfigBackend(deviceType) != null;
|
||||
}
|
||||
|
||||
export function getConfigBackend(deviceType: string): DeviceConfigBackend | undefined {
|
||||
return _.find(configBackends, (backend) => backend.matches(deviceType));
|
||||
export function getConfigBackend(
|
||||
deviceType: string,
|
||||
): DeviceConfigBackend | undefined {
|
||||
return _.find(configBackends, backend => backend.matches(deviceType));
|
||||
}
|
||||
|
||||
export function envToBootConfig(
|
||||
configBackend: DeviceConfigBackend | null,
|
||||
env: EnvVarObject,
|
||||
): ConfigOptions {
|
||||
|
||||
if (configBackend == null) {
|
||||
return { };
|
||||
return {};
|
||||
}
|
||||
|
||||
return _(env)
|
||||
.pickBy((_val, key) => configBackend.isBootConfigVar(key))
|
||||
.mapKeys((_val, key) => configBackend.processConfigVarName(key))
|
||||
.mapValues((val, key) => configBackend.processConfigVarValue(key, val || ''))
|
||||
.mapValues((val, key) =>
|
||||
configBackend.processConfigVarValue(key, val || ''),
|
||||
)
|
||||
.value();
|
||||
}
|
||||
|
||||
@ -42,10 +41,9 @@ export function bootConfigToEnv(
|
||||
configBackend: DeviceConfigBackend,
|
||||
config: ConfigOptions,
|
||||
): EnvVarObject {
|
||||
|
||||
return _(config)
|
||||
.mapKeys((_val, key) => configBackend.createConfigVarName(key))
|
||||
.mapValues((val) => {
|
||||
.mapValues(val => {
|
||||
if (_.isArray(val)) {
|
||||
return JSON.stringify(val).replace(/^\[(.*)\]$/, '$1');
|
||||
}
|
||||
@ -58,11 +56,14 @@ function filterNamespaceFromConfig(
|
||||
namespace: RegExp,
|
||||
conf: { [key: string]: any },
|
||||
): { [key: string]: any } {
|
||||
return _.mapKeys(_.pickBy(conf, (_v, k) => {
|
||||
return namespace.test(k);
|
||||
}), (_v,k) => {
|
||||
return k.replace(namespace, '$1');
|
||||
});
|
||||
return _.mapKeys(
|
||||
_.pickBy(conf, (_v, k) => {
|
||||
return namespace.test(k);
|
||||
}),
|
||||
(_v, k) => {
|
||||
return k.replace(namespace, '$1');
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
export function formatConfigKeys(
|
||||
@ -70,18 +71,27 @@ export function formatConfigKeys(
|
||||
allowedKeys: string[],
|
||||
conf: { [key: string]: any },
|
||||
): { [key: string]: any } {
|
||||
|
||||
const isConfigType = configBackend != null;
|
||||
const namespaceRegex = /^BALENA_(.*)/;
|
||||
const legacyNamespaceRegex = /^RESIN_(.*)/;
|
||||
const confFromNamespace = filterNamespaceFromConfig(namespaceRegex, conf);
|
||||
const confFromLegacyNamespace = filterNamespaceFromConfig(legacyNamespaceRegex, conf);
|
||||
const noNamespaceConf = _.pickBy(conf, (_v,k) => {
|
||||
const confFromLegacyNamespace = filterNamespaceFromConfig(
|
||||
legacyNamespaceRegex,
|
||||
conf,
|
||||
);
|
||||
const noNamespaceConf = _.pickBy(conf, (_v, k) => {
|
||||
return !_.startsWith(k, 'RESIN_') && !_.startsWith(k, 'BALENA_');
|
||||
});
|
||||
const confWithoutNamespace = _.defaults(confFromNamespace, confFromLegacyNamespace, noNamespaceConf);
|
||||
const confWithoutNamespace = _.defaults(
|
||||
confFromNamespace,
|
||||
confFromLegacyNamespace,
|
||||
noNamespaceConf,
|
||||
);
|
||||
|
||||
return _.pickBy(confWithoutNamespace, (_v, k) => {
|
||||
return _.includes(allowedKeys, k) || (isConfigType && configBackend!.isBootConfigVar(k));
|
||||
return (
|
||||
_.includes(allowedKeys, k) ||
|
||||
(isConfigType && configBackend!.isBootConfigVar(k))
|
||||
);
|
||||
});
|
||||
}
|
||||
|
16
src/db.ts
16
src/db.ts
@ -23,12 +23,14 @@ class DB {
},
useNullAsDefault: true,
});

}

public init(): Bluebird<void> {
return this.knex('knex_migrations_lock').update({ is_locked: 0})
.catch(() => { return; })
return this.knex('knex_migrations_lock')
.update({ is_locked: 0 })
.catch(() => {
return;
})
.then(() => {
return this.knex.migrate.latest({
directory: path.join(__dirname, 'migrations'),
@ -43,13 +45,14 @@ class DB {
public upsertModel(
modelName: string,
obj: any,
id: number | { [key: string]: string},
id: number | { [key: string]: string },
trx?: Knex.Transaction,
): Bluebird<any> {

const knex = trx || this.knex;

return knex(modelName).update(obj).where(id)
return knex(modelName)
.update(obj)
.where(id)
.then((n: number) => {
if (n === 0) {
return knex(modelName).insert(obj);
@ -60,7 +63,6 @@ class DB {
public transaction(cb: DBTransactionCallback): Bluebird<Knex.Transaction> {
return this.knex.transaction(cb);
}

}

export = DB;
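A short sketch of how the upsertModel method shown in the src/db.ts hunk above might be called (hypothetical table, key and values, not part of this commit; db is assumed to be an already-initialised DB instance):

import DB = require('./db');

function setDeviceName(db: DB, name: string) {
	// Updates the row matching { key: 'name' }, or inserts it when no row matched
	return db.upsertModel('config', { key: 'name', value: name }, { key: 'name' });
}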
12
src/device-api/common.d.ts
vendored
12
src/device-api/common.d.ts
vendored
@ -9,9 +9,17 @@ export interface ServiceAction {
|
||||
options: any;
|
||||
}
|
||||
|
||||
declare function doRestart(applications: ApplicationManager, appId: number, force: boolean): Promise<void>;
|
||||
declare function doRestart(
|
||||
applications: ApplicationManager,
|
||||
appId: number,
|
||||
force: boolean,
|
||||
): Promise<void>;
|
||||
|
||||
declare function doPurge(applications: ApplicationManager, appId: number, force: boolean): Promise<void>;
|
||||
declare function doPurge(
|
||||
applications: ApplicationManager,
|
||||
appId: number,
|
||||
force: boolean,
|
||||
): Promise<void>;
|
||||
|
||||
declare function serviceAction(
|
||||
action: string,
|
||||
|
@ -12,7 +12,6 @@ import { doPurge, doRestart, serviceAction } from './common';
|
||||
import supervisorVersion = require('../lib/supervisor-version');
|
||||
|
||||
export function createV2Api(router: Router, applications: ApplicationManager) {
|
||||
|
||||
const { _lockingIfNecessary, deviceState } = applications;
|
||||
|
||||
const messageFromError = (err?: Error | string | null): string => {
|
||||
@ -36,8 +35,9 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
|
||||
const { appId } = req.params;
|
||||
|
||||
return _lockingIfNecessary(appId, { force }, () => {
|
||||
return applications.getCurrentApp(appId)
|
||||
.then((app) => {
|
||||
return applications
|
||||
.getCurrentApp(appId)
|
||||
.then(app => {
|
||||
if (app == null) {
|
||||
res.status(404).send(appNotFoundMessage);
|
||||
return;
|
||||
@ -47,90 +47,100 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
|
||||
res.status(404).send(serviceNotFoundMessage);
|
||||
return;
|
||||
}
|
||||
applications.setTargetVolatileForService(
|
||||
service.imageId!,
|
||||
{ running: action !== 'stop' },
|
||||
);
|
||||
return applications.executeStepAction(
|
||||
serviceAction(
|
||||
action,
|
||||
service.serviceId!,
|
||||
service,
|
||||
service,
|
||||
{ wait: true },
|
||||
),
|
||||
{ skipLock: true },
|
||||
)
|
||||
applications.setTargetVolatileForService(service.imageId!, {
|
||||
running: action !== 'stop',
|
||||
});
|
||||
return applications
|
||||
.executeStepAction(
|
||||
serviceAction(action, service.serviceId!, service, service, {
|
||||
wait: true,
|
||||
}),
|
||||
{ skipLock: true },
|
||||
)
|
||||
.then(() => {
|
||||
res.status(200).send('OK');
|
||||
});
|
||||
})
|
||||
.catch((err) => {
|
||||
.catch(err => {
|
||||
res.status(503).send(messageFromError(err));
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
router.post('/v2/applications/:appId/purge', (req: Request, res: Response) => {
|
||||
const { force } = req.body;
|
||||
const { appId } = req.params;
|
||||
router.post(
|
||||
'/v2/applications/:appId/purge',
|
||||
(req: Request, res: Response) => {
|
||||
const { force } = req.body;
|
||||
const { appId } = req.params;
|
||||
|
||||
return doPurge(applications, appId, force)
|
||||
.then(() => {
|
||||
res.status(200).send('OK');
|
||||
})
|
||||
.catch((err) => {
|
||||
let message;
|
||||
if (err != null) {
|
||||
message = err.message;
|
||||
if (message == null) {
|
||||
message = err;
|
||||
return doPurge(applications, appId, force)
|
||||
.then(() => {
|
||||
res.status(200).send('OK');
|
||||
})
|
||||
.catch(err => {
|
||||
let message;
|
||||
if (err != null) {
|
||||
message = err.message;
|
||||
if (message == null) {
|
||||
message = err;
|
||||
}
|
||||
} else {
|
||||
message = 'Unknown error';
|
||||
}
|
||||
} else {
|
||||
message = 'Unknown error';
|
||||
}
|
||||
res.status(503).send(message);
|
||||
});
|
||||
});
|
||||
res.status(503).send(message);
|
||||
});
|
||||
},
|
||||
);
|
||||
|
||||
router.post('/v2/applications/:appId/restart-service', (req: Request, res: Response) => {
|
||||
return handleServiceAction(req, res, 'restart');
|
||||
});
|
||||
router.post(
|
||||
'/v2/applications/:appId/restart-service',
|
||||
(req: Request, res: Response) => {
|
||||
return handleServiceAction(req, res, 'restart');
|
||||
},
|
||||
);
|
||||
|
||||
router.post('/v2/applications/:appId/stop-service', (req: Request, res: Response) => {
|
||||
return handleServiceAction(req, res, 'stop');
|
||||
});
|
||||
router.post(
|
||||
'/v2/applications/:appId/stop-service',
|
||||
(req: Request, res: Response) => {
|
||||
return handleServiceAction(req, res, 'stop');
|
||||
},
|
||||
);
|
||||
|
||||
router.post('/v2/applications/:appId/start-service', (req: Request, res: Response) => {
|
||||
return handleServiceAction(req, res, 'start');
|
||||
});
|
||||
router.post(
|
||||
'/v2/applications/:appId/start-service',
|
||||
(req: Request, res: Response) => {
|
||||
return handleServiceAction(req, res, 'start');
|
||||
},
|
||||
);
|
||||
|
||||
router.post('/v2/applications/:appId/restart', (req: Request, res: Response) => {
|
||||
const { force } = req.body;
|
||||
const { appId } = req.params;
|
||||
router.post(
|
||||
'/v2/applications/:appId/restart',
|
||||
(req: Request, res: Response) => {
|
||||
const { force } = req.body;
|
||||
const { appId } = req.params;
|
||||
|
||||
return doRestart(applications, appId, force)
|
||||
.then(() => {
|
||||
res.status(200).send('OK');
|
||||
})
|
||||
.catch((err) => {
|
||||
res.status(503).send(messageFromError(err));
|
||||
});
|
||||
});
|
||||
return doRestart(applications, appId, force)
|
||||
.then(() => {
|
||||
res.status(200).send('OK');
|
||||
})
|
||||
.catch(err => {
|
||||
res.status(503).send(messageFromError(err));
|
||||
});
|
||||
},
|
||||
);
|
||||
|
||||
// TODO: Support dependent applications when this feature is complete
|
||||
router.get('/v2/applications/state', (_req: Request, res: Response) => {
|
||||
|
||||
// It's kinda hacky to access the services and db via the application manager
|
||||
// maybe refactor this code
|
||||
Bluebird.join(
|
||||
applications.services.getStatus(),
|
||||
applications.images.getStatus(),
|
||||
applications.db.models('app').select([ 'appId', 'commit', 'name' ]),
|
||||
applications.db.models('app').select(['appId', 'commit', 'name']),
|
||||
(
|
||||
services,
|
||||
images,
|
||||
apps: Array<{ appId: string, commit: string, name: string }>,
|
||||
apps: Array<{ appId: string; commit: string; name: string }>,
|
||||
) => {
|
||||
// Create an object which is keyed by application name
const response: {
@ -142,25 +152,25 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
status: string;
releaseId: number;
downloadProgress: number | null;
}
}
}
} = { };
};
};
};
} = {};

const appNameById: { [id: number]: string } = { };
const appNameById: { [id: number]: string } = {};

apps.forEach((app) => {
|
||||
apps.forEach(app => {
|
||||
const appId = parseInt(app.appId, 10);
|
||||
response[app.name] = {
|
||||
appId,
|
||||
commit: app.commit,
|
||||
services: { },
|
||||
services: {},
|
||||
};
|
||||
|
||||
appNameById[appId] = app.name;
|
||||
});
|
||||
|
||||
images.forEach((img) => {
|
||||
images.forEach(img => {
|
||||
const appName = appNameById[img.appId];
|
||||
if (appName == null) {
|
||||
console.log('Image found for unknown application!');
|
||||
@ -186,16 +196,19 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
|
||||
});
|
||||
|
||||
res.status(200).json(response);
|
||||
});
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
router.get('/v2/applications/:appId/state', (_req: Request, res: Response) => {
|
||||
// Get all services and their statuses, and return it
|
||||
applications.getStatus()
|
||||
.then((apps) => {
|
||||
router.get(
|
||||
'/v2/applications/:appId/state',
|
||||
(_req: Request, res: Response) => {
|
||||
// Get all services and their statuses, and return it
|
||||
applications.getStatus().then(apps => {
|
||||
res.status(200).json(apps);
|
||||
});
|
||||
});
|
||||
},
|
||||
);
|
||||
|
||||
router.get('/v2/local/target-state', async (_req, res) => {
|
||||
try {
|
||||
@ -249,7 +262,6 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
|
||||
message: e.message,
|
||||
});
|
||||
}
|
||||
|
||||
} catch (e) {
|
||||
const message = 'Could not apply target state: ';
|
||||
res.status(503).json({
|
||||
@ -263,13 +275,14 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
|
||||
// Return the device type and slug so that local mode builds can use this to
|
||||
// resolve builds
|
||||
try {
|
||||
|
||||
// FIXME: We should be mounting the following file into the supervisor from the
|
||||
// start-resin-supervisor script, changed in meta-resin - but until then, hardcode it
|
||||
const data = await fs.readFile('/mnt/root/resin-boot/device-type.json', 'utf8');
|
||||
const data = await fs.readFile(
|
||||
'/mnt/root/resin-boot/device-type.json',
|
||||
'utf8',
|
||||
);
|
||||
const deviceInfo = JSON.parse(data);
|
||||
|
||||
|
||||
return res.status(200).json({
|
||||
status: 'sucess',
|
||||
info: {
|
||||
@ -277,7 +290,6 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
|
||||
deviceType: deviceInfo.slug,
|
||||
},
|
||||
});
|
||||
|
||||
} catch (e) {
|
||||
const message = 'Could not fetch device information: ';
|
||||
res.status(503).json({
|
||||
@ -289,7 +301,9 @@ export function createV2Api(router: Router, applications: ApplicationManager) {
|
||||
|
||||
router.get('/v2/local/logs', async (_req, res) => {
|
||||
const backend = applications.logger.getLocalBackend();
|
||||
backend.assignServiceNameResolver(applications.serviceNameFromId.bind(applications));
|
||||
backend.assignServiceNameResolver(
|
||||
applications.serviceNameFromId.bind(applications),
|
||||
);
|
||||
|
||||
// Get the stream, and stream it into res
|
||||
const listenStream = backend.attachListener();
|
||||
|
@ -258,7 +258,6 @@ module.exports = class DeviceState extends EventEmitter
@config.get('apiEndpoint'),
validateState(target),
(apiEndpoint) =>
source = apiEndpoint
@usingWriteLockTarget =>
# Apps, deviceConfig, dependent
@db.transaction (trx) =>
@ -318,9 +317,9 @@ module.exports = class DeviceState extends EventEmitter
_.assign(@_currentVolatile, newState)
@emitAsync('change')

_convertLegacyAppsJson: (appsArray) =>
Promise.try =>
deviceConf = _.reduce(appsArray, (conf, app) =>
_convertLegacyAppsJson: (appsArray) ->
Promise.try ->
deviceConf = _.reduce(appsArray, (conf, app) ->
return _.merge({}, conf, app.config)
, {})
apps = _.keyBy(_.map(appsArray, singleToMulticontainerApp), 'appId')
@ -333,9 +332,9 @@ module.exports = class DeviceState extends EventEmitter
.then(JSON.parse)
.then (stateFromFile) =>
if _.isArray(stateFromFile)
# This is a legacy apps.json
console.log('Legacy apps.json detected')
return @_convertLegacyAppsJson(stateFromFile)
# This is a legacy apps.json
console.log('Legacy apps.json detected')
return @_convertLegacyAppsJson(stateFromFile)
else
return stateFromFile
.then (stateFromFile) =>

@ -32,7 +32,6 @@ const mixpanelMask = [
].join(',');

export class EventTracker {

private defaultProperties: EventTrackProperties | null;
private client: any;

@ -60,11 +59,7 @@ export class EventTracker {
});
}

public track(
event: string,
properties: EventTrackProperties | Error = { },
) {

public track(event: string, properties: EventTrackProperties | Error = {}) {
if (properties instanceof Error) {
properties = { error: properties };
}
@ -89,12 +84,19 @@ export class EventTracker {
this.throttleddLogger(event)(properties);
}

private throttleddLogger = memoizee((event: string) => {
// Call this function at maximum once every minute
return _.throttle((properties) => {
this.client.track(event, properties);
}, eventDebounceTime, { leading: true });
}, { primitive: true });
private throttleddLogger = memoizee(
(event: string) => {
// Call this function at maximum once every minute
return _.throttle(
properties => {
this.client.track(event, properties);
},
eventDebounceTime,
{ leading: true },
);
},
{ primitive: true },
);

private logEvent(...args: string[]) {
console.log(...args);
@ -103,6 +105,6 @@ export class EventTracker {
private assignDefaultProperties(
properties: EventTrackProperties,
): EventTrackProperties {
return _.merge({ }, properties, this.defaultProperties);
return _.merge({}, properties, this.defaultProperties);
}
}

@ -7,14 +7,19 @@ const supervisorNetworkInterface = 'supervisor0';

const constants = {
rootMountPoint,
databasePath: checkString(process.env.DATABASE_PATH) || '/data/database.sqlite',
databasePath:
checkString(process.env.DATABASE_PATH) || '/data/database.sqlite',
dockerSocket: process.env.DOCKER_SOCKET || '/var/run/docker.sock',
supervisorImage: checkString(process.env.SUPERVISOR_IMAGE) || 'resin/rpi-supervisor',
ledFile: checkString(process.env.LED_FILE) || '/sys/class/leds/led0/brightness',
supervisorImage:
checkString(process.env.SUPERVISOR_IMAGE) || 'resin/rpi-supervisor',
ledFile:
checkString(process.env.LED_FILE) || '/sys/class/leds/led0/brightness',
vpnStatusPath:
checkString(process.env.VPN_STATUS_PATH) || `${rootMountPoint}/run/openvpn/vpn_status`,
checkString(process.env.VPN_STATUS_PATH) ||
`${rootMountPoint}/run/openvpn/vpn_status`,
hostOSVersionPath:
checkString(process.env.HOST_OS_VERSION_PATH) || `${rootMountPoint}/etc/os-release`,
checkString(process.env.HOST_OS_VERSION_PATH) ||
`${rootMountPoint}/etc/os-release`,
privateAppEnvVars: [
'RESIN_SUPERVISOR_API_KEY',
'RESIN_API_KEY',
@ -28,7 +33,13 @@ const constants = {
configJsonNonAtomicPath: '/boot/config.json',
defaultMixpanelToken: process.env.DEFAULT_MIXPANEL_TOKEN,
supervisorNetworkInterface: supervisorNetworkInterface,
allowedInterfaces: [ 'resin-vpn', 'tun0', 'docker0', 'lo', supervisorNetworkInterface ],
allowedInterfaces: [
'resin-vpn',
'tun0',
'docker0',
'lo',
supervisorNetworkInterface,
],
appsJsonPath: process.env.APPS_JSON_PATH || '/boot/apps.json',
ipAddressUpdateInterval: 30 * 1000,
imageCleanupErrorIgnoreTimeout: 3600 * 1000,

@ -6,8 +6,10 @@ export function envArrayToObject(env: string[]): EnvVarObject {
const toPair = (keyVal: string) => {
const m = keyVal.match(/^([^=]+)=\s*(.*)\s*$/);
if (m == null) {
console.log(`WARNING: Could not correctly parse env var ${keyVal}. ` +
'Please fix this var and recreate the container.');
console.log(
`WARNING: Could not correctly parse env var ${keyVal}. ` +
'Please fix this var and recreate the container.',
);
return [null, null];
}
return m.slice(1);

@ -78,7 +78,7 @@ module.exports = class DockerUtils extends DockerToolbelt
# Since the supervisor never calls this function without a source anymore,
# this should never happen, but we handle it anyways.
if !deltaSource?
log("Falling back to regular pull due to lack of a delta source")
log('Falling back to regular pull due to lack of a delta source')
return @fetchImageWithProgress(imgDest, fullDeltaOpts, onProgress)

docker = this

9
src/lib/docker-utils.d.ts
vendored
9
src/lib/docker-utils.d.ts
vendored
@ -19,17 +19,20 @@ declare class DockerUtils extends DockerToolbelt {

getRepoAndTag(image: string): Bluebird<TaggedRepoImage>;

fetchDeltaWithProgress(imgDest: string, fullDeltaOpts: any, onProgress: (args: any) => void): Bluebird<void>;
fetchDeltaWithProgress(
imgDest: string,
fullDeltaOpts: any,
onProgress: (args: any) => void,
): Bluebird<void>;

fetchImageWithProgress(
image: string,
config: { uuid: string, currentApiKey: string },
config: { uuid: string; currentApiKey: string },
onProgress: (args: any) => void,
): Bluebird<void>;

getImageEnv(id: string): Bluebird<EnvVarObject>;
getNetworkGateway(netName: string): Bluebird<string>;

}

export = DockerUtils;

@ -29,9 +29,9 @@ export function UnitNotLoadedError(err: string[]): boolean {
return endsWith(err[0], 'not loaded.');
}

export class InvalidNetGatewayError extends TypedError { }
export class InvalidNetGatewayError extends TypedError {}

export class DeltaStillProcessingError extends TypedError { }
export class DeltaStillProcessingError extends TypedError {}

export class InvalidAppIdError extends TypedError {
public constructor(public appId: any) {

@ -3,17 +3,17 @@ import { fs } from 'mz';
import * as path from 'path';

export function writeAndSyncFile(path: string, data: string): Bluebird<void> {
return Bluebird.resolve(fs.open(path, 'w'))
.then((fd) => {
fs.write(fd, data, 0, 'utf8')
.then(() => fs.fsync(fd))
.then(() => fs.close(fd));
});
return Bluebird.resolve(fs.open(path, 'w')).then(fd => {
fs.write(fd, data, 0, 'utf8')
.then(() => fs.fsync(fd))
.then(() => fs.close(fd));
});
}

export function writeFileAtomic(path: string, data: string): Bluebird<void> {
return Bluebird.resolve(writeAndSyncFile(`${path}.new`, data))
.then(() => fs.rename(`${path}.new`, path));
return Bluebird.resolve(writeAndSyncFile(`${path}.new`, data)).then(() =>
fs.rename(`${path}.new`, path),
);
}

export function safeRename(src: string, dest: string): Bluebird<void> {

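A minimal usage sketch of the atomic-write helper reformatted above may help readers skimming the diff; the target path and payload here are illustrative assumptions, not part of the commit:

import { writeFileAtomic } from './src/lib/fs-utils';

// Writes to `<path>.new`, fsyncs, then renames over the target, so a crash
// mid-write never leaves a truncated file behind (assumed call site).
async function saveState(): Promise<void> {
  await writeFileAtomic('/data/state.json', JSON.stringify({ ok: true }));
}
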
@ -28,11 +28,21 @@ export function rejectOnAllInterfacesExcept(
): Promise<void> {
// We delete each rule and create it again to ensure ordering (all ACCEPTs before the REJECT/DROP).
// This is especially important after a supervisor update.
return Promise.each(allowedInterfaces, (iface) => clearAndInsertIptablesRule(`INPUT -p tcp --dport ${port} -i ${iface} -j ACCEPT`))
.then(() => clearAndAppendIptablesRule(`INPUT -p tcp --dport ${port} -j REJECT`))
// On systems without REJECT support, fall back to DROP
.catch(() => clearAndAppendIptablesRule(`INPUT -p tcp --dport ${port} -j DROP`))
.return();
return (
Promise.each(allowedInterfaces, iface =>
clearAndInsertIptablesRule(
`INPUT -p tcp --dport ${port} -i ${iface} -j ACCEPT`,
),
)
.then(() =>
clearAndAppendIptablesRule(`INPUT -p tcp --dport ${port} -j REJECT`),
)
// On systems without REJECT support, fall back to DROP
.catch(() =>
clearAndAppendIptablesRule(`INPUT -p tcp --dport ${port} -j DROP`),
)
.return()
);
}

export function removeRejections(port: number): Promise<void> {

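A hedged sketch of how the reordered iptables setup above might be invoked at supervisor start-up; the argument order, port number and interface names are assumptions rather than something this commit shows:

import { rejectOnAllInterfacesExcept } from './src/lib/iptables';

// Insert ACCEPT rules for each allowed interface first, then append a
// blanket REJECT (falling back to DROP where REJECT is unsupported).
rejectOnAllInterfacesExcept(['supervisor0', 'lo'], 48484).then(() => {
  console.log('Supervisor API port locked down');
});
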
@ -11,164 +11,164 @@ export const stopServiceSuccess: LogType = {
eventName: 'Service stop',
humanName: 'Killed service',
};
export const stopServiceNoop: LogType = {
export const stopServiceNoop: LogType = {
eventName: 'Service already stopped',
humanName: 'Service is already stopped, removing container',
};
export const stopRemoveServiceNoop: LogType = {
export const stopRemoveServiceNoop: LogType = {
eventName: 'Service already stopped and container removed',
humanName: 'Service is already stopped and the container removed',
};
export const stopServiceError: LogType = {
export const stopServiceError: LogType = {
eventName: 'Service stop error',
humanName: 'Failed to kill service',
};

export const removeDeadService: LogType = {
export const removeDeadService: LogType = {
eventName: 'Remove dead container',
humanName: 'Removing dead container',
};
export const removeDeadServiceError: LogType = {
export const removeDeadServiceError: LogType = {
eventName: 'Remove dead container error',
humanName: 'Error removing dead container',
};

export const downloadImage: LogType = {
export const downloadImage: LogType = {
eventName: 'Docker image download',
humanName: 'Downloading image',
};
export const downloadImageDelta: LogType = {
export const downloadImageDelta: LogType = {
eventName: 'Delta image download',
humanName: 'Downloading delta for image',
};
export const downloadImageSuccess: LogType = {
export const downloadImageSuccess: LogType = {
eventName: 'Image downloaded',
humanName: 'Downloaded image',
};
export const downloadImageError: LogType = {
export const downloadImageError: LogType = {
eventName: 'Image download error',
humanName: 'Failed to download image',
};

export const installService: LogType = {
export const installService: LogType = {
eventName: 'Service install',
humanName: 'Installing service',
};
export const installServiceSuccess: LogType = {
export const installServiceSuccess: LogType = {
eventName: 'Service installed',
humanName: 'Installed service',
};
export const installServiceError: LogType = {
export const installServiceError: LogType = {
eventName: 'Service install error',
humanName: 'Failed to install service',
};

export const deleteImage: LogType = {
export const deleteImage: LogType = {
eventName: 'Image removal',
humanName: 'Deleting image',
};
export const deleteImageSuccess: LogType = {
export const deleteImageSuccess: LogType = {
eventName: 'Image removed',
humanName: 'Deleted image',
};
export const deleteImageError: LogType = {
export const deleteImageError: LogType = {
eventName: 'Image removal error',
humanName: 'Failed to delete image',
};
export const imageAlreadyDeleted: LogType = {
export const imageAlreadyDeleted: LogType = {
eventName: 'Image already deleted',
humanName: 'Image already deleted',
};
export const deltaStillProcessingError: LogType = {
export const deltaStillProcessingError: LogType = {
eventName: 'Delta still processing remotely.',
humanName: 'Delta still processing remotely. Will retry...',
};

export const startService: LogType = {
export const startService: LogType = {
eventName: 'Service start',
humanName: 'Starting service',
};
export const startServiceSuccess: LogType = {
export const startServiceSuccess: LogType = {
eventName: 'Service started',
humanName: 'Started service',
};
export const startServiceNoop: LogType = {
export const startServiceNoop: LogType = {
eventName: 'Service already running',
humanName: 'Service is already running',
};
export const startServiceError: LogType = {
export const startServiceError: LogType = {
eventName: 'Service start error',
humanName: 'Failed to start service',
};

export const updateService: LogType = {
export const updateService: LogType = {
eventName: 'Service update',
humanName: 'Updating service',
};
export const updateServiceError: LogType = {
export const updateServiceError: LogType = {
eventName: 'Service update error',
humanName: 'Failed to update service',
};

export const serviceExit: LogType = {
export const serviceExit: LogType = {
eventName: 'Service exit',
humanName: 'Service exited',
};

export const serviceRestart: LogType = {
export const serviceRestart: LogType = {
eventName: 'Service restart',
humanName: 'Restarting service',
};

export const updateServiceConfig: LogType = {
export const updateServiceConfig: LogType = {
eventName: 'Service config update',
humanName: 'Updating config for service',
};
export const updateServiceConfigSuccess: LogType = {
export const updateServiceConfigSuccess: LogType = {
eventName: 'Service config updated',
humanName: 'Updated config for service',
};
export const updateServiceConfigError: LogType = {
export const updateServiceConfigError: LogType = {
eventName: 'Service config update error',
humanName: 'Failed to update config for service',
};

export const createVolume: LogType = {
export const createVolume: LogType = {
eventName: 'Volume creation',
humanName: 'Creating volume',
};

export const createVolumeError: LogType = {
export const createVolumeError: LogType = {
eventName: 'Volume creation error',
humanName: 'Error creating volume',
};

export const removeVolume: LogType = {
export const removeVolume: LogType = {
eventName: 'Volume removal',
humanName: 'Removing volume',
};

export const removeVolumeError: LogType = {
export const removeVolumeError: LogType = {
eventName: 'Volume removal error',
humanName: 'Error removing volume',
};

export const createNetwork: LogType = {
export const createNetwork: LogType = {
eventName: 'Network creation',
humanName: 'Creating network',
};

export const createNetworkError: LogType = {
export const createNetworkError: LogType = {
eventName: 'Network creation error',
humanName: 'Error creating network',
};

export const removeNetwork: LogType = {
export const removeNetwork: LogType = {
eventName: 'Network removal',
humanName: 'Removing network',
};

export const removeNetworkError: LogType = {
export const removeNetworkError: LogType = {
eventName: 'Network removal error',
humanName: 'Error removing network',
};

@ -2,4 +2,5 @@ export const appNotFoundMessage = `App not found: an app needs to be installed f
If you've recently moved this device from another app,
please push an app and wait for it to be installed first.`;

export const serviceNotFoundMessage = 'Service not found, a container must exist for this endpoint to work';
export const serviceNotFoundMessage =
'Service not found, a container must exist for this endpoint to work';

@ -7,10 +7,10 @@ function getOSReleaseField(path: string, field: string): string | undefined {
try {
const releaseData = fs.readFileSync(path, 'utf-8');
const lines = releaseData.split('\n');
const releaseItems: { [field: string]: string} = { };
const releaseItems: { [field: string]: string } = {};

for (const line of lines) {
const [ key, value ] = line.split('=');
const [key, value] = line.split('=');
releaseItems[_.trim(key)] = _.trim(value);
}

@ -20,7 +20,7 @@ function getOSReleaseField(path: string, field: string): string | undefined {

// Remove enclosing quotes: http://stackoverflow.com/a/19156197/2549019
return releaseItems[field].replace(/^"(.+(?="$))"$/, '$1');
} catch(err) {
} catch (err) {
console.log('Could not get OS release field: ', err.message);
return;
}

@ -16,9 +16,9 @@ export interface ConfigMap {

export interface ConfigSchema {
[key: string]: {
source: string,
default?: any,
mutable?: boolean,
removeIfNull?: boolean,
source: string;
default?: any;
mutable?: boolean;
removeIfNull?: boolean;
};
}

3
src/lib/update-lock.d.ts
vendored
3
src/lib/update-lock.d.ts
vendored
@ -4,8 +4,7 @@ export interface LockCallback {
(appId: number, opts: { force: boolean }, fn: () => void): Promise<void>;
}

export class UpdatesLockedError extends TypedError {
}
export class UpdatesLockedError extends TypedError {}

export function lock(): LockCallback;
export function lockPath(appId: number, serviceName: string): string;

@ -19,7 +19,10 @@ type NullableLiteral = number | NullableString;
* Check an input string as a number, optionally specifying a requirement
* to be positive
*/
export function checkInt(s: NullableLiteral, options: CheckIntOptions = {}): number | void {
export function checkInt(
s: NullableLiteral,
options: CheckIntOptions = {},
): number | void {
if (s == null) {
return;
}
@ -45,7 +48,7 @@ export function checkInt(s: NullableLiteral, options: CheckIntOptions = {}): num
* Check that a string exists, and is not an empty string, 'null', or 'undefined'
*/
export function checkString(s: NullableLiteral): string | void {
if (s == null || !_.isString(s) || _.includes([ 'null', 'undefined', '' ], s)) {
if (s == null || !_.isString(s) || _.includes(['null', 'undefined', ''], s)) {
return;
}

@ -59,7 +62,7 @@ export function checkString(s: NullableLiteral): string | void {
* which represents if the input was truthy
*/
export function checkTruthy(v: string | boolean | number): boolean | void {
switch(v) {
switch (v) {
case '1':
case 'true':
case true:
@ -102,7 +105,9 @@ export function isValidEnv(obj: EnvVarObject): boolean {

return _.every(obj, (val, key) => {
if (!isValidShortText(key)) {
console.log('debug: Non-valid short text env var key passed to validation.isValidEnv');
console.log(
'debug: Non-valid short text env var key passed to validation.isValidEnv',
);
console.log(`\tKey: ${inspect(key)}`);
return false;
}
@ -136,19 +141,25 @@ export function isValidLabelsObject(obj: LabelObject): boolean {

return _.every(obj, (val, key) => {
if (!isValidShortText(key)) {
console.log('debug: Non-valid short text label key passed to validation.isValidLabelsObject');
console.log(
'debug: Non-valid short text label key passed to validation.isValidLabelsObject',
);
console.log(`\tkey: ${inspect(key)}`);
return false;
}

if (!LABEL_NAME_REGEX.test(key)) {
console.log('debug: Invalid label name passed to validation.isValidLabelsObject');
console.log(
'debug: Invalid label name passed to validation.isValidLabelsObject',
);
console.log(`\tkey: ${inspect(key)}`);
return false;
}

if (!_.isString(val)) {
console.log('debug: Non-string value passed to validation.isValidLabelsObject');
console.log(
'debug: Non-string value passed to validation.isValidLabelsObject',
);
console.log(`\tval: ${inspect(val)}`);
return false;
}
@ -161,7 +172,9 @@ export function isValidDeviceName(name: string): boolean {
// currently the only disallowed value in a device name is a newline
const newline = name.indexOf('\n') !== -1;
if (newline) {
console.log('debug: newline found in device name. This is invalid and should be removed');
console.log(
'debug: newline found in device name. This is invalid and should be removed',
);
}
return !newline;
}
@ -179,7 +192,9 @@ function undefinedOrValidEnv(val: EnvVarObject): boolean {
*/
export function isValidDependentAppsObject(apps: any): boolean {
if (!_.isObject(apps)) {
console.log('debug: non-object passed to validation.isValidDependentAppsObject');
console.log(
'debug: non-object passed to validation.isValidDependentAppsObject',
);
console.log(`\tapps: ${inspect(apps)}`);
return false;
}
@ -193,7 +208,9 @@ export function isValidDependentAppsObject(apps: any): boolean {
});

if (!isValidShortText(appId) || !checkInt(appId)) {
console.log('debug: Invalid appId passed to validation.isValidDependentAppsObject');
console.log(
'debug: Invalid appId passed to validation.isValidDependentAppsObject',
);
console.log(`\tappId: ${inspect(appId)}`);
return false;
}
@ -201,7 +218,9 @@ export function isValidDependentAppsObject(apps: any): boolean {
return _.conformsTo(val, {
name: (n: any) => {
if (!isValidShortText(n)) {
console.log('debug: Invalid name passed to validation.isValidDependentAppsObject');
console.log(
'debug: Invalid name passed to validation.isValidDependentAppsObject',
);
console.log(`\tname: ${inspect(n)}`);
return false;
}
@ -209,7 +228,9 @@ export function isValidDependentAppsObject(apps: any): boolean {
},
image: (i: any) => {
if (val.commit != null && !isValidShortText(i)) {
console.log('debug: non valid image passed to validation.isValidDependentAppsObject');
console.log(
'debug: non valid image passed to validation.isValidDependentAppsObject',
);
console.log(`\timage: ${inspect(i)}`);
return false;
}
@ -217,7 +238,9 @@ export function isValidDependentAppsObject(apps: any): boolean {
},
commit: (c: any) => {
if (c != null && !isValidShortText(c)) {
console.log('debug: invalid commit passed to validation.isValidDependentAppsObject');
console.log(
'debug: invalid commit passed to validation.isValidDependentAppsObject',
);
console.log(`\tcommit: ${inspect(c)}`);
return false;
}
@ -225,7 +248,9 @@ export function isValidDependentAppsObject(apps: any): boolean {
},
config: (c: any) => {
if (!undefinedOrValidEnv(c)) {
console.log('debug; Invalid config passed to validation.isValidDependentAppsObject');
console.log(
'debug; Invalid config passed to validation.isValidDependentAppsObject',
);
console.log(`\tconfig: ${inspect(c)}`);
return false;
}
@ -233,7 +258,9 @@ export function isValidDependentAppsObject(apps: any): boolean {
},
environment: (e: any) => {
if (!undefinedOrValidEnv(e)) {
console.log('debug; Invalid environment passed to validation.isValidDependentAppsObject');
console.log(
'debug; Invalid environment passed to validation.isValidDependentAppsObject',
);
console.log(`\tenvironment: ${inspect(e)}`);
return false;
}
@ -245,7 +272,9 @@ export function isValidDependentAppsObject(apps: any): boolean {

function isValidService(service: any, serviceId: string): boolean {
if (!isValidShortText(serviceId) || !checkInt(serviceId)) {
console.log('debug: Invalid service id passed to validation.isValidService');
console.log(
'debug: Invalid service id passed to validation.isValidService',
);
console.log(`\tserviceId: ${inspect(serviceId)}`);
return false;
}
@ -253,7 +282,9 @@ function isValidService(service: any, serviceId: string): boolean {
return _.conformsTo(service, {
serviceName: (n: any) => {
if (!isValidShortText(n)) {
console.log('debug: Invalid service name passed to validation.isValidService');
console.log(
'debug: Invalid service name passed to validation.isValidService',
);
console.log(`\tserviceName: ${inspect(n)}`);
return false;
}
@ -277,7 +308,9 @@ function isValidService(service: any, serviceId: string): boolean {
},
imageId: (i: any) => {
if (checkInt(i) == null) {
console.log('debug: Invalid image id passed to validation.isValidService');
console.log(
'debug: Invalid image id passed to validation.isValidService',
);
console.log(`\timageId: ${inspect(i)}`);
return false;
}
@ -285,7 +318,9 @@ function isValidService(service: any, serviceId: string): boolean {
},
labels: (l: any) => {
if (!isValidLabelsObject(l)) {
console.log('debug: Invalid labels object passed to validation.isValidService');
console.log(
'debug: Invalid labels object passed to validation.isValidService',
);
console.log(`\tlabels: ${inspect(l)}`);
return false;
}
@ -311,7 +346,9 @@ export function isValidAppsObject(obj: any): boolean {

return _.every(obj, (val, appId) => {
if (!isValidShortText(appId) || !checkInt(appId)) {
console.log('debug: Invalid appId passed to validation.isValidAppsObject');
console.log(
'debug: Invalid appId passed to validation.isValidAppsObject',
);
console.log(`\tappId: ${inspect(appId)}`);
return false;
}
@ -319,7 +356,9 @@ export function isValidAppsObject(obj: any): boolean {
return _.conformsTo(_.defaults(_.clone(val), { releaseId: undefined }), {
name: (n: any) => {
if (!isValidShortText(n)) {
console.log('debug: Invalid service name passed to validation.isValidAppsObject');
console.log(
'debug: Invalid service name passed to validation.isValidAppsObject',
);
console.log(`\tname: ${inspect(n)}`);
return false;
}
@ -327,7 +366,9 @@ export function isValidAppsObject(obj: any): boolean {
},
releaseId: (r: any) => {
if (r != null && checkInt(r) == null) {
console.log('debug: Invalid releaseId passed to validation.isValidAppsObject');
console.log(
'debug: Invalid releaseId passed to validation.isValidAppsObject',
);
console.log(`\treleaseId: ${inspect(r)}`);
return false;
}
@ -335,14 +376,18 @@ export function isValidAppsObject(obj: any): boolean {
},
services: (s: any) => {
if (!_.isObject(s)) {
console.log('debug: Non-object service passed to validation.isValidAppsObject');
console.log(
'debug: Non-object service passed to validation.isValidAppsObject',
);
console.log(`\tservices: ${inspect(s)}`);
return false;
}

return _.every(s, (svc, svcId) => {
if (!isValidService(svc, svcId)) {
console.log('debug: Invalid service object passed to validation.isValidAppsObject');
console.log(
'debug: Invalid service object passed to validation.isValidAppsObject',
);
console.log(`\tsvc: ${inspect(svc)}`);
return false;
}
@ -359,17 +404,19 @@ export function isValidAppsObject(obj: any): boolean {
* Validate a dependent devices object from the state endpoint.
*/
export function isValidDependentDevicesObject(devices: any): boolean {

if (!_.isObject(devices)) {
console.log('debug: Non-object passed to validation.isValidDependentDevicesObject');
console.log(
'debug: Non-object passed to validation.isValidDependentDevicesObject',
);
console.log(`\tdevices: ${inspect(devices)}`);
return false;
}

return _.every(devices, (val, uuid) => {

if (!isValidShortText(uuid)) {
console.log('debug: Invalid uuid passed to validation.isValidDependentDevicesObject');
console.log(
'debug: Invalid uuid passed to validation.isValidDependentDevicesObject',
);
console.log(`\tuuid: ${inspect(uuid)}`);
return false;
}
@ -377,7 +424,9 @@ export function isValidDependentDevicesObject(devices: any): boolean {
return _.conformsTo(val, {
name: (n: any) => {
if (!isValidShortText(n)) {
console.log('debug: Invalid device name passed to validation.isValidDependentDevicesObject');
console.log(
'debug: Invalid device name passed to validation.isValidDependentDevicesObject',
);
console.log(`\tname: ${inspect(n)}`);
return false;
}
@ -385,22 +434,31 @@ export function isValidDependentDevicesObject(devices: any): boolean {
},
apps: (a: any) => {
if (!_.isObject(a)) {
console.log('debug: Invalid apps object passed to validation.isValidDependentDevicesObject');
console.log(
'debug: Invalid apps object passed to validation.isValidDependentDevicesObject',
);
console.log(`\tapps: ${inspect(a)}`);
return false;
}

if (_.isEmpty(a)) {
console.log('debug: Empty object passed to validation.isValidDependentDevicesObject');
console.log(
'debug: Empty object passed to validation.isValidDependentDevicesObject',
);
return false;
}

return _.every(a, (app) => {
app = _.defaults(_.clone(app), { config: undefined, environment: undefined });
return _.every(a, app => {
app = _.defaults(_.clone(app), {
config: undefined,
environment: undefined,
});
return _.conformsTo(app, {
config: (c: any) => {
if (!undefinedOrValidEnv(c)) {
console.log('debug: Invalid config passed to validation.isValidDependentDevicesObject');
console.log(
'debug: Invalid config passed to validation.isValidDependentDevicesObject',
);
console.log(`\tconfig: ${inspect(c)}`);
return false;
}
@ -408,7 +466,9 @@ export function isValidDependentDevicesObject(devices: any): boolean {
},
environment: (e: any) => {
if (!undefinedOrValidEnv(e)) {
console.log('debug: Invalid environment passed to validation.isValidDependentDevicesObject');
console.log(
'debug: Invalid environment passed to validation.isValidDependentDevicesObject',
);
console.log(`\tconfig: ${inspect(e)}`);
return false;
}

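As a quick illustration of how the validation helpers reformatted above behave (the sample inputs and the `positive` option name are assumptions, not taken from the commit):

import { checkInt, checkString, checkTruthy } from './src/lib/validation';

const appId = checkInt('42', { positive: true }); // -> 42; non-numeric input yields undefined
const name = checkString('');                     // -> undefined: '', 'null' and 'undefined' are rejected
const flag = checkTruthy('1');                    // -> true: '1', 'true' and true are all treated as truthy
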
@ -21,11 +21,11 @@ export class LocalModeManager {
public docker: Docker,
public logger: Logger,
public db: Database,
) { }
) {}

public async init() {
// Setup a listener to catch state changes relating to local mode
this.config.on('change', (changed) => {
this.config.on('change', changed => {
if (changed.localMode != null) {
const localMode = checkTruthy(changed.localMode) || false;

@ -40,7 +40,9 @@ export class LocalModeManager {
}
});

const localMode = checkTruthy(await this.config.get('localMode') || false);
const localMode = checkTruthy(
(await this.config.get('localMode')) || false,
);
if (!localMode) {
// Remove any leftovers if necessary
await this.removeLocalModeArtifacts();
@ -52,35 +54,43 @@ export class LocalModeManager {
const images = await this.getLocalModeImages();
const containers = await this.getLocalModeContainers(images);

await Bluebird.map(containers, (containerId) => {
await Bluebird.map(containers, containerId => {
console.log('Removing local mode container: ', containerId);
return this.docker.getContainer(containerId).remove({ force: true });
});
await Bluebird.map(images, (imageId) => {
await Bluebird.map(images, imageId => {
console.log('Removing local mode image: ', imageId);
return this.docker.getImage(imageId).remove({ force: true });
});

// Remove any local mode state added to the database
await this.db.models('app').del().where({ source: 'local' });
await this.db
.models('app')
.del()
.where({ source: 'local' });
} catch (e) {
console.log('There was an error clearing local mode artifacts: ', e);
}

}

private async getLocalModeImages(): Promise<string[]> {
// Return all local mode images present on the local docker daemon
return _.map(await this.docker.listImages({ filters: { label: [ 'io.resin.local.image=1' ] } }), 'Id');
return _.map(
await this.docker.listImages({
filters: { label: ['io.resin.local.image=1'] },
}),
'Id',
);
}

private async getLocalModeContainers(localModeImageIds: string[]): Promise<string[]> {
private async getLocalModeContainers(
localModeImageIds: string[],
): Promise<string[]> {
return _(await this.docker.listContainers())
.filter(({ Image }) => _.includes(localModeImageIds, Image))
.map('Id')
.value();
}

}

export default LocalModeManager;

@ -46,8 +46,8 @@ export class Logger {
private attached: {
[key in OutputStream]: { [containerId: string]: boolean }
} = {
[OutputStream.Stderr]: { },
[OutputStream.Stdout]: { },
[OutputStream.Stderr]: {},
[OutputStream.Stdout]: {},
};

public constructor({ eventTracker }: LoggerConstructOptions) {
@ -62,8 +62,7 @@ export class Logger {
offlineMode,
enableLogs,
localMode,
}: LoggerSetupOptions,
) {
}: LoggerSetupOptions) {
this.balenaBackend = new BalenaLogBackend(apiEndpoint, uuid, deviceApiKey);
this.localBackend = new LocalLogBackend();

@ -126,27 +125,35 @@ export class Logger {
this.log(msgObj);
this.eventTracker.track(
eventName != null ? eventName : message,
eventObj != null ? eventObj : { },
eventObj != null ? eventObj : {},
);
}

public lock(containerId: string): Bluebird.Disposer<() => void> {
return this.writeLock(containerId)
.disposer((release) => {
release();
});
return this.writeLock(containerId).disposer(release => {
release();
});
}

public attach(
docker: Docker,
containerId: string,
serviceInfo: { serviceId: string, imageId: string },
serviceInfo: { serviceId: string; imageId: string },
): Bluebird<void> {
return Bluebird.using(this.lock(containerId), () => {
return this.attachStream(docker, OutputStream.Stdout, containerId, serviceInfo)
.then(() => {
return this.attachStream(docker, OutputStream.Stderr, containerId, serviceInfo);
});
return this.attachStream(
docker,
OutputStream.Stdout,
containerId,
serviceInfo,
).then(() => {
return this.attachStream(
docker,
OutputStream.Stderr,
containerId,
serviceInfo,
);
});
});
}

@ -169,7 +176,7 @@ export class Logger {

public logConfigChange(
config: { [configName: string]: string },
{ success = false, err = null }: { success?: boolean, err?: Error } = { },
{ success = false, err = null }: { success?: boolean; err?: Error } = {},
) {
const obj: LogEventObject = { config };
let message: string;
@ -177,7 +184,7 @@ export class Logger {
if (success) {
message = `Applied configuration change ${JSON.stringify(config)}`;
eventName = 'Apply config change success';
} else if(err != null) {
} else if (err != null) {
message = `Error applying configuration change: ${err}`;
eventName = 'Apply config change error';
obj.error = err;
@ -196,9 +203,8 @@ export class Logger {
docker: Docker,
streamType: OutputStream,
containerId: string,
{ serviceId, imageId }: { serviceId: string, imageId: string },
{ serviceId, imageId }: { serviceId: string; imageId: string },
): Bluebird<void> {

return Bluebird.try(() => {
if (this.attached[streamType][containerId]) {
return;
@ -212,12 +218,14 @@ export class Logger {
since: Math.floor(Date.now() / 1000),
};

return docker.getContainer(containerId).logs(logsOpts)
.then((stream) => {
return docker
.getContainer(containerId)
.logs(logsOpts)
.then(stream => {
this.attached[streamType][containerId] = true;

stream
.on('error', (err) => {
.on('error', err => {
console.error('Error on container logs', err);
this.attached[streamType][containerId] = false;
})
@ -240,7 +248,7 @@ export class Logger {
this.log(message);
}
})
.on('error', (err) => {
.on('error', err => {
console.error('Error on container logs', err);
this.attached[streamType][containerId] = false;
})
@ -248,16 +256,15 @@ export class Logger {
this.attached[streamType][containerId] = false;
});
});

});

}

private objectNameForLogs(eventObj: LogEventObject): string | null {
if (eventObj == null) {
return null;
}
if (eventObj.service != null &&
if (
eventObj.service != null &&
eventObj.service.serviceName != null &&
eventObj.service.config != null &&
eventObj.service.config.image != null
@ -286,20 +293,17 @@ export class Logger {

private static extractContainerMessage(
msgBuf: Buffer,
): { message: string, timestamp: number } | null {
): { message: string; timestamp: number } | null {
// Non-tty message format from:
// https://docs.docker.com/engine/api/v1.30/#operation/ContainerAttach
if (
msgBuf[0] in [0, 1, 2] &&
_.every(msgBuf.slice(1, 7), (c) => c === 0)
) {
if (msgBuf[0] in [0, 1, 2] && _.every(msgBuf.slice(1, 7), c => c === 0)) {
// Take the header from this message, and parse it as normal
msgBuf = msgBuf.slice(8);
}
const logLine = msgBuf.toString();
const space = logLine.indexOf(' ');
if (space > 0) {
let timestamp = (new Date(logLine.substr(0, space))).getTime();
let timestamp = new Date(logLine.substr(0, space)).getTime();
if (_.isNaN(timestamp)) {
timestamp = Date.now();
}

@ -21,7 +21,6 @@ interface Options extends url.UrlWithParsedQuery {
}

export class BalenaLogBackend extends LogBackend {

private req: ClientRequest | null = null;
private dropCount: number = 0;
private writable: boolean = true;
@ -30,11 +29,7 @@ export class BalenaLogBackend extends LogBackend {
private stream: stream.PassThrough;
timeout: NodeJS.Timer;

public constructor(
apiEndpoint: string,
uuid: string,
deviceApiKey: string,
) {
public constructor(apiEndpoint: string, uuid: string, deviceApiKey: string) {
super();

this.opts = url.parse(`${apiEndpoint}/device/v2/${uuid}/log-stream`) as any;
@ -62,7 +57,9 @@ export class BalenaLogBackend extends LogBackend {
this.flush();
if (this.dropCount > 0) {
this.write({
message: `Warning: Suppressed ${this.dropCount} message(s) due to high load`,
message: `Warning: Suppressed ${
this.dropCount
} message(s) due to high load`,
timestamp: Date.now(),
isSystem: true,
isStdErr: true,
@ -81,10 +78,13 @@ export class BalenaLogBackend extends LogBackend {
return;
}

message = _.assign({
timestamp: Date.now(),
message: '',
}, message);
message = _.assign(
{
timestamp: Date.now(),
message: '',
},
message,
);

if (!message.isSystem && message.serviceId == null) {
return;
@ -104,14 +104,17 @@ export class BalenaLogBackend extends LogBackend {
// Since we haven't sent the request body yet, and never will,the
// only reason for the server to prematurely respond is to
// communicate an error. So teardown the connection immediately
this.req.on('response', (res) => {
console.log('LogBackend: server responded with status code:', res.statusCode);
this.req.on('response', res => {
console.log(
'LogBackend: server responded with status code:',
res.statusCode,
);
this.teardown();
});

this.req.on('timeout', () => this.teardown());
this.req.on('close', () => this.teardown());
this.req.on('error', (err) => {
this.req.on('error', err => {
console.log('LogBackend: unexpected error:', err);
this.teardown();
});
@ -120,7 +123,6 @@ export class BalenaLogBackend extends LogBackend {
// respond with potential errors such as 401 authentication error
this.req.flushHeaders();

// We want a very low writable high watermark to prevent having many
// chunks stored in the writable queue of @_gzip and have them in
// @_stream instead. This is desirable because once @_gzip.flush() is
@ -142,7 +144,6 @@ export class BalenaLogBackend extends LogBackend {
this.flush();
}
}, RESPONSE_GRACE_PERIOD);

}, COOLDOWN_PERIOD);

private snooze = _.debounce(this.teardown, KEEPALIVE_TIMEOUT);
@ -150,11 +151,15 @@ export class BalenaLogBackend extends LogBackend {
// Flushing every ZLIB_TIMEOUT hits a balance between compression and
// latency. When ZLIB_TIMEOUT is 0 the compression ratio is around 5x
// whereas when ZLIB_TIMEOUT is infinity the compession ratio is around 10x.
private flush = _.throttle(() => {
if (this.gzip != null) {
this.gzip.flush(zlib.Z_SYNC_FLUSH);
}
}, ZLIB_TIMEOUT, { leading: false });
private flush = _.throttle(
() => {
if (this.gzip != null) {
this.gzip.flush(zlib.Z_SYNC_FLUSH);
}
},
ZLIB_TIMEOUT,
{ leading: false },
);

private teardown() {
if (this.req != null) {

@ -2,9 +2,4 @@ import { LocalLogBackend } from './local-backend';
import { LogBackend, LogMessage } from './log-backend';
import { BalenaLogBackend } from './balena-backend';

export {
LocalLogBackend,
LogBackend,
LogMessage,
BalenaLogBackend,
};
export { LocalLogBackend, LogBackend, LogMessage, BalenaLogBackend };

@ -6,14 +6,12 @@ import { checkInt } from '../lib/validation';
import { LogBackend, LogMessage } from './log-backend';

export class LocalLogBackend extends LogBackend {

private globalListeners: Readable[] = [];

private serviceNameResolver: (serviceId: number) => Bluebird<string>;

public log(message: LogMessage): void {
if (this.publishEnabled) {

Bluebird.try(() => {
if (!message.isSystem) {
if (this.serviceNameResolver == null) {
@ -23,27 +21,29 @@ export class LocalLogBackend extends LogBackend {
}
const svcId = checkInt(message.serviceId);
if (svcId == null) {
console.log('Warning: Non-integer service id found in local logs: ');
console.log(
'Warning: Non-integer service id found in local logs: ',
);
console.log(` ${JSON.stringify(message)}`);
return null;
}
// TODO: Can we cache this value? The service ids are reused, so
// we would need a way of invalidating the cache
return this.serviceNameResolver(svcId).then((serviceName) => {
return this.serviceNameResolver(svcId).then(serviceName => {
return _.assign({}, { serviceName }, message);
});
} else {
return message;
}
})
.then((message: LogMessage | null) => {
if (message != null) {
_.each(this.globalListeners, (listener) => {
listener.push(`${JSON.stringify(message)}\n`);
});
}
})
.catch((e) => {
.then((message: LogMessage | null) => {
if (message != null) {
_.each(this.globalListeners, listener => {
listener.push(`${JSON.stringify(message)}\n`);
});
}
})
.catch(e => {
console.log('Error streaming local log output: ', e);
});
}
@ -62,10 +62,11 @@ export class LocalLogBackend extends LogBackend {
return stream;
}

public assignServiceNameResolver(resolver: (serviceId: number) => Bluebird<string>) {
public assignServiceNameResolver(
resolver: (serviceId: number) => Bluebird<string>,
) {
this.serviceNameResolver = resolver;
}

}

export default LocalLogBackend;

@ -1,4 +1,3 @@

export type LogMessage = Dictionary<any>;

export abstract class LogBackend {

@ -7,7 +7,7 @@ APIBinder = require './api-binder'
DeviceState = require './device-state'
SupervisorAPI = require './supervisor-api'
{ Logger } = require './logger'
{ checkTruthy } = require './lib/validation';
{ checkTruthy } = require './lib/validation'

constants = require './lib/constants'

@ -7,14 +7,14 @@ _ = require 'lodash'

configs = {
simple: {
compose: require('./data/docker-states/simple/compose.json');
imageInfo: require('./data/docker-states/simple/imageInfo.json');
inspect: require('./data/docker-states/simple/inspect.json');
compose: require('./data/docker-states/simple/compose.json')
imageInfo: require('./data/docker-states/simple/imageInfo.json')
inspect: require('./data/docker-states/simple/inspect.json')
}
entrypoint: {
compose: require('./data/docker-states/entrypoint/compose.json');
imageInfo: require('./data/docker-states/entrypoint/imageInfo.json');
inspect: require('./data/docker-states/entrypoint/inspect.json');
compose: require('./data/docker-states/entrypoint/compose.json')
imageInfo: require('./data/docker-states/entrypoint/imageInfo.json')
inspect: require('./data/docker-states/entrypoint/inspect.json')
}
}

@ -199,8 +199,8 @@ describe 'compose/service', ->
serviceId: 123456,
serviceName: 'test',
ports: [
"80:80"
"100:100"
'80:80'
'100:100'
]
}, { appName: 'test' })

@ -302,14 +302,14 @@ describe 'compose/service', ->
}, { appName: 'test' })

expect(makeComposeServiceWithNetwork({
"balena": {
"ipv4Address": "1.2.3.4"
'balena': {
'ipv4Address': '1.2.3.4'
}
}).toDockerContainer({ deviceName: 'foo' }).NetworkingConfig).to.deep.equal({
EndpointsConfig: {
"123456_balena": {
'123456_balena': {
IPAMConfig: {
IPV4Address: "1.2.3.4"
IPV4Address: '1.2.3.4'
},
Aliases: []
}
@ -325,7 +325,7 @@ describe 'compose/service', ->
}
}).toDockerContainer({ deviceName: 'foo' }).NetworkingConfig).to.deep.equal({
EndpointsConfig: {
"123456_balena": {
'123456_balena': {
IPAMConfig: {
IPV4Address: '1.2.3.4'
IPV6Address: '5.6.7.8'
@ -337,23 +337,23 @@ describe 'compose/service', ->
})

it 'should correctly convert Docker format to service format', ->
dockerCfg = require('./data/docker-states/simple/inspect.json');
dockerCfg = require('./data/docker-states/simple/inspect.json')
makeServiceFromDockerWithNetwork = (networks) ->
Service.fromDockerContainer(
newConfig = _.cloneDeep(dockerCfg);
newConfig = _.cloneDeep(dockerCfg)
newConfig.NetworkSettings = { Networks: networks }
)

expect(makeServiceFromDockerWithNetwork({
'123456_balena': {
IPAMConfig: {
IPv4Address: "1.2.3.4"
IPv4Address: '1.2.3.4'
},
Aliases: []
}
}).config.networks).to.deep.equal({
'123456_balena': {
"ipv4Address": "1.2.3.4"
'ipv4Address': '1.2.3.4'
}
})

@ -225,7 +225,7 @@ describe 'deviceState', ->
@deviceState.loadTargetFromFile(process.env.ROOT_MOUNTPOINT + '/apps.json')
.then =>
@deviceState.getTarget()
.then (targetState) =>
.then (targetState) ->
testTarget = _.cloneDeep(testTarget1)
testTarget.local.apps['1234'].services = _.map testTarget.local.apps['1234'].services, (s) ->
s.imageName = s.image

@ -95,33 +95,33 @@ describe 'EventTracker', ->
it 'should rate limit events of the same type', ->
@eventTracker.client.track.reset()

@eventTracker.track('test', { });
@eventTracker.track('test', { });
@eventTracker.track('test', { });
@eventTracker.track('test', { });
@eventTracker.track('test', { });
@eventTracker.track('test', {})
@eventTracker.track('test', {})
@eventTracker.track('test', {})
@eventTracker.track('test', {})
@eventTracker.track('test', {})

expect(@eventTracker.client.track).to.have.callCount(1)

it 'should rate limit events of the same type with different arguments', ->
@eventTracker.client.track.reset()

@eventTracker.track('test2', { a: 1 });
@eventTracker.track('test2', { b: 2 });
@eventTracker.track('test2', { c: 3 });
@eventTracker.track('test2', { d: 4 });
@eventTracker.track('test2', { e: 5 });
@eventTracker.track('test2', { a: 1 })
@eventTracker.track('test2', { b: 2 })
@eventTracker.track('test2', { c: 3 })
@eventTracker.track('test2', { d: 4 })
@eventTracker.track('test2', { e: 5 })

expect(@eventTracker.client.track).to.have.callCount(1)

it 'should not rate limit events of different types', ->
@eventTracker.client.track.reset()

@eventTracker.track('test3', { a: 1 });
@eventTracker.track('test4', { b: 2 });
@eventTracker.track('test5', { c: 3 });
@eventTracker.track('test6', { d: 4 });
@eventTracker.track('test7', { e: 5 });
@eventTracker.track('test3', { a: 1 })
@eventTracker.track('test4', { b: 2 })
@eventTracker.track('test5', { c: 3 })
@eventTracker.track('test6', { d: 4 })
@eventTracker.track('test7', { e: 5 })

expect(@eventTracker.client.track).to.have.callCount(5)

@ -26,7 +26,7 @@ describe 'Logger', ->
track: m.sinon.spy()
}

@logger = new Logger({eventTracker: @fakeEventTracker})
@logger = new Logger({ eventTracker: @fakeEventTracker })
@logger.init({
apiEndpoint: 'https://example.com'
uuid: 'deadbeef'
@ -41,7 +41,7 @@ describe 'Logger', ->

it 'waits the grace period before sending any logs', ->
clock = m.sinon.useFakeTimers()
@logger.log({message: 'foobar', serviceId: 15})
@logger.log({ message: 'foobar', serviceId: 15 })
clock.tick(4999)
clock.restore()

@ -51,7 +51,7 @@ describe 'Logger', ->

it 'tears down the connection after inactivity', ->
clock = m.sinon.useFakeTimers()
@logger.log({message: 'foobar', serviceId: 15})
@logger.log({ message: 'foobar', serviceId: 15 })
clock.tick(61000)
clock.restore()

@ -150,20 +150,20 @@ describe 'DeviceConfig', ->

it 'accepts RESIN_ and BALENA_ variables', ->
@deviceConfig.formatConfigKeys({
FOO: 'bar',
BAR: 'baz',
RESIN_HOST_CONFIG_foo: 'foobaz',
BALENA_HOST_CONFIG_foo: 'foobar',
RESIN_HOST_CONFIG_other: 'val',
BALENA_HOST_CONFIG_baz: 'bad',
BALENA_SUPERVISOR_POLL_INTERVAL: '100',
FOO: 'bar',
BAR: 'baz',
RESIN_HOST_CONFIG_foo: 'foobaz',
BALENA_HOST_CONFIG_foo: 'foobar',
RESIN_HOST_CONFIG_other: 'val',
BALENA_HOST_CONFIG_baz: 'bad',
BALENA_SUPERVISOR_POLL_INTERVAL: '100',
}).then (filteredConf) ->
expect(filteredConf).to.deep.equal({
HOST_CONFIG_foo: 'foobar',
HOST_CONFIG_other: 'val',
HOST_CONFIG_baz: 'bad',
SUPERVISOR_POLL_INTERVAL: '100',
});
})

describe 'Extlinux files', ->

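From the expected output, only RESIN_- and BALENA_-prefixed variables are kept, the prefix is stripped, and a BALENA_ value wins over a RESIN_ value for the same key. A rough synchronous sketch of that rule (the real method returns a promise and may apply further filtering; the function name here is illustrative):

// Precedence order is an inference from the expected output, where
// BALENA_HOST_CONFIG_foo overrides RESIN_HOST_CONFIG_foo.
const prefixes = ['BALENA_', 'RESIN_'];

function formatConfigKeysSketch(
	conf: Record<string, string>,
): Record<string, string> {
	const out: Record<string, string> = {};
	for (const prefix of prefixes) {
		for (const key of Object.keys(conf)) {
			if (key.startsWith(prefix)) {
				const stripped = key.slice(prefix.length);
				if (!(stripped in out)) {
					out[stripped] = conf[key];
				}
			}
		}
	}
	return out;
}

// formatConfigKeysSketch({ FOO: 'bar', RESIN_HOST_CONFIG_foo: 'foobaz', BALENA_HOST_CONFIG_foo: 'foobar' })
// => { HOST_CONFIG_foo: 'foobar' }
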
@ -10,13 +10,13 @@ describe 'compose/network', ->
it 'should convert a compose configuration to an internal representation', ->

network = Network.fromComposeObject({ logger: null, docker: null }, 'test', 123, {
'driver':'bridge',
'ipam':{
'driver':'default',
'config':[
'driver': 'bridge',
'ipam': {
'driver': 'default',
'config': [
{
'subnet':'172.25.0.0/25',
'gateway':'172.25.0.1'
'subnet': '172.25.0.0/25',
'gateway': '172.25.0.1'
}
]
}

@ -30,12 +30,12 @@ describe 'compose/network', ->
subnet: '172.25.0.0/25'
gateway: '172.25.0.1'
]
options: { }
options: {}
}
enableIPv6: false,
internal: false,
labels: { }
options: { }
labels: {}
options: {}
})

describe 'internal config -> docker config', ->

@ -43,13 +43,13 @@ describe 'compose/network', ->
it 'should convert an internal representation to a docker representation', ->

network = Network.fromComposeObject({ logger: null, docker: null }, 'test', 123, {
'driver':'bridge',
'ipam':{
'driver':'default',
'config':[
'driver': 'bridge',
'ipam': {
'driver': 'default',
'config': [
{
'subnet':'172.25.0.0/25',
'gateway':'172.25.0.1'
'subnet': '172.25.0.0/25',
'gateway': '172.25.0.1'
}
]
}

@ -65,7 +65,7 @@ describe 'compose/network', ->
Subnet: '172.25.0.0/25'
Gateway: '172.25.0.1'
}]
Options: { }
Options: {}
}
EnableIPv6: false,
Internal: false,

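Together, the two conversions exercised here go from a compose network object to an internal representation and on to a Docker network configuration. A condensed sketch of the end-to-end mapping, with names and defaults inferred from the expected values in these tests rather than taken from the actual module:

interface ComposeNetworkConfig {
	driver?: string;
	ipam?: {
		driver?: string;
		config?: Array<{ subnet?: string; gateway?: string }>;
	};
	enable_ipv6?: boolean;
	internal?: boolean;
	labels?: Record<string, string>;
	options?: Record<string, string>;
}

// Output shape loosely follows dockerode's network creation options.
function composeToDockerNetwork(
	appId: number,
	name: string,
	net: ComposeNetworkConfig,
) {
	const ipam = net.ipam || {};
	return {
		Name: `${appId}_${name}`, // assumed naming scheme for illustration
		Driver: net.driver || 'bridge',
		IPAM: {
			Driver: ipam.driver || 'default',
			Config: (ipam.config || []).map((c) => ({
				Subnet: c.subnet,
				Gateway: c.gateway,
			})),
			Options: {},
		},
		EnableIPv6: net.enable_ipv6 || false,
		Internal: net.internal || false,
		Labels: net.labels || {},
		Options: net.options || {},
	};
}
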
@ -1,8 +1,8 @@
require('mocha');
require('mocha')

{ expect } = require('chai');
{ expect } = require('chai')

ComposeUtils = require('../src/compose/utils');
ComposeUtils = require('../src/compose/utils')

describe 'Composition utilities', ->

3
typings/blinking.d.ts
vendored
@ -1,5 +1,4 @@
declare module 'blinking' {

interface Pattern {
blinks?: number;
onDuration?: number;
@ -8,7 +7,7 @@ declare module 'blinking' {
}

interface Blink {
start: (pattern: Pattern) => void
start: (pattern: Pattern) => void;
stop: () => void;
}

4
typings/dockerode-ext.d.ts
vendored
@ -1,7 +1,6 @@
import { ContainerInspectInfo } from 'dockerode';

declare module 'dockerode' {

// Extend the HostConfig interface with the missing fields.
// TODO: Add these upstream to DefinitelyTyped
interface HostConfig {
@ -22,5 +21,4 @@ declare module 'dockerode' {
Healthcheck?: DockerHealthcheck;
StopTimeout?: number;
}

}
}

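This file relies on TypeScript module augmentation: re-opening the 'dockerode' module and merging extra members into its existing interfaces. The same pattern in miniature, with a purely invented field:

import { HostConfig } from 'dockerode';

declare module 'dockerode' {
	// Declaration merging: this re-opens dockerode's HostConfig interface and
	// adds members to it rather than replacing it; the field is illustrative.
	interface HostConfig {
		SomeExtraField?: string;
	}
}

// The merged member then appears on the ordinary dockerode type:
const hostConfig: HostConfig = { SomeExtraField: 'example' };
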
63
typings/duration-js.d.ts
vendored
@ -1,62 +1,61 @@
// From: https://github.com/icholy/Duration.js/pull/15
// Once the above is merged, use the inbuilt module types
declare module "duration-js" {
declare module 'duration-js' {
type DurationLike = Duration | string | number;
type DateLike = Date | number;

class Duration {

private _milliseconds: number;

constructor(value?: DurationLike);

static millisecond: Duration;
static second: Duration;
static minute: Duration;
static hour: Duration;
static day: Duration;
static week: Duration;
static second: Duration;
static minute: Duration;
static hour: Duration;
static day: Duration;
static week: Duration;

static milliseconds(milliseconds: number): Duration;
static seconds(seconds: number): Duration;
static minutes(minutes: number): Duration;
static hours(hours: number): Duration;
static days(days: number): Duration;
static weeks(weeks: number): Duration;
static seconds(seconds: number): Duration;
static minutes(minutes: number): Duration;
static hours(hours: number): Duration;
static days(days: number): Duration;
static weeks(weeks: number): Duration;

nanoseconds(): number;
nanoseconds(): number;
microseconds(): number;
milliseconds(): number;
seconds(): number;
minutes(): number;
hours(): number;
days(): number;
weeks(): number;
seconds(): number;
minutes(): number;
hours(): number;
days(): number;
weeks(): number;

toString(): string;
valueOf(): number;
valueOf(): number;

isGreaterThan(duration: DurationLike): boolean;
isLessThan(duration: DurationLike): boolean;
isEqualTo(duration: DurationLike): boolean;
isLessThan(duration: DurationLike): boolean;
isEqualTo(duration: DurationLike): boolean;

roundTo(duration: DurationLike): void;

after(date: DateLike): Date;

static since(date: DateLike): Duration;
static until(date: DateLike): Duration;
static since(date: DateLike): Duration;
static until(date: DateLike): Duration;
static between(a: DateLike, b: DateLike): Duration;
static parse(duration: string): Duration;
static fromMicroseconds(us: number): Duration;
static fromNanoseconds(ns: number): Duration;
static parse(duration: string): Duration;
static fromMicroseconds(us: number): Duration;
static fromNanoseconds(ns: number): Duration;

static add(a: Duration, b: Duration): Duration;
static add(a: Duration, b: Duration): Duration;
static subtract(a: Duration, b: Duration): Duration;
static multiply(a: Duration, b: number): Duration;
static multiply(a: number, b: Duration): Duration;
static divide(a: Duration, b: Duration): number;
static abs(d: DurationLike): Duration;
static multiply(a: Duration, b: number): Duration;
static multiply(a: number, b: Duration): Duration;
static divide(a: Duration, b: Duration): number;
static abs(d: DurationLike): Duration;
}
export = Duration;
}
}

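The module uses an export assignment, so it is consumed with an import ... = require(...) statement. A small, made-up usage example of the API declared above:

import Duration = require('duration-js');

const pollInterval = new Duration('10m'); // duration-js parses Go-style duration strings
console.log(pollInterval.seconds());      // 600

const timeout = Duration.add(Duration.minutes(1), Duration.seconds(30));
if (timeout.isLessThan(pollInterval)) {
	console.log('timeout is shorter than the poll interval');
}
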
2
typings/global.d.ts
vendored
@ -1,3 +1,3 @@
interface Dictionary<T> {
[key: string]: T;
}
}

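Dictionary<T> is the global shorthand for a plain string-keyed map, for example (values borrowed from the config test above):

const conf: Dictionary<string> = {
	BALENA_SUPERVISOR_POLL_INTERVAL: '100',
	BALENA_HOST_CONFIG_foo: 'foobar',
};
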
3
typings/json-mask.d.ts
vendored
@ -1,5 +1,4 @@
declare module 'json-mask' {

function mask(obj: Dictionary<any>, mask: string): Dictionary<any>;

// These types are not strictly correct, but they don't need to be for our usage
@ -9,4 +8,4 @@ declare module 'json-mask' {
}

export = mask;
}
}

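An illustrative call against these typings; the object and the field mask are made up:

import mask = require('json-mask');

const state = { status: 'Idle', commit: 'deadbeef', download_progress: 50 };
// Keep only the whitelisted top-level fields
const filtered = mask(state, 'status,commit');
// => { status: 'Idle', commit: 'deadbeef' }
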
2
typings/mixpanel.d.ts
vendored
@ -1 +1 @@
declare module 'mixpanel';
declare module 'mixpanel';

6
typings/typings.d.ts
vendored
@ -1,5 +1,5 @@
// Allow importing of json files with typescript
declare module "*.json" {
const value: { [key: string]: any};
export default value;
declare module '*.json' {
const value: { [key: string]: any };
export default value;
}

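With this wildcard declaration, a JSON file type-checks as a default import of a loosely typed object, for instance (the path is only an example):

import packageJson from '../package.json';

const version: string = packageJson.version;
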
2
typings/zlib.d.ts
vendored
@ -2,4 +2,4 @@ declare module 'zlib' {
export interface ZlibOptions {
writableHighWaterMark: number;
}
}
}

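The augmentation exists so that the option can be passed to a zlib stream; usage would be along these lines, assuming the running Node version honours writableHighWaterMark on zlib streams:

import * as zlib from 'zlib';

// Limit how much data is buffered on the writable side of the gzip stream
// before backpressure applies
const gzip = zlib.createGzip({ writableHighWaterMark: 1024 });
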