balena-os/balena-supervisor (https://github.com/balena-os/balena-supervisor.git)
Remove CoffeeScript tests and all CoffeeScript tools
Closes: #1318
Change-Type: patch
Signed-off-by: Miguel Casqueira <miguel@balena.io>
commit 466c7999db (parent 9c1110af32)
@@ -1,16 +1,10 @@
{
	"*.coffee": [
		"balena-lint"
	],
	"*.ts": [
		"balena-lint --typescript --fix",
	],
	"*.js": [
		"balena-lint --typescript --fix",
	],
	"test/**/*.coffee": [
		"balena-lint --tests"
	],
	"test/**/*.ts": [
		"balena-lint --typescript --no-prettier --tests"
	],
@@ -74,10 +74,10 @@ RUN npm ci --production --no-optional --unsafe-perm --build-from-source --sqlite
	# We also remove the spurious node.dtps, see https://github.com/mapbox/node-sqlite3/issues/861
	&& find . -path '*/coverage/*' -o -path '*/test/*' -o -path '*/.nyc_output/*' \
		-o -name '*.tar.*' -o -name '*.in' -o -name '*.cc' \
		-o -name '*.c' -o -name '*.coffee' -o -name '*.eslintrc' \
		-o -name '*.c' -o -name "*.ts" -o -name '*.eslintrc' \
		-o -name '*.h' -o -name '*.html' -o -name '*.markdown' \
		-o -name '*.md' -o -name '*.patch' -o -name '*.png' \
		-o -name '*.yml' -o -name "*.ts" \
		-o -name '*.yml' \
		-delete \
	&& find . -type f -path '*/node_modules/sqlite3/deps*' -delete \
	&& find . -type f -path '*/node_modules/knex/build*' -delete \
@@ -48,23 +48,25 @@ flock /tmp/balena/updates.lock -c '... (command to run while locked)'

For more examples and explanation of the functionality, check the links to the specific tools above.

#### Javascript and Coffeescript
#### Javascript

Using the [`lockfile` library](https://www.npmjs.com/package/lockfile), the lock can be acquired like in this CoffeeScript example:
```coffeescript
lockFile = require 'lockfile'
Using the [`lockfile` library](https://www.npmjs.com/package/lockfile), the lock can be acquired like in this example:
```javascript
import lockFile from 'lockfile';

lockFile.lock '/tmp/balena/updates.lock', (err) ->
	# A non-null err probably means the supervisor is about to kill us
	throw new Error('Could not acquire lock: ', err) if err?
lockFile.lock('/tmp/balena/updates.lock', function(err) {
	// A non-null err probably means the supervisor is about to kill us
	if (err != null) { throw new Error('Could not acquire lock: ', err); }

	# Here we have the lock, so we can do critical stuff:
	doTheHarlemShake()
	// Here we have the lock, so we can do critical stuff:
	doTheHarlemShake();

	# Now we release the lock, and we can be killed again
	lockFile.unlock '/tmp/balena/updates.lock', (err) ->
		# If err is not null here, something went really wrong
		throw err if err?
	// Now we release the lock, and we can be killed again
	return lockFile.unlock('/tmp/balena/updates.lock', function(err) {
		// If err is not null here, something went really wrong
		if (err != null) { throw err; }
	});
});
```
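The callback style above maps directly onto promises as well. A minimal sketch, assuming Node's built-in `util.promisify` and the same `lockfile` package; the `withUpdateLock` helper and the `doTheHarlemShake` placeholder are illustrative, not part of the library:

```typescript
import * as lockFile from 'lockfile';
import { promisify } from 'util';

// Promisified wrappers around lockfile's callback API
const lock = promisify(lockFile.lock);
const unlock = promisify(lockFile.unlock);

// Placeholder for the application's own critical-section work
async function doTheHarlemShake(): Promise<void> {}

async function withUpdateLock(): Promise<void> {
	// A rejection here probably means the supervisor is about to kill us
	await lock('/tmp/balena/updates.lock');
	try {
		// Here we have the lock, so we can do critical stuff
		await doTheHarlemShake();
	} finally {
		// Now we release the lock, and we can be killed again
		await unlock('/tmp/balena/updates.lock');
	}
}
```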

#### Python
package-lock.json (generated): 15 changed lines
@@ -2452,15 +2452,6 @@
		"resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz",
		"integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c="
	},
	"coffee-loader": {
		"version": "0.9.0",
		"resolved": "https://registry.npmjs.org/coffee-loader/-/coffee-loader-0.9.0.tgz",
		"integrity": "sha512-VSoQ5kWr6Yfjn4RDpVbba2XMs3XG1ZXtLakPRt8dNfUcNU9h+1pocpdUUEd7NK9rLDwrju4yonhxrL8aMr5tww==",
		"dev": true,
		"requires": {
			"loader-utils": "^1.0.2"
		}
	},
	"coffeelint": {
		"version": "1.16.2",
		"resolved": "https://registry.npmjs.org/coffeelint/-/coffeelint-1.16.2.tgz",
@@ -2504,12 +2495,6 @@
			"globals": "^10.1.0"
		}
	},
	"coffeescript": {
		"version": "1.12.7",
		"resolved": "https://registry.npmjs.org/coffeescript/-/coffeescript-1.12.7.tgz",
		"integrity": "sha512-pLXHFxQMPklVoEekowk8b3erNynC+DVJzChxS/LCBBgR6/8AJkHivkm//zbowcfc7BTCAjryuhx6gPqPRfsFoA==",
		"dev": true
	},
	"collection-visit": {
		"version": "1.0.0",
		"resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz",
package.json: 17 changed lines
@@ -9,22 +9,19 @@
	},
	"scripts": {
		"start": "./entry.sh",
		"build": "npm run typescript:release && webpack",
		"build:debug": "npm run typescript:release && npm run packagejson:copy",
		"lint": "npm run lint:coffee && npm run lint:typescript",
		"build": "npm run release && webpack",
		"build:debug": "npm run release && npm run packagejson:copy",
		"lint": "balena-lint -e ts -e js --typescript src/ test/ typings/ build-utils/ && tsc --noEmit && tsc --noEmit --project tsconfig.js.json",
		"test": "npm run lint && npm run test-nolint",
		"test-nolint": "npm run test:build && TEST=1 mocha",
		"test:build": "npm run typescript:test-build && npm run coffeescript:test && npm run testitems:copy && npm run packagejson:copy",
		"test:build": "npm run test-build && npm run testitems:copy && npm run packagejson:copy",
		"test:fast": "TEST=1 mocha --opts test/fast-mocha.opts",
		"test:debug": "npm run test:build && TEST=1 mocha --inspect-brk",
		"prettify": "balena-lint -e ts -e js --typescript --fix src/ test/ typings/ build-utils/",
		"typescript:test-build": "tsc --project tsconfig.json",
		"typescript:release": "tsc --project tsconfig.release.json && cp -r build/src/* build && rm -rf build/src",
		"coffeescript:test": "coffee -m -c -o build .",
		"test-build": "tsc --project tsconfig.json",
		"release": "tsc --project tsconfig.release.json && cp -r build/src/* build && rm -rf build/src",
		"packagejson:copy": "cp package.json build/",
		"testitems:copy": "cp -r test/data build/test/",
		"lint:coffee": "balena-lint test/",
		"lint:typescript": "balena-lint -e ts -e js --typescript src/ test/ typings/ build-utils/ && tsc --noEmit && tsc --noEmit --project tsconfig.js.json",
		"sync": "ts-node sync/sync.ts"
	},
	"private": true,
@@ -73,8 +70,6 @@
		"chai-as-promised": "^7.1.1",
		"chai-events": "0.0.1",
		"chokidar": "^3.3.1",
		"coffee-loader": "^0.9.0",
		"coffeescript": "^1.12.7",
		"common-tags": "^1.8.0",
		"copy-webpack-plugin": "^5.1.1",
		"deep-object-diff": "^1.1.0",
@@ -3,7 +3,7 @@
// It's a bit ugly for a migration (it's unusual that migrations check for existence of tables and columns)
// but being the first migration for a legacy system, this is the easiest way to bring the db
// to a known schema to start doing proper migrations afterwards.
// For reference, compare this to db.coffee in old supervisors (e.g. v6.4.2), but consider we've added
// For reference, compare this to db.ts in old supervisors (e.g. v6.4.2), but consider we've added
// a few dropColumn and dropTable calls to delete things that were removed throughout the supervisor's
// history without actually adding drop statements (mostly just becoming unused, but still there).
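The pattern that comment describes (probe for what already exists, then only create or alter what is missing) looks roughly like the sketch below. This is a minimal illustration assuming knex's schema API; the `config` table and `value` column are placeholder names, not the supervisor's actual schema:

```typescript
import * as Knex from 'knex';

// Hypothetical baseline migration: check what already exists and add only
// what is missing, so any legacy database converges to one known schema.
export async function up(knex: Knex): Promise<void> {
	if (!(await knex.schema.hasTable('config'))) {
		await knex.schema.createTable('config', (t) => {
			t.string('key').primary();
			t.string('value');
		});
	} else if (!(await knex.schema.hasColumn('config', 'value'))) {
		await knex.schema.table('config', (t) => {
			t.string('value');
		});
	}
}

export async function down(): Promise<void> {
	// A baseline migration like this is not meant to be rolled back
	throw new Error('Not implemented');
}
```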
@@ -1,353 +0,0 @@
|
||||
Promise = require 'bluebird'
|
||||
{ fs, child_process } = require 'mz'
|
||||
|
||||
{ expect } = require './lib/chai-config'
|
||||
{ stub, spy } = require 'sinon'
|
||||
|
||||
prepare = require './lib/prepare'
|
||||
fsUtils = require '../src/lib/fs-utils'
|
||||
|
||||
{ DeviceConfig } = require '../src/device-config'
|
||||
{ ExtlinuxConfigBackend, RPiConfigBackend } = require '../src/config/backend'
|
||||
|
||||
extlinuxBackend = new ExtlinuxConfigBackend()
|
||||
rpiConfigBackend = new RPiConfigBackend()
|
||||
|
||||
describe 'DeviceConfig', ->
|
||||
before ->
|
||||
prepare()
|
||||
@fakeDB = {}
|
||||
@fakeConfig = {
|
||||
get: (key) ->
|
||||
Promise.try ->
|
||||
if key == 'deviceType'
|
||||
return 'raspberrypi3'
|
||||
else
|
||||
throw new Error('Unknown fake config key')
|
||||
}
|
||||
@fakeLogger = {
|
||||
logSystemMessage: spy()
|
||||
}
|
||||
@deviceConfig = new DeviceConfig({ logger: @fakeLogger, db: @fakeDB, config: @fakeConfig })
|
||||
|
||||
|
||||
# Test that the format for special values like initramfs and array variables is parsed correctly
|
||||
it 'allows getting boot config with getBootConfig', ->
|
||||
|
||||
stub(fs, 'readFile').resolves('\
|
||||
initramfs initramf.gz 0x00800000\n\
|
||||
dtparam=i2c=on\n\
|
||||
dtparam=audio=on\n\
|
||||
dtoverlay=ads7846\n\
|
||||
dtoverlay=lirc-rpi,gpio_out_pin=17,gpio_in_pin=13\n\
|
||||
foobar=baz\n\
|
||||
')
|
||||
@deviceConfig.getBootConfig(rpiConfigBackend)
|
||||
.then (conf) ->
|
||||
fs.readFile.restore()
|
||||
expect(conf).to.deep.equal({
|
||||
HOST_CONFIG_initramfs: 'initramf.gz 0x00800000'
|
||||
HOST_CONFIG_dtparam: '"i2c=on","audio=on"'
|
||||
HOST_CONFIG_dtoverlay: '"ads7846","lirc-rpi,gpio_out_pin=17,gpio_in_pin=13"'
|
||||
HOST_CONFIG_foobar: 'baz'
|
||||
})
|
||||
|
||||
it 'properly reads a real config.txt file', ->
|
||||
@deviceConfig.getBootConfig(rpiConfigBackend)
|
||||
.then (conf) ->
|
||||
expect(conf).to.deep.equal({
|
||||
HOST_CONFIG_dtparam: '"i2c_arm=on","spi=on","audio=on"'
|
||||
HOST_CONFIG_enable_uart: '1'
|
||||
HOST_CONFIG_disable_splash: '1'
|
||||
HOST_CONFIG_avoid_warnings: '1'
|
||||
HOST_CONFIG_gpu_mem: '16'
|
||||
})
|
||||
|
||||
# Test that the format for special values like initramfs and array variables is preserved
|
||||
it 'does not allow setting forbidden keys', ->
|
||||
current = {
|
||||
HOST_CONFIG_initramfs: 'initramf.gz 0x00800000'
|
||||
HOST_CONFIG_dtparam: '"i2c=on","audio=on"'
|
||||
HOST_CONFIG_dtoverlay: '"ads7846","lirc-rpi,gpio_out_pin=17,gpio_in_pin=13"'
|
||||
HOST_CONFIG_foobar: 'baz'
|
||||
}
|
||||
target = {
|
||||
HOST_CONFIG_initramfs: 'initramf.gz 0x00810000'
|
||||
HOST_CONFIG_dtparam: '"i2c=on","audio=on"'
|
||||
HOST_CONFIG_dtoverlay: '"ads7846","lirc-rpi,gpio_out_pin=17,gpio_in_pin=13"'
|
||||
HOST_CONFIG_foobar: 'baz'
|
||||
}
|
||||
promise = Promise.try =>
|
||||
@deviceConfig.bootConfigChangeRequired(rpiConfigBackend, current, target)
|
||||
expect(promise).to.be.rejected
|
||||
promise.catch (err) =>
|
||||
expect(@fakeLogger.logSystemMessage).to.be.calledOnce
|
||||
expect(@fakeLogger.logSystemMessage).to.be.calledWith('Attempt to change blacklisted config value initramfs', {
|
||||
error: 'Attempt to change blacklisted config value initramfs'
|
||||
}, 'Apply boot config error')
|
||||
@fakeLogger.logSystemMessage.resetHistory()
|
||||
|
||||
it 'does not try to change config.txt if it should not change', ->
|
||||
current = {
|
||||
HOST_CONFIG_initramfs: 'initramf.gz 0x00800000'
|
||||
HOST_CONFIG_dtparam: '"i2c=on","audio=on"'
|
||||
HOST_CONFIG_dtoverlay: '"ads7846","lirc-rpi,gpio_out_pin=17,gpio_in_pin=13"'
|
||||
HOST_CONFIG_foobar: 'baz'
|
||||
}
|
||||
target = {
|
||||
HOST_CONFIG_initramfs: 'initramf.gz 0x00800000'
|
||||
HOST_CONFIG_dtparam: '"i2c=on","audio=on"'
|
||||
HOST_CONFIG_dtoverlay: '"ads7846","lirc-rpi,gpio_out_pin=17,gpio_in_pin=13"'
|
||||
HOST_CONFIG_foobar: 'baz'
|
||||
}
|
||||
promise = Promise.try =>
|
||||
@deviceConfig.bootConfigChangeRequired(rpiConfigBackend, current, target)
|
||||
expect(promise).to.eventually.equal(false)
|
||||
promise.then =>
|
||||
expect(@fakeLogger.logSystemMessage).to.not.be.called
|
||||
@fakeLogger.logSystemMessage.resetHistory()
|
||||
|
||||
it 'writes the target config.txt', ->
|
||||
stub(fsUtils, 'writeFileAtomic').resolves()
|
||||
stub(child_process, 'exec').resolves()
|
||||
current = {
|
||||
HOST_CONFIG_initramfs: 'initramf.gz 0x00800000'
|
||||
HOST_CONFIG_dtparam: '"i2c=on","audio=on"'
|
||||
HOST_CONFIG_dtoverlay: '"ads7846","lirc-rpi,gpio_out_pin=17,gpio_in_pin=13"'
|
||||
HOST_CONFIG_foobar: 'baz'
|
||||
}
|
||||
target = {
|
||||
HOST_CONFIG_initramfs: 'initramf.gz 0x00800000'
|
||||
HOST_CONFIG_dtparam: '"i2c=on","audio=off"'
|
||||
HOST_CONFIG_dtoverlay: '"lirc-rpi,gpio_out_pin=17,gpio_in_pin=13"'
|
||||
HOST_CONFIG_foobar: 'bat'
|
||||
HOST_CONFIG_foobaz: 'bar'
|
||||
}
|
||||
promise = Promise.try =>
|
||||
@deviceConfig.bootConfigChangeRequired(rpiConfigBackend, current, target)
|
||||
expect(promise).to.eventually.equal(true)
|
||||
promise.then =>
|
||||
@deviceConfig.setBootConfig(rpiConfigBackend, target)
|
||||
.then =>
|
||||
expect(child_process.exec).to.be.calledOnce
|
||||
expect(@fakeLogger.logSystemMessage).to.be.calledTwice
|
||||
expect(@fakeLogger.logSystemMessage.getCall(1).args[2]).to.equal('Apply boot config success')
|
||||
expect(fsUtils.writeFileAtomic).to.be.calledWith('./test/data/mnt/boot/config.txt', '\
|
||||
initramfs initramf.gz 0x00800000\n\
|
||||
dtparam=i2c=on\n\
|
||||
dtparam=audio=off\n\
|
||||
dtoverlay=lirc-rpi,gpio_out_pin=17,gpio_in_pin=13\n\
|
||||
foobar=bat\n\
|
||||
foobaz=bar\n\
|
||||
')
|
||||
fsUtils.writeFileAtomic.restore()
|
||||
child_process.exec.restore()
|
||||
@fakeLogger.logSystemMessage.resetHistory()
|
||||
|
||||
it 'accepts RESIN_ and BALENA_ variables', ->
|
||||
@deviceConfig.formatConfigKeys({
|
||||
FOO: 'bar',
|
||||
BAR: 'baz',
|
||||
RESIN_HOST_CONFIG_foo: 'foobaz',
|
||||
BALENA_HOST_CONFIG_foo: 'foobar',
|
||||
RESIN_HOST_CONFIG_other: 'val',
|
||||
BALENA_HOST_CONFIG_baz: 'bad',
|
||||
BALENA_SUPERVISOR_POLL_INTERVAL: '100',
|
||||
}).then (filteredConf) ->
|
||||
expect(filteredConf).to.deep.equal({
|
||||
HOST_CONFIG_foo: 'foobar',
|
||||
HOST_CONFIG_other: 'val',
|
||||
HOST_CONFIG_baz: 'bad',
|
||||
SUPERVISOR_POLL_INTERVAL: '100',
|
||||
})
|
||||
|
||||
it 'returns default configuration values', ->
|
||||
conf = @deviceConfig.getDefaults()
|
||||
expect(conf).to.deep.equal({
|
||||
SUPERVISOR_VPN_CONTROL: 'true'
|
||||
SUPERVISOR_POLL_INTERVAL: '60000',
|
||||
SUPERVISOR_LOCAL_MODE: 'false',
|
||||
SUPERVISOR_CONNECTIVITY_CHECK: 'true',
|
||||
SUPERVISOR_LOG_CONTROL: 'true',
|
||||
SUPERVISOR_DELTA: 'false',
|
||||
SUPERVISOR_DELTA_REQUEST_TIMEOUT: '30000',
|
||||
SUPERVISOR_DELTA_APPLY_TIMEOUT: '0',
|
||||
SUPERVISOR_DELTA_RETRY_COUNT: '30',
|
||||
SUPERVISOR_DELTA_RETRY_INTERVAL: '10000',
|
||||
SUPERVISOR_DELTA_VERSION: '2',
|
||||
SUPERVISOR_INSTANT_UPDATE_TRIGGER: 'true',
|
||||
SUPERVISOR_OVERRIDE_LOCK: 'false',
|
||||
SUPERVISOR_PERSISTENT_LOGGING: 'false',
|
||||
})
|
||||
|
||||
describe 'Extlinux files', ->
|
||||
|
||||
it 'should correctly write to extlinux.conf files', ->
|
||||
stub(fsUtils, 'writeFileAtomic').resolves()
|
||||
stub(child_process, 'exec').resolves()
|
||||
|
||||
current = {
|
||||
}
|
||||
target = {
|
||||
HOST_EXTLINUX_isolcpus: '2'
|
||||
}
|
||||
|
||||
promise = Promise.try =>
|
||||
@deviceConfig.bootConfigChangeRequired(extlinuxBackend, current, target)
|
||||
expect(promise).to.eventually.equal(true)
|
||||
promise.then =>
|
||||
@deviceConfig.setBootConfig(extlinuxBackend, target)
|
||||
.then =>
|
||||
expect(child_process.exec).to.be.calledOnce
|
||||
expect(@fakeLogger.logSystemMessage).to.be.calledTwice
|
||||
expect(@fakeLogger.logSystemMessage.getCall(1).args[2]).to.equal('Apply boot config success')
|
||||
expect(fsUtils.writeFileAtomic).to.be.calledWith('./test/data/mnt/boot/extlinux/extlinux.conf', '\
|
||||
DEFAULT primary\n\
|
||||
TIMEOUT 30\n\
|
||||
MENU TITLE Boot Options\n\
|
||||
LABEL primary\n\
|
||||
MENU LABEL primary Image\n\
|
||||
LINUX /Image\n\
|
||||
APPEND ${cbootargs} ${resin_kernel_root} ro rootwait isolcpus=2\n\
|
||||
')
|
||||
fsUtils.writeFileAtomic.restore()
|
||||
child_process.exec.restore()
|
||||
@fakeLogger.logSystemMessage.resetHistory()
|
||||
|
||||
describe 'Balena fin', ->
|
||||
it 'should always add the balena-fin dtoverlay', ->
|
||||
expect(DeviceConfig.ensureRequiredOverlay('fincm3', {})).to.deep.equal({ dtoverlay: ['balena-fin'] })
|
||||
expect(DeviceConfig.ensureRequiredOverlay('fincm3', { test: '123', test2: ['123'], test3: ['123', '234'] })).to
|
||||
.deep.equal({ test: '123', test2: ['123'], test3: ['123', '234'], dtoverlay: ['balena-fin'] })
|
||||
expect(DeviceConfig.ensureRequiredOverlay('fincm3', { dtoverlay: 'test' })).to.deep.equal({ dtoverlay: ['test', 'balena-fin'] })
|
||||
expect(DeviceConfig.ensureRequiredOverlay('fincm3', { dtoverlay: ['test'] })).to.deep.equal({ dtoverlay: ['test', 'balena-fin'] })
|
||||
|
||||
it 'should not cause a config change when the cloud does not specify the balena-fin overlay', ->
|
||||
expect(@deviceConfig.bootConfigChangeRequired(
|
||||
rpiConfigBackend,
|
||||
{ HOST_CONFIG_dtoverlay: '"test","balena-fin"' },
|
||||
{ HOST_CONFIG_dtoverlay: '"test"' },
|
||||
'fincm3'
|
||||
)).to.equal(false)
|
||||
|
||||
expect(@deviceConfig.bootConfigChangeRequired(
|
||||
rpiConfigBackend,
|
||||
{ HOST_CONFIG_dtoverlay: '"test","balena-fin"' },
|
||||
{ HOST_CONFIG_dtoverlay: 'test' },
|
||||
'fincm3'
|
||||
)).to.equal(false)
|
||||
|
||||
expect(@deviceConfig.bootConfigChangeRequired(
|
||||
rpiConfigBackend,
|
||||
{ HOST_CONFIG_dtoverlay: '"test","test2","balena-fin"' },
|
||||
{ HOST_CONFIG_dtoverlay: '"test","test2"' },
|
||||
'fincm3'
|
||||
)).to.equal(false)
|
||||
|
||||
describe 'Raspberry pi4', ->
|
||||
it 'should always add the vc4-fkms-v3d dtoverlay', ->
|
||||
expect(DeviceConfig.ensureRequiredOverlay('raspberrypi4-64', {})).to.deep.equal({ dtoverlay: ['vc4-fkms-v3d'] })
|
||||
expect(DeviceConfig.ensureRequiredOverlay('raspberrypi4-64', { test: '123', test2: ['123'], test3: ['123', '234'] })).to
|
||||
.deep.equal({ test: '123', test2: ['123'], test3: ['123', '234'], dtoverlay: ['vc4-fkms-v3d'] })
|
||||
expect(DeviceConfig.ensureRequiredOverlay('raspberrypi4-64', { dtoverlay: 'test' })).to.deep.equal({ dtoverlay: ['test', 'vc4-fkms-v3d'] })
|
||||
expect(DeviceConfig.ensureRequiredOverlay('raspberrypi4-64', { dtoverlay: ['test'] })).to.deep.equal({ dtoverlay: ['test', 'vc4-fkms-v3d'] })
|
||||
|
||||
it 'should not cause a config change when the cloud does not specify the pi4 overlay', ->
|
||||
expect(@deviceConfig.bootConfigChangeRequired(
|
||||
rpiConfigBackend,
|
||||
{ HOST_CONFIG_dtoverlay: '"test","vc4-fkms-v3d"' },
|
||||
{ HOST_CONFIG_dtoverlay: '"test"' },
|
||||
'raspberrypi4-64'
|
||||
)).to.equal(false)
|
||||
|
||||
expect(@deviceConfig.bootConfigChangeRequired(
|
||||
rpiConfigBackend,
|
||||
{ HOST_CONFIG_dtoverlay: '"test","vc4-fkms-v3d"' },
|
||||
{ HOST_CONFIG_dtoverlay: 'test' },
|
||||
'raspberrypi4-64'
|
||||
)).to.equal(false)
|
||||
|
||||
expect(@deviceConfig.bootConfigChangeRequired(
|
||||
rpiConfigBackend,
|
||||
{ HOST_CONFIG_dtoverlay: '"test","test2","vc4-fkms-v3d"' },
|
||||
{ HOST_CONFIG_dtoverlay: '"test","test2"' },
|
||||
'raspberrypi4-64'
|
||||
)).to.equal(false)
|
||||
|
||||
describe 'ConfigFS', ->
|
||||
before ->
|
||||
fakeConfig = {
|
||||
get: (key) ->
|
||||
Promise.try ->
|
||||
return 'up-board' if key == 'deviceType'
|
||||
throw new Error('Unknown fake config key')
|
||||
}
|
||||
@upboardConfig = new DeviceConfig({ logger: @fakeLogger, db: @fakeDB, config: fakeConfig })
|
||||
|
||||
stub(child_process, 'exec').resolves()
|
||||
stub(fs, 'exists').callsFake ->
|
||||
return true
|
||||
stub(fs, 'mkdir').resolves()
|
||||
stub(fs, 'readdir').callsFake ->
|
||||
return []
|
||||
stub(fs, 'readFile').callsFake (file) ->
|
||||
return JSON.stringify({
|
||||
ssdt: ['spidev1,1']
|
||||
}) if file == 'test/data/mnt/boot/configfs.json'
|
||||
|
||||
return ''
|
||||
stub(fsUtils, 'writeFileAtomic').resolves()
|
||||
|
||||
Promise.try =>
|
||||
@upboardConfig.getConfigBackend()
|
||||
.then (backend) =>
|
||||
@upboardConfigBackend = backend
|
||||
expect(@upboardConfigBackend).is.not.null
|
||||
expect(child_process.exec.callCount).to.equal(3, 'exec not called enough times')
|
||||
|
||||
it 'should correctly load the configfs.json file', ->
|
||||
expect(child_process.exec).to.be.calledWith('modprobe acpi_configfs')
|
||||
expect(child_process.exec).to.be.calledWith('cat test/data/boot/acpi-tables/spidev1,1.aml > test/data/sys/kernel/config/acpi/table/spidev1,1/aml')
|
||||
|
||||
expect(fs.exists.callCount).to.equal(2)
|
||||
expect(fs.readFile.callCount).to.equal(4)
|
||||
|
||||
it 'should correctly write the configfs.json file', ->
|
||||
current = {
|
||||
}
|
||||
target = {
|
||||
HOST_CONFIGFS_ssdt: 'spidev1,1'
|
||||
}
|
||||
|
||||
@fakeLogger.logSystemMessage.resetHistory()
|
||||
child_process.exec.resetHistory()
|
||||
fs.exists.resetHistory()
|
||||
fs.mkdir.resetHistory()
|
||||
fs.readdir.resetHistory()
|
||||
fs.readFile.resetHistory()
|
||||
|
||||
Promise.try =>
|
||||
expect(@upboardConfigBackend).is.not.null
|
||||
@upboardConfig.bootConfigChangeRequired(@upboardConfigBackend, current, target)
|
||||
.then =>
|
||||
@upboardConfig.setBootConfig(@upboardConfigBackend, target)
|
||||
.then =>
|
||||
expect(child_process.exec).to.be.calledOnce
|
||||
expect(fsUtils.writeFileAtomic).to.be.calledWith('test/data/mnt/boot/configfs.json', JSON.stringify({
|
||||
ssdt: ['spidev1,1']
|
||||
}))
|
||||
expect(@fakeLogger.logSystemMessage).to.be.calledTwice
|
||||
expect(@fakeLogger.logSystemMessage.getCall(1).args[2]).to.equal('Apply boot config success')
|
||||
|
||||
after ->
|
||||
child_process.exec.restore()
|
||||
fs.exists.restore()
|
||||
fs.mkdir.restore()
|
||||
fs.readdir.restore()
|
||||
fs.readFile.restore()
|
||||
fsUtils.writeFileAtomic.restore()
|
||||
@fakeLogger.logSystemMessage.resetHistory()
|
||||
|
||||
|
||||
# This will require stubbing device.reboot, gosuper.post, config.get/set
|
||||
it 'applies the target state'
|
test/13-device-config.spec.ts (new file, 511 lines)
@@ -0,0 +1,511 @@
|
||||
import { Promise } from 'bluebird';
|
||||
import { stripIndent } from 'common-tags';
|
||||
import { child_process, fs } from 'mz';
|
||||
import { SinonSpy, SinonStub, spy, stub } from 'sinon';
|
||||
|
||||
import { ExtlinuxConfigBackend, RPiConfigBackend } from '../src/config/backend';
|
||||
import { DeviceConfig } from '../src/device-config';
|
||||
import * as fsUtils from '../src/lib/fs-utils';
|
||||
import { expect } from './lib/chai-config';
|
||||
|
||||
import prepare = require('./lib/prepare');
|
||||
|
||||
const extlinuxBackend = new ExtlinuxConfigBackend();
|
||||
const rpiConfigBackend = new RPiConfigBackend();
|
||||
|
||||
describe('DeviceConfig', function () {
|
||||
before(function () {
|
||||
prepare();
|
||||
this.fakeDB = {};
|
||||
this.fakeConfig = {
|
||||
get(key: string) {
|
||||
return Promise.try(function () {
|
||||
if (key === 'deviceType') {
|
||||
return 'raspberrypi3';
|
||||
} else {
|
||||
throw new Error('Unknown fake config key');
|
||||
}
|
||||
});
|
||||
},
|
||||
};
|
||||
this.fakeLogger = {
|
||||
logSystemMessage: spy(),
|
||||
};
|
||||
return (this.deviceConfig = new DeviceConfig({
|
||||
logger: this.fakeLogger,
|
||||
db: this.fakeDB,
|
||||
config: this.fakeConfig,
|
||||
}));
|
||||
});
|
||||
|
||||
// Test that the format for special values like initramfs and array variables is parsed correctly
|
||||
it('allows getting boot config with getBootConfig', function () {
|
||||
stub(fs, 'readFile').resolves(stripIndent`
|
||||
initramfs initramf.gz 0x00800000\n\
|
||||
dtparam=i2c=on\n\
|
||||
dtparam=audio=on\n\
|
||||
dtoverlay=ads7846\n\
|
||||
dtoverlay=lirc-rpi,gpio_out_pin=17,gpio_in_pin=13\n\
|
||||
foobar=baz\n\
|
||||
`);
|
||||
return this.deviceConfig
|
||||
.getBootConfig(rpiConfigBackend)
|
||||
.then(function (conf: any) {
|
||||
(fs.readFile as SinonStub).restore();
|
||||
return expect(conf).to.deep.equal({
|
||||
HOST_CONFIG_initramfs: 'initramf.gz 0x00800000',
|
||||
HOST_CONFIG_dtparam: '"i2c=on","audio=on"',
|
||||
HOST_CONFIG_dtoverlay:
|
||||
'"ads7846","lirc-rpi,gpio_out_pin=17,gpio_in_pin=13"',
|
||||
HOST_CONFIG_foobar: 'baz',
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('properly reads a real config.txt file', function () {
|
||||
return this.deviceConfig.getBootConfig(rpiConfigBackend).then((conf: any) =>
|
||||
expect(conf).to.deep.equal({
|
||||
HOST_CONFIG_dtparam: '"i2c_arm=on","spi=on","audio=on"',
|
||||
HOST_CONFIG_enable_uart: '1',
|
||||
HOST_CONFIG_disable_splash: '1',
|
||||
HOST_CONFIG_avoid_warnings: '1',
|
||||
HOST_CONFIG_gpu_mem: '16',
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
// Test that the format for special values like initramfs and array variables is preserved
|
||||
it('does not allow setting forbidden keys', function () {
|
||||
const current = {
|
||||
HOST_CONFIG_initramfs: 'initramf.gz 0x00800000',
|
||||
HOST_CONFIG_dtparam: '"i2c=on","audio=on"',
|
||||
HOST_CONFIG_dtoverlay:
|
||||
'"ads7846","lirc-rpi,gpio_out_pin=17,gpio_in_pin=13"',
|
||||
HOST_CONFIG_foobar: 'baz',
|
||||
};
|
||||
const target = {
|
||||
HOST_CONFIG_initramfs: 'initramf.gz 0x00810000',
|
||||
HOST_CONFIG_dtparam: '"i2c=on","audio=on"',
|
||||
HOST_CONFIG_dtoverlay:
|
||||
'"ads7846","lirc-rpi,gpio_out_pin=17,gpio_in_pin=13"',
|
||||
HOST_CONFIG_foobar: 'baz',
|
||||
};
|
||||
const promise = Promise.try(() => {
|
||||
return this.deviceConfig.bootConfigChangeRequired(
|
||||
rpiConfigBackend,
|
||||
current,
|
||||
target,
|
||||
);
|
||||
});
|
||||
expect(promise).to.be.rejected;
|
||||
return promise.catch((_err) => {
|
||||
expect(this.fakeLogger.logSystemMessage).to.be.calledOnce;
|
||||
expect(this.fakeLogger.logSystemMessage).to.be.calledWith(
|
||||
'Attempt to change blacklisted config value initramfs',
|
||||
{
|
||||
error: 'Attempt to change blacklisted config value initramfs',
|
||||
},
|
||||
'Apply boot config error',
|
||||
);
|
||||
return this.fakeLogger.logSystemMessage.resetHistory();
|
||||
});
|
||||
});
|
||||
|
||||
it('does not try to change config.txt if it should not change', function () {
|
||||
const current = {
|
||||
HOST_CONFIG_initramfs: 'initramf.gz 0x00800000',
|
||||
HOST_CONFIG_dtparam: '"i2c=on","audio=on"',
|
||||
HOST_CONFIG_dtoverlay:
|
||||
'"ads7846","lirc-rpi,gpio_out_pin=17,gpio_in_pin=13"',
|
||||
HOST_CONFIG_foobar: 'baz',
|
||||
};
|
||||
const target = {
|
||||
HOST_CONFIG_initramfs: 'initramf.gz 0x00800000',
|
||||
HOST_CONFIG_dtparam: '"i2c=on","audio=on"',
|
||||
HOST_CONFIG_dtoverlay:
|
||||
'"ads7846","lirc-rpi,gpio_out_pin=17,gpio_in_pin=13"',
|
||||
HOST_CONFIG_foobar: 'baz',
|
||||
};
|
||||
const promise = Promise.try(() => {
|
||||
return this.deviceConfig.bootConfigChangeRequired(
|
||||
rpiConfigBackend,
|
||||
current,
|
||||
target,
|
||||
);
|
||||
});
|
||||
expect(promise).to.eventually.equal(false);
|
||||
return promise.then(() => {
|
||||
expect(this.fakeLogger.logSystemMessage).to.not.be.called;
|
||||
return this.fakeLogger.logSystemMessage.resetHistory();
|
||||
});
|
||||
});
|
||||
|
||||
it('writes the target config.txt', function () {
|
||||
stub(fsUtils, 'writeFileAtomic').resolves();
|
||||
stub(child_process, 'exec').resolves();
|
||||
const current = {
|
||||
HOST_CONFIG_initramfs: 'initramf.gz 0x00800000',
|
||||
HOST_CONFIG_dtparam: '"i2c=on","audio=on"',
|
||||
HOST_CONFIG_dtoverlay:
|
||||
'"ads7846","lirc-rpi,gpio_out_pin=17,gpio_in_pin=13"',
|
||||
HOST_CONFIG_foobar: 'baz',
|
||||
};
|
||||
const target = {
|
||||
HOST_CONFIG_initramfs: 'initramf.gz 0x00800000',
|
||||
HOST_CONFIG_dtparam: '"i2c=on","audio=off"',
|
||||
HOST_CONFIG_dtoverlay: '"lirc-rpi,gpio_out_pin=17,gpio_in_pin=13"',
|
||||
HOST_CONFIG_foobar: 'bat',
|
||||
HOST_CONFIG_foobaz: 'bar',
|
||||
};
|
||||
const promise = Promise.try(() => {
|
||||
return this.deviceConfig.bootConfigChangeRequired(
|
||||
rpiConfigBackend,
|
||||
current,
|
||||
target,
|
||||
);
|
||||
});
|
||||
expect(promise).to.eventually.equal(true);
|
||||
return promise.then(() => {
|
||||
return this.deviceConfig
|
||||
.setBootConfig(rpiConfigBackend, target)
|
||||
.then(() => {
|
||||
expect(child_process.exec).to.be.calledOnce;
|
||||
expect(this.fakeLogger.logSystemMessage).to.be.calledTwice;
|
||||
expect(this.fakeLogger.logSystemMessage.getCall(1).args[2]).to.equal(
|
||||
'Apply boot config success',
|
||||
);
|
||||
expect(fsUtils.writeFileAtomic).to.be.calledWith(
|
||||
'./test/data/mnt/boot/config.txt',
|
||||
`\
|
||||
initramfs initramf.gz 0x00800000\n\
|
||||
dtparam=i2c=on\n\
|
||||
dtparam=audio=off\n\
|
||||
dtoverlay=lirc-rpi,gpio_out_pin=17,gpio_in_pin=13\n\
|
||||
foobar=bat\n\
|
||||
foobaz=bar\n\
|
||||
`,
|
||||
);
|
||||
(fsUtils.writeFileAtomic as SinonStub).restore();
|
||||
(child_process.exec as SinonStub).restore();
|
||||
return this.fakeLogger.logSystemMessage.resetHistory();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('accepts RESIN_ and BALENA_ variables', function () {
|
||||
return this.deviceConfig
|
||||
.formatConfigKeys({
|
||||
FOO: 'bar',
|
||||
BAR: 'baz',
|
||||
RESIN_HOST_CONFIG_foo: 'foobaz',
|
||||
BALENA_HOST_CONFIG_foo: 'foobar',
|
||||
RESIN_HOST_CONFIG_other: 'val',
|
||||
BALENA_HOST_CONFIG_baz: 'bad',
|
||||
BALENA_SUPERVISOR_POLL_INTERVAL: '100',
|
||||
})
|
||||
.then((filteredConf: any) =>
|
||||
expect(filteredConf).to.deep.equal({
|
||||
HOST_CONFIG_foo: 'foobar',
|
||||
HOST_CONFIG_other: 'val',
|
||||
HOST_CONFIG_baz: 'bad',
|
||||
SUPERVISOR_POLL_INTERVAL: '100',
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it('returns default configuration values', function () {
|
||||
const conf = this.deviceConfig.getDefaults();
|
||||
return expect(conf).to.deep.equal({
|
||||
SUPERVISOR_VPN_CONTROL: 'true',
|
||||
SUPERVISOR_POLL_INTERVAL: '60000',
|
||||
SUPERVISOR_LOCAL_MODE: 'false',
|
||||
SUPERVISOR_CONNECTIVITY_CHECK: 'true',
|
||||
SUPERVISOR_LOG_CONTROL: 'true',
|
||||
SUPERVISOR_DELTA: 'false',
|
||||
SUPERVISOR_DELTA_REQUEST_TIMEOUT: '30000',
|
||||
SUPERVISOR_DELTA_APPLY_TIMEOUT: '0',
|
||||
SUPERVISOR_DELTA_RETRY_COUNT: '30',
|
||||
SUPERVISOR_DELTA_RETRY_INTERVAL: '10000',
|
||||
SUPERVISOR_DELTA_VERSION: '2',
|
||||
SUPERVISOR_INSTANT_UPDATE_TRIGGER: 'true',
|
||||
SUPERVISOR_OVERRIDE_LOCK: 'false',
|
||||
SUPERVISOR_PERSISTENT_LOGGING: 'false',
|
||||
});
|
||||
});
|
||||
|
||||
describe('Extlinux files', () =>
|
||||
it('should correctly write to extlinux.conf files', function () {
|
||||
stub(fsUtils, 'writeFileAtomic').resolves();
|
||||
stub(child_process, 'exec').resolves();
|
||||
|
||||
const current = {};
|
||||
const target = {
|
||||
HOST_EXTLINUX_isolcpus: '2',
|
||||
};
|
||||
|
||||
const promise = Promise.try(() => {
|
||||
return this.deviceConfig.bootConfigChangeRequired(
|
||||
extlinuxBackend,
|
||||
current,
|
||||
target,
|
||||
);
|
||||
});
|
||||
expect(promise).to.eventually.equal(true);
|
||||
return promise.then(() => {
|
||||
return this.deviceConfig
|
||||
.setBootConfig(extlinuxBackend, target)
|
||||
.then(() => {
|
||||
expect(child_process.exec).to.be.calledOnce;
|
||||
expect(this.fakeLogger.logSystemMessage).to.be.calledTwice;
|
||||
expect(
|
||||
this.fakeLogger.logSystemMessage.getCall(1).args[2],
|
||||
).to.equal('Apply boot config success');
|
||||
expect(fsUtils.writeFileAtomic).to.be.calledWith(
|
||||
'./test/data/mnt/boot/extlinux/extlinux.conf',
|
||||
`\
|
||||
DEFAULT primary\n\
|
||||
TIMEOUT 30\n\
|
||||
MENU TITLE Boot Options\n\
|
||||
LABEL primary\n\
|
||||
MENU LABEL primary Image\n\
|
||||
LINUX /Image\n\
|
||||
APPEND \${cbootargs} \${resin_kernel_root} ro rootwait isolcpus=2\n\
|
||||
`,
|
||||
);
|
||||
(fsUtils.writeFileAtomic as SinonStub).restore();
|
||||
(child_process.exec as SinonStub).restore();
|
||||
return this.fakeLogger.logSystemMessage.resetHistory();
|
||||
});
|
||||
});
|
||||
}));
|
||||
|
||||
describe('Balena fin', function () {
|
||||
it('should always add the balena-fin dtoverlay', function () {
|
||||
expect(
|
||||
(DeviceConfig as any).ensureRequiredOverlay('fincm3', {}),
|
||||
).to.deep.equal({ dtoverlay: ['balena-fin'] });
|
||||
expect(
|
||||
(DeviceConfig as any).ensureRequiredOverlay('fincm3', {
|
||||
test: '123',
|
||||
test2: ['123'],
|
||||
test3: ['123', '234'],
|
||||
}),
|
||||
).to.deep.equal({
|
||||
test: '123',
|
||||
test2: ['123'],
|
||||
test3: ['123', '234'],
|
||||
dtoverlay: ['balena-fin'],
|
||||
});
|
||||
expect(
|
||||
(DeviceConfig as any).ensureRequiredOverlay('fincm3', {
|
||||
dtoverlay: 'test',
|
||||
}),
|
||||
).to.deep.equal({ dtoverlay: ['test', 'balena-fin'] });
|
||||
return expect(
|
||||
(DeviceConfig as any).ensureRequiredOverlay('fincm3', {
|
||||
dtoverlay: ['test'],
|
||||
}),
|
||||
).to.deep.equal({ dtoverlay: ['test', 'balena-fin'] });
|
||||
});
|
||||
|
||||
return it('should not cause a config change when the cloud does not specify the balena-fin overlay', function () {
|
||||
expect(
|
||||
this.deviceConfig.bootConfigChangeRequired(
|
||||
rpiConfigBackend,
|
||||
{ HOST_CONFIG_dtoverlay: '"test","balena-fin"' },
|
||||
{ HOST_CONFIG_dtoverlay: '"test"' },
|
||||
'fincm3',
|
||||
),
|
||||
).to.equal(false);
|
||||
|
||||
expect(
|
||||
this.deviceConfig.bootConfigChangeRequired(
|
||||
rpiConfigBackend,
|
||||
{ HOST_CONFIG_dtoverlay: '"test","balena-fin"' },
|
||||
{ HOST_CONFIG_dtoverlay: 'test' },
|
||||
'fincm3',
|
||||
),
|
||||
).to.equal(false);
|
||||
|
||||
return expect(
|
||||
this.deviceConfig.bootConfigChangeRequired(
|
||||
rpiConfigBackend,
|
||||
{ HOST_CONFIG_dtoverlay: '"test","test2","balena-fin"' },
|
||||
{ HOST_CONFIG_dtoverlay: '"test","test2"' },
|
||||
'fincm3',
|
||||
),
|
||||
).to.equal(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Raspberry pi4', function () {
|
||||
it('should always add the vc4-fkms-v3d dtoverlay', function () {
|
||||
expect(
|
||||
(DeviceConfig as any).ensureRequiredOverlay('raspberrypi4-64', {}),
|
||||
).to.deep.equal({ dtoverlay: ['vc4-fkms-v3d'] });
|
||||
expect(
|
||||
(DeviceConfig as any).ensureRequiredOverlay('raspberrypi4-64', {
|
||||
test: '123',
|
||||
test2: ['123'],
|
||||
test3: ['123', '234'],
|
||||
}),
|
||||
).to.deep.equal({
|
||||
test: '123',
|
||||
test2: ['123'],
|
||||
test3: ['123', '234'],
|
||||
dtoverlay: ['vc4-fkms-v3d'],
|
||||
});
|
||||
expect(
|
||||
(DeviceConfig as any).ensureRequiredOverlay('raspberrypi4-64', {
|
||||
dtoverlay: 'test',
|
||||
}),
|
||||
).to.deep.equal({ dtoverlay: ['test', 'vc4-fkms-v3d'] });
|
||||
return expect(
|
||||
(DeviceConfig as any).ensureRequiredOverlay('raspberrypi4-64', {
|
||||
dtoverlay: ['test'],
|
||||
}),
|
||||
).to.deep.equal({ dtoverlay: ['test', 'vc4-fkms-v3d'] });
|
||||
});
|
||||
|
||||
return it('should not cause a config change when the cloud does not specify the pi4 overlay', function () {
|
||||
expect(
|
||||
this.deviceConfig.bootConfigChangeRequired(
|
||||
rpiConfigBackend,
|
||||
{ HOST_CONFIG_dtoverlay: '"test","vc4-fkms-v3d"' },
|
||||
{ HOST_CONFIG_dtoverlay: '"test"' },
|
||||
'raspberrypi4-64',
|
||||
),
|
||||
).to.equal(false);
|
||||
|
||||
expect(
|
||||
this.deviceConfig.bootConfigChangeRequired(
|
||||
rpiConfigBackend,
|
||||
{ HOST_CONFIG_dtoverlay: '"test","vc4-fkms-v3d"' },
|
||||
{ HOST_CONFIG_dtoverlay: 'test' },
|
||||
'raspberrypi4-64',
|
||||
),
|
||||
).to.equal(false);
|
||||
|
||||
return expect(
|
||||
this.deviceConfig.bootConfigChangeRequired(
|
||||
rpiConfigBackend,
|
||||
{ HOST_CONFIG_dtoverlay: '"test","test2","vc4-fkms-v3d"' },
|
||||
{ HOST_CONFIG_dtoverlay: '"test","test2"' },
|
||||
'raspberrypi4-64',
|
||||
),
|
||||
).to.equal(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('ConfigFS', function () {
|
||||
before(function () {
|
||||
const fakeConfig = {
|
||||
get(key: string) {
|
||||
return Promise.try(function () {
|
||||
if (key === 'deviceType') {
|
||||
return 'up-board';
|
||||
}
|
||||
throw new Error('Unknown fake config key');
|
||||
});
|
||||
},
|
||||
};
|
||||
this.upboardConfig = new DeviceConfig({
|
||||
logger: this.fakeLogger,
|
||||
db: this.fakeDB,
|
||||
config: fakeConfig as any,
|
||||
});
|
||||
|
||||
stub(child_process, 'exec').resolves();
|
||||
stub(fs, 'exists').callsFake(() => Promise.resolve(true));
|
||||
stub(fs, 'mkdir').resolves();
|
||||
stub(fs, 'readdir').callsFake(() => Promise.resolve([]));
|
||||
stub(fs, 'readFile').callsFake(function (file) {
|
||||
if (file === 'test/data/mnt/boot/configfs.json') {
|
||||
return Promise.resolve(
|
||||
JSON.stringify({
|
||||
ssdt: ['spidev1,1'],
|
||||
}),
|
||||
);
|
||||
}
|
||||
return Promise.resolve('');
|
||||
});
|
||||
stub(fsUtils, 'writeFileAtomic').resolves();
|
||||
|
||||
return Promise.try(() => {
|
||||
return this.upboardConfig.getConfigBackend();
|
||||
}).then((backend) => {
|
||||
this.upboardConfigBackend = backend;
|
||||
expect(this.upboardConfigBackend).is.not.null;
|
||||
return expect((child_process.exec as SinonSpy).callCount).to.equal(
|
||||
3,
|
||||
'exec not called enough times',
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
it('should correctly load the configfs.json file', function () {
|
||||
expect(child_process.exec).to.be.calledWith('modprobe acpi_configfs');
|
||||
expect(child_process.exec).to.be.calledWith(
|
||||
'cat test/data/boot/acpi-tables/spidev1,1.aml > test/data/sys/kernel/config/acpi/table/spidev1,1/aml',
|
||||
);
|
||||
|
||||
expect((fs.exists as SinonSpy).callCount).to.equal(2);
|
||||
return expect((fs.readFile as SinonSpy).callCount).to.equal(4);
|
||||
});
|
||||
|
||||
it('should correctly write the configfs.json file', function () {
|
||||
const current = {};
|
||||
const target = {
|
||||
HOST_CONFIGFS_ssdt: 'spidev1,1',
|
||||
};
|
||||
|
||||
this.fakeLogger.logSystemMessage.resetHistory();
|
||||
(child_process.exec as SinonSpy).resetHistory();
|
||||
(fs.exists as SinonSpy).resetHistory();
|
||||
(fs.mkdir as SinonSpy).resetHistory();
|
||||
(fs.readdir as SinonSpy).resetHistory();
|
||||
(fs.readFile as SinonSpy).resetHistory();
|
||||
|
||||
return Promise.try(() => {
|
||||
expect(this.upboardConfigBackend).is.not.null;
|
||||
return this.upboardConfig.bootConfigChangeRequired(
|
||||
this.upboardConfigBackend,
|
||||
current,
|
||||
target,
|
||||
);
|
||||
})
|
||||
.then(() => {
|
||||
return this.upboardConfig.setBootConfig(
|
||||
this.upboardConfigBackend,
|
||||
target,
|
||||
);
|
||||
})
|
||||
.then(() => {
|
||||
expect(child_process.exec).to.be.calledOnce;
|
||||
expect(fsUtils.writeFileAtomic).to.be.calledWith(
|
||||
'test/data/mnt/boot/configfs.json',
|
||||
JSON.stringify({
|
||||
ssdt: ['spidev1,1'],
|
||||
}),
|
||||
);
|
||||
expect(this.fakeLogger.logSystemMessage).to.be.calledTwice;
|
||||
return expect(
|
||||
this.fakeLogger.logSystemMessage.getCall(1).args[2],
|
||||
).to.equal('Apply boot config success');
|
||||
});
|
||||
});
|
||||
|
||||
return after(function () {
|
||||
(child_process.exec as SinonStub).restore();
|
||||
(fs.exists as SinonStub).restore();
|
||||
(fs.mkdir as SinonStub).restore();
|
||||
(fs.readdir as SinonStub).restore();
|
||||
(fs.readFile as SinonStub).restore();
|
||||
(fsUtils.writeFileAtomic as SinonStub).restore();
|
||||
return this.fakeLogger.logSystemMessage.resetHistory();
|
||||
});
|
||||
});
|
||||
|
||||
// This will require stubbing device.reboot, gosuper.post, config.get/set
|
||||
return it('applies the target state');
|
||||
});
|
@ -1,431 +0,0 @@
|
||||
Promise = require 'bluebird'
|
||||
_ = require 'lodash'
|
||||
|
||||
{ stub } = require 'sinon'
|
||||
chai = require './lib/chai-config'
|
||||
chai.use(require('chai-events'))
|
||||
{ expect } = chai
|
||||
|
||||
prepare = require './lib/prepare'
|
||||
{ DeviceState } = require '../src/device-state'
|
||||
{ DB } = require('../src/db')
|
||||
{ Config } = require('../src/config')
|
||||
{ Service } = require '../src/compose/service'
|
||||
{ Network } = require '../src/compose/network'
|
||||
{ Volume } = require '../src/compose/volume'
|
||||
|
||||
appDBFormatNormalised = {
|
||||
appId: 1234
|
||||
commit: 'bar'
|
||||
releaseId: 2
|
||||
name: 'app'
|
||||
source: 'https://api.resin.io'
|
||||
services: JSON.stringify([
|
||||
{
|
||||
appId: 1234
|
||||
serviceName: 'serv'
|
||||
imageId: 12345
|
||||
environment: { FOO: 'var2' }
|
||||
labels: {}
|
||||
image: 'foo/bar:latest'
|
||||
releaseId: 2
|
||||
serviceId: 4
|
||||
commit: 'bar'
|
||||
}
|
||||
])
|
||||
networks: '{}'
|
||||
volumes: '{}'
|
||||
}
|
||||
|
||||
appStateFormat = {
|
||||
appId: 1234
|
||||
commit: 'bar'
|
||||
releaseId: 2
|
||||
name: 'app'
|
||||
# This technically is not part of the appStateFormat, but in general
|
||||
# usage is added before calling normaliseAppForDB
|
||||
source: 'https://api.resin.io'
|
||||
services: {
|
||||
'4': {
|
||||
appId: 1234
|
||||
serviceName: 'serv'
|
||||
imageId: 12345
|
||||
environment: { FOO: 'var2' }
|
||||
labels: {}
|
||||
image: 'foo/bar:latest'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
appStateFormatNeedsServiceCreate = {
|
||||
appId: 1234
|
||||
commit: 'bar'
|
||||
releaseId: 2
|
||||
name: 'app'
|
||||
services: [
|
||||
{
|
||||
appId: 1234
|
||||
environment: {
|
||||
FOO: 'var2'
|
||||
}
|
||||
imageId: 12345
|
||||
serviceId: 4
|
||||
releaseId: 2
|
||||
serviceName: 'serv'
|
||||
image: 'foo/bar:latest'
|
||||
}
|
||||
]
|
||||
networks: {}
|
||||
volumes: {}
|
||||
}
|
||||
|
||||
dependentStateFormat = {
|
||||
appId: 1234
|
||||
image: 'foo/bar'
|
||||
commit: 'bar'
|
||||
releaseId: 3
|
||||
name: 'app'
|
||||
config: { RESIN_FOO: 'var' }
|
||||
environment: { FOO: 'var2' }
|
||||
parentApp: 256
|
||||
imageId: 45
|
||||
}
|
||||
|
||||
dependentStateFormatNormalised = {
|
||||
appId: 1234
|
||||
image: 'foo/bar:latest'
|
||||
commit: 'bar'
|
||||
releaseId: 3
|
||||
name: 'app'
|
||||
config: { RESIN_FOO: 'var' }
|
||||
environment: { FOO: 'var2' }
|
||||
parentApp: 256
|
||||
imageId: 45
|
||||
}
|
||||
|
||||
currentState = targetState = availableImages = null
|
||||
|
||||
dependentDBFormat = {
|
||||
appId: 1234
|
||||
image: 'foo/bar:latest'
|
||||
commit: 'bar'
|
||||
releaseId: 3
|
||||
name: 'app'
|
||||
config: JSON.stringify({ RESIN_FOO: 'var' })
|
||||
environment: JSON.stringify({ FOO: 'var2' })
|
||||
parentApp: 256
|
||||
imageId: 45
|
||||
}
|
||||
|
||||
describe 'ApplicationManager', ->
|
||||
before ->
|
||||
prepare()
|
||||
@db = new DB()
|
||||
@config = new Config({ @db })
|
||||
eventTracker = {
|
||||
track: console.log
|
||||
}
|
||||
@logger = {
|
||||
clearOutOfDateDBLogs: ->
|
||||
}
|
||||
@deviceState = new DeviceState({ @db, @config, eventTracker, @logger })
|
||||
@applications = @deviceState.applications
|
||||
stub(@applications.images, 'inspectByName').callsFake (imageName) ->
|
||||
Promise.resolve({
|
||||
Config: {
|
||||
Cmd: [ 'someCommand' ]
|
||||
Entrypoint: [ 'theEntrypoint' ]
|
||||
Env: []
|
||||
Labels: {}
|
||||
Volumes: []
|
||||
}
|
||||
})
|
||||
stub(@applications.docker, 'getNetworkGateway').returns(Promise.resolve('172.17.0.1'))
|
||||
stub(@applications.docker, 'listContainers').returns(Promise.resolve([]))
|
||||
stub(@applications.docker, 'listImages').returns(Promise.resolve([]))
|
||||
stub(Service, 'extendEnvVars').callsFake (env) ->
|
||||
env['ADDITIONAL_ENV_VAR'] = 'foo'
|
||||
return env
|
||||
@normaliseCurrent = (current) ->
|
||||
Promise.map current.local.apps, (app) =>
|
||||
Promise.map app.services, (service) ->
|
||||
Service.fromComposeObject(service, { appName: 'test' })
|
||||
.then (normalisedServices) =>
|
||||
appCloned = _.cloneDeep(app)
|
||||
appCloned.services = normalisedServices
|
||||
appCloned.networks = _.mapValues appCloned.networks, (config, name) =>
|
||||
Network.fromComposeObject(
|
||||
name,
|
||||
app.appId,
|
||||
config
|
||||
{ docker: @applications.docker, @logger }
|
||||
)
|
||||
return appCloned
|
||||
.then (normalisedApps) ->
|
||||
currentCloned = _.cloneDeep(current)
|
||||
currentCloned.local.apps = _.keyBy(normalisedApps, 'appId')
|
||||
return currentCloned
|
||||
|
||||
@normaliseTarget = (target, available) =>
|
||||
Promise.map target.local.apps, (app) =>
|
||||
@applications.normaliseAppForDB(app)
|
||||
.then (normalisedApp) =>
|
||||
@applications.normaliseAndExtendAppFromDB(normalisedApp)
|
||||
.then (apps) ->
|
||||
targetCloned = _.cloneDeep(target)
|
||||
# We mock what createTargetService does when an image is available
|
||||
targetCloned.local.apps = _.map apps, (app) ->
|
||||
app.services = _.map app.services, (service) ->
|
||||
img = _.find(available, (i) -> i.name == service.config.image)
|
||||
if img?
|
||||
service.config.image = img.dockerImageId
|
||||
return service
|
||||
return app
|
||||
targetCloned.local.apps = _.keyBy(targetCloned.local.apps, 'appId')
|
||||
return targetCloned
|
||||
@db.init()
|
||||
.then =>
|
||||
@config.init()
|
||||
|
||||
beforeEach ->
|
||||
{ currentState, targetState, availableImages } = require './lib/application-manager-test-states'
|
||||
|
||||
after ->
|
||||
@applications.images.inspectByName.restore()
|
||||
@applications.docker.getNetworkGateway.restore()
|
||||
@applications.docker.listContainers.restore()
|
||||
Service.extendEnvVars.restore()
|
||||
|
||||
it 'should init', ->
|
||||
@applications.init()
|
||||
|
||||
it 'infers a start step when all that changes is a running state', ->
|
||||
Promise.join(
|
||||
@normaliseCurrent(currentState[0])
|
||||
@normaliseTarget(targetState[0], availableImages[0])
|
||||
(current, target) =>
|
||||
steps = @applications._inferNextSteps(false, availableImages[0], [], true, current, target, false, {}, {})
|
||||
expect(steps).to.eventually.deep.equal([{
|
||||
action: 'start'
|
||||
current: current.local.apps['1234'].services[1]
|
||||
target: target.local.apps['1234'].services[1]
|
||||
serviceId: 24
|
||||
appId: 1234
|
||||
options: {}
|
||||
}])
|
||||
)
|
||||
|
||||
it 'infers a kill step when a service has to be removed', ->
|
||||
Promise.join(
|
||||
@normaliseCurrent(currentState[0])
|
||||
@normaliseTarget(targetState[1], availableImages[0])
|
||||
(current, target) =>
|
||||
steps = @applications._inferNextSteps(false, availableImages[0], [], true, current, target, false, {}, {})
|
||||
expect(steps).to.eventually.deep.equal([{
|
||||
action: 'kill'
|
||||
current: current.local.apps['1234'].services[1]
|
||||
target: undefined
|
||||
serviceId: 24
|
||||
appId: 1234
|
||||
options: {}
|
||||
}])
|
||||
)
|
||||
|
||||
it 'infers a fetch step when a service has to be updated', ->
|
||||
Promise.join(
|
||||
@normaliseCurrent(currentState[0])
|
||||
@normaliseTarget(targetState[2], availableImages[0])
|
||||
(current, target) =>
|
||||
steps = @applications._inferNextSteps(false, availableImages[0], [], true, current, target, false, {}, {})
|
||||
expect(steps).to.eventually.deep.equal([{
|
||||
action: 'fetch'
|
||||
image: @applications.imageForService(target.local.apps['1234'].services[1])
|
||||
serviceId: 24
|
||||
appId: 1234
|
||||
serviceName: 'anotherService'
|
||||
}])
|
||||
)
|
||||
|
||||
it 'does not infer a fetch step when the download is already in progress', ->
|
||||
Promise.join(
|
||||
@normaliseCurrent(currentState[0])
|
||||
@normaliseTarget(targetState[2], availableImages[0])
|
||||
(current, target) =>
|
||||
steps = @applications._inferNextSteps(false, availableImages[0], [ target.local.apps['1234'].services[1].imageId ], true, current, target, false, {}, {})
|
||||
expect(steps).to.eventually.deep.equal([{ action: 'noop', appId: 1234 }])
|
||||
)
|
||||
|
||||
it 'infers a kill step when a service has to be updated but the strategy is kill-then-download', ->
|
||||
Promise.join(
|
||||
@normaliseCurrent(currentState[0])
|
||||
@normaliseTarget(targetState[3], availableImages[0])
|
||||
(current, target) =>
|
||||
steps = @applications._inferNextSteps(false, availableImages[0], [], true, current, target, false, {}, {})
|
||||
expect(steps).to.eventually.deep.equal([{
|
||||
action: 'kill'
|
||||
current: current.local.apps['1234'].services[1]
|
||||
target: target.local.apps['1234'].services[1]
|
||||
serviceId: 24
|
||||
appId: 1234
|
||||
options: {}
|
||||
}])
|
||||
)
|
||||
|
||||
it 'does not infer to kill a service with default strategy if a dependency is not downloaded', ->
|
||||
Promise.join(
|
||||
@normaliseCurrent(currentState[4])
|
||||
@normaliseTarget(targetState[4], availableImages[2])
|
||||
(current, target) =>
|
||||
steps = @applications._inferNextSteps(false, availableImages[2], [], true, current, target, false, {}, {})
|
||||
expect(steps).to.eventually.have.deep.members([{
|
||||
action: 'fetch'
|
||||
image: @applications.imageForService(target.local.apps['1234'].services[0])
|
||||
serviceId: 23
|
||||
appId: 1234,
|
||||
serviceName: 'aservice'
|
||||
}, { action: 'noop', appId: 1234 }])
|
||||
)
|
||||
|
||||
it 'infers to kill several services as long as there is no unmet dependency', ->
|
||||
Promise.join(
|
||||
@normaliseCurrent(currentState[0])
|
||||
@normaliseTarget(targetState[5], availableImages[1])
|
||||
(current, target) =>
|
||||
steps = @applications._inferNextSteps(false, availableImages[1], [], true, current, target, false, {}, {})
|
||||
expect(steps).to.eventually.have.deep.members([
|
||||
{
|
||||
action: 'kill'
|
||||
current: current.local.apps['1234'].services[0]
|
||||
target: target.local.apps['1234'].services[0]
|
||||
serviceId: 23
|
||||
appId: 1234
|
||||
options: {}
|
||||
},
|
||||
{
|
||||
action: 'kill'
|
||||
current: current.local.apps['1234'].services[1]
|
||||
target: target.local.apps['1234'].services[1]
|
||||
serviceId: 24
|
||||
appId: 1234
|
||||
options: {}
|
||||
}
|
||||
])
|
||||
)
|
||||
|
||||
it 'infers to start the dependency first', ->
|
||||
Promise.join(
|
||||
@normaliseCurrent(currentState[1])
|
||||
@normaliseTarget(targetState[4], availableImages[1])
|
||||
(current, target) =>
|
||||
steps = @applications._inferNextSteps(false, availableImages[1], [], true, current, target, false, {}, {})
|
||||
expect(steps).to.eventually.have.deep.members([
|
||||
{
|
||||
action: 'start'
|
||||
current: null
|
||||
target: target.local.apps['1234'].services[0]
|
||||
serviceId: 23
|
||||
appId: 1234
|
||||
options: {}
|
||||
}
|
||||
])
|
||||
)
|
||||
|
||||
it 'infers to start a service once its dependency has been met', ->
|
||||
Promise.join(
|
||||
@normaliseCurrent(currentState[2])
|
||||
@normaliseTarget(targetState[4], availableImages[1])
|
||||
(current, target) =>
|
||||
steps = @applications._inferNextSteps(false, availableImages[1], [], true, current, target, false, {}, {}, {})
|
||||
expect(steps).to.eventually.have.deep.members([
|
||||
{
|
||||
action: 'start'
|
||||
current: null
|
||||
target: target.local.apps['1234'].services[1]
|
||||
serviceId: 24
|
||||
appId: 1234
|
||||
options: {}
|
||||
}
|
||||
])
|
||||
)
|
||||
|
||||
it 'infers to remove spurious containers', ->
|
||||
Promise.join(
|
||||
@normaliseCurrent(currentState[3])
|
||||
@normaliseTarget(targetState[4], availableImages[1])
|
||||
(current, target) =>
|
||||
steps = @applications._inferNextSteps(false, availableImages[1], [], true, current, target, false, {}, {})
|
||||
expect(steps).to.eventually.have.deep.members([
|
||||
{
|
||||
action: 'kill'
|
||||
current: current.local.apps['1234'].services[0]
|
||||
target: undefined
|
||||
serviceId: 23
|
||||
appId: 1234
|
||||
options: {}
|
||||
},
|
||||
{
|
||||
action: 'start'
|
||||
current: null
|
||||
target: target.local.apps['1234'].services[1]
|
||||
serviceId: 24
|
||||
appId: 1234
|
||||
options: {}
|
||||
}
|
||||
])
|
||||
)
|
||||
|
||||
it 'converts an app from a state format to a db format, adding missing networks and volumes and normalising the image name', ->
|
||||
app = @applications.normaliseAppForDB(appStateFormat)
|
||||
expect(app).to.eventually.deep.equal(appDBFormatNormalised)
|
||||
|
||||
it 'converts a dependent app from a state format to a db format, normalising the image name', ->
|
||||
app = @applications.proxyvisor.normaliseDependentAppForDB(dependentStateFormat)
|
||||
expect(app).to.eventually.deep.equal(dependentDBFormat)
|
||||
|
||||
it 'converts an app in DB format into state format, adding default and missing fields', ->
|
||||
@applications.normaliseAndExtendAppFromDB(appDBFormatNormalised)
|
||||
.then (app) ->
|
||||
appStateFormatWithDefaults = _.cloneDeep(appStateFormatNeedsServiceCreate)
|
||||
opts = { imageInfo: { Config: { Cmd: [ 'someCommand' ], Entrypoint: [ 'theEntrypoint' ] } } }
|
||||
appStateFormatWithDefaults.services = _.map appStateFormatWithDefaults.services, (service) ->
|
||||
service.imageName = service.image
|
||||
return Service.fromComposeObject(service, opts)
|
||||
expect(JSON.parse(JSON.stringify(app))).to.deep.equal(JSON.parse(JSON.stringify(appStateFormatWithDefaults)))
|
||||
|
||||
it 'converts a dependent app in DB format into state format', ->
|
||||
app = @applications.proxyvisor.normaliseDependentAppFromDB(dependentDBFormat)
|
||||
expect(app).to.eventually.deep.equal(dependentStateFormatNormalised)
|
||||
|
||||
describe 'Volumes', ->
|
||||
|
||||
before ->
|
||||
stub(@applications, 'removeAllVolumesForApp').returns(Promise.resolve([{
|
||||
action: 'removeVolume',
|
||||
current: Volume.fromComposeObject('my_volume', 12, {}, { docker: null, logger: null })
|
||||
}]))
|
||||
|
||||
after ->
|
||||
@applications.removeAllVolumesForApp.restore()
|
||||
|
||||
it 'should not remove volumes when they are no longer referenced', ->
|
||||
Promise.join(
|
||||
@normaliseCurrent(currentState[6]),
|
||||
@normaliseTarget(targetState[0], availableImages[0])
|
||||
(current, target) =>
|
||||
@applications._inferNextSteps(false, availableImages[0], [], true, current, target, false, {}, {}).then (steps) ->
|
||||
expect(
|
||||
_.every(steps, (s) -> s.action != 'removeVolume'),
|
||||
'Volumes from current app should not be removed'
|
||||
).to.be.true
|
||||
)
|
||||
|
||||
it 'should remove volumes from previous applications', ->
|
||||
Promise.join(
|
||||
@normaliseCurrent(currentState[5])
|
||||
@normaliseTarget(targetState[6], [])
|
||||
(current, target) =>
|
||||
@applications._inferNextSteps(false, [], [], true, current, target, false, {}, {}).then (steps) ->
|
||||
expect(steps).to.have.length(1)
|
||||
expect(steps[0]).to.have.property('action').that.equals('removeVolume')
|
||||
expect(steps[0].current).to.have.property('appId').that.equals(12)
|
||||
)
|
test/14-application-manager.spec.ts (new file, 731 lines)
@@ -0,0 +1,731 @@
import * as Bluebird from 'bluebird';
import * as _ from 'lodash';
import { stub } from 'sinon';

import Config from '../src/config';
import DB from '../src/db';

import Network from '../src/compose/network';

import Service from '../src/compose/service';
import Volume from '../src/compose/volume';
import DeviceState from '../src/device-state';
import EventTracker from '../src/event-tracker';

import chai = require('./lib/chai-config');
import prepare = require('./lib/prepare');

// tslint:disable-next-line
chai.use(require('chai-events'));
const { expect } = chai;

let availableImages: any[] | null;
let targetState: any[] | null;

const appDBFormatNormalised = {
	appId: 1234,
	commit: 'bar',
	releaseId: 2,
	name: 'app',
	source: 'https://api.resin.io',
	services: JSON.stringify([
		{
			appId: 1234,
			serviceName: 'serv',
			imageId: 12345,
			environment: { FOO: 'var2' },
			labels: {},
			image: 'foo/bar:latest',
			releaseId: 2,
			serviceId: 4,
			commit: 'bar',
		},
	]),
	networks: '{}',
	volumes: '{}',
};

const appStateFormat = {
	appId: 1234,
	commit: 'bar',
	releaseId: 2,
	name: 'app',
	// This technically is not part of the appStateFormat, but in general
	// usage is added before calling normaliseAppForDB
	source: 'https://api.resin.io',
	services: {
		'4': {
			appId: 1234,
			serviceName: 'serv',
			imageId: 12345,
			environment: { FOO: 'var2' },
			labels: {},
			image: 'foo/bar:latest',
		},
	},
};

const appStateFormatNeedsServiceCreate = {
	appId: 1234,
	commit: 'bar',
	releaseId: 2,
	name: 'app',
	services: [
		{
			appId: 1234,
			environment: {
				FOO: 'var2',
			},
			imageId: 12345,
			serviceId: 4,
			releaseId: 2,
			serviceName: 'serv',
			image: 'foo/bar:latest',
		},
	],
	networks: {},
	volumes: {},
};

const dependentStateFormat = {
	appId: 1234,
	image: 'foo/bar',
	commit: 'bar',
	releaseId: 3,
	name: 'app',
	config: { RESIN_FOO: 'var' },
	environment: { FOO: 'var2' },
	parentApp: 256,
	imageId: 45,
};

const dependentStateFormatNormalised = {
	appId: 1234,
	image: 'foo/bar:latest',
	commit: 'bar',
	releaseId: 3,
	name: 'app',
	config: { RESIN_FOO: 'var' },
	environment: { FOO: 'var2' },
	parentApp: 256,
	imageId: 45,
};

let currentState = (targetState = availableImages = null);

const dependentDBFormat = {
	appId: 1234,
	image: 'foo/bar:latest',
	commit: 'bar',
	releaseId: 3,
	name: 'app',
	config: JSON.stringify({ RESIN_FOO: 'var' }),
	environment: JSON.stringify({ FOO: 'var2' }),
	parentApp: 256,
	imageId: 45,
};

describe('ApplicationManager', function () {
	before(function () {
		prepare();
		this.db = new DB();
		this.config = new Config({ db: this.db });
		const eventTracker = new EventTracker();
		this.logger = {
			clearOutOfDateDBLogs: () => {
				/* noop */
			},
		} as any;
		this.deviceState = new DeviceState({
			db: this.db,
			config: this.config,
			eventTracker,
			logger: this.logger,
			apiBinder: null as any,
		});
		this.applications = this.deviceState.applications;
		stub(this.applications.images, 'inspectByName').callsFake((_imageName) =>
			Bluebird.Promise.resolve({
				Config: {
					Cmd: ['someCommand'],
					Entrypoint: ['theEntrypoint'],
					Env: [],
					Labels: {},
					Volumes: [],
				},
			}),
		);
		stub(this.applications.docker, 'getNetworkGateway').returns(
			Bluebird.Promise.resolve('172.17.0.1'),
		);
		stub(this.applications.docker, 'listContainers').returns(
			Bluebird.Promise.resolve([]),
		);
		stub(this.applications.docker, 'listImages').returns(
			Bluebird.Promise.resolve([]),
		);
		stub(Service as any, 'extendEnvVars').callsFake(function (env) {
			env['ADDITIONAL_ENV_VAR'] = 'foo';
			return env;
		});
		this.normaliseCurrent = function (current: {
			local: { apps: Iterable<unknown> | PromiseLike<Iterable<unknown>> };
		}) {
			return Bluebird.Promise.map(current.local.apps, async (app: any) => {
				return Bluebird.Promise.map(app.services, (service) =>
					Service.fromComposeObject(service as any, { appName: 'test' } as any),
				).then((normalisedServices) => {
					const appCloned = _.cloneDeep(app);
					appCloned.services = normalisedServices;
					appCloned.networks = _.mapValues(
						appCloned.networks,
						(config, name) => {
							return Network.fromComposeObject(name, app.appId, config, {
								docker: this.applications.docker,
								logger: this.logger,
							});
						},
					);
					return appCloned;
				});
			}).then(function (normalisedApps) {
				const currentCloned = _.cloneDeep(current);
				// @ts-ignore
				currentCloned.local.apps = _.keyBy(normalisedApps, 'appId');
				return currentCloned;
			});
		};

		this.normaliseTarget = (
			target: {
				local: { apps: Iterable<unknown> | PromiseLike<Iterable<unknown>> };
			},
			available: any,
		) => {
			return Bluebird.Promise.map(target.local.apps, (app) => {
				return this.applications
					.normaliseAppForDB(app)
					.then((normalisedApp: any) => {
						return this.applications.normaliseAndExtendAppFromDB(normalisedApp);
					});
			}).then(function (apps) {
				const targetCloned = _.cloneDeep(target);
				// We mock what createTargetService does when an image is available
				targetCloned.local.apps = _.map(apps, function (app) {
					app.services = _.map(app.services, function (service) {
						const img = _.find(
							available,
							(i) => i.name === service.config.image,
						);
						if (img != null) {
							service.config.image = img.dockerImageId;
						}
						return service;
					});
					return app;
				});
				// @ts-ignore
				targetCloned.local.apps = _.keyBy(targetCloned.local.apps, 'appId');
				return targetCloned;
			});
		};
		return this.db.init().then(() => {
			return this.config.init();
		});
	});

	beforeEach(
		() =>
			({
				currentState,
				targetState,
				availableImages,
			} = require('./lib/application-manager-test-states')),
	);

	after(function () {
		this.applications.images.inspectByName.restore();
		this.applications.docker.getNetworkGateway.restore();
		this.applications.docker.listContainers.restore();
		return (Service as any).extendEnvVars.restore();
	});

	it('should init', function () {
		return this.applications.init();
	});

	it('infers a start step when all that changes is a running state', function () {
		return Bluebird.Promise.join(
			// @ts-ignore
			this.normaliseCurrent(currentState[0]),
			// @ts-ignore
			this.normaliseTarget(targetState[0], availableImages[0]),
			(current, target) => {
				const steps = this.applications._inferNextSteps(
					false,
					// @ts-ignore
					availableImages[0],
					[],
					true,
					current,
					target,
					false,
					{},
					{},
				);
				return expect(steps).to.eventually.deep.equal([
					{
						action: 'start',
						current: current.local.apps['1234'].services[1],
						target: target.local.apps['1234'].services[1],
						serviceId: 24,
						appId: 1234,
						options: {},
					},
				]);
			},
		);
	});

	it('infers a kill step when a service has to be removed', function () {
		return Bluebird.Promise.join(
			// @ts-ignore
			this.normaliseCurrent(currentState[0]),
			// @ts-ignore
			this.normaliseTarget(targetState[1], availableImages[0]),
			(current, target) => {
				const steps = this.applications._inferNextSteps(
					false,
					// @ts-ignore
					availableImages[0],
					[],
					true,
					current,
					target,
					false,
					{},
					{},
				);
				return expect(steps).to.eventually.deep.equal([
					{
						action: 'kill',
						current: current.local.apps['1234'].services[1],
						target: undefined,
						serviceId: 24,
						appId: 1234,
						options: {},
					},
				]);
			},
		);
	});

	it('infers a fetch step when a service has to be updated', function () {
		return Bluebird.Promise.join(
			// @ts-ignore
			this.normaliseCurrent(currentState[0]),
			// @ts-ignore
			this.normaliseTarget(targetState[2], availableImages[0]),
			(current, target) => {
				const steps = this.applications._inferNextSteps(
					false,
					// @ts-ignore
					availableImages[0],
					[],
					true,
					current,
					target,
					false,
					{},
					{},
				);
				return expect(steps).to.eventually.deep.equal([
					{
						action: 'fetch',
						image: this.applications.imageForService(
							target.local.apps['1234'].services[1],
						),
						serviceId: 24,
						appId: 1234,
						serviceName: 'anotherService',
					},
				]);
			},
		);
	});

	it('does not infer a fetch step when the download is already in progress', function () {
		return Bluebird.Promise.join(
			// @ts-ignore
			this.normaliseCurrent(currentState[0]),
			// @ts-ignore
			this.normaliseTarget(targetState[2], availableImages[0]),
			(current, target) => {
				const steps = this.applications._inferNextSteps(
					false,
					// @ts-ignore
					availableImages[0],
					[target.local.apps['1234'].services[1].imageId],
					true,
					current,
					target,
					false,
					{},
					{},
				);
				return expect(steps).to.eventually.deep.equal([
					{ action: 'noop', appId: 1234 },
				]);
			},
		);
	});

	it('infers a kill step when a service has to be updated but the strategy is kill-then-download', function () {
		return Bluebird.Promise.join(
			// @ts-ignore
			this.normaliseCurrent(currentState[0]),
			// @ts-ignore
			this.normaliseTarget(targetState[3], availableImages[0]),
			(current, target) => {
				const steps = this.applications._inferNextSteps(
					false,
					// @ts-ignore
					availableImages[0],
					[],
					true,
					current,
					target,
					false,
					{},
					{},
				);
				return expect(steps).to.eventually.deep.equal([
					{
						action: 'kill',
						current: current.local.apps['1234'].services[1],
						target: target.local.apps['1234'].services[1],
						serviceId: 24,
						appId: 1234,
						options: {},
					},
				]);
			},
		);
	});

	it('does not infer to kill a service with default strategy if a dependency is not downloaded', function () {
		return Bluebird.Promise.join(
			// @ts-ignore
			this.normaliseCurrent(currentState[4]),
			// @ts-ignore
			this.normaliseTarget(targetState[4], availableImages[2]),
			(current, target) => {
				const steps = this.applications._inferNextSteps(
					false,
					// @ts-ignore
					availableImages[2],
					[],
					true,
					current,
					target,
					false,
					{},
					{},
				);
				return expect(steps).to.eventually.have.deep.members([
					{
						action: 'fetch',
						image: this.applications.imageForService(
							target.local.apps['1234'].services[0],
						),
						serviceId: 23,
						appId: 1234,
						serviceName: 'aservice',
					},
					{ action: 'noop', appId: 1234 },
				]);
			},
		);
	});

	it('infers to kill several services as long as there is no unmet dependency', function () {
		return Bluebird.Promise.join(
			// @ts-ignore
			this.normaliseCurrent(currentState[0]),
			// @ts-ignore
			this.normaliseTarget(targetState[5], availableImages[1]),
			(current, target) => {
				const steps = this.applications._inferNextSteps(
					false,
					// @ts-ignore
					availableImages[1],
					[],
					true,
					current,
					target,
					false,
					{},
					{},
				);
				return expect(steps).to.eventually.have.deep.members([
					{
						action: 'kill',
						current: current.local.apps['1234'].services[0],
						target: target.local.apps['1234'].services[0],
						serviceId: 23,
						appId: 1234,
						options: {},
					},
					{
						action: 'kill',
						current: current.local.apps['1234'].services[1],
						target: target.local.apps['1234'].services[1],
						serviceId: 24,
						appId: 1234,
						options: {},
					},
				]);
			},
		);
	});

	it('infers to start the dependency first', function () {
		return Bluebird.Promise.join(
			// @ts-ignore
			this.normaliseCurrent(currentState[1]),
			// @ts-ignore
			this.normaliseTarget(targetState[4], availableImages[1]),
			(current, target) => {
				const steps = this.applications._inferNextSteps(
					false,
					// @ts-ignore
					availableImages[1],
					[],
					true,
					current,
					target,
					false,
					{},
					{},
				);
				return expect(steps).to.eventually.have.deep.members([
					{
						action: 'start',
						current: null,
						target: target.local.apps['1234'].services[0],
						serviceId: 23,
						appId: 1234,
						options: {},
					},
				]);
			},
		);
	});

	it('infers to start a service once its dependency has been met', function () {
		return Bluebird.Promise.join(
			// @ts-ignore
			this.normaliseCurrent(currentState[2]),
			// @ts-ignore
			this.normaliseTarget(targetState[4], availableImages[1]),
			(current, target) => {
				const steps = this.applications._inferNextSteps(
					false,
					// @ts-ignore
					availableImages[1],
					[],
					true,
					current,
					target,
					false,
					{},
					{},
					{},
				);
				return expect(steps).to.eventually.have.deep.members([
					{
						action: 'start',
						current: null,
						target: target.local.apps['1234'].services[1],
						serviceId: 24,
						appId: 1234,
						options: {},
					},
				]);
			},
		);
	});

	it('infers to remove spurious containers', function () {
		return Bluebird.Promise.join(
			// @ts-ignore
			this.normaliseCurrent(currentState[3]),
			// @ts-ignore
			this.normaliseTarget(targetState[4], availableImages[1]),
			(current, target) => {
				const steps = this.applications._inferNextSteps(
					false,
					// @ts-ignore
					availableImages[1],
					[],
					true,
					current,
					target,
					false,
					{},
					{},
				);
				return expect(steps).to.eventually.have.deep.members([
					{
						action: 'kill',
						current: current.local.apps['1234'].services[0],
						target: undefined,
						serviceId: 23,
						appId: 1234,
						options: {},
					},
					{
						action: 'start',
						current: null,
						target: target.local.apps['1234'].services[1],
						serviceId: 24,
						appId: 1234,
						options: {},
					},
				]);
			},
		);
	});

	it('converts an app from a state format to a db format, adding missing networks and volumes and normalising the image name', function () {
		const app = this.applications.normaliseAppForDB(appStateFormat);
		return expect(app).to.eventually.deep.equal(appDBFormatNormalised);
	});

	it('converts a dependent app from a state format to a db format, normalising the image name', function () {
		const app = this.applications.proxyvisor.normaliseDependentAppForDB(
			dependentStateFormat,
		);
		return expect(app).to.eventually.deep.equal(dependentDBFormat);
	});

	it('converts an app in DB format into state format, adding default and missing fields', function () {
		return this.applications
			.normaliseAndExtendAppFromDB(appDBFormatNormalised)
			.then(function (app: any) {
				const appStateFormatWithDefaults = _.cloneDeep(
					appStateFormatNeedsServiceCreate,
				);
				const opts = {
					imageInfo: {
						Config: { Cmd: ['someCommand'], Entrypoint: ['theEntrypoint'] },
					},
				};
				(appStateFormatWithDefaults.services as any) = _.map(
					appStateFormatWithDefaults.services,
					function (service) {
						// @ts-ignore
						service.imageName = service.image;
						return Service.fromComposeObject(service, opts as any);
					},
				);
				return expect(JSON.parse(JSON.stringify(app))).to.deep.equal(
					JSON.parse(JSON.stringify(appStateFormatWithDefaults)),
				);
			});
	});

	it('converts a dependent app in DB format into state format', function () {
		const app = this.applications.proxyvisor.normaliseDependentAppFromDB(
			dependentDBFormat,
		);
		return expect(app).to.eventually.deep.equal(dependentStateFormatNormalised);
	});

	return describe('Volumes', function () {
		before(function () {
			return stub(this.applications, 'removeAllVolumesForApp').returns(
				Bluebird.Promise.resolve([
					{
						action: 'removeVolume',
						current: Volume.fromComposeObject('my_volume', 12, {}, {
							docker: null,
							logger: null,
						} as any),
					},
				]),
			);
		});

		after(function () {
			return this.applications.removeAllVolumesForApp.restore();
		});

		it('should not remove volumes when they are no longer referenced', function () {
			return Bluebird.Promise.join(
				// @ts-ignore
				this.normaliseCurrent(currentState[6]),
				// @ts-ignore
				this.normaliseTarget(targetState[0], availableImages[0]),
				(current, target) => {
					return this.applications
						._inferNextSteps(
							false,
							// @ts-ignore
							availableImages[0],
							[],
							true,
							current,
							target,
							false,
							{},
							{},
						)
						.then(
							// @ts-ignore
							(steps) =>
								expect(
									_.every(steps, (s) => s.action !== 'removeVolume'),
									'Volumes from current app should not be removed',
								).to.be.true,
						);
				},
			);
		});

		return it('should remove volumes from previous applications', function () {
			return Bluebird.Promise.join(
				// @ts-ignore
				this.normaliseCurrent(currentState[5]),
				// @ts-ignore
				this.normaliseTarget(targetState[6], []),
				(current, target) => {
					return (
						this.applications
							._inferNextSteps(
								false,
								[],
								[],
								true,
								current,
								target,
								false,
								{},
								{},
							)
							// tslint:disable-next-line
							.then(function (steps: { current: any }[]) {
								expect(steps).to.have.length(1);
								expect(steps[0])
									.to.have.property('action')
									.that.equals('removeVolume');
								return expect(steps[0].current)
									.to.have.property('appId')
									.that.equals(12);
							})
					);
				},
			);
		});
	});
});
@ -1,5 +1,4 @@
--exit
--require ts-node/register/transpile-only
--require coffeescript/register
--timeout 30000
test/*.{ts,coffee}
test/*.{ts,js}
@ -77,7 +77,7 @@ module.exports = function(env) {
			path: path.resolve(__dirname, 'dist'),
		},
		resolve: {
			extensions: ['.js', '.ts', '.json', '.coffee'],
			extensions: ['.js', '.ts', '.json'],
		},
		target: 'node',
		node: {
@ -110,10 +110,6 @@ module.exports = function(env) {
				),
				use: require.resolve('./build-utils/fix-jsonstream'),
			},
			{
				test: /\.coffee$/,
				use: require.resolve('coffee-loader'),
			},
			{
				test: /\.ts$|\.js$/,
				exclude: /node_modules/,