Add Code Coverage Doc and some other drive-bys (#5724)
parent 633b6be2fd · commit b8b0a08eeb
@@ -139,6 +139,32 @@ jobs:
      - store_artifacts:
          path: html-test-results
      - generate_and_store_version_and_filesystem_artifacts
  e2e-couchdb:
    parameters:
      node-version:
        type: string
    executor: pw-focal-development
    steps:
      - build_and_install:
          node-version: <<parameters.node-version>>
      - run: |
          export $(cat src/plugins/persistence/couch/.env.ci | xargs)
          docker-compose -f src/plugins/persistence/couch/couchdb-compose.yaml up --detach
          sleep 3
          bash src/plugins/persistence/couch/setup-couchdb.sh
      - run: sh src/plugins/persistence/couch/replace-localstorage-with-couchdb-indexhtml.sh #Replace LocalStorage Plugin with CouchDB Plugin and Config
      - run: npm run test:e2e:couchdb
      - generate_e2e_code_cov_report:
          suite: full
      - store_test_results:
          path: test-results/results.xml
      - store_artifacts:
          path: test-results
      - store_artifacts:
          path: coverage
      - store_artifacts:
          path: html-test-results
      - generate_and_store_version_and_filesystem_artifacts
  perf-test:
    parameters:
      node-version:
@@ -207,6 +233,8 @@ workflows:
          node-version: lts/gallium
      - visual-test:
          node-version: lts/gallium
      - e2e-couchdb:
          node-version: lts/gallium
    triggers:
      - schedule:
          cron: "0 0 * * *"
.github/workflows/e2e-couchdb.yml (28 lines changed)
@@ -5,37 +5,39 @@ on:
    types:
      - labeled
      - opened
env:
  OPENMCT_DATABASE_NAME: openmct
  COUCH_ADMIN_USER: admin
  COUCH_ADMIN_PASSWORD: password
  COUCH_BASE_LOCAL: http://localhost:5984
  COUCH_NODE_NAME: nonode@nohost
jobs:
  e2e-couchdb:
    if: ${{ github.event.label.name == 'pr:e2e:couchdb' }}
    if: ${{ github.event.label.name == 'pr:e2e:couchdb' }} || ${{ github.event.action == 'opened' }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - run : docker-compose -f src/plugins/persistence/couch/couchdb-compose.yaml up --detach
      - run : sleep 3 # wait until CouchDB has started (TODO: there must be a better way)
      - run : bash src/plugins/persistence/couch/setup-couchdb.sh
      - uses: actions/setup-node@v3
        with:
          node-version: 'lts/gallium'
      - run: npx playwright@1.29.0 install
      - run: npm install
      - run: sh src/plugins/persistence/couch/replace-localstorage-with-couchdb-indexhtml.sh
      - name: Start CouchDB Docker Container and Init with Setup Scripts
        run : |
          export $(cat src/plugins/persistence/couch/.env.ci | xargs)
          docker-compose -f src/plugins/persistence/couch/couchdb-compose.yaml up --detach
          sleep 3
          bash src/plugins/persistence/couch/setup-couchdb.sh
          bash src/plugins/persistence/couch/replace-localstorage-with-couchdb-indexhtml.sh
      - name: Run CouchDB Tests and publish to deploysentinel
        env:
          DEPLOYSENTINEL_API_KEY: ${{ secrets.DEPLOYSENTINEL_API_KEY }}
        run: npm run test:e2e:couchdb
      - run: ls -latr
      - name: Publish Results to Codecov.io
        env:
          SUPER_SECRET: ${{ secrets.CODECOV_TOKEN }}
        run: npm run cov:e2e:full:publish
      - name: Archive test results
        if: success() || failure()
        uses: actions/upload-artifact@v3
        with:
          path: test-results
      - name: Archive html test results
        if: success() || failure()
        uses: actions/upload-artifact@v3
        with:
          path: html-test-results
.github/workflows/e2e-pr.yml (10 lines changed)
@@ -5,7 +5,6 @@ on:
    types:
      - labeled
      - opened

jobs:
  e2e-full:
    if: ${{ github.event.label.name == 'pr:e2e' }}
@@ -33,8 +32,15 @@ jobs:
      - run: npx playwright@1.29.0 install
      - run: npx playwright install chrome-beta
      - run: npm install
      - run: npm run test:e2e:full
      - run: npm run test:e2e:full -- --maxFailures=40
      - run: npm run cov:e2e:report || true
      - shell: bash
        env:
          SUPER_SECRET: ${{ secrets.CODECOV_TOKEN }}
        run: |
          npm run cov:e2e:full:publish
      - name: Archive test results
        if: success() || failure()
        uses: actions/upload-artifact@v3
        with:
          path: test-results
.github/workflows/e2e.yml (deleted, 21 lines)
@@ -1,21 +0,0 @@
name: "e2e"
on:
  workflow_dispatch:
    inputs:
      version:
        description: 'Which branch do you want to test?' # Limited to branch for now
        required: false
        default: 'master'
jobs:
  e2e:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          ref: ${{ github.event.inputs.version }}
      - uses: actions/setup-node@v3
        with:
          node-version: '16'
      - run: npm install
      - name: Run the e2e tests
        run: npm run test:e2e:ci
@@ -191,7 +191,7 @@ The following guidelines are provided for anyone contributing source code to the
    if (responseCode === 401)
    ```
1. Use the ternary operator only for simple cases such as variable assignment. Nested ternaries should be avoided in all cases.
1. Test specs should reside alongside the source code they test, not in a separate directory.
1. Unit Test specs should reside alongside the source code they test, not in a separate directory.
1. Organize code by feature, not by type.
   eg.
   ```
@@ -222,44 +222,6 @@ The following guidelines are provided for anyone contributing source code to the
Deviations from Open MCT code style guidelines require two-party agreement,
typically from the author of the change and its reviewer.

### Test Standards

Automated testing shall occur whenever changes are merged into the main
development branch and must be confirmed alongside any pull request.

Automated tests are tests which exercise plugins, API, and utility classes.
Tests are subject to code review along with the actual implementation, to
ensure that tests are applicable and useful.

Examples of useful tests:
* Tests which replicate bugs (or their root causes) to verify their resolution.
* Tests which reflect details from software specifications.
* Tests which exercise edge or corner cases among inputs.
* Tests which verify expected interactions with other components in the system.

#### Guidelines
* 100% statement coverage is achievable and desirable.
* Do blackbox testing. Test external behaviors, not internal details. Write tests that describe what your plugin is supposed to do. How it does this doesn't matter, so don't test it.
* Unit test specs for plugins should be defined at the plugin level. Start with one test spec per plugin named pluginSpec.js, and as this test spec grows too big, break it up into multiple test specs that logically group related tests.
* Unit tests for API or for utility functions and classes may be defined at a per-source file level.
* Wherever possible only use and mock public API, builtin functions, and UI in your test specs. Do not directly invoke any private functions. ie. only call or mock functions and objects exposed by openmct.* (eg. openmct.telemetry, openmct.objectView, etc.), and builtin browser functions (fetch, requestAnimationFrame, setTimeout, etc.).
* Where builtin functions have been mocked, be sure to clear them between tests.
* Test at an appropriate level of isolation. Eg.
  * If you’re testing a view, you do not need to test the whole application UI, you can just fetch the view provider using the public API and render the view into an element that you have created.
  * You do not need to test that the view switcher works, there should be separate tests for that.
  * You do not need to test that telemetry providers work, you can mock openmct.telemetry.request() to feed test data to the view.
  * Use your best judgement when deciding on appropriate scope.
* Automated tests for plugins should start by actually installing the plugin being tested, and then test that installing the plugin adds the desired features and behavior to Open MCT, observing the above rules.
* All variables used in a test spec, including any instances of the Open MCT API should be declared inside of an appropriate block scope (not at the root level of the source file), and should be initialized in the relevant beforeEach block. `beforeEach` is preferable to `beforeAll` to avoid leaking of state between tests.
* A `afterEach` or `afterAll` should be used to do any clean up necessary to prevent leakage of state between test specs. This can happen when functions on `window` are wrapped, or when the URL is changed. [A convenience function](https://github.com/nasa/openmct/blob/master/src/utils/testing.js#L59) is provided for resetting the URL and clearing builtin spies between tests.
* If writing unit tests for legacy Angular code be sure to follow [best practices in order to avoid memory leaks](https://www.thecodecampus.de/blog/avoid-memory-leaks-angularjs-unit-tests/).

#### Examples
* [Example of an automated test spec for an object view plugin](https://github.com/nasa/openmct/blob/master/src/plugins/telemetryTable/pluginSpec.js)
* [Example of an automated test spec for API](https://github.com/nasa/openmct/blob/master/src/api/time/TimeAPISpec.js)

### Commit Message Standards

Commit messages should:
@@ -301,7 +263,7 @@ Issue severity is categorized as follows (in ascending order):

* _Trivial_: Minimal impact on the usefulness and functionality of the software; a "nice-to-have." Visual impact without functional impact,
* _Medium_: Some impairment of use, but simple workarounds exist
* _Critical_: Significant loss of functionality or impairment of use. Display of telemetry data is not affected though.
* _Critical_: Significant loss of functionality or impairment of use. Display of telemetry data is not affected though. Complex workarounds exist.
* _Blocker_: Major functionality is impaired or lost, threatening mission success. Display of telemetry data is impaired or blocked by the bug, which could lead to loss of situational awareness.

## Check Lists
@@ -310,22 +272,4 @@ The following check lists should be completed and attached to pull requests
when they are filed (author checklist) and when they are merged (reviewer
checklist).

### Author Checklist

[Within PR Template](.github/PULL_REQUEST_TEMPLATE.md)

### Reviewer Checklist

* [ ] Changes appear to address issue?
* [ ] Changes appear not to be breaking changes?
* [ ] Appropriate unit tests included?
* [ ] Code style and in-line documentation are appropriate?
* [ ] Commit messages meet standards?
* [ ] Has associated issue been labelled `unverified`? (only applicable if this PR closes the issue)
* [ ] Has associated issue been labelled `bug`? (only applicable if this PR is for a bug fix)
* [ ] List of Acceptance Tests Performed.

Write out a small list of tests performed with just enough detail for another developer on the team
to execute.

i.e. ```When Clicking on Add button, new `object` appears in dropdown.```
TESTING.md (new file, 50 lines)
@@ -0,0 +1,50 @@
# Testing
Open MCT testing is iterating and improving at a rapid pace. This document serves to capture and index existing testing documentation, and to house documentation which has no other obvious location as our testing evolves.

## General Testing Process
Documentation is located [here](./docs/src/process/testing/plan.md).

## Unit Testing
Unit testing is an essential part of our test strategy and complements our e2e testing strategy.

#### Unit Test Guidelines
* Unit test specs should reside alongside the source code they test, not in a separate directory.
* Unit test specs for plugins should be defined at the plugin level. Start with one test spec per plugin named pluginSpec.js, and as this test spec grows too big, break it up into multiple test specs that logically group related tests.
* Unit tests for API or for utility functions and classes may be defined at a per-source file level.
* Wherever possible only use and mock public API, builtin functions, and UI in your test specs. Do not directly invoke any private functions, i.e. only call or mock functions and objects exposed by openmct.* (e.g. openmct.telemetry, openmct.objectView, etc.), and builtin browser functions (fetch, requestAnimationFrame, setTimeout, etc.).
* Where builtin functions have been mocked, be sure to clear them between tests.
* Test at an appropriate level of isolation. E.g.:
  * If you’re testing a view, you do not need to test the whole application UI; you can just fetch the view provider using the public API and render the view into an element that you have created.
  * You do not need to test that the view switcher works; there should be separate tests for that.
  * You do not need to test that telemetry providers work; you can mock openmct.telemetry.request() to feed test data to the view.
  * Use your best judgement when deciding on appropriate scope.
* Automated tests for plugins should start by actually installing the plugin being tested, and then test that installing the plugin adds the desired features and behavior to Open MCT, observing the above rules.
* All variables used in a test spec, including any instances of the Open MCT API, should be declared inside of an appropriate block scope (not at the root level of the source file), and should be initialized in the relevant `beforeEach` block. `beforeEach` is preferable to `beforeAll` to avoid leaking of state between tests.
* An `afterEach` or `afterAll` should be used to do any clean up necessary to prevent leakage of state between test specs. This can happen when functions on `window` are wrapped, or when the URL is changed. [A convenience function](https://github.com/nasa/openmct/blob/master/src/utils/testing.js#L59) is provided for resetting the URL and clearing builtin spies between tests.
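
To make these guidelines concrete, here is a minimal sketch of the shape such a spec can take. It is illustrative only, not a canonical Open MCT spec: it assumes Jasmine (the framework behind `npm run test`) and the `createOpenMct` / `resetApplicationState` helpers from the `utils/testing` module linked above; `ExamplePlugin`, the type key, and the canned telemetry values are hypothetical.

```js
// exampleSpec.js — illustrative sketch only; see the linked pluginSpec.js
// examples below for real specs. Assumes Jasmine and the utils/testing helpers.
import { createOpenMct, resetApplicationState } from 'utils/testing';

// Stand-in for the plugin under test; a real spec would import its own plugin.
function ExamplePlugin() {
    return function install(openmct) {
        openmct.types.addType('example.type', {
            name: 'Example Type',
            creatable: false
        });
    };
}

describe('The example plugin', () => {
    // State is scoped to the suite, never to the root of the file.
    let openmct;

    beforeEach((done) => {
        openmct = createOpenMct();

        // Start by installing the plugin being tested...
        openmct.install(ExamplePlugin());

        // ...and only mock public API, e.g. feed canned telemetry instead of a provider.
        spyOn(openmct.telemetry, 'request')
            .and.returnValue(Promise.resolve([{ utc: 0, sin: 0 }]));

        openmct.on('start', done);
        openmct.startHeadless();
    });

    afterEach(() => {
        // Resets the URL and clears builtin spies between specs.
        return resetApplicationState(openmct);
    });

    it('registers the example type', () => {
        expect(openmct.types.get('example.type').definition.name).toBe('Example Type');
    });

    it('feeds mocked telemetry through the public API', async () => {
        const data = await openmct.telemetry.request({
            identifier: { namespace: '', key: 'example' }
        });

        expect(data.length).toBe(1);
    });
});
```

The two specs map directly onto the guidelines: install the plugin first, assert only on behavior reachable through `openmct.*`, and rely on `beforeEach`/`afterEach` plus `resetApplicationState` to keep state from leaking between specs.
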
#### Unit Test Examples
* [Example of an automated test spec for an object view plugin](https://github.com/nasa/openmct/blob/master/src/plugins/telemetryTable/pluginSpec.js)
* [Example of an automated test spec for API](https://github.com/nasa/openmct/blob/master/src/api/time/TimeAPISpec.js)

#### Unit Testing Execution

The unit tests can be executed in one of two ways:
* `npm run test`, which runs the entire suite against headless Chrome
* `npm run test:debug`, for debugging the tests in real time in an active Chrome session
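
When debugging, it can help to narrow the run to the spec under investigation. Jasmine's focus helpers do this (a standard Jasmine feature, not an Open MCT-specific API); remember to remove them before committing:

```js
// Temporarily focus one suite or spec while debugging; remove before committing.
fdescribe('The suite under investigation', () => {
    fit('runs only this spec', () => {
        expect(true).toBe(true);
    });
});
```

With a focused suite or spec present, only the focused tests run in the next `npm run test` or `npm run test:debug` invocation.
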
## e2e, performance, and visual testing
Documentation is located [here](./e2e/README.md).

## Code Coverage

* 100% statement coverage is achievable and desirable.

Codecov.io combines the coverage reports produced by each of the above commands using [Codecov.io Flags](https://docs.codecov.com/docs/flags). Effectively, this allows us to combine multiple reports which are generated at various stages of our CI pipeline or run as part of a parallel process.

This e2e coverage is combined with our unit test report to give a comprehensive (if flawed) view of line coverage.

### Limitations in our code coverage reporting

Our code coverage implementation has two known limitations:
- [Variability and accuracy](https://github.com/nasa/openmct/issues/5811)
- [Vue instrumentation](https://github.com/nasa/openmct/issues/4973)
codecov.yml (10 lines changed)
@@ -15,14 +15,14 @@ coverage:

flags:
  unit:
    carryforward: true
  e2e-ci:
    carryforward: true
    carryforward: false
  e2e-stable:
    carryforward: false
  e2e-full:
    carryforward: true

comment:
  layout: "reach,diff,flags,files,footer"
  layout: "diff,flags,files,footer"
  behavior: default
  require_changes: false
  show_carryforward_flags: true
@@ -351,6 +351,10 @@ When looking at the reports run in CI, you'll leverage this same HTML Report whi

### e2e Code Coverage

Our e2e code coverage is captured and combined with our unit test coverage. For more information, please see our [code coverage documentation](../TESTING.md)

#### Generating e2e code coverage

Code coverage is collected during test execution using our custom [baseFixture](./baseFixtures.js). The raw coverage files are stored in a `.nyc_report` directory to be converted into a lcov file with the following [nyc](https://github.com/istanbuljs/nyc) command:

```npm run cov:e2e:report```

@@ -361,10 +365,6 @@ At this point, the nyc linecov report can be published to [codecov.io](https://a

or

```npm run cov:e2e:full:publish``` for the full suite running against all available platforms.

Codecov.io will combine each of the above commands with [Codecov.io Flags](https://docs.codecov.com/docs/flags). Effectively, this allows us to combine multiple reports which are run at various stages of our CI Pipeline or run as part of a parallel process.

This e2e coverage is combined with our unit test report to give a comprehensive (if flawed) view of line coverage.

## Other

### About e2e testing
@@ -21,7 +21,7 @@
 *****************************************************************************/

/*
This test suite is dedicated to tests which verify form functionality.
This test suite is dedicated to tests which verify notebook tag functionality.
*/

const { test, expect } = require('../../../../pluginFixtures');