openmct/e2e/tests/performance/contract/imagery.contract.perf.spec.js
Last commit: 0413e77d8a "test(e2e): Major refactor and stabilization of e2e tests (#7581)" by John Hill, co-authored by Jesse Mazzella <jesse.d.mazzella@nasa.gov>, 2024-08-07

/*****************************************************************************
 * Open MCT, Copyright (c) 2014-2024, United States Government
 * as represented by the Administrator of the National Aeronautics and Space
 * Administration. All rights reserved.
 *
 * Open MCT is licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * Open MCT includes source code licensed under additional open source
 * licenses. See the Open Source Licenses file (LICENSES.md) included with
 * this source code distribution or the Licensing information page available
 * at runtime from the About dialog for additional information.
 *****************************************************************************/
/*
This test suite is dedicated to performance tests which ensure that the testability of
performance is not broken upstream in Open MCT. Any assumptions made downstream will be
tested here.

TODO:
- Update resolution of performance config
- Add a PerformanceObserver on init to push all performance marks
- Move the client CDP connection to a before hook or a fixture
*/
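
// A possible shape for the "CDP connection as a fixture" TODO above (hypothetical sketch,
// not wired into this suite): extend the base test so each test receives a CDP session with
// Performance metrics already enabled, instead of creating one inside the test body. The
// fixture name `cdpSession` is illustrative only.
//
// import { test as base } from '@playwright/test';
// const test = base.extend({
//   cdpSession: async ({ page }, use) => {
//     const client = await page.context().newCDPSession(page);
//     await client.send('Performance.enable');
//     await use(client);
//     await client.detach();
//   }
// });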
import { expect, test } from '@playwright/test';

const filePath = 'test-data/PerformanceDisplayLayout.json';

test.describe('Performance tests', () => {
  test.beforeEach(async ({ page, browser }, testInfo) => {
    // Go to baseURL
    await page.goto('./', { waitUntil: 'domcontentloaded' });

    // Right-click a:has-text("My Items") to open the context menu
    await page.locator('a:has-text("My Items")').click({
      button: 'right'
    });

    // Click text=Import from JSON
    await page.locator('text=Import from JSON').click();

    // Upload PerformanceDisplayLayout.json
    await page.setInputFiles('#fileElem', filePath);

    // Click text=OK
    await page.locator('button:has-text("OK")').click();

    // The link text is the object name followed by its object type ("Display Layout")
    await expect(
      page.locator('a:has-text("Performance Display Layout Display Layout")')
    ).toBeVisible();

    // Create a Chrome Performance Timeline trace to store as a test artifact
    console.log('\n==== Devtools: startTracing ====\n');
    await browser.startTracing(page, {
      path: `${testInfo.outputPath()}-trace.json`,
      screenshots: true
    });
  });
  test.afterEach(async ({ page, browser }) => {
    console.log('\n==== Devtools: stopTracing ====\n');
    await browser.stopTracing();

    /* Measurement Section
     * The following section includes a block of performance measurements.
     */

    // Measure the time between the 'viewlarge.start' mark and the test's own 'viewLarge.start.test' mark
    await page.evaluate(() =>
      window.performance.measure(
        'machine-time-difference',
        'viewlarge.start',
        'viewLarge.start.test'
      )
    );

    // Get the navigation start time
    const startTime = await page.evaluate(() => window.performance.timing.navigationStart);
    console.log('window.performance.timing.navigationStart', startTime);

    // Get all performance marks
    const getAllMarksJson = await page.evaluate(() =>
      JSON.stringify(window.performance.getEntriesByType('mark'))
    );
    const getAllMarks = JSON.parse(getAllMarksJson);
    console.log('window.performance.getEntriesByType("mark")', getAllMarks);

    // Get all performance measures
    const getAllMeasuresJson = await page.evaluate(() =>
      JSON.stringify(window.performance.getEntriesByType('measure'))
    );
    const getAllMeasures = JSON.parse(getAllMeasuresJson);
    console.log('window.performance.getEntriesByType("measure")', getAllMeasures);
  });
  /* The following test will navigate to a previously created Performance Display Layout and
   * measure the following metrics:
   * - ElementResourceTiming
   * - Interaction Timing
   */
  test('Embedded View Large for Imagery is performant in Fixed Time', async ({ page, browser }) => {
    const client = await page.context().newCDPSession(page);

    // Tell the DevTools session to record performance metrics
    // https://chromedevtools.github.io/devtools-protocol/tot/Performance/#method-getMetrics
    await client.send('Performance.enable');

    // Go to baseURL
    await page.goto('./');

    // Search is available after launch
    await page.locator('[aria-label="OpenMCT Search"] input[type="search"]').click();
    await page.evaluate(() => window.performance.mark('search-available'));

    // Fill the search input
    await page
      .locator('[aria-label="OpenMCT Search"] input[type="search"]')
      .fill('Performance Display Layout');
    await page.evaluate(() => window.performance.mark('search-entered'));

    // The search result appears and is clicked
    await Promise.all([
      page.waitForNavigation(),
      page.locator('a:has-text("Performance Display Layout")').first().click(),
      page.evaluate(() => window.performance.mark('click-search-result'))
    ]);

    // Time until the Example Imagery frame loads within the Display Layout
    await page.locator('.c-imagery__main-image__bg').waitFor({ state: 'visible' });

    // Time until the Example Imagery object loads
    await page.locator('.c-imagery__main-image__background-image').waitFor({ state: 'visible' });

    // Get the background-image url from the background-image css prop
    const backgroundImage = page.locator('.c-imagery__main-image__background-image');
    let backgroundImageUrl = await backgroundImage.evaluate((el) => {
      return window
        .getComputedStyle(el)
        .getPropertyValue('background-image')
        .match(/url\(([^)]+)\)/)[1];
    });
    // Strip the surrounding quotes from the url(...) value. forgive me, padre
    backgroundImageUrl = backgroundImageUrl.slice(1, -1);
    console.log('backgroundImageUrl ' + backgroundImageUrl);

    // Get the ResourceTiming entry of the background-image jpg
    const resourceTimingJson = await page.evaluate(
      (bgImageUrl) => JSON.stringify(window.performance.getEntriesByName(bgImageUrl).pop()),
      backgroundImageUrl
    );
    console.log('resourceTimingJson ' + resourceTimingJson);

    // Open the Large view
    await page.locator('button:has-text("Large View")').click(); // This action includes the performance.mark named 'viewLarge.start'
    await page.evaluate(() => window.performance.mark('viewLarge.start.test')); // This mark exists only to compare evaluate timing

    // Time until Imagery is rendered in the Large frame
    await page.locator('.c-imagery__main-image__bg').waitFor({ state: 'visible' });
    await page.evaluate(() => window.performance.mark('background-image-frame'));

    // Time until the Example Imagery object loads
    await page.locator('.c-imagery__main-image__background-image').waitFor({ state: 'visible' });
    await page.evaluate(() => window.performance.mark('background-image-visible'));

    // Get the current number of images in the thumbstrip
    await page.locator('.c-imagery__thumb').waitFor({ state: 'visible' });
    const thumbCount = await page.locator('.c-imagery__thumb').count();
    console.log('number of thumbs rendered ' + thumbCount);
    await page.locator('.c-imagery__thumb').last().click();

    // Get the ResourceTiming entries of all jpg resources
    const resourceTimingJson2 = await page.evaluate(() =>
      JSON.stringify(window.performance.getEntriesByType('resource'))
    );
    const resourceTiming = JSON.parse(resourceTimingJson2);
    const jpgResourceTiming = resourceTiming.find((element) => element.name.includes('.jpg'));
    console.log('jpgResourceTiming ' + JSON.stringify(jpgResourceTiming));

    // Click the Close icon
    await page.getByRole('button', { name: 'Close' }).click();
    await page.evaluate(() => window.performance.mark('view-large-close-button'));

    //await client.send('HeapProfiler.enable');
    await client.send('HeapProfiler.collectGarbage');

    const performanceMetrics = await client.send('Performance.getMetrics');
    console.log(performanceMetrics.metrics);
  });
});