Skip to content

Commit bbcfaa0

Browse files
authored
Ideas for speeding up and including flakey tests in PR (#14265)
* First idea * Disable IPywidget on CI machines * Missed functional publish * Fix name and actual arguments * Submit comment change to get build to happen again * Comment change to spawn new run * Fix problem of running functional tests twice * Remove data science tests from functional tests * Add github action for PR for DS * Split groups based on size * Make sure to install local requirements too * Test results not being published * Mark trusted notebook tests as DataScience
1 parent abf9a5e commit bbcfaa0

8 files changed

+287
-147
lines changed

.github/workflows/pr_datascience.yml

Lines changed: 159 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,159 @@
1+
name: Pull Request DataScience
2+
3+
on:
4+
push:
5+
branches:
6+
- main
7+
pull_request:
8+
branches:
9+
- main
10+
check_run:
11+
types: [rerequested, requested_action]
12+
13+
env:
14+
NODE_VERSION: 12.15.0
15+
PYTHON_VERSION: 3.8
16+
MOCHA_REPORTER_JUNIT: true # Use the mocha-multi-reporters and send output to both console (spec) and JUnit (mocha-junit-reporter). Also enables a reporter which exits the process running the tests if it hasn't already.
17+
CACHE_NPM_DEPS: cache-npm
18+
CACHE_OUT_DIRECTORY: cache-out-directory
19+
CACHE_PIP_DEPS: cache-pip
20+
# Key for the cache created at the end of the 'Cache ./pythonFiles/lib/python' step.
21+
CACHE_PYTHONFILES: cache-pvsc-pythonFiles
22+
COVERAGE_REPORTS: tests-coverage-reports
23+
CI_PYTHON_PATH: python
24+
TEST_RESULTS_DIRECTORY: .
25+
TEST_RESULTS_GLOB: '**/test-results*.xml'
26+
27+
jobs:
28+
tests:
29+
name: Functional Jupyter Tests
30+
runs-on: ${{ matrix.os }}
31+
if: github.repository == 'microsoft/vscode-python'
32+
strategy:
33+
fail-fast: false
34+
matrix:
35+
# We're not running CI on macOS for now because it's one less matrix entry to lower the number of runners used,
36+
# macOS runners are expensive, and we assume that Ubuntu is enough to cover the UNIX case.
37+
os: [ubuntu-latest]
38+
python: [3.8]
39+
test-suite: [group1, group2, group3, group4]
40+
steps:
41+
- name: Checkout
42+
uses: actions/checkout@v2
43+
44+
- name: Use Python ${{matrix.python}}
45+
uses: actions/setup-python@v2
46+
with:
47+
python-version: ${{matrix.python}}
48+
49+
- name: Upgrade pip
50+
run: python -m pip install -U pip
51+
52+
- name: Use Node ${{env.NODE_VERSION}}
53+
uses: actions/[email protected]
54+
with:
55+
node-version: ${{env.NODE_VERSION}}
56+
57+
# Start caching
58+
59+
# Cache Python Dependencies.
60+
# Caching (https://github.com/actions/cache/blob/main/examples.md#python---pip)
61+
- name: Cache pip on linux
62+
uses: actions/cache@v2
63+
if: matrix.os == 'ubuntu-latest'
64+
with:
65+
path: ~/.cache/pip
66+
key: ${{ runner.os }}-pip-${{env.PYTHON_VERSION}}-${{ hashFiles('requirements.txt') }}-${{hashFiles('build/debugger-install-requirements.txt')}}-${{hashFiles('test-requirements.txt')}}-${{hashFiles('ipython-test-requirements.txt')}}-${{hashFiles('functional-test-requirements.txt')}}-${{hashFiles('conda-functional-requirements.txt')}}
67+
restore-keys: |
68+
${{ runner.os }}-pip-${{env.PYTHON_VERSION}}-
69+
70+
- name: Cache pip on mac
71+
uses: actions/cache@v2
72+
if: matrix.os == 'macos-latest'
73+
with:
74+
path: ~/Library/Caches/pip
75+
key: ${{ runner.os }}-pip-${{env.PYTHON_VERSION}}-${{ hashFiles('requirements.txt') }}-${{hashFiles('build/debugger-install-requirements.txt')}}-${{hashFiles('test-requirements.txt')}}-${{hashFiles('ipython-test-requirements.txt')}}-${{hashFiles('functional-test-requirements.txt')}}-${{hashFiles('conda-functional-requirements.txt')}}
76+
restore-keys: |
77+
${{ runner.os }}-pip-${{env.PYTHON_VERSION}}-
78+
79+
- name: Cache pip on windows
80+
uses: actions/cache@v2
81+
if: matrix.os == 'windows-latest'
82+
with:
83+
path: ~\AppData\Local\pip\Cache
84+
key: ${{ runner.os }}-pip-${{env.PYTHON_VERSION}}-${{ hashFiles('requirements.txt') }}-${{hashFiles('build/debugger-install-requirements.txt')}}-${{hashFiles('test-requirements.txt')}}-${{hashFiles('ipython-test-requirements.txt')}}-${{hashFiles('functional-test-requirements.txt')}}-${{hashFiles('conda-functional-requirements.txt')}}
85+
restore-keys: |
86+
${{ runner.os }}-pip-${{env.PYTHON_VERSION}}-
87+
88+
# Caching of npm packages (https://github.com/actions/cache/blob/main/examples.md#node---npm)
89+
- name: Cache npm on linux/mac
90+
uses: actions/cache@v2
91+
if: matrix.os != 'windows-latest'
92+
with:
93+
path: ~/.npm
94+
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
95+
restore-keys: |
96+
${{ runner.os }}-node-
97+
98+
- name: Get npm cache directory
99+
if: matrix.os == 'windows-latest'
100+
id: npm-cache
101+
run: |
102+
echo "::set-output name=dir::$(npm config get cache)"
103+
- name: Cache npm on windows
104+
uses: actions/cache@v2
105+
if: matrix.os == 'windows-latest'
106+
with:
107+
path: ${{ steps.npm-cache.outputs.dir }}
108+
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
109+
restore-keys: |
110+
${{ runner.os }}-node-
111+
112+
- name: Cache compiled TS files
113+
id: out-cache
114+
uses: actions/cache@v2
115+
with:
116+
path: ./out
117+
key: ${{runner.os}}-${{env.CACHE_OUT_DIRECTORY}}-${{hashFiles('src/**')}}
118+
119+
# For faster/better builds of sdists.
120+
- run: python -m pip install wheel
121+
shell: bash
122+
123+
# debugpy is not shipped, only installed for local tests.
124+
# In production, we get debugpy from python extension.
125+
- name: Install functional test requirements
126+
run: |
127+
python -m pip --disable-pip-version-check install -t ./pythonFiles/lib/python --no-cache-dir --implementation py --no-deps --upgrade -r ./requirements.txt
128+
python -m pip --disable-pip-version-check install -r build/debugger-install-requirements.txt
129+
python ./pythonFiles/install_debugpy.py
130+
python -m pip install numpy
131+
python -m pip install --upgrade jupyter
132+
python -m pip install --upgrade -r build/test-requirements.txt
133+
python -m pip install --upgrade -r ./build/ipython-test-requirements.txt
134+
python -m pip install --upgrade -r ./build/conda-functional-requirements.txt
135+
python -m ipykernel install --user
136+
# This step is slow.
137+
138+
- name: Install dependencies (npm ci)
139+
run: npm ci --prefer-offline
140+
# This step is slow.
141+
142+
- name: Compile if not cached
143+
run: npx gulp prePublishNonBundle
144+
145+
- name: Run functional tests
146+
run: npm run test:functional:parallel -- --${{matrix.test-suite}}
147+
env:
148+
VSCODE_PYTHON_ROLLING: 1
149+
VSC_PYTHON_FORCE_LOGGING: 1
150+
id: test_functional_group
151+
152+
- name: Publish Test Report
153+
uses: scacap/action-surefire-report@v1
154+
with:
155+
github_token: ${{ secrets.GITHUB_TOKEN }}
156+
report_paths: ${{ env.TEST_RESULTS_GLOB }}
157+
check_name: Functional Test Report
158+
if: steps.test_functional_group.outcome == 'failure' && failure()
159+

build/ci/scripts/runFunctionalTests.js

Lines changed: 108 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -8,20 +8,92 @@
88
var path = require('path');
99
var glob = require('glob');
1010
var child_process = require('child_process');
11+
var fs = require('fs-extra');
1112

1213
// Create a base for the output file
1314
var originalMochaFile = process.env['MOCHA_FILE'];
1415
var mochaFile = originalMochaFile || './test-results.xml';
1516
var mochaBaseFile = path.join(path.dirname(mochaFile), path.basename(mochaFile, '.xml'));
1617
var mochaFileExt = '.xml';
18+
var groupCount = 4;
19+
20+
function gatherArgs(extraArgs, file) {
21+
return [
22+
file,
23+
'--require=out/test/unittests.js',
24+
'--exclude=out/**/*.jsx',
25+
'--reporter=mocha-multi-reporters',
26+
'--reporter-option=configFile=build/.mocha-multi-reporters.config',
27+
'--ui=tdd',
28+
'--recursive',
29+
'--colors',
30+
'--exit',
31+
'--timeout=180000',
32+
...extraArgs
33+
];
34+
}
35+
36+
async function generateGroups(files) {
37+
// Go through each file putting it into a bucket. Each bucket will attempt to
38+
// have equal size
39+
40+
// Start with largest files first (sort by size)
41+
var stats = await Promise.all(files.map((f) => fs.stat(f)));
42+
var filesWithSize = files.map((f, i) => {
43+
return {
44+
file: f,
45+
size: stats[i].size
46+
};
47+
});
48+
var sorted = filesWithSize.sort((a, b) => b.size - a.size);
49+
50+
// Generate buckets that try to hold the largest file first
51+
var buckets = new Array(groupCount).fill().map((_, i) => {
52+
return {
53+
index: i,
54+
totalSize: 0,
55+
files: []
56+
};
57+
});
58+
var lowestBucket = buckets[0];
59+
sorted.forEach((fs) => {
60+
buckets[lowestBucket.index].totalSize += fs.size;
61+
buckets[lowestBucket.index].files.push(fs.file);
62+
lowestBucket = buckets.find((b) => b.totalSize < lowestBucket.totalSize) || lowestBucket;
63+
});
64+
65+
// Return these groups of files
66+
return buckets.map((b) => b.files);
67+
}
68+
69+
async function runIndividualTest(extraArgs, file, index) {
70+
var subMochaFile = `${mochaBaseFile}_${index}_${path.basename(file)}${mochaFileExt}`;
71+
process.env['MOCHA_FILE'] = subMochaFile;
72+
var args = gatherArgs(extraArgs, file);
73+
console.log(`Running functional test for file ${file} ...`);
74+
var exitCode = await new Promise((resolve) => {
75+
// Spawn the sub node process
76+
var proc = child_process.fork('./node_modules/mocha/bin/_mocha', args);
77+
proc.on('exit', resolve);
78+
});
79+
80+
// If failed keep track
81+
if (exitCode !== 0) {
82+
console.log(`Functional tests for ${file} failed.`);
83+
} else {
84+
console.log(`Functional test for ${file} succeeded`);
85+
}
86+
87+
return exitCode;
88+
}
1789

1890
// Wrap async code in a function so can wait till done
1991
async function main() {
2092
console.log('Globbing files for functional tests');
2193

2294
// Glob all of the files that we usually send to mocha as a group (see mocha.functional.opts.xml)
2395
var files = await new Promise((resolve, reject) => {
24-
glob('./out/test/**/*.functional.test.js', (ex, res) => {
96+
glob('./out/test/datascience/**/*.functional.test.js', (ex, res) => {
2597
if (ex) {
2698
reject(ex);
2799
} else {
@@ -30,38 +102,42 @@ async function main() {
30102
});
31103
});
32104

105+
// Figure out what group is running (should be something like --group1, --group2 etc.)
106+
var groupArgIndex = process.argv.findIndex((a) => a.includes('--group'));
107+
var groupIndex = groupArgIndex >= 0 ? parseInt(process.argv[groupArgIndex].slice(7), 10) - 1 : -1;
108+
109+
// Generate 4 groups based on sorting by size
110+
var groups = await generateGroups(files);
111+
files = groupIndex >= 0 ? groups[groupIndex] : files;
112+
console.log(`Running for group ${groupIndex}`);
113+
114+
// Extract any extra args for the individual mocha processes
115+
var extraArgs =
116+
groupIndex >= 0 && process.argv.length > 3
117+
? process.argv.slice(3)
118+
: process.argv.length > 2
119+
? process.argv.slice(2)
120+
: [];
121+
33122
// Iterate over them, running mocha on each
34123
var returnCode = 0;
35124

36-
// Go through each one at a time
125+
// Start timing now (don't care about glob time)
126+
var startTime = Date.now();
127+
128+
// Run all of the tests (in parallel or sync based on env)
37129
try {
38-
for (var index = 0; index < files.length; index += 1) {
39-
// Each run with a file will expect a $MOCHA_FILE$ variable. Generate one for each
40-
// Note: this index is used as a pattern when setting mocha file in the test_phases.yml
41-
var subMochaFile = `${mochaBaseFile}_${index}_${path.basename(files[index])}${mochaFileExt}`;
42-
process.env['MOCHA_FILE'] = subMochaFile;
43-
var exitCode = await new Promise((resolve) => {
44-
// Spawn the sub node process
45-
var proc = child_process.fork('./node_modules/mocha/bin/_mocha', [
46-
files[index],
47-
'--require=out/test/unittests.js',
48-
'--exclude=out/**/*.jsx',
49-
'--reporter=mocha-multi-reporters',
50-
'--reporter-option=configFile=build/.mocha-multi-reporters.config',
51-
'--ui=tdd',
52-
'--recursive',
53-
'--colors',
54-
'--exit',
55-
'--timeout=180000'
56-
]);
57-
proc.on('exit', resolve);
58-
});
59-
60-
// If failed keep track
61-
if (exitCode !== 0) {
62-
console.log(`Functional tests for ${files[index]} failed.`);
63-
returnCode = exitCode;
130+
if (process.env.VSCODE_PYTHON_FORCE_TEST_SYNC) {
131+
for (var i = 0; i < files.length; i += 1) {
132+
// Synchronous, one at a time
133+
returnCode = returnCode | (await runIndividualTest(extraArgs, files[i], i));
64134
}
135+
} else {
136+
// Parallel, all at once
137+
const returnCodes = await Promise.all(files.map(runIndividualTest.bind(undefined, extraArgs)));
138+
139+
// Or all of the codes together
140+
returnCode = returnCodes.reduce((p, c) => p | c);
65141
}
66142
} catch (ex) {
67143
console.log(`Functional tests run failure: ${ex}.`);
@@ -73,8 +149,10 @@ async function main() {
73149
process.env['MOCHA_FILE'] = originalMochaFile;
74150
}
75151

76-
// Indicate error code
77-
console.log(`Functional test run result: ${returnCode}`);
152+
var endTime = Date.now();
153+
154+
// Indicate error code and total time of the run
155+
console.log(`Functional test run result: ${returnCode} after ${(endTime - startTime) / 1_000} seconds`);
78156
process.exit(returnCode);
79157
}
80158

build/ci/templates/test_phases.yml

Lines changed: 8 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,7 @@ steps:
118118
python -c "import sys;print(sys.executable)"
119119
displayName: 'pip install functional requirements'
120120
condition: and(succeeded(), eq(variables['NeedsPythonFunctionalReqs'], 'true'))
121-
121+
122122
# Add CONDA to the path so anaconda works
123123
#
124124
# This task will only run if variable `NeedsPythonFunctionalReqs` is true.
@@ -403,39 +403,26 @@ steps:
403403
python -c "from __future__ import print_function;import sys;print('##vso[task.setvariable variable=CI_PYTHON_PATH;]{}'.format(sys.executable))"
404404
displayName: 'Set CI_PYTHON_PATH'
405405
406-
# Run the functional tests with each file split.
406+
# Run the non DS functional tests
407407
#
408408
# This task only runs if the string 'testFunctional' exists in variable `TestsToRun`.
409409
#
410-
# Note it is crucial this uses npm to start the runFunctionalTests.js. Otherwise the
411-
# environment will be messed up.
412-
#
413-
# Example command line (windows pwsh):
414-
# > node build/ci/scripts/runFunctionalTests.js
415-
- script: |
416-
npm run test:functional:split
417-
displayName: 'Run functional split'
418-
condition: and(succeeded(), contains(variables['TestsToRun'], 'testFunctional'), eq(variables['SplitFunctionalTests'], 'true'))
419-
env:
420-
DISPLAY: :10
421-
422-
# Run the functional tests when not splitting
423-
#
424-
# This task only runs if the string 'testFunctional' exists in variable `TestsToRun`.
410+
# It runs the functional tests that don't start with 'DataScience'. DataScience functional tests
411+
# will be handled in a separate yml.
425412
#
426413
# Example command line (windows pwsh):
427-
# > node build/ci/scripts/runFunctionalTests.js
414+
# > npm run test:functional
428415
- script: |
429-
npm run test:functional
416+
npm run test:functional -- --grep="^(?!DataScience).*$"
430417
displayName: 'Run functional tests'
431-
condition: and(succeeded(), contains(variables['TestsToRun'], 'testFunctional'), not(eq(variables['SplitFunctionalTests'], 'true')))
418+
condition: and(succeeded(), contains(variables['TestsToRun'], 'testFunctional'))
432419
env:
433420
DISPLAY: :10
434421
435422
# Upload the test results to Azure DevOps to facilitate test reporting in their UX.
436423
- task: PublishTestResults@2
437424
displayName: 'Publish functional tests results'
438-
condition: contains(variables['TestsToRun'], 'testFunctional')
425+
condition: or(contains(variables['TestsToRun'], 'testFunctional'), contains(variables['TestsToRun'], 'testParallelFunctional'))
439426
inputs:
440427
testResultsFiles: '$(Build.ArtifactStagingDirectory)/test-junit*.xml'
441428
testRunTitle: 'functional-$(Agent.Os)-Py$(pythonVersion)'

0 commit comments

Comments
 (0)