diff --git a/.github/workflows/pr_datascience.yml b/.github/workflows/pr_datascience.yml
new file mode 100644
index 000000000000..e33c72c7932a
--- /dev/null
+++ b/.github/workflows/pr_datascience.yml
@@ -0,0 +1,159 @@
+name: Pull Request DataScience
+
+on:
+    push:
+        branches:
+            - main
+    pull_request:
+        branches:
+            - main
+    check_run:
+        types: [rerequested, requested_action]
+
+env:
+    NODE_VERSION: 12.15.0
+    PYTHON_VERSION: 3.8
+    MOCHA_REPORTER_JUNIT: true # Use the mocha-multi-reporters and send output to both console (spec) and JUnit (mocha-junit-reporter). Also enables a reporter which exits the process running the tests if it hasn't already.
+    CACHE_NPM_DEPS: cache-npm
+    CACHE_OUT_DIRECTORY: cache-out-directory
+    CACHE_PIP_DEPS: cache-pip
+    # Key for the cache created at the end of the 'Cache ./pythonFiles/lib/python' step.
+    CACHE_PYTHONFILES: cache-pvsc-pythonFiles
+    COVERAGE_REPORTS: tests-coverage-reports
+    CI_PYTHON_PATH: python
+    TEST_RESULTS_DIRECTORY: .
+    TEST_RESULTS_GLOB: '**/test-results*.xml'
+
+jobs:
+    tests:
+        name: Functional Jupyter Tests
+        runs-on: ${{ matrix.os }}
+        if: github.repository == 'microsoft/vscode-python'
+        strategy:
+            fail-fast: false
+            matrix:
+                # We're not running CI on macOS for now: leaving it out means one less matrix entry (and fewer runners used),
+                # macOS runners are expensive, and we assume that Ubuntu is enough to cover the UNIX case.
+                os: [ubuntu-latest]
+                python: [3.8]
+                test-suite: [group1, group2, group3, group4]
+        steps:
+            - name: Checkout
+              uses: actions/checkout@v2
+
+            - name: Use Python ${{matrix.python}}
+              uses: actions/setup-python@v2
+              with:
+                  python-version: ${{matrix.python}}
+
+            - name: Upgrade pip
+              run: python -m pip install -U pip
+
+            - name: Use Node ${{env.NODE_VERSION}}
+              uses: actions/setup-node@v2.1.1
+              with:
+                  node-version: ${{env.NODE_VERSION}}
+
+            # Start caching
+
+            # Cache Python Dependencies.
+            # Caching (https://github.com/actions/cache/blob/main/examples.md#python---pip)
+            - name: Cache pip on linux
+              uses: actions/cache@v2
+              if: matrix.os == 'ubuntu-latest'
+              with:
+                  path: ~/.cache/pip
+                  key: ${{ runner.os }}-pip-${{env.PYTHON_VERSION}}-${{ hashFiles('requirements.txt') }}-${{hashFiles('build/debugger-install-requirements.txt')}}-${{hashFiles('test-requirements.txt')}}-${{hashFiles('ipython-test-requirements.txt')}}-${{hashFiles('functional-test-requirements.txt')}}-${{hashFiles('conda-functional-requirements.txt')}}
+                  restore-keys: |
+                      ${{ runner.os }}-pip-${{env.PYTHON_VERSION}}-
+
+            - name: Cache pip on mac
+              uses: actions/cache@v2
+              if: matrix.os == 'macos-latest'
+              with:
+                  path: ~/Library/Caches/pip
+                  key: ${{ runner.os }}-pip-${{env.PYTHON_VERSION}}-${{ hashFiles('requirements.txt') }}-${{hashFiles('build/debugger-install-requirements.txt')}}-${{hashFiles('test-requirements.txt')}}-${{hashFiles('ipython-test-requirements.txt')}}-${{hashFiles('functional-test-requirements.txt')}}-${{hashFiles('conda-functional-requirements.txt')}}
+                  restore-keys: |
+                      ${{ runner.os }}-pip-${{env.PYTHON_VERSION}}-
+
+            - name: Cache pip on windows
+              uses: actions/cache@v2
+              if: matrix.os == 'windows-latest'
+              with:
+                  path: ~\AppData\Local\pip\Cache
+                  key: ${{ runner.os }}-pip-${{env.PYTHON_VERSION}}-${{ hashFiles('requirements.txt') }}-${{hashFiles('build/debugger-install-requirements.txt')}}-${{hashFiles('test-requirements.txt')}}-${{hashFiles('ipython-test-requirements.txt')}}-${{hashFiles('functional-test-requirements.txt')}}-${{hashFiles('conda-functional-requirements.txt')}}
+                  restore-keys: |
+                      ${{ runner.os }}-pip-${{env.PYTHON_VERSION}}-
+
+            # Caching of npm packages (https://github.com/actions/cache/blob/main/examples.md#node---npm)
+            - name: Cache npm on linux/mac
+              uses: actions/cache@v2
+              if: matrix.os != 'windows-latest'
+              with:
+                  path: ~/.npm
+                  key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
+                  restore-keys: |
+                      ${{ runner.os }}-node-
+
+            - name: Get npm cache directory
+              if: matrix.os == 'windows-latest'
+              id: npm-cache
+              run: |
+                  echo "::set-output name=dir::$(npm config get cache)"
+            - name: Cache npm on windows
+              uses: actions/cache@v2
+              if: matrix.os == 'windows-latest'
+              with:
+                  path: ${{ steps.npm-cache.outputs.dir }}
+                  key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
+                  restore-keys: |
+                      ${{ runner.os }}-node-
+
+            - name: Cache compiled TS files
+              id: out-cache
+              uses: actions/cache@v2
+              with:
+                  path: ./out
+                  key: ${{runner.os}}-${{env.CACHE_OUT_DIRECTORY}}-${{hashFiles('src/**')}}
+
+            # For faster/better builds of sdists.
+            - run: python -m pip install wheel
+              shell: bash
+
+            # debugpy is not shipped, only installed for local tests.
+            # In production, we get debugpy from the Python extension.
+            - name: Install functional test requirements
+              run: |
+                  python -m pip --disable-pip-version-check install -t ./pythonFiles/lib/python --no-cache-dir --implementation py --no-deps --upgrade -r ./requirements.txt
+                  python -m pip --disable-pip-version-check install -r build/debugger-install-requirements.txt
+                  python ./pythonFiles/install_debugpy.py
+                  python -m pip install numpy
+                  python -m pip install --upgrade jupyter
+                  python -m pip install --upgrade -r build/test-requirements.txt
+                  python -m pip install --upgrade -r ./build/ipython-test-requirements.txt
+                  python -m pip install --upgrade -r ./build/conda-functional-requirements.txt
+                  python -m ipykernel install --user
+
+            # This step is slow.
+            - name: Install dependencies (npm ci)
+              run: npm ci --prefer-offline
+
+            # This step is slow.
+            - name: Compile if not cached
+              run: npx gulp prePublishNonBundle
+
+            - name: Run functional tests
+              run: npm run test:functional:parallel -- --${{matrix.test-suite}}
+              env:
+                  VSCODE_PYTHON_ROLLING: 1
+                  VSC_PYTHON_FORCE_LOGGING: 1
+              id: test_functional_group
+
+            - name: Publish Test Report
+              uses: scacap/action-surefire-report@v1
+              with:
+                  github_token: ${{ secrets.GITHUB_TOKEN }}
+                  report_paths: ${{ env.TEST_RESULTS_GLOB }}
+                  check_name: Functional Test Report
+              if: steps.test_functional_group.outcome == 'failure' && failure()
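Each matrix entry hands its `test-suite` value to the runner as a flag (`--group1` through `--group4`). As a minimal sketch of how the script changed below turns that flag into a zero-based group index — the `argv` array here is hand-built to stand in for a real `process.argv`:

```js
// Sketch: how '--group2' (from `npm run test:functional:parallel -- --group2`)
// becomes the zero-based index used to pick one bucket of test files.
const argv = ['node', 'build/ci/scripts/runFunctionalTests.js', '--group2'];

const groupArgIndex = argv.findIndex((a) => a.includes('--group'));
// '--group2'.slice(7) === '2'; subtract 1 to index into the groups array.
const groupIndex = groupArgIndex >= 0 ? parseInt(argv[groupArgIndex].slice(7), 10) - 1 : -1;

console.log(groupIndex); // 1, i.e. the second of the four generated groups
```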
diff --git a/build/ci/scripts/runFunctionalTests.js b/build/ci/scripts/runFunctionalTests.js
index 4171900586da..e1e8de8a4318 100644
--- a/build/ci/scripts/runFunctionalTests.js
+++ b/build/ci/scripts/runFunctionalTests.js
@@ -8,12 +8,84 @@ var path = require('path');
 var glob = require('glob');
 var child_process = require('child_process');
+var fs = require('fs-extra');
 
 // Create a base for the output file
 var originalMochaFile = process.env['MOCHA_FILE'];
 var mochaFile = originalMochaFile || './test-results.xml';
 var mochaBaseFile = path.join(path.dirname(mochaFile), path.basename(mochaFile, '.xml'));
 var mochaFileExt = '.xml';
+var groupCount = 4;
+
+function gatherArgs(extraArgs, file) {
+    return [
+        file,
+        '--require=out/test/unittests.js',
+        '--exclude=out/**/*.jsx',
+        '--reporter=mocha-multi-reporters',
+        '--reporter-option=configFile=build/.mocha-multi-reporters.config',
+        '--ui=tdd',
+        '--recursive',
+        '--colors',
+        '--exit',
+        '--timeout=180000',
+        ...extraArgs
+    ];
+}
+
+async function generateGroups(files) {
+    // Go through each file, putting it into a bucket. The buckets attempt to
+    // have equal total size.
+
+    // Start with the largest files first (sort by size)
+    var stats = await Promise.all(files.map((f) => fs.stat(f)));
+    var filesWithSize = files.map((f, i) => {
+        return {
+            file: f,
+            size: stats[i].size
+        };
+    });
+    var sorted = filesWithSize.sort((a, b) => b.size - a.size);
+
+    // Generate the buckets; files are placed largest-first into the least-full bucket
+    var buckets = new Array(groupCount).fill().map((_, i) => {
+        return {
+            index: i,
+            totalSize: 0,
+            files: []
+        };
+    });
+    var lowestBucket = buckets[0];
+    sorted.forEach((entry) => {
+        buckets[lowestBucket.index].totalSize += entry.size;
+        buckets[lowestBucket.index].files.push(entry.file);
+        lowestBucket = buckets.find((b) => b.totalSize < lowestBucket.totalSize) || lowestBucket;
+    });
+
+    // Return these groups of files
+    return buckets.map((b) => b.files);
+}
+
+async function runIndividualTest(extraArgs, file, index) {
+    var subMochaFile = `${mochaBaseFile}_${index}_${path.basename(file)}${mochaFileExt}`;
+    process.env['MOCHA_FILE'] = subMochaFile;
+    var args = gatherArgs(extraArgs, file);
+    console.log(`Running functional test for file ${file} ...`);
+    var exitCode = await new Promise((resolve) => {
+        // Spawn the sub node process
+        var proc = child_process.fork('./node_modules/mocha/bin/_mocha', args);
+        proc.on('exit', resolve);
+    });
+
+    // Report the outcome so failures are easy to find in the log
+    if (exitCode !== 0) {
+        console.log(`Functional tests for ${file} failed.`);
+    } else {
+        console.log(`Functional test for ${file} succeeded`);
+    }
+
+    return exitCode;
+}
 
 // Wrap async code in a function so can wait till done
 async function main() {
@@ -21,7 +93,7 @@ async function main() {
     // Glob all of the files that we usually send to mocha as a group (see mocha.functional.opts.xml)
     var files = await new Promise((resolve, reject) => {
-        glob('./out/test/**/*.functional.test.js', (ex, res) => {
+        glob('./out/test/datascience/**/*.functional.test.js', (ex, res) => {
             if (ex) {
                 reject(ex);
             } else {
                 resolve(res);
@@ -30,38 +102,42 @@ async function main() {
         });
     });
 
+    // Figure out what group is running (should be something like --group1, --group2 etc.)
+    var groupArgIndex = process.argv.findIndex((a) => a.includes('--group'));
+    var groupIndex = groupArgIndex >= 0 ? parseInt(process.argv[groupArgIndex].slice(7), 10) - 1 : -1;
+
+    // Generate 4 groups based on sorting by size
+    var groups = await generateGroups(files);
+    files = groupIndex >= 0 ? groups[groupIndex] : files;
+    console.log(`Running for group ${groupIndex}`);
+
+    // Extract any extra args for the individual mocha processes
+    var extraArgs =
+        groupIndex >= 0 && process.argv.length > 3
+            ? process.argv.slice(3)
+            : process.argv.length > 2
+            ? process.argv.slice(2)
+            : [];
+
     // Iterate over them, running mocha on each
     var returnCode = 0;
 
-    // Go through each one at a time
+    // Start timing now (don't care about glob time)
+    var startTime = Date.now();
+
+    // Run all of the tests (in parallel or sync based on env)
     try {
-        for (var index = 0; index < files.length; index += 1) {
-            // Each run with a file will expect a $MOCHA_FILE$ variable. Generate one for each
-            // Note: this index is used as a pattern when setting mocha file in the test_phases.yml
-            var subMochaFile = `${mochaBaseFile}_${index}_${path.basename(files[index])}${mochaFileExt}`;
-            process.env['MOCHA_FILE'] = subMochaFile;
-            var exitCode = await new Promise((resolve) => {
-                // Spawn the sub node process
-                var proc = child_process.fork('./node_modules/mocha/bin/_mocha', [
-                    files[index],
-                    '--require=out/test/unittests.js',
-                    '--exclude=out/**/*.jsx',
-                    '--reporter=mocha-multi-reporters',
-                    '--reporter-option=configFile=build/.mocha-multi-reporters.config',
-                    '--ui=tdd',
-                    '--recursive',
-                    '--colors',
-                    '--exit',
-                    '--timeout=180000'
-                ]);
-                proc.on('exit', resolve);
-            });
-
-            // If failed keep track
-            if (exitCode !== 0) {
-                console.log(`Functional tests for ${files[index]} failed.`);
-                returnCode = exitCode;
+        if (process.env.VSCODE_PYTHON_FORCE_TEST_SYNC) {
+            for (var i = 0; i < files.length; i += 1) {
+                // Synchronous, one at a time
+                returnCode = returnCode | (await runIndividualTest(extraArgs, files[i], i));
             }
+        } else {
+            // Parallel, all at once
+            const returnCodes = await Promise.all(files.map(runIndividualTest.bind(undefined, extraArgs)));
+
+            // Bitwise-OR all of the codes together
+            returnCode = returnCodes.reduce((p, c) => p | c);
+        }
     } catch (ex) {
         console.log(`Functional tests run failure: ${ex}.`);
@@ -73,8 +149,10 @@ async function main() {
         process.env['MOCHA_FILE'] = originalMochaFile;
     }
 
-    // Indicate error code
-    console.log(`Functional test run result: ${returnCode}`);
+    var endTime = Date.now();
+
+    // Indicate the error code and total time of the run
+    console.log(`Functional test run result: ${returnCode} after ${(endTime - startTime) / 1_000} seconds`);
     process.exit(returnCode);
 }
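To make the grouping concrete, here is a standalone sketch of the greedy bucketing that `generateGroups` performs, with invented file sizes (the real script stats each test file on disk). Note that the `find` call moves to the *first* bucket lighter than the one just filled, not necessarily the globally lightest — a cheap heuristic that is good enough for four groups:

```js
// Standalone sketch of the size-balanced grouping above; sizes are invented.
var groupCount = 4;

function groupBySize(filesWithSize) {
    // Largest first, so the big outliers land in separate buckets.
    var sorted = [...filesWithSize].sort((a, b) => b.size - a.size);

    var buckets = new Array(groupCount).fill().map((_, i) => ({ index: i, totalSize: 0, files: [] }));

    var lowestBucket = buckets[0];
    sorted.forEach((entry) => {
        buckets[lowestBucket.index].totalSize += entry.size;
        buckets[lowestBucket.index].files.push(entry.file);
        // Move to the first bucket now lighter than the current one, if any.
        lowestBucket = buckets.find((b) => b.totalSize < lowestBucket.totalSize) || lowestBucket;
    });

    return buckets.map((b) => b.files);
}

console.log(
    groupBySize([
        { file: 'a.test.js', size: 900 },
        { file: 'b.test.js', size: 500 },
        { file: 'c.test.js', size: 400 },
        { file: 'd.test.js', size: 300 },
        { file: 'e.test.js', size: 100 }
    ])
);
// => [ ['a.test.js'], ['b.test.js'], ['c.test.js'], ['d.test.js', 'e.test.js'] ]
```

The per-group exit codes are then bitwise-OR'd together (`returnCodes.reduce((p, c) => p | c)`), so the overall run fails if any single mocha process returned non-zero.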
diff --git a/build/ci/templates/test_phases.yml b/build/ci/templates/test_phases.yml
index f486b07616b2..08b40e5e045f 100644
--- a/build/ci/templates/test_phases.yml
+++ b/build/ci/templates/test_phases.yml
@@ -118,7 +118,7 @@ steps:
       python -c "import sys;print(sys.executable)"
     displayName: 'pip install functional requirements'
     condition: and(succeeded(), eq(variables['NeedsPythonFunctionalReqs'], 'true'))
-
+
   # Add CONDA to the path so anaconda works
   #
   # This task will only run if variable `NeedsPythonFunctionalReqs` is true.
@@ -386,39 +386,26 @@ steps:
       python -c "from __future__ import print_function;import sys;print('##vso[task.setvariable variable=CI_PYTHON_PATH;]{}'.format(sys.executable))"
     displayName: 'Set CI_PYTHON_PATH'
 
-  # Run the functional tests with each file split.
+  # Run the non-DataScience functional tests
   #
   # This task only runs if the string 'testFunctional' exists in variable `TestsToRun`.
   #
-  # Note it is crucial this uses npm to start the runFunctionalTests.js. Otherwise the
-  # environment will be messed up.
-  #
-  # Example command line (windows pwsh):
-  # > node build/ci/scripts/runFunctionalTests.js
-  - script: |
-      npm run test:functional:split
-    displayName: 'Run functional split'
-    condition: and(succeeded(), contains(variables['TestsToRun'], 'testFunctional'), eq(variables['SplitFunctionalTests'], 'true'))
-    env:
-      DISPLAY: :10
-
-  # Run the functional tests when not splitting
-  #
-  # This task only runs if the string 'testFunctional' exists in variable `TestsToRun`.
+  # It runs the functional tests whose suite names don't start with 'DataScience'; the
+  # DataScience functional tests are handled in a separate YAML file.
   #
   # Example command line (windows pwsh):
-  # > node build/ci/scripts/runFunctionalTests.js
+  # > npm run test:functional
   - script: |
-      npm run test:functional
+      npm run test:functional -- --grep="^(?!DataScience).*$"
    displayName: 'Run functional tests'
-    condition: and(succeeded(), contains(variables['TestsToRun'], 'testFunctional'), not(eq(variables['SplitFunctionalTests'], 'true')))
+    condition: and(succeeded(), contains(variables['TestsToRun'], 'testFunctional'))
     env:
       DISPLAY: :10
 
   # Upload the test results to Azure DevOps to facilitate test reporting in their UX.
   - task: PublishTestResults@2
     displayName: 'Publish functional tests results'
-    condition: contains(variables['TestsToRun'], 'testFunctional')
+    condition: or(contains(variables['TestsToRun'], 'testFunctional'), contains(variables['TestsToRun'], 'testParallelFunctional'))
     inputs:
       testResultsFiles: '$(Build.ArtifactStagingDirectory)/test-junit*.xml'
       testRunTitle: 'functional-$(Agent.Os)-Py$(pythonVersion)'
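The `--grep` value above is a negative lookahead that mocha matches against full test titles, which is also why the suites being split out are renamed to start with 'DataScience' (see the `trustedNotebooks` change below). A quick sketch of how the pattern behaves — every title here except 'DataScience Notebook trust' is hypothetical:

```js
// Mocha applies --grep to the full test title, so anchoring a negative
// lookahead at the start excludes every suite whose title begins with
// 'DataScience'.
const nonDataScience = /^(?!DataScience).*$/;

for (const title of ['DataScience Notebook trust', 'Terminal integration', 'Unit DataScience helpers']) {
    console.log(`${title}: ${nonDataScience.test(title) ? 'runs' : 'filtered out'}`);
}
// DataScience Notebook trust: filtered out
// Terminal integration: runs
// Unit DataScience helpers: runs ('DataScience' is not at the start)
```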
diff --git a/build/ci/vscode-python-nightly-flake-ci.yaml b/build/ci/vscode-python-nightly-flake-ci.yaml
deleted file mode 100644
index 6c7a7b3e73b0..000000000000
--- a/build/ci/vscode-python-nightly-flake-ci.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-# Nightly build
-
-name: '$(Year:yyyy).$(Month).0.$(BuildID)-nightly-flake'
-
-# Not the CI build, see `vscode-python-nightly-flake-ci.yaml`.
-trigger: none
-
-# Not the PR build for merges to main and release.
-pr: none
-
-schedules:
-    - cron: '0 8 * * 1-5'
-      # Daily midnight PST build, runs Monday - Friday always
-      displayName: Nightly Flake build
-      branches:
-          include:
-              - main
-              - release*
-      always: true
-
-# Variables that are available for the entire pipeline.
-variables:
-    - template: templates/globals.yml
-
-stages:
-    - stage: Build
-      jobs:
-          - template: templates/jobs/build_compile.yml
-
-    # Each item in each matrix has a number of possible values it may
-    # define. They are detailed in templates/test_phases.yml. The only
-    # required value is "TestsToRun".
-
-    - stage: Linux
-      dependsOn:
-          - Build
-      jobs:
-          - job: 'Py3x_Linux'
-            dependsOn: []
-            timeoutInMinutes: 120
-            strategy:
-                matrix:
-                    'Functional':
-                        TestsToRun: 'testfunctional'
-                        NeedsPythonTestReqs: true
-                        NeedsPythonFunctionalReqs: true
-                        VSCODE_PYTHON_ROLLING: true
-            pool:
-                vmImage: 'ubuntu-20.04'
-            steps:
-                - template: templates/test_phases.yml
-
-    - stage: Mac
-      dependsOn:
-          - Build
-      jobs:
-          - job: 'Py3x_Mac'
-            dependsOn: []
-            timeoutInMinutes: 120
-            strategy:
-                matrix:
-                    'Functional':
-                        TestsToRun: 'testfunctional'
-                        NeedsPythonTestReqs: true
-                        NeedsPythonFunctionalReqs: true
-                        VSCODE_PYTHON_ROLLING: true
-            pool:
-                vmImage: '$(vmImageMacOS)'
-            steps:
-                - template: templates/test_phases.yml
-
-    - stage: Windows
-      dependsOn:
-          - Build
-      jobs:
-          - job: 'Py3x_Windows'
-            dependsOn: []
-            timeoutInMinutes: 180
-            strategy:
-                matrix:
-                    'Functional':
-                        TestsToRun: 'testfunctional'
-                        NeedsPythonTestReqs: true
-                        NeedsPythonFunctionalReqs: true
-                        VSCODE_PYTHON_ROLLING: true
-                        SplitFunctionalTests: true
-            pool:
-                vmImage: 'vs2017-win2016'
-            steps:
-                - template: templates/test_phases.yml
diff --git a/build/ci/vscode-python-pr-validation.yaml b/build/ci/vscode-python-pr-validation.yaml
index 240e9026e06b..dd19287c93a3 100644
--- a/build/ci/vscode-python-pr-validation.yaml
+++ b/build/ci/vscode-python-pr-validation.yaml
@@ -46,11 +46,10 @@ stages:
                     TestsToRun: 'pythonUnitTests, pythonInternalTools, pythonIPythonTests'
                     NeedsPythonTestReqs: true
                     NeedsIPythonReqs: true
-                'Functional':
+                'Functional Non DataScience':
                     TestsToRun: 'testfunctional'
                     NeedsPythonTestReqs: true
                     NeedsPythonFunctionalReqs: true
-                    SplitFunctionalTests: false
                 'Single Workspace':
                     TestsToRun: 'testSingleWorkspace'
                     NeedsPythonTestReqs: true
@@ -76,7 +75,7 @@ stages:
                     # Note: "pythonInternalTools" tests are 3.7+.
                     TestsToRun: 'pythonUnitTests'
                     NeedsPythonTestReqs: true
-                'Functional':
+                'Functional Non DS':
                     PythonVersion: '2.7'
                     TestsToRun: 'testfunctional'
                     NeedsPythonTestReqs: true
@@ -116,7 +115,7 @@ stages:
                     NeedsPythonTestReqs: true
                     NeedsIPythonReqs: true
                     # This gives us our best functional coverage for the OS.
-                'Functional':
+                'Functional Non DS':
                     TestsToRun: 'testfunctional'
                     NeedsPythonTestReqs: true
                     NeedsPythonFunctionalReqs: true
diff --git a/package.json b/package.json
index f4525a941ad8..5e4f05ff0805 100644
--- a/package.json
+++ b/package.json
@@ -3464,7 +3464,7 @@
         "test:functional:perf": "node --inspect-brk ./node_modules/mocha/bin/_mocha --require source-map-support/register --config ./build/.mocha.functional.perf.json",
         "test:functional:memleak": "node --inspect-brk ./node_modules/mocha/bin/_mocha --require source-map-support/register --config ./build/.mocha.functional.json",
         "test:functional:cover": "npm run test:functional",
-        "test:functional:split": "node ./build/ci/scripts/runFunctionalTests.js",
+        "test:functional:parallel": "node ./build/ci/scripts/runFunctionalTests.js",
         "test:cover:report": "nyc --nycrc-path build/.nycrc report --reporter=text --reporter=html --reporter=text-summary --reporter=cobertura",
         "testDebugger": "node ./out/test/testBootstrap.js ./out/test/debuggerTest.js",
         "testSingleWorkspace": "node ./out/test/testBootstrap.js ./out/test/standardTest.js",
diff --git a/src/test/datascience/trustedNotebooks.functional.test.tsx b/src/test/datascience/trustedNotebooks.functional.test.tsx
index 302ad724e214..85f0e11485bb 100644
--- a/src/test/datascience/trustedNotebooks.functional.test.tsx
+++ b/src/test/datascience/trustedNotebooks.functional.test.tsx
@@ -43,7 +43,7 @@ function waitForMessage(ioc: DataScienceIocContainer, message: string, options?:
         .waitForMessage(message, options);
 }
 // tslint:disable:no-any no-multiline-string
-suite('Notebook trust', () => {
+suite('DataScience Notebook trust', () => {
     let wrapper: ReactWrapper<any, Readonly<{}>, React.Component>;
     let ne: { editor: INotebookEditor; mount: IMountedWebView };
     const disposables: Disposable[] = [];
diff --git a/src/test/datascience/uiTests/ipywidget.ui.functional.test.ts b/src/test/datascience/uiTests/ipywidget.ui.functional.test.ts
index 8c5850e37309..57dc43384652 100644
--- a/src/test/datascience/uiTests/ipywidget.ui.functional.test.ts
+++ b/src/test/datascience/uiTests/ipywidget.ui.functional.test.ts
@@ -16,6 +16,7 @@ import { Disposable } from 'vscode';
 import { LocalZMQKernel } from '../../../client/common/experiments/groups';
 import { sleep } from '../../../client/common/utils/async';
 import { EXTENSION_ROOT_DIR } from '../../../client/constants';
+import { IS_CI_SERVER } from '../../ciConstants';
 import { retryIfFail as retryIfFailOriginal } from '../../common';
 import { mockedVSCodeNamespaces } from '../../vscode-mock';
 import { DataScienceIocContainer } from '../dataScienceIocContainer';
@@ -37,6 +38,12 @@ use(chaiAsPromised);
 
 let ioc: DataScienceIocContainer;
 suiteSetup(function () {
+    // Skip all tests until the flakiness can be resolved.
+    // See issue: https://github.com/microsoft/vscode-python/issues/13936
+    if (IS_CI_SERVER) {
+        this.skip();
+    }
+
     // These are UI tests, hence nothing to do with platforms.
     this.timeout(30_000); // UI Tests, need time to start jupyter.
     this.retries(3); // UI tests can be flaky.
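Finally, a note on how the per-file JUnit results line up with the workflow's `TEST_RESULTS_GLOB`: each forked mocha process gets its own `MOCHA_FILE`, derived from the base name plus the group index and test file name. A small sketch of that naming, assuming the default base file and a hypothetical test file path:

```js
const path = require('path');

// Mirrors runIndividualTest's result-file naming; the input path is hypothetical.
const mochaFile = process.env['MOCHA_FILE'] || './test-results.xml';
const mochaBaseFile = path.join(path.dirname(mochaFile), path.basename(mochaFile, '.xml'));

const file = './out/test/datascience/notebook.functional.test.js';
const subMochaFile = `${mochaBaseFile}_${3}_${path.basename(file)}.xml`;

console.log(subMochaFile);
// test-results_3_notebook.functional.test.js.xml -- which should be picked up
// by the workflow's '**/test-results*.xml' glob for the Publish Test Report step.
```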