From 4264ca9817fd9f60c0286f7e9056511169bc662d Mon Sep 17 00:00:00 2001 From: Malcolm Smith Date: Wed, 13 Mar 2024 19:41:57 +0000 Subject: [PATCH 1/6] Enable `test_doctest` on platforms that don't support subprocesses --- Lib/test/test_doctest/test_doctest.py | 459 +++++++++++++------------- 1 file changed, 228 insertions(+), 231 deletions(-) diff --git a/Lib/test/test_doctest/test_doctest.py b/Lib/test/test_doctest/test_doctest.py index 43be200b983227..bf3c9660ece2cb 100644 --- a/Lib/test/test_doctest/test_doctest.py +++ b/Lib/test/test_doctest/test_doctest.py @@ -18,10 +18,6 @@ import contextlib -if not support.has_subprocess_support: - raise unittest.SkipTest("test_CLI requires subprocess support.") - - # NOTE: There are some additional tests relating to interaction with # zipimport in the test_zipimport_support test module. # There are also related tests in `test_doctest2` module. @@ -466,7 +462,7 @@ def basics(): r""" >>> tests = finder.find(sample_func) >>> print(tests) # doctest: +ELLIPSIS - [] + [] The exact name depends on how test_doctest was invoked, so allow for leading path components. @@ -2944,235 +2940,236 @@ def test_unicode(): """ TestResults(failed=1, attempted=1) """ -def test_CLI(): r""" -The doctest module can be used to run doctests against an arbitrary file. -These tests test this CLI functionality. - -We'll use the support module's script_helpers for this, and write a test files -to a temp dir to run the command against. Due to a current limitation in -script_helpers, though, we need a little utility function to turn the returned -output into something we can doctest against: - - >>> def normalize(s): - ... return '\n'.join(s.decode().splitlines()) - -With those preliminaries out of the way, we'll start with a file with two -simple tests and no errors. We'll run both the unadorned doctest command, and -the verbose version, and then check the output: - - >>> from test.support import script_helper - >>> from test.support.os_helper import temp_dir - >>> with temp_dir() as tmpdir: - ... fn = os.path.join(tmpdir, 'myfile.doc') - ... with open(fn, 'w', encoding='utf-8') as f: - ... _ = f.write('This is a very simple test file.\n') - ... _ = f.write(' >>> 1 + 1\n') - ... _ = f.write(' 2\n') - ... _ = f.write(' >>> "a"\n') - ... _ = f.write(" 'a'\n") - ... _ = f.write('\n') - ... _ = f.write('And that is it.\n') - ... rc1, out1, err1 = script_helper.assert_python_ok( - ... '-m', 'doctest', fn) - ... rc2, out2, err2 = script_helper.assert_python_ok( - ... '-m', 'doctest', '-v', fn) - -With no arguments and passing tests, we should get no output: - - >>> rc1, out1, err1 - (0, b'', b'') - -With the verbose flag, we should see the test output, but no error output: - - >>> rc2, err2 - (0, b'') - >>> print(normalize(out2)) - Trying: - 1 + 1 - Expecting: - 2 - ok - Trying: - "a" - Expecting: - 'a' - ok - 1 items passed all tests: - 2 tests in myfile.doc - 2 tests in 1 items. - 2 passed and 0 failed. - Test passed. - -Now we'll write a couple files, one with three tests, the other a python module -with two tests, both of the files having "errors" in the tests that can be made -non-errors by applying the appropriate doctest options to the run (ELLIPSIS in -the first file, NORMALIZE_WHITESPACE in the second). This combination will -allow thoroughly testing the -f and -o flags, as well as the doctest command's -ability to process more than one file on the command line and, since the second -file ends in '.py', its handling of python module files (as opposed to straight -text files). 
- - >>> from test.support import script_helper - >>> from test.support.os_helper import temp_dir - >>> with temp_dir() as tmpdir: - ... fn = os.path.join(tmpdir, 'myfile.doc') - ... with open(fn, 'w', encoding="utf-8") as f: - ... _ = f.write('This is another simple test file.\n') - ... _ = f.write(' >>> 1 + 1\n') - ... _ = f.write(' 2\n') - ... _ = f.write(' >>> "abcdef"\n') - ... _ = f.write(" 'a...f'\n") - ... _ = f.write(' >>> "ajkml"\n') - ... _ = f.write(" 'a...l'\n") - ... _ = f.write('\n') - ... _ = f.write('And that is it.\n') - ... fn2 = os.path.join(tmpdir, 'myfile2.py') - ... with open(fn2, 'w', encoding='utf-8') as f: - ... _ = f.write('def test_func():\n') - ... _ = f.write(' \"\"\"\n') - ... _ = f.write(' This is simple python test function.\n') - ... _ = f.write(' >>> 1 + 1\n') - ... _ = f.write(' 2\n') - ... _ = f.write(' >>> "abc def"\n') - ... _ = f.write(" 'abc def'\n") - ... _ = f.write("\n") - ... _ = f.write(' \"\"\"\n') - ... rc1, out1, err1 = script_helper.assert_python_failure( - ... '-m', 'doctest', fn, fn2) - ... rc2, out2, err2 = script_helper.assert_python_ok( - ... '-m', 'doctest', '-o', 'ELLIPSIS', fn) - ... rc3, out3, err3 = script_helper.assert_python_ok( - ... '-m', 'doctest', '-o', 'ELLIPSIS', - ... '-o', 'NORMALIZE_WHITESPACE', fn, fn2) - ... rc4, out4, err4 = script_helper.assert_python_failure( - ... '-m', 'doctest', '-f', fn, fn2) - ... rc5, out5, err5 = script_helper.assert_python_ok( - ... '-m', 'doctest', '-v', '-o', 'ELLIPSIS', - ... '-o', 'NORMALIZE_WHITESPACE', fn, fn2) - -Our first test run will show the errors from the first file (doctest stops if a -file has errors). Note that doctest test-run error output appears on stdout, -not stderr: - - >>> rc1, err1 - (1, b'') - >>> print(normalize(out1)) # doctest: +ELLIPSIS - ********************************************************************** - File "...myfile.doc", line 4, in myfile.doc - Failed example: - "abcdef" - Expected: - 'a...f' - Got: - 'abcdef' - ********************************************************************** - File "...myfile.doc", line 6, in myfile.doc - Failed example: - "ajkml" - Expected: - 'a...l' - Got: - 'ajkml' - ********************************************************************** - 1 items had failures: - 2 of 3 in myfile.doc - ***Test Failed*** 2 failures. - -With -o ELLIPSIS specified, the second run, against just the first file, should -produce no errors, and with -o NORMALIZE_WHITESPACE also specified, neither -should the third, which ran against both files: - - >>> rc2, out2, err2 - (0, b'', b'') - >>> rc3, out3, err3 - (0, b'', b'') - -The fourth run uses FAIL_FAST, so we should see only one error: - - >>> rc4, err4 - (1, b'') - >>> print(normalize(out4)) # doctest: +ELLIPSIS - ********************************************************************** - File "...myfile.doc", line 4, in myfile.doc - Failed example: - "abcdef" - Expected: - 'a...f' - Got: - 'abcdef' - ********************************************************************** - 1 items had failures: - 1 of 2 in myfile.doc - ***Test Failed*** 1 failures. - -The fifth test uses verbose with the two options, so we should get verbose -success output for the tests in both files: - - >>> rc5, err5 - (0, b'') - >>> print(normalize(out5)) - Trying: - 1 + 1 - Expecting: - 2 - ok - Trying: - "abcdef" - Expecting: - 'a...f' - ok - Trying: - "ajkml" - Expecting: - 'a...l' - ok - 1 items passed all tests: - 3 tests in myfile.doc - 3 tests in 1 items. - 3 passed and 0 failed. - Test passed. 
- Trying: - 1 + 1 - Expecting: - 2 - ok - Trying: - "abc def" - Expecting: - 'abc def' - ok - 1 items had no tests: - myfile2 - 1 items passed all tests: - 2 tests in myfile2.test_func - 2 tests in 2 items. - 2 passed and 0 failed. - Test passed. - -We should also check some typical error cases. - -Invalid file name: - - >>> rc, out, err = script_helper.assert_python_failure( - ... '-m', 'doctest', 'nosuchfile') - >>> rc, out - (1, b'') - >>> # The exact error message changes depending on the platform. - >>> print(normalize(err)) # doctest: +ELLIPSIS - Traceback (most recent call last): - ... - FileNotFoundError: [Errno ...] ...nosuchfile... +if support.has_subprocess_support: + def test_CLI(): r""" + The doctest module can be used to run doctests against an arbitrary file. + These tests test this CLI functionality. + + We'll use the support module's script_helpers for this, and write a test files + to a temp dir to run the command against. Due to a current limitation in + script_helpers, though, we need a little utility function to turn the returned + output into something we can doctest against: + + >>> def normalize(s): + ... return '\n'.join(s.decode().splitlines()) + + With those preliminaries out of the way, we'll start with a file with two + simple tests and no errors. We'll run both the unadorned doctest command, and + the verbose version, and then check the output: + + >>> from test.support import script_helper + >>> from test.support.os_helper import temp_dir + >>> with temp_dir() as tmpdir: + ... fn = os.path.join(tmpdir, 'myfile.doc') + ... with open(fn, 'w', encoding='utf-8') as f: + ... _ = f.write('This is a very simple test file.\n') + ... _ = f.write(' >>> 1 + 1\n') + ... _ = f.write(' 2\n') + ... _ = f.write(' >>> "a"\n') + ... _ = f.write(" 'a'\n") + ... _ = f.write('\n') + ... _ = f.write('And that is it.\n') + ... rc1, out1, err1 = script_helper.assert_python_ok( + ... '-m', 'doctest', fn) + ... rc2, out2, err2 = script_helper.assert_python_ok( + ... '-m', 'doctest', '-v', fn) + + With no arguments and passing tests, we should get no output: + + >>> rc1, out1, err1 + (0, b'', b'') + + With the verbose flag, we should see the test output, but no error output: + + >>> rc2, err2 + (0, b'') + >>> print(normalize(out2)) + Trying: + 1 + 1 + Expecting: + 2 + ok + Trying: + "a" + Expecting: + 'a' + ok + 1 items passed all tests: + 2 tests in myfile.doc + 2 tests in 1 items. + 2 passed and 0 failed. + Test passed. + + Now we'll write a couple files, one with three tests, the other a python module + with two tests, both of the files having "errors" in the tests that can be made + non-errors by applying the appropriate doctest options to the run (ELLIPSIS in + the first file, NORMALIZE_WHITESPACE in the second). This combination will + allow thoroughly testing the -f and -o flags, as well as the doctest command's + ability to process more than one file on the command line and, since the second + file ends in '.py', its handling of python module files (as opposed to straight + text files). + + >>> from test.support import script_helper + >>> from test.support.os_helper import temp_dir + >>> with temp_dir() as tmpdir: + ... fn = os.path.join(tmpdir, 'myfile.doc') + ... with open(fn, 'w', encoding="utf-8") as f: + ... _ = f.write('This is another simple test file.\n') + ... _ = f.write(' >>> 1 + 1\n') + ... _ = f.write(' 2\n') + ... _ = f.write(' >>> "abcdef"\n') + ... _ = f.write(" 'a...f'\n") + ... _ = f.write(' >>> "ajkml"\n') + ... _ = f.write(" 'a...l'\n") + ... 
_ = f.write('\n') + ... _ = f.write('And that is it.\n') + ... fn2 = os.path.join(tmpdir, 'myfile2.py') + ... with open(fn2, 'w', encoding='utf-8') as f: + ... _ = f.write('def test_func():\n') + ... _ = f.write(' \"\"\"\n') + ... _ = f.write(' This is simple python test function.\n') + ... _ = f.write(' >>> 1 + 1\n') + ... _ = f.write(' 2\n') + ... _ = f.write(' >>> "abc def"\n') + ... _ = f.write(" 'abc def'\n") + ... _ = f.write("\n") + ... _ = f.write(' \"\"\"\n') + ... rc1, out1, err1 = script_helper.assert_python_failure( + ... '-m', 'doctest', fn, fn2) + ... rc2, out2, err2 = script_helper.assert_python_ok( + ... '-m', 'doctest', '-o', 'ELLIPSIS', fn) + ... rc3, out3, err3 = script_helper.assert_python_ok( + ... '-m', 'doctest', '-o', 'ELLIPSIS', + ... '-o', 'NORMALIZE_WHITESPACE', fn, fn2) + ... rc4, out4, err4 = script_helper.assert_python_failure( + ... '-m', 'doctest', '-f', fn, fn2) + ... rc5, out5, err5 = script_helper.assert_python_ok( + ... '-m', 'doctest', '-v', '-o', 'ELLIPSIS', + ... '-o', 'NORMALIZE_WHITESPACE', fn, fn2) + + Our first test run will show the errors from the first file (doctest stops if a + file has errors). Note that doctest test-run error output appears on stdout, + not stderr: + + >>> rc1, err1 + (1, b'') + >>> print(normalize(out1)) # doctest: +ELLIPSIS + ********************************************************************** + File "...myfile.doc", line 4, in myfile.doc + Failed example: + "abcdef" + Expected: + 'a...f' + Got: + 'abcdef' + ********************************************************************** + File "...myfile.doc", line 6, in myfile.doc + Failed example: + "ajkml" + Expected: + 'a...l' + Got: + 'ajkml' + ********************************************************************** + 1 items had failures: + 2 of 3 in myfile.doc + ***Test Failed*** 2 failures. + + With -o ELLIPSIS specified, the second run, against just the first file, should + produce no errors, and with -o NORMALIZE_WHITESPACE also specified, neither + should the third, which ran against both files: + + >>> rc2, out2, err2 + (0, b'', b'') + >>> rc3, out3, err3 + (0, b'', b'') + + The fourth run uses FAIL_FAST, so we should see only one error: + + >>> rc4, err4 + (1, b'') + >>> print(normalize(out4)) # doctest: +ELLIPSIS + ********************************************************************** + File "...myfile.doc", line 4, in myfile.doc + Failed example: + "abcdef" + Expected: + 'a...f' + Got: + 'abcdef' + ********************************************************************** + 1 items had failures: + 1 of 2 in myfile.doc + ***Test Failed*** 1 failures. + + The fifth test uses verbose with the two options, so we should get verbose + success output for the tests in both files: + + >>> rc5, err5 + (0, b'') + >>> print(normalize(out5)) + Trying: + 1 + 1 + Expecting: + 2 + ok + Trying: + "abcdef" + Expecting: + 'a...f' + ok + Trying: + "ajkml" + Expecting: + 'a...l' + ok + 1 items passed all tests: + 3 tests in myfile.doc + 3 tests in 1 items. + 3 passed and 0 failed. + Test passed. + Trying: + 1 + 1 + Expecting: + 2 + ok + Trying: + "abc def" + Expecting: + 'abc def' + ok + 1 items had no tests: + myfile2 + 1 items passed all tests: + 2 tests in myfile2.test_func + 2 tests in 2 items. + 2 passed and 0 failed. + Test passed. + + We should also check some typical error cases. + + Invalid file name: + + >>> rc, out, err = script_helper.assert_python_failure( + ... 
'-m', 'doctest', 'nosuchfile') + >>> rc, out + (1, b'') + >>> # The exact error message changes depending on the platform. + >>> print(normalize(err)) # doctest: +ELLIPSIS + Traceback (most recent call last): + ... + FileNotFoundError: [Errno ...] ...nosuchfile... -Invalid doctest option: + Invalid doctest option: - >>> rc, out, err = script_helper.assert_python_failure( - ... '-m', 'doctest', '-o', 'nosuchoption') - >>> rc, out - (2, b'') - >>> print(normalize(err)) # doctest: +ELLIPSIS - usage...invalid...nosuchoption... + >>> rc, out, err = script_helper.assert_python_failure( + ... '-m', 'doctest', '-o', 'nosuchoption') + >>> rc, out + (2, b'') + >>> print(normalize(err)) # doctest: +ELLIPSIS + usage...invalid...nosuchoption... -""" + """ def test_no_trailing_whitespace_stripping(): r""" From 7d4f0fb856bf32fd5baf29db287908e7f1e6c674 Mon Sep 17 00:00:00 2001 From: Malcolm Smith Date: Tue, 19 Mar 2024 12:47:51 +0000 Subject: [PATCH 2/6] Fix whitespace --- Lib/test/test_doctest/test_doctest.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Lib/test/test_doctest/test_doctest.py b/Lib/test/test_doctest/test_doctest.py index bf3c9660ece2cb..f692808b84e391 100644 --- a/Lib/test/test_doctest/test_doctest.py +++ b/Lib/test/test_doctest/test_doctest.py @@ -2995,7 +2995,7 @@ def test_CLI(): r""" 'a' ok 1 items passed all tests: - 2 tests in myfile.doc + 2 tests in myfile.doc 2 tests in 1 items. 2 passed and 0 failed. Test passed. @@ -3072,7 +3072,7 @@ def test_CLI(): r""" 'ajkml' ********************************************************************** 1 items had failures: - 2 of 3 in myfile.doc + 2 of 3 in myfile.doc ***Test Failed*** 2 failures. With -o ELLIPSIS specified, the second run, against just the first file, should @@ -3099,7 +3099,7 @@ def test_CLI(): r""" 'abcdef' ********************************************************************** 1 items had failures: - 1 of 2 in myfile.doc + 1 of 2 in myfile.doc ***Test Failed*** 1 failures. The fifth test uses verbose with the two options, so we should get verbose @@ -3124,7 +3124,7 @@ def test_CLI(): r""" 'a...l' ok 1 items passed all tests: - 3 tests in myfile.doc + 3 tests in myfile.doc 3 tests in 1 items. 3 passed and 0 failed. Test passed. @@ -3141,7 +3141,7 @@ def test_CLI(): r""" 1 items had no tests: myfile2 1 items passed all tests: - 2 tests in myfile2.test_func + 2 tests in myfile2.test_func 2 tests in 2 items. 2 passed and 0 failed. Test passed. From ffee805d1ae47d9e8785715df39f6de1d1856117 Mon Sep 17 00:00:00 2001 From: Malcolm Smith Date: Thu, 21 Mar 2024 18:43:35 +0000 Subject: [PATCH 3/6] Switch to a doctest_skip_if decorator --- Lib/test/test_doctest/test_doctest.py | 464 +++++++++++++------------- 1 file changed, 236 insertions(+), 228 deletions(-) diff --git a/Lib/test/test_doctest/test_doctest.py b/Lib/test/test_doctest/test_doctest.py index f692808b84e391..aa53e9cce63545 100644 --- a/Lib/test/test_doctest/test_doctest.py +++ b/Lib/test/test_doctest/test_doctest.py @@ -18,6 +18,14 @@ import contextlib +def doctest_skip_if(condition): + def decorator(func): + if condition: + func.__doc__ = None + return func + return decorator + + # NOTE: There are some additional tests relating to interaction with # zipimport in the test_zipimport_support test module. # There are also related tests in `test_doctest2` module. 
@@ -462,7 +470,7 @@ def basics(): r""" >>> tests = finder.find(sample_func) >>> print(tests) # doctest: +ELLIPSIS - [] + [] The exact name depends on how test_doctest was invoked, so allow for leading path components. @@ -2940,236 +2948,236 @@ def test_unicode(): """ TestResults(failed=1, attempted=1) """ -if support.has_subprocess_support: - def test_CLI(): r""" - The doctest module can be used to run doctests against an arbitrary file. - These tests test this CLI functionality. - - We'll use the support module's script_helpers for this, and write a test files - to a temp dir to run the command against. Due to a current limitation in - script_helpers, though, we need a little utility function to turn the returned - output into something we can doctest against: - - >>> def normalize(s): - ... return '\n'.join(s.decode().splitlines()) - - With those preliminaries out of the way, we'll start with a file with two - simple tests and no errors. We'll run both the unadorned doctest command, and - the verbose version, and then check the output: - - >>> from test.support import script_helper - >>> from test.support.os_helper import temp_dir - >>> with temp_dir() as tmpdir: - ... fn = os.path.join(tmpdir, 'myfile.doc') - ... with open(fn, 'w', encoding='utf-8') as f: - ... _ = f.write('This is a very simple test file.\n') - ... _ = f.write(' >>> 1 + 1\n') - ... _ = f.write(' 2\n') - ... _ = f.write(' >>> "a"\n') - ... _ = f.write(" 'a'\n") - ... _ = f.write('\n') - ... _ = f.write('And that is it.\n') - ... rc1, out1, err1 = script_helper.assert_python_ok( - ... '-m', 'doctest', fn) - ... rc2, out2, err2 = script_helper.assert_python_ok( - ... '-m', 'doctest', '-v', fn) - - With no arguments and passing tests, we should get no output: - - >>> rc1, out1, err1 - (0, b'', b'') - - With the verbose flag, we should see the test output, but no error output: - - >>> rc2, err2 - (0, b'') - >>> print(normalize(out2)) - Trying: - 1 + 1 - Expecting: - 2 - ok - Trying: - "a" - Expecting: - 'a' - ok - 1 items passed all tests: - 2 tests in myfile.doc - 2 tests in 1 items. - 2 passed and 0 failed. - Test passed. - - Now we'll write a couple files, one with three tests, the other a python module - with two tests, both of the files having "errors" in the tests that can be made - non-errors by applying the appropriate doctest options to the run (ELLIPSIS in - the first file, NORMALIZE_WHITESPACE in the second). This combination will - allow thoroughly testing the -f and -o flags, as well as the doctest command's - ability to process more than one file on the command line and, since the second - file ends in '.py', its handling of python module files (as opposed to straight - text files). - - >>> from test.support import script_helper - >>> from test.support.os_helper import temp_dir - >>> with temp_dir() as tmpdir: - ... fn = os.path.join(tmpdir, 'myfile.doc') - ... with open(fn, 'w', encoding="utf-8") as f: - ... _ = f.write('This is another simple test file.\n') - ... _ = f.write(' >>> 1 + 1\n') - ... _ = f.write(' 2\n') - ... _ = f.write(' >>> "abcdef"\n') - ... _ = f.write(" 'a...f'\n") - ... _ = f.write(' >>> "ajkml"\n') - ... _ = f.write(" 'a...l'\n") - ... _ = f.write('\n') - ... _ = f.write('And that is it.\n') - ... fn2 = os.path.join(tmpdir, 'myfile2.py') - ... with open(fn2, 'w', encoding='utf-8') as f: - ... _ = f.write('def test_func():\n') - ... _ = f.write(' \"\"\"\n') - ... _ = f.write(' This is simple python test function.\n') - ... _ = f.write(' >>> 1 + 1\n') - ... _ = f.write(' 2\n') - ... 
_ = f.write(' >>> "abc def"\n') - ... _ = f.write(" 'abc def'\n") - ... _ = f.write("\n") - ... _ = f.write(' \"\"\"\n') - ... rc1, out1, err1 = script_helper.assert_python_failure( - ... '-m', 'doctest', fn, fn2) - ... rc2, out2, err2 = script_helper.assert_python_ok( - ... '-m', 'doctest', '-o', 'ELLIPSIS', fn) - ... rc3, out3, err3 = script_helper.assert_python_ok( - ... '-m', 'doctest', '-o', 'ELLIPSIS', - ... '-o', 'NORMALIZE_WHITESPACE', fn, fn2) - ... rc4, out4, err4 = script_helper.assert_python_failure( - ... '-m', 'doctest', '-f', fn, fn2) - ... rc5, out5, err5 = script_helper.assert_python_ok( - ... '-m', 'doctest', '-v', '-o', 'ELLIPSIS', - ... '-o', 'NORMALIZE_WHITESPACE', fn, fn2) - - Our first test run will show the errors from the first file (doctest stops if a - file has errors). Note that doctest test-run error output appears on stdout, - not stderr: - - >>> rc1, err1 - (1, b'') - >>> print(normalize(out1)) # doctest: +ELLIPSIS - ********************************************************************** - File "...myfile.doc", line 4, in myfile.doc - Failed example: - "abcdef" - Expected: - 'a...f' - Got: - 'abcdef' - ********************************************************************** - File "...myfile.doc", line 6, in myfile.doc - Failed example: - "ajkml" - Expected: - 'a...l' - Got: - 'ajkml' - ********************************************************************** - 1 items had failures: - 2 of 3 in myfile.doc - ***Test Failed*** 2 failures. - - With -o ELLIPSIS specified, the second run, against just the first file, should - produce no errors, and with -o NORMALIZE_WHITESPACE also specified, neither - should the third, which ran against both files: - - >>> rc2, out2, err2 - (0, b'', b'') - >>> rc3, out3, err3 - (0, b'', b'') - - The fourth run uses FAIL_FAST, so we should see only one error: - - >>> rc4, err4 - (1, b'') - >>> print(normalize(out4)) # doctest: +ELLIPSIS - ********************************************************************** - File "...myfile.doc", line 4, in myfile.doc - Failed example: - "abcdef" - Expected: - 'a...f' - Got: - 'abcdef' - ********************************************************************** - 1 items had failures: - 1 of 2 in myfile.doc - ***Test Failed*** 1 failures. - - The fifth test uses verbose with the two options, so we should get verbose - success output for the tests in both files: - - >>> rc5, err5 - (0, b'') - >>> print(normalize(out5)) - Trying: - 1 + 1 - Expecting: - 2 - ok - Trying: - "abcdef" - Expecting: - 'a...f' - ok - Trying: - "ajkml" - Expecting: - 'a...l' - ok - 1 items passed all tests: - 3 tests in myfile.doc - 3 tests in 1 items. - 3 passed and 0 failed. - Test passed. - Trying: - 1 + 1 - Expecting: - 2 - ok - Trying: - "abc def" - Expecting: - 'abc def' - ok - 1 items had no tests: - myfile2 - 1 items passed all tests: - 2 tests in myfile2.test_func - 2 tests in 2 items. - 2 passed and 0 failed. - Test passed. - - We should also check some typical error cases. - - Invalid file name: - - >>> rc, out, err = script_helper.assert_python_failure( - ... '-m', 'doctest', 'nosuchfile') - >>> rc, out - (1, b'') - >>> # The exact error message changes depending on the platform. - >>> print(normalize(err)) # doctest: +ELLIPSIS - Traceback (most recent call last): - ... - FileNotFoundError: [Errno ...] ...nosuchfile... +@doctest_skip_if(not support.has_subprocess_support) +def test_CLI(): r""" +The doctest module can be used to run doctests against an arbitrary file. +These tests test this CLI functionality. 
+ +We'll use the support module's script_helpers for this, and write a test files +to a temp dir to run the command against. Due to a current limitation in +script_helpers, though, we need a little utility function to turn the returned +output into something we can doctest against: + + >>> def normalize(s): + ... return '\n'.join(s.decode().splitlines()) + +With those preliminaries out of the way, we'll start with a file with two +simple tests and no errors. We'll run both the unadorned doctest command, and +the verbose version, and then check the output: + + >>> from test.support import script_helper + >>> from test.support.os_helper import temp_dir + >>> with temp_dir() as tmpdir: + ... fn = os.path.join(tmpdir, 'myfile.doc') + ... with open(fn, 'w', encoding='utf-8') as f: + ... _ = f.write('This is a very simple test file.\n') + ... _ = f.write(' >>> 1 + 1\n') + ... _ = f.write(' 2\n') + ... _ = f.write(' >>> "a"\n') + ... _ = f.write(" 'a'\n") + ... _ = f.write('\n') + ... _ = f.write('And that is it.\n') + ... rc1, out1, err1 = script_helper.assert_python_ok( + ... '-m', 'doctest', fn) + ... rc2, out2, err2 = script_helper.assert_python_ok( + ... '-m', 'doctest', '-v', fn) + +With no arguments and passing tests, we should get no output: + + >>> rc1, out1, err1 + (0, b'', b'') + +With the verbose flag, we should see the test output, but no error output: + + >>> rc2, err2 + (0, b'') + >>> print(normalize(out2)) + Trying: + 1 + 1 + Expecting: + 2 + ok + Trying: + "a" + Expecting: + 'a' + ok + 1 items passed all tests: + 2 tests in myfile.doc + 2 tests in 1 items. + 2 passed and 0 failed. + Test passed. - Invalid doctest option: +Now we'll write a couple files, one with three tests, the other a python module +with two tests, both of the files having "errors" in the tests that can be made +non-errors by applying the appropriate doctest options to the run (ELLIPSIS in +the first file, NORMALIZE_WHITESPACE in the second). This combination will +allow thoroughly testing the -f and -o flags, as well as the doctest command's +ability to process more than one file on the command line and, since the second +file ends in '.py', its handling of python module files (as opposed to straight +text files). + + >>> from test.support import script_helper + >>> from test.support.os_helper import temp_dir + >>> with temp_dir() as tmpdir: + ... fn = os.path.join(tmpdir, 'myfile.doc') + ... with open(fn, 'w', encoding="utf-8") as f: + ... _ = f.write('This is another simple test file.\n') + ... _ = f.write(' >>> 1 + 1\n') + ... _ = f.write(' 2\n') + ... _ = f.write(' >>> "abcdef"\n') + ... _ = f.write(" 'a...f'\n") + ... _ = f.write(' >>> "ajkml"\n') + ... _ = f.write(" 'a...l'\n") + ... _ = f.write('\n') + ... _ = f.write('And that is it.\n') + ... fn2 = os.path.join(tmpdir, 'myfile2.py') + ... with open(fn2, 'w', encoding='utf-8') as f: + ... _ = f.write('def test_func():\n') + ... _ = f.write(' \"\"\"\n') + ... _ = f.write(' This is simple python test function.\n') + ... _ = f.write(' >>> 1 + 1\n') + ... _ = f.write(' 2\n') + ... _ = f.write(' >>> "abc def"\n') + ... _ = f.write(" 'abc def'\n") + ... _ = f.write("\n") + ... _ = f.write(' \"\"\"\n') + ... rc1, out1, err1 = script_helper.assert_python_failure( + ... '-m', 'doctest', fn, fn2) + ... rc2, out2, err2 = script_helper.assert_python_ok( + ... '-m', 'doctest', '-o', 'ELLIPSIS', fn) + ... rc3, out3, err3 = script_helper.assert_python_ok( + ... '-m', 'doctest', '-o', 'ELLIPSIS', + ... '-o', 'NORMALIZE_WHITESPACE', fn, fn2) + ... 
rc4, out4, err4 = script_helper.assert_python_failure( + ... '-m', 'doctest', '-f', fn, fn2) + ... rc5, out5, err5 = script_helper.assert_python_ok( + ... '-m', 'doctest', '-v', '-o', 'ELLIPSIS', + ... '-o', 'NORMALIZE_WHITESPACE', fn, fn2) + +Our first test run will show the errors from the first file (doctest stops if a +file has errors). Note that doctest test-run error output appears on stdout, +not stderr: + + >>> rc1, err1 + (1, b'') + >>> print(normalize(out1)) # doctest: +ELLIPSIS + ********************************************************************** + File "...myfile.doc", line 4, in myfile.doc + Failed example: + "abcdef" + Expected: + 'a...f' + Got: + 'abcdef' + ********************************************************************** + File "...myfile.doc", line 6, in myfile.doc + Failed example: + "ajkml" + Expected: + 'a...l' + Got: + 'ajkml' + ********************************************************************** + 1 items had failures: + 2 of 3 in myfile.doc + ***Test Failed*** 2 failures. - >>> rc, out, err = script_helper.assert_python_failure( - ... '-m', 'doctest', '-o', 'nosuchoption') - >>> rc, out - (2, b'') - >>> print(normalize(err)) # doctest: +ELLIPSIS - usage...invalid...nosuchoption... +With -o ELLIPSIS specified, the second run, against just the first file, should +produce no errors, and with -o NORMALIZE_WHITESPACE also specified, neither +should the third, which ran against both files: - """ + >>> rc2, out2, err2 + (0, b'', b'') + >>> rc3, out3, err3 + (0, b'', b'') + +The fourth run uses FAIL_FAST, so we should see only one error: + + >>> rc4, err4 + (1, b'') + >>> print(normalize(out4)) # doctest: +ELLIPSIS + ********************************************************************** + File "...myfile.doc", line 4, in myfile.doc + Failed example: + "abcdef" + Expected: + 'a...f' + Got: + 'abcdef' + ********************************************************************** + 1 items had failures: + 1 of 2 in myfile.doc + ***Test Failed*** 1 failures. + +The fifth test uses verbose with the two options, so we should get verbose +success output for the tests in both files: + + >>> rc5, err5 + (0, b'') + >>> print(normalize(out5)) + Trying: + 1 + 1 + Expecting: + 2 + ok + Trying: + "abcdef" + Expecting: + 'a...f' + ok + Trying: + "ajkml" + Expecting: + 'a...l' + ok + 1 items passed all tests: + 3 tests in myfile.doc + 3 tests in 1 items. + 3 passed and 0 failed. + Test passed. + Trying: + 1 + 1 + Expecting: + 2 + ok + Trying: + "abc def" + Expecting: + 'abc def' + ok + 1 items had no tests: + myfile2 + 1 items passed all tests: + 2 tests in myfile2.test_func + 2 tests in 2 items. + 2 passed and 0 failed. + Test passed. + +We should also check some typical error cases. + +Invalid file name: + + >>> rc, out, err = script_helper.assert_python_failure( + ... '-m', 'doctest', 'nosuchfile') + >>> rc, out + (1, b'') + >>> # The exact error message changes depending on the platform. + >>> print(normalize(err)) # doctest: +ELLIPSIS + Traceback (most recent call last): + ... + FileNotFoundError: [Errno ...] ...nosuchfile... + +Invalid doctest option: + + >>> rc, out, err = script_helper.assert_python_failure( + ... '-m', 'doctest', '-o', 'nosuchoption') + >>> rc, out + (2, b'') + >>> print(normalize(err)) # doctest: +ELLIPSIS + usage...invalid...nosuchoption... 
+ +""" def test_no_trailing_whitespace_stripping(): r""" From d0225380eefbd97520590ccaedef3891c9dd0977 Mon Sep 17 00:00:00 2001 From: Malcolm Smith Date: Wed, 27 Mar 2024 16:53:27 +0000 Subject: [PATCH 4/6] Make DocTestCase report as skipped if all examples in the doctest are skipped --- Doc/library/doctest.rst | 6 ++- Lib/doctest.py | 7 ++-- Lib/test/test_doctest/sample_doctest_skip.py | 37 +++++++++++++++++++ Lib/test/test_doctest/test_doctest.py | 24 +++++++++++- Lib/test/test_doctest/test_doctest_skip.txt | 4 ++ ...-03-27-16-43-42.gh-issue-117294.wbXNFv.rst | 2 + 6 files changed, 74 insertions(+), 6 deletions(-) create mode 100644 Lib/test/test_doctest/sample_doctest_skip.py create mode 100644 Lib/test/test_doctest/test_doctest_skip.txt create mode 100644 Misc/NEWS.d/next/Library/2024-03-27-16-43-42.gh-issue-117294.wbXNFv.rst diff --git a/Doc/library/doctest.rst b/Doc/library/doctest.rst index 835a3a76806148..d3dcf9c4e5743c 100644 --- a/Doc/library/doctest.rst +++ b/Doc/library/doctest.rst @@ -1021,7 +1021,8 @@ from text files and modules with doctests: and runs the interactive examples in each file. If an example in any file fails, then the synthesized unit test fails, and a :exc:`failureException` exception is raised showing the name of the file containing the test and a - (sometimes approximate) line number. + (sometimes approximate) line number. If all the examples in a file are + skipped, then the synthesized unit test is also marked as skipped. Pass one or more paths (as strings) to text files to be examined. @@ -1087,7 +1088,8 @@ from text files and modules with doctests: and runs each doctest in the module. If any of the doctests fail, then the synthesized unit test fails, and a :exc:`failureException` exception is raised showing the name of the file containing the test and a (sometimes approximate) - line number. + line number. If all the examples in a docstring are skipped, then the + synthesized unit test is also marked as skipped. Optional argument *module* provides the module to be tested. It can be a module object or a (possibly dotted) module name. If not specified, the module calling diff --git a/Lib/doctest.py b/Lib/doctest.py index 6049423b5147a5..cac50f9d23439a 100644 --- a/Lib/doctest.py +++ b/Lib/doctest.py @@ -2262,12 +2262,13 @@ def runTest(self): try: runner.DIVIDER = "-"*70 - failures, tries = runner.run( - test, out=new.write, clear_globs=False) + results = runner.run(test, out=new.write, clear_globs=False) + if results.skipped == results.attempted: + raise unittest.SkipTest(f"all examples were skipped") finally: sys.stdout = old - if failures: + if results.failed: raise self.failureException(self.format_failure(new.getvalue())) def format_failure(self, err): diff --git a/Lib/test/test_doctest/sample_doctest_skip.py b/Lib/test/test_doctest/sample_doctest_skip.py new file mode 100644 index 00000000000000..b140cb2a4a70a2 --- /dev/null +++ b/Lib/test/test_doctest/sample_doctest_skip.py @@ -0,0 +1,37 @@ +"""This is a sample module used for testing doctest. + +This module includes various scenarios involving skips. +""" + +# This test will pass. +def no_skip(): + """ + >>> 2 + 2 + 4 + """ + +def single_skip(): + """ + >>> 2 + 2 # doctest: +SKIP + 4 + """ + +def double_skip(): + """ + >>> 2 + 2 # doctest: +SKIP + 4 + >>> 3 + 3 # doctest: +SKIP + 6 + """ + +# This test will fail. 
+def partial_skip(): + """ + >>> 2 + 2 # doctest: +SKIP + 4 + >>> 2 + 2 + 5 + """ + +def no_examples(): + """A docstring with no examples should not be counted as a skip.""" diff --git a/Lib/test/test_doctest/test_doctest.py b/Lib/test/test_doctest/test_doctest.py index aa53e9cce63545..25d7f4822e9afb 100644 --- a/Lib/test/test_doctest/test_doctest.py +++ b/Lib/test/test_doctest/test_doctest.py @@ -21,7 +21,7 @@ def doctest_skip_if(condition): def decorator(func): if condition: - func.__doc__ = None + func.__doc__ = ">>> pass # doctest: +SKIP" return func return decorator @@ -2251,6 +2251,16 @@ def test_DocTestSuite(): >>> suite.run(unittest.TestResult()) + If all examples in a docstring are skipped, unittest will report it as a + skipped test: + + >>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest_skip') + >>> result = suite.run(unittest.TestResult()) + >>> result + + >>> len(result.skipped) + 2 + We can use the current module: >>> suite = test.test_doctest.sample_doctest.test_suite() @@ -2422,6 +2432,18 @@ def test_DocFileSuite(): Traceback (most recent call last): ValueError: Package may only be specified for module-relative paths. + If all examples in a file are skipped, unittest will report it as a + skipped test: + + >>> suite = doctest.DocFileSuite('test_doctest.txt', + ... 'test_doctest4.txt', + ... 'test_doctest_skip.txt') + >>> result = suite.run(unittest.TestResult()) + >>> result + + >>> len(result.skipped) + 1 + You can specify initial global variables: >>> suite = doctest.DocFileSuite('test_doctest.txt', diff --git a/Lib/test/test_doctest/test_doctest_skip.txt b/Lib/test/test_doctest/test_doctest_skip.txt new file mode 100644 index 00000000000000..f340e2b8141253 --- /dev/null +++ b/Lib/test/test_doctest/test_doctest_skip.txt @@ -0,0 +1,4 @@ +This is a sample doctest in a text file, in which all examples are skipped. + + >>> 2 + 2 # doctest: +SKIP + 5 diff --git a/Misc/NEWS.d/next/Library/2024-03-27-16-43-42.gh-issue-117294.wbXNFv.rst b/Misc/NEWS.d/next/Library/2024-03-27-16-43-42.gh-issue-117294.wbXNFv.rst new file mode 100644 index 00000000000000..bb351e6399a765 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2024-03-27-16-43-42.gh-issue-117294.wbXNFv.rst @@ -0,0 +1,2 @@ +A ``DocTestCase`` now reports as skipped if all examples in the doctest are +skipped. From 405d6790a9859d45ccdc7d3ddd224a9292ff2153 Mon Sep 17 00:00:00 2001 From: Malcolm Smith Date: Wed, 27 Mar 2024 18:59:15 +0000 Subject: [PATCH 5/6] Move most of previous commit into #117297 --- Doc/library/doctest.rst | 6 +-- Lib/doctest.py | 7 ++-- Lib/test/test_doctest/sample_doctest_skip.py | 37 ------------------- Lib/test/test_doctest/test_doctest.py | 22 ----------- Lib/test/test_doctest/test_doctest_skip.txt | 4 -- ...-03-27-16-43-42.gh-issue-117294.wbXNFv.rst | 2 - 6 files changed, 5 insertions(+), 73 deletions(-) delete mode 100644 Lib/test/test_doctest/sample_doctest_skip.py delete mode 100644 Lib/test/test_doctest/test_doctest_skip.txt delete mode 100644 Misc/NEWS.d/next/Library/2024-03-27-16-43-42.gh-issue-117294.wbXNFv.rst diff --git a/Doc/library/doctest.rst b/Doc/library/doctest.rst index d3dcf9c4e5743c..835a3a76806148 100644 --- a/Doc/library/doctest.rst +++ b/Doc/library/doctest.rst @@ -1021,8 +1021,7 @@ from text files and modules with doctests: and runs the interactive examples in each file. 
If an example in any file fails, then the synthesized unit test fails, and a :exc:`failureException` exception is raised showing the name of the file containing the test and a - (sometimes approximate) line number. If all the examples in a file are - skipped, then the synthesized unit test is also marked as skipped. + (sometimes approximate) line number. Pass one or more paths (as strings) to text files to be examined. @@ -1088,8 +1087,7 @@ from text files and modules with doctests: and runs each doctest in the module. If any of the doctests fail, then the synthesized unit test fails, and a :exc:`failureException` exception is raised showing the name of the file containing the test and a (sometimes approximate) - line number. If all the examples in a docstring are skipped, then the - synthesized unit test is also marked as skipped. + line number. Optional argument *module* provides the module to be tested. It can be a module object or a (possibly dotted) module name. If not specified, the module calling diff --git a/Lib/doctest.py b/Lib/doctest.py index cac50f9d23439a..6049423b5147a5 100644 --- a/Lib/doctest.py +++ b/Lib/doctest.py @@ -2262,13 +2262,12 @@ def runTest(self): try: runner.DIVIDER = "-"*70 - results = runner.run(test, out=new.write, clear_globs=False) - if results.skipped == results.attempted: - raise unittest.SkipTest(f"all examples were skipped") + failures, tries = runner.run( + test, out=new.write, clear_globs=False) finally: sys.stdout = old - if results.failed: + if failures: raise self.failureException(self.format_failure(new.getvalue())) def format_failure(self, err): diff --git a/Lib/test/test_doctest/sample_doctest_skip.py b/Lib/test/test_doctest/sample_doctest_skip.py deleted file mode 100644 index b140cb2a4a70a2..00000000000000 --- a/Lib/test/test_doctest/sample_doctest_skip.py +++ /dev/null @@ -1,37 +0,0 @@ -"""This is a sample module used for testing doctest. - -This module includes various scenarios involving skips. -""" - -# This test will pass. -def no_skip(): - """ - >>> 2 + 2 - 4 - """ - -def single_skip(): - """ - >>> 2 + 2 # doctest: +SKIP - 4 - """ - -def double_skip(): - """ - >>> 2 + 2 # doctest: +SKIP - 4 - >>> 3 + 3 # doctest: +SKIP - 6 - """ - -# This test will fail. -def partial_skip(): - """ - >>> 2 + 2 # doctest: +SKIP - 4 - >>> 2 + 2 - 5 - """ - -def no_examples(): - """A docstring with no examples should not be counted as a skip.""" diff --git a/Lib/test/test_doctest/test_doctest.py b/Lib/test/test_doctest/test_doctest.py index 25d7f4822e9afb..a7288e643ab507 100644 --- a/Lib/test/test_doctest/test_doctest.py +++ b/Lib/test/test_doctest/test_doctest.py @@ -2251,16 +2251,6 @@ def test_DocTestSuite(): >>> suite.run(unittest.TestResult()) - If all examples in a docstring are skipped, unittest will report it as a - skipped test: - - >>> suite = doctest.DocTestSuite('test.test_doctest.sample_doctest_skip') - >>> result = suite.run(unittest.TestResult()) - >>> result - - >>> len(result.skipped) - 2 - We can use the current module: >>> suite = test.test_doctest.sample_doctest.test_suite() @@ -2432,18 +2422,6 @@ def test_DocFileSuite(): Traceback (most recent call last): ValueError: Package may only be specified for module-relative paths. - If all examples in a file are skipped, unittest will report it as a - skipped test: - - >>> suite = doctest.DocFileSuite('test_doctest.txt', - ... 'test_doctest4.txt', - ... 
'test_doctest_skip.txt') - >>> result = suite.run(unittest.TestResult()) - >>> result - - >>> len(result.skipped) - 1 - You can specify initial global variables: >>> suite = doctest.DocFileSuite('test_doctest.txt', diff --git a/Lib/test/test_doctest/test_doctest_skip.txt b/Lib/test/test_doctest/test_doctest_skip.txt deleted file mode 100644 index f340e2b8141253..00000000000000 --- a/Lib/test/test_doctest/test_doctest_skip.txt +++ /dev/null @@ -1,4 +0,0 @@ -This is a sample doctest in a text file, in which all examples are skipped. - - >>> 2 + 2 # doctest: +SKIP - 5 diff --git a/Misc/NEWS.d/next/Library/2024-03-27-16-43-42.gh-issue-117294.wbXNFv.rst b/Misc/NEWS.d/next/Library/2024-03-27-16-43-42.gh-issue-117294.wbXNFv.rst deleted file mode 100644 index bb351e6399a765..00000000000000 --- a/Misc/NEWS.d/next/Library/2024-03-27-16-43-42.gh-issue-117294.wbXNFv.rst +++ /dev/null @@ -1,2 +0,0 @@ -A ``DocTestCase`` now reports as skipped if all examples in the doctest are -skipped. From f9e96875877194be04565f9d77eaacb484a6bf40 Mon Sep 17 00:00:00 2001 From: Malcolm Smith Date: Sun, 31 Mar 2024 11:36:12 +0100 Subject: [PATCH 6/6] Don't modify docstrings when running in optimized mode Co-authored-by: Nikita Sobolev --- Lib/test/test_doctest/test_doctest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Lib/test/test_doctest/test_doctest.py b/Lib/test/test_doctest/test_doctest.py index a7288e643ab507..3c0820d7232f68 100644 --- a/Lib/test/test_doctest/test_doctest.py +++ b/Lib/test/test_doctest/test_doctest.py @@ -20,7 +20,7 @@ def doctest_skip_if(condition): def decorator(func): - if condition: + if condition and support.HAVE_DOCSTRINGS: func.__doc__ = ">>> pass # doctest: +SKIP" return func return decorator
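
A note on the shape the helper settles into by the end of the series:
`doctest_skip_if` replaces the decorated function's docstring with a single
always-skipped example, but only when docstrings exist at all. Under
`python -OO` docstrings are stripped, so unconditionally assigning one would
introduce a doctest that the optimized run otherwise would not have. Below is
a minimal, self-contained sketch of that behavior, not the patch itself:
`HAVE_DOCSTRINGS` approximates `test.support.HAVE_DOCSTRINGS`, and `test_demo`
is a hypothetical stand-in for `test_CLI`.

    import doctest
    import sys

    # Approximates test.support.HAVE_DOCSTRINGS: docstrings are stripped
    # when running under `python -OO` (sys.flags.optimize >= 2).
    HAVE_DOCSTRINGS = sys.flags.optimize < 2

    def doctest_skip_if(condition):
        # When the condition holds (and docstrings exist), swap the
        # docstring for one skipped example, so the runner reports a skip
        # instead of running examples that cannot work on this platform.
        def decorator(func):
            if condition and HAVE_DOCSTRINGS:
                func.__doc__ = ">>> pass # doctest: +SKIP"
            return func
        return decorator

    @doctest_skip_if(True)  # in the patch: not support.has_subprocess_support
    def test_demo():
        """
        >>> 1 + 1
        2
        """

    if __name__ == "__main__":
        # With the condition true, the 1 + 1 example never runs; only the
        # replacement example is collected, and it is skipped.
        print(doctest.testmod())

Compared with assigning `__doc__ = None` (the decorator's first version in
patch 3), the skipped-example form introduced in patch 4 keeps the test
visible to collectors such as `DocTestSuite`, so a disabled platform shows up
as a skip rather than silently contributing no tests.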
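
Patch 4 (most of which subsequently moved to #117297, per patch 5's message)
keys the new skip reporting off the `TestResults` value returned by
`DocTestRunner.run`: when all examples in a doctest were skipped,
`DocTestCase.runTest` raises `unittest.SkipTest`. The sketch below is a small
way to observe the two counts that check compares; it is an illustration, not
the patch's own test, and it assumes Python 3.13+, where `TestResults` gained
a `skipped` field (older versions return a plain `(failed, attempted)` pair,
so the attribute access would fail there).

    import doctest

    # Two examples, both marked SKIP, parsed into a standalone DocTest.
    source = """
    >>> 2 + 2  # doctest: +SKIP
    4
    >>> 3 + 3  # doctest: +SKIP
    6
    """

    parser = doctest.DocTestParser()
    test = parser.get_doctest(source, globs={}, name="all_skipped",
                              filename="<sketch>", lineno=0)
    runner = doctest.DocTestRunner(verbose=False)
    results = runner.run(test)

    # The patched runTest raises unittest.SkipTest when these two counts
    # are equal, i.e. every attempted example was skipped.
    print(results.skipped, results.attempted)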