bpo-44708: Only re-run test methods that match names of previously failing test methods #27287
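In practice, this changes regrtest's "re-run failed tests" step: instead of re-running an entire failing test file, the re-run is narrowed to the test methods that actually failed, by feeding their names into the existing --match machinery. Expressed by hand at the command line, the new behavior is roughly equivalent to the following (the method names here are made up for illustration):

```
# Old behavior: one failing method triggered a full re-run of test_os.
# New behavior, approximately:
./python -m test test_os -v --match test_chown --match test_copy_file_range
```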
Lib/test/libregrtest/main.py

@@ -11,10 +11,10 @@
 import unittest
 from test.libregrtest.cmdline import _parse_args
 from test.libregrtest.runtest import (
-    findtests, runtest, get_abs_module,
-    STDTESTS, NOTTESTS, PASSED, FAILED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED,
-    INTERRUPTED, CHILD_ERROR, TEST_DID_NOT_RUN, TIMEOUT,
-    PROGRESS_MIN_TIME, format_test_result, is_failed)
+    findtests, runtest, get_abs_module, is_failed,
+    STDTESTS, NOTTESTS, PROGRESS_MIN_TIME,
+    Passed, Failed, EnvChanged, Skipped, ResourceDenied, Interrupted,
+    ChildError, DidNotRun)
 from test.libregrtest.setup import setup_tests
 from test.libregrtest.pgo import setup_pgo_tests
 from test.libregrtest.utils import removepy, count, format_duration, printlist
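The names imported here are the new result classes that replace the old string constants (PASSED, FAILED, ...). A result object can then carry extra data, such as the lists of failing test methods used by the re-run logic below. The classes live in test.libregrtest.runtest; the sketch below is only the rough shape implied by this diff: field names are taken from how main.py uses them, while the base classes and defaults are assumptions.

```python
# Rough sketch of the result classes in test.libregrtest.runtest, as
# implied by this diff; the exact definitions may differ.
from dataclasses import dataclass
from typing import List, Optional, Tuple

@dataclass
class TestResult:
    name: str                      # used as result.name below
    duration_sec: float = 0.0      # used for self.test_times
    xml_data: Optional[list] = None

class Passed(TestResult): pass
class DidNotRun(TestResult): pass
class EnvChanged(TestResult): pass
class Interrupted(TestResult): pass

class Skipped(TestResult): pass
class ResourceDenied(Skipped): pass  # subclass: checked before Skipped

@dataclass
class Failed(TestResult):
    # (test_full_name, traceback_text) pairs gathered from unittest
    errors: Optional[List[Tuple[str, str]]] = None
    failures: Optional[List[Tuple[str, str]]] = None

class ChildError(Failed): pass   # crashed worker process: still a failure
class Timeout(Failed): pass      # why the old TIMEOUT branch can disappear
```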
@@ -99,34 +99,32 @@ def get_executed(self):
                 | set(self.run_no_tests))

     def accumulate_result(self, result, rerun=False):
-        test_name = result.test_name
-        ok = result.result
+        test_name = result.name

-        if ok not in (CHILD_ERROR, INTERRUPTED) and not rerun:
-            self.test_times.append((result.test_time, test_name))
+        if not isinstance(result, (ChildError, Interrupted)) and not rerun:
+            self.test_times.append((result.duration_sec, test_name))

-        if ok == PASSED:
+        if isinstance(result, Passed):
             self.good.append(test_name)
-        elif ok in (FAILED, CHILD_ERROR):
-            if not rerun:
-                self.bad.append(test_name)
-        elif ok == ENV_CHANGED:
-            self.environment_changed.append(test_name)
-        elif ok == SKIPPED:
-            self.skipped.append(test_name)
-        elif ok == RESOURCE_DENIED:
+        elif isinstance(result, ResourceDenied):
             self.skipped.append(test_name)
             self.resource_denieds.append(test_name)
-        elif ok == TEST_DID_NOT_RUN:
+        elif isinstance(result, Skipped):
+            self.skipped.append(test_name)
+        elif isinstance(result, EnvChanged):
+            self.environment_changed.append(test_name)
+        elif isinstance(result, Failed):
+            if not rerun:
+                self.bad.append(test_name)
+                self.rerun.append(result)
+        elif isinstance(result, DidNotRun):
             self.run_no_tests.append(test_name)
-        elif ok == INTERRUPTED:
+        elif isinstance(result, Interrupted):
             self.interrupted = True
-        elif ok == TIMEOUT:
-            self.bad.append(test_name)
         else:
-            raise ValueError("invalid test result: %r" % ok)
+            raise ValueError("invalid test result: %r" % result)

-        if rerun and ok not in {FAILED, CHILD_ERROR, INTERRUPTED}:
+        if rerun and not isinstance(result, (Failed, Interrupted)):
Review comment: For refleaks/envchanged I don't know if it makes sense to rerun, because if those fail then there is no way that the test suite is going to succeed after that, no? OTOH I don't see how it would hurt (just let's make sure that it works as we expect and it doesn't interfere with the refleak machinery).

Review comment: This function is a little cryptic. The …
             self.bad.remove(test_name)

         xml_data = result.xml_data
@@ -314,15 +312,31 @@ def rerun_failed_tests(self):

         self.log()
         self.log("Re-running failed tests in verbose mode")
-        self.rerun = self.bad[:]
-        for test_name in self.rerun:
-            self.log(f"Re-running {test_name} in verbose mode")
+        rerun_list = self.rerun[:]
+        self.rerun = []
+        for result in rerun_list:
+            test_name = result.name
+            errors = result.errors or []
+            failures = result.failures or []
+            error_names = [test_full_name.split(" ")[0] for (test_full_name, *_) in errors]
+            failure_names = [test_full_name.split(" ")[0] for (test_full_name, *_) in failures]
+            self.ns.verbose = True
+            orig_match_tests = self.ns.match_tests
+            if errors or failures:
+                if self.ns.match_tests is None:
+                    self.ns.match_tests = []
+                self.ns.match_tests.extend(error_names)
Review comment: Question: do we want to rerun errors? In theory those are things like failures to set up the test and similar. Do you think that this is something we should run again?

Review comment: The only case where we gather errors is with …
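Background for the question above: unittest records a test method that raises an unexpected exception (including setUp errors) under result.errors, and a failed assertion under result.failures. Both are lists of (test, traceback_text) pairs, and the test's full name has the "method (class)" form that the split(" ")[0] expressions above parse. A self-contained demonstration (the class and method names are invented):

```python
import unittest

class DemoTests(unittest.TestCase):
    def test_assertion(self):
        self.assertEqual(1, 2)       # ends up in result.failures

    def test_exception(self):
        raise RuntimeError("boom")   # ends up in result.errors

result = unittest.TestResult()
unittest.TestLoader().loadTestsFromTestCase(DemoTests).run(result)

for test, tb in result.failures + result.errors:
    full_name = str(test)            # e.g. "test_assertion (__main__.DemoTests)"
    print(full_name.split(" ")[0])   # -> "test_assertion", "test_exception"
```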
+                self.ns.match_tests.extend(failure_names)

Review comment (on lines +328 to +329): This is the money shot.
+                matching = "matching: " + ", ".join(self.ns.match_tests)
+                self.log(f"Re-running {test_name} in verbose mode ({matching})")
+            else:
+                self.log(f"Re-running {test_name} in verbose mode")
             result = runtest(self.ns, test_name)
+            self.ns.match_tests = orig_match_tests

             self.accumulate_result(result, rerun=True)

-            if result.result == INTERRUPTED:
+            if isinstance(result, Interrupted):
                 break

         if self.bad:
@@ -383,7 +397,7 @@ def display_result(self):
         if self.rerun:
             print()
             print("%s:" % count(len(self.rerun), "re-run test"))
-            printlist(self.rerun)
+            printlist(r.name for r in self.rerun)

         if self.run_no_tests:
             print()
@@ -423,14 +437,14 @@ def run_tests_sequential(self):
             result = runtest(self.ns, test_name)
             self.accumulate_result(result)

-            if result.result == INTERRUPTED:
+            if isinstance(result, Interrupted):
                 break

-            previous_test = format_test_result(result)
+            previous_test = str(result)
             test_time = time.monotonic() - start_time
             if test_time >= PROGRESS_MIN_TIME:
                 previous_test = "%s in %s" % (previous_test, format_duration(test_time))
-            elif result.result == PASSED:
+            elif isinstance(result, Passed):
                 # be quiet: say nothing if the test passed shortly
                 previous_test = None
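The switch from format_test_result(result) to str(result) works because the result classes render themselves. A minimal sketch of the assumed pattern (the real classes may word it differently):

```python
# Minimal sketch: result classes can render themselves for progress
# output, replacing the old format_test_result() helper (assumed form).
class TestResult:
    def __init__(self, name):
        self.name = name

class Passed(TestResult):
    def __str__(self):
        return f"{self.name} passed"

class Failed(TestResult):
    def __str__(self):
        return f"{self.name} failed"

print(Passed("test_os"))   # -> "test_os passed"
```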
Review comment: The different order here is required due to how inheritance works. isinstance(Failed) would catch all the lower cases.
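A minimal illustration of the ordering constraint described above, using the ResourceDenied/Skipped pair from accumulate_result (the subclass relationship is an assumption, but it is what the check order implies):

```python
class TestResult: ...
class Skipped(TestResult): ...
class ResourceDenied(Skipped): ...  # assumed: a ResourceDenied is a Skipped

def classify(result):
    # Subclass checks must precede base-class checks:
    # isinstance(result, Skipped) is also True for ResourceDenied,
    # so swapping these branches would lose the resource information.
    if isinstance(result, ResourceDenied):
        return "skipped (resource denied)"
    if isinstance(result, Skipped):
        return "skipped"
    return "other"

print(classify(ResourceDenied()))  # -> "skipped (resource denied)"
print(classify(Skipped()))         # -> "skipped"
```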