-
-
Notifications
You must be signed in to change notification settings - Fork 32.1k
bpo-44708: Only re-run test methods that match names of previously failing test methods #27287
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 1 commit
db6e311
59d83c1
89b209e
c049888
d55e65b
aad108f
67c3e3d
26f3ba3
fb4346a
d9d7e0e
3e4622b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -116,6 +116,7 @@ def accumulate_result(self, result, rerun=False): | |
elif isinstance(result, Failed): | ||
if not rerun: | ||
self.bad.append(test_name) | ||
self.rerun.append(result) | ||
elif isinstance(result, DidNotRun): | ||
self.run_no_tests.append(test_name) | ||
elif isinstance(result, Interrupted): | ||
|
@@ -311,10 +312,24 @@ def rerun_failed_tests(self): | |
|
||
self.log() | ||
self.log("Re-running failed tests in verbose mode") | ||
self.rerun = self.bad[:] | ||
for test_name in self.rerun: | ||
self.log(f"Re-running {test_name} in verbose mode") | ||
rerun_list = self.rerun[:] | ||
self.rerun = [] | ||
for result in rerun_list: | ||
test_name = result.name | ||
errors = result.errors or [] | ||
failures = result.failures or [] | ||
error_names = [e[0].split(" ")[0] for e in errors] | ||
failure_names = [f[0].split(" ")[0] for f in failures] | ||
self.ns.verbose = True | ||
if errors or failures: | ||
if self.ns.match_tests is None: | ||
self.ns.match_tests = [] | ||
self.ns.match_tests.extend(error_names) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Question: Do we want to rerun errors? In theory those are things like failures to set up the test and similar. Do you think that this is something we should run again? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The only case where we gather errors is with |
||
self.ns.match_tests.extend(failure_names) | ||
Comment on lines +328 to +329
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This is the money shot. |
||
matching = "matching: " + ", ".join(self.ns.match_tests) | ||
self.log(f"Re-running {test_name} in verbose mode ({matching})") | ||
else: | ||
self.log(f"Re-running {test_name} in verbose mode") | ||
result = runtest(self.ns, test_name) | ||
|
||
self.accumulate_result(result, rerun=True) | ||
|
@@ -380,7 +395,7 @@ def display_result(self): | |
if self.rerun: | ||
print() | ||
print("%s:" % count(len(self.rerun), "re-run test")) | ||
printlist(self.rerun) | ||
printlist(r.name for r in self.rerun) | ||
|
||
if self.run_no_tests: | ||
print() | ||
|
Uh oh!
There was an error while loading. Please reload this page.