Commit 5e5a800

Author: likanglin

condition can be evaluated in real time

Use skipping.evaluate_condition from pytest in order to keep the same behavior as pytest.mark.skipif. The condition is evaluated in real time, so the plugin can decide at runtime whether the case should be rerun.

Change-Id: I2575d368c79480223c84498513a4ef605db0c576

1 parent: 1471840
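In practice, the two marker forms documented in the README change below differ only in when the condition is computed: a plain Python expression is evaluated once at import/collection time, while a string condition is compiled and re-evaluated each time the plugin decides whether to rerun. A minimal sketch of the distinction (the should_rerun flag is illustrative and not part of the plugin or this commit; it mirrors the test_reruns_with_string_condition_with_global_var test added below):

    import pytest

    should_rerun = False  # module-level state that may change while the test runs

    # Evaluated once, at import time: the marker is frozen to condition=False,
    # so this test is never rerun.
    @pytest.mark.flaky(reruns=2, condition=should_rerun)
    def test_static_condition():
        assert False

    # Evaluated by the plugin when deciding on a rerun: the string sees the
    # *current* value of should_rerun through the test module's globals.
    @pytest.mark.flaky(reruns=2, condition="should_rerun")
    def test_dynamic_condition():
        global should_rerun
        should_rerun = True
        assert False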

File tree: 3 files changed, +108 -3 lines

README.rst (+14)

@@ -108,6 +108,20 @@ You can also specify an optional ``condition`` in the re-run marker:
         import random
         assert random.choice([True, False])
 
+You can use ``@pytest.mark.flaky(condition)`` in the same way as ``@pytest.mark.skipif(condition)``; see `pytest-mark-skipif <https://docs.pytest.org/en/6.2.x/reference.html#pytest-mark-skipif>`_.
+
+.. code-block:: python
+
+    @pytest.mark.flaky(reruns=2, condition="sys.platform.startswith('win32')")
+    def test_example():
+        import random
+        assert random.choice([True, False])
+
+    # exactly the same as the above
+    @pytest.mark.flaky(reruns=2, condition=sys.platform.startswith("win32"))
+    def test_example():
+        import random
+        assert random.choice([True, False])
+
 Note that the test will re-run for any ``condition`` that is truthy.
 
 Output

pytest_rerunfailures.py (+56, -1)

@@ -1,9 +1,14 @@
+import os
+import platform
 import re
+import sys
 import time
+import traceback
 import warnings
 
 import pkg_resources
 import pytest
+from _pytest.outcomes import fail
 from _pytest.runner import runtestprotocol
 
 HAS_RESULTLOG = False
@@ -184,11 +189,61 @@ def get_reruns_condition(item):
 
     condition = True
     if rerun_marker is not None and "condition" in rerun_marker.kwargs:
-        condition = rerun_marker.kwargs["condition"]
+        condition = evaluate_condition(
+            item, rerun_marker, rerun_marker.kwargs["condition"]
+        )
 
     return condition
 
 
+def evaluate_condition(item, mark, condition: object) -> bool:
+    """
+    Copied from the Python 3.8 implementation of _pytest.skipping.
+    """
+    result = False
+    # String condition.
+    if isinstance(condition, str):
+        globals_ = {
+            "os": os,
+            "sys": sys,
+            "platform": platform,
+            "config": item.config,
+        }
+        if hasattr(item, "obj"):
+            globals_.update(item.obj.__globals__)  # type: ignore[attr-defined]
+        try:
+            filename = f"<{mark.name} condition>"
+            condition_code = compile(condition, filename, "eval")
+            result = eval(condition_code, globals_)
+        except SyntaxError as exc:
+            msglines = [
+                "Error evaluating %r condition" % mark.name,
+                "    " + condition,
+                "    " + " " * (exc.offset or 0) + "^",
+                "SyntaxError: invalid syntax",
+            ]
+            fail("\n".join(msglines), pytrace=False)
+        except Exception as exc:
+            msglines = [
+                "Error evaluating %r condition" % mark.name,
+                "    " + condition,
+                *traceback.format_exception_only(type(exc), exc),
+            ]
+            fail("\n".join(msglines), pytrace=False)
+
+    # Boolean condition.
+    else:
+        try:
+            result = bool(condition)
+        except Exception as exc:
+            msglines = [
+                "Error evaluating %r condition as a boolean" % mark.name,
+                *traceback.format_exception_only(type(exc), exc),
+            ]
+            fail("\n".join(msglines), pytrace=False)
+    return result
+
+
 def _remove_cached_results_from_failed_fixtures(item):
     """
     Note: remove all cached_result attribute from every fixture
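For string conditions, the core of evaluate_condition is a compile-and-eval against a controlled globals dict (os, sys, platform, the pytest config, plus the test module's own globals). Stripped of the pytest plumbing, the pattern looks roughly like the standalone sketch below; the helper name eval_string_condition is hypothetical and not part of the plugin's API:

    import os
    import platform
    import sys


    def eval_string_condition(condition, extra_globals=None):
        """Compile and evaluate a condition string, mirroring the plugin's approach."""
        globals_ = {"os": os, "sys": sys, "platform": platform}
        if extra_globals:
            # In the plugin this is item.obj.__globals__, i.e. the test module's namespace.
            globals_.update(extra_globals)
        code = compile(condition, "<flaky condition>", "eval")
        return bool(eval(code, globals_))


    if __name__ == "__main__":
        print(eval_string_condition("sys.platform.startswith('win32')"))
        print(eval_string_condition("flag", {"flag": True}))

Because the string is evaluated each time the plugin asks for the rerun condition, any name it references is looked up at that moment rather than at collection time.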

test_pytest_rerunfailures.py (+38, -2)

@@ -529,8 +529,6 @@ def test_only_rerun_flag(testdir, only_rerun_texts, should_rerun):
         (False, 0),
         (1, 2),
         (0, 0),
-        ("'non-empty'", 2),
-        ("''", 0),
         (["list"], 2),
         ([], 0),
         ({"dict": 1}, 2),
@@ -550,3 +548,41 @@ def test_fail_two():
 
     result = testdir.runpytest()
     assert_outcomes(result, passed=0, failed=1, rerun=expected_reruns)
+
+
+@pytest.mark.parametrize(
+    "condition, expected_reruns",
+    [
+        ('sys.platform.startswith("non-exists") == False', 2),
+        ('os.getpid() != -1', 2),
+    ],
+)
+# sys, os and platform are already imported before the condition expression is evaluated
+def test_reruns_with_string_condition(testdir, condition, expected_reruns):
+    testdir.makepyfile(
+        f"""
+        import pytest
+
+        @pytest.mark.flaky(reruns=2, condition='{condition}')
+        def test_fail_two():
+            assert False"""
+    )
+    result = testdir.runpytest()
+    assert_outcomes(result, passed=0, failed=1, rerun=expected_reruns)
+
+
+def test_reruns_with_string_condition_with_global_var(testdir):
+    testdir.makepyfile(
+        """
+        import pytest
+
+        rerunBool = False
+
+        @pytest.mark.flaky(reruns=2, condition='rerunBool')
+        def test_fail_two():
+            global rerunBool
+            rerunBool = True
+            assert False"""
+    )
+    result = testdir.runpytest()
+    assert_outcomes(result, passed=0, failed=1, rerun=2)
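Because evaluate_condition also places the pytest config object into the evaluation globals, a string condition can additionally key off command-line options. A hedged sketch, not covered by the tests in this commit: the --rerun-on-ci option and the conftest.py wiring below are hypothetical, chosen only to illustrate the "config" name exposed to string conditions.

    # conftest.py (hypothetical option, for illustration only)
    def pytest_addoption(parser):
        parser.addoption("--rerun-on-ci", action="store_true", default=False)


    # test_ci_rerun.py
    import pytest


    @pytest.mark.flaky(reruns=3, condition="config.getoption('--rerun-on-ci')")
    def test_sometimes_flaky():
        # Reruns only happen when pytest was started with --rerun-on-ci.
        assert False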

0 commit comments

Comments
 (0)