Skip to content

Commit fe157d4

Browse files
authored
Add support for using xfail in test cases (python#10635)
Add support for appending `-xfail` to a test case name in a `.test` file, which makes pytest mark that case as expected to fail: the test still runs, but the suite does not fail when it does. Also turns on `xfail_strict` in the pytest config, so any xfail-marked test that starts passing is reported as a failure. This prevents tests from lingering as xfails long after they begin working, ensuring they are promoted back to regular tests as soon as possible and can once again catch regressions instead of being silently ignored. Closes python#10604
1 parent 7f92107 commit fe157d4

File tree

2 files changed

+14
-2
lines changed

2 files changed

+14
-2
lines changed

mypy/test/data.py

Lines changed: 11 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -223,6 +223,7 @@ def __init__(self,
223223
only_when: str,
224224
platform: Optional[str],
225225
skip: bool,
226+
xfail: bool,
226227
data: str,
227228
line: int) -> None:
228229
super().__init__(name, parent)
@@ -234,6 +235,7 @@ def __init__(self,
234235
or (platform == 'posix' and sys.platform == 'win32')):
235236
skip = True
236237
self.skip = skip
238+
self.xfail = xfail
237239
self.data = data
238240
self.line = line
239241
self.old_cwd = None # type: Optional[str]
@@ -242,6 +244,9 @@ def __init__(self,
242244
def runtest(self) -> None:
243245
if self.skip:
244246
pytest.skip()
247+
# TODO: add a better error message for when someone uses skip and xfail at the same time
248+
elif self.xfail:
249+
self.add_marker(pytest.mark.xfail)
245250
suite = self.parent.obj()
246251
suite.setup()
247252
try:
@@ -552,17 +557,20 @@ def split_test_cases(parent: 'DataSuiteCollector', suite: 'DataSuite',
552557
"""
553558
with open(file, encoding='utf-8') as f:
554559
data = f.read()
560+
# number of groups in the below regex
561+
NUM_GROUPS = 7
555562
cases = re.split(r'^\[case ([a-zA-Z_0-9]+)'
556563
r'(-writescache)?'
557564
r'(-only_when_cache|-only_when_nocache)?'
558565
r'(-posix|-windows)?'
559566
r'(-skip)?'
567+
r'(-xfail)?'
560568
r'\][ \t]*$\n',
561569
data,
562570
flags=re.DOTALL | re.MULTILINE)
563571
line_no = cases[0].count('\n') + 1
564-
for i in range(1, len(cases), 6):
565-
name, writescache, only_when, platform_flag, skip, data = cases[i:i + 6]
572+
for i in range(1, len(cases), NUM_GROUPS):
573+
name, writescache, only_when, platform_flag, skip, xfail, data = cases[i:i + NUM_GROUPS]
566574
platform = platform_flag[1:] if platform_flag else None
567575
yield DataDrivenTestCase.from_parent(
568576
parent=parent,
@@ -573,6 +581,7 @@ def split_test_cases(parent: 'DataSuiteCollector', suite: 'DataSuite',
573581
only_when=only_when,
574582
platform=platform,
575583
skip=bool(skip),
584+
xfail=bool(xfail),
576585
data=data,
577586
line=line_no,
578587
)

pytest.ini

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -20,3 +20,6 @@ python_functions =
2020

2121
# always run in parallel (requires pytest-xdist, see test-requirements.txt)
2222
addopts = -nauto
23+
24+
# treat xpasses as test failures so they get converted to regular tests as soon as possible
25+
xfail_strict = true

0 commit comments

Comments (0)