Add support for using xfail in test cases (python#10635)
Add support for appending -xfail to the end of a test case name in a
.test file to make pytest mark it as expected to fail (the test still
runs, but the test suite doesn't fail when the test fails).

Turns on xfail_strict in the pytest config so that tests marked as
xfail that suddenly start passing are reported as failures. This keeps
tests from staying marked as xfail long after they start working
correctly, so they can be turned back into regular tests as soon as
possible and used to catch regressions, instead of having their
failures silently ignored.
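
For reference, the strict-xfail behavior itself is standard pytest,
not something added here: with xfail_strict enabled, an xfail-marked
test that unexpectedly passes (XPASS) is reported as a failure. A
minimal sketch (the test name is made up):

    import pytest

    @pytest.mark.xfail
    def test_already_fixed() -> None:
        # With xfail_strict = true, this unexpected pass (XPASS) is
        # reported as a failure, prompting removal of the xfail marker.
        assert 1 + 1 == 2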

Closes python#10604
pranavrajpal authored Jun 13, 2021
1 parent 7f92107 commit fe157d4
Showing 2 changed files with 14 additions and 2 deletions.
13 changes: 11 additions & 2 deletions mypy/test/data.py
@@ -223,6 +223,7 @@ def __init__(self,
                  only_when: str,
                  platform: Optional[str],
                  skip: bool,
+                 xfail: bool,
                  data: str,
                  line: int) -> None:
         super().__init__(name, parent)
@@ -234,6 +235,7 @@ def __init__(self,
                 or (platform == 'posix' and sys.platform == 'win32')):
             skip = True
         self.skip = skip
+        self.xfail = xfail
         self.data = data
         self.line = line
         self.old_cwd = None  # type: Optional[str]
@@ -242,6 +244,9 @@ def __init__(self,
     def runtest(self) -> None:
         if self.skip:
             pytest.skip()
+        # TODO: add a better error message for when someone uses skip and xfail at the same time
+        elif self.xfail:
+            self.add_marker(pytest.mark.xfail)
         suite = self.parent.obj()
         suite.setup()
         try:
@@ -552,17 +557,20 @@ def split_test_cases(parent: 'DataSuiteCollector', suite: 'DataSuite',
     """
     with open(file, encoding='utf-8') as f:
         data = f.read()
+    # number of groups in the below regex
+    NUM_GROUPS = 7
     cases = re.split(r'^\[case ([a-zA-Z_0-9]+)'
                      r'(-writescache)?'
                      r'(-only_when_cache|-only_when_nocache)?'
                      r'(-posix|-windows)?'
                      r'(-skip)?'
+                     r'(-xfail)?'
                      r'\][ \t]*$\n',
                      data,
                      flags=re.DOTALL | re.MULTILINE)
     line_no = cases[0].count('\n') + 1
-    for i in range(1, len(cases), 6):
-        name, writescache, only_when, platform_flag, skip, data = cases[i:i + 6]
+    for i in range(1, len(cases), NUM_GROUPS):
+        name, writescache, only_when, platform_flag, skip, xfail, data = cases[i:i + NUM_GROUPS]
         platform = platform_flag[1:] if platform_flag else None
         yield DataDrivenTestCase.from_parent(
             parent=parent,
@@ -573,6 +581,7 @@ def split_test_cases(parent: 'DataSuiteCollector', suite: 'DataSuite',
             only_when=only_when,
             platform=platform,
             skip=bool(skip),
+            xfail=bool(xfail),
             data=data,
             line=line_no,
         )
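
The NUM_GROUPS stride in split_test_cases works because re.split
interleaves each header's captured groups with the case body that
follows it, so every case occupies one slot per capturing group plus
one for its body. A minimal sketch of the same idea, reduced to a
single -xfail flag (illustrative code, not part of this commit):

    import re

    text = (
        "[case testOk]\n"
        "x = 1\n"
        "[case testBroken-xfail]\n"
        "reveal_type(undefined)\n"
    )
    # Each match contributes its captured groups (None when a flag is
    # absent), followed by the text up to the next case header.
    parts = re.split(r"^\[case ([a-zA-Z_0-9]+)(-xfail)?\][ \t]*$\n",
                     text, flags=re.MULTILINE)
    # parts == ['', 'testOk', None, 'x = 1\n',
    #           'testBroken', '-xfail', 'reveal_type(undefined)\n']
    for i in range(1, len(parts), 3):  # 2 groups + 1 body chunk per case
        name, xfail_flag, body = parts[i:i + 3]
        print(name, bool(xfail_flag))  # -> testOk False, testBroken True
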
3 changes: 3 additions & 0 deletions pytest.ini
@@ -20,3 +20,6 @@ python_functions =
 
 # always run in parallel (requires pytest-xdist, see test-requirements.txt)
 addopts = -nauto
+
+# treat xpasses as test failures so they get converted to regular tests as soon as possible
+xfail_strict = true

