
Make xpass failure again #11498


Closed
1 change: 1 addition & 0 deletions changelog/11467.feature.rst
@@ -0,0 +1 @@
The ``strict`` parameter of ``xfail`` now defaults to ``True``, ensuring that an unexpectedly passing (XPASS) test is always reported as either a warning or a failure.
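For illustration only (not part of the changelog entry), a test like the following, with no explicit strict= argument, would now be reported as a failure when it unexpectedly passes:

    import pytest

    @pytest.mark.xfail(reason="expected to fail")
    def test_unexpected_pass():
        # The assertion passes, so the test XPASSes; with strict defaulting
        # to True it is reported as FAILED ([XPASS(strict)]).
        assert 1 + 1 == 2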
28 changes: 12 additions & 16 deletions src/_pytest/skipping.py
@@ -19,7 +19,6 @@
from _pytest.outcomes import skip
from _pytest.outcomes import xfail
from _pytest.reports import BaseReport
from _pytest.reports import TestReport
from _pytest.runner import CallInfo
from _pytest.stash import StashKey

@@ -208,7 +207,7 @@ def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
"""Evaluate xfail marks on item, returning Xfail if triggered."""
for mark in item.iter_markers(name="xfail"):
run = mark.kwargs.get("run", True)
strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
strict = mark.kwargs.get("strict", True)
Member

In order to ensure backward compatibility we cannot go straight from non-strict to strict; instead we have to start by warning whenever strict was not explicitly set to true or false.

The warning should indicate that a future major release of pytest will change the default from False to True, and recommend using strict=True as the default and a plugin for genuinely flaky tests.

Contributor Author

Sure
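A rough sketch of the suggested approach, assuming it lives in evaluate_xfail_marks and uses pytest's PytestDeprecationWarning (the placement, warning class, and wording are assumptions, not part of this diff):

    import warnings
    from _pytest.warning_types import PytestDeprecationWarning

    strict_set = "strict" in mark.kwargs
    strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
    if not strict_set:
        # Hypothetical deprecation notice: the default flips in a future
        # major release; users should pass strict= explicitly, or use a
        # flaky-test plugin for genuinely flaky tests.
        warnings.warn(
            PytestDeprecationWarning(
                "The default of the 'strict' parameter of xfail will change "
                "from False to True in a future major pytest release. "
                "Set strict=True or strict=False explicitly to silence this warning."
            )
        )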

raises = mark.kwargs.get("raises", None)
if "condition" not in mark.kwargs:
conditions = mark.args
@@ -244,7 +243,7 @@ def pytest_runtest_setup(item: Item) -> None:
xfail("[NOTRUN] " + xfailed.reason)


@hookimpl(wrapper=True)
@hookimpl(hookwrapper=True)
def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
xfailed = item.stash.get(xfailed_key, None)
if xfailed is None:
@@ -253,20 +252,18 @@ def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
if xfailed and not item.config.option.runxfail and not xfailed.run:
xfail("[NOTRUN] " + xfailed.reason)

try:
return (yield)
finally:
# The test run may have added an xfail mark dynamically.
xfailed = item.stash.get(xfailed_key, None)
if xfailed is None:
item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
yield

# The test run may have added an xfail mark dynamically.
xfailed = item.stash.get(xfailed_key, None)
if xfailed is None:
item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)


@hookimpl(wrapper=True)
def pytest_runtest_makereport(
item: Item, call: CallInfo[None]
) -> Generator[None, TestReport, TestReport]:
rep = yield
@hookimpl(hookwrapper=True)
Member

Please restore the prior wrapper definition, as that's the new-style hook wrapper declaration from pluggy.
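For reference, a minimal sketch of the two declaration styles: the new-style wrapper is what the diff above removes, and the old-style hookwrapper is what this change reintroduces:

    # New-style pluggy wrapper (what the reviewer asks to keep):
    @hookimpl(wrapper=True)
    def pytest_runtest_makereport(
        item: Item, call: CallInfo[None]
    ) -> Generator[None, TestReport, TestReport]:
        rep = yield    # the yield itself returns the TestReport
        ...            # adjust rep here
        return rep     # the (possibly modified) report must be returned

    # Old-style hookwrapper (what this change switches back to):
    @hookimpl(hookwrapper=True)
    def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
        outcome = yield             # yield gives a pluggy Result object
        rep = outcome.get_result()
        ...                         # adjust rep in place; nothing is returned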

def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
outcome = yield
rep = outcome.get_result()
xfailed = item.stash.get(xfailed_key, None)
if item.config.option.runxfail:
pass # don't interfere
@@ -289,7 +286,6 @@ def pytest_runtest_makereport(
else:
rep.outcome = "passed"
rep.wasxfail = xfailed.reason
return rep


def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
2 changes: 1 addition & 1 deletion testing/python/collect.py
@@ -665,7 +665,7 @@ def test_xfail(x):
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 2 passed, 1 xpassed in *"])
result.stdout.fnmatch_lines(["* 1 failed, 2 passed in *"])

def test_parametrize_xfail_passed(self, pytester: Pytester) -> None:
pytester.makepyfile(
4 changes: 2 additions & 2 deletions testing/python/metafunc.py
@@ -1949,7 +1949,7 @@ def test_increment(n, expected):
)
pytester.makepyfile(s)
reprec = pytester.inline_run()
passed, failed = (2, 1) if strict else (3, 0)
passed, failed = (2, 1)
reprec.assertoutcome(passed=passed, failed=failed)

def test_parametrize_called_in_generate_tests(self, pytester: Pytester) -> None:
@@ -2017,7 +2017,7 @@ def test_increment(n, expected):
)
pytester.makepyfile(s)
reprec = pytester.inline_run()
passed, failed = (0, 2) if strict else (2, 0)
passed, failed = (0, 2)
reprec.assertoutcome(passed=passed, failed=failed)

def test_pytest_make_parametrize_id(self, pytester: Pytester) -> None:
6 changes: 2 additions & 4 deletions testing/test_cacheprovider.py
@@ -399,10 +399,8 @@ def test_hello():
assert 1
"""
)
config = pytester.parseconfigure()
assert config.cache is not None
lastfailed = config.cache.get("cache/lastfailed", -1)
assert lastfailed == -1
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 failed in*"])

def test_non_serializable_parametrize(self, pytester: Pytester) -> None:
"""Test that failed parametrized tests with unmarshable parameters
2 changes: 1 addition & 1 deletion testing/test_junitxml.py
@@ -164,7 +164,7 @@ def test_xpass():
result, dom = run_and_parse(family=xunit_family)
assert result.ret
node = dom.find_first_by_tag("testsuite")
node.assert_attr(name="pytest", errors=0, failures=1, skipped=2, tests=5)
node.assert_attr(name="pytest", errors=0, failures=2, skipped=2, tests=5)

@parametrize_families
def test_summing_simple_with_errors(
4 changes: 2 additions & 2 deletions testing/test_pytester.py
@@ -143,8 +143,8 @@ def test_potato():
"""
)
result = pytester.runpytest()
result.assert_outcomes(xpassed=1)
assert result.ret == 0
result.assert_outcomes(failed=1)
# assert result.ret == 0


def test_xpassed_with_strict_is_considered_a_failure(pytester: Pytester) -> None:
62 changes: 37 additions & 25 deletions testing/test_skipping.py
@@ -323,7 +323,7 @@ def test_func():
reports = runtestprotocol(item, log=False)
assert len(reports) == 3
callreport = reports[1]
assert callreport.passed
assert callreport.outcome == "failed"
assert callreport.wasxfail == "this is an xfail"

def test_xfail_using_platform(self, pytester: Pytester) -> None:
@@ -357,6 +357,22 @@ def test_func():
assert str(callreport.longrepr) == "[XPASS(strict)] nope"
assert not hasattr(callreport, "wasxfail")

def test_xfail_xpassed_strict_default_true(self, pytester: Pytester) -> None:
item = pytester.getitem(
"""
import pytest
@pytest.mark.xfail
def test_func():
assert 1
"""
)
reports = runtestprotocol(item, log=False)
assert len(reports) == 3
callreport = reports[1]
assert callreport.failed
assert str(callreport.longrepr) == "[XPASS(strict)]"
assert not hasattr(callreport, "wasxfail")

def test_xfail_run_anyway(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
@@ -477,7 +493,7 @@ def test_that():
"""
)
result = pytester.runpytest(p, "-rX")
result.stdout.fnmatch_lines(["*XPASS*test_that*", "*1 xpassed*"])
result.stdout.fnmatch_lines(["*test_that*", "*1 failed*"])
assert result.ret == 0

def test_xfail_imperative(self, pytester: Pytester) -> None:
@@ -531,7 +547,7 @@ def test_that():
"""
)
result = pytester.runpytest(p, "-rxX")
result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*XPASS*test_that*"])
result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*FAILED*test_that*"])

def test_dynamic_xfail_no_run(self, pytester: Pytester) -> None:
p = pytester.makepyfile(
@@ -645,14 +661,14 @@ def test_foo():
result.stdout.fnmatch_lines(
["*test_foo*", "*XPASS(strict)*unsupported feature*"]
)
else:
result.stdout.fnmatch_lines(
[
"*test_strict_xfail*",
"XPASS test_strict_xfail.py::test_foo unsupported feature",
]
)
assert result.ret == (1 if strict else 0)
# else:
# result.stdout.fnmatch_lines(
# [
# "*test_strict_xfail*",
# "XPASS test_strict_xfail.py::test_foo unsupported feature",
# ]
# )
assert result.ret == (1)
assert pytester.path.joinpath("foo_executed").exists()

@pytest.mark.parametrize("strict", [True, False])
@@ -707,9 +723,9 @@ def test_foo():
"""
)
result = pytester.runpytest(p, "-rxX")
strict = strict_val == "true"
result.stdout.fnmatch_lines(["*1 failed*" if strict else "*1 xpassed*"])
assert result.ret == (1 if strict else 0)
# strict = strict_val == "true"
result.stdout.fnmatch_lines(["*1 failed*"])
assert result.ret == (1)

def test_xfail_markeval_namespace(self, pytester: Pytester) -> None:
pytester.makeconftest(
@@ -928,11 +944,9 @@ def test_func():

@pytest.mark.parametrize(
"marker, msg1, msg2",
[("skipif", "SKIP", "skipped"), ("xfail", "XPASS", "xpassed")],
[("skipif", "SKIP", "skipped"), ("xfail", "FAILED", "FAILED")],
)
def test_skipif_reporting_multiple(
self, pytester: Pytester, marker, msg1, msg2
) -> None:
def test_skipif_reporting_multiple(self, pytester: Pytester, marker, msg1, msg2) -> None:
pytester.makepyfile(
test_foo="""
import pytest
@@ -945,9 +959,7 @@ def test_foobar():
)
)
result = pytester.runpytest("-s", "-rsxX")
result.stdout.fnmatch_lines(
[f"*{msg1}*test_foo.py*second_condition*", f"*1 {msg2}*"]
)
result.stdout.fnmatch_lines(["*test_foo.py*second_condition*", f"*1 {msg2}*"])
assert result.ret == 0


@@ -1060,7 +1072,7 @@ def test_4():
)
result = pytester.runpytest("-rfxXs")
result.stdout.fnmatch_lines(
["FAIL*test_1*", "XFAIL*test_2*", "XPASS*test_3*", "SKIP*four*"]
["FAILED*test_1*", "XFAIL*test_2*", "FAILED*test_3*", "SKIPPED*four*"]
)


@@ -1103,11 +1115,11 @@ def test_5(fail):
result = pytester.runpytest("-ra")
result.stdout.fnmatch_lines(
[
"SKIP*four*",
"SKIPPED*four*",
"XFAIL*test_2*",
"XPASS*test_3*",
"FAILED*test_3*",
"ERROR*test_5*",
"FAIL*test_1*",
"FAILED*test_1*",
]
)

4 changes: 2 additions & 2 deletions testing/test_terminal.py
@@ -403,11 +403,11 @@ def test_long_xfail():

common_output = [
"test_verbose_skip_reason.py::test_1 SKIPPED (123) *",
"test_verbose_skip_reason.py::test_2 XPASS (456) *",
"test_verbose_skip_reason.py::test_2 FAILED *",
"test_verbose_skip_reason.py::test_3 XFAIL (789) *",
"test_verbose_skip_reason.py::test_4 XFAIL *",
"test_verbose_skip_reason.py::test_5 SKIPPED (unconditional skip) *",
"test_verbose_skip_reason.py::test_6 XPASS *",
"test_verbose_skip_reason.py::test_6 FAILED *",
"test_verbose_skip_reason.py::test_7 SKIPPED *",
"test_verbose_skip_reason.py::test_8 SKIPPED (888 is great) *",
"test_verbose_skip_reason.py::test_9 XFAIL *",