WIP Change outcome to 'passed' for xfail unless it's strict #1795
Changes from all commits
4fc20d0
10a6ed1
296f42a
14a4dd0
225341c
55ec1d7
ea379e0
018197d
bb3d6d8
d1f2f77
767c28d
0173952
dfc659f
4ed412e
224ef67
0fb34cd
68ebf55
@@ -220,6 +220,18 @@ def check_strict_xfail(pyfuncitem):
             pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)
 
 
+def _is_unittest_unexpected_success_a_failure():
+    """Return if the test suite should fail if a @expectedFailure unittest test PASSES.
+
+    From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful:
+        Changed in version 3.4: Returns False if there were any
+        unexpectedSuccesses from tests marked with the expectedFailure() decorator.
+
+    TODO: this should be moved to the "compat" module.
+    """
+    return sys.version_info >= (3, 4)
+
+
 @pytest.hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item, call):
     outcome = yield
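The version gate in this helper mirrors the unittest change quoted in its docstring, so pytest can report an unexpected success the same way the running interpreter's unittest would. The difference can be observed with a plain unittest script; the sketch below is illustrative only (the file name and the final print are not part of this PR):

    # demo_expected_failure.py -- illustrative sketch, not part of this diff
    import sys
    import unittest


    class Demo(unittest.TestCase):
        @unittest.expectedFailure
        def test_marked_expected_failure_but_passes(self):
            # The test passes, so unittest records an "unexpected success".
            self.assertTrue(True)


    if __name__ == '__main__':
        program = unittest.main(exit=False)
        result = program.result
        # On Python >= 3.4, wasSuccessful() is False when there are
        # unexpected successes; on 2.7-3.3 the run still counts as OK.
        print(sys.version_info[:2], len(result.unexpectedSuccesses),
              result.wasSuccessful())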
@@ -228,9 +240,15 @@ def pytest_runtest_makereport(item, call):
     evalskip = getattr(item, '_evalskip', None)
     # unittest special case, see setting of _unexpectedsuccess
     if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
-        # we need to translate into how pytest encodes xpass
-        rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
-        rep.outcome = "failed"
+        if item._unexpectedsuccess:
+            rep.longrepr = "Unexpected success: {0}".format(item._unexpectedsuccess)
+        else:
+            rep.longrepr = "Unexpected success"
+        if _is_unittest_unexpected_success_a_failure():
+            rep.outcome = "failed"
+        else:
+            rep.outcome = "passed"
+        rep.wasxfail = rep.longrepr
     elif item.config.option.runxfail:
         pass  # don't interfere
     elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
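The report adjusted here is afterwards handed to pytest_runtest_logreport, so a conftest.py can observe the attributes this hunk sets. A small sketch (illustrative only, not part of this diff):

    # conftest.py -- sketch for observing the attributes set above
    def pytest_runtest_logreport(report):
        if report.when == "call" and hasattr(report, "wasxfail"):
            # For xfail-marked tests and unittest unexpected successes the
            # explanation ends up on report.wasxfail, and report.outcome is
            # "passed", "failed" or "skipped" depending on strictness.
            print("%s -> outcome=%s, wasxfail=%r"
                  % (report.nodeid, report.outcome, report.wasxfail))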
@@ -245,8 +263,15 @@ def pytest_runtest_makereport(item, call):
                 rep.outcome = "skipped"
                 rep.wasxfail = evalxfail.getexplanation()
         elif call.when == "call":
-            rep.outcome = "failed"  # xpass outcome
-            rep.wasxfail = evalxfail.getexplanation()
+            strict_default = item.config.getini('xfail_strict')
Hmm I think this is the more correct implementation of "strict xfail" than the one I did... with this, I think we don't even need ...
+            is_strict_xfail = evalxfail.get('strict', strict_default)
+            explanation = evalxfail.getexplanation()
+            if is_strict_xfail:
+                rep.outcome = "failed"
+                rep.longrepr = "[XPASS(strict)] {0}".format(explanation)
+            else:
+                rep.outcome = "passed"
+                rep.wasxfail = explanation
     elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
         # skipped by mark.skipif; change the location of the failure
         # to point to the item definition, otherwise it will display
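In user-facing terms, the branch above means an unexpectedly passing xfail test only fails the run when it is strict, either via the marker's strict argument or the xfail_strict ini default read through item.config.getini('xfail_strict'). A minimal sketch of both cases (file and test names are invented for illustration):

    # test_xfail_strict_demo.py -- illustrative sketch of the two branches above
    import pytest


    @pytest.mark.xfail(reason="expected to fail, but passes")
    def test_non_strict_xpass():
        # Non-strict: the report outcome becomes "passed" and the test is
        # shown as XPASS instead of failing the session.
        assert True


    @pytest.mark.xfail(strict=True, reason="expected to fail, but passes")
    def test_strict_xpass():
        # Strict: the outcome becomes "failed" with a "[XPASS(strict)]"
        # longrepr, so the session exits non-zero.
        assert True

Setting xfail_strict = true in the ini file flips the default for markers that do not pass strict explicitly.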
@@ -260,7 +285,7 @@ def pytest_report_teststatus(report):
     if hasattr(report, "wasxfail"):
         if report.skipped:
             return "xfailed", "x", "xfail"
-        elif report.failed:
+        elif report.passed:
@fabioz I think this will mean you no longer have to do special handling for this in pydev_runfiles_pytest2.py, correct?

@nicoddemus do you think we need to check for ...

No need: strict xfails now have report set to ...

Okay, that's what I thought. Thanks!
return "xpassed", "X", ("XPASS", {'yellow': True}) | ||
|
||
# called by the terminalreporter instance/plugin | ||
|
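For context, pytest_report_teststatus returns a (category, short letter, verbose word) triple, where the verbose word may be a (word, markup) pair as in the XPASS line above; when every implementation returns None, pytest falls back to its default classification. A hedged sketch of the same convention, using an invented report attribute:

    # conftest.py -- illustrative sketch of the status triple convention
    def pytest_report_teststatus(report):
        # "wasflaky" is a made-up attribute for illustration; reports without
        # it fall through (implicit None) to the default handling.
        if report.when == "call" and getattr(report, "wasflaky", False) and report.passed:
            return "flaky", "F", ("FLAKY", {'yellow': True})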
@@ -419,8 +419,9 @@ def setup_class(cls):
                 def test_method(self):
                     pass
         """)
+        from _pytest.skipping import _is_unittest_unexpected_success_a_failure
+        should_fail = _is_unittest_unexpected_success_a_failure()
         result = testdir.runpytest("-rxs")
-        assert result.ret == 0
         result.stdout.fnmatch_lines_random([
             "*XFAIL*test_trial_todo*",
             "*trialselfskip*",
@@ -429,8 +430,9 @@ def test_method(self):
             "*i2wanto*",
             "*sys.version_info*",
             "*skip_in_method*",
-            "*4 skipped*3 xfail*1 xpass*",
+            "*1 failed*4 skipped*3 xfailed*" if should_fail else "*4 skipped*3 xfail*1 xpass*",
         ])
+        assert result.ret == (1 if should_fail else 0)
 
     def test_trial_error(self, testdir):
         testdir.makepyfile("""
@@ -587,24 +589,62 @@ def test_hello(self, arg1):
     assert "TypeError" in result.stdout.str()
     assert result.ret == 1
 
 
 @pytest.mark.skipif("sys.version_info < (2,7)")
-def test_unittest_unexpected_failure(testdir):
-    testdir.makepyfile("""
+@pytest.mark.parametrize('runner', ['pytest', 'unittest'])
+def test_unittest_expected_failure_for_failing_test_is_xfail(testdir, runner):
+    script = testdir.makepyfile("""
         import unittest
         class MyTestCase(unittest.TestCase):
             @unittest.expectedFailure
-            def test_func1(self):
-                assert 0
+            def test_failing_test_is_xfail(self):
+                assert False
+        if __name__ == '__main__':
+            unittest.main()
+    """)
+    if runner == 'pytest':
+        result = testdir.runpytest("-rxX")
+        result.stdout.fnmatch_lines([
+            "*XFAIL*MyTestCase*test_failing_test_is_xfail*",
+            "*1 xfailed*",
+        ])
+    else:
+        result = testdir.runpython(script)
+        result.stderr.fnmatch_lines([
+            "*1 test in*",
+            "*OK*(expected failures=1)*",
+        ])
+    assert result.ret == 0
+
+
+@pytest.mark.skipif("sys.version_info < (2,7)")
+@pytest.mark.parametrize('runner', ['pytest', 'unittest'])
+def test_unittest_expected_failure_for_passing_test_is_fail(testdir, runner):
@hackebrot unfortunately the behavior for a passing test marked with ...
+    script = testdir.makepyfile("""
+        import unittest
+        class MyTestCase(unittest.TestCase):
             @unittest.expectedFailure
-            def test_func2(self):
-                assert 1
+            def test_passing_test_is_fail(self):
+                assert True
+        if __name__ == '__main__':
+            unittest.main()
     """)
-    result = testdir.runpytest("-rxX")
-    result.stdout.fnmatch_lines([
-        "*XFAIL*MyTestCase*test_func1*",
-        "*XPASS*MyTestCase*test_func2*",
-        "*1 xfailed*1 xpass*",
-    ])
+    from _pytest.skipping import _is_unittest_unexpected_success_a_failure
+    should_fail = _is_unittest_unexpected_success_a_failure()
+    if runner == 'pytest':
+        result = testdir.runpytest("-rxX")
+        result.stdout.fnmatch_lines([
+            "*MyTestCase*test_passing_test_is_fail*",
+            "*1 failed*" if should_fail else "*1 xpassed*",
+        ])
+    else:
+        result = testdir.runpython(script)
+        result.stderr.fnmatch_lines([
+            "*1 test in*",
+            "*(unexpected successes=1)*",
+        ])
+
+    assert result.ret == (1 if should_fail else 0)
 
 
 @pytest.mark.parametrize('fix_type, stmt', [
Deliberately placed it here because this should be moved to the appropriate section on features.