Make cache plugin cumulative #2621
Changes from all commits:
```diff
@@ -105,27 +105,22 @@ def __init__(self, config):
         self.config = config
         active_keys = 'lf', 'failedfirst'
         self.active = any(config.getvalue(key) for key in active_keys)
-        if self.active:
-            self.lastfailed = config.cache.get("cache/lastfailed", {})
-        else:
-            self.lastfailed = {}
+        self.lastfailed = config.cache.get("cache/lastfailed", {})

     def pytest_report_header(self):
         if self.active:
             if not self.lastfailed:
                 mode = "run all (no recorded failures)"
             else:
-                mode = "rerun last %d failures%s" % (
-                    len(self.lastfailed),
+                mode = "rerun previous failures%s" % (
                     " first" if self.config.getvalue("failedfirst") else "")
             return "run-last-failure: %s" % mode

     def pytest_runtest_logreport(self, report):
-        if report.failed and "xfail" not in report.keywords:
+        if (report.when == 'call' and report.passed) or report.skipped:
+            self.lastfailed.pop(report.nodeid, None)
+        elif report.failed:
             self.lastfailed[report.nodeid] = True
-        elif not report.failed:
-            if report.when == "call":
-                self.lastfailed.pop(report.nodeid, None)

     def pytest_collectreport(self, report):
         passed = report.outcome in ('passed', 'skipped')
```
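The reworked `pytest_runtest_logreport` is the core of the cumulative behaviour: a test is recorded as soon as any phase (setup, call, or teardown) fails, and is forgotten only when its call phase passes or the test is skipped. Below is a minimal self-contained sketch of those rules; the `Report` class is a stand-in for pytest's real report objects, not pytest code:

```python
# Stand-in for pytest's TestReport; only the attributes used by the
# bookkeeping above (nodeid, when, passed/failed/skipped) are modelled.
class Report:
    def __init__(self, nodeid, when, outcome):
        self.nodeid = nodeid
        self.when = when  # 'setup', 'call' or 'teardown'
        self.passed = outcome == "passed"
        self.failed = outcome == "failed"
        self.skipped = outcome == "skipped"

lastfailed = {}

def logreport(report):
    # Mirrors the new hook body: forget on a passing call (or a skip),
    # record on a failure in any phase.
    if (report.when == "call" and report.passed) or report.skipped:
        lastfailed.pop(report.nodeid, None)
    elif report.failed:
        lastfailed[report.nodeid] = True

# First run: setup passes, the call fails -> the test is recorded.
logreport(Report("test_a", "setup", "passed"))
logreport(Report("test_a", "call", "failed"))
assert lastfailed == {"test_a": True}

# Second run: a passing setup alone does not clear the entry ...
logreport(Report("test_a", "setup", "passed"))
assert lastfailed == {"test_a": True}

# ... only the passing call phase does.
logreport(Report("test_a", "call", "passed"))
assert lastfailed == {}
```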
```diff
@@ -147,11 +142,11 @@ def pytest_collection_modifyitems(self, session, config, items):
                 previously_failed.append(item)
             else:
                 previously_passed.append(item)
-            if not previously_failed and previously_passed:
+            if not previously_failed:
                 # running a subset of all tests with recorded failures outside
                 # of the set of tests currently executing
-                return
-            if self.config.getvalue("lf"):
+                pass
+            elif self.config.getvalue("lf"):
                 items[:] = previously_failed
                 config.hook.pytest_deselected(items=previously_passed)
             else:
```

A review discussion on this hunk:

- "I didn't quite understand why …"
- "When we have failed tests that are outside of the selection that's currently being executed, that happens."
- "Sorry, what happens?"
- "If …"
- "I see, but unless I'm mistaken that is covered by the new test I added, right? I changed the condition to … So running …"
- "Sounds correct."
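The subset behaviour discussed in this thread can be exercised end to end. The sketch below is a hypothetical test (not the one actually added in this PR) written against pytest's `testdir` fixture from the `pytester` plugin; the file names and outcome counts are illustrative:

```python
# Requires pytest's pytester plugin (e.g. ``pytest_plugins = "pytester"``
# in a conftest.py). Illustrative only, not the PR's own test.
def test_failures_outside_selection_are_remembered(testdir):
    testdir.makepyfile(
        test_broken="def test_broken(): assert 0",
        test_ok="def test_ok(): assert 1",
    )
    # Full run records test_broken in cache/lastfailed.
    testdir.runpytest().assert_outcomes(passed=1, failed=1)

    # Run only test_ok.py with --lf: no recorded failure is in the current
    # selection, so the 'pass' branch above leaves the selection untouched.
    testdir.runpytest("test_ok.py", "--lf").assert_outcomes(passed=1)

    # The failure in the other file is still remembered afterwards.
    testdir.runpytest("--lf").assert_outcomes(failed=1)
```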
```diff
@@ -161,8 +156,9 @@ def pytest_sessionfinish(self, session):
         config = self.config
         if config.getvalue("cacheshow") or hasattr(config, "slaveinput"):
             return
-        prev_failed = config.cache.get("cache/lastfailed", None) is not None
-        if (session.testscollected and prev_failed) or self.lastfailed:
+
+        saved_lastfailed = config.cache.get("cache/lastfailed", {})
+        if saved_lastfailed != self.lastfailed:
             config.cache.set("cache/lastfailed", self.lastfailed)
```

A review discussion on this hunk:

- "Now that we always have …"
- "Last-failed already supported correct updating if it was enabled; making it transient means the updating code has to change."
- "Not sure what you mean, could you clarify? Also not sure if it was just a general comment or a request for change."
- "General comment: last-failed in last-failed mode supports working correctly when the test selection doesn't match the failure selection."
- "OK, thanks."
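For context, `config.cache.get(key, default)` returns the JSON-serialisable value stored by a previous run (or the default when nothing is stored), and `config.cache.set(key, value)` persists it between runs. The sketch below is a hypothetical `conftest.py` hook, not pytest's own code, reusing the same read/compare/write pattern as the hunk above:

```python
# Hypothetical conftest.py sketch using pytest's cache API.
def pytest_sessionfinish(session):
    config = session.config
    # Read the value stored by a previous run; {} if there is none yet.
    seen = config.cache.get("cache/example_nodeids", {})
    current = dict.fromkeys((item.nodeid for item in session.items), True)
    # Only write the cache file when something actually changed.
    if seen != current:
        config.cache.set("cache/example_nodeids", current)
```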
A new two-line changelog entry is added:

```diff
@@ -0,0 +1,2 @@
+``--last-failed`` now remembers forever when a test has failed and only forgets it if it passes again. This makes it
+easy to fix a test suite by selectively running files and fixing tests incrementally.
```
Two follow-up comments on the `pytest_report_header` change:

- "Removed the number of failures because it might be incorrect (before and after this patch): we don't know which tests we will actually run, because that is decided only after collection. Because of this I decided to remove the promise that we will run X failures at this point."
- "This problem is fixed by #2624."