Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add confval console_output_style option times #13126

Merged
merged 17 commits into from
Jan 20, 2025
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions AUTHORS
Original file line number Diff line number Diff line change
@@ -142,6 +142,7 @@ Eero Vaher
Eli Boyarski
Elizaveta Shashkova
Éloi Rivard
Emil Hjelm
Endre Galaczi
Eric Hunsberger
Eric Liu
1 change: 1 addition & 0 deletions changelog/13125.feature.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
:confval:`console_output_style` now supports ``times`` to show execution time of each test.
1 change: 1 addition & 0 deletions doc/en/reference/reference.rst
Original file line number Diff line number Diff line change
@@ -1355,6 +1355,7 @@ passed multiple times. The expected format is ``name=value``. For example::
* ``progress``: like classic pytest output, but with a progress indicator.
* ``progress-even-when-capture-no``: allows the use of the progress indicator even when ``capture=no``.
* ``count``: like progress, but shows progress as the number of tests completed instead of a percent.
* ``times``: show the duration of each test.

The default is ``progress``, but you can fall back to ``classic`` if you prefer it or
if the new mode is causing unexpected problems:
70 changes: 63 additions & 7 deletions src/_pytest/terminal.py
Original file line number Diff line number Diff line change
@@ -382,12 +382,15 @@ def __init__(self, config: Config, file: TextIO | None = None) -> None:
self.hasmarkup = self._tw.hasmarkup
self.isatty = file.isatty()
self._progress_nodeids_reported: set[str] = set()
self._timing_nodeids_reported: set[str] = set()
self._show_progress_info = self._determine_show_progress_info()
self._collect_report_last_write: float | None = None
self._already_displayed_warnings: int | None = None
self._keyboardinterrupt_memo: ExceptionRepr | None = None

def _determine_show_progress_info(self) -> Literal["progress", "count", False]:
def _determine_show_progress_info(
self,
) -> Literal["progress", "count", "times", False]:
"""Return whether we should display progress information based on the current config."""
# do not show progress if we are not capturing output (#3038) unless explicitly
# overridden by progress-even-when-capture-no
@@ -405,6 +408,8 @@ def _determine_show_progress_info(self) -> Literal["progress", "count", False]:
return "progress"
elif cfg == "count":
return "count"
elif cfg == "times":
return "times"
else:
return False

@@ -692,19 +697,47 @@ def _get_progress_information_message(self) -> str:
format_string = f" [{counter_format}/{{}}]"
return format_string.format(progress, collected)
return f" [ {collected} / {collected} ]"
else:
if collected:
return (
f" [{len(self._progress_nodeids_reported) * 100 // collected:3d}%]"
)
return " [100%]"
if self._show_progress_info == "times":
if not collected:
return ""
all_reports = (
self._get_reports_to_display("passed")
+ self._get_reports_to_display("xpassed")
+ self._get_reports_to_display("failed")
+ self._get_reports_to_display("xfailed")
+ self._get_reports_to_display("skipped")
+ self._get_reports_to_display("error")
+ self._get_reports_to_display("")
)
current_location = all_reports[-1].location[0]
not_reported = [
r for r in all_reports if r.nodeid not in self._timing_nodeids_reported
]
tests_in_module = sum(
i.location[0] == current_location for i in self._session.items
)
tests_completed = sum(
r.when == "setup"
for r in not_reported
if r.location[0] == current_location
)
last_in_module = tests_completed == tests_in_module
if self.showlongtestinfo or last_in_module:
self._timing_nodeids_reported.update(r.nodeid for r in not_reported)
return format_node_duration(sum(r.duration for r in not_reported))
return ""
if collected:
return f" [{len(self._progress_nodeids_reported) * 100 // collected:3d}%]"
return " [100%]"

def _write_progress_information_if_past_edge(self) -> None:
w = self._width_of_current_line
if self._show_progress_info == "count":
assert self._session
num_tests = self._session.testscollected
progress_length = len(f" [{num_tests}/{num_tests}]")
elif self._show_progress_info == "times":
progress_length = len(" 99h 59m")
else:
progress_length = len(" [100%]")
past_edge = w + progress_length + 1 >= self._screen_width
@@ -1554,6 +1587,29 @@ def format_session_duration(seconds: float) -> str:
return f"{seconds:.2f}s ({dt})"


def format_node_duration(seconds: float) -> str:
    """Format the given seconds in a human readable manner to show in the test progress.

    The chosen unit scales with the magnitude (us -> ms -> s -> m/s -> h/m) so
    the result stays compact in the terminal progress column.
    """
    # The formatting is designed to be compact and readable, with at most 8 characters
    # for durations below 100 hours (e.g. " 99h 59m", " 123.6ms").
    if seconds < 0.00001:
        return f" {seconds * 1000000:.3f}us"
    if seconds < 0.0001:
        return f" {seconds * 1000000:.2f}us"
    if seconds < 0.001:
        return f" {seconds * 1000000:.1f}us"
    if seconds < 0.01:
        return f" {seconds * 1000:.3f}ms"
    if seconds < 0.1:
        return f" {seconds * 1000:.2f}ms"
    if seconds < 1:
        return f" {seconds * 1000:.1f}ms"
    if seconds < 60:
        return f" {seconds:.3f}s"
    if seconds < 3600:
        # Truncate the remainder instead of rounding: "{x:.0f}" rounds, which
        # could display e.g. 119.7s as " 1m 60s" instead of " 1m 59s".
        return f" {int(seconds // 60)}m {int(seconds % 60)}s"
    return f" {int(seconds // 3600)}h {int(seconds % 3600 // 60)}m"


def _get_raw_skip_reason(report: TestReport) -> str:
"""Get the reason string of a skip/xfail/xpass test report.
118 changes: 118 additions & 0 deletions testing/test_terminal.py
Original file line number Diff line number Diff line change
@@ -2073,6 +2073,21 @@ def test_foobar(i): pass
""",
)

@pytest.fixture
def more_tests_files(self, pytester: Pytester) -> None:
    """Create two test modules of different sizes: test_bar (30 tests) and test_foo (5)."""
    sources = {
        "test_bar": """
            import pytest
            @pytest.mark.parametrize('i', range(30))
            def test_bar(i): pass
        """,
        "test_foo": """
            import pytest
            @pytest.mark.parametrize('i', range(5))
            def test_foo(i): pass
        """,
    }
    pytester.makepyfile(**sources)

def test_zero_tests_collected(self, pytester: Pytester) -> None:
"""Some plugins (testmon for example) might issue pytest_runtest_logreport without any tests being
actually collected (#2971)."""
@@ -2169,6 +2184,52 @@ def test_count(self, many_tests_files, pytester: Pytester) -> None:
]
)

def test_times(self, many_tests_files, pytester: Pytester) -> None:
    """Each test file's progress line should end with a formatted duration."""
    pytester.makeini(
        """
        [pytest]
        console_output_style = times
        """
    )
    result = pytester.runpytest()
    # Trailing duration such as " 1.236ms" or " 1m 59s".
    duration = r"\s+ \d{1,3}[\.[a-z\ ]{1,2}\d{0,3}\w{1,2}$"
    result.stdout.re_match_lines(
        [
            r"test_bar.py \.{10} " + duration,
            r"test_foo.py \.{5} " + duration,
            r"test_foobar.py \.{5} " + duration,
        ]
    )

def test_times_multiline(
    self, more_tests_files, monkeypatch, pytester: Pytester
) -> None:
    """A progress line that wraps over two terminal rows still ends with a duration."""
    pytester.makeini(
        """
        [pytest]
        console_output_style = times
        """
    )
    # Narrow terminal forces test_bar's dots onto a second line.
    monkeypatch.setenv("COLUMNS", "40")
    result = pytester.runpytest()
    duration = r"\s+ \d{1,3}[\.[a-z\ ]{1,2}\d{0,3}\w{1,2}$"
    result.stdout.re_match_lines(
        [
            r"test_bar.py ...................",
            r"........... " + duration,
            r"test_foo.py \.{5} " + duration,
        ],
        consecutive=True,
    )

def test_times_none_collected(self, pytester: Pytester) -> None:
    """The ``times`` style must not crash when no tests are collected."""
    pytester.makeini(
        """
        [pytest]
        console_output_style = times
        """
    )
    assert pytester.runpytest().ret == ExitCode.NO_TESTS_COLLECTED

def test_verbose(self, many_tests_files, pytester: Pytester) -> None:
output = pytester.runpytest("-v")
output.stdout.re_match_lines(
@@ -2195,6 +2256,22 @@ def test_verbose_count(self, many_tests_files, pytester: Pytester) -> None:
]
)

def test_verbose_times(self, many_tests_files, pytester: Pytester) -> None:
    """In verbose mode every individual test line ends with its own duration."""
    pytester.makeini(
        """
        [pytest]
        console_output_style = times
        """
    )
    result = pytester.runpytest("-v")
    duration = r"\s+ \d{1,3}[\.[a-z\ ]{1,2}\d{0,3}\w{1,2}$"
    result.stdout.re_match_lines(
        [
            r"test_bar.py::test_bar\[0\] PASSED " + duration,
            r"test_foo.py::test_foo\[4\] PASSED " + duration,
            r"test_foobar.py::test_foobar\[4\] PASSED " + duration,
        ]
    )

def test_xdist_normal(
self, many_tests_files, pytester: Pytester, monkeypatch
) -> None:
@@ -2247,6 +2324,26 @@ def test_xdist_verbose(
]
)

def test_xdist_times(
    self, many_tests_files, pytester: Pytester, monkeypatch
) -> None:
    """With pytest-xdist, verbose worker lines carry per-test durations."""
    pytest.importorskip("xdist")
    monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
    pytester.makeini(
        """
        [pytest]
        console_output_style = times
        """
    )
    result = pytester.runpytest("-n2", "-v")
    duration = r"\d{1,3}[\.[a-z\ ]{1,2}\d{0,3}\w{1,2}"
    expected = [
        rf"\[gw\d\] {duration} PASSED test_{name}.py::test_{name}\[1\]"
        for name in ("bar", "foo", "foobar")
    ]
    result.stdout.re_match_lines_random(expected)

def test_capture_no(self, many_tests_files, pytester: Pytester) -> None:
output = pytester.runpytest("-s")
output.stdout.re_match_lines(
@@ -2537,6 +2634,27 @@ def test_format_session_duration(seconds, expected):
assert format_session_duration(seconds) == expected


# (seconds, expected 8-character rendering) pairs covering every unit branch.
_NODE_DURATION_CASES = [
    (3600 * 100 - 60, " 99h 59m"),
    (31 * 60 - 1, " 30m 59s"),
    (10.1236, " 10.124s"),
    (9.1236, " 9.124s"),
    (0.1236, " 123.6ms"),
    (0.01236, " 12.36ms"),
    (0.001236, " 1.236ms"),
    (0.0001236, " 123.6us"),
    (0.00001236, " 12.36us"),
    (0.000001236, " 1.236us"),
]


@pytest.mark.parametrize("seconds, expected", _NODE_DURATION_CASES)
def test_format_node_duration(seconds: float, expected: str) -> None:
    from _pytest.terminal import format_node_duration

    assert format_node_duration(seconds) == expected


def test_collecterror(pytester: Pytester) -> None:
p1 = pytester.makepyfile("raise SyntaxError()")
result = pytester.runpytest("-ra", str(p1))
Loading