
Update sweep tests infra to use pytest #8553

Open

eyonland opened this issue May 16, 2024 · 1 comment

eyonland (Contributor) commented May 16, 2024

If you put this code into the conftest.py file inside the sweeps folder:

import csv
import os

import pytest


def get_status(report):
    # Map a pytest report to an outcome string, folding in xfail/xpass.
    # A test marked xfail that actually fails is reported as skipped
    # with a ``wasxfail`` attribute, which is why the skipped branch
    # also returns "xfailed".
    if report.passed:
        return "xpassed" if hasattr(report, "wasxfail") else "passed"
    elif report.failed:
        return "xfailed" if hasattr(report, "wasxfail") else "failed"
    elif report.skipped:
        return "xfailed" if hasattr(report, "wasxfail") else "skipped"


class SweepHarness:
    def __init__(self, output_dir: str):
        self.results = {}
        self.output_dir = output_dir

    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_makereport(self, item, call):
        outcome = yield
        rep = outcome.get_result()
        # Ignore passing setup/teardown reports but keep failed or
        # skipped ones; this is what lets skipped tests show up in the
        # results.
        if rep.when != "call" and rep.passed:
            return
        test_file = item.location[0]
        self.results.setdefault(test_file, [])
        # Non-parametrized tests have no callspec.
        callspec = getattr(item, "callspec", None)
        params = dict(callspec.params) if callspec else {}
        params["outcome"] = get_status(rep)
        self.results[test_file].append(params)

    def pytest_terminal_summary(self, terminalreporter):
        os.makedirs(self.output_dir, exist_ok=True)

        for test_file, results in self.results.items():
            base_name = os.path.basename(os.path.splitext(test_file)[0])
            base_name = base_name.replace("test_", "")  # Remove the "test_" prefix
            file_path = os.path.join(self.output_dir, f"{base_name}_results.csv")

            # Put "outcome" first, followed by the parametrize keys.
            fieldnames = ["outcome"] + [key for key in results[0] if key != "outcome"]
            with open(file_path, "w", newline="") as f:
                writer = csv.DictWriter(f, fieldnames=fieldnames)
                writer.writeheader()
                writer.writerows(results)

            terminalreporter.write_line(f"Wrote sweep results to {file_path}")


def pytest_configure(config):
    output_dir = config.getoption("csv_dir")
    config.pluginmanager.register(SweepHarness(output_dir), "sweep-harness")

and this in the top-most conftest.py:

import os


def pytest_addoption(parser):
    parser.addoption(
        "--csv-dir",
        action="store",
        default=os.path.join(os.getcwd(), "sweep_results"),
        help="Directory where sweep results will be saved",
    )
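
The resulting layout, as I understand the intended structure, would be roughly:

repo/
├── conftest.py        # defines --csv-dir
└── sweeps/
    ├── conftest.py    # registers SweepHarness
    └── test_math.py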

Then when you call pytest sweeps, it will collect all the test results into CSV files (one per test file), including any skipped or xfailed tests.
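To write results somewhere other than the default sweep_results directory, pass the option explicitly (the path here is just an example):

pytest sweeps --csv-dir ./my_sweep_results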
For instance, I have the following test_math.py file:

import pytest

from harness import math


@pytest.mark.parametrize(
    "x, expected",
    [
        (2, 4),
        (3, 6),
        (4, 8),
        pytest.param(5, 11, marks=pytest.mark.xfail),
        (6, 13),  # this will report as failed
    ],
)
def test_double(x: int, expected: int):
    assert math.double(x) == expected

and I get this CSV called math_results.csv:

outcome,x,expected
passed,2,4
passed,3,6
passed,4,8
xfailed,5,11
failed,6,13
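
For completeness, the harness.math module isn't part of the snippet above; a minimal stand-in for reproducing the example would be:

# harness/math.py -- minimal stand-in assumed for this example
def double(x: int) -> int:
    return x * 2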

eyonland (Contributor, Author) commented
We may not follow this approach, for the following reasons:

  • The start/stop of a sweep is not clear, especially when a filter is used
  • We want to restrict users to one sweep per file
  • A dedicated format is easier to read because, for example, all skip conditions are guaranteed to be in one place
  • Less overhead
  • Users cannot add arbitrary fixtures
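
For illustration only, a one-sweep-per-file module in that alternative style might look something like the sketch below; the parameters/skip/run names are hypothetical, not an existing API:

# Hypothetical sweep definition -- illustrative only, not an existing API.
from harness import math

# One sweep per file: the full parameter space is declared in one place.
parameters = {
    "x": [2, 3, 4, 5, 6],
}

def skip(x):
    # All skip conditions are guaranteed to live in this one function.
    if x > 5:
        return True, "x above supported range"
    return False, None

def run(x):
    # Returns pass/fail for a single parameter combination.
    return math.double(x) == 2 * x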
