From ff1e97116a1321e1a6cac5e2e7bf3fbaa1ef3365 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Sun, 5 Nov 2023 17:00:41 -0500 Subject: [PATCH 01/11] Add pytest runner mode This commit adds a new runner mode to stestr that enables users to run test suites using pytest instead of unittest. This is an opt-in feature as for compatibility reasons we'll always default to using unittest, as all existing stestr users have only been leveraging unittest to run tests. A pytest plugin that adds a subunit output mode is now bundled with stestr. When the user specifies running tests with pytest it calls out to pytest just as stestr does with the integrated unittest extension today and sets the appropriate flags to enable subunit output. To facilitate this new feature pytest is added to the stestr requirements list. I debated making it an optional dependency, but to make it easier for the significantly larger pytest user base (it's downloaded ~900x more per month) it seemed simpler to just make it a hard requirement. But I'm still not 100% on this decision so if there is sufficient pushback we can start it out as an optional dependency. Co-Authored-By: Joe Gordon Closes: #354 --- doc/source/MANUAL.rst | 30 +++++- requirements.txt | 1 + setup.cfg | 2 + stestr/commands/run.py | 13 +++ stestr/config_file.py | 41 ++++++-- stestr/pytest_subunit.py | 218 +++++++++++++++++++++++++++++++++++++++ 6 files changed, 298 insertions(+), 7 deletions(-) create mode 100644 stestr/pytest_subunit.py diff --git a/doc/source/MANUAL.rst b/doc/source/MANUAL.rst index fae7c1a..7cc21fb 100644 --- a/doc/source/MANUAL.rst +++ b/doc/source/MANUAL.rst @@ -68,6 +68,7 @@ A full example config file is:: test_path=./project/tests top_dir=./ group_regex=([^\.]*\.)* + runner=pytest The ``group_regex`` option is used to specify is used to provide a scheduler @@ -77,7 +78,10 @@ You can also specify the ``parallel_class=True`` instead of group_regex to group tests in the stestr scheduler together by class. Since this is a common use case this enables that without needing to memorize the complicated regex for ``group_regex`` to do -this. +this. The ``runner`` argument is used to specify the test runner to use. By +default a runner based on Python's standard library ``unittest`` module is +used. However, if you'd prefer to use ``pytest`` as your runner you can specify +this as the runner argument in the config file. There is also an option to specify all the options in the config file via the CLI. This way you can run stestr directly without having to write a config file @@ -137,6 +141,8 @@ providing configs in TOML format, the configuration directives **must** be located in a ``[tool.stestr]`` section, and the filename **must** have a ``.toml`` extension. + + Running tests ------------- @@ -166,6 +172,28 @@ Additionally you can specify a specific class or method within that file using will skip discovery and directly call the test runner on the test method in the specified test class. +Test runners +'''''''''''' + +By default ``stestr`` is built to run tests leveraging the Python standard +library ``unittest`` modules runner. stestr includes a test runner that will +emit the subunit protocol it relies on internally to handle live results from +parallel workers. However, there is an alternative runner available that +leverages ``pytest`` which is a popular test runner and testing library +alternative to the standard library's ``unittest`` module. 
The ``stestr`` +project bundles a ``pytest`` plugin that adds real time subunit output to +pytest. As a test suite author the ``pytest`` plugin enables you to write your +test suite using pytest's test library instead of ``unittest``. There are two +ways to specify your test runner, first is the ``--pytest`` flag on +``stestr run``. This tells stestr for this test run use ``pytest`` as the runner +instead of ``unittest``, this is good for a/b comparisons between the test +runners and also general investigations with using different test runners. The +other option is to leverage your project's config file and set the ``runner`` +field to either ``pytest`` or ``unittest`` (although ``unittest`` is always the +default so you shouldn't ever need to set it). This is the more natural fit +because if your test suite is written using pytest it won't be compatible with +the unittest based runner. + Running with pdb '''''''''''''''' diff --git a/requirements.txt b/requirements.txt index 8e64059..edc625e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,3 +10,4 @@ PyYAML>=3.10.0 # MIT voluptuous>=0.8.9 # BSD License tomlkit>=0.11.6 # MIT extras>=1.0.0 +pytest>=2.3 # MIT diff --git a/setup.cfg b/setup.cfg index b3b35fe..66c468d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -46,6 +46,8 @@ stestr.cm = history_list = stestr.commands.history:HistoryList history_show = stestr.commands.history:HistoryShow history_remove = stestr.commands.history:HistoryRemove +pytest11 = + stestr_subunit = stestr.pytest_subunit [extras] sql = diff --git a/stestr/commands/run.py b/stestr/commands/run.py index 8c02b24..8fb26d1 100644 --- a/stestr/commands/run.py +++ b/stestr/commands/run.py @@ -244,6 +244,12 @@ def get_parser(self, prog_name): help="If set, show non-text attachments. This is " "generally only useful for debug purposes.", ) + parser.add_argument( + "--pytest", + action="store_true", + dest="pytest", + help="If set to True enable using pytest as the test runner", + ) return parser def take_action(self, parsed_args): @@ -335,6 +341,7 @@ def take_action(self, parsed_args): all_attachments=all_attachments, show_binary_attachments=args.show_binary_attachments, pdb=args.pdb, + pytest=args.pytest, ) # Always output slowest test info if requested, regardless of other @@ -396,6 +403,7 @@ def run_command( all_attachments=False, show_binary_attachments=True, pdb=False, + pytest=False, ): """Function to execute the run command @@ -460,6 +468,8 @@ def run_command( :param str pdb: Takes in a single test_id to bypasses test discover and just execute the test specified without launching any additional processes. A file name may be used in place of a test name. + :param bool pytest: Set to true to use pytest as the test runner instead of + the stestr stdlib based unittest runner :return return_code: The exit code for the command. 0 for success and > 0 for failures. @@ -645,6 +655,7 @@ def run_tests(): top_dir=top_dir, test_path=test_path, randomize=random, + pytest=pytest, ) if isolated: result = 0 @@ -669,6 +680,7 @@ def run_tests(): randomize=random, test_path=test_path, top_dir=top_dir, + pytest=pytest, ) run_result = _run_tests( @@ -724,6 +736,7 @@ def run_tests(): randomize=random, test_path=test_path, top_dir=top_dir, + pytest=pytest, ) if not _run_tests(cmd, until_failure): # If the test was filtered, it won't have been run. 
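Before the ``config_file.py`` hunk below, it may help to see the runner-selection precedence it implements in isolation. The following is a minimal sketch only; ``resolve_runner`` and ``build_command`` are hypothetical helpers, while the real logic is inlined in ``TestrConf.get_run_command``::

    from typing import Optional


    def resolve_runner(pytest_flag: bool, config_runner: Optional[str]) -> bool:
        """Return True when pytest should be used for this run."""
        # The --pytest CLI flag always forces pytest for the current run.
        if pytest_flag:
            return True
        # Otherwise the config file's ``runner`` field decides.
        if config_runner is None or config_runner == "unittest":
            return False
        if config_runner == "pytest":
            return True
        raise RuntimeError(
            "Only pytest or unittest can be specified in the config file."
        )


    def build_command(python, top_dir, test_path, use_pytest):
        """Simplified command templates matching the hunk below."""
        if use_pytest:
            # Listing uses pytest's --co; $IDOPTION expands to
            # "--load-list $IDFILE", handled by the bundled plugin.
            return (
                f'{python} -m pytest --subunit --rootdir="{top_dir}" '
                f'"{test_path}" $LISTOPT $IDOPTION'
            )
        # Default: the stdlib-unittest based subunit runner.
        return (
            f'{python} -m stestr.subunit_runner.run discover -t "{top_dir}" '
            f'"{test_path}" $LISTOPT $IDOPTION'
        )


    print(build_command("python", "./", "./project/tests",
                        resolve_runner(False, "pytest")))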
diff --git a/stestr/config_file.py b/stestr/config_file.py index 6a66503..beafbb3 100644 --- a/stestr/config_file.py +++ b/stestr/config_file.py @@ -39,6 +39,7 @@ class TestrConf: top_dir = None parallel_class = False group_regex = None + runner = None def __init__(self, config_file, section="DEFAULT"): self.config_file = str(config_file) @@ -59,6 +60,7 @@ def _load_from_configparser(self): self.group_regex = parser.get( self.section, "group_regex", fallback=self.group_regex ) + self.runner = parser.get(self.section, "runner", fallback=self.runner) def _load_from_toml(self): with open(self.config_file) as f: @@ -68,6 +70,7 @@ def _load_from_toml(self): self.top_dir = root.get("top_dir", self.top_dir) self.parallel_class = root.get("parallel_class", self.parallel_class) self.group_regex = root.get("group_regex", self.group_regex) + self.runner = root.get("runner", self.runner) @classmethod def load_from_file(cls, config): @@ -113,6 +116,7 @@ def get_run_command( exclude_regex=None, randomize=False, parallel_class=None, + pytest=False, ): """Get a test_processor.TestProcessorFixture for this config file @@ -158,6 +162,8 @@ def get_run_command( stestr scheduler by class. If both this and the corresponding config file option which includes `group-regex` are set, this value will be used. + :param bool pytest: Set to true to use pytest as the test runner instead of + the stestr stdlib based unittest runner :returns: a TestProcessorFixture object for the specified config file and any arguments passed into this function @@ -198,12 +204,35 @@ def get_run_command( if os.path.exists('"%s"' % python): python = '"%s"' % python - command = ( - '%s -m stestr.subunit_runner.run discover -t "%s" "%s" ' - "$LISTOPT $IDOPTION" % (python, top_dir, test_path) - ) - listopt = "--list" - idoption = "--load-list $IDFILE" + if not pytest and self.runner is not None: + if self.runner == "pytest": + pytest = True + elif self.runner == "unittest": + pytest = False + else: + raise RuntimeError( + "Specified runner argument value: {self.runner} in config file is not " + "valid. Only pytest or unittest can be specified in the config file." + ) + if pytest: + command = ( + '%s -m pytest --subunit --rootdir="%s" "%s" ' + "$LISTOPT $IDOPTION" + % ( + python, + top_dir, + test_path, + ) + ) + listopt = "--co" + idoption = "--load-list $IDFILE" + else: + command = ( + '%s -m stestr.subunit_runner.run discover -t "%s" "%s" ' + "$LISTOPT $IDOPTION" % (python, top_dir, test_path) + ) + listopt = "--list" + idoption = "--load-list $IDFILE" # If the command contains $IDOPTION read that command from config # Use a group regex if one is defined if parallel_class or self.parallel_class: diff --git a/stestr/pytest_subunit.py b/stestr/pytest_subunit.py new file mode 100644 index 0000000..e87f4e8 --- /dev/null +++ b/stestr/pytest_subunit.py @@ -0,0 +1,218 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +# This file was forked from: +# https://github.com/jogo/pytest-subunit/blob/f5da98f3bee2ffc8d898ced92034f11bcf8e35fe/pytest_subunit.py +# which itself was a fork from the now archived: +# pytest-subunit: https://github.com/lukaszo/pytest-subunit + +from __future__ import annotations + +from typing import Optional + +import datetime +from io import StringIO +import pathlib + +from _pytest._io import TerminalWriter +from _pytest.terminal import TerminalReporter +import pytest +from subunit import StreamResultToBytes + + +def to_path(testid: str) -> pathlib.PosixPath: + delim = "::" + if delim in testid: + path = testid.split(delim)[0] + else: + path = testid + return pathlib.PosixPath(path).resolve() + + +# hook +def pytest_ignore_collect(collection_path, config) -> Optional[bool]: + # TODO(jogo): If specify a path, use same short circuit logic + # Only collect files in the list + if config.option.subunit_load_list: + # TODO(jogo): memoize me + with open(config.option.subunit_load_list) as f: + testids = f.readlines() + filenames = [to_path(line.strip()) for line in testids] + for filename in filenames: + if str(filename).startswith(str(collection_path)): + # Don't ignore + return None + # Ignore everything else by default + return True + return None + + +# hook +def pytest_collection_modifyitems(session, config, items): + if config.option.subunit: + terminal_reporter = config.pluginmanager.getplugin("terminalreporter") + terminal_reporter.tests_count += len(items) + if config.option.subunit_load_list: + with open(config.option.subunit_load_list) as f: + to_run = f.readlines() + to_run = [line.strip() for line in to_run] + # print(to_run) + # print([item.nodeid for item in items]) + filtered = [item for item in items if item.nodeid in to_run] + items[:] = filtered + + +# hook +def pytest_deselected(items): + """Update tests_count to not include deselected tests""" + if len(items) > 0: + pluginmanager = items[0].config.pluginmanager + terminal_reporter = pluginmanager.getplugin("terminalreporter") + if ( + hasattr(terminal_reporter, "tests_count") + and terminal_reporter.tests_count > 0 + ): + terminal_reporter.tests_count -= len(items) + + +# hook +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting", "reporting", after="general") + group._addoption( + "--subunit", + action="store_true", + dest="subunit", + default=False, + help=("enable pytest-subunit"), + ) + group._addoption( + "--load-list", + dest="subunit_load_list", + default=False, + help=("Path to file with list of tests to run"), + ) + + +@pytest.mark.trylast +def pytest_configure(config): + if config.option.subunit: + # Get the standard terminal reporter plugin and replace it with our + standard_reporter = config.pluginmanager.getplugin("terminalreporter") + subunit_reporter = SubunitTerminalReporter(standard_reporter) + config.pluginmanager.unregister(standard_reporter) + config.pluginmanager.register(subunit_reporter, "terminalreporter") + + +class SubunitTerminalReporter(TerminalReporter): + def __init__(self, reporter): + TerminalReporter.__init__(self, reporter.config) + self.tests_count = 0 + self.reports = [] + self.skipped = [] + self.failed = [] + self.result = StreamResultToBytes(self._tw._file) + + @property + def no_summary(self): + return True + + def _status(self, report: pytest.TestReport, status: str): + # task id + test_id = report.nodeid + + # get time + now = datetime.datetime.now(datetime.timezone.utc) + + # capture output + buffer = StringIO() + writer = TerminalWriter(file=buffer) + 
report.toterminal(writer) + buffer.seek(0) + out_bytes = buffer.read().encode("utf-8") + + # send status + self.result.status( + test_id=test_id, + test_status=status, + timestamp=now, + file_name=report.fspath, + file_bytes=out_bytes, + mime_type="text/plain; charset=utf8", + ) + + def pytest_collectreport(self, report): + pass + + def pytest_collection_finish(self, session): + if self.config.option.collectonly: + self._printcollecteditems(session.items) + + def pytest_collection(self): + # Prevent shoving `collecting` message + pass + + def report_collect(self, final=False): + # Prevent shoving `collecting` message + pass + + def pytest_sessionstart(self, session): + # Set self._session + # https://github.com/pytest-dev/pytest/blob/58cf20edf08d84c5baf08f0566cc9bccbc4ec7fd/src/_pytest/terminal.py#L692 + self._session = session + + def pytest_runtest_logstart(self, nodeid, location): + pass + + def pytest_sessionfinish(self, session, exitstatus): + # always exit with exitcode 0 + session.exitstatus = 0 + + def pytest_runtest_logreport(self, report: pytest.TestReport): + self.reports.append(report) + test_id = report.nodeid + if report.when in ["setup", "session"]: + self._status(report, "exists") + if report.outcome == "passed": + self._status(report, "inprogress") + if report.outcome == "failed": + self._status(report, "fail") + elif report.outcome == "skipped": + self._status(report, "skip") + elif report.when in ["call"]: + if hasattr(report, "wasxfail"): + if report.skipped: + self._status(report, "xfail") + elif report.failed: + self._status(report, "uxsuccess") + elif report.outcome == "failed": + self._status(report, "fail") + self.failed.append(test_id) + elif report.outcome == "skipped": + self._status(report, "skip") + self.skipped.append(test_id) + elif report.when in ["teardown"]: + if test_id not in self.skipped and test_id not in self.failed: + if report.outcome == "passed": + self._status(report, "success") + elif report.outcome == "failed": + self._status(report, "fail") + else: + raise Exception(str(report)) + + def _printcollecteditems(self, items): + for item in items: + test_id = item.nodeid + self.result.status(test_id=test_id, test_status="exists") + + def _determine_show_progress_info(self): + # Never show progress bar + return False From d1c802c75b095efa5ccd3865514f8f2609e39a12 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Sat, 11 Nov 2023 09:28:35 -0500 Subject: [PATCH 02/11] Add return code tests This commit adds a return code subclass that executes the tests in pytest mode written with the pytest test suite. These tests caught a couple of issues when running in pytest mode. 
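One of the issues these tests surfaced is in the plugin's mapping from pytest report outcomes to subunit statuses during the ``call`` phase. A condensed, standalone sketch of the corrected mapping follows; ``map_call_outcome`` is illustrative only, the real logic lives in ``SubunitTerminalReporter.pytest_runtest_logreport``::

    from typing import Optional


    def map_call_outcome(outcome: str, wasxfail: bool) -> Optional[str]:
        """Map a pytest "call" report to a subunit status (None = defer)."""
        if wasxfail:
            if outcome == "skipped":
                # Expected failure that failed as expected.
                return "xfail"
            if outcome == "passed":
                # Unexpected success; also recorded as a failure so the
                # run's return code reflects it.
                return "uxsuccess"
            return None
        if outcome == "failed":
            return "fail"
        if outcome == "skipped":
            return "skip"
        # Passing tests get no status at call time; "success" is emitted
        # from the teardown phase once the test is known not to have failed.
        return None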
--- doc/source/MANUAL.rst | 7 ++ stestr/commands/run.py | 18 ++-- stestr/config_file.py | 2 +- stestr/pytest_subunit.py | 3 +- stestr/tests/pytest_mode_files/__init__.py | 0 stestr/tests/pytest_mode_files/failing-tests | 22 +++++ stestr/tests/pytest_mode_files/passing-tests | 18 ++++ .../pytest_mode_files/pytest_stestr_conf | 3 + stestr/tests/pytest_mode_files/setup.cfg | 20 +++++ stestr/tests/test_return_codes.py | 87 +++++++++++++++++++ 10 files changed, 171 insertions(+), 9 deletions(-) create mode 100644 stestr/tests/pytest_mode_files/__init__.py create mode 100644 stestr/tests/pytest_mode_files/failing-tests create mode 100644 stestr/tests/pytest_mode_files/passing-tests create mode 100644 stestr/tests/pytest_mode_files/pytest_stestr_conf create mode 100644 stestr/tests/pytest_mode_files/setup.cfg diff --git a/doc/source/MANUAL.rst b/doc/source/MANUAL.rst index 7cc21fb..339c6b8 100644 --- a/doc/source/MANUAL.rst +++ b/doc/source/MANUAL.rst @@ -172,6 +172,13 @@ Additionally you can specify a specific class or method within that file using will skip discovery and directly call the test runner on the test method in the specified test class. +.. note:: + + If you're using ``--pytest`` or have the runner configured to pytest, then + the ``--no-discover``/``-n`` option passes the id field directly to + ``pytest`` and the id passed via the argument needs to be in a format that + pytest will accept. + Test runners '''''''''''' diff --git a/stestr/commands/run.py b/stestr/commands/run.py index 8fb26d1..4d2e2a5 100644 --- a/stestr/commands/run.py +++ b/stestr/commands/run.py @@ -529,13 +529,15 @@ def run_command( stdout.write(msg) return 2 + conf = config_file.TestrConf.load_from_file(config) if no_discover: ids = no_discover - if "::" in ids: - ids = ids.replace("::", ".") - if ids.find("/") != -1: - root = ids.replace(".py", "") - ids = root.replace("/", ".") + if not pytest and conf.runner != "pytest": + if "::" in ids: + ids = ids.replace("::", ".") + if ids.find("/") != -1: + root = ids.replace(".py", "") + ids = root.replace("/", ".") stestr_python = sys.executable if os.environ.get("PYTHON"): python_bin = os.environ.get("PYTHON") @@ -545,7 +547,10 @@ def run_command( raise RuntimeError( "The Python interpreter was not found and " "PYTHON is not set" ) - run_cmd = python_bin + " -m stestr.subunit_runner.run " + ids + if pytest or conf.runner == "pytest": + run_cmd = python_bin + " -m pytest --subunit " + ids + else: + run_cmd = python_bin + " -m stestr.subunit_runner.run " + ids def run_tests(): run_proc = [ @@ -639,7 +644,6 @@ def run_tests(): # that are both failing and listed. ids = list_ids.intersection(ids) - conf = config_file.TestrConf.load_from_file(config) if not analyze_isolation: cmd = conf.get_run_command( ids, diff --git a/stestr/config_file.py b/stestr/config_file.py index beafbb3..517ae04 100644 --- a/stestr/config_file.py +++ b/stestr/config_file.py @@ -211,7 +211,7 @@ def get_run_command( pytest = False else: raise RuntimeError( - "Specified runner argument value: {self.runner} in config file is not " + f"Specified runner argument value: {self.runner} in config file is not " "valid. Only pytest or unittest can be specified in the config file." 
) if pytest: diff --git a/stestr/pytest_subunit.py b/stestr/pytest_subunit.py index e87f4e8..9df7c90 100644 --- a/stestr/pytest_subunit.py +++ b/stestr/pytest_subunit.py @@ -191,8 +191,9 @@ def pytest_runtest_logreport(self, report: pytest.TestReport): if hasattr(report, "wasxfail"): if report.skipped: self._status(report, "xfail") - elif report.failed: + elif report.outcome == "passed": self._status(report, "uxsuccess") + self.failed.append(test_id) elif report.outcome == "failed": self._status(report, "fail") self.failed.append(test_id) diff --git a/stestr/tests/pytest_mode_files/__init__.py b/stestr/tests/pytest_mode_files/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/stestr/tests/pytest_mode_files/failing-tests b/stestr/tests/pytest_mode_files/failing-tests new file mode 100644 index 0000000..fa59696 --- /dev/null +++ b/stestr/tests/pytest_mode_files/failing-tests @@ -0,0 +1,22 @@ +import pytest + +def inc(x): + return x + 1 + + +def test_answer(): + assert inc(3) == 5 + + +def test_pass(): + assert False + + +def test_pass_list(): + test_list = ['test', 'a', 'b'] + assert "fail" in test_list + + +@pytest.mark.xfail +def test_unexpected_pass(): + assert True diff --git a/stestr/tests/pytest_mode_files/passing-tests b/stestr/tests/pytest_mode_files/passing-tests new file mode 100644 index 0000000..ba30b7e --- /dev/null +++ b/stestr/tests/pytest_mode_files/passing-tests @@ -0,0 +1,18 @@ +import pytest + +def inc(x): + return x + 1 + +def test_answer(): + assert inc(4) == 5 + +def test_pass(): + assert True + +def test_pass_list(): + test_list = ['test', 'a', 'b'] + assert "test" in test_list + +@pytest.mark.xfail +def test_xfail(): + assert 0 == 1 diff --git a/stestr/tests/pytest_mode_files/pytest_stestr_conf b/stestr/tests/pytest_mode_files/pytest_stestr_conf new file mode 100644 index 0000000..4f69032 --- /dev/null +++ b/stestr/tests/pytest_mode_files/pytest_stestr_conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./tests +runner=pytest diff --git a/stestr/tests/pytest_mode_files/setup.cfg b/stestr/tests/pytest_mode_files/setup.cfg new file mode 100644 index 0000000..f6f9f73 --- /dev/null +++ b/stestr/tests/pytest_mode_files/setup.cfg @@ -0,0 +1,20 @@ +[metadata] +name = tempest_unit_tests +version = 1 +summary = Fake Project for testing wrapper scripts +author = OpenStack +author-email = openstack-dev@lists.openstack.org +home-page = http://www.openstack.org/ +classifier = + Intended Audience :: Information Technology + Intended Audience :: System Administrators + Intended Audience :: Developers + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python + Programming Language :: Python :: 2 + Programming Language :: Python :: 2.7 + +[global] +setup-hooks = + pbr.hooks.setup_hook diff --git a/stestr/tests/test_return_codes.py b/stestr/tests/test_return_codes.py index 99c1e3c..b388aee 100644 --- a/stestr/tests/test_return_codes.py +++ b/stestr/tests/test_return_codes.py @@ -524,3 +524,90 @@ def test_all_configs_missing(self): os.remove(self.pyproject_toml) output, _ = self.assertRunExit("stestr run passing", 1) self.assertIn(b"No config file found", output) + + +class TestPytestReturnCodes(TestReturnCodes): + def setUp(self): + super().setUp() + os.chdir(self.repo_root) + # Setup test dirs + self.directory = tempfile.mkdtemp(prefix="stestr-pytest-unit") + self.addCleanup(shutil.rmtree, self.directory, ignore_errors=True) + self.test_dir = os.path.join(self.directory, "tests") + os.mkdir(self.test_dir) + # 
Setup Test files + self.repo_root = os.path.abspath(os.curdir) + self.testr_conf_file = os.path.join(self.directory, ".stestr.conf") + self.setup_cfg_file = os.path.join(self.directory, "setup.cfg") + self.passing_file = os.path.join(self.test_dir, "test_passing.py") + self.failing_file = os.path.join(self.test_dir, "test_failing.py") + self.init_file = os.path.join(self.test_dir, "__init__.py") + self.setup_py = os.path.join(self.directory, "setup.py") + self.user_config = os.path.join(self.directory, "stestr.yaml") + shutil.copy( + "stestr/tests/pytest_mode_files/pytest_stestr_conf", self.testr_conf_file + ) + shutil.copy("stestr/tests/pytest_mode_files/passing-tests", self.passing_file) + shutil.copy("stestr/tests/pytest_mode_files/failing-tests", self.failing_file) + shutil.copy("setup.py", self.setup_py) + shutil.copy("stestr/tests/pytest_mode_files/setup.cfg", self.setup_cfg_file) + shutil.copy("stestr/tests/pytest_mode_files/__init__.py", self.init_file) + shutil.copy("stestr/tests/files/stestr.yaml", self.user_config) + + self.stdout = io.StringIO() + self.stderr = io.StringIO() + # Change directory, run wrapper and check result + self.addCleanup(os.chdir, self.repo_root) + os.chdir(self.directory) + subprocess.call("stestr init", shell=True) + + def test_history_show_passing(self): + self.assertRunExit("stestr run passing", 0) + self.assertRunExit("stestr run", 1) + self.assertRunExit("stestr run passing", 0) + output, _ = self.assertRunExit("stestr history show 0", 0) + lines = [x.rstrip() for x in output.decode("utf8").split("\n")] + self.assertIn(" - Passed: 4", lines) + self.assertIn(" - Failed: 0", lines) + self.assertIn(" - Expected Fail: 1", lines) + + def test_history_show_failing(self): + self.assertRunExit("stestr run passing", 0) + self.assertRunExit("stestr run", 1) + self.assertRunExit("stestr run passing", 0) + output, _ = self.assertRunExit("stestr history show 1", 1) + lines = [x.rstrip() for x in output.decode("utf8").split("\n")] + self.assertIn(" - Passed: 4", lines) + self.assertIn(" - Failed: 3", lines) + self.assertIn(" - Expected Fail: 1", lines) + self.assertIn(" - Unexpected Success: 1", lines) + + def test_run_no_discover_pytest_path(self): + passing_string = "tests/test_passing.py::test_pass_list" + out, err = self.assertRunExit("stestr run -n %s" % passing_string, 0) + lines = out.decode("utf8").splitlines() + self.assertIn(" - Passed: 1", lines) + self.assertIn(" - Failed: 0", lines) + + def test_run_no_discover_pytest_path_failing(self): + passing_string = "tests/test_failing.py::test_pass_list" + out, err = self.assertRunExit("stestr run -n %s" % passing_string, 1) + lines = out.decode("utf8").splitlines() + self.assertIn(" - Passed: 0", lines) + self.assertIn(" - Failed: 1", lines) + + def test_run_no_discover_file_path(self): + passing_string = "tests/test_passing.py" + out, err = self.assertRunExit("stestr run -n %s" % passing_string, 0) + lines = out.decode("utf8").splitlines() + self.assertIn(" - Passed: 4", lines) + self.assertIn(" - Failed: 0", lines) + self.assertIn(" - Expected Fail: 1", lines) + + def test_run_no_discover_file_path_failing(self): + passing_string = "tests/test_failing.py" + out, err = self.assertRunExit("stestr run -n %s" % passing_string, 1) + lines = out.decode("utf8").splitlines() + self.assertIn(" - Passed: 0", lines) + self.assertIn(" - Failed: 3", lines) + self.assertIn(" - Unexpected Success: 1", lines) From 831d80d6ad00b162a951651e6be555f098410c1f Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Sat, 11 Nov 
2023 10:20:41 -0500 Subject: [PATCH 03/11] Add CI job to run in pytest mode on each OS This commit adds a new CI job that runs stestr's unit test suite in pytest mode instead of using stestr's built in subunit runner (based on stdlib unittest). --- .github/workflows/main.yml | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index fb1e01d..e90338a 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -35,9 +35,42 @@ jobs: - name: Install and run tests macOS run: | tox -epy --notest - .tox/py/bin/pip install gnureadline subunit2sql + .tox/py/bin/pip install gnureadline tox -epy if: runner.os == 'macOS' + pytest: + name: test-pytest-${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: ["macOS-latest", "ubuntu-latest", "windows-latest"] + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Pip cache + uses: actions/cache@v3 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-${{ matrix.python-version }}-pip-tests-${{ hashFiles('setup.py','requirements-dev.txt','constraints.txt') }} + restore-keys: | + ${{ runner.os }}-${{ matrix.python-version }}-pip-tests- + ${{ runner.os }}-${{ matrix.python-version }}-pip- + ${{ runner.os }}-${{ matrix.python-version }} + - name: Install Deps + run: python -m pip install -U 'tox<4' setuptools virtualenv wheel + - name: Install and Run Tests + run: tox -e py -- --pytest + if: runner.os != 'macOS' + - name: Install and run tests macOS + run: | + tox -epy --notest + .tox/py/bin/pip install gnureadline + tox -epy -- --pytest + if: runner.os == 'macOS' + lint: name: pep8 runs-on: ubuntu-latest From b1f116a638be2a6dd7be50c66abf533acecc5ee3 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Sat, 11 Nov 2023 10:25:50 -0500 Subject: [PATCH 04/11] Fix lint --- .github/workflows/main.yml | 2 +- doc/source/MANUAL.rst | 16 ++++++++-------- stestr/config_file.py | 5 +++-- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index e90338a..9a82024 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -49,7 +49,7 @@ jobs: - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: - python-version: ${{ matrix.python-version }} + python-version: "3.11" - name: Pip cache uses: actions/cache@v3 with: diff --git a/doc/source/MANUAL.rst b/doc/source/MANUAL.rst index 339c6b8..9213f4e 100644 --- a/doc/source/MANUAL.rst +++ b/doc/source/MANUAL.rst @@ -192,14 +192,14 @@ project bundles a ``pytest`` plugin that adds real time subunit output to pytest. As a test suite author the ``pytest`` plugin enables you to write your test suite using pytest's test library instead of ``unittest``. There are two ways to specify your test runner, first is the ``--pytest`` flag on -``stestr run``. This tells stestr for this test run use ``pytest`` as the runner -instead of ``unittest``, this is good for a/b comparisons between the test -runners and also general investigations with using different test runners. The -other option is to leverage your project's config file and set the ``runner`` -field to either ``pytest`` or ``unittest`` (although ``unittest`` is always the -default so you shouldn't ever need to set it). 
This is the more natural fit -because if your test suite is written using pytest it won't be compatible with -the unittest based runner. +``stestr run``. This tells stestr for this test run use ``pytest`` as the +runner instead of ``unittest``, this is good for a/b comparisons between the +test runners and also general investigations with using different test runners. +The other option is to leverage your project's config file and set the +``runner`` field to either ``pytest`` or ``unittest`` (although ``unittest`` is +always the default so you shouldn't ever need to set it). This is the more +natural fit because if your test suite is written using pytest it won't be +compatible with the unittest based runner. Running with pdb '''''''''''''''' diff --git a/stestr/config_file.py b/stestr/config_file.py index 517ae04..a526aed 100644 --- a/stestr/config_file.py +++ b/stestr/config_file.py @@ -211,8 +211,9 @@ def get_run_command( pytest = False else: raise RuntimeError( - f"Specified runner argument value: {self.runner} in config file is not " - "valid. Only pytest or unittest can be specified in the config file." + f"Specified runner argument value: {self.runner} in " + "config file is not valid. Only pytest or unittest can be " + "specified in the config file." ) if pytest: command = ( From 2d2e3b166f508d3ba7be7dea023c373723ea66a4 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Sat, 11 Nov 2023 10:30:45 -0500 Subject: [PATCH 05/11] Disable failfast on test matrices --- .github/workflows/main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 9a82024..6d986be 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -10,6 +10,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: + fail-fast: false python-version: [3.7, 3.8, 3.9, "3.10", "3.11", "3.12"] os: ["macOS-latest", "ubuntu-latest", "windows-latest"] steps: @@ -42,6 +43,7 @@ jobs: name: test-pytest-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: + fail-fast: false matrix: os: ["macOS-latest", "ubuntu-latest", "windows-latest"] steps: From 88e56122965a2a66233d66c1c14d62126927f3c0 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Sat, 11 Nov 2023 10:34:14 -0500 Subject: [PATCH 06/11] Fix config file test mock This commit fixes the mocking used for the config file tests. It was previously implicitly defining a runner argument as a mock object which wasn't a valid choice for a config file to contain after parsing. This commit just explicitly sets the mock to have the unittest runner set to avoid failure. 
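The mechanism being guarded against can be shown in isolation: an attribute that is never explicitly set on a mock resolves to another mock object rather than ``None``, which the runner validation added earlier in this series rejects. A simplified illustration, not the actual test code::

    from unittest import mock

    conf = mock.MagicMock()
    # An unset attribute on a MagicMock is itself a MagicMock, not None...
    print(conf.runner is None)   # False
    print(conf.runner)           # e.g. <MagicMock name='mock.runner' ...>

    # ...so TestrConf.get_run_command, which only accepts None, "pytest",
    # or "unittest" as the runner value, would raise RuntimeError.
    # The test fixture therefore pins the attribute explicitly:
    conf.runner = "unittest"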
--- stestr/tests/test_config_file.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stestr/tests/test_config_file.py b/stestr/tests/test_config_file.py index a1a1e9d..c90d682 100644 --- a/stestr/tests/test_config_file.py +++ b/stestr/tests/test_config_file.py @@ -26,6 +26,7 @@ class TestTestrConf(base.TestCase): def setUp(self, mock_ConfigParser): super().setUp() self._testr_conf = config_file.TestrConf(mock.sentinel.config_file) + self._testr_conf.runner = "unittest" @mock.patch.object(config_file.util, "get_repo_open") @mock.patch.object(config_file.test_processor, "TestProcessorFixture") @@ -198,5 +199,6 @@ def test_toml_load(self, mock_toml): with open(file_path, "w"): pass self._testr_conf = config_file.TestrConf(file_path) + self._testr_conf.runner = "unittest" self._check_get_run_command() mock_toml.return_value.__getitem__.assert_called_once_with("tool") From 2d4a4ea68dd71aab099ed95d6da894d477cfeaf8 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Sat, 11 Nov 2023 10:36:19 -0500 Subject: [PATCH 07/11] Fix copy paste error in github workflow --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 6d986be..76d0018 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -9,8 +9,8 @@ jobs: name: tests-python${{ matrix.python-version }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: + fail-fast: false matrix: - fail-fast: false python-version: [3.7, 3.8, 3.9, "3.10", "3.11", "3.12"] os: ["macOS-latest", "ubuntu-latest", "windows-latest"] steps: From 4ad399b6116a88f32cb5bc28e8115fe1aab2de93 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Sat, 11 Nov 2023 13:54:30 -0500 Subject: [PATCH 08/11] Try setting -s on windows --- stestr/config_file.py | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/stestr/config_file.py b/stestr/config_file.py index a526aed..5d21617 100644 --- a/stestr/config_file.py +++ b/stestr/config_file.py @@ -216,15 +216,27 @@ def get_run_command( "specified in the config file." 
) if pytest: - command = ( - '%s -m pytest --subunit --rootdir="%s" "%s" ' - "$LISTOPT $IDOPTION" - % ( - python, - top_dir, - test_path, + if sys.platform == "win32": + command = ( + '%s -m pytest -s --subunit --rootdir="%s" "%s" ' + "$LISTOPT $IDOPTION" + % ( + python, + top_dir, + test_path, + ) + ) + + else: + command = ( + '%s -m pytest --subunit --rootdir="%s" "%s" ' + "$LISTOPT $IDOPTION" + % ( + python, + top_dir, + test_path, + ) ) - ) listopt = "--co" idoption = "--load-list $IDFILE" else: From cbbe2fc641f83798e7172f742006965fff1834c1 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Sat, 11 Nov 2023 13:59:56 -0500 Subject: [PATCH 09/11] DNM: Add a debug step for inspecting pytest output directly on windows --- .github/workflows/main.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 76d0018..91555af 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -63,6 +63,11 @@ jobs: ${{ runner.os }}-${{ matrix.python-version }} - name: Install Deps run: python -m pip install -U 'tox<4' setuptools virtualenv wheel + - name: Test windows stuff + run: | + tox -epy --notest + .tox/py/Scripts/python -m pytest --subunit --rootdir="./" "./stestr/tests" --co + if: runner.os == 'Windows' - name: Install and Run Tests run: tox -e py -- --pytest if: runner.os != 'macOS' From e3a9e5de06b97ed936777d88310986fafa60cadd Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Sat, 11 Nov 2023 14:14:36 -0500 Subject: [PATCH 10/11] Revert "DNM: Add a debug step for inspecting pytest output directly on windows" This reverts commit cbbe2fc641f83798e7172f742006965fff1834c1. --- .github/workflows/main.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 91555af..76d0018 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -63,11 +63,6 @@ jobs: ${{ runner.os }}-${{ matrix.python-version }} - name: Install Deps run: python -m pip install -U 'tox<4' setuptools virtualenv wheel - - name: Test windows stuff - run: | - tox -epy --notest - .tox/py/Scripts/python -m pytest --subunit --rootdir="./" "./stestr/tests" --co - if: runner.os == 'Windows' - name: Install and Run Tests run: tox -e py -- --pytest if: runner.os != 'macOS' From 21b0efe538c520e4b889caf0f2f68be8688967a4 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Sun, 12 Nov 2023 10:01:14 -0500 Subject: [PATCH 11/11] Fix path usage on windows The pytest subunit plugin was loading test paths incorrectly on windows as a posix path. This was causing the test loading to fail unexpectedly because pytest was unable to process the posix path version of a windows path. This commit fixes this issue in the pytest plugin. --- stestr/pytest_subunit.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stestr/pytest_subunit.py b/stestr/pytest_subunit.py index 9df7c90..2888108 100644 --- a/stestr/pytest_subunit.py +++ b/stestr/pytest_subunit.py @@ -29,13 +29,13 @@ from subunit import StreamResultToBytes -def to_path(testid: str) -> pathlib.PosixPath: +def to_path(testid: str) -> pathlib.Path: delim = "::" if delim in testid: path = testid.split(delim)[0] else: path = testid - return pathlib.PosixPath(path).resolve() + return pathlib.Path(path).resolve() # hook
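For reference, the corrected helper now builds the platform-appropriate path class when stripping pytest node ids. A small standalone usage sketch (the node id shown is illustrative)::

    import pathlib


    def to_path(testid: str) -> pathlib.Path:
        """Strip the ``::`` node-id suffix and resolve the file path."""
        delim = "::"
        path = testid.split(delim)[0] if delim in testid else testid
        # pathlib.Path selects WindowsPath on Windows and PosixPath
        # elsewhere; the previous hard-coded PosixPath could not represent
        # a native Windows path correctly.
        return pathlib.Path(path).resolve()


    print(to_path("tests/test_passing.py::test_pass_list"))
    # -> absolute path to tests/test_passing.py on the current platform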