diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 951cb1b73..6c99eaef6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -58,6 +58,41 @@ jobs: - name: Run tests run: python -m pytest -v --timeout=300 --webdriver=ChromeHeadless --durations=100 test + test_env: + name: test_environments + runs-on: "ubuntu-latest" + strategy: + fail-fast: false + timeout-minutes: 10 + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - uses: mamba-org/setup-micromamba@v1 + with: + init-shell: >- + bash + environment-name: test-env + cache-environment: true + create-args: >- + python + pip + libmambapy + conda-build + + - name: Install dependencies + run: python -m pip install ".[test,hg]" --pre + shell: micromamba-shell {0} + + - name: Install asv + run: pip install . + shell: micromamba-shell {0} + + - name: Run tests + run: pytest -k environment_bench -vvvvv + shell: micromamba-shell {0} + docs: runs-on: ubuntu-latest steps: diff --git a/.github/workflows/ci_win.yml b/.github/workflows/ci_win.yml deleted file mode 100644 index 6661be3e6..000000000 --- a/.github/workflows/ci_win.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: Windows CI - -on: [push, pull_request] - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - test: - name: test - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: ["windows-latest"] - python-version: ["3.7"] - timeout-minutes: 30 - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set up Python version ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - cache: pip - cache-dependency-path: pyproject.toml - - - name: Install and test - shell: pwsh - run: | - python.exe -m pip install .[test] - python.exe -m pip install packaging virtualenv - python.exe -m pytest -v -l -x --timeout=300 --durations=100 test --environment-type=virtualenv diff --git 
a/CHANGES.rst b/CHANGES.rst index 8e233f84c..30b632cbc 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,19 @@ +0.6.2 (TBD) +---------------------------- + +New Features +^^^^^^^^^^^^ + +API Changes +^^^^^^^^^^^ + +Bug Fixes +^^^^^^^^^ +- The ``mamba`` plugin works correctly for newer versions (>=1.5) of ``libmambapy`` (#1372) + +Other Changes and Additions +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 0.6.1 (2023-09-11) ---------------------------- diff --git a/asv/plugins/_mamba_helpers.py b/asv/plugins/_mamba_helpers.py new file mode 100644 index 000000000..bf8518901 --- /dev/null +++ b/asv/plugins/_mamba_helpers.py @@ -0,0 +1,259 @@ +# Licensed under a 3-clause BSD style license FOR Mamba - see LICENSE in mamba-org/mamba +# Also covered by the 3-clause BSD style license FOR boa - see LICENSE in mamba-org/boa +# Copyright 2019 QuantStack and the Mamba contributors. +# Very lightly edited / simplified for use within asv +import os +import urllib.parse +import collections + +import libmambapy +from conda.base.constants import ChannelPriority +from conda.base.context import context +from conda.core.index import check_allowlist +from conda.gateways.connection.session import CondaHttpAuth + + +def get_index( + channel_urls=(), + prepend=True, + platform=None, + use_local=False, + use_cache=False, + unknown=None, + prefix=None, + repodata_fn="repodata.json", +): + if isinstance(platform, str): + platform = [platform, "noarch"] + + all_channels = [] + if use_local: + all_channels.append("local") + all_channels.extend(channel_urls) + if prepend: + all_channels.extend(context.channels) + check_allowlist(all_channels) + + # Remove duplicates but retain order + all_channels = list(collections.OrderedDict.fromkeys(all_channels)) + + dlist = libmambapy.DownloadTargetList() + + index = [] + + def fixup_channel_spec(spec): + at_count = spec.count("@") + if at_count > 1: + first_at = spec.find("@") + spec = ( + spec[:first_at] + + urllib.parse.quote(spec[first_at]) + + spec[first_at + 1 
:] + ) + if platform: + spec = spec + "[" + ",".join(platform) + "]" + return spec + + all_channels = list(map(fixup_channel_spec, all_channels)) + pkgs_dirs = libmambapy.MultiPackageCache(context.pkgs_dirs) + libmambapy.create_cache_dir(str(pkgs_dirs.first_writable_path)) + + for channel in libmambapy.get_channels(all_channels): + for channel_platform, url in channel.platform_urls(with_credentials=True): + full_url = CondaHttpAuth.add_binstar_token(url) + + sd = libmambapy.SubdirData( + channel, channel_platform, full_url, pkgs_dirs, repodata_fn + ) + + needs_finalising = sd.download_and_check_targets(dlist) + index.append( + ( + sd, + { + "platform": channel_platform, + "url": url, + "channel": channel, + "needs_finalising": needs_finalising, + }, + ) + ) + + for sd, info in index: + if info["needs_finalising"]: + sd.finalize_checks() + dlist.add(sd) + + is_downloaded = dlist.download(libmambapy.MAMBA_DOWNLOAD_FAILFAST) + + if not is_downloaded: + raise RuntimeError("Error downloading repodata.") + + return index + + +def load_channels( + pool, + channels, + repos, + has_priority=None, + prepend=True, + platform=None, + use_local=False, + use_cache=True, + repodata_fn="repodata.json", +): + index = get_index( + channel_urls=channels, + prepend=prepend, + platform=platform, + use_local=use_local, + repodata_fn=repodata_fn, + use_cache=use_cache, + ) + + if has_priority is None: + has_priority = context.channel_priority in [ + ChannelPriority.STRICT, + ChannelPriority.FLEXIBLE, + ] + + subprio_index = len(index) + if has_priority: + # first, count unique channels + n_channels = len(set([entry["channel"].canonical_name for _, entry in index])) + current_channel = index[0][1]["channel"].canonical_name + channel_prio = n_channels + + for subdir, entry in index: + # add priority here + if has_priority: + if entry["channel"].canonical_name != current_channel: + channel_prio -= 1 + current_channel = entry["channel"].canonical_name + priority = channel_prio + else: + 
priority = 0 + if has_priority: + subpriority = 0 + else: + subpriority = subprio_index + subprio_index -= 1 + + if not subdir.loaded() and entry["platform"] != "noarch": + # ignore non-loaded subdir if channel is != noarch + continue + + if context.verbosity != 0 and not context.json: + print( + "Channel: {}, platform: {}, prio: {} : {}".format( + entry["channel"], entry["platform"], priority, subpriority + ) + ) + print("Cache path: ", subdir.cache_path()) + + repo = subdir.create_repo(pool) + repo.set_priority(priority, subpriority) + repos.append(repo) + + return index + + +class MambaSolver: + def __init__(self, channels, platform, context, output_folder=None): + self.channels = channels + self.platform = platform + self.context = context + self.output_folder = output_folder or "local" + self.pool = libmambapy.Pool() + self.repos = [] + self.index = load_channels( + self.pool, self.channels, self.repos, platform=platform + ) + + self.local_index = [] + self.local_repos = {} + # load local repo, too + self.replace_channels() + + def replace_installed(self, prefix): + prefix_data = libmambapy.PrefixData(prefix) + vp = libmambapy.get_virtual_packages() + prefix_data.add_packages(vp) + repo = libmambapy.Repo(self.pool, prefix_data) + repo.set_installed() + + def replace_channels(self): + self.local_index = get_index( + (self.output_folder,), platform=self.platform, prepend=False + ) + + for _, v in self.local_repos.items(): + v.clear(True) + + start_prio = len(self.channels) + len(self.index) + for subdir, channel in self.local_index: + if not subdir.loaded(): + continue + + # support new mamba + if isinstance(channel, dict): + channelstr = channel["url"] + channelurl = channel["url"] + else: + channelstr = str(channel) + channelurl = channel.url(with_credentials=True) + + cp = subdir.cache_path() + if cp.endswith(".solv"): + os.remove(subdir.cache_path()) + cp = cp.replace(".solv", ".json") + + self.local_repos[channelstr] = libmambapy.Repo( + self.pool, 
channelstr, cp, channelurl + ) + + self.local_repos[channelstr].set_priority(start_prio, 0) + start_prio -= 1 + + def solve(self, specs, pkg_cache_path=None): + """Solve given a set of specs. + Parameters + ---------- + specs : list of str + A list of package specs. You can use `conda.models.match_spec.MatchSpec` + to get them to the right form by calling + `MatchSpec(myspec).conda_build_form()` + Returns + ------- + transaction : libmambapy.Transaction + The mamba transaction. + Raises + ------ + RuntimeError : + If the solver did not find a solution. + """ + solver_options = [(libmambapy.SOLVER_FLAG_ALLOW_DOWNGRADE, 1)] + api_solver = libmambapy.Solver(self.pool, solver_options) + _specs = specs + + api_solver.add_jobs(_specs, libmambapy.SOLVER_INSTALL) + success = api_solver.try_solve() + + if not success: + error_string = "Mamba failed to solve:\n" + for s in _specs: + error_string += f" - {s}\n" + error_string += "\nwith channels:\n" + for c in self.channels: + error_string += f" - {c}\n" + error_string += api_solver.explain_problems() + print(error_string) + raise RuntimeError("Solver could not find solution." + error_string) + + if pkg_cache_path is None: + # use values from conda + pkg_cache_path = self.context.pkgs_dirs + + package_cache = libmambapy.MultiPackageCache(pkg_cache_path) + return libmambapy.Transaction(api_solver, package_cache) diff --git a/asv/plugins/mamba.py b/asv/plugins/mamba.py index d3728a866..f095a56fe 100644 --- a/asv/plugins/mamba.py +++ b/asv/plugins/mamba.py @@ -11,8 +11,7 @@ except ImportError: from yaml import Loader -from mamba.api import libmambapy, MambaSolver - +from ._mamba_helpers import libmambapy, MambaSolver from .. import environment, util from ..console import log diff --git a/docs/source/installing.rst b/docs/source/installing.rst index 45e8917e0..80b324c3d 100644 --- a/docs/source/installing.rst +++ b/docs/source/installing.rst @@ -22,7 +22,7 @@ the ``python`` requirements are as noted in the ``pyproject.toml``. 
For managing the environments, one of the following packages is required: - `libmambapy `__, - which is typically part of ``mamba`` + which is typically part of ``mamba``. In this case ``conda`` must be present too. - `virtualenv `__, which is required since venv is not compatible with other versions of Python. diff --git a/pyproject.toml b/pyproject.toml index 2fcd5e6d0..74754be72 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,6 +52,7 @@ test = [ "scipy; platform_python_implementation != \"PyPy\"", "feedparser", "selenium", + "flaky", "pytest-rerunfailures", "python-hglib; platform_system != 'Windows'", "rpy2; platform_system != 'Windows' and platform_python_implementation != 'PyPy'", diff --git a/test/test_environment_bench.py b/test/test_environment_bench.py new file mode 100644 index 000000000..593945164 --- /dev/null +++ b/test/test_environment_bench.py @@ -0,0 +1,105 @@ +import subprocess +import os +import json + +import pytest + +from . import tools + +ENVIRONMENTS = [] +if tools.HAS_VIRTUALENV: + ENVIRONMENTS.append("virtualenv") +if tools.HAS_CONDA: + ENVIRONMENTS.append("conda") +if tools.HAS_MAMBA: + ENVIRONMENTS.append("mamba") +if len(ENVIRONMENTS) == 0: + pytest.skip("No environments can be constructed", allow_module_level=True) + +ASV_CONFIG = { + "version": 1, + "project": "project", + "project_url": "http://project-homepage.org/", + "repo": ".", + "branches": ["main"], + "environment_type": "virtualenv", + "env_dir": ".asv/env", + "results_dir": ".asv/results", + "html_dir": ".asv/html", +} + +BENCHMARK_CODE = """ +class ExampleBench: + def setup(self): + self.data = list(range(100)) + + def time_sum(self): + return sum(self.data) + + def time_max(self): + return max(self.data) +""" + +SETUP_CODE = """ +from setuptools import setup, find_packages + +setup( + name="myproject", + version="0.1.0", + packages=find_packages(), +) +""" + + +@pytest.fixture(scope="session", autouse=True) +def setup_asv_project(tmp_path_factory): + """ + Fixture 
to set up an ASV project in a temporary directory + """ + tmp_path = tmp_path_factory.mktemp("asv_project") + original_dir = os.getcwd() + os.chdir(tmp_path) + + os.makedirs("benchmarks", exist_ok=True) + with open("benchmarks/example_bench.py", "w") as f: + f.write(BENCHMARK_CODE) + with open("benchmarks/__init__.py", "w") as f: + f.write("") + with open("asv.conf.json", "w") as f: + json.dump(ASV_CONFIG, f, indent=4) + with open("setup.py", "w") as f: + f.write(SETUP_CODE) + + subprocess.run(["git", "init"], cwd=tmp_path, check=True) + subprocess.run( + ["git", "config", "user.email", "test@example.com"], cwd=tmp_path, check=True + ) + subprocess.run( + ["git", "config", "user.name", "Test User"], cwd=tmp_path, check=True + ) + subprocess.run(["git", "add", "."], cwd=tmp_path, check=True) + subprocess.run( + ["git", "commit", "-m", "Initial ASV setup"], cwd=tmp_path, check=True + ) + subprocess.run(["git", "branch", "-M", "main"], cwd=tmp_path, check=True) + + yield tmp_path + os.chdir(original_dir) + + +@pytest.mark.parametrize("env", ENVIRONMENTS) +def test_asv_benchmark(setup_asv_project, env): + """ + Test running ASV benchmarks in the specified environment. 
+ """ + project_dir = setup_asv_project + subprocess.run(["asv", "machine", "--yes"], cwd=project_dir, check=True) + result = subprocess.run( + ["asv", "run", "--quick", "--dry-run", "--environment", env], + cwd=project_dir, + check=True, + ) + + assert ( + result.returncode == 0 + ), f"ASV benchmark failed in {env} environment: {result.stderr}" diff --git a/test/tools.py b/test/tools.py index 3d13be07b..a9703d4ee 100644 --- a/test/tools.py +++ b/test/tools.py @@ -16,6 +16,7 @@ import subprocess import platform import http.server +import importlib from os.path import abspath, join, dirname, relpath, isdir from contextlib import contextmanager from hashlib import sha256 @@ -92,6 +93,21 @@ def _check_conda(): HAVE_WEBDRIVER = False +def _check_mamba(): + conda = _find_conda() + try: + importlib.import_module('libmambapy') + subprocess.check_call([conda, 'build', '--version'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + return True + except (ImportError, subprocess.CalledProcessError, FileNotFoundError): + return False + + +HAS_MAMBA = _check_mamba() + + WAIT_TIME = 20.0