TST: Add more explicit mamba tests
HaoZeke committed Feb 4, 2024
1 parent 30324e3 commit 6caa6e7
Showing 1 changed file with 161 additions and 33 deletions.
194 changes: 161 additions & 33 deletions test/test_environment_bench.py
@@ -27,6 +27,9 @@
     "env_dir": ".asv/env",
     "results_dir": ".asv/results",
     "html_dir": ".asv/html",
+    "matrix": {
+        "asv_runner": [],  # On conda-forge, not defaults
+    },
 }
 
 BENCHMARK_CODE = """
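
Annotation (not part of the commit): the new "matrix" entry tells ASV to install asv_runner into every benchmark environment without a version pin; since the package is published on conda-forge but not on the Anaconda defaults channel, the configured channels decide whether the solve succeeds. Below is a minimal sketch of how the factory fixture introduced later merges a per-test override into this base config; only the keys visible in this hunk are reproduced.

import json

ASV_CONFIG = {
    "env_dir": ".asv/env",
    "results_dir": ".asv/results",
    "html_dir": ".asv/html",
    "matrix": {
        "asv_runner": [],  # unpinned; only conda-forge ships it
    },
}

# Mirrors the factory's config.copy() + config.update(custom_config) step.
config = ASV_CONFIG.copy()
config.update({"conda_channels": ["conda-forge", "nodefaults"]})
print(json.dumps(config, indent=4))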
@@ -51,49 +54,67 @@ def time_max(self):
 )
 """
 
+CONDARC_CONTENT = """
+channels:
+- conda-forge
+- nodefaults
+channel_priority: disabled
+auto_activate_base: false
+"""
+
 
-@pytest.fixture(scope="session", autouse=True)
-def setup_asv_project(tmp_path_factory):
+@pytest.fixture(scope="session")
+def asv_project_factory(tmp_path_factory):
     """
-    Fixture to set up an ASV project in a temporary directory
+    Factory to set up an ASV project with customizable configurations.
     """
-    tmp_path = tmp_path_factory.mktemp("asv_project")
-    original_dir = os.getcwd()
-    os.chdir(tmp_path)
-
-    os.makedirs("benchmarks", exist_ok=True)
-    with open("benchmarks/example_bench.py", "w") as f:
-        f.write(BENCHMARK_CODE)
-    with open("benchmarks/__init__.py", "w") as f:
-        f.write("")
-    with open("asv.conf.json", "w") as f:
-        json.dump(ASV_CONFIG, f, indent=4)
-    with open("setup.py", "w") as f:
-        f.write(SETUP_CODE)
-
-    subprocess.run(["git", "init"], cwd=tmp_path, check=True)
-    subprocess.run(
-        ["git", "config", "user.email", "[email protected]"], cwd=tmp_path, check=True
-    )
-    subprocess.run(
-        ["git", "config", "user.name", "Test User"], cwd=tmp_path, check=True
-    )
-    subprocess.run(["git", "add", "."], cwd=tmp_path, check=True)
-    subprocess.run(
-        ["git", "commit", "-m", "Initial ASV setup"], cwd=tmp_path, check=True
-    )
-    subprocess.run(["git", "branch", "-M", "main"], cwd=tmp_path, check=True)
-
-    yield tmp_path
-    os.chdir(original_dir)
+    def _create_asv_project(custom_config=None, create_condarc=False):
+        tmp_path = tmp_path_factory.mktemp("asv_project")
+        original_dir = os.getcwd()
+        os.chdir(tmp_path)
+
+        os.makedirs("benchmarks", exist_ok=True)
+        benchmark_file = tmp_path / "benchmarks" / "example_bench.py"
+        benchmark_file.write_text(BENCHMARK_CODE)
+        (tmp_path / "benchmarks" / "__init__.py").write_text("")
+
+        config = ASV_CONFIG.copy()
+        if custom_config:
+            config.update(custom_config)
+        (tmp_path / "asv.conf.json").write_text(json.dumps(config, indent=4))
+        (tmp_path / "setup.py").write_text(SETUP_CODE)
+
+        if create_condarc:
+            (tmp_path / ".condarc").write_text(CONDARC_CONTENT)
+
+        subprocess.run(["git", "init"], cwd=tmp_path, check=True)
+        subprocess.run(
+            ["git", "config", "user.email", "[email protected]"],
+            cwd=tmp_path,
+            check=True,
+        )
+        subprocess.run(
+            ["git", "config", "user.name", "Test User"], cwd=tmp_path, check=True
+        )
+        subprocess.run(["git", "add", "."], cwd=tmp_path, check=True)
+        subprocess.run(
+            ["git", "commit", "-m", "Initial ASV setup"], cwd=tmp_path, check=True
+        )
+        subprocess.run(["git", "branch", "-M", "main"], cwd=tmp_path, check=True)
+
+        os.chdir(original_dir)
+        return tmp_path
+
+    return _create_asv_project


 @pytest.mark.parametrize("env", ENVIRONMENTS)
-def test_asv_benchmark(setup_asv_project, env):
+def test_asv_benchmark(asv_project_factory, env):
     """
     Test running ASV benchmarks in the specified environment.
     """
-    project_dir = setup_asv_project
+    project_dir = asv_project_factory(custom_config={})
     subprocess.run(["asv", "machine", "--yes"], cwd=project_dir, check=True)
     result = subprocess.run(
         ["asv", "run", "--quick", "--dry-run", "--environment", env],
@@ -104,3 +125,110 @@ def test_asv_benchmark(setup_asv_project, env):
     assert (
         result.returncode == 0
     ), f"ASV benchmark failed in {env} environment: {result.stderr}"
+
+
+@pytest.mark.parametrize(
+    "config_modifier, expected_success, expected_error",
+    [
+        pytest.param(
+            {"conda_channels": ["conda-forge", "nodefaults"]},
+            True,
+            None,
+            id="with_conda_forge",
+        ),
+        pytest.param(
+            {"conda_channels": []},
+            False,
+            "Solver could not find solution",
+            id="empty_conda_channels",
+        ),
+    ],
+)
+def test_asv_mamba(
+    asv_project_factory, config_modifier, expected_success, expected_error
+):
+    """
+    Test running ASV benchmarks with various configurations,
+    checking for specific errors when failures are expected.
+    """
+    project_dir = asv_project_factory(custom_config=config_modifier)
+    try:
+        subprocess.run(
+            ["asv", "run", "--quick", "--dry-run", "--environment", "mamba"],
+            cwd=project_dir,
+            check=True,
+            capture_output=True,
+            text=True,
+        )
+        if not expected_success:
+            pytest.fail("Expected failure, but succeeded")
+    except subprocess.CalledProcessError as exc:
+        if expected_success:
+            pytest.fail(f"ASV benchmark unexpectedly failed: {exc.stderr}")
+        elif expected_error and expected_error not in exc.stderr:
+            pytest.fail(
+                f"Expected error '{expected_error}' not found in stderr: {exc.stderr}"
+            )
+
+
+@pytest.mark.parametrize(
+    "create_condarc, set_mambarc, expected_success, expected_error",
+    [
+        pytest.param(
+            True,
+            True,
+            True,
+            None,
+            id="with_proper_condarc_and_mambarc",
+        ),
+        pytest.param(
+            True,
+            False,
+            False,
+            "Solver could not find solution",
+            id="with_condarc_but_no_mambarc",
+        ),
+        pytest.param(
+            False,
+            False,
+            False,
+            "Solver could not find solution",
+            id="without_condarc_and_mambarc",
+        ),
+    ],
+)
+def test_asv_mamba_condarc(
+    asv_project_factory,
+    create_condarc,
+    set_mambarc,
+    expected_success,
+    expected_error,
+    monkeypatch,
+):
+    project_dir = asv_project_factory(
+        custom_config={"conda_channels": [], "environment_type": "mamba"},
+        create_condarc=create_condarc,
+    )
+
+    env = os.environ.copy()
+    if set_mambarc:
+        env["MAMBARC"] = str(project_dir.resolve() / ".condarc")
+
+    try:
+        subprocess.run(
+            ["asv", "run", "--quick", "--dry-run"],
+            cwd=project_dir,
+            check=True,
+            capture_output=True,
+            text=True,
+            env=env,
+        )
+        if not expected_success:
+            pytest.fail("Expected failure, but succeeded")
+    except subprocess.CalledProcessError as exc:
+        if expected_success:
+            pytest.fail(f"ASV benchmark unexpectedly failed: {exc.stderr}")
+        elif expected_error and expected_error not in exc.stderr:
+            pytest.fail(
+                f"Expected error '{expected_error}' not found in stderr: {exc.stderr}"
+            )
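
Annotation (not part of the commit): the condarc test relies on mamba reading channel configuration from the file named by the MAMBARC environment variable; with conda_channels empty and no MAMBARC set, the solver has no channel that provides asv_runner, hence the expected "Solver could not find solution". A minimal sketch of the mechanism, with a hypothetical path:

import os
import subprocess

env = os.environ.copy()
env["MAMBARC"] = "/path/to/asv_project/.condarc"  # hypothetical path
subprocess.run(
    ["asv", "run", "--quick", "--dry-run"],
    env=env,  # dropping MAMBARC here reproduces the solver failure
    check=True,
)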
