From 67f57f8356e79268abad07d60707eccffebd26e3 Mon Sep 17 00:00:00 2001
From: James Meakin <12661555+jmsmkn@users.noreply.github.com>
Date: Wed, 16 Oct 2024 11:30:06 +0200
Subject: [PATCH] Fix CI (#169)

Closes #168
---
 .github/workflows/ci.yml              |  2 +-
 .pre-commit-config.yaml               | 10 ++--
 HISTORY.md                            |  2 +-
 pyproject.toml                        |  3 +-
 tests/async_integration_tests.py      | 26 +++++----
 tests/integration_tests.py            | 25 +++++----
 tests/scripts/create_test_fixtures.py | 76 +--------------------------
 tests/test_gcapi.py                   |  8 ---
 8 files changed, 40 insertions(+), 112 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index c8ede6d..0b06181 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -42,7 +42,7 @@ jobs:
     strategy:
       fail-fast: false # Try to work around ECS errors
       matrix:
-        python-version: ["3.9", "3.10", "3.11", "3.12"]
+        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
     steps:
       - uses: actions/checkout@v4
       - name: Set up Python ${{ matrix.python-version }}
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index da5a205..92e8c10 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.6.0
+    rev: v5.0.0
     hooks:
       - id: check-docstring-first
       - id: debug-statements
@@ -8,7 +8,7 @@ repos:
       - id: mixed-line-ending
       - id: trailing-whitespace
   - repo: https://github.com/asottile/pyupgrade
-    rev: v3.17.0
+    rev: v3.18.0
     hooks:
       - id: pyupgrade
        language: python
@@ -18,12 +18,12 @@ repos:
     hooks:
       - id: isort
   - repo: https://github.com/ambv/black
-    rev: 24.4.2
+    rev: 24.10.0
     hooks:
       - id: black
        language: python
   - repo: https://github.com/PyCQA/flake8
-    rev: 7.1.0
+    rev: 7.1.1
     hooks:
       - id: flake8
        language: python
@@ -35,7 +35,7 @@ repos:
          - mccabe
          - yesqa
  - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: 'v1.11.1'
+    rev: 'v1.12.0'
    hooks:
      - id: mypy
        additional_dependencies:
diff --git a/HISTORY.md b/HISTORY.md
index df1ed1a..ab31633 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -5,7 +5,7 @@
   - Removed the retina endpoints
   - Migrated to use Pydantic models for request and response validation
   - Removed support for Python 3.8
-  - Added support for Python 3.12
+  - Added support for Python 3.12 and 3.13

 ## 0.12.0 (2023-02-20)

diff --git a/pyproject.toml b/pyproject.toml
index 36e5ccc..ab85c43 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -53,7 +53,7 @@ xfail_strict = true
 legacy_tox_ini = """
 [tox]
 isolated_build = True
-envlist = py39, py310, py311, py312
+envlist = py39, py310, py311, py312, py313

 [gh-actions]
 python =
@@ -61,6 +61,7 @@ python =
     3.10: py310
     3.11: py311
     3.12: py312
+    3.13: py313

 [testenv]
 allowlist_externals =
diff --git a/tests/async_integration_tests.py b/tests/async_integration_tests.py
index bd22758..92026ca 100644
--- a/tests/async_integration_tests.py
+++ b/tests/async_integration_tests.py
@@ -326,12 +326,12 @@ async def get_download():
     "algorithm,interface,files",
     (
         (
-            "test-algorithm-evaluation-image-1",
+            "test-algorithm-evaluation-image-0",
             "generic-medical-image",
             ["image10x10x101.mha"],
         ),
         # TODO this algorithm was removed from the test fixtures
-        # ("test-algorithm-evaluation-file-1", "json-file", ["test.json"]),
+        # ("test-algorithm-evaluation-file-0", "json-file", ["test.json"]),
     ),
 )
 @pytest.mark.anyio
@@ -358,11 +358,9 @@ async def run_job():

     # algorithm might not be ready yet
     job = await run_job()
-    assert job["status"] == "Queued"
-    assert len(job["inputs"]) == 1
-
+    assert job["status"] == "Validating inputs"
     job = await c.algorithm_jobs.detail(job["pk"])
-    assert job.status in {"Queued", "Started"}
+    assert job.status in {"Validating inputs", "Queued", "Started"}


 @pytest.mark.parametrize(
@@ -395,7 +393,7 @@ async def test_get_algorithm_by_slug(local_grand_challenge):
         token=DEMO_PARTICIPANT_TOKEN,
     ) as c:
         by_slug = await c.algorithms.detail(
-            slug="test-algorithm-evaluation-image-1"
+            slug="test-algorithm-evaluation-image-0"
         )
         by_pk = await c.algorithms.detail(pk=by_slug.pk)

@@ -577,8 +575,11 @@ async def get_archive_detail():
     item_updated = await get_archive_detail()

-    json_civ = item_updated.values[-1]
-    assert json_civ.interface.slug == "results-json-file"
+    json_civ = [
+        civ
+        for civ in item_updated.values
+        if civ.interface.slug == "results-json-file"
+    ][0]
     assert json_civ.value == {"foo": 0.5}
     updated_civ_count = len(item_updated.values)

@@ -600,8 +601,11 @@ async def get_updated_archive_detail():
     item_updated_again = await get_updated_archive_detail()

     assert len(item_updated_again.values) == updated_civ_count
-    new_json_civ = item_updated_again.values[-1]
-    assert new_json_civ.interface.slug == "results-json-file"
+    new_json_civ = [
+        civ
+        for civ in item_updated_again.values
+        if civ.interface.slug == "results-json-file"
+    ][0]
     assert new_json_civ.value == {"foo": 0.8}

diff --git a/tests/integration_tests.py b/tests/integration_tests.py
index 2665a66..410aa2f 100644
--- a/tests/integration_tests.py
+++ b/tests/integration_tests.py
@@ -281,13 +281,13 @@ def get_download():
     "algorithm,interface,files",
     (
         (
-            "test-algorithm-evaluation-image-1",
+            "test-algorithm-evaluation-image-0",
             "generic-medical-image",
             ["image10x10x101.mha"],
         ),
         # TODO this algorithm was removed from the test fixtures
         # (
-        #     "test-algorithm-evaluation-file-1",
+        #     "test-algorithm-evaluation-file-0",
         #     "json-file",
         #     ["test.json"],
         # ),
@@ -316,10 +316,9 @@ def run_job():

     # algorithm might not be ready yet
     job = run_job()
-    assert job["status"] == "Queued"
-    assert len(job["inputs"]) == 1
+    assert job["status"] == "Validating inputs"
     job = c.algorithm_jobs.detail(job["pk"])
-    assert job.status in {"Queued", "Started"}
+    assert job.status in {"Validating inputs", "Queued", "Started"}


 def test_get_algorithm_by_slug(local_grand_challenge):
@@ -329,7 +328,7 @@ def test_get_algorithm_by_slug(local_grand_challenge):
         token=DEMO_PARTICIPANT_TOKEN,
     )

-    by_slug = c.algorithms.detail(slug="test-algorithm-evaluation-image-1")
+    by_slug = c.algorithms.detail(slug="test-algorithm-evaluation-image-0")
     by_pk = c.algorithms.detail(pk=by_slug.pk)

     assert by_pk == by_slug
@@ -496,8 +495,11 @@ def get_archive_item_detail():
     item_updated = get_archive_item_detail()

-    json_civ = item_updated.values[-1]
-    assert json_civ.interface.slug == "results-json-file"
+    json_civ = [
+        civ
+        for civ in item_updated.values
+        if civ.interface.slug == "results-json-file"
+    ][0]
     assert json_civ.value == {"foo": 0.5}
     updated_civ_count = len(item_updated.values)

@@ -517,8 +519,11 @@ def get_updated_archive_item_detail():
     item_updated_again = get_updated_archive_item_detail()

     assert len(item_updated_again.values) == updated_civ_count
-    new_json_civ = item_updated_again.values[-1]
-    assert new_json_civ.interface.slug == "results-json-file"
+    new_json_civ = [
+        civ
+        for civ in item_updated_again.values
+        if civ.interface.slug == "results-json-file"
+    ][0]
     assert new_json_civ.value == {"foo": 0.8}

diff --git a/tests/scripts/create_test_fixtures.py b/tests/scripts/create_test_fixtures.py
index 3462db9..9f0bb40 100644
--- a/tests/scripts/create_test_fixtures.py
+++ b/tests/scripts/create_test_fixtures.py
@@ -1,4 +1,3 @@
-import base64
 import gzip
 import logging
 import os
@@ -11,7 +10,6 @@
 from django.conf import settings
 from django.contrib.auth import get_user_model
 from django.contrib.auth.models import Group, Permission
-from django.core.exceptions import ObjectDoesNotExist
 from django.core.files.base import ContentFile
 from django.db import IntegrityError
 from grandchallenge.algorithms.models import Algorithm, AlgorithmImage
@@ -24,12 +22,7 @@
     ComponentInterfaceValue,
 )
 from grandchallenge.core.fixtures import create_uploaded_image
-from grandchallenge.evaluation.models import (
-    Evaluation,
-    Method,
-    Phase,
-    Submission,
-)
+from grandchallenge.evaluation.models import Method, Phase
 from grandchallenge.evaluation.utils import SubmissionKindChoices
 from grandchallenge.invoices.models import Invoice
 from grandchallenge.reader_studies.models import (
@@ -73,7 +66,6 @@ def run():
         raise RuntimeError("Fixtures already initialized") from e

     _set_user_permissions(users)
-    _create_demo_challenge(users=users)
     _create_reader_studies(users)
     _create_archive(users)
     _create_user_tokens(users)
@@ -153,72 +145,6 @@ def _set_user_permissions(users):
         users["demo"].user_permissions.add(add_archive_perm)


-def _create_demo_challenge(users):
-    demo = Challenge.objects.create(
-        short_name="demo",
-        description="Demo Challenge",
-        creator=users["demo"],
-        hidden=False,
-        display_forum_link=True,
-    )
-    demo.add_participant(users["demop"])
-
-    phase = Phase.objects.create(challenge=demo, title="Phase 1")
-
-    phase.score_title = "Accuracy ± std"
-    phase.score_jsonpath = "acc.mean"
-    phase.score_error_jsonpath = "acc.std"
-    phase.extra_results_columns = [
-        {
-            "title": "Dice ± std",
-            "path": "dice.mean",
-            "error_path": "dice.std",
-            "order": "desc",
-        }
-    ]
-
-    phase.submission_kind = SubmissionKindChoices.ALGORITHM
-    phase.save()
-
-    method = Method(phase=phase, creator=users["demo"])
-
-    with _gc_demo_algorithm() as container:
-        method.image.save("algorithm_io.tar", container)
-
-    submission = Submission(phase=phase, creator=users["demop"])
-    content = ContentFile(base64.b64decode(b""))
-    submission.predictions_file.save("test.csv", content)
-    submission.save()
-
-    e = Evaluation.objects.create(
-        submission=submission,
-        method=method,
-        status=Evaluation.SUCCESS,
-        time_limit=300,
-    )
-
-    def create_result(evaluation, result: dict):
-        interface = ComponentInterface.objects.get(slug="metrics-json-file")
-
-        try:
-            output_civ = evaluation.outputs.get(interface=interface)
-            output_civ.value = result
-            output_civ.save()
-        except ObjectDoesNotExist:
-            output_civ = ComponentInterfaceValue.objects.create(
-                interface=interface, value=result
-            )
-            evaluation.outputs.add(output_civ)
-
-    create_result(
-        e,
-        {
-            "acc": {"mean": 0, "std": 0.1},
-            "dice": {"mean": 0.71, "std": 0.05},
-        },
-    )
-
-
 def _create_reader_studies(users):
     reader_study = ReaderStudy.objects.create(
         title="Reader Study",
diff --git a/tests/test_gcapi.py b/tests/test_gcapi.py
index 2232f0c..974392b 100644
--- a/tests/test_gcapi.py
+++ b/tests/test_gcapi.py
@@ -1,6 +1,5 @@
 import pytest
 from click.testing import CliRunner
-from httpx import HTTPStatusError

 from gcapi import Client, cli
 from tests.utils import ADMIN_TOKEN
@@ -114,10 +113,3 @@ def test_command_line_interface():
     help_result = runner.invoke(cli.main, ["--help"])
     assert help_result.exit_code == 0
     assert "--help  Show this message and exit." in help_result.output
-
-
-def test_ground_truth_url():
-    c = Client(token="foo", base_url="https://example.com/api/v1/")
-    with pytest.raises(HTTPStatusError) as exc_info:
-        c.reader_studies.ground_truth("fake", "image_pk")
-    assert exc_info.value.request.url.path.endswith("image_pk/")
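
A note on the job-status changes in this patch: the tests no longer assert that a freshly created job is "Queued", because jobs now enter a "Validating inputs" state first. Below is a minimal sketch (not part of the patch) of the resulting polling pattern, assuming a reachable Grand Challenge API, a valid participant token, and an existing job primary key — the token, base_url, and pk values are placeholders.

    from time import sleep

    from gcapi import Client

    # Placeholder credentials and URL; substitute a real token and instance.
    c = Client(
        token="<participant-token>",
        base_url="https://gc.localhost/api/v1/",
    )

    # Fetch the job once, then poll: a new job may pass through
    # "Validating inputs" before "Queued" and "Started".
    job = c.algorithm_jobs.detail("<job-pk>")  # placeholder pk
    while job.status in {"Validating inputs", "Queued", "Started"}:
        sleep(5)
        job = c.algorithm_jobs.detail(job.pk)  # assumes the job model exposes .pk, as algorithms do above

    print(job.status)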