Fix CI (#169)
Closes #168
jmsmkn authored and chrisvanrun committed Nov 5, 2024
1 parent 4df0dc3 commit 67f57f8
Showing 8 changed files with 40 additions and 112 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -42,7 +42,7 @@ jobs:
strategy:
fail-fast: false # Try to work around ECS errors
matrix:
python-version: ["3.9", "3.10", "3.11", "3.12"]
python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
10 changes: 5 additions & 5 deletions .pre-commit-config.yaml
@@ -1,14 +1,14 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
-rev: v4.6.0
+rev: v5.0.0
hooks:
- id: check-docstring-first
- id: debug-statements
- id: end-of-file-fixer
- id: mixed-line-ending
- id: trailing-whitespace
- repo: https://github.com/asottile/pyupgrade
-rev: v3.17.0
+rev: v3.18.0
hooks:
- id: pyupgrade
language: python
@@ -18,12 +18,12 @@ repos:
hooks:
- id: isort
- repo: https://github.com/ambv/black
-rev: 24.4.2
+rev: 24.10.0
hooks:
- id: black
language: python
- repo: https://github.com/PyCQA/flake8
-rev: 7.1.0
+rev: 7.1.1
hooks:
- id: flake8
language: python
@@ -35,7 +35,7 @@ repos:
- mccabe
- yesqa
- repo: https://github.com/pre-commit/mirrors-mypy
-rev: 'v1.11.1'
+rev: 'v1.12.0'
hooks:
- id: mypy
additional_dependencies:
2 changes: 1 addition & 1 deletion HISTORY.md
@@ -5,7 +5,7 @@
- Removed the retina endpoints
- Migrated to use Pydantic models for request and response validation
- Removed support for Python 3.8
-- Added support for Python 3.12
+- Added support for Python 3.12 and 3.13

## 0.12.0 (2023-02-20)

3 changes: 2 additions & 1 deletion pyproject.toml
@@ -53,14 +53,15 @@ xfail_strict = true
legacy_tox_ini = """
[tox]
isolated_build = True
-envlist = py39, py310, py311, py312
+envlist = py39, py310, py311, py312, py313
[gh-actions]
python =
3.9: py39
3.10: py310
3.11: py311
3.12: py312
+3.13: py313
[testenv]
allowlist_externals =
26 changes: 15 additions & 11 deletions tests/async_integration_tests.py
@@ -326,12 +326,12 @@ async def get_download():
"algorithm,interface,files",
(
(
"test-algorithm-evaluation-image-1",
"test-algorithm-evaluation-image-0",
"generic-medical-image",
["image10x10x101.mha"],
),
# TODO this algorithm was removed from the test fixtures
# ("test-algorithm-evaluation-file-1", "json-file", ["test.json"]),
# ("test-algorithm-evaluation-file-0", "json-file", ["test.json"]),
),
)
@pytest.mark.anyio
@@ -358,11 +358,9 @@ async def run_job():
# algorithm might not be ready yet
job = await run_job()

assert job["status"] == "Queued"
assert len(job["inputs"]) == 1

assert job["status"] == "Validating inputs"
job = await c.algorithm_jobs.detail(job["pk"])
assert job.status in {"Queued", "Started"}
assert job.status in {"Validating inputs", "Queued", "Started"}


@pytest.mark.parametrize(
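
Note: the run_job assertions changed because a newly created job can now report
"Validating inputs" before it is queued or started. Callers that need a final
result rather than a snapshot can poll until the job leaves these transient
states. A minimal sketch against the async client (the exact set of transient
status strings is an assumption, not confirmed by this diff):

import asyncio

# Assumed transient statuses; adjust to the server's actual values.
TRANSIENT_STATUSES = {"Validating inputs", "Queued", "Started"}

async def wait_for_job(client, job_pk, interval=5.0):
    # Poll the job detail endpoint until the status leaves the transient set.
    while True:
        job = await client.algorithm_jobs.detail(pk=job_pk)
        if job.status not in TRANSIENT_STATUSES:
            return job
        await asyncio.sleep(interval)
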
@@ -395,7 +393,7 @@ async def test_get_algorithm_by_slug(local_grand_challenge):
token=DEMO_PARTICIPANT_TOKEN,
) as c:
by_slug = await c.algorithms.detail(
slug="test-algorithm-evaluation-image-1"
slug="test-algorithm-evaluation-image-0"
)
by_pk = await c.algorithms.detail(pk=by_slug.pk)

@@ -577,8 +575,11 @@ async def get_archive_detail():

item_updated = await get_archive_detail()

-json_civ = item_updated.values[-1]
-assert json_civ.interface.slug == "results-json-file"
+json_civ = [
+    civ
+    for civ in item_updated.values
+    if civ.interface.slug == "results-json-file"
+][0]
assert json_civ.value == {"foo": 0.5}
updated_civ_count = len(item_updated.values)
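
Note: looking the value up by interface slug, rather than taking values[-1],
makes the assertion independent of the order in which the server returns the
archive item's values. An equivalent helper using an explicit loop, shown here
as a hypothetical refactor rather than part of this change:

def civ_by_interface_slug(archive_item, slug):
    # Return the first component interface value attached to the given
    # interface slug, or raise if the item has no such value.
    for civ in archive_item.values:
        if civ.interface.slug == slug:
            return civ
    raise KeyError(f"no value for interface {slug!r}")

# Usage: json_civ = civ_by_interface_slug(item_updated, "results-json-file")
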

Expand All @@ -600,8 +601,11 @@ async def get_updated_archive_detail():
item_updated_again = await get_updated_archive_detail()

assert len(item_updated_again.values) == updated_civ_count
-new_json_civ = item_updated_again.values[-1]
-assert new_json_civ.interface.slug == "results-json-file"
+new_json_civ = [
+    civ
+    for civ in item_updated_again.values
+    if civ.interface.slug == "results-json-file"
+][0]
assert new_json_civ.value == {"foo": 0.8}


25 changes: 15 additions & 10 deletions tests/integration_tests.py
@@ -281,13 +281,13 @@ def get_download():
"algorithm,interface,files",
(
(
"test-algorithm-evaluation-image-1",
"test-algorithm-evaluation-image-0",
"generic-medical-image",
["image10x10x101.mha"],
),
# TODO this algorithm was removed from the test fixtures
# (
# "test-algorithm-evaluation-file-1",
# "test-algorithm-evaluation-file-0",
# "json-file",
# ["test.json"],
# ),
@@ -316,10 +316,9 @@ def run_job():
# algorithm might not be ready yet
job = run_job()

assert job["status"] == "Queued"
assert len(job["inputs"]) == 1
assert job["status"] == "Validating inputs"
job = c.algorithm_jobs.detail(job["pk"])
assert job.status in {"Queued", "Started"}
assert job.status in {"Validating inputs", "Queued", "Started"}


def test_get_algorithm_by_slug(local_grand_challenge):
@@ -329,7 +328,7 @@ def test_get_algorithm_by_slug(local_grand_challenge):
token=DEMO_PARTICIPANT_TOKEN,
)

by_slug = c.algorithms.detail(slug="test-algorithm-evaluation-image-1")
by_slug = c.algorithms.detail(slug="test-algorithm-evaluation-image-0")
by_pk = c.algorithms.detail(pk=by_slug.pk)

assert by_pk == by_slug
@@ -496,8 +495,11 @@ def get_archive_item_detail():

item_updated = get_archive_item_detail()

-json_civ = item_updated.values[-1]
-assert json_civ.interface.slug == "results-json-file"
+json_civ = [
+    civ
+    for civ in item_updated.values
+    if civ.interface.slug == "results-json-file"
+][0]
assert json_civ.value == {"foo": 0.5}
updated_civ_count = len(item_updated.values)

Expand All @@ -517,8 +519,11 @@ def get_updated_archive_item_detail():
item_updated_again = get_updated_archive_item_detail()

assert len(item_updated_again.values) == updated_civ_count
-new_json_civ = item_updated_again.values[-1]
-assert new_json_civ.interface.slug == "results-json-file"
+new_json_civ = [
+    civ
+    for civ in item_updated_again.values
+    if civ.interface.slug == "results-json-file"
+][0]
assert new_json_civ.value == {"foo": 0.8}


76 changes: 1 addition & 75 deletions tests/scripts/create_test_fixtures.py
@@ -1,4 +1,3 @@
-import base64
import gzip
import logging
import os
@@ -11,7 +10,6 @@
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
-from django.core.exceptions import ObjectDoesNotExist
from django.core.files.base import ContentFile
from django.db import IntegrityError
from grandchallenge.algorithms.models import Algorithm, AlgorithmImage
@@ -24,12 +22,7 @@
ComponentInterfaceValue,
)
from grandchallenge.core.fixtures import create_uploaded_image
-from grandchallenge.evaluation.models import (
-    Evaluation,
-    Method,
-    Phase,
-    Submission,
-)
+from grandchallenge.evaluation.models import Method, Phase
from grandchallenge.evaluation.utils import SubmissionKindChoices
from grandchallenge.invoices.models import Invoice
from grandchallenge.reader_studies.models import (
@@ -73,7 +66,6 @@ def run():
raise RuntimeError("Fixtures already initialized") from e

_set_user_permissions(users)
-_create_demo_challenge(users=users)
_create_reader_studies(users)
_create_archive(users)
_create_user_tokens(users)
@@ -153,72 +145,6 @@ def _set_user_permissions(users):
users["demo"].user_permissions.add(add_archive_perm)


-def _create_demo_challenge(users):
-    demo = Challenge.objects.create(
-        short_name="demo",
-        description="Demo Challenge",
-        creator=users["demo"],
-        hidden=False,
-        display_forum_link=True,
-    )
-    demo.add_participant(users["demop"])
-
-    phase = Phase.objects.create(challenge=demo, title="Phase 1")
-
-    phase.score_title = "Accuracy ± std"
-    phase.score_jsonpath = "acc.mean"
-    phase.score_error_jsonpath = "acc.std"
-    phase.extra_results_columns = [
-        {
-            "title": "Dice ± std",
-            "path": "dice.mean",
-            "error_path": "dice.std",
-            "order": "desc",
-        }
-    ]
-
-    phase.submission_kind = SubmissionKindChoices.ALGORITHM
-    phase.save()
-
-    method = Method(phase=phase, creator=users["demo"])
-
-    with _gc_demo_algorithm() as container:
-        method.image.save("algorithm_io.tar", container)
-
-    submission = Submission(phase=phase, creator=users["demop"])
-    content = ContentFile(base64.b64decode(b""))
-    submission.predictions_file.save("test.csv", content)
-    submission.save()
-
-    e = Evaluation.objects.create(
-        submission=submission,
-        method=method,
-        status=Evaluation.SUCCESS,
-        time_limit=300,
-    )
-
-    def create_result(evaluation, result: dict):
-        interface = ComponentInterface.objects.get(slug="metrics-json-file")
-
-        try:
-            output_civ = evaluation.outputs.get(interface=interface)
-            output_civ.value = result
-            output_civ.save()
-        except ObjectDoesNotExist:
-            output_civ = ComponentInterfaceValue.objects.create(
-                interface=interface, value=result
-            )
-            evaluation.outputs.add(output_civ)
-
-    create_result(
-        e,
-        {
-            "acc": {"mean": 0, "std": 0.1},
-            "dice": {"mean": 0.71, "std": 0.05},
-        },
-    )
-
-
def _create_reader_studies(users):
reader_study = ReaderStudy.objects.create(
title="Reader Study",
8 changes: 0 additions & 8 deletions tests/test_gcapi.py
@@ -1,6 +1,5 @@
import pytest
from click.testing import CliRunner
-from httpx import HTTPStatusError

from gcapi import Client, cli
from tests.utils import ADMIN_TOKEN
@@ -114,10 +113,3 @@ def test_command_line_interface():
help_result = runner.invoke(cli.main, ["--help"])
assert help_result.exit_code == 0
assert "--help Show this message and exit." in help_result.output


-def test_ground_truth_url():
-    c = Client(token="foo", base_url="https://example.com/api/v1/")
-    with pytest.raises(HTTPStatusError) as exc_info:
-        c.reader_studies.ground_truth("fake", "image_pk")
-    assert exc_info.value.request.url.path.endswith("image_pk/")
