Skip to content

Commit

Permalink
Hacky testing how to make GUI integration tests
Browse files Browse the repository at this point in the history
  • Loading branch information
GenevieveBuckley committed Oct 4, 2023
1 parent f5d34d4 commit 9319bd4
Show file tree
Hide file tree
Showing 3 changed files with 329 additions and 2 deletions.
9 changes: 7 additions & 2 deletions .github/workflows/test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -30,15 +30,20 @@ jobs:
with:
environment-file: environment_cpu.yaml

# Setup Qt libraries for GUI testing
- uses: tlambert03/setup-qt-libs@v1

- name: Install pytest
shell: bash -l {0}
run: |
python -m pip install pytest
python -m pip install pytest-cov
- name: Install package
shell: bash -l {0}
run: pip install --no-deps -e .
uses: aganders3/headless-gui@v1
with:
shell: bash -l {0}
run: pip install --no-deps -e .

- name: Run tests
shell: bash -l {0}
Expand Down
127 changes: 127 additions & 0 deletions .github/workflows/test_gui.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,127 @@
# Our minimal suite of tests that run on each pull request
name: PR Test

on:
  pull_request:
    branches:
      - main
      - dev
      - "v*x"

# Cancel superseded runs for the same ref to save CI minutes.
concurrency:
  group: test-${{ github.ref }}
  cancel-in-progress: true

env:
  COLUMNS: 120

jobs:
  test:
    name: ${{ matrix.platform }} ${{ matrix.python }} ${{ matrix.toxenv || matrix.backend }} ${{ matrix.MIN_REQ && 'min_req' }}
    runs-on: ${{ matrix.platform }}
    timeout-minutes: 40
    strategy:
      fail-fast: false
      matrix:
        platform: [ubuntu-latest]
        # Always quote Python versions: an unquoted 3.10 parses as the
        # YAML float 3.1 and would select the wrong interpreter.
        python: ["3.8", "3.9", "3.10", "3.11"]
        backend: [pyqt5, pyside2]
        exclude:
          - python: "3.11"
            backend: pyside2
        include:
          # Windows py38
          - python: "3.8"
            platform: windows-latest
            backend: pyqt5
          - python: "3.8"
            platform: windows-latest
            backend: pyside2
          - python: "3.9"
            platform: macos-latest
            backend: pyqt5
          # minimum specified requirements
          - python: "3.8"
            platform: ubuntu-20.04
            backend: pyqt5
            MIN_REQ: 1
          # test without any Qt backends
          - python: "3.8"
            platform: ubuntu-20.04
            backend: headless
          - python: "3.9"
            platform: ubuntu-latest
            backend: pyqt6
          - python: "3.9"
            platform: ubuntu-latest
            backend: pyside6
          # pyside 6
          - python: "3.10"
            platform: ubuntu-latest
            backend: pyside6
          - python: "3.11"
            platform: ubuntu-latest
            backend: pyside6

    steps:
      # NOTE(review): redundant with the `concurrency` block above,
      # which already cancels in-progress runs for the same ref.
      # Kept for parity with upstream; safe to remove.
      - name: Cancel Previous Runs
        uses: styfle/[email protected]
        with:
          access_token: ${{ github.token }}

      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python }}
          cache: "pip"
          cache-dependency-path: setup.cfg

      # System Qt libraries needed for (headless) GUI testing
      - uses: tlambert03/setup-qt-libs@v1

      # strategy borrowed from vispy for installing opengl libs on windows
      - name: Install Windows OpenGL
        if: runner.os == 'Windows'
        run: |
          git clone --depth 1 https://github.com/pyvista/gl-ci-helpers.git
          powershell gl-ci-helpers/appveyor/install_opengl.ps1
          if (Test-Path -Path "C:\Windows\system32\opengl32.dll" -PathType Leaf) {Exit 0} else {Exit 1}

      # tox and tox-gh-actions will take care of the "actual" installation
      # of python dependencies into a virtualenv. see tox.ini for more
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install setuptools tox tox-gh-actions tox-min-req

      # here we pass off control of environment creation and running of tests to tox
      # tox-gh-actions, installed above, helps to convert environment variables into
      # tox "factors" ... limiting the scope of what gets tested on each platform
      # for instance, on ubuntu-latest with python 3.8, it would be equivalent to this command:
      # `tox -e py38-linux-pyqt,py38-linux-pyside`
      # see tox.ini for more
      - name: Test with tox
        # the longest is macos-latest 3.9 pyqt5 at ~30 minutes.
        timeout-minutes: 40
        uses: aganders3/headless-gui@v1
        with:
          run: python -m tox
        env:
          PLATFORM: ${{ matrix.platform }}
          BACKEND: ${{ matrix.backend }}
          TOXENV: ${{ matrix.toxenv }}
          NUMPY_EXPERIMENTAL_ARRAY_FUNCTION: ${{ matrix.MIN_REQ || 1 }}
          # canonical lowercase boolean; serializes into the env as "true"
          PYVISTA_OFF_SCREEN: true
          MIN_REQ: ${{ matrix.MIN_REQ }}
          FORCE_COLOR: 1
          PIP_CONSTRAINT: resources/constraints/constraints_py${{ matrix.python }}${{ matrix.MIN_REQ && '_min_req' }}.txt

      # The description belongs in the step `name:`; the artifact itself
      # gets a short machine-friendly slug.
      - name: Upload pytest timing reports as json
        uses: actions/upload-artifact@v3
        with:
          name: pytest-timing-reports
          path: |
            ./report-*.json

      - name: Coverage
        uses: codecov/codecov-action@v3

195 changes: 195 additions & 0 deletions test/test_gui.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,195 @@
import numpy as np
import skimage.data
from micro_sam.sam_annotator import annotator_2d, annotator_3d
from micro_sam.sam_annotator.annotator_2d import _initialize_viewer, _segment_widget, _autosegment_widget
from micro_sam.sam_annotator.util import _clear_widget, _commit_segmentation_widget


def _check_layer_initialization(viewer):
    """Check that the viewer holds exactly the expected micro-sam layers
    and that all annotation layers start out empty."""
    assert len(viewer.layers) == 6
    expected_layers = (
        "raw",
        "auto_segmentation",
        "committed_objects",
        "current_object",
        "point_prompts",
        "prompts",
    )
    for layer_name in expected_layers:
        assert layer_name in viewer.layers
    # All label/point layers must contain only zeros before tests begin.
    for empty_layer in ("auto_segmentation", "current_object",
                        "committed_objects", "point_prompts"):
        np.testing.assert_equal(viewer.layers[empty_layer].data, 0)
    assert viewer.layers["prompts"].data == []  # shape data is list, not numpy array


def test_annotator_2d_amg(make_napari_viewer_proxy, tmp_path):
    """Integration test for annotator_2d widget with automatic mask generation.

    * Creates 2D image embedding
    * Opens annotator_2d widget in napari
    * Test automatic mask generation
    """
    model_type = "vit_b"
    embedding_path = tmp_path / "test-embedding.zarr"
    # Example data: a basic 2D checkerboard pattern.
    # FIX(review): the original built a 3D (16, 16, 16) volume here, which
    # does not match the 2D annotator under test (it had been swapped with
    # the data in test_annotator_3d).
    image = np.zeros((16, 16))
    image[:8, :8] = 1
    image[8:, 8:] = 1

    viewer = make_napari_viewer_proxy()
    viewer = _initialize_viewer(image, None, None, None)  # TODO: fix hacky workaround
    # test generating image embedding, then adding micro-sam dock widgets to the GUI
    viewer = annotator_2d(
        image,
        embedding_path,
        show_embeddings=False,
        model_type=model_type,
        v=viewer,
        return_viewer=True,
    )
    _check_layer_initialization(viewer)
    # ========================================================================
    # # Automatic mask generation
    # _autosegment_widget(v=viewer, min_object_size=30)
    # # We expect four segmentation regions to be identified
    # expected_segmentation_label_ids = np.array([0,1,2,3])
    # np.testing.assert_equal(np.unique(viewer.layers["auto_segmentation"].data),
    #                         expected_segmentation_label_ids)
    viewer.close()  # must close the viewer at the end of tests


def test_annotator_3d(make_napari_viewer_proxy, tmp_path):
    """Integration test for annotator_3d widget.

    * Creates 3D image embedding
    * Opens annotator_3d widget in napari

    (FIX(review): docstring previously copy-pasted from the 2D test.)
    """
    model_type = "vit_b"
    embedding_path = tmp_path / "test-embedding.zarr"
    # Example data: a basic 3D checkerboard pattern.
    # FIX(review): the original built a 2D (16, 16) image here, which does
    # not match the 3D annotator under test (it had been swapped with the
    # data in test_annotator_2d_amg).
    image = np.zeros((16, 16, 16))
    image[:8, :8, :8] = 1
    image[8:, 8:, 8:] = 1

    viewer = make_napari_viewer_proxy()
    viewer = _initialize_viewer(image, None, None, None)  # TODO: fix hacky workaround
    # test generating image embedding, then adding micro-sam dock widgets to the GUI
    viewer = annotator_3d(
        image,
        embedding_path,
        show_embeddings=False,
        model_type=model_type,
        v=viewer,
        return_viewer=True,
    )
    _check_layer_initialization(viewer)
    # ========================================================================
    # # Automatic mask generation
    # _autosegment_widget(v=viewer, min_object_size=30)
    # # We expect four segmentation regions to be identified
    # expected_segmentation_label_ids = np.array([0,1,2,3])
    # np.testing.assert_equal(np.unique(viewer.layers["auto_segmentation"].data),
    #                         expected_segmentation_label_ids)
    viewer.close()  # must close the viewer at the end of tests


# def test_annotator_2d(make_napari_viewer_proxy, tmp_path):
# """Integration test for annotator_2d widget.

# * Creates 2D image embedding
# * Opens annotator_2d widget in napari
# * Test point prompts (add points, segment object, clear, and commit)
# * Test box prompt (add rectangle prompt, segment object, clear, and commit)
# ...
# """
# model_type = "vit_b"
# image = skimage.data.camera()
# embedding_path = tmp_path / "test-embedding.zarr"

# viewer = make_napari_viewer_proxy()
# viewer = _initialize_viewer(image, None, None, None) # TODO: fix hacky workaround
# # test generating image embedding, then adding micro-sam dock widgets to the GUI
# viewer = annotator_2d(
# image,
# embedding_path,
# show_embeddings=False,
# model_type=model_type,
# v=viewer,
# return_viewer=True
# )
# _check_layer_initialization(viewer)

# # ========================================================================
# # TEST POINT PROMPTS
# # Add three points in the sky region of the camera image
# sky_point_prompts = np.array([[70, 80],[50, 320],[80, 470 ]])
# viewer.layers["point_prompts"].data = sky_point_prompts

# # Segment sky region of image
# _segment_widget(v=viewer) # segment slice
# # We expect all of the first 50 rows should be identified as sky,
# assert (viewer.layers["current_object"].data[0:50,:] == 1).all()
# # We also expect roughly 25% of the image to be sky
# sky_segmentation = np.copy(viewer.layers["current_object"].data)
# segmented_pixel_percentage = (np.sum(sky_segmentation == 1) / image.size) * 100
# assert segmented_pixel_percentage > 25
# assert segmented_pixel_percentage < 30

# # Clear segmentation current object and prompts
# _clear_widget(v=viewer)
# np.testing.assert_equal(viewer.layers["current_object"].data, 0)
# np.testing.assert_equal(viewer.layers["point_prompts"].data, 0)
# assert viewer.layers["prompts"].data == [] # shape data is list, not numpy array

# # Repeat segmentation and commit segmentation result
# viewer.layers["point_prompts"].data = sky_point_prompts
# _segment_widget(v=viewer) # segment slice
# np.testing.assert_equal(sky_segmentation, viewer.layers["current_object"].data)
# # Commit segmentation
# _commit_segmentation_widget(v=viewer)
# np.testing.assert_equal(sky_segmentation, viewer.layers["committed_objects"].data)

# # ========================================================================
# # TEST BOX PROMPTS
# # Add rectangle bounding box prompt
# camera_bounding_box_prompt = np.array([[139, 254],[139, 324],[183, 324],[183, 254]])
# viewer.layers["prompts"].data = [camera_bounding_box_prompt]
# # Segment slice
# _segment_widget(v=viewer) # segment slice
# # Check segmentation results
# camera_segmentation = np.copy(viewer.layers["current_object"].data)
# segmented_pixels = np.sum(camera_segmentation == 1)
# assert segmented_pixels > 2500 # we expect roughly 2770 pixels
# assert segmented_pixels < 3000 # we expect roughly 2770 pixels
# assert (camera_segmentation[150:175,275:310] == 1).all() # small patch which should definitely be inside segmentation

# # Clear segmentation current object and prompts
# _clear_widget(v=viewer)
# np.testing.assert_equal(viewer.layers["current_object"].data, 0)
# np.testing.assert_equal(viewer.layers["point_prompts"].data, 0)
# assert viewer.layers["prompts"].data == [] # shape data is list, not numpy array

# # Repeat segmentation and commit segmentation result
# viewer.layers["prompts"].data = [camera_bounding_box_prompt]
# _segment_widget(v=viewer) # segment slice
# np.testing.assert_equal(camera_segmentation, viewer.layers["current_object"].data)
# # Commit segmentation
# _commit_segmentation_widget(v=viewer)
# committed_objects = viewer.layers["committed_objects"].data
# # We expect two committed objects
# # label id 1: sky segmentation
# # label id 2: camera segmentation
# np.testing.assert_equal(np.unique(committed_objects), np.array([0, 1, 2]))
# np.testing.assert_equal(committed_objects == 2, camera_segmentation == 1)

# # ========================================================================
# viewer.close() # must close the viewer at the end of tests





# def test_something_else(make_napari_viewer_proxy):
# viewer = make_napari_viewer_proxy()
# # carry on with your test
# image = skimage.data.brick()
# viewer.add_image(image, name="raw")
# viewer.close()

0 comments on commit 9319bd4

Please sign in to comment.