Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Use ndx-pose>=0.2 for DeepLabCutInterface and LightningPoseDataInterface #1128

Merged
merged 27 commits into from
Jan 23, 2025
Merged
Show file tree
Hide file tree
Changes from 21 commits
Commits
Show all changes
27 commits
Select commit Hold shift + click to select a range
444221e
update deeplabcut to ndx-pose 0.2
h-mayorquin Nov 6, 2024
0cf99e7
relax contrain
h-mayorquin Nov 6, 2024
c979f80
soft bound on lighting interface
h-mayorquin Nov 6, 2024
50ecd76
changelog
h-mayorquin Nov 6, 2024
354466d
merge attempt
h-mayorquin Dec 16, 2024
ffb5115
Merge branch 'main' into use_latest_version_of_ndx_pose
pauladkisson Dec 20, 2024
3c78303
Merge branch 'main' into use_latest_version_of_ndx_pose
h-mayorquin Dec 20, 2024
9301d7f
changelog
h-mayorquin Dec 20, 2024
8046d0d
Update ndx-pose for Lightning pose (#1170)
pauladkisson Jan 13, 2025
0636d87
Merge branch 'main' into use_latest_version_of_ndx_pose
h-mayorquin Jan 14, 2025
1925bde
merge
h-mayorquin Jan 20, 2025
abc9993
changelog correction
h-mayorquin Jan 20, 2025
6d765a3
fix tests
h-mayorquin Jan 20, 2025
816f27b
fix pose estimation test
h-mayorquin Jan 21, 2025
887428d
modify doctests
h-mayorquin Jan 21, 2025
a08ddea
add the rest of the testing
h-mayorquin Jan 21, 2025
d069e5e
add links to removal
h-mayorquin Jan 21, 2025
a48c9e6
restore sleap io test
h-mayorquin Jan 21, 2025
71cf0ad
testing
h-mayorquin Jan 21, 2025
62eb864
fix version
h-mayorquin Jan 21, 2025
9bdbbfe
skip docstest
h-mayorquin Jan 21, 2025
7504597
Merge branch 'main' into use_latest_version_of_ndx_pose
h-mayorquin Jan 22, 2025
814a248
Update CHANGELOG.md
h-mayorquin Jan 23, 2025
f37e09b
use get_module
h-mayorquin Jan 23, 2025
7e7cf05
add skeleton to dlc
h-mayorquin Jan 23, 2025
901bc35
added assertions for skeletons to lightning
pauladkisson Jan 23, 2025
ee83879
Merge branch 'main' into use_latest_version_of_ndx_pose
h-mayorquin Jan 23, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions .github/workflows/testing.yml
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,12 @@ jobs:
s3-gin-bucket: ${{ secrets.S3_GIN_BUCKET }}
os: ${{ matrix.os }}

# TODO: remove this step after this is merged https://github.com/talmolab/sleap-io/pull/143
- name: Run Sleap Tests until sleap.io adds support for ndx-pose >= 0.2
run : |
pip install ndx-pose==0.1.1
pytest tests/test_on_data/behavior/test_pose_estimation_interfaces.py
- name: Install full requirements
run: pip install .[full]

Expand Down
4 changes: 3 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,12 @@

## Features
* Added `metadata` and `conversion_options` as arguments to `NWBConverter.temporally_align_data_interfaces` [PR #1162](https://github.com/catalystneuro/neuroconv/pull/1162)
* Use the latest version of ndx-pose for `DeepLabCutInterface` [PR #1128](https://github.com/catalystneuro/neuroconv/pull/1128)
h-mayorquin marked this conversation as resolved.
Show resolved Hide resolved

## Improvements

# v0.6.7 (January 20, 2024)

# v0.6.7 (January 20, 2025)

## Deprecations

Expand Down
9 changes: 8 additions & 1 deletion docs/conversion_examples_gallery/conftest.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import platform
from importlib.metadata import version as importlib_version
from pathlib import Path

import pytest
Expand Down Expand Up @@ -29,9 +30,15 @@ def add_data_space(doctest_namespace, tmp_path):
# Hook to conditionally skip doctests in deeplabcut.rst for Python 3.9 on macOS (Darwin)
def pytest_runtest_setup(item):
if isinstance(item, pytest.DoctestItem):
# Check if we are running the doctest from deeplabcut.rst
test_file = Path(item.fspath)
# Check if we are running the doctest from deeplabcut.rst
if test_file.name == "deeplabcut.rst":
# Check if Python version is 3.9 and platform is Darwin (macOS)
if version.parse(python_version) < version.parse("3.10") and os == "Darwin":
pytest.skip("Skipping doctests for deeplabcut.rst on Python 3.9 and macOS")
# Check if we are running the doctest from sleap.rst
# TODO: remove after this is merged https://github.com/talmolab/sleap-io/pull/143 and released
elif test_file.name in ["ecephys_pose_estimation.rst", "sleap.rst"]:
ndx_pose_version = version.parse(importlib_version("ndx-pose"))
if ndx_pose_version >= version.parse("0.2.0"):
pytest.skip("Skipping doctests because sleap tests only run when ndx-pose version < 0.2.0")
4 changes: 2 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ sleap = [
"sleap-io>=0.0.2; python_version>='3.9'",
]
deeplabcut = [
"ndx-pose==0.1.1",
"ndx-pose>=0.2",
"tables; platform_system != 'Darwin'",
"tables>=3.10.1; platform_system == 'Darwin' and python_version >= '3.10'",
]
Expand All @@ -128,7 +128,7 @@ video = [
"opencv-python-headless>=4.8.1.78",
]
lightningpose = [
"ndx-pose==0.1.1",
"ndx-pose>=0.2",
"neuroconv[video]",
]
medpc = [
Expand Down
78 changes: 56 additions & 22 deletions src/neuroconv/datainterfaces/behavior/deeplabcut/_dlc_utils.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
import importlib
import pickle
import warnings
from pathlib import Path
Expand Down Expand Up @@ -93,7 +92,7 @@ def _get_cv2_timestamps(file_path: Union[Path, str]):
return timestamps


def _get_movie_timestamps(movie_file, VARIABILITYBOUND=1000, infer_timestamps=True):
def _get_video_timestamps(movie_file, VARIABILITYBOUND=1000, infer_timestamps=True):
"""
Return numpy array of the timestamps for a video.

Expand Down Expand Up @@ -263,13 +262,52 @@ def _write_pes_to_nwbfile(
exclude_nans,
pose_estimation_container_kwargs: Optional[dict] = None,
):

from ndx_pose import PoseEstimation, PoseEstimationSeries
"""
Updated version of _write_pes_to_nwbfile to work with ndx-pose v0.2.0+
"""
from ndx_pose import PoseEstimation, PoseEstimationSeries, Skeleton, Skeletons
from pynwb.file import Subject

pose_estimation_container_kwargs = pose_estimation_container_kwargs or dict()
pose_estimation_name = pose_estimation_container_kwargs.get("name", "PoseEstimationDeepLabCut")

# Create a subject if it doesn't exist
if nwbfile.subject is None:
subject = Subject(subject_id=animal)
nwbfile.subject = subject
else:
subject = nwbfile.subject

# Create skeleton from the keypoints
keypoints = df_animal.columns.get_level_values("bodyparts").unique()
animal = animal if animal else ""
subject = subject if animal == subject.subject_id else None
skeleton_name = f"Skeleton{pose_estimation_name}_{animal.capitalize()}"
skeleton = Skeleton(
name=skeleton_name,
nodes=list(keypoints),
edges=np.array(paf_graph) if paf_graph else None, # Convert paf_graph to numpy array
subject=subject,
)

# Create Skeletons container
if "behavior" not in nwbfile.processing:
behavior_processing_module = nwbfile.create_processing_module(
name="behavior", description="processed behavioral data"
pauladkisson marked this conversation as resolved.
Show resolved Hide resolved
)
skeletons = Skeletons(skeletons=[skeleton])
behavior_processing_module.add(skeletons)
else:
behavior_processing_module = nwbfile.processing["behavior"]
if "Skeletons" not in behavior_processing_module.data_interfaces:
skeletons = Skeletons(skeletons=[skeleton])
behavior_processing_module.add(skeletons)
else:
skeletons = behavior_processing_module["Skeletons"]
skeletons.add_skeletons(skeleton)

pose_estimation_series = []
for keypoint in df_animal.columns.get_level_values("bodyparts").unique():
for keypoint in keypoints:
data = df_animal.xs(keypoint, level="bodyparts", axis=1).to_numpy()

if exclude_nans:
Expand All @@ -292,35 +330,31 @@ def _write_pes_to_nwbfile(
)
pose_estimation_series.append(pes)

deeplabcut_version = None
is_deeplabcut_installed = importlib.util.find_spec(name="deeplabcut") is not None
if is_deeplabcut_installed:
deeplabcut_version = importlib.metadata.version(distribution_name="deeplabcut")
camera_name = pose_estimation_name
if camera_name not in nwbfile.devices:
camera = nwbfile.create_device(
name=camera_name,
description="Camera used for behavioral recording and pose estimation.",
)
else:
camera = nwbfile.devices[camera_name]

# TODO, taken from the original implementation, improve it if the video is passed
# Create PoseEstimation container with updated arguments
dimensions = [list(map(int, image_shape.split(",")))[1::2]]
dimensions = np.array(dimensions, dtype="uint32")
pose_estimation_default_kwargs = dict(
pose_estimation_series=pose_estimation_series,
description="2D keypoint coordinates estimated using DeepLabCut.",
original_videos=[video_file_path],
original_videos=[video_file_path] if video_file_path else None,
dimensions=dimensions,
devices=[camera],
scorer=scorer,
source_software="DeepLabCut",
source_software_version=deeplabcut_version,
nodes=[pes.name for pes in pose_estimation_series],
edges=paf_graph if paf_graph else None,
**pose_estimation_container_kwargs,
skeleton=skeleton,
)
pose_estimation_default_kwargs.update(pose_estimation_container_kwargs)
pose_estimation_container = PoseEstimation(**pose_estimation_default_kwargs)

if "behavior" in nwbfile.processing: # TODO: replace with get_module
behavior_processing_module = nwbfile.processing["behavior"]
else:
behavior_processing_module = nwbfile.create_processing_module(
name="behavior", description="processed behavioral data"
)
behavior_processing_module.add(pose_estimation_container)

return nwbfile
Expand Down Expand Up @@ -387,7 +421,7 @@ def _add_subject_to_nwbfile(
if video_file_path is None:
timestamps = df.index.tolist() # setting timestamps to dummy
else:
timestamps = _get_movie_timestamps(video_file_path, infer_timestamps=True)
timestamps = _get_video_timestamps(video_file_path, infer_timestamps=True)

# Fetch the corresponding metadata pickle file, we extract the edges graph from here
# TODO: This is the original implementation way to extract the file name but looks very brittle. Improve it
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ class DeepLabCutInterface(BaseTemporalAlignmentInterface):
"""Data interface for DeepLabCut datasets."""

display_name = "DeepLabCut"
keywords = ("DLC",)
keywords = ("DLC", "DeepLabCut", "pose estimation", "behavior")
associated_suffixes = (".h5", ".csv")
info = "Interface for handling data from DeepLabCut."

Expand Down Expand Up @@ -48,7 +48,18 @@ def __init__(
Controls verbosity.
"""
# This import is to assure that the ndx_pose is in the global namespace when an pynwb.io object is created
from ndx_pose import PoseEstimation, PoseEstimationSeries # noqa: F401
from importlib.metadata import version

import ndx_pose # noqa: F401
from packaging import version as version_parse

ndx_pose_version = version("ndx-pose")
if version_parse.parse(ndx_pose_version) < version_parse.parse("0.2.0"):
raise ImportError(
"DeepLabCut interface requires ndx-pose version 0.2.0 or later. "
f"Found version {ndx_pose_version}. Please upgrade: "
"pip install 'ndx-pose>=0.2.0'"
)

from ._dlc_utils import _read_config

Expand All @@ -62,6 +73,8 @@ def __init__(
self.config_dict = _read_config(config_file_path=config_file_path)
self.subject_name = subject_name
self.verbose = verbose
self.pose_estimation_container_kwargs = dict()

super().__init__(file_path=file_path, config_file_path=config_file_path)

def get_metadata(self):
Expand Down Expand Up @@ -101,7 +114,7 @@ def add_to_nwbfile(
self,
nwbfile: NWBFile,
metadata: Optional[dict] = None,
container_name: str = "PoseEstimation",
container_name: str = "PoseEstimationDeepLabCut",
):
"""
Conversion from DLC output files to nwb. Derived from dlc2nwb library.
Expand All @@ -112,16 +125,19 @@ def add_to_nwbfile(
nwb file to which the recording information is to be added
metadata: dict
metadata info for constructing the nwb file (optional).
container_name: str, default: "PoseEstimation"
Name of the container to store the pose estimation.
container_name: str, default: "PoseEstimationDeepLabCut"
name of the PoseEstimation container in the nwb

"""
from ._dlc_utils import _add_subject_to_nwbfile

self.pose_estimation_container_kwargs["name"] = container_name

_add_subject_to_nwbfile(
nwbfile=nwbfile,
file_path=str(self.source_data["file_path"]),
individual_name=self.subject_name,
config_file=self.source_data["config_file_path"],
timestamps=self._timestamps,
pose_estimation_container_kwargs=dict(name=container_name),
pose_estimation_container_kwargs=self.pose_estimation_container_kwargs,
)
Original file line number Diff line number Diff line change
Expand Up @@ -40,9 +40,10 @@ def get_metadata_schema(self) -> dict:
description=dict(type="string"),
scorer=dict(type="string"),
source_software=dict(type="string", default="LightningPose"),
camera_name=dict(type="string", default="CameraPoseEstimation"),
),
patternProperties={
"^(?!(name|description|scorer|source_software)$)[a-zA-Z0-9_]+$": dict(
"^(?!(name|description|scorer|source_software|camera_name)$)[a-zA-Z0-9_]+$": dict(
title="PoseEstimationSeries",
type="object",
properties=dict(name=dict(type="string"), description=dict(type="string")),
Expand Down Expand Up @@ -80,9 +81,21 @@ def __init__(
verbose : bool, default: True
controls verbosity. ``True`` by default.
"""

# This import is to assure that the ndx_pose is in the global namespace when an pynwb.io object is created
# For more detail, see https://github.com/rly/ndx-pose/issues/36
from importlib.metadata import version

import ndx_pose # noqa: F401
from packaging import version as version_parse

ndx_pose_version = version("ndx-pose")
if version_parse.parse(ndx_pose_version) < version_parse.parse("0.2.0"):
raise ImportError(
"LightningPose interface requires ndx-pose version 0.2.0 or later. "
f"Found version {ndx_pose_version}. Please upgrade: "
"pip install 'ndx-pose>=0.2.0'"
)

from neuroconv.datainterfaces.behavior.video.video_utils import (
VideoCaptureContext,
Expand Down Expand Up @@ -162,6 +175,7 @@ def get_metadata(self) -> DeepDict:
description="Contains the pose estimation series for each keypoint.",
scorer=self.scorer_name,
source_software="LightningPose",
camera_name="CameraPoseEstimation",
)
for keypoint_name in self.keypoint_names:
keypoint_name_without_spaces = keypoint_name.replace(" ", "")
Expand Down Expand Up @@ -198,7 +212,7 @@ def add_to_nwbfile(
The description of how the confidence was computed, e.g., 'Softmax output of the deep neural network'.
stub_test : bool, default: False
"""
from ndx_pose import PoseEstimation, PoseEstimationSeries
from ndx_pose import PoseEstimation, PoseEstimationSeries, Skeleton, Skeletons

metadata_copy = deepcopy(metadata)

Expand All @@ -215,15 +229,14 @@ def add_to_nwbfile(
original_video_name = str(self.original_video_file_path)
else:
original_video_name = metadata_copy["Behavior"]["Videos"][0]["name"]

pose_estimation_kwargs = dict(
name=pose_estimation_metadata["name"],
description=pose_estimation_metadata["description"],
source_software=pose_estimation_metadata["source_software"],
scorer=pose_estimation_metadata["scorer"],
original_videos=[original_video_name],
dimensions=[self.dimension],
)
camera_name = pose_estimation_metadata["camera_name"]
if camera_name in nwbfile.devices:
camera = nwbfile.devices[camera_name]
else:
camera = nwbfile.create_device(
name=camera_name,
description="Camera used for behavioral recording and pose estimation.",
)

pose_estimation_data = self.pose_estimation_data if not stub_test else self.pose_estimation_data.head(n=10)
timestamps = self.get_timestamps(stub_test=stub_test)
Expand Down Expand Up @@ -255,8 +268,28 @@ def add_to_nwbfile(

pose_estimation_series.append(PoseEstimationSeries(**pose_estimation_series_kwargs))

pose_estimation_kwargs.update(
# Add Skeleton(s)
nodes = [keypoint_name.replace(" ", "") for keypoint_name in self.keypoint_names]
subject = nwbfile.subject if nwbfile.subject is not None else None
name = f"Skeleton{pose_estimation_name}"
skeleton = Skeleton(name=name, nodes=nodes, subject=subject)
if "Skeletons" in behavior.data_interfaces:
skeletons = behavior.data_interfaces["Skeletons"]
skeletons.add_skeletons(skeleton)
else:
skeletons = Skeletons(skeletons=[skeleton])
behavior.add(skeletons)

pose_estimation_kwargs = dict(
name=pose_estimation_metadata["name"],
description=pose_estimation_metadata["description"],
source_software=pose_estimation_metadata["source_software"],
scorer=pose_estimation_metadata["scorer"],
original_videos=[original_video_name],
dimensions=[self.dimension],
pose_estimation_series=pose_estimation_series,
devices=[camera],
skeleton=skeleton,
)

if self.source_data["labeled_video_file_path"]:
Expand Down
18 changes: 18 additions & 0 deletions src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,24 @@ def __init__(
frames_per_second : float, optional
The frames per second (fps) or sampling rate of the video.
"""

# This import is to assure that the ndx_pose is in the global namespace when an pynwb.io object is created
# For more detail, see https://github.com/rly/ndx-pose/issues/36
from importlib.metadata import version

import ndx_pose # noqa: F401
from packaging import version as version_parse

ndx_pose_version = version("ndx-pose")

# TODO: remove after this is merged https://github.com/talmolab/sleap-io/pull/143 and released
if version_parse.parse(ndx_pose_version) != version_parse.parse("0.1.1"):
raise ImportError(
"SLEAP interface requires ndx-pose version 0.1.1. "
f"Found version {ndx_pose_version}. Please install the required version: "
"pip install 'ndx-pose==0.1.1'"
)

self.file_path = Path(file_path)
self.sleap_io = get_package(package_name="sleap_io")
self.video_file_path = video_file_path
Expand Down
Loading
Loading