diff --git a/docs/tables/egocentrically_align_pose_numba.csv b/docs/tables/egocentrically_align_pose_numba.csv
index df244643c..d07e1692a 100644
--- a/docs/tables/egocentrically_align_pose_numba.csv
+++ b/docs/tables/egocentrically_align_pose_numba.csv
@@ -1,10 +1,10 @@
 FRAMES (MILLIONS),NUMBA TIME (S),NUMBA TIME (STEV),NUMPY TIME (S),NUMPY TIME (STEV)
-1,,,10.138,0.4589
-2,,,16.894,0.264
-4,,,33.813,0.3712255
-8,,,73.43435,0.526412
-16,,,134.0284325,0.858443488
-32,,,270.4346202,1.3789
-64,,,540.896359,1.781485522
+1,0.733,0.006,10.138,0.459
+2,1.474,0.004,16.894,0.264
+4,2.969,0.032,33.813,0.371
+8,5.991,0.061,73.434,0.526
+16,12.123,0.215,134.028,0.858
+32,23.844,0.105,270.435,1.379
+64,48.296,0.034,540.896,1.781
 7 BODY-PARTS PER FRAME,,,,
 3 ITERATIONS,,,,
diff --git a/simba/data_processors/cuda/image.py b/simba/data_processors/cuda/image.py
index 45355668d..51706445b 100644
--- a/simba/data_processors/cuda/image.py
+++ b/simba/data_processors/cuda/image.py
@@ -1192,12 +1192,12 @@ def bg_subtraction_cupy(video_path: Union[str, os.PathLike],
     timer.stop_timer()
     stdout_success(msg=f'Video saved at {save_path}', elapsed_time=timer.elapsed_time_str)
 
-
-#from simba.data_processors.cuda.image import create_average_frm_cupy
-SAVE_PATH = "/mnt/c/Users/sroni/Downloads/bg_remove_nb/bg_removed_ex_7.mp4"
-VIDEO_PATH = "/mnt/c/Users/sroni/Downloads/bg_remove_nb/open_field.mp4"
-avg_frm = create_average_frm_cuda(video_path=VIDEO_PATH)
-#
-get_video_meta_data(VIDEO_PATH) #
-bg_subtraction_cuda(video_path=VIDEO_PATH, avg_frm=avg_frm, save_path=SAVE_PATH, threshold=70)
+# #from simba.data_processors.cuda.image import create_average_frm_cupy
+# SAVE_PATH = "/mnt/c/Users/sroni/Downloads/bg_remove_nb/bg_removed_ex_7.mp4"
+# VIDEO_PATH = "/mnt/c/Users/sroni/Downloads/bg_remove_nb/open_field.mp4"
+# avg_frm = create_average_frm_cuda(video_path=VIDEO_PATH)
+# #
+# get_video_meta_data(VIDEO_PATH)
+# #
+# bg_subtraction_cuda(video_path=VIDEO_PATH, avg_frm=avg_frm, save_path=SAVE_PATH, threshold=70)
 
diff --git a/simba/utils/data.py b/simba/utils/data.py
index 85f1af88a..c4947e66b 100644
--- a/simba/utils/data.py
+++ b/simba/utils/data.py
@@ -14,7 +14,7 @@
 import h5py
 import numpy as np
 import pandas as pd
-from numba import jit, prange, typed
+from numba import jit, njit, prange, typed
 from pylab import *
 from scipy import stats
 from scipy.signal import savgol_filter
@@ -1492,6 +1492,80 @@ def egocentrically_align_pose(data: np.ndarray,
 
     return results, centers, rotation_vectors
 
 
+@njit("(int32[:, :, :], int64, int64, int64, int32[:])", parallel=True)
+def egocentrically_align_pose_numba(data: np.ndarray,
+                                    anchor_1_idx: int,
+                                    anchor_2_idx: int,
+                                    direction: int,
+                                    anchor_location: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+    """
+    Aligns a set of 2D points egocentrically based on two anchor points and a target direction.
+
+    Rotates and translates a 3D array of 2D points (e.g., a time-series of frame-wise pose data) such that
+    one anchor point is aligned to a specified location, and the direction between the two anchors is aligned
+    to a target angle.
+
+    .. video:: _static/img/EgocentricalAligner.webm
+       :width: 600
+       :autoplay:
+       :loop:
+
+    .. csv-table::
+       :header: EXPECTED RUNTIMES
+       :file: ../../../docs/tables/egocentrically_align_pose_numba.csv
+       :widths: 12, 22, 22, 22, 22
+       :align: center
+       :class: simba-table
+       :header-rows: 1
+
+    .. seealso::
+       For the pure NumPy implementation, see :func:`simba.utils.data.egocentrically_align_pose`.
+       To align both pose and video, see :class:`simba.data_processors.egocentric_aligner.EgocentricalAligner`.
+
+    :param np.ndarray data: A 3D array of shape `(num_frames, num_points, 2)` containing 2D points for each frame. Each frame is represented as a 2D array of shape `(num_points, 2)`, where each row corresponds to a point's (x, y) coordinates.
+    :param int anchor_1_idx: The index of the first anchor point in `data`, used as the center of alignment. This body-part will be placed in the center of the image.
+    :param int anchor_2_idx: The index of the second anchor point in `data`, used to calculate the direction vector. This body-part will be located `direction` degrees from the anchor_1 body-part.
+    :param int direction: The target direction in degrees to which the vector between the two anchors will be aligned.
+    :param np.ndarray anchor_location: A 1D array of shape `(2,)` specifying the target (x, y) location for `anchor_1_idx` after alignment.
+    :return: A tuple containing the rotated data and the variables required to rotate the corresponding video using the same rules:
+             - `aligned_data`: A 3D array of shape `(num_frames, num_points, 2)` with the aligned 2D points.
+             - `centers`: A 2D array of shape `(num_frames, 2)` containing the original locations of `anchor_1_idx` in each frame before alignment.
+             - `rotation_vectors`: A 3D array of shape `(num_frames, 2, 2)` containing the rotation matrices applied to each frame.
+    :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray]
+
+    :example:
+    >>> data = np.random.randint(0, 500, (100, 7, 2)).astype(np.int32)
+    >>> anchor_1_idx = 5 # E.g., the animal tail-base is the 6th body-part (index 5).
+    >>> anchor_2_idx = 6 # E.g., the animal nose is the 7th body-part (index 6).
+    >>> anchor_location = np.array([250, 250], dtype=np.int32) # The tail-base (index 5) is placed at x=250, y=250 in the image.
+    >>> direction = 90 # The nose (index 6) will be placed 90 degrees (south, in image coordinates) relative to the tail-base.
+    >>> results, centers, rotation_vectors = egocentrically_align_pose_numba(data=data, anchor_1_idx=anchor_1_idx, anchor_2_idx=anchor_2_idx, direction=direction, anchor_location=anchor_location)
+    """
+
+    target_angle = np.deg2rad(direction)
+    centers = np.full((data.shape[0], 2), fill_value=-1, dtype=np.int32)
+    rotation_vectors = np.full((data.shape[0], 2, 2), fill_value=-1, dtype=np.float32)
+    results = np.zeros_like(data, dtype=np.int32)
+    for frm_idx in prange(data.shape[0]):
+        frm_points = data[frm_idx]
+        frm_anchor_1, frm_anchor_2 = frm_points[anchor_1_idx], frm_points[anchor_2_idx]
+        centers[frm_idx] = frm_anchor_1
+        # Angle of the anchor_1 -> anchor_2 vector in this frame.
+        delta_x, delta_y = frm_anchor_2[0] - frm_anchor_1[0], frm_anchor_2[1] - frm_anchor_1[1]
+        frm_angle = np.arctan2(delta_y, delta_x)
+        # Rotation required to bring that vector onto the target direction.
+        frm_rotation_angle = target_angle - frm_angle
+        frm_cos_theta, frm_sin_theta = np.cos(frm_rotation_angle), np.sin(frm_rotation_angle)
+        R = np.array([[frm_cos_theta, -frm_sin_theta], [frm_sin_theta, frm_cos_theta]])
+        rotation_vectors[frm_idx] = R
+        # Rotate all points around anchor_1, then translate anchor_1 onto the target location.
+        keypoints_rotated = np.dot(frm_points.astype(np.float64) - frm_anchor_1.astype(np.float64), R.T)
+        anchor_1_position_after_rotation = keypoints_rotated[anchor_1_idx]
+        translation_to_target = anchor_location - anchor_1_position_after_rotation
+        results[frm_idx] = keypoints_rotated + translation_to_target
+
+    return results, centers, rotation_vectors
+
+
 # run_user_defined_feature_extraction_class(config_path='/Users/simon/Desktop/envs/troubleshooting/circular_features_zebrafish/project_folder/project_config.ini', file_path='/Users/simon/Desktop/fish_feature_extractor_2023_version_5.py')
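
A quick smoke-test for the new function, outside the diff: a minimal sketch that assumes this branch is installed so `egocentrically_align_pose_numba` is importable from `simba.utils.data`. Shapes and dtypes follow the `@njit` signature above (int32 pose array, Python-int anchor indices, int32 anchor location); the assertion relies only on the documented behavior that the anchor-1 body-part is pinned at `anchor_location`.

import numpy as np

from simba.utils.data import egocentrically_align_pose_numba

# Synthetic pose data: 100 frames x 7 body-parts x (x, y).
# The eager @njit signature requires int32 input, so cast explicitly.
data = np.random.randint(0, 500, (100, 7, 2)).astype(np.int32)

results, centers, rotation_vectors = egocentrically_align_pose_numba(
    data=data,
    anchor_1_idx=5,  # e.g., tail-base: pinned at anchor_location in every frame
    anchor_2_idx=6,  # e.g., nose: rotated to lie `direction` degrees from the tail-base
    direction=90,
    anchor_location=np.array([250, 250]).astype(np.int32))

# Rotation around anchor-1 leaves it at the origin, so after translation every
# frame's tail-base should sit exactly on the target location.
assert np.all(results[:, 5, :] == np.array([250, 250], dtype=np.int32))
print(results.shape, centers.shape, rotation_vectors.shape)  # (100, 7, 2) (100, 2) (100, 2, 2)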