From 0999268300b1f310f15fc6e0c7472ee30d5d70f7 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Mon, 15 Jul 2024 16:47:44 -0700 Subject: [PATCH 01/36] Simple Visualizer Block --- inference/core/workflows/core_steps/loader.py | 7 ++ .../core_steps/visualizations/bounding_box.py | 97 +++++++++++++++++++ 2 files changed, 104 insertions(+) create mode 100644 inference/core/workflows/core_steps/visualizations/bounding_box.py diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index 37fc716a8..f81d39c7c 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -76,6 +76,11 @@ from inference.core.workflows.core_steps.transformations.relative_static_crop import ( RelativeStaticCropBlock, ) + +from inference.core.workflows.core_steps.visualizations.bounding_box import ( + BoundingBoxVisualizationBlock +) + from inference.core.workflows.prototypes.block import WorkflowBlock @@ -109,4 +114,6 @@ def load_blocks() -> List[Type[WorkflowBlock]]: PropertyDefinitionBlock, DimensionCollapseBlock, FirstNonEmptyOrDefaultBlock, + + BoundingBoxVisualizationBlock, ] diff --git a/inference/core/workflows/core_steps/visualizations/bounding_box.py b/inference/core/workflows/core_steps/visualizations/bounding_box.py new file mode 100644 index 000000000..cb47c4f86 --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/bounding_box.py @@ -0,0 +1,97 @@ +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import AliasChoices, ConfigDict, Field + +from inference.core.workflows.entities.base import ( + OutputDefinition, + WorkflowImageData, +) +from inference.core.workflows.entities.types import ( + IMAGE_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + StepOutputImageSelector, + StepOutputSelector, + WorkflowImageSelector +) +from 
inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, +) + +OUTPUT_IMAGE_KEY: str = "image" +TYPE: str = "BoundingBoxVisualization" +SHORT_DESCRIPTION = ( + "Draws a box around detected objects in an image." +) +LONG_DESCRIPTION = """ +The `BoundingBoxVisualization` block draws a box around detected +objects in an image using Supervision's `sv.RoundBoxAnnotator`. +""" + + +class BoundingBoxManifest(WorkflowBlockManifest): + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + type: Literal[f"{TYPE}"] + predictions: StepOutputSelector( + kind=[ + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + ] + ) = Field( # type: ignore + description="Predictions", + examples=["$steps.object_detection_model.predictions"], + ) + image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + title="Input Image", + description="The input image for this step.", + examples=["$inputs.image", "$steps.cropping.crops"], + validation_alias=AliasChoices("image", "images"), + ) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition( + name=OUTPUT_IMAGE_KEY, + kind=[ + IMAGE_KIND, + ], + ), + ] + +class BoundingBoxVisualizationBlock(WorkflowBlock): + def __init__(self): + self.annotator = None + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BoundingBoxManifest + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections + ) -> BlockResult: + if self.annotator is None: + self.annotator = sv.RoundBoxAnnotator() + + output = self.annotator.annotate( + scene=image.numpy_image, + detections=predictions + ) + + return { + OUTPUT_IMAGE_KEY: output + } From 01f027e85cc8b47bbf04ae940fa19e06fb9fca89 Mon Sep 17 00:00:00 2001 
From: Brad Dwyer Date: Tue, 16 Jul 2024 09:12:40 -0700 Subject: [PATCH 02/36] Simple working version --- .../core_steps/visualizations/bounding_box.py | 33 +++++++++++++------ 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/inference/core/workflows/core_steps/visualizations/bounding_box.py b/inference/core/workflows/core_steps/visualizations/bounding_box.py index cb47c4f86..67859cec0 100644 --- a/inference/core/workflows/core_steps/visualizations/bounding_box.py +++ b/inference/core/workflows/core_steps/visualizations/bounding_box.py @@ -1,3 +1,4 @@ +from dataclasses import replace from typing import List, Literal, Optional, Type, Union import supervision as sv @@ -8,10 +9,14 @@ WorkflowImageData, ) from inference.core.workflows.entities.types import ( - IMAGE_KIND, - OBJECT_DETECTION_PREDICTION_KIND, - INSTANCE_SEGMENTATION_PREDICTION_KIND, - KEYPOINT_DETECTION_PREDICTION_KIND, + # IMAGE_KIND, + # OBJECT_DETECTION_PREDICTION_KIND, + # INSTANCE_SEGMENTATION_PREDICTION_KIND, + # KEYPOINT_DETECTION_PREDICTION_KIND, + BATCH_OF_IMAGES_KIND, + BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, StepOutputImageSelector, StepOutputSelector, WorkflowImageSelector @@ -45,9 +50,9 @@ class BoundingBoxManifest(WorkflowBlockManifest): type: Literal[f"{TYPE}"] predictions: StepOutputSelector( kind=[ - OBJECT_DETECTION_PREDICTION_KIND, - INSTANCE_SEGMENTATION_PREDICTION_KIND, - KEYPOINT_DETECTION_PREDICTION_KIND, + BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, ] ) = Field( # type: ignore description="Predictions", @@ -66,7 +71,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: OutputDefinition( name=OUTPUT_IMAGE_KEY, kind=[ - IMAGE_KIND, + BATCH_OF_IMAGES_KIND, ], ), ] @@ -85,13 +90,21 @@ async def run( predictions: sv.Detections ) -> BlockResult: if self.annotator is None: - 
self.annotator = sv.RoundBoxAnnotator() + self.annotator = sv.RoundBoxAnnotator( + thickness=3 + ) - output = self.annotator.annotate( + annotated_image = self.annotator.annotate( scene=image.numpy_image, detections=predictions ) + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + return { OUTPUT_IMAGE_KEY: output } From ffdde1691d237897c4588f4b2bc07cdb43d071ae Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Tue, 16 Jul 2024 10:04:31 -0700 Subject: [PATCH 03/36] Add block configuration --- .../core_steps/visualizations/bounding_box.py | 56 ++++++++++++++++--- 1 file changed, 47 insertions(+), 9 deletions(-) diff --git a/inference/core/workflows/core_steps/visualizations/bounding_box.py b/inference/core/workflows/core_steps/visualizations/bounding_box.py index 67859cec0..021dcc137 100644 --- a/inference/core/workflows/core_steps/visualizations/bounding_box.py +++ b/inference/core/workflows/core_steps/visualizations/bounding_box.py @@ -17,9 +17,14 @@ BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + INTEGER_KIND, + FloatZeroToOne, + FLOAT_ZERO_TO_ONE_KIND, + BOOLEAN_KIND, StepOutputImageSelector, StepOutputSelector, - WorkflowImageSelector + WorkflowImageSelector, + WorkflowParameterSelector ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -65,6 +70,21 @@ class BoundingBoxManifest(WorkflowBlockManifest): validation_alias=AliasChoices("image", "images"), ) + copy_image: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( + description="Duplicate the image contents (vs overwriting the image in place). 
Deselect for chained visualizations that should stack on previous ones where the intermediate state is not needed.", + default=True + ) + + thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( + description="Thickness of the bounding box in pixels.", + default=1, + ) + + roundness: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( + description="Roundness of the corners of the bounding box.", + default=0.0, + ) + @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ @@ -76,9 +96,27 @@ def describe_outputs(cls) -> List[OutputDefinition]: ), ] +annotatorCache = {} + +def getAnnotator( + thickness:int, + roundness: float +): + key = f"{thickness}_{roundness}" + if key not in annotatorCache: + if roundness == 0: + annotatorCache[key] = sv.BoxAnnotator( + thickness=thickness + ) + else: + annotatorCache[key] = sv.RoundBoxAnnotator( + thickness=thickness, + roundness=roundness + ) + return annotatorCache[key] class BoundingBoxVisualizationBlock(WorkflowBlock): def __init__(self): - self.annotator = None + pass @classmethod def get_manifest(cls) -> Type[WorkflowBlockManifest]: @@ -87,15 +125,15 @@ def get_manifest(cls) -> Type[WorkflowBlockManifest]: async def run( self, image: WorkflowImageData, - predictions: sv.Detections + predictions: sv.Detections, + copy_image: bool, + thickness: Optional[int], + roundness: Optional[float] ) -> BlockResult: - if self.annotator is None: - self.annotator = sv.RoundBoxAnnotator( - thickness=3 - ) + annotator = getAnnotator(thickness, roundness) - annotated_image = self.annotator.annotate( - scene=image.numpy_image, + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, detections=predictions ) From 806c68ddfa12fd3a52940fb9a5033b584435e21f Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Tue, 16 Jul 2024 13:59:49 -0700 Subject: [PATCH 04/36] Add color_lookup --- 
.../core_steps/visualizations/bounding_box.py | 26 ++++++++++++++++--- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/inference/core/workflows/core_steps/visualizations/bounding_box.py b/inference/core/workflows/core_steps/visualizations/bounding_box.py index 021dcc137..7374fe172 100644 --- a/inference/core/workflows/core_steps/visualizations/bounding_box.py +++ b/inference/core/workflows/core_steps/visualizations/bounding_box.py @@ -21,6 +21,7 @@ FloatZeroToOne, FLOAT_ZERO_TO_ONE_KIND, BOOLEAN_KIND, + STRING_KIND, StepOutputImageSelector, StepOutputSelector, WorkflowImageSelector, @@ -75,9 +76,22 @@ class BoundingBoxManifest(WorkflowBlockManifest): default=True ) + color_lookup: Union[ + Literal[ + "INDEX", + "CLASS", + "TRACK" + ], + WorkflowParameterSelector(kind=[STRING_KIND]), + ] = Field( + default="CLASS", + description="Strategy to use for mapping colors to annotations.", + examples=["CLASS", "$inputs.color_lookup"], + ) + thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( description="Thickness of the bounding box in pixels.", - default=1, + default=2, ) roundness: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( @@ -99,17 +113,20 @@ def describe_outputs(cls) -> List[OutputDefinition]: annotatorCache = {} def getAnnotator( + color_lookup:str, thickness:int, roundness: float ): - key = f"{thickness}_{roundness}" + key = f"{color_lookup}_{thickness}_{roundness}" if key not in annotatorCache: if roundness == 0: annotatorCache[key] = sv.BoxAnnotator( + color_lookup=getattr(sv.annotators.utils.ColorLookup, color_lookup), thickness=thickness ) else: annotatorCache[key] = sv.RoundBoxAnnotator( + color_lookup=getattr(sv.annotators.utils.ColorLookup, color_lookup), thickness=thickness, roundness=roundness ) @@ -127,10 +144,11 @@ async def run( image: WorkflowImageData, predictions: sv.Detections, copy_image: bool, + color_lookup: Optional[str], thickness: Optional[int], - roundness: 
Optional[float] + roundness: Optional[float], ) -> BlockResult: - annotator = getAnnotator(thickness, roundness) + annotator = getAnnotator(color_lookup, thickness, roundness) annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, From 60c18bbb47d2a79f90d3183a8288de4588d94ad1 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Tue, 16 Jul 2024 15:06:21 -0700 Subject: [PATCH 05/36] Refactor --- .../core_steps/visualizations/base.py | 109 +++++++++++++++ .../core_steps/visualizations/bounding_box.py | 129 +++++------------- 2 files changed, 146 insertions(+), 92 deletions(-) create mode 100644 inference/core/workflows/core_steps/visualizations/base.py diff --git a/inference/core/workflows/core_steps/visualizations/base.py b/inference/core/workflows/core_steps/visualizations/base.py new file mode 100644 index 000000000..b447a9cae --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/base.py @@ -0,0 +1,109 @@ +from abc import ABC, abstractmethod +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import AliasChoices, ConfigDict, Field + +from inference.core.workflows.entities.base import ( + OutputDefinition, + WorkflowImageData, +) +from inference.core.workflows.entities.types import ( + # IMAGE_KIND, + # OBJECT_DETECTION_PREDICTION_KIND, + # INSTANCE_SEGMENTATION_PREDICTION_KIND, + # KEYPOINT_DETECTION_PREDICTION_KIND, + BATCH_OF_IMAGES_KIND, + BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + BOOLEAN_KIND, + STRING_KIND, + StepOutputImageSelector, + StepOutputSelector, + WorkflowImageSelector, + WorkflowParameterSelector +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, +) + +OUTPUT_IMAGE_KEY: str = "image" + +class VisualizationManifest(WorkflowBlockManifest, ABC): + model_config = ConfigDict( + 
json_schema_extra={ + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + predictions: StepOutputSelector( + kind=[ + BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + ] + ) = Field( # type: ignore + description="Predictions", + examples=["$steps.object_detection_model.predictions"], + ) + image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + title="Input Image", + description="The input image for this step.", + examples=["$inputs.image", "$steps.cropping.crops"], + validation_alias=AliasChoices("image", "images"), + ) + + copy_image: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + description="Duplicate the image contents (vs overwriting the image in place). Deselect for chained visualizations that should stack on previous ones where the intermediate state is not needed.", + default=True + ) + + color_lookup: Union[ + Literal[ + "INDEX", + "CLASS", + "TRACK" + ], + WorkflowParameterSelector(kind=[STRING_KIND]), + ] = Field( # type: ignore + default="CLASS", + description="Strategy to use for mapping colors to annotations.", + examples=["CLASS", "$inputs.color_lookup"], + ) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition( + name=OUTPUT_IMAGE_KEY, + kind=[ + BATCH_OF_IMAGES_KIND, + ], + ), + ] + +class VisualizationBlock(WorkflowBlock, ABC): + def __init__(self): + pass + + @classmethod + @abstractmethod + def get_manifest(cls) -> Type[VisualizationManifest]: + pass + + @abstractmethod + def getAnnotator(self) -> sv.annotators.base.BaseAnnotator: + pass + + @abstractmethod + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_lookup: Optional[str] + ) -> BlockResult: + pass diff --git a/inference/core/workflows/core_steps/visualizations/bounding_box.py 
b/inference/core/workflows/core_steps/visualizations/bounding_box.py index 7374fe172..bcf31851a 100644 --- a/inference/core/workflows/core_steps/visualizations/bounding_box.py +++ b/inference/core/workflows/core_steps/visualizations/bounding_box.py @@ -1,39 +1,30 @@ -from dataclasses import replace -from typing import List, Literal, Optional, Type, Union +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationManifest, + VisualizationBlock +) + +from typing import Literal, Optional, Type, Union import supervision as sv -from pydantic import AliasChoices, ConfigDict, Field +from pydantic import ConfigDict, Field from inference.core.workflows.entities.base import ( - OutputDefinition, WorkflowImageData, ) from inference.core.workflows.entities.types import ( - # IMAGE_KIND, - # OBJECT_DETECTION_PREDICTION_KIND, - # INSTANCE_SEGMENTATION_PREDICTION_KIND, - # KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_IMAGES_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, INTEGER_KIND, FloatZeroToOne, FLOAT_ZERO_TO_ONE_KIND, - BOOLEAN_KIND, STRING_KIND, - StepOutputImageSelector, - StepOutputSelector, - WorkflowImageSelector, WorkflowParameterSelector ) from inference.core.workflows.prototypes.block import ( BlockResult, - WorkflowBlock, WorkflowBlockManifest, ) OUTPUT_IMAGE_KEY: str = "image" + TYPE: str = "BoundingBoxVisualization" SHORT_DESCRIPTION = ( "Draws a box around detected objects in an image." @@ -43,8 +34,8 @@ objects in an image using Supervision's `sv.RoundBoxAnnotator`. 
""" - -class BoundingBoxManifest(WorkflowBlockManifest): +class BoundingBoxManifest(VisualizationManifest): + type: Literal[f"{TYPE}"] model_config = ConfigDict( json_schema_extra={ "short_description": SHORT_DESCRIPTION, @@ -53,92 +44,46 @@ class BoundingBoxManifest(WorkflowBlockManifest): "block_type": "visualization", } ) - type: Literal[f"{TYPE}"] - predictions: StepOutputSelector( - kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - ] - ) = Field( # type: ignore - description="Predictions", - examples=["$steps.object_detection_model.predictions"], - ) - image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( - title="Input Image", - description="The input image for this step.", - examples=["$inputs.image", "$steps.cropping.crops"], - validation_alias=AliasChoices("image", "images"), - ) - - copy_image: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( - description="Duplicate the image contents (vs overwriting the image in place). 
Deselect for chained visualizations that should stack on previous ones where the intermediate state is not needed.", - default=True - ) - - color_lookup: Union[ - Literal[ - "INDEX", - "CLASS", - "TRACK" - ], - WorkflowParameterSelector(kind=[STRING_KIND]), - ] = Field( - default="CLASS", - description="Strategy to use for mapping colors to annotations.", - examples=["CLASS", "$inputs.color_lookup"], - ) - - thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( + + thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the bounding box in pixels.", default=2, ) - roundness: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( + roundness: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Roundness of the corners of the bounding box.", default=0.0, ) - @classmethod - def describe_outputs(cls) -> List[OutputDefinition]: - return [ - OutputDefinition( - name=OUTPUT_IMAGE_KEY, - kind=[ - BATCH_OF_IMAGES_KIND, - ], - ), - ] - -annotatorCache = {} - -def getAnnotator( - color_lookup:str, - thickness:int, - roundness: float -): - key = f"{color_lookup}_{thickness}_{roundness}" - if key not in annotatorCache: - if roundness == 0: - annotatorCache[key] = sv.BoxAnnotator( - color_lookup=getattr(sv.annotators.utils.ColorLookup, color_lookup), - thickness=thickness - ) - else: - annotatorCache[key] = sv.RoundBoxAnnotator( - color_lookup=getattr(sv.annotators.utils.ColorLookup, color_lookup), - thickness=thickness, - roundness=roundness - ) - return annotatorCache[key] -class BoundingBoxVisualizationBlock(WorkflowBlock): +class BoundingBoxVisualizationBlock(VisualizationBlock): def __init__(self): - pass + self.annotatorCache = {} @classmethod def get_manifest(cls) -> Type[WorkflowBlockManifest]: return BoundingBoxManifest + def getAnnotator( + self, + color_lookup:str, + thickness:int, + 
roundness: float + ) -> sv.annotators.base.BaseAnnotator: + key = f"{color_lookup}_{thickness}_{roundness}" + if key not in self.annotatorCache: + if roundness == 0: + self.annotatorCache[key] = sv.BoxAnnotator( + color_lookup=getattr(sv.annotators.utils.ColorLookup, color_lookup), + thickness=thickness + ) + else: + self.annotatorCache[key] = sv.RoundBoxAnnotator( + color_lookup=getattr(sv.annotators.utils.ColorLookup, color_lookup), + thickness=thickness, + roundness=roundness + ) + return self.annotatorCache[key] + async def run( self, image: WorkflowImageData, @@ -148,7 +93,7 @@ async def run( thickness: Optional[int], roundness: Optional[float], ) -> BlockResult: - annotator = getAnnotator(color_lookup, thickness, roundness) + annotator = self.getAnnotator(color_lookup, thickness, roundness) annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, From f75f58101dee92547d7978180a87a8a708ffcfd5 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Tue, 16 Jul 2024 16:34:12 -0700 Subject: [PATCH 06/36] Add color palettes --- .../core_steps/visualizations/base.py | 133 +++++++++++++++++- .../core_steps/visualizations/bounding_box.py | 43 ++++-- 2 files changed, 163 insertions(+), 13 deletions(-) diff --git a/inference/core/workflows/core_steps/visualizations/base.py b/inference/core/workflows/core_steps/visualizations/base.py index b447a9cae..926e90304 100644 --- a/inference/core/workflows/core_steps/visualizations/base.py +++ b/inference/core/workflows/core_steps/visualizations/base.py @@ -17,8 +17,10 @@ BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + INTEGER_KIND, BOOLEAN_KIND, STRING_KIND, + LIST_OF_VALUES_KIND, StepOutputImageSelector, StepOutputSelector, WorkflowImageSelector, @@ -61,7 +63,96 @@ class VisualizationManifest(WorkflowBlockManifest, ABC): default=True ) - color_lookup: Union[ + color_palette: Union[ + Literal[ + 
"DEFAULT", + "CUSTOM", + "ROBOFLOW", + + "Matplotlib Viridis", + "Matplotlib Plasma", + "Matplotlib Inferno", + "Matplotlib Magma", + "Matplotlib Cividis", + + # 'LinearSegmentedColormap' object has no attribute 'colors' + # "Matplotlib Twilight", + # "Matplotlib Twilight_Shifted", + + # 'LinearSegmentedColormap' object has no attribute 'colors' + # "Matplotlib HSV", + # "Matplotlib Jet", + # "Matplotlib Turbo", + # "Matplotlib Rainbow", + # "Matplotlib gist_rainbow", + # "Matplotlib nipy_spectral", + # "Matplotlib gist_ncar", + + "Matplotlib Pastel1", + "Matplotlib Pastel2", + "Matplotlib Paired", + "Matplotlib Accent", + "Matplotlib Dark2", + "Matplotlib Set1", + "Matplotlib Set2", + "Matplotlib Set3", + "Matplotlib Tab10", + "Matplotlib Tab20", + "Matplotlib Tab20b", + "Matplotlib Tab20c", + + # 'LinearSegmentedColormap' object has no attribute 'colors' + # "Matplotlib Ocean", + # "Matplotlib Gist_Earth", + # "Matplotlib Terrain", + # "Matplotlib Stern", + # "Matplotlib gnuplot", + # "Matplotlib gnuplot2", + + # 'LinearSegmentedColormap' object has no attribute 'colors' + # "Matplotlib Spring", + # "Matplotlib Summer", + # "Matplotlib Autumn", + # "Matplotlib Winter", + # "Matplotlib Cool", + # "Matplotlib Hot", + # "Matplotlib Copper", + # "Matplotlib Bone", + + # "Matplotlib Greys_R", + # "Matplotlib Purples_R", + # "Matplotlib Blues_R", + # "Matplotlib Greens_R", + # "Matplotlib Oranges_R", + # "Matplotlib Reds_R", + ], + WorkflowParameterSelector(kind=[STRING_KIND]), + ] = Field( # type: ignore + default="DEFAULT", + description="Color palette to use for annotations.", + examples=["DEFAULT", "$inputs.color_palette"], + ) + + palette_size: Union[ + INTEGER_KIND, + WorkflowParameterSelector(kind=[INTEGER_KIND]), + ] = Field( # type: ignore + default=10, + description="Number of colors in the color palette. 
Applies when using a matplotlib `color_palette`.", + examples=[10, "$inputs.palette_size"], + ) + + custom_colors: Union[ + List[str], + WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]) + ] = Field( # type: ignore + default=[], + description="List of colors to use for annotations when `color_palette` is set to \"CUSTOM\".", + examples=[["#FF0000", "#00FF00", "#0000FF"], "$inputs.custom_colors"], + ) + + + color_axis: Union[ Literal[ "INDEX", "CLASS", @@ -71,7 +162,7 @@ class VisualizationManifest(WorkflowBlockManifest, ABC): ] = Field( # type: ignore default="CLASS", description="Strategy to use for mapping colors to annotations.", - examples=["CLASS", "$inputs.color_lookup"], + examples=["CLASS", "$inputs.color_axis"], ) @classmethod @@ -98,12 +189,48 @@ def get_manifest(cls) -> Type[VisualizationManifest]: def getAnnotator(self) -> sv.annotators.base.BaseAnnotator: pass + @classmethod + def getPalette(self, color_palette, palette_size, custom_colors): + if color_palette == "CUSTOM": + return custom_colors + elif hasattr(sv.ColorPalette, color_palette): + return getattr(sv.ColorPalette, color_palette) + else: + palette_name = color_palette.replace("Matplotlib ", "") + + if palette_name in [ + "Greys_R", + "Purples_R", + "Blues_R", + "Greens_R", + "Oranges_R", + "Reds_R", + + "Wistia", + "Pastel1", + "Pastel2", + "Paired", + "Accent", + "Dark2", + "Set1", + "Set2", + "Set3", + ]: + palette_name = palette_name.capitalize() + else: + palette_name = palette_name.lower() + + return sv.ColorPalette.from_matplotlib(palette_name, int(palette_size)) + @abstractmethod async def run( self, image: WorkflowImageData, predictions: sv.Detections, copy_image: bool, - color_lookup: Optional[str] + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], ) -> BlockResult: pass diff --git a/inference/core/workflows/core_steps/visualizations/bounding_box.py 
b/inference/core/workflows/core_steps/visualizations/bounding_box.py index bcf31851a..8eb046893 100644 --- a/inference/core/workflows/core_steps/visualizations/bounding_box.py +++ b/inference/core/workflows/core_steps/visualizations/bounding_box.py @@ -3,7 +3,7 @@ VisualizationBlock ) -from typing import Literal, Optional, Type, Union +from typing import List, Literal, Optional, Type, Union import supervision as sv from pydantic import ConfigDict, Field @@ -15,7 +15,6 @@ INTEGER_KIND, FloatZeroToOne, FLOAT_ZERO_TO_ONE_KIND, - STRING_KIND, WorkflowParameterSelector ) from inference.core.workflows.prototypes.block import ( @@ -65,35 +64,59 @@ def get_manifest(cls) -> Type[WorkflowBlockManifest]: def getAnnotator( self, - color_lookup:str, thickness:int, - roundness: float + roundness: float, + color_palette: str, + palette_size: int, + custom_colors: Optional[List[str]], + color_axis:str, ) -> sv.annotators.base.BaseAnnotator: - key = f"{color_lookup}_{thickness}_{roundness}" + key = "_".join(map(str, [ + color_palette, + palette_size, + color_axis, + thickness, + roundness + ])) + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + if roundness == 0: self.annotatorCache[key] = sv.BoxAnnotator( - color_lookup=getattr(sv.annotators.utils.ColorLookup, color_lookup), + color=palette, + color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), thickness=thickness ) else: self.annotatorCache[key] = sv.RoundBoxAnnotator( - color_lookup=getattr(sv.annotators.utils.ColorLookup, color_lookup), + color=palette, + color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), thickness=thickness, roundness=roundness ) - return self.annotatorCache[key] + return self.annotatorCache[key] async def run( self, image: WorkflowImageData, predictions: sv.Detections, copy_image: bool, - color_lookup: Optional[str], + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + 
color_axis: Optional[str], thickness: Optional[int], roundness: Optional[float], ) -> BlockResult: - annotator = self.getAnnotator(color_lookup, thickness, roundness) + annotator = self.getAnnotator( + thickness, + roundness, + color_palette, + palette_size, + custom_colors, + color_axis + ) annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, From d5525e01ee340406ff2b1eb988f523974a26829a Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Tue, 16 Jul 2024 17:00:08 -0700 Subject: [PATCH 07/36] Add Corner Annotator --- inference/core/workflows/core_steps/loader.py | 5 + .../core_steps/visualizations/bounding_box.py | 12 +- .../core_steps/visualizations/corner.py | 126 ++++++++++++++++++ 3 files changed, 137 insertions(+), 6 deletions(-) create mode 100644 inference/core/workflows/core_steps/visualizations/corner.py diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index f81d39c7c..50156a539 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -81,6 +81,10 @@ BoundingBoxVisualizationBlock ) +from inference.core.workflows.core_steps.visualizations.corner import ( + CornerVisualizationBlock +) + from inference.core.workflows.prototypes.block import WorkflowBlock @@ -116,4 +120,5 @@ def load_blocks() -> List[Type[WorkflowBlock]]: FirstNonEmptyOrDefaultBlock, BoundingBoxVisualizationBlock, + CornerVisualizationBlock ] diff --git a/inference/core/workflows/core_steps/visualizations/bounding_box.py b/inference/core/workflows/core_steps/visualizations/bounding_box.py index 8eb046893..4c2214d2f 100644 --- a/inference/core/workflows/core_steps/visualizations/bounding_box.py +++ b/inference/core/workflows/core_steps/visualizations/bounding_box.py @@ -64,12 +64,12 @@ def get_manifest(cls) -> Type[WorkflowBlockManifest]: def getAnnotator( self, - thickness:int, - roundness: float, color_palette: str, palette_size: 
int, - custom_colors: Optional[List[str]], + custom_colors: List[str], color_axis:str, + thickness:int, + roundness: float, ) -> sv.annotators.base.BaseAnnotator: key = "_".join(map(str, [ color_palette, @@ -110,12 +110,12 @@ async def run( roundness: Optional[float], ) -> BlockResult: annotator = self.getAnnotator( - thickness, - roundness, color_palette, palette_size, custom_colors, - color_axis + color_axis, + thickness, + roundness, ) annotated_image = annotator.annotate( diff --git a/inference/core/workflows/core_steps/visualizations/corner.py b/inference/core/workflows/core_steps/visualizations/corner.py new file mode 100644 index 000000000..93a2f61d1 --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/corner.py @@ -0,0 +1,126 @@ +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationManifest, + VisualizationBlock +) + +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.entities.base import ( + WorkflowImageData, +) +from inference.core.workflows.entities.types import ( + INTEGER_KIND, + WorkflowParameterSelector +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlockManifest, +) + +OUTPUT_IMAGE_KEY: str = "image" + +TYPE: str = "CornerVisualization" +SHORT_DESCRIPTION = ( + "Draws the corners of detected objects in an image." +) +LONG_DESCRIPTION = """ +The `CornerVisualization` block draws the corners of detected +objects in an image using Supervision's `sv.BoxCornerAnnotator`. 
+""" + +class CornerManifest(VisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Thickness of the lines in pixels.", + default=4, + ) + + corner_length: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Length of the corner lines in pixels.", + default=15, + ) + +class CornerVisualizationBlock(VisualizationBlock): + def __init__(self): + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return CornerManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + thickness: int, + corner_length: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join(map(str, [ + color_palette, + palette_size, + color_axis, + thickness, + corner_length, + ])) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + self.annotatorCache[key] = sv.BoxCornerAnnotator( + color=palette, + color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + thickness=thickness, + corner_length=corner_length + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + thickness: Optional[int], + corner_length: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + thickness, + corner_length, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() 
if copy_image else image.numpy_image, + detections=predictions + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return { + OUTPUT_IMAGE_KEY: output + } From 818bf5f2647c658ed4b93b1a2effb9e6899f73d5 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Tue, 16 Jul 2024 17:06:30 -0700 Subject: [PATCH 08/36] Add Color Visualizer --- inference/core/workflows/core_steps/loader.py | 8 +- .../core_steps/visualizations/color.py | 117 ++++++++++++++++++ 2 files changed, 123 insertions(+), 2 deletions(-) create mode 100644 inference/core/workflows/core_steps/visualizations/color.py diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index 50156a539..c488b1463 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -77,10 +77,13 @@ RelativeStaticCropBlock, ) +# Visualizers from inference.core.workflows.core_steps.visualizations.bounding_box import ( BoundingBoxVisualizationBlock ) - +from inference.core.workflows.core_steps.visualizations.color import ( + ColorVisualizationBlock +) from inference.core.workflows.core_steps.visualizations.corner import ( CornerVisualizationBlock ) @@ -120,5 +123,6 @@ def load_blocks() -> List[Type[WorkflowBlock]]: FirstNonEmptyOrDefaultBlock, BoundingBoxVisualizationBlock, - CornerVisualizationBlock + ColorVisualizationBlock, + CornerVisualizationBlock, ] diff --git a/inference/core/workflows/core_steps/visualizations/color.py b/inference/core/workflows/core_steps/visualizations/color.py new file mode 100644 index 000000000..395a195b7 --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/color.py @@ -0,0 +1,117 @@ +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationManifest, + VisualizationBlock +) + +from typing import List, Literal, Optional, Type, 
Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.entities.base import ( + WorkflowImageData, +) +from inference.core.workflows.entities.types import ( + FloatZeroToOne, + FLOAT_ZERO_TO_ONE_KIND, + WorkflowParameterSelector +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlockManifest, +) + +OUTPUT_IMAGE_KEY: str = "image" + +TYPE: str = "ColorVisualization" +SHORT_DESCRIPTION = ( + "Paints a solid color on detected objects in an image." +) +LONG_DESCRIPTION = """ +The `ColorVisualization` block paints a solid color on detected +objects in an image using Supervision's `sv.ColorAnnotator`. +""" + +class ColorManifest(VisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + description="Transparency of the color overlay.", + default=0.5, + ) + +class ColorVisualizationBlock(VisualizationBlock): + def __init__(self): + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return ColorManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + opacity: float, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join(map(str, [ + color_palette, + palette_size, + color_axis, + opacity, + ])) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + self.annotatorCache[key] = sv.ColorAnnotator( + color=palette, + color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + opacity=opacity + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + 
predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + opacity: Optional[float], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + opacity, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return { + OUTPUT_IMAGE_KEY: output + } From ffcaae12587bcabdf752024dc7894e72ef1bc6b7 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Tue, 16 Jul 2024 17:09:48 -0700 Subject: [PATCH 09/36] Add Circle Annotator --- inference/core/workflows/core_steps/loader.py | 4 + .../core_steps/visualizations/circle.py | 116 ++++++++++++++++++ 2 files changed, 120 insertions(+) create mode 100644 inference/core/workflows/core_steps/visualizations/circle.py diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index c488b1463..6ae0a8665 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -81,6 +81,9 @@ from inference.core.workflows.core_steps.visualizations.bounding_box import ( BoundingBoxVisualizationBlock ) +from inference.core.workflows.core_steps.visualizations.circle import ( + CircleVisualizationBlock +) from inference.core.workflows.core_steps.visualizations.color import ( ColorVisualizationBlock ) @@ -123,6 +126,7 @@ def load_blocks() -> List[Type[WorkflowBlock]]: FirstNonEmptyOrDefaultBlock, BoundingBoxVisualizationBlock, + CircleVisualizationBlock, ColorVisualizationBlock, CornerVisualizationBlock, ] diff --git a/inference/core/workflows/core_steps/visualizations/circle.py 
b/inference/core/workflows/core_steps/visualizations/circle.py new file mode 100644 index 000000000..37f156b5d --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/circle.py @@ -0,0 +1,116 @@ +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationManifest, + VisualizationBlock +) + +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.entities.base import ( + WorkflowImageData, +) +from inference.core.workflows.entities.types import ( + INTEGER_KIND, + WorkflowParameterSelector +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlockManifest, +) + +OUTPUT_IMAGE_KEY: str = "image" + +TYPE: str = "CircleVisualization" +SHORT_DESCRIPTION = ( + "Draws a circle around detected objects in an image." +) +LONG_DESCRIPTION = """ +The `CircleVisualization` block draws a circle around detected +objects in an image using Supervision's `sv.CircleAnnotator`. 
+""" + +class CircleManifest(VisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Thickness of the lines in pixels.", + default=2, + ) + +class CircleVisualizationBlock(VisualizationBlock): + def __init__(self): + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return CircleManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + thickness: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join(map(str, [ + color_palette, + palette_size, + color_axis, + thickness, + ])) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + self.annotatorCache[key] = sv.CircleAnnotator( + color=palette, + color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + thickness=thickness, + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + thickness: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + thickness, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return { + OUTPUT_IMAGE_KEY: output + } 
From 48e4abd9aee3133322f432cbd7ac89ced6738057 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Tue, 16 Jul 2024 17:25:39 -0700 Subject: [PATCH 10/36] Add Dot Annotator + Examples --- inference/core/workflows/core_steps/loader.py | 4 + .../core_steps/visualizations/bounding_box.py | 2 + .../core_steps/visualizations/circle.py | 1 + .../core_steps/visualizations/color.py | 1 + .../core_steps/visualizations/corner.py | 2 + .../core_steps/visualizations/dot.py | 154 ++++++++++++++++++ 6 files changed, 164 insertions(+) create mode 100644 inference/core/workflows/core_steps/visualizations/dot.py diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index 6ae0a8665..00d553f8d 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -90,6 +90,9 @@ from inference.core.workflows.core_steps.visualizations.corner import ( CornerVisualizationBlock ) +from inference.core.workflows.core_steps.visualizations.dot import ( + DotVisualizationBlock +) from inference.core.workflows.prototypes.block import WorkflowBlock @@ -129,4 +132,5 @@ def load_blocks() -> List[Type[WorkflowBlock]]: CircleVisualizationBlock, ColorVisualizationBlock, CornerVisualizationBlock, + DotVisualizationBlock, ] diff --git a/inference/core/workflows/core_steps/visualizations/bounding_box.py b/inference/core/workflows/core_steps/visualizations/bounding_box.py index 4c2214d2f..936398861 100644 --- a/inference/core/workflows/core_steps/visualizations/bounding_box.py +++ b/inference/core/workflows/core_steps/visualizations/bounding_box.py @@ -47,11 +47,13 @@ class BoundingBoxManifest(VisualizationManifest): thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the bounding box in pixels.", default=2, + examples=[2, "$inputs.thickness"], ) roundness: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # 
type: ignore description="Roundness of the corners of the bounding box.", default=0.0, + examples=[0.0, "$inputs.roundness"], ) class BoundingBoxVisualizationBlock(VisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/circle.py b/inference/core/workflows/core_steps/visualizations/circle.py index 37f156b5d..c83630d99 100644 --- a/inference/core/workflows/core_steps/visualizations/circle.py +++ b/inference/core/workflows/core_steps/visualizations/circle.py @@ -45,6 +45,7 @@ class CircleManifest(VisualizationManifest): thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the lines in pixels.", default=2, + examples=[2, "$inputs.thickness"], ) class CircleVisualizationBlock(VisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/color.py b/inference/core/workflows/core_steps/visualizations/color.py index 395a195b7..f5f85f68c 100644 --- a/inference/core/workflows/core_steps/visualizations/color.py +++ b/inference/core/workflows/core_steps/visualizations/color.py @@ -46,6 +46,7 @@ class ColorManifest(VisualizationManifest): opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Transparency of the color overlay.", default=0.5, + examples=[0.5, "$inputs.opacity"], ) class ColorVisualizationBlock(VisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/corner.py b/inference/core/workflows/core_steps/visualizations/corner.py index 93a2f61d1..e456452a7 100644 --- a/inference/core/workflows/core_steps/visualizations/corner.py +++ b/inference/core/workflows/core_steps/visualizations/corner.py @@ -45,11 +45,13 @@ class CornerManifest(VisualizationManifest): thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the lines in pixels.", default=4, + examples=[4, "$inputs.thickness"], ) 
corner_length: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Length of the corner lines in pixels.", default=15, + examples=[15, "$inputs.corner_length"], ) class CornerVisualizationBlock(VisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/dot.py b/inference/core/workflows/core_steps/visualizations/dot.py new file mode 100644 index 000000000..0596bb4f8 --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/dot.py @@ -0,0 +1,154 @@ +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationManifest, + VisualizationBlock +) + +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.entities.base import ( + WorkflowImageData, +) +from inference.core.workflows.entities.types import ( + INTEGER_KIND, + STRING_KIND, + WorkflowParameterSelector +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlockManifest, +) + +OUTPUT_IMAGE_KEY: str = "image" + +TYPE: str = "DotVisualization" +SHORT_DESCRIPTION = ( + "Draws dots on an image at specific coordinates based on provided detections." +) +LONG_DESCRIPTION = """ +The `DotVisualization` block draws dots on an image at specific coordinates +based on provided detections using Supervision's `sv.DotAnnotator`. 
+""" + +class DotManifest(VisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + position: Union[ + Literal[ + "CENTER", + "CENTER_LEFT", + "CENTER_RIGHT", + "TOP_CENTER", + "TOP_LEFT", + "TOP_RIGHT", + "BOTTOM_LEFT", + "BOTTOM_CENTER", + "BOTTOM_RIGHT", + "CENTER_OF_MASS", + ], + WorkflowParameterSelector(kind=[STRING_KIND]), + ] = Field( # type: ignore + default="CENTER", + description="The anchor position for placing the dot.", + examples=["CENTER", "$inputs.position"], + ) + + radius: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Radius of the dot in pixels.", + default=4, + examples=[4, "$inputs.radius"], + ) + + outline_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Thickness of the outline of the dot in pixels.", + default=2, + examples=[2, "$inputs.outline_thickness"], + ) + +class DotVisualizationBlock(VisualizationBlock): + def __init__(self): + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return DotManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + position: str, + radius: int, + outline_thickness: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join(map(str, [ + color_palette, + palette_size, + color_axis, + position, + radius, + outline_thickness, + ])) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + self.annotatorCache[key] = sv.DotAnnotator( + color=palette, + color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + position=getattr(sv.Position, position), + radius=radius, + outline_thickness=outline_thickness + ) + + 
return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + position: Optional[str], + radius: Optional[int], + outline_thickness: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + position, + radius, + outline_thickness, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return { + OUTPUT_IMAGE_KEY: output + } From 0ba8c89f25082427c34bf0b99841b3dfd4f8dc4a Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Tue, 16 Jul 2024 17:31:14 -0700 Subject: [PATCH 11/36] Add Triangle Annotator --- inference/core/workflows/core_steps/loader.py | 4 + .../core_steps/visualizations/dot.py | 2 +- .../core_steps/visualizations/triangle.py | 165 ++++++++++++++++++ 3 files changed, 170 insertions(+), 1 deletion(-) create mode 100644 inference/core/workflows/core_steps/visualizations/triangle.py diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index 00d553f8d..fd52ff293 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -93,6 +93,9 @@ from inference.core.workflows.core_steps.visualizations.dot import ( DotVisualizationBlock ) +from inference.core.workflows.core_steps.visualizations.triangle import ( + TriangleVisualizationBlock +) from inference.core.workflows.prototypes.block import WorkflowBlock @@ -133,4 +136,5 @@ def load_blocks() -> List[Type[WorkflowBlock]]: ColorVisualizationBlock, 
CornerVisualizationBlock, DotVisualizationBlock, + TriangleVisualizationBlock, ] diff --git a/inference/core/workflows/core_steps/visualizations/dot.py b/inference/core/workflows/core_steps/visualizations/dot.py index 0596bb4f8..1c0de2b2a 100644 --- a/inference/core/workflows/core_steps/visualizations/dot.py +++ b/inference/core/workflows/core_steps/visualizations/dot.py @@ -71,7 +71,7 @@ class DotManifest(VisualizationManifest): outline_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the outline of the dot in pixels.", - default=2, + default=0, examples=[2, "$inputs.outline_thickness"], ) diff --git a/inference/core/workflows/core_steps/visualizations/triangle.py b/inference/core/workflows/core_steps/visualizations/triangle.py new file mode 100644 index 000000000..5ab1ddfa8 --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/triangle.py @@ -0,0 +1,165 @@ +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationManifest, + VisualizationBlock +) + +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.entities.base import ( + WorkflowImageData, +) +from inference.core.workflows.entities.types import ( + INTEGER_KIND, + STRING_KIND, + WorkflowParameterSelector +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlockManifest, +) + +OUTPUT_IMAGE_KEY: str = "image" + +TYPE: str = "TriangleVisualization" +SHORT_DESCRIPTION = ( + "Draws triangle markers on an image at specific coordinates based on provided detections." +) +LONG_DESCRIPTION = """ +The `TriangleVisualization` block draws triangle markers on an image at specific coordinates +based on provided detections using Supervision's `sv.TriangleAnnotator`. 
+""" + +class TriangleManifest(VisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + position: Union[ + Literal[ + "CENTER", + "CENTER_LEFT", + "CENTER_RIGHT", + "TOP_CENTER", + "TOP_LEFT", + "TOP_RIGHT", + "BOTTOM_LEFT", + "BOTTOM_CENTER", + "BOTTOM_RIGHT", + "CENTER_OF_MASS", + ], + WorkflowParameterSelector(kind=[STRING_KIND]), + ] = Field( # type: ignore + default="TOP_CENTER", + description="The anchor position for placing the triangle.", + examples=["CENTER", "$inputs.position"], + ) + + base: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Base width of the triangle in pixels.", + default=10, + examples=[10, "$inputs.base"], + ) + + height: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Height of the triangle in pixels.", + default=10, + examples=[10, "$inputs.height"], + ) + + outline_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Thickness of the outline of the triangle in pixels.", + default=0, + examples=[2, "$inputs.outline_thickness"], + ) + +class TriangleVisualizationBlock(VisualizationBlock): + def __init__(self): + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return TriangleManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + position: str, + base: int, + height: int, + outline_thickness: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join(map(str, [ + color_palette, + palette_size, + color_axis, + position, + base, + height, + outline_thickness, + ])) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, 
custom_colors) + + self.annotatorCache[key] = sv.TriangleAnnotator( + color=palette, + color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + position=getattr(sv.Position, position), + base=base, + height=height, + outline_thickness=outline_thickness + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + position: Optional[str], + base: Optional[int], + height: Optional[int], + outline_thickness: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + position, + base, + height, + outline_thickness, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return { + OUTPUT_IMAGE_KEY: output + } From f93cabad690abc37e8ee06f155667ee4acc3b5bf Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Tue, 16 Jul 2024 17:51:26 -0700 Subject: [PATCH 12/36] Add Ellipse Annotator --- inference/core/workflows/core_steps/loader.py | 4 + .../core_steps/visualizations/ellipse.py | 139 ++++++++++++++++++ 2 files changed, 143 insertions(+) create mode 100644 inference/core/workflows/core_steps/visualizations/ellipse.py diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index fd52ff293..81de472f5 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -93,6 +93,9 @@ from inference.core.workflows.core_steps.visualizations.dot import ( DotVisualizationBlock ) +from 
inference.core.workflows.core_steps.visualizations.ellipse import ( + EllipseVisualizationBlock +) from inference.core.workflows.core_steps.visualizations.triangle import ( TriangleVisualizationBlock ) @@ -136,5 +139,6 @@ def load_blocks() -> List[Type[WorkflowBlock]]: ColorVisualizationBlock, CornerVisualizationBlock, DotVisualizationBlock, + EllipseVisualizationBlock, TriangleVisualizationBlock, ] diff --git a/inference/core/workflows/core_steps/visualizations/ellipse.py b/inference/core/workflows/core_steps/visualizations/ellipse.py new file mode 100644 index 000000000..a0234f5a3 --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/ellipse.py @@ -0,0 +1,139 @@ +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationManifest, + VisualizationBlock +) + +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.entities.base import ( + WorkflowImageData, +) +from inference.core.workflows.entities.types import ( + INTEGER_KIND, + WorkflowParameterSelector +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlockManifest, +) + +OUTPUT_IMAGE_KEY: str = "image" + +TYPE: str = "EllipseVisualization" +SHORT_DESCRIPTION = ( + "Draws ellipses that highlight detected objects in an image." +) +LONG_DESCRIPTION = """ +The `EllipseVisualization` block draws ellipses that highlight detected +objects in an image using Supervision's `sv.EllipseAnnotator`. 
+""" + +class EllipseManifest(VisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Thickness of the lines in pixels.", + default=2, + examples=[2, "$inputs.thickness"], + ) + + start_angle: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Starting angle of the ellipse in degrees.", + default=-45, + examples=[-45, "$inputs.start_angle"], + ) + + end_angle: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Ending angle of the ellipse in degrees.", + default=235, + examples=[235, "$inputs.end_angle"], + ) + +class EllipseVisualizationBlock(VisualizationBlock): + def __init__(self): + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return EllipseManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + thickness: int, + start_angle: int, + end_angle: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join(map(str, [ + color_palette, + palette_size, + color_axis, + thickness, + start_angle, + end_angle, + ])) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + self.annotatorCache[key] = sv.EllipseAnnotator( + color=palette, + color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + thickness=thickness, + start_angle=start_angle, + end_angle=end_angle + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + 
custom_colors: Optional[List[str]], + color_axis: Optional[str], + thickness: Optional[int], + start_angle: Optional[int], + end_angle: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + thickness, + start_angle, + end_angle, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return { + OUTPUT_IMAGE_KEY: output + } From 00bd13adf465e42aeeb14978419251b34b40919c Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Tue, 16 Jul 2024 18:05:07 -0700 Subject: [PATCH 13/36] Add Halo Annotator --- inference/core/workflows/core_steps/loader.py | 4 + .../core_steps/visualizations/halo.py | 141 ++++++++++++++++++ 2 files changed, 145 insertions(+) create mode 100644 inference/core/workflows/core_steps/visualizations/halo.py diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index 81de472f5..65408c246 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -96,6 +96,9 @@ from inference.core.workflows.core_steps.visualizations.ellipse import ( EllipseVisualizationBlock ) +from inference.core.workflows.core_steps.visualizations.halo import ( + HaloVisualizationBlock +) from inference.core.workflows.core_steps.visualizations.triangle import ( TriangleVisualizationBlock ) @@ -140,5 +143,6 @@ def load_blocks() -> List[Type[WorkflowBlock]]: CornerVisualizationBlock, DotVisualizationBlock, EllipseVisualizationBlock, + HaloVisualizationBlock, TriangleVisualizationBlock, ] diff --git a/inference/core/workflows/core_steps/visualizations/halo.py b/inference/core/workflows/core_steps/visualizations/halo.py new file mode 100644 
index 000000000..59a4c298a --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/halo.py @@ -0,0 +1,141 @@ +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationManifest, + VisualizationBlock +) + +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.entities.base import ( + WorkflowImageData, +) +from inference.core.workflows.entities.types import ( + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + FloatZeroToOne, + FLOAT_ZERO_TO_ONE_KIND, + INTEGER_KIND, + WorkflowParameterSelector, + StepOutputSelector +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlockManifest, +) + +OUTPUT_IMAGE_KEY: str = "image" + +TYPE: str = "HaloVisualization" +SHORT_DESCRIPTION = ( + "Paints a halo around detected objects in an image." +) +LONG_DESCRIPTION = """ +The `HaloVisualization` block uses a detected polygon +from an instance segmentation to draw a halo using +`sv.HaloAnnotator`. 
+""" + +class HaloManifest(VisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + predictions: StepOutputSelector( + kind=[ + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + ] + ) = Field( # type: ignore + description="Predictions", + examples=["$steps.instance_segmentation_model.predictions"], + ) + + opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + description="Transparency of the halo overlay.", + default=0.8, + examples=[0.8, "$inputs.opacity"], + ) + + kernel_size: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Size of the average pooling kernel used for creating the halo.", + default=40, + examples=[40, "$inputs.kernel_size"], + ) + +class HaloVisualizationBlock(VisualizationBlock): + def __init__(self): + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return HaloManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + opacity: float, + kernel_size: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join(map(str, [ + color_palette, + palette_size, + color_axis, + opacity, + kernel_size, + ])) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + self.annotatorCache[key] = sv.HaloAnnotator( + color=palette, + color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + opacity=opacity, kernel_size=kernel_size + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: 
Optional[str], + opacity: Optional[float], + kernel_size: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + opacity, + kernel_size, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return { + OUTPUT_IMAGE_KEY: output + } From 0aa5ad2812669c7892e0edd300bfce853d86804b Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Tue, 16 Jul 2024 18:10:54 -0700 Subject: [PATCH 14/36] Add Mask Annotator --- inference/core/workflows/core_steps/loader.py | 4 + .../core_steps/visualizations/mask.py | 131 ++++++++++++++++++ 2 files changed, 135 insertions(+) create mode 100644 inference/core/workflows/core_steps/visualizations/mask.py diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index 65408c246..2119f7f6b 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -99,6 +99,9 @@ from inference.core.workflows.core_steps.visualizations.halo import ( HaloVisualizationBlock ) +from inference.core.workflows.core_steps.visualizations.mask import ( + MaskVisualizationBlock +) from inference.core.workflows.core_steps.visualizations.triangle import ( TriangleVisualizationBlock ) @@ -144,5 +147,6 @@ def load_blocks() -> List[Type[WorkflowBlock]]: DotVisualizationBlock, EllipseVisualizationBlock, HaloVisualizationBlock, + MaskVisualizationBlock, TriangleVisualizationBlock, ] diff --git a/inference/core/workflows/core_steps/visualizations/mask.py b/inference/core/workflows/core_steps/visualizations/mask.py new file mode 100644 index 000000000..0e532e202 --- /dev/null +++ 
b/inference/core/workflows/core_steps/visualizations/mask.py @@ -0,0 +1,131 @@ +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationManifest, + VisualizationBlock +) + +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.entities.base import ( + WorkflowImageData, +) +from inference.core.workflows.entities.types import ( + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + FloatZeroToOne, + FLOAT_ZERO_TO_ONE_KIND, + INTEGER_KIND, + WorkflowParameterSelector, + StepOutputSelector +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlockManifest, +) + +OUTPUT_IMAGE_KEY: str = "image" + +TYPE: str = "MaskVisualization" +SHORT_DESCRIPTION = ( + "Paints a mask over detected objects in an image." +) +LONG_DESCRIPTION = """ +The `MaskVisualization` block uses a detected polygon +from an instance segmentation to draw a mask using +`sv.MaskAnnotator`. 
+""" + +class MaskManifest(VisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + predictions: StepOutputSelector( + kind=[ + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + ] + ) = Field( # type: ignore + description="Predictions", + examples=["$steps.instance_segmentation_model.predictions"], + ) + + opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + description="Transparency of the Mask overlay.", + default=0.5, + examples=[0.5, "$inputs.opacity"], + ) + +class MaskVisualizationBlock(VisualizationBlock): + def __init__(self): + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return MaskManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + opacity: float, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join(map(str, [ + color_palette, + palette_size, + color_axis, + opacity, + ])) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + self.annotatorCache[key] = sv.MaskAnnotator( + color=palette, + color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + opacity=opacity + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + opacity: Optional[float], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + opacity, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else 
image.numpy_image, + detections=predictions + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return { + OUTPUT_IMAGE_KEY: output + } From ece68fdc0015ec2b54b950a667e2c69b6b6d8c54 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Tue, 16 Jul 2024 18:14:37 -0700 Subject: [PATCH 15/36] Add Polygon Annotator --- inference/core/workflows/core_steps/loader.py | 4 + .../core_steps/visualizations/polygon.py | 131 ++++++++++++++++++ 2 files changed, 135 insertions(+) create mode 100644 inference/core/workflows/core_steps/visualizations/polygon.py diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index 2119f7f6b..a8254bdf0 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -102,6 +102,9 @@ from inference.core.workflows.core_steps.visualizations.mask import ( MaskVisualizationBlock ) +from inference.core.workflows.core_steps.visualizations.polygon import ( + PolygonVisualizationBlock +) from inference.core.workflows.core_steps.visualizations.triangle import ( TriangleVisualizationBlock ) @@ -148,5 +151,6 @@ def load_blocks() -> List[Type[WorkflowBlock]]: EllipseVisualizationBlock, HaloVisualizationBlock, MaskVisualizationBlock, + PolygonVisualizationBlock, TriangleVisualizationBlock, ] diff --git a/inference/core/workflows/core_steps/visualizations/polygon.py b/inference/core/workflows/core_steps/visualizations/polygon.py new file mode 100644 index 000000000..cb6e20b39 --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/polygon.py @@ -0,0 +1,131 @@ +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationManifest, + VisualizationBlock +) + +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from 
inference.core.workflows.entities.base import ( + WorkflowImageData, +) +from inference.core.workflows.entities.types import ( + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + FloatZeroToOne, + FLOAT_ZERO_TO_ONE_KIND, + INTEGER_KIND, + WorkflowParameterSelector, + StepOutputSelector +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlockManifest, +) + +OUTPUT_IMAGE_KEY: str = "image" + +TYPE: str = "PolygonVisualization" +SHORT_DESCRIPTION = ( + "Draws a polygon around detected objects in an image." +) +LONG_DESCRIPTION = """ +The `PolygonVisualization` block uses a detections from an +instance segmentation to draw polygons around objects using +`sv.PolygonAnnotator`. +""" + +class PolygonManifest(VisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + predictions: StepOutputSelector( + kind=[ + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + ] + ) = Field( # type: ignore + description="Predictions", + examples=["$steps.instance_segmentation_model.predictions"], + ) + + thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Thickness of the outline in pixels.", + default=2, + examples=[2, "$inputs.thickness"], + ) + +class PolygonVisualizationBlock(VisualizationBlock): + def __init__(self): + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return PolygonManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + thickness: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join(map(str, [ + color_palette, + palette_size, + color_axis, + thickness, + ])) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, 
custom_colors) + + self.annotatorCache[key] = sv.PolygonAnnotator( + color=palette, + color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + thickness=thickness + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + thickness: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + thickness, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return { + OUTPUT_IMAGE_KEY: output + } From 59b1407fecaaa208620f44009c2a830c1e7e26d3 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Tue, 16 Jul 2024 18:33:21 -0700 Subject: [PATCH 16/36] Add Blur Annotator --- inference/core/workflows/core_steps/loader.py | 4 + .../core_steps/visualizations/blur.py | 139 ++++++++++++++++++ 2 files changed, 143 insertions(+) create mode 100644 inference/core/workflows/core_steps/visualizations/blur.py diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index a8254bdf0..316ec7ca2 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -78,6 +78,9 @@ ) # Visualizers +from inference.core.workflows.core_steps.visualizations.blur import ( + BlurVisualizationBlock +) from inference.core.workflows.core_steps.visualizations.bounding_box import ( BoundingBoxVisualizationBlock ) @@ -143,6 +146,7 @@ def load_blocks() -> List[Type[WorkflowBlock]]: DimensionCollapseBlock, FirstNonEmptyOrDefaultBlock, + 
BlurVisualizationBlock, BoundingBoxVisualizationBlock, CircleVisualizationBlock, ColorVisualizationBlock, diff --git a/inference/core/workflows/core_steps/visualizations/blur.py b/inference/core/workflows/core_steps/visualizations/blur.py new file mode 100644 index 000000000..2f8650286 --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/blur.py @@ -0,0 +1,139 @@ +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import AliasChoices, ConfigDict, Field + +from inference.core.workflows.entities.base import ( + OutputDefinition, + WorkflowImageData, +) +from inference.core.workflows.entities.types import ( + # IMAGE_KIND, + # OBJECT_DETECTION_PREDICTION_KIND, + # INSTANCE_SEGMENTATION_PREDICTION_KIND, + # KEYPOINT_DETECTION_PREDICTION_KIND, + BATCH_OF_IMAGES_KIND, + BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + INTEGER_KIND, + BOOLEAN_KIND, + StepOutputImageSelector, + StepOutputSelector, + WorkflowImageSelector, + WorkflowParameterSelector +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, +) + +OUTPUT_IMAGE_KEY: str = "image" + +TYPE: str = "BlurVisualization" +SHORT_DESCRIPTION = ( + "Blurs detected objects in an image." +) +LONG_DESCRIPTION = """ +The `BlurVisualization` block blurs detected +objects in an image using Supervision's `sv.BlurAnnotator`. 
+""" + +class BlurManifest(WorkflowBlockManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + predictions: StepOutputSelector( + kind=[ + BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + ] + ) = Field( # type: ignore + description="Predictions", + examples=["$steps.object_detection_model.predictions"], + ) + image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + title="Input Image", + description="The input image for this step.", + examples=["$inputs.image", "$steps.cropping.crops"], + validation_alias=AliasChoices("image", "images"), + ) + + copy_image: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + description="Duplicate the image contents (vs overwriting the image in place). 
Deselect for chained visualizations that should stack on previous ones where the intermediate state is not needed.", + default=True + ) + + kernel_size: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Size of the average pooling kernel used for blurring.", + default=15, + examples=[15, "$inputs.thickness"], + ) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition( + name=OUTPUT_IMAGE_KEY, + kind=[ + BATCH_OF_IMAGES_KIND, + ], + ), + ] + +class BlurVisualizationBlock(WorkflowBlock): + def __init__(self): + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlurManifest + + def getAnnotator( + self, + kernel_size: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join(map(str, [ + kernel_size + ])) + + if key not in self.annotatorCache: + self.annotatorCache[key] = sv.BlurAnnotator( + kernel_size=kernel_size + ) + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + kernel_size: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + kernel_size, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return { + OUTPUT_IMAGE_KEY: output + } From 3413a7edeb2c9b5d1664586eb95e48ce83da70ef Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Tue, 16 Jul 2024 18:39:06 -0700 Subject: [PATCH 17/36] Add Pixelate Annotator --- inference/core/workflows/core_steps/loader.py | 4 + .../core_steps/visualizations/blur.py | 2 +- .../core_steps/visualizations/pixelate.py | 139 ++++++++++++++++++ 3 files changed, 144 insertions(+), 1 deletion(-) create mode 
100644 inference/core/workflows/core_steps/visualizations/pixelate.py diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index 316ec7ca2..12e7ffeea 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -105,6 +105,9 @@ from inference.core.workflows.core_steps.visualizations.mask import ( MaskVisualizationBlock ) +from inference.core.workflows.core_steps.visualizations.pixelate import ( + PixelateVisualizationBlock +) from inference.core.workflows.core_steps.visualizations.polygon import ( PolygonVisualizationBlock ) @@ -155,6 +158,7 @@ def load_blocks() -> List[Type[WorkflowBlock]]: EllipseVisualizationBlock, HaloVisualizationBlock, MaskVisualizationBlock, + PixelateVisualizationBlock, PolygonVisualizationBlock, TriangleVisualizationBlock, ] diff --git a/inference/core/workflows/core_steps/visualizations/blur.py b/inference/core/workflows/core_steps/visualizations/blur.py index 2f8650286..bdcccd54d 100644 --- a/inference/core/workflows/core_steps/visualizations/blur.py +++ b/inference/core/workflows/core_steps/visualizations/blur.py @@ -76,7 +76,7 @@ class BlurManifest(WorkflowBlockManifest): kernel_size: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Size of the average pooling kernel used for blurring.", default=15, - examples=[15, "$inputs.thickness"], + examples=[15, "$inputs.kernel_size"], ) @classmethod diff --git a/inference/core/workflows/core_steps/visualizations/pixelate.py b/inference/core/workflows/core_steps/visualizations/pixelate.py new file mode 100644 index 000000000..36f988d9e --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/pixelate.py @@ -0,0 +1,139 @@ +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import AliasChoices, ConfigDict, Field + +from inference.core.workflows.entities.base import ( + 
OutputDefinition, + WorkflowImageData, +) +from inference.core.workflows.entities.types import ( + # IMAGE_KIND, + # OBJECT_DETECTION_PREDICTION_KIND, + # INSTANCE_SEGMENTATION_PREDICTION_KIND, + # KEYPOINT_DETECTION_PREDICTION_KIND, + BATCH_OF_IMAGES_KIND, + BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + INTEGER_KIND, + BOOLEAN_KIND, + StepOutputImageSelector, + StepOutputSelector, + WorkflowImageSelector, + WorkflowParameterSelector +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, +) + +OUTPUT_IMAGE_KEY: str = "image" + +TYPE: str = "PixelateVisualization" +SHORT_DESCRIPTION = ( + "Pixelates detected objects in an image." +) +LONG_DESCRIPTION = """ +The `PixelateVisualization` block pixelates detected +objects in an image using Supervision's `sv.PixelateAnnotator`. +""" + +class PixelateManifest(WorkflowBlockManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + predictions: StepOutputSelector( + kind=[ + BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + ] + ) = Field( # type: ignore + description="Predictions", + examples=["$steps.object_detection_model.predictions"], + ) + image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + title="Input Image", + description="The input image for this step.", + examples=["$inputs.image", "$steps.cropping.crops"], + validation_alias=AliasChoices("image", "images"), + ) + + copy_image: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + description="Duplicate the image contents (vs overwriting the image in place). 
Deselect for chained visualizations that should stack on previous ones where the intermediate state is not needed.", + default=True + ) + + pixel_size: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Size of the pixelation.", + default=20, + examples=[20, "$inputs.pixel_size"], + ) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition( + name=OUTPUT_IMAGE_KEY, + kind=[ + BATCH_OF_IMAGES_KIND, + ], + ), + ] + +class PixelateVisualizationBlock(WorkflowBlock): + def __init__(self): + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return PixelateManifest + + def getAnnotator( + self, + pixel_size: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join(map(str, [ + pixel_size + ])) + + if key not in self.annotatorCache: + self.annotatorCache[key] = sv.PixelateAnnotator( + pixel_size=pixel_size + ) + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + pixel_size: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + pixel_size, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return { + OUTPUT_IMAGE_KEY: output + } From 052bcf4fcf6f3725529db39e16b34b95add7d380 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Tue, 16 Jul 2024 20:25:04 -0700 Subject: [PATCH 18/36] Add Label Annotator --- inference/core/workflows/core_steps/loader.py | 4 + .../core_steps/visualizations/label.py | 202 ++++++++++++++++++ 2 files changed, 206 insertions(+) create mode 100644 inference/core/workflows/core_steps/visualizations/label.py diff --git 
a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index 12e7ffeea..eced16cb3 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -102,6 +102,9 @@ from inference.core.workflows.core_steps.visualizations.halo import ( HaloVisualizationBlock ) +from inference.core.workflows.core_steps.visualizations.label import ( + LabelVisualizationBlock +) from inference.core.workflows.core_steps.visualizations.mask import ( MaskVisualizationBlock ) @@ -157,6 +160,7 @@ def load_blocks() -> List[Type[WorkflowBlock]]: DotVisualizationBlock, EllipseVisualizationBlock, HaloVisualizationBlock, + LabelVisualizationBlock, MaskVisualizationBlock, PixelateVisualizationBlock, PolygonVisualizationBlock, diff --git a/inference/core/workflows/core_steps/visualizations/label.py b/inference/core/workflows/core_steps/visualizations/label.py new file mode 100644 index 000000000..7eb21b2d9 --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/label.py @@ -0,0 +1,202 @@ +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationManifest, + VisualizationBlock +) + +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.entities.base import ( + WorkflowImageData, +) +from inference.core.workflows.entities.types import ( + INTEGER_KIND, + FLOAT_KIND, + FLOAT_ZERO_TO_ONE_KIND, + STRING_KIND, + WorkflowParameterSelector +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlockManifest, +) + +OUTPUT_IMAGE_KEY: str = "image" + +TYPE: str = "LabelVisualization" +SHORT_DESCRIPTION = ( + "Draws labels on an image at specific coordinates based on provided detections." 
+) +LONG_DESCRIPTION = """ +The `LabelVisualization` block draws labels on an image at specific coordinates +based on provided detections using Supervision's `sv.LabelAnnotator`. +""" + +class LabelManifest(VisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + text_position: Union[ + Literal[ + "CENTER", + "CENTER_LEFT", + "CENTER_RIGHT", + "TOP_CENTER", + "TOP_LEFT", + "TOP_RIGHT", + "BOTTOM_LEFT", + "BOTTOM_CENTER", + "BOTTOM_RIGHT", + "CENTER_OF_MASS", + ], + WorkflowParameterSelector(kind=[STRING_KIND]), + ] = Field( # type: ignore + default="TOP_LEFT", + description="The anchor position for placing the label.", + examples=["CENTER", "$inputs.text_position"], + ) + + text_color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore + description="Color of the text.", + default="WHITE", + examples=["WHITE", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.text_color"], + ) + + text_scale: Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] = Field( # type: ignore + description="Scale of the text.", + default=1.0, + examples=[1.0, "$inputs.text_scale"], + ) + + text_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Thickness of the text characters.", + default=1, + examples=[1, "$inputs.text_thickness"], + ) + + text_padding: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Padding around the text in pixels.", + default=10, + examples=[10, "$inputs.text_padding"], + ) + + border_radius: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Radius of the label in pixels.", + default=0, + examples=[0, "$inputs.border_radius"], + ) + +class LabelVisualizationBlock(VisualizationBlock): + def 
__init__(self): + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return LabelManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + text_position: str, + text_color: str, + text_scale: float, + text_thickness: int, + text_padding: int, + border_radius: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join(map(str, [ + color_palette, + palette_size, + color_axis, + text_position, + text_color, + text_scale, + text_thickness, + text_padding, + border_radius, + ])) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + if text_color.startswith("#"): + text_color = sv.Color.from_hex(text_color) + elif text_color.startswith("rgb"): + r, g, b = map(int, text_color[4:-1].split(",")) + text_color = sv.Color.from_rgb_tuple(r, g, b) + elif text_color.startswith("bgr"): + b, g, r = map(int, text_color[4:-1].split(",")) + text_color = sv.Color.from_bgr_tuple(b, g, r) + else: + text_color = getattr(sv.Color, text_color) + + print(f"Text color: {text_color}") + + self.annotatorCache[key] = sv.LabelAnnotator( + color=palette, + color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + text_position=getattr(sv.Position, text_position), + text_color=text_color, + text_scale=text_scale, + text_thickness=text_thickness, + text_padding=text_padding, + border_radius=border_radius + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + text_position: Optional[str], + text_color: Optional[str], + text_scale: Optional[float], + text_thickness: Optional[int], + text_padding: Optional[int], + border_radius: Optional[int], + ) -> BlockResult: + annotator = 
self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + text_position, + text_color, + text_scale, + text_thickness, + text_padding, + border_radius, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return { + OUTPUT_IMAGE_KEY: output + } From 99738a4ed5d3a4600af4653e2fdd7be1ca092cf9 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Tue, 16 Jul 2024 20:29:24 -0700 Subject: [PATCH 19/36] Fix tuples --- inference/core/workflows/core_steps/visualizations/label.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/inference/core/workflows/core_steps/visualizations/label.py b/inference/core/workflows/core_steps/visualizations/label.py index 7eb21b2d9..e0fae3a79 100644 --- a/inference/core/workflows/core_steps/visualizations/label.py +++ b/inference/core/workflows/core_steps/visualizations/label.py @@ -135,10 +135,10 @@ def getAnnotator( text_color = sv.Color.from_hex(text_color) elif text_color.startswith("rgb"): r, g, b = map(int, text_color[4:-1].split(",")) - text_color = sv.Color.from_rgb_tuple(r, g, b) + text_color = sv.Color.from_rgb_tuple((r, g, b)) elif text_color.startswith("bgr"): b, g, r = map(int, text_color[4:-1].split(",")) - text_color = sv.Color.from_bgr_tuple(b, g, r) + text_color = sv.Color.from_bgr_tuple((b, g, r)) else: text_color = getattr(sv.Color, text_color) From 855f4bbab59e16efb9e523d220ff692febf76f3b Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Wed, 17 Jul 2024 08:15:47 -0700 Subject: [PATCH 20/36] Color error handling --- inference/core/workflows/core_steps/visualizations/label.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/inference/core/workflows/core_steps/visualizations/label.py 
b/inference/core/workflows/core_steps/visualizations/label.py index e0fae3a79..50f161f2e 100644 --- a/inference/core/workflows/core_steps/visualizations/label.py +++ b/inference/core/workflows/core_steps/visualizations/label.py @@ -139,8 +139,10 @@ def getAnnotator( elif text_color.startswith("bgr"): b, g, r = map(int, text_color[4:-1].split(",")) text_color = sv.Color.from_bgr_tuple((b, g, r)) + elif hasattr(sv.Color, text_color.upper()): + text_color = getattr(sv.Color, text_color.upper()) else: - text_color = getattr(sv.Color, text_color) + raise ValueError(f"Invalid text color: {text_color}; valid formats are #RRGGBB, rgb(R, G, B), bgr(B, G, R), or a valid color name (like WHITE, BLACK, or BLUE).") print(f"Text color: {text_color}") From 89a34b4bd9bb72c27fd16a7d9c1eb345d67d77c6 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Wed, 17 Jul 2024 10:15:59 -0700 Subject: [PATCH 21/36] Add Crop Annotator and fix CUSTOM ColorPalette --- inference/core/workflows/core_steps/loader.py | 4 + .../core_steps/visualizations/base.py | 6 +- .../core_steps/visualizations/crop.py | 155 ++++++++++++++++++ .../core_steps/visualizations/label.py | 22 +-- .../core_steps/visualizations/triangle.py | 2 +- .../core_steps/visualizations/utils.py | 15 ++ 6 files changed, 186 insertions(+), 18 deletions(-) create mode 100644 inference/core/workflows/core_steps/visualizations/crop.py create mode 100644 inference/core/workflows/core_steps/visualizations/utils.py diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index eced16cb3..a4de6fa44 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -93,6 +93,9 @@ from inference.core.workflows.core_steps.visualizations.corner import ( CornerVisualizationBlock ) +from inference.core.workflows.core_steps.visualizations.crop import ( + CropVisualizationBlock +) from inference.core.workflows.core_steps.visualizations.dot import ( 
DotVisualizationBlock ) @@ -157,6 +160,7 @@ def load_blocks() -> List[Type[WorkflowBlock]]: CircleVisualizationBlock, ColorVisualizationBlock, CornerVisualizationBlock, + CropVisualizationBlock, DotVisualizationBlock, EllipseVisualizationBlock, HaloVisualizationBlock, diff --git a/inference/core/workflows/core_steps/visualizations/base.py b/inference/core/workflows/core_steps/visualizations/base.py index 926e90304..4746ef540 100644 --- a/inference/core/workflows/core_steps/visualizations/base.py +++ b/inference/core/workflows/core_steps/visualizations/base.py @@ -4,6 +4,10 @@ import supervision as sv from pydantic import AliasChoices, ConfigDict, Field +from inference.core.workflows.core_steps.visualizations.utils import ( + strToColor +) + from inference.core.workflows.entities.base import ( OutputDefinition, WorkflowImageData, @@ -192,7 +196,7 @@ def getAnnotator(self) -> sv.annotators.base.BaseAnnotator: @classmethod def getPalette(self, color_palette, palette_size, custom_colors): if color_palette == "CUSTOM": - return custom_colors + return sv.ColorPalette(colors=[strToColor(color) for color in custom_colors]) elif hasattr(sv.ColorPalette, color_palette): return getattr(sv.ColorPalette, color_palette) else: diff --git a/inference/core/workflows/core_steps/visualizations/crop.py b/inference/core/workflows/core_steps/visualizations/crop.py new file mode 100644 index 000000000..77cf489d3 --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/crop.py @@ -0,0 +1,155 @@ +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationManifest, + VisualizationBlock +) + +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.entities.base import ( + WorkflowImageData, +) +from inference.core.workflows.entities.types import ( + INTEGER_KIND, + FLOAT_KIND, + STRING_KIND, + WorkflowParameterSelector +) +from 
inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlockManifest, +) + +OUTPUT_IMAGE_KEY: str = "image" + +TYPE: str = "CropVisualization" +SHORT_DESCRIPTION = ( + "Draws scaled up crops of detections on the scene." +) +LONG_DESCRIPTION = """ +The `CropVisualization` block draws scaled up crops of detections +on the scene using Supervision's `sv.CropAnnotator`. +""" + +class CropManifest(VisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + position: Union[ + Literal[ + "CENTER", + "CENTER_LEFT", + "CENTER_RIGHT", + "TOP_CENTER", + "TOP_LEFT", + "TOP_RIGHT", + "BOTTOM_LEFT", + "BOTTOM_CENTER", + "BOTTOM_RIGHT", + "CENTER_OF_MASS", + ], + WorkflowParameterSelector(kind=[STRING_KIND]), + ] = Field( # type: ignore + default="TOP_CENTER", + description="The anchor position for placing the crop.", + examples=["CENTER", "$inputs.position"], + ) + + scale_factor: Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] = Field( # type: ignore + description="The factor by which to scale the cropped image part. 
A factor of 2, for example, would double the size of the cropped area, allowing for a closer view of the detection.", + default=2.0, + examples=[2.0, "$inputs.scale_factor"], + ) + + border_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Thickness of the outline in pixels.", + default=2, + examples=[2, "$inputs.border_thickness"], + ) + +class CropVisualizationBlock(VisualizationBlock): + def __init__(self): + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return CropManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + position: str, + scale_factor: float, + border_thickness: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join(map(str, [ + color_palette, + palette_size, + color_axis, + position, + scale_factor, + border_thickness, + ])) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + self.annotatorCache[key] = sv.CropAnnotator( + border_color=palette, + border_color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + position=getattr(sv.Position, position), + scale_factor=scale_factor, + border_thickness=border_thickness + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + position: Optional[str], + scale_factor: Optional[float], + border_thickness: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + position, + scale_factor, + border_thickness, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions + ) 
+ + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return { + OUTPUT_IMAGE_KEY: output + } diff --git a/inference/core/workflows/core_steps/visualizations/label.py b/inference/core/workflows/core_steps/visualizations/label.py index 50f161f2e..b420c7ca0 100644 --- a/inference/core/workflows/core_steps/visualizations/label.py +++ b/inference/core/workflows/core_steps/visualizations/label.py @@ -3,6 +3,10 @@ VisualizationBlock ) +from inference.core.workflows.core_steps.visualizations.utils import ( + strToColor +) + from typing import List, Literal, Optional, Type, Union import supervision as sv @@ -14,7 +18,6 @@ from inference.core.workflows.entities.types import ( INTEGER_KIND, FLOAT_KIND, - FLOAT_ZERO_TO_ONE_KIND, STRING_KIND, WorkflowParameterSelector ) @@ -131,21 +134,8 @@ def getAnnotator( if key not in self.annotatorCache: palette = self.getPalette(color_palette, palette_size, custom_colors) - if text_color.startswith("#"): - text_color = sv.Color.from_hex(text_color) - elif text_color.startswith("rgb"): - r, g, b = map(int, text_color[4:-1].split(",")) - text_color = sv.Color.from_rgb_tuple((r, g, b)) - elif text_color.startswith("bgr"): - b, g, r = map(int, text_color[4:-1].split(",")) - text_color = sv.Color.from_bgr_tuple((b, g, r)) - elif hasattr(sv.Color, text_color.upper()): - text_color = getattr(sv.Color, text_color.upper()) - else: - raise ValueError(f"Invalid text color: {text_color}; valid formats are #RRGGBB, rgb(R, G, B), bgr(B, G, R), or a valid color name (like WHITE, BLACK, or BLUE).") - - print(f"Text color: {text_color}") - + text_color = strToColor(text_color) + self.annotatorCache[key] = sv.LabelAnnotator( color=palette, color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), diff --git a/inference/core/workflows/core_steps/visualizations/triangle.py 
b/inference/core/workflows/core_steps/visualizations/triangle.py index 5ab1ddfa8..eb1068225 100644 --- a/inference/core/workflows/core_steps/visualizations/triangle.py +++ b/inference/core/workflows/core_steps/visualizations/triangle.py @@ -25,7 +25,7 @@ TYPE: str = "TriangleVisualization" SHORT_DESCRIPTION = ( - "Draws triangle markers on an image at specific coordinates based on provided detections." + "Draws triangle markers on an image at specific coordinates based on provided detections." ) LONG_DESCRIPTION = """ The `TriangleVisualization` block draws triangle markers on an image at specific coordinates diff --git a/inference/core/workflows/core_steps/visualizations/utils.py b/inference/core/workflows/core_steps/visualizations/utils.py new file mode 100644 index 000000000..c75cc640e --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/utils.py @@ -0,0 +1,15 @@ +import supervision as sv + +def strToColor(color: str) -> sv.Color: + if color.startswith("#"): + return sv.Color.from_hex(color) + elif color.startswith("rgb"): + r, g, b = map(int, color[4:-1].split(",")) + return sv.Color.from_rgb_tuple((r, g, b)) + elif color.startswith("bgr"): + b, g, r = map(int, color[4:-1].split(",")) + return sv.Color.from_bgr_tuple((b, g, r)) + elif hasattr(sv.Color, color.upper()): + return getattr(sv.Color, color.upper()) + else: + raise ValueError(f"Invalid text color: {color}; valid formats are #RRGGBB, rgb(R, G, B), bgr(B, G, R), or a valid color name (like WHITE, BLACK, or BLUE).") \ No newline at end of file From 240dbb6f16e084e18ea5b6b480dfb493091bd5f2 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Wed, 17 Jul 2024 10:47:22 -0700 Subject: [PATCH 22/36] Add label text formats --- .../core_steps/visualizations/label.py | 45 ++++++++++++++++++- 1 file changed, 44 insertions(+), 1 deletion(-) diff --git a/inference/core/workflows/core_steps/visualizations/label.py b/inference/core/workflows/core_steps/visualizations/label.py index b420c7ca0..ee090e595 
100644 --- a/inference/core/workflows/core_steps/visualizations/label.py +++ b/inference/core/workflows/core_steps/visualizations/label.py @@ -48,6 +48,22 @@ class LabelManifest(VisualizationManifest): } ) + text: Union[ + Literal[ + "Class", + "Confidence", + "Class and Confidence", + "Index", + "Dimensions", + "Area" + ], + WorkflowParameterSelector(kind=[STRING_KIND]), + ] = Field( # type: ignore + default="Class", + description="The type of text to display.", + examples=["LABEL", "$inputs.text"], + ) + text_position: Union[ Literal[ "CENTER", @@ -158,6 +174,7 @@ async def run( palette_size: Optional[int], custom_colors: Optional[List[str]], color_axis: Optional[str], + text: Optional[str], text_position: Optional[str], text_color: Optional[str], text_scale: Optional[float], @@ -178,9 +195,35 @@ async def run( border_radius, ) + if text == "Class": + labels = predictions['class_name'] + elif text == "Confidence": + labels = [f"{confidence:.2f}" for confidence in predictions.confidence] + elif text == "Class and Confidence": + labels = [ + f"{class_name} {confidence:.2f}" + for class_name, confidence + in zip(predictions['class_name'], predictions.confidence) + ] + elif text == "Index": + labels = [str(i) for i in range(len(predictions))] + elif text == "Dimensions": + # rounded ints: center x, center y wxh from predictions[i].xyxy + labels = [] + for i in range(len(predictions)): + x1, y1, x2, y2 = predictions.xyxy[i] + cx, cy = (x1 + x2) / 2, (y1 + y2) / 2 + w, h = x2 - x1, y2 - y1 + labels.append(f"{int(cx)}, {int(cy)} {int(w)}x{int(h)}") + elif text == "Area": + labels = [str(int(area)) for area in predictions.area] + else: + raise ValueError(f"Invalid text type: {text}") + annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, - detections=predictions + detections=predictions, + labels=labels ) output = WorkflowImageData( From 0558d154006f244fefa520b0a1e83b2d3b333d1d Mon Sep 17 00:00:00 2001 From: Brad 
Dwyer Date: Wed, 17 Jul 2024 11:40:34 -0700 Subject: [PATCH 23/36] Run `make style` --- inference/core/workflows/core_steps/loader.py | 32 +++---- .../core_steps/visualizations/base.py | 62 ++++-------- .../core_steps/visualizations/blur.py | 49 ++++------ .../core_steps/visualizations/bounding_box.py | 60 +++++------- .../core_steps/visualizations/circle.py | 55 +++++------ .../core_steps/visualizations/color.py | 59 ++++++------ .../core_steps/visualizations/corner.py | 61 ++++++------ .../core_steps/visualizations/crop.py | 69 +++++++------ .../core_steps/visualizations/dot.py | 61 ++++++------ .../core_steps/visualizations/ellipse.py | 65 ++++++------- .../core_steps/visualizations/halo.py | 63 ++++++------ .../core_steps/visualizations/label.py | 96 +++++++++---------- .../core_steps/visualizations/mask.py | 60 ++++++------ .../core_steps/visualizations/pixelate.py | 49 ++++------ .../core_steps/visualizations/polygon.py | 59 ++++++------ .../core_steps/visualizations/triangle.py | 69 +++++++------ .../core_steps/visualizations/utils.py | 5 +- 17 files changed, 439 insertions(+), 535 deletions(-) diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index a4de6fa44..0f3b9b840 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -79,48 +79,45 @@ # Visualizers from inference.core.workflows.core_steps.visualizations.blur import ( - BlurVisualizationBlock + BlurVisualizationBlock, ) from inference.core.workflows.core_steps.visualizations.bounding_box import ( - BoundingBoxVisualizationBlock + BoundingBoxVisualizationBlock, ) from inference.core.workflows.core_steps.visualizations.circle import ( - CircleVisualizationBlock + CircleVisualizationBlock, ) from inference.core.workflows.core_steps.visualizations.color import ( - ColorVisualizationBlock + ColorVisualizationBlock, ) from inference.core.workflows.core_steps.visualizations.corner import ( - 
CornerVisualizationBlock + CornerVisualizationBlock, ) from inference.core.workflows.core_steps.visualizations.crop import ( - CropVisualizationBlock -) -from inference.core.workflows.core_steps.visualizations.dot import ( - DotVisualizationBlock + CropVisualizationBlock, ) +from inference.core.workflows.core_steps.visualizations.dot import DotVisualizationBlock from inference.core.workflows.core_steps.visualizations.ellipse import ( - EllipseVisualizationBlock + EllipseVisualizationBlock, ) from inference.core.workflows.core_steps.visualizations.halo import ( - HaloVisualizationBlock + HaloVisualizationBlock, ) from inference.core.workflows.core_steps.visualizations.label import ( - LabelVisualizationBlock + LabelVisualizationBlock, ) from inference.core.workflows.core_steps.visualizations.mask import ( - MaskVisualizationBlock + MaskVisualizationBlock, ) from inference.core.workflows.core_steps.visualizations.pixelate import ( - PixelateVisualizationBlock + PixelateVisualizationBlock, ) from inference.core.workflows.core_steps.visualizations.polygon import ( - PolygonVisualizationBlock + PolygonVisualizationBlock, ) from inference.core.workflows.core_steps.visualizations.triangle import ( - TriangleVisualizationBlock + TriangleVisualizationBlock, ) - from inference.core.workflows.prototypes.block import WorkflowBlock @@ -154,7 +151,6 @@ def load_blocks() -> List[Type[WorkflowBlock]]: PropertyDefinitionBlock, DimensionCollapseBlock, FirstNonEmptyOrDefaultBlock, - BlurVisualizationBlock, BoundingBoxVisualizationBlock, CircleVisualizationBlock, diff --git a/inference/core/workflows/core_steps/visualizations/base.py b/inference/core/workflows/core_steps/visualizations/base.py index 4746ef540..170943e96 100644 --- a/inference/core/workflows/core_steps/visualizations/base.py +++ b/inference/core/workflows/core_steps/visualizations/base.py @@ -4,31 +4,21 @@ import supervision as sv from pydantic import AliasChoices, ConfigDict, Field -from 
inference.core.workflows.core_steps.visualizations.utils import ( - strToColor -) - -from inference.core.workflows.entities.base import ( - OutputDefinition, - WorkflowImageData, -) -from inference.core.workflows.entities.types import ( - # IMAGE_KIND, - # OBJECT_DETECTION_PREDICTION_KIND, - # INSTANCE_SEGMENTATION_PREDICTION_KIND, - # KEYPOINT_DETECTION_PREDICTION_KIND, +from inference.core.workflows.core_steps.visualizations.utils import strToColor +from inference.core.workflows.entities.base import OutputDefinition, WorkflowImageData +from inference.core.workflows.entities.types import ( # IMAGE_KIND,; OBJECT_DETECTION_PREDICTION_KIND,; INSTANCE_SEGMENTATION_PREDICTION_KIND,; KEYPOINT_DETECTION_PREDICTION_KIND, BATCH_OF_IMAGES_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - INTEGER_KIND, + BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, BOOLEAN_KIND, - STRING_KIND, + INTEGER_KIND, LIST_OF_VALUES_KIND, + STRING_KIND, StepOutputImageSelector, StepOutputSelector, WorkflowImageSelector, - WorkflowParameterSelector + WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -38,6 +28,7 @@ OUTPUT_IMAGE_KEY: str = "image" + class VisualizationManifest(WorkflowBlockManifest, ABC): model_config = ConfigDict( json_schema_extra={ @@ -62,9 +53,9 @@ class VisualizationManifest(WorkflowBlockManifest, ABC): validation_alias=AliasChoices("image", "images"), ) - copy_image: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + copy_image: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore description="Duplicate the image contents (vs overwriting the image in place). 
Deselect for chained visualizations that should stack on previous ones where the intermediate state is not needed.", - default=True + default=True, ) color_palette: Union[ @@ -72,17 +63,14 @@ class VisualizationManifest(WorkflowBlockManifest, ABC): "DEFAULT", "CUSTOM", "ROBOFLOW", - "Matplotlib Viridis", "Matplotlib Plasma", "Matplotlib Inferno", "Matplotlib Magma", "Matplotlib Cividis", - # 'LinearSegmentedColormap' object has no attribute 'colors' # "Matplotlib Twilight", # "Matplotlib Twilight_Shifted", - # 'LinearSegmentedColormap' object has no attribute 'colors' # "Matplotlib HSV", # "Matplotlib Jet", @@ -91,7 +79,6 @@ class VisualizationManifest(WorkflowBlockManifest, ABC): # "Matplotlib gist_rainbow", # "Matplotlib nipy_spectral", # "Matplotlib gist_ncar", - "Matplotlib Pastel1", "Matplotlib Pastel2", "Matplotlib Paired", @@ -104,7 +91,6 @@ class VisualizationManifest(WorkflowBlockManifest, ABC): "Matplotlib Tab20", "Matplotlib Tab20b", "Matplotlib Tab20c", - # 'LinearSegmentedColormap' object has no attribute 'colors' # "Matplotlib Ocean", # "Matplotlib Gist_Earth", @@ -112,7 +98,6 @@ class VisualizationManifest(WorkflowBlockManifest, ABC): # "Matplotlib Stern", # "Matplotlib gnuplot", # "Matplotlib gnuplot2", - # 'LinearSegmentedColormap' object has no attribute 'colors' # "Matplotlib Spring", # "Matplotlib Summer", @@ -122,7 +107,6 @@ class VisualizationManifest(WorkflowBlockManifest, ABC): # "Matplotlib Hot", # "Matplotlib Copper", # "Matplotlib Bone", - # "Matplotlib Greys_R", # "Matplotlib Purples_R", # "Matplotlib Blues_R", @@ -131,7 +115,7 @@ class VisualizationManifest(WorkflowBlockManifest, ABC): # "Matplotlib Reds_R", ], WorkflowParameterSelector(kind=[STRING_KIND]), - ] = Field( # type: ignore + ] = Field( # type: ignore default="DEFAULT", description="Color palette to use for annotations.", examples=["DEFAULT", "$inputs.color_palette"], @@ -140,30 +124,24 @@ class VisualizationManifest(WorkflowBlockManifest, ABC): palette_size: Union[ 
INTEGER_KIND, WorkflowParameterSelector(kind=[INTEGER_KIND]), - ] = Field( # type: ignore + ] = Field( # type: ignore default=10, description="Number of colors in the color palette. Applies when using a matplotlib `color_palette`.", examples=[10, "$inputs.palette_size"], ) custom_colors: Union[ - List[str], - WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]) - ] = Field( # type: ignore + List[str], WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]) + ] = Field( # type: ignore default=[], - description="List of colors to use for annotations when `color_palette` is set to \"CUSTOM\".", + description='List of colors to use for annotations when `color_palette` is set to "CUSTOM".', examples=[["#FF0000", "#00FF00", "#0000FF"], "$inputs.custom_colors"], ) - color_axis: Union[ - Literal[ - "INDEX", - "CLASS", - "TRACK" - ], + Literal["INDEX", "CLASS", "TRACK"], WorkflowParameterSelector(kind=[STRING_KIND]), - ] = Field( # type: ignore + ] = Field( # type: ignore default="CLASS", description="Strategy to use for mapping colors to annotations.", examples=["CLASS", "$inputs.color_axis"], @@ -180,6 +158,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: ), ] + class VisualizationBlock(WorkflowBlock, ABC): def __init__(self): pass @@ -196,7 +175,9 @@ def getAnnotator(self) -> sv.annotators.base.BaseAnnotator: @classmethod def getPalette(self, color_palette, palette_size, custom_colors): if color_palette == "CUSTOM": - return sv.ColorPalette(colors=[strToColor(color) for color in custom_colors]) + return sv.ColorPalette( + colors=[strToColor(color) for color in custom_colors] + ) elif hasattr(sv.ColorPalette, color_palette): return getattr(sv.ColorPalette, color_palette) else: @@ -209,7 +190,6 @@ def getPalette(self, color_palette, palette_size, custom_colors): "Greens_R", "Oranges_R", "Reds_R", - "Wistia", "Pastel1", "Pastel2", diff --git a/inference/core/workflows/core_steps/visualizations/blur.py b/inference/core/workflows/core_steps/visualizations/blur.py 
index bdcccd54d..7efa1aa39 100644 --- a/inference/core/workflows/core_steps/visualizations/blur.py +++ b/inference/core/workflows/core_steps/visualizations/blur.py @@ -3,25 +3,18 @@ import supervision as sv from pydantic import AliasChoices, ConfigDict, Field -from inference.core.workflows.entities.base import ( - OutputDefinition, - WorkflowImageData, -) -from inference.core.workflows.entities.types import ( - # IMAGE_KIND, - # OBJECT_DETECTION_PREDICTION_KIND, - # INSTANCE_SEGMENTATION_PREDICTION_KIND, - # KEYPOINT_DETECTION_PREDICTION_KIND, +from inference.core.workflows.entities.base import OutputDefinition, WorkflowImageData +from inference.core.workflows.entities.types import ( # IMAGE_KIND,; OBJECT_DETECTION_PREDICTION_KIND,; INSTANCE_SEGMENTATION_PREDICTION_KIND,; KEYPOINT_DETECTION_PREDICTION_KIND, BATCH_OF_IMAGES_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - INTEGER_KIND, + BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, BOOLEAN_KIND, + INTEGER_KIND, StepOutputImageSelector, StepOutputSelector, WorkflowImageSelector, - WorkflowParameterSelector + WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -32,14 +25,13 @@ OUTPUT_IMAGE_KEY: str = "image" TYPE: str = "BlurVisualization" -SHORT_DESCRIPTION = ( - "Blurs detected objects in an image." -) +SHORT_DESCRIPTION = "Blurs detected objects in an image." LONG_DESCRIPTION = """ The `BlurVisualization` block blurs detected objects in an image using Supervision's `sv.BlurAnnotator`. 
""" + class BlurManifest(WorkflowBlockManifest): type: Literal[f"{TYPE}"] model_config = ConfigDict( @@ -68,12 +60,12 @@ class BlurManifest(WorkflowBlockManifest): validation_alias=AliasChoices("image", "images"), ) - copy_image: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + copy_image: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore description="Duplicate the image contents (vs overwriting the image in place). Deselect for chained visualizations that should stack on previous ones where the intermediate state is not needed.", - default=True + default=True, ) - - kernel_size: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + + kernel_size: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Size of the average pooling kernel used for blurring.", default=15, examples=[15, "$inputs.kernel_size"], @@ -90,6 +82,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: ), ] + class BlurVisualizationBlock(WorkflowBlock): def __init__(self): self.annotatorCache = {} @@ -102,15 +95,11 @@ def getAnnotator( self, kernel_size: int, ) -> sv.annotators.base.BaseAnnotator: - key = "_".join(map(str, [ - kernel_size - ])) - + key = "_".join(map(str, [kernel_size])) + if key not in self.annotatorCache: - self.annotatorCache[key] = sv.BlurAnnotator( - kernel_size=kernel_size - ) - return self.annotatorCache[key] + self.annotatorCache[key] = sv.BlurAnnotator(kernel_size=kernel_size) + return self.annotatorCache[key] async def run( self, @@ -125,7 +114,7 @@ async def run( annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, - detections=predictions + detections=predictions, ) output = WorkflowImageData( @@ -134,6 +123,4 @@ async def run( numpy_image=annotated_image, ) - return { - OUTPUT_IMAGE_KEY: output - } + return {OUTPUT_IMAGE_KEY: output} diff --git 
a/inference/core/workflows/core_steps/visualizations/bounding_box.py b/inference/core/workflows/core_steps/visualizations/bounding_box.py index 936398861..caf226776 100644 --- a/inference/core/workflows/core_steps/visualizations/bounding_box.py +++ b/inference/core/workflows/core_steps/visualizations/bounding_box.py @@ -1,38 +1,31 @@ -from inference.core.workflows.core_steps.visualizations.base import ( - VisualizationManifest, - VisualizationBlock -) - from typing import List, Literal, Optional, Type, Union import supervision as sv from pydantic import ConfigDict, Field -from inference.core.workflows.entities.base import ( - WorkflowImageData, +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationBlock, + VisualizationManifest, ) +from inference.core.workflows.entities.base import WorkflowImageData from inference.core.workflows.entities.types import ( + FLOAT_ZERO_TO_ONE_KIND, INTEGER_KIND, FloatZeroToOne, - FLOAT_ZERO_TO_ONE_KIND, - WorkflowParameterSelector -) -from inference.core.workflows.prototypes.block import ( - BlockResult, - WorkflowBlockManifest, + WorkflowParameterSelector, ) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest OUTPUT_IMAGE_KEY: str = "image" TYPE: str = "BoundingBoxVisualization" -SHORT_DESCRIPTION = ( - "Draws a box around detected objects in an image." -) +SHORT_DESCRIPTION = "Draws a box around detected objects in an image." LONG_DESCRIPTION = """ The `BoundingBoxVisualization` block draws a box around detected objects in an image using Supervision's `sv.RoundBoxAnnotator`. 
""" + class BoundingBoxManifest(VisualizationManifest): type: Literal[f"{TYPE}"] model_config = ConfigDict( @@ -43,19 +36,20 @@ class BoundingBoxManifest(VisualizationManifest): "block_type": "visualization", } ) - - thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + + thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the bounding box in pixels.", default=2, examples=[2, "$inputs.thickness"], ) - roundness: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + roundness: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Roundness of the corners of the bounding box.", default=0.0, examples=[0.0, "$inputs.roundness"], ) + class BoundingBoxVisualizationBlock(VisualizationBlock): def __init__(self): self.annotatorCache = {} @@ -69,18 +63,14 @@ def getAnnotator( color_palette: str, palette_size: int, custom_colors: List[str], - color_axis:str, - thickness:int, + color_axis: str, + thickness: int, roundness: float, ) -> sv.annotators.base.BaseAnnotator: - key = "_".join(map(str, [ - color_palette, - palette_size, - color_axis, - thickness, - roundness - ])) - + key = "_".join( + map(str, [color_palette, palette_size, color_axis, thickness, roundness]) + ) + if key not in self.annotatorCache: palette = self.getPalette(color_palette, palette_size, custom_colors) @@ -88,16 +78,16 @@ def getAnnotator( self.annotatorCache[key] = sv.BoxAnnotator( color=palette, color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), - thickness=thickness + thickness=thickness, ) else: self.annotatorCache[key] = sv.RoundBoxAnnotator( color=palette, color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), thickness=thickness, - roundness=roundness + roundness=roundness, ) - return self.annotatorCache[key] + return self.annotatorCache[key] async 
def run( self, @@ -122,7 +112,7 @@ async def run( annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, - detections=predictions + detections=predictions, ) output = WorkflowImageData( @@ -131,6 +121,4 @@ async def run( numpy_image=annotated_image, ) - return { - OUTPUT_IMAGE_KEY: output - } + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/circle.py b/inference/core/workflows/core_steps/visualizations/circle.py index c83630d99..b41828212 100644 --- a/inference/core/workflows/core_steps/visualizations/circle.py +++ b/inference/core/workflows/core_steps/visualizations/circle.py @@ -1,36 +1,29 @@ -from inference.core.workflows.core_steps.visualizations.base import ( - VisualizationManifest, - VisualizationBlock -) - from typing import List, Literal, Optional, Type, Union import supervision as sv from pydantic import ConfigDict, Field -from inference.core.workflows.entities.base import ( - WorkflowImageData, +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationBlock, + VisualizationManifest, ) +from inference.core.workflows.entities.base import WorkflowImageData from inference.core.workflows.entities.types import ( INTEGER_KIND, - WorkflowParameterSelector -) -from inference.core.workflows.prototypes.block import ( - BlockResult, - WorkflowBlockManifest, + WorkflowParameterSelector, ) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest OUTPUT_IMAGE_KEY: str = "image" TYPE: str = "CircleVisualization" -SHORT_DESCRIPTION = ( - "Draws a circle around detected objects in an image." -) +SHORT_DESCRIPTION = "Draws a circle around detected objects in an image." LONG_DESCRIPTION = """ The `CircleVisualization` block draws a circle around detected objects in an image using Supervision's `sv.CircleAnnotator`. 
""" + class CircleManifest(VisualizationManifest): type: Literal[f"{TYPE}"] model_config = ConfigDict( @@ -41,13 +34,14 @@ class CircleManifest(VisualizationManifest): "block_type": "visualization", } ) - - thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + + thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the lines in pixels.", default=2, examples=[2, "$inputs.thickness"], ) + class CircleVisualizationBlock(VisualizationBlock): def __init__(self): self.annotatorCache = {} @@ -64,13 +58,18 @@ def getAnnotator( color_axis: str, thickness: int, ) -> sv.annotators.base.BaseAnnotator: - key = "_".join(map(str, [ - color_palette, - palette_size, - color_axis, - thickness, - ])) - + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + thickness, + ], + ) + ) + if key not in self.annotatorCache: palette = self.getPalette(color_palette, palette_size, custom_colors) @@ -80,7 +79,7 @@ def getAnnotator( thickness=thickness, ) - return self.annotatorCache[key] + return self.annotatorCache[key] async def run( self, @@ -103,7 +102,7 @@ async def run( annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, - detections=predictions + detections=predictions, ) output = WorkflowImageData( @@ -112,6 +111,4 @@ async def run( numpy_image=annotated_image, ) - return { - OUTPUT_IMAGE_KEY: output - } + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/color.py b/inference/core/workflows/core_steps/visualizations/color.py index f5f85f68c..6ebeaa9da 100644 --- a/inference/core/workflows/core_steps/visualizations/color.py +++ b/inference/core/workflows/core_steps/visualizations/color.py @@ -1,37 +1,30 @@ -from inference.core.workflows.core_steps.visualizations.base import ( - VisualizationManifest, - VisualizationBlock -) - from typing import List, 
Literal, Optional, Type, Union import supervision as sv from pydantic import ConfigDict, Field -from inference.core.workflows.entities.base import ( - WorkflowImageData, +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationBlock, + VisualizationManifest, ) +from inference.core.workflows.entities.base import WorkflowImageData from inference.core.workflows.entities.types import ( - FloatZeroToOne, FLOAT_ZERO_TO_ONE_KIND, - WorkflowParameterSelector -) -from inference.core.workflows.prototypes.block import ( - BlockResult, - WorkflowBlockManifest, + FloatZeroToOne, + WorkflowParameterSelector, ) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest OUTPUT_IMAGE_KEY: str = "image" TYPE: str = "ColorVisualization" -SHORT_DESCRIPTION = ( - "Paints a solid color on detected objects in an image." -) +SHORT_DESCRIPTION = "Paints a solid color on detected objects in an image." LONG_DESCRIPTION = """ The `ColorVisualization` block paints a solid color on detected objects in an image using Supervision's `sv.ColorAnnotator`. 
""" + class ColorManifest(VisualizationManifest): type: Literal[f"{TYPE}"] model_config = ConfigDict( @@ -42,13 +35,14 @@ class ColorManifest(VisualizationManifest): "block_type": "visualization", } ) - - opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + + opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Transparency of the color overlay.", default=0.5, examples=[0.5, "$inputs.opacity"], ) + class ColorVisualizationBlock(VisualizationBlock): def __init__(self): self.annotatorCache = {} @@ -65,23 +59,28 @@ def getAnnotator( color_axis: str, opacity: float, ) -> sv.annotators.base.BaseAnnotator: - key = "_".join(map(str, [ - color_palette, - palette_size, - color_axis, - opacity, - ])) - + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + opacity, + ], + ) + ) + if key not in self.annotatorCache: palette = self.getPalette(color_palette, palette_size, custom_colors) self.annotatorCache[key] = sv.ColorAnnotator( color=palette, color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), - opacity=opacity + opacity=opacity, ) - return self.annotatorCache[key] + return self.annotatorCache[key] async def run( self, @@ -104,7 +103,7 @@ async def run( annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, - detections=predictions + detections=predictions, ) output = WorkflowImageData( @@ -113,6 +112,4 @@ async def run( numpy_image=annotated_image, ) - return { - OUTPUT_IMAGE_KEY: output - } + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/corner.py b/inference/core/workflows/core_steps/visualizations/corner.py index e456452a7..f6b41ffde 100644 --- a/inference/core/workflows/core_steps/visualizations/corner.py +++ b/inference/core/workflows/core_steps/visualizations/corner.py @@ -1,36 +1,29 @@ 
-from inference.core.workflows.core_steps.visualizations.base import ( - VisualizationManifest, - VisualizationBlock -) - from typing import List, Literal, Optional, Type, Union import supervision as sv from pydantic import ConfigDict, Field -from inference.core.workflows.entities.base import ( - WorkflowImageData, +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationBlock, + VisualizationManifest, ) +from inference.core.workflows.entities.base import WorkflowImageData from inference.core.workflows.entities.types import ( INTEGER_KIND, - WorkflowParameterSelector -) -from inference.core.workflows.prototypes.block import ( - BlockResult, - WorkflowBlockManifest, + WorkflowParameterSelector, ) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest OUTPUT_IMAGE_KEY: str = "image" TYPE: str = "CornerVisualization" -SHORT_DESCRIPTION = ( - "Draws the corners of detected objects in an image." -) +SHORT_DESCRIPTION = "Draws the corners of detected objects in an image." LONG_DESCRIPTION = """ The `CornerVisualization` block draws the corners of detected objects in an image using Supervision's `sv.BoxCornerAnnotator`. 
""" + class CornerManifest(VisualizationManifest): type: Literal[f"{TYPE}"] model_config = ConfigDict( @@ -41,19 +34,20 @@ class CornerManifest(VisualizationManifest): "block_type": "visualization", } ) - - thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + + thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the lines in pixels.", default=4, examples=[4, "$inputs.thickness"], ) - corner_length: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + corner_length: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Length of the corner lines in pixels.", default=15, examples=[15, "$inputs.corner_length"], ) + class CornerVisualizationBlock(VisualizationBlock): def __init__(self): self.annotatorCache = {} @@ -71,14 +65,19 @@ def getAnnotator( thickness: int, corner_length: int, ) -> sv.annotators.base.BaseAnnotator: - key = "_".join(map(str, [ - color_palette, - palette_size, - color_axis, - thickness, - corner_length, - ])) - + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + thickness, + corner_length, + ], + ) + ) + if key not in self.annotatorCache: palette = self.getPalette(color_palette, palette_size, custom_colors) @@ -86,10 +85,10 @@ def getAnnotator( color=palette, color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), thickness=thickness, - corner_length=corner_length + corner_length=corner_length, ) - return self.annotatorCache[key] + return self.annotatorCache[key] async def run( self, @@ -114,7 +113,7 @@ async def run( annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, - detections=predictions + detections=predictions, ) output = WorkflowImageData( @@ -123,6 +122,4 @@ async def run( numpy_image=annotated_image, ) - return { - OUTPUT_IMAGE_KEY: output - } + return 
{OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/crop.py b/inference/core/workflows/core_steps/visualizations/crop.py index 77cf489d3..78cadc3ba 100644 --- a/inference/core/workflows/core_steps/visualizations/crop.py +++ b/inference/core/workflows/core_steps/visualizations/crop.py @@ -1,38 +1,31 @@ -from inference.core.workflows.core_steps.visualizations.base import ( - VisualizationManifest, - VisualizationBlock -) - from typing import List, Literal, Optional, Type, Union import supervision as sv from pydantic import ConfigDict, Field -from inference.core.workflows.entities.base import ( - WorkflowImageData, +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationBlock, + VisualizationManifest, ) +from inference.core.workflows.entities.base import WorkflowImageData from inference.core.workflows.entities.types import ( - INTEGER_KIND, FLOAT_KIND, + INTEGER_KIND, STRING_KIND, - WorkflowParameterSelector -) -from inference.core.workflows.prototypes.block import ( - BlockResult, - WorkflowBlockManifest, + WorkflowParameterSelector, ) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest OUTPUT_IMAGE_KEY: str = "image" TYPE: str = "CropVisualization" -SHORT_DESCRIPTION = ( - "Draws scaled up crops of detections on the scene." -) +SHORT_DESCRIPTION = "Draws scaled up crops of detections on the scene." LONG_DESCRIPTION = """ The `CropVisualization` block draws scaled up crops of detections on the scene using Supervision's `sv.CropAnnotator`. 
""" + class CropManifest(VisualizationManifest): type: Literal[f"{TYPE}"] model_config = ConfigDict( @@ -58,24 +51,25 @@ class CropManifest(VisualizationManifest): "CENTER_OF_MASS", ], WorkflowParameterSelector(kind=[STRING_KIND]), - ] = Field( # type: ignore + ] = Field( # type: ignore default="TOP_CENTER", description="The anchor position for placing the crop.", examples=["CENTER", "$inputs.position"], ) - scale_factor: Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] = Field( # type: ignore + scale_factor: Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] = Field( # type: ignore description="The factor by which to scale the cropped image part. A factor of 2, for example, would double the size of the cropped area, allowing for a closer view of the detection.", default=2.0, examples=[2.0, "$inputs.scale_factor"], ) - border_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + border_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the outline in pixels.", default=2, examples=[2, "$inputs.border_thickness"], ) + class CropVisualizationBlock(VisualizationBlock): def __init__(self): self.annotatorCache = {} @@ -94,27 +88,34 @@ def getAnnotator( scale_factor: float, border_thickness: int, ) -> sv.annotators.base.BaseAnnotator: - key = "_".join(map(str, [ - color_palette, - palette_size, - color_axis, - position, - scale_factor, - border_thickness, - ])) - + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + position, + scale_factor, + border_thickness, + ], + ) + ) + if key not in self.annotatorCache: palette = self.getPalette(color_palette, palette_size, custom_colors) self.annotatorCache[key] = sv.CropAnnotator( border_color=palette, - border_color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + border_color_lookup=getattr( + sv.annotators.utils.ColorLookup, color_axis + ), 
position=getattr(sv.Position, position), scale_factor=scale_factor, - border_thickness=border_thickness + border_thickness=border_thickness, ) - return self.annotatorCache[key] + return self.annotatorCache[key] async def run( self, @@ -141,7 +142,7 @@ async def run( annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, - detections=predictions + detections=predictions, ) output = WorkflowImageData( @@ -150,6 +151,4 @@ async def run( numpy_image=annotated_image, ) - return { - OUTPUT_IMAGE_KEY: output - } + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/dot.py b/inference/core/workflows/core_steps/visualizations/dot.py index 1c0de2b2a..e6b7039fd 100644 --- a/inference/core/workflows/core_steps/visualizations/dot.py +++ b/inference/core/workflows/core_steps/visualizations/dot.py @@ -1,25 +1,19 @@ -from inference.core.workflows.core_steps.visualizations.base import ( - VisualizationManifest, - VisualizationBlock -) - from typing import List, Literal, Optional, Type, Union import supervision as sv from pydantic import ConfigDict, Field -from inference.core.workflows.entities.base import ( - WorkflowImageData, +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationBlock, + VisualizationManifest, ) +from inference.core.workflows.entities.base import WorkflowImageData from inference.core.workflows.entities.types import ( INTEGER_KIND, STRING_KIND, - WorkflowParameterSelector -) -from inference.core.workflows.prototypes.block import ( - BlockResult, - WorkflowBlockManifest, + WorkflowParameterSelector, ) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest OUTPUT_IMAGE_KEY: str = "image" @@ -32,6 +26,7 @@ based on provided detections using Supervision's `sv.DotAnnotator`. 
""" + class DotManifest(VisualizationManifest): type: Literal[f"{TYPE}"] model_config = ConfigDict( @@ -57,24 +52,25 @@ class DotManifest(VisualizationManifest): "CENTER_OF_MASS", ], WorkflowParameterSelector(kind=[STRING_KIND]), - ] = Field( # type: ignore + ] = Field( # type: ignore default="CENTER", description="The anchor position for placing the dot.", examples=["CENTER", "$inputs.position"], ) - radius: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + radius: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Radius of the dot in pixels.", default=4, examples=[4, "$inputs.radius"], ) - - outline_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + + outline_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the outline of the dot in pixels.", default=0, examples=[2, "$inputs.outline_thickness"], ) + class DotVisualizationBlock(VisualizationBlock): def __init__(self): self.annotatorCache = {} @@ -93,15 +89,20 @@ def getAnnotator( radius: int, outline_thickness: int, ) -> sv.annotators.base.BaseAnnotator: - key = "_".join(map(str, [ - color_palette, - palette_size, - color_axis, - position, - radius, - outline_thickness, - ])) - + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + position, + radius, + outline_thickness, + ], + ) + ) + if key not in self.annotatorCache: palette = self.getPalette(color_palette, palette_size, custom_colors) @@ -110,10 +111,10 @@ def getAnnotator( color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), position=getattr(sv.Position, position), radius=radius, - outline_thickness=outline_thickness + outline_thickness=outline_thickness, ) - return self.annotatorCache[key] + return self.annotatorCache[key] async def run( self, @@ -140,7 +141,7 @@ async def run( annotated_image = annotator.annotate( 
scene=image.numpy_image.copy() if copy_image else image.numpy_image, - detections=predictions + detections=predictions, ) output = WorkflowImageData( @@ -149,6 +150,4 @@ async def run( numpy_image=annotated_image, ) - return { - OUTPUT_IMAGE_KEY: output - } + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/ellipse.py b/inference/core/workflows/core_steps/visualizations/ellipse.py index a0234f5a3..b4a7a3a85 100644 --- a/inference/core/workflows/core_steps/visualizations/ellipse.py +++ b/inference/core/workflows/core_steps/visualizations/ellipse.py @@ -1,36 +1,29 @@ -from inference.core.workflows.core_steps.visualizations.base import ( - VisualizationManifest, - VisualizationBlock -) - from typing import List, Literal, Optional, Type, Union import supervision as sv from pydantic import ConfigDict, Field -from inference.core.workflows.entities.base import ( - WorkflowImageData, +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationBlock, + VisualizationManifest, ) +from inference.core.workflows.entities.base import WorkflowImageData from inference.core.workflows.entities.types import ( INTEGER_KIND, - WorkflowParameterSelector -) -from inference.core.workflows.prototypes.block import ( - BlockResult, - WorkflowBlockManifest, + WorkflowParameterSelector, ) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest OUTPUT_IMAGE_KEY: str = "image" TYPE: str = "EllipseVisualization" -SHORT_DESCRIPTION = ( - "Draws ellipses that highlight detected objects in an image." -) +SHORT_DESCRIPTION = "Draws ellipses that highlight detected objects in an image." LONG_DESCRIPTION = """ The `EllipseVisualization` block draws ellipses that highlight detected objects in an image using Supervision's `sv.EllipseAnnotator`. 
""" + class EllipseManifest(VisualizationManifest): type: Literal[f"{TYPE}"] model_config = ConfigDict( @@ -41,25 +34,26 @@ class EllipseManifest(VisualizationManifest): "block_type": "visualization", } ) - - thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + + thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the lines in pixels.", default=2, examples=[2, "$inputs.thickness"], ) - start_angle: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + start_angle: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Starting angle of the ellipse in degrees.", default=-45, examples=[-45, "$inputs.start_angle"], ) - end_angle: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + end_angle: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Ending angle of the ellipse in degrees.", default=235, examples=[235, "$inputs.end_angle"], ) + class EllipseVisualizationBlock(VisualizationBlock): def __init__(self): self.annotatorCache = {} @@ -78,15 +72,20 @@ def getAnnotator( start_angle: int, end_angle: int, ) -> sv.annotators.base.BaseAnnotator: - key = "_".join(map(str, [ - color_palette, - palette_size, - color_axis, - thickness, - start_angle, - end_angle, - ])) - + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + thickness, + start_angle, + end_angle, + ], + ) + ) + if key not in self.annotatorCache: palette = self.getPalette(color_palette, palette_size, custom_colors) @@ -95,10 +94,10 @@ def getAnnotator( color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), thickness=thickness, start_angle=start_angle, - end_angle=end_angle + end_angle=end_angle, ) - return self.annotatorCache[key] + return self.annotatorCache[key] async def run( self, @@ -125,7 +124,7 @@ async def 
run( annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, - detections=predictions + detections=predictions, ) output = WorkflowImageData( @@ -134,6 +133,4 @@ async def run( numpy_image=annotated_image, ) - return { - OUTPUT_IMAGE_KEY: output - } + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/halo.py b/inference/core/workflows/core_steps/visualizations/halo.py index 59a4c298a..cfbc86439 100644 --- a/inference/core/workflows/core_steps/visualizations/halo.py +++ b/inference/core/workflows/core_steps/visualizations/halo.py @@ -1,41 +1,34 @@ -from inference.core.workflows.core_steps.visualizations.base import ( - VisualizationManifest, - VisualizationBlock -) - from typing import List, Literal, Optional, Type, Union import supervision as sv from pydantic import ConfigDict, Field -from inference.core.workflows.entities.base import ( - WorkflowImageData, +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationBlock, + VisualizationManifest, ) +from inference.core.workflows.entities.base import WorkflowImageData from inference.core.workflows.entities.types import ( BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - FloatZeroToOne, FLOAT_ZERO_TO_ONE_KIND, INTEGER_KIND, + FloatZeroToOne, + StepOutputSelector, WorkflowParameterSelector, - StepOutputSelector -) -from inference.core.workflows.prototypes.block import ( - BlockResult, - WorkflowBlockManifest, ) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest OUTPUT_IMAGE_KEY: str = "image" TYPE: str = "HaloVisualization" -SHORT_DESCRIPTION = ( - "Paints a halo around detected objects in an image." -) +SHORT_DESCRIPTION = "Paints a halo around detected objects in an image." LONG_DESCRIPTION = """ The `HaloVisualization` block uses a detected polygon from an instance segmentation to draw a halo using `sv.HaloAnnotator`. 
""" + class HaloManifest(VisualizationManifest): type: Literal[f"{TYPE}"] model_config = ConfigDict( @@ -55,19 +48,20 @@ class HaloManifest(VisualizationManifest): description="Predictions", examples=["$steps.instance_segmentation_model.predictions"], ) - - opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + + opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Transparency of the halo overlay.", default=0.8, examples=[0.8, "$inputs.opacity"], ) - kernel_size: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + kernel_size: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Size of the average pooling kernel used for creating the halo.", default=40, examples=[40, "$inputs.kernel_size"], ) + class HaloVisualizationBlock(VisualizationBlock): def __init__(self): self.annotatorCache = {} @@ -85,24 +79,29 @@ def getAnnotator( opacity: float, kernel_size: int, ) -> sv.annotators.base.BaseAnnotator: - key = "_".join(map(str, [ - color_palette, - palette_size, - color_axis, - opacity, - kernel_size, - ])) - + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + opacity, + kernel_size, + ], + ) + ) + if key not in self.annotatorCache: palette = self.getPalette(color_palette, palette_size, custom_colors) self.annotatorCache[key] = sv.HaloAnnotator( color=palette, color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), - opacity=opacity + opacity=opacity, ) - return self.annotatorCache[key] + return self.annotatorCache[key] async def run( self, @@ -127,7 +126,7 @@ async def run( annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, - detections=predictions + detections=predictions, ) output = WorkflowImageData( @@ -136,6 +135,4 @@ async def run( 
numpy_image=annotated_image, ) - return { - OUTPUT_IMAGE_KEY: output - } + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/label.py b/inference/core/workflows/core_steps/visualizations/label.py index ee090e595..c110819fb 100644 --- a/inference/core/workflows/core_steps/visualizations/label.py +++ b/inference/core/workflows/core_steps/visualizations/label.py @@ -1,30 +1,21 @@ -from inference.core.workflows.core_steps.visualizations.base import ( - VisualizationManifest, - VisualizationBlock -) - -from inference.core.workflows.core_steps.visualizations.utils import ( - strToColor -) - from typing import List, Literal, Optional, Type, Union import supervision as sv from pydantic import ConfigDict, Field -from inference.core.workflows.entities.base import ( - WorkflowImageData, +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationBlock, + VisualizationManifest, ) +from inference.core.workflows.core_steps.visualizations.utils import strToColor +from inference.core.workflows.entities.base import WorkflowImageData from inference.core.workflows.entities.types import ( - INTEGER_KIND, FLOAT_KIND, + INTEGER_KIND, STRING_KIND, - WorkflowParameterSelector -) -from inference.core.workflows.prototypes.block import ( - BlockResult, - WorkflowBlockManifest, + WorkflowParameterSelector, ) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest OUTPUT_IMAGE_KEY: str = "image" @@ -37,6 +28,7 @@ based on provided detections using Supervision's `sv.LabelAnnotator`. 
""" + class LabelManifest(VisualizationManifest): type: Literal[f"{TYPE}"] model_config = ConfigDict( @@ -50,15 +42,10 @@ class LabelManifest(VisualizationManifest): text: Union[ Literal[ - "Class", - "Confidence", - "Class and Confidence", - "Index", - "Dimensions", - "Area" + "Class", "Confidence", "Class and Confidence", "Index", "Dimensions", "Area" ], WorkflowParameterSelector(kind=[STRING_KIND]), - ] = Field( # type: ignore + ] = Field( # type: ignore default="Class", description="The type of text to display.", examples=["LABEL", "$inputs.text"], @@ -78,42 +65,43 @@ class LabelManifest(VisualizationManifest): "CENTER_OF_MASS", ], WorkflowParameterSelector(kind=[STRING_KIND]), - ] = Field( # type: ignore + ] = Field( # type: ignore default="TOP_LEFT", description="The anchor position for placing the label.", examples=["CENTER", "$inputs.text_position"], ) - text_color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore + text_color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the text.", default="WHITE", examples=["WHITE", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.text_color"], ) - text_scale: Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] = Field( # type: ignore + text_scale: Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] = Field( # type: ignore description="Scale of the text.", default=1.0, examples=[1.0, "$inputs.text_scale"], ) - text_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + text_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the text characters.", default=1, examples=[1, "$inputs.text_thickness"], ) - text_padding: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + text_padding: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Padding 
around the text in pixels.", default=10, examples=[10, "$inputs.text_padding"], ) - border_radius: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + border_radius: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Radius of the label in pixels.", default=0, examples=[0, "$inputs.border_radius"], ) + class LabelVisualizationBlock(VisualizationBlock): def __init__(self): self.annotatorCache = {} @@ -135,23 +123,28 @@ def getAnnotator( text_padding: int, border_radius: int, ) -> sv.annotators.base.BaseAnnotator: - key = "_".join(map(str, [ - color_palette, - palette_size, - color_axis, - text_position, - text_color, - text_scale, - text_thickness, - text_padding, - border_radius, - ])) - + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + text_position, + text_color, + text_scale, + text_thickness, + text_padding, + border_radius, + ], + ) + ) + if key not in self.annotatorCache: palette = self.getPalette(color_palette, palette_size, custom_colors) text_color = strToColor(text_color) - + self.annotatorCache[key] = sv.LabelAnnotator( color=palette, color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), @@ -160,10 +153,10 @@ def getAnnotator( text_scale=text_scale, text_thickness=text_thickness, text_padding=text_padding, - border_radius=border_radius + border_radius=border_radius, ) - return self.annotatorCache[key] + return self.annotatorCache[key] async def run( self, @@ -196,14 +189,15 @@ async def run( ) if text == "Class": - labels = predictions['class_name'] + labels = predictions["class_name"] elif text == "Confidence": labels = [f"{confidence:.2f}" for confidence in predictions.confidence] elif text == "Class and Confidence": labels = [ f"{class_name} {confidence:.2f}" - for class_name, confidence - in zip(predictions['class_name'], predictions.confidence) + for class_name, confidence in zip( + predictions["class_name"], 
predictions.confidence + ) ] elif text == "Index": labels = [str(i) for i in range(len(predictions))] @@ -223,7 +217,7 @@ async def run( annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, detections=predictions, - labels=labels + labels=labels, ) output = WorkflowImageData( @@ -232,6 +226,4 @@ async def run( numpy_image=annotated_image, ) - return { - OUTPUT_IMAGE_KEY: output - } + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/mask.py b/inference/core/workflows/core_steps/visualizations/mask.py index 0e532e202..124063197 100644 --- a/inference/core/workflows/core_steps/visualizations/mask.py +++ b/inference/core/workflows/core_steps/visualizations/mask.py @@ -1,41 +1,33 @@ -from inference.core.workflows.core_steps.visualizations.base import ( - VisualizationManifest, - VisualizationBlock -) - from typing import List, Literal, Optional, Type, Union import supervision as sv from pydantic import ConfigDict, Field -from inference.core.workflows.entities.base import ( - WorkflowImageData, +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationBlock, + VisualizationManifest, ) +from inference.core.workflows.entities.base import WorkflowImageData from inference.core.workflows.entities.types import ( BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - FloatZeroToOne, FLOAT_ZERO_TO_ONE_KIND, - INTEGER_KIND, + FloatZeroToOne, + StepOutputSelector, WorkflowParameterSelector, - StepOutputSelector -) -from inference.core.workflows.prototypes.block import ( - BlockResult, - WorkflowBlockManifest, ) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest OUTPUT_IMAGE_KEY: str = "image" TYPE: str = "MaskVisualization" -SHORT_DESCRIPTION = ( - "Paints a mask over detected objects in an image." -) +SHORT_DESCRIPTION = "Paints a mask over detected objects in an image." 
LONG_DESCRIPTION = """ The `MaskVisualization` block uses a detected polygon from an instance segmentation to draw a mask using `sv.MaskAnnotator`. """ + class MaskManifest(VisualizationManifest): type: Literal[f"{TYPE}"] model_config = ConfigDict( @@ -55,13 +47,14 @@ class MaskManifest(VisualizationManifest): description="Predictions", examples=["$steps.instance_segmentation_model.predictions"], ) - - opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + + opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Transparency of the Mask overlay.", default=0.5, examples=[0.5, "$inputs.opacity"], ) + class MaskVisualizationBlock(VisualizationBlock): def __init__(self): self.annotatorCache = {} @@ -78,23 +71,28 @@ def getAnnotator( color_axis: str, opacity: float, ) -> sv.annotators.base.BaseAnnotator: - key = "_".join(map(str, [ - color_palette, - palette_size, - color_axis, - opacity, - ])) - + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + opacity, + ], + ) + ) + if key not in self.annotatorCache: palette = self.getPalette(color_palette, palette_size, custom_colors) self.annotatorCache[key] = sv.MaskAnnotator( color=palette, color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), - opacity=opacity + opacity=opacity, ) - return self.annotatorCache[key] + return self.annotatorCache[key] async def run( self, @@ -117,7 +115,7 @@ async def run( annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, - detections=predictions + detections=predictions, ) output = WorkflowImageData( @@ -126,6 +124,4 @@ async def run( numpy_image=annotated_image, ) - return { - OUTPUT_IMAGE_KEY: output - } + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/pixelate.py 
b/inference/core/workflows/core_steps/visualizations/pixelate.py index 36f988d9e..8114f5448 100644 --- a/inference/core/workflows/core_steps/visualizations/pixelate.py +++ b/inference/core/workflows/core_steps/visualizations/pixelate.py @@ -3,25 +3,18 @@ import supervision as sv from pydantic import AliasChoices, ConfigDict, Field -from inference.core.workflows.entities.base import ( - OutputDefinition, - WorkflowImageData, -) -from inference.core.workflows.entities.types import ( - # IMAGE_KIND, - # OBJECT_DETECTION_PREDICTION_KIND, - # INSTANCE_SEGMENTATION_PREDICTION_KIND, - # KEYPOINT_DETECTION_PREDICTION_KIND, +from inference.core.workflows.entities.base import OutputDefinition, WorkflowImageData +from inference.core.workflows.entities.types import ( # IMAGE_KIND,; OBJECT_DETECTION_PREDICTION_KIND,; INSTANCE_SEGMENTATION_PREDICTION_KIND,; KEYPOINT_DETECTION_PREDICTION_KIND, BATCH_OF_IMAGES_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - INTEGER_KIND, + BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, BOOLEAN_KIND, + INTEGER_KIND, StepOutputImageSelector, StepOutputSelector, WorkflowImageSelector, - WorkflowParameterSelector + WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -32,14 +25,13 @@ OUTPUT_IMAGE_KEY: str = "image" TYPE: str = "PixelateVisualization" -SHORT_DESCRIPTION = ( - "Pixelates detected objects in an image." -) +SHORT_DESCRIPTION = "Pixelates detected objects in an image." LONG_DESCRIPTION = """ The `PixelateVisualization` block pixelates detected objects in an image using Supervision's `sv.PixelateAnnotator`. 
""" + class PixelateManifest(WorkflowBlockManifest): type: Literal[f"{TYPE}"] model_config = ConfigDict( @@ -68,12 +60,12 @@ class PixelateManifest(WorkflowBlockManifest): validation_alias=AliasChoices("image", "images"), ) - copy_image: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + copy_image: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore description="Duplicate the image contents (vs overwriting the image in place). Deselect for chained visualizations that should stack on previous ones where the intermediate state is not needed.", - default=True + default=True, ) - - pixel_size: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + + pixel_size: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Size of the pixelation.", default=20, examples=[20, "$inputs.pixel_size"], @@ -90,6 +82,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: ), ] + class PixelateVisualizationBlock(WorkflowBlock): def __init__(self): self.annotatorCache = {} @@ -102,15 +95,11 @@ def getAnnotator( self, pixel_size: int, ) -> sv.annotators.base.BaseAnnotator: - key = "_".join(map(str, [ - pixel_size - ])) - + key = "_".join(map(str, [pixel_size])) + if key not in self.annotatorCache: - self.annotatorCache[key] = sv.PixelateAnnotator( - pixel_size=pixel_size - ) - return self.annotatorCache[key] + self.annotatorCache[key] = sv.PixelateAnnotator(pixel_size=pixel_size) + return self.annotatorCache[key] async def run( self, @@ -125,7 +114,7 @@ async def run( annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, - detections=predictions + detections=predictions, ) output = WorkflowImageData( @@ -134,6 +123,4 @@ async def run( numpy_image=annotated_image, ) - return { - OUTPUT_IMAGE_KEY: output - } + return {OUTPUT_IMAGE_KEY: output} diff --git 
a/inference/core/workflows/core_steps/visualizations/polygon.py b/inference/core/workflows/core_steps/visualizations/polygon.py index cb6e20b39..becda5adb 100644 --- a/inference/core/workflows/core_steps/visualizations/polygon.py +++ b/inference/core/workflows/core_steps/visualizations/polygon.py @@ -1,41 +1,32 @@ -from inference.core.workflows.core_steps.visualizations.base import ( - VisualizationManifest, - VisualizationBlock -) - from typing import List, Literal, Optional, Type, Union import supervision as sv from pydantic import ConfigDict, Field -from inference.core.workflows.entities.base import ( - WorkflowImageData, +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationBlock, + VisualizationManifest, ) +from inference.core.workflows.entities.base import WorkflowImageData from inference.core.workflows.entities.types import ( BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - FloatZeroToOne, - FLOAT_ZERO_TO_ONE_KIND, INTEGER_KIND, + StepOutputSelector, WorkflowParameterSelector, - StepOutputSelector -) -from inference.core.workflows.prototypes.block import ( - BlockResult, - WorkflowBlockManifest, ) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest OUTPUT_IMAGE_KEY: str = "image" TYPE: str = "PolygonVisualization" -SHORT_DESCRIPTION = ( - "Draws a polygon around detected objects in an image." -) +SHORT_DESCRIPTION = "Draws a polygon around detected objects in an image." LONG_DESCRIPTION = """ The `PolygonVisualization` block uses a detections from an instance segmentation to draw polygons around objects using `sv.PolygonAnnotator`. 
""" + class PolygonManifest(VisualizationManifest): type: Literal[f"{TYPE}"] model_config = ConfigDict( @@ -55,13 +46,14 @@ class PolygonManifest(VisualizationManifest): description="Predictions", examples=["$steps.instance_segmentation_model.predictions"], ) - - thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + + thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the outline in pixels.", default=2, examples=[2, "$inputs.thickness"], ) + class PolygonVisualizationBlock(VisualizationBlock): def __init__(self): self.annotatorCache = {} @@ -78,23 +70,28 @@ def getAnnotator( color_axis: str, thickness: int, ) -> sv.annotators.base.BaseAnnotator: - key = "_".join(map(str, [ - color_palette, - palette_size, - color_axis, - thickness, - ])) - + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + thickness, + ], + ) + ) + if key not in self.annotatorCache: palette = self.getPalette(color_palette, palette_size, custom_colors) self.annotatorCache[key] = sv.PolygonAnnotator( color=palette, color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), - thickness=thickness + thickness=thickness, ) - return self.annotatorCache[key] + return self.annotatorCache[key] async def run( self, @@ -117,7 +114,7 @@ async def run( annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, - detections=predictions + detections=predictions, ) output = WorkflowImageData( @@ -126,6 +123,4 @@ async def run( numpy_image=annotated_image, ) - return { - OUTPUT_IMAGE_KEY: output - } + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/triangle.py b/inference/core/workflows/core_steps/visualizations/triangle.py index eb1068225..aae99aa49 100644 --- a/inference/core/workflows/core_steps/visualizations/triangle.py +++ 
b/inference/core/workflows/core_steps/visualizations/triangle.py @@ -1,37 +1,30 @@ -from inference.core.workflows.core_steps.visualizations.base import ( - VisualizationManifest, - VisualizationBlock -) - from typing import List, Literal, Optional, Type, Union import supervision as sv from pydantic import ConfigDict, Field -from inference.core.workflows.entities.base import ( - WorkflowImageData, +from inference.core.workflows.core_steps.visualizations.base import ( + VisualizationBlock, + VisualizationManifest, ) +from inference.core.workflows.entities.base import WorkflowImageData from inference.core.workflows.entities.types import ( INTEGER_KIND, STRING_KIND, - WorkflowParameterSelector -) -from inference.core.workflows.prototypes.block import ( - BlockResult, - WorkflowBlockManifest, + WorkflowParameterSelector, ) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest OUTPUT_IMAGE_KEY: str = "image" TYPE: str = "TriangleVisualization" -SHORT_DESCRIPTION = ( - "Draws triangle markers on an image at specific coordinates based on provided detections." -) +SHORT_DESCRIPTION = "Draws triangle markers on an image at specific coordinates based on provided detections." LONG_DESCRIPTION = """ The `TriangleVisualization` block draws triangle markers on an image at specific coordinates based on provided detections using Supervision's `sv.TriangleAnnotator`. 
""" + class TriangleManifest(VisualizationManifest): type: Literal[f"{TYPE}"] model_config = ConfigDict( @@ -57,30 +50,31 @@ class TriangleManifest(VisualizationManifest): "CENTER_OF_MASS", ], WorkflowParameterSelector(kind=[STRING_KIND]), - ] = Field( # type: ignore + ] = Field( # type: ignore default="TOP_CENTER", description="The anchor position for placing the triangle.", examples=["CENTER", "$inputs.position"], ) - base: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + base: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Base width of the triangle in pixels.", default=10, examples=[10, "$inputs.base"], ) - height: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + height: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Height of the triangle in pixels.", default=10, examples=[10, "$inputs.height"], ) - - outline_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + + outline_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the outline of the triangle in pixels.", default=0, examples=[2, "$inputs.outline_thickness"], ) + class TriangleVisualizationBlock(VisualizationBlock): def __init__(self): self.annotatorCache = {} @@ -100,16 +94,21 @@ def getAnnotator( height: int, outline_thickness: int, ) -> sv.annotators.base.BaseAnnotator: - key = "_".join(map(str, [ - color_palette, - palette_size, - color_axis, - position, - base, - height, - outline_thickness, - ])) - + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + position, + base, + height, + outline_thickness, + ], + ) + ) + if key not in self.annotatorCache: palette = self.getPalette(color_palette, palette_size, custom_colors) @@ -119,10 +118,10 @@ def getAnnotator( position=getattr(sv.Position, 
position), base=base, height=height, - outline_thickness=outline_thickness + outline_thickness=outline_thickness, ) - return self.annotatorCache[key] + return self.annotatorCache[key] async def run( self, @@ -151,7 +150,7 @@ async def run( annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, - detections=predictions + detections=predictions, ) output = WorkflowImageData( @@ -160,6 +159,4 @@ async def run( numpy_image=annotated_image, ) - return { - OUTPUT_IMAGE_KEY: output - } + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/utils.py b/inference/core/workflows/core_steps/visualizations/utils.py index c75cc640e..b548db60f 100644 --- a/inference/core/workflows/core_steps/visualizations/utils.py +++ b/inference/core/workflows/core_steps/visualizations/utils.py @@ -1,5 +1,6 @@ import supervision as sv + def strToColor(color: str) -> sv.Color: if color.startswith("#"): return sv.Color.from_hex(color) @@ -12,4 +13,6 @@ def strToColor(color: str) -> sv.Color: elif hasattr(sv.Color, color.upper()): return getattr(sv.Color, color.upper()) else: - raise ValueError(f"Invalid text color: {color}; valid formats are #RRGGBB, rgb(R, G, B), bgr(B, G, R), or a valid color name (like WHITE, BLACK, or BLUE).") \ No newline at end of file + raise ValueError( + f"Invalid text color: {color}; valid formats are #RRGGBB, rgb(R, G, B), bgr(B, G, R), or a valid color name (like WHITE, BLACK, or BLUE)." 
+ ) From 37001153fefe788fe19f7e914869ba605a5db735 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Wed, 17 Jul 2024 12:50:01 -0700 Subject: [PATCH 24/36] Fix failing test on Python 3.9 --- inference/core/workflows/core_steps/visualizations/base.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/inference/core/workflows/core_steps/visualizations/base.py b/inference/core/workflows/core_steps/visualizations/base.py index 170943e96..344f7e352 100644 --- a/inference/core/workflows/core_steps/visualizations/base.py +++ b/inference/core/workflows/core_steps/visualizations/base.py @@ -28,7 +28,6 @@ OUTPUT_IMAGE_KEY: str = "image" - class VisualizationManifest(WorkflowBlockManifest, ABC): model_config = ConfigDict( json_schema_extra={ @@ -122,7 +121,7 @@ class VisualizationManifest(WorkflowBlockManifest, ABC): ) palette_size: Union[ - INTEGER_KIND, + int, WorkflowParameterSelector(kind=[INTEGER_KIND]), ] = Field( # type: ignore default=10, From e0dfa086af6ce3113f39ef8fc49e352e868edbb0 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Wed, 17 Jul 2024 12:52:17 -0700 Subject: [PATCH 25/36] Fix style --- inference/core/workflows/core_steps/visualizations/base.py | 1 + 1 file changed, 1 insertion(+) diff --git a/inference/core/workflows/core_steps/visualizations/base.py b/inference/core/workflows/core_steps/visualizations/base.py index 344f7e352..df7818af2 100644 --- a/inference/core/workflows/core_steps/visualizations/base.py +++ b/inference/core/workflows/core_steps/visualizations/base.py @@ -28,6 +28,7 @@ OUTPUT_IMAGE_KEY: str = "image" + class VisualizationManifest(WorkflowBlockManifest, ABC): model_config = ConfigDict( json_schema_extra={ From 93165a81b50b3daa55fbc3359ed6a500825266fc Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Wed, 17 Jul 2024 13:46:47 -0700 Subject: [PATCH 26/36] bounding_box test --- .../visualizations/test_bounding_box.py | 88 +++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 
tests/workflows/unit_tests/core_steps/visualizations/test_bounding_box.py diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_bounding_box.py b/tests/workflows/unit_tests/core_steps/visualizations/test_bounding_box.py new file mode 100644 index 000000000..0c7c4655b --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_bounding_box.py @@ -0,0 +1,88 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.bounding_box import ( + BoundingBoxManifest, + BoundingBoxVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + WorkflowImageData, +) + + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_bounding_box_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "BoundingBoxVisualization", + "name": "square1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "thickness": 1, + "roundness": 0 + } + + # when + result = BoundingBoxManifest.model_validate(data) + + # then + assert result == BoundingBoxManifest( + type="BoundingBoxVisualization", + name="square1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + thickness=1, + roundness=0 + ) + +def test_bounding_box_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "BoundingBoxVisualization", + "name": "square1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "thickness": 1, + "roundness": 0 + } + + # when + with pytest.raises(ValidationError): + _ = BoundingBoxManifest.model_validate(data) + +async def test_bounding_box_visualization_block() -> None: + # given + block = BoundingBoxVisualizationBlock() + + output = await block.run( + image=WorkflowImageData( + parent_metadata=None, + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + 
predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="DEFAULT", + palette_size=10, + custom_colors=None, + color_axis="CLASS", + thickness=1, + roundness=0, + ) + + assert output is not None + assert hasattr(output, "image") + assert hasattr(output.image, "numpy_image") + + # dimensions of output match input + assert output.image.numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.image.numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) + From 65f51ccda0024f1a7b6d817fab48560861b465e2 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Wed, 17 Jul 2024 15:29:55 -0700 Subject: [PATCH 27/36] Working Unit Tests --- .../core_steps/visualizations/test_blur.py | 83 +++++++++++++ .../visualizations/test_bounding_box.py | 17 ++- .../core_steps/visualizations/test_circle.py | 86 ++++++++++++++ .../core_steps/visualizations/test_color.py | 86 ++++++++++++++ .../core_steps/visualizations/test_corner.py | 87 ++++++++++++++ .../core_steps/visualizations/test_crop.py | 94 +++++++++++++++ .../core_steps/visualizations/test_dot.py | 94 +++++++++++++++ .../core_steps/visualizations/test_ellipse.py | 94 +++++++++++++++ .../core_steps/visualizations/test_halo.py | 96 +++++++++++++++ .../core_steps/visualizations/test_label.py | 110 ++++++++++++++++++ .../core_steps/visualizations/test_mask.py | 93 +++++++++++++++ .../visualizations/test_pixelate.py | 84 +++++++++++++ .../core_steps/visualizations/test_polygon.py | 93 +++++++++++++++ .../visualizations/test_triangle.py | 99 ++++++++++++++++ 14 files changed, 1211 insertions(+), 5 deletions(-) create mode 100644 tests/workflows/unit_tests/core_steps/visualizations/test_blur.py create mode 100644 tests/workflows/unit_tests/core_steps/visualizations/test_circle.py create mode 100644 
tests/workflows/unit_tests/core_steps/visualizations/test_color.py create mode 100644 tests/workflows/unit_tests/core_steps/visualizations/test_corner.py create mode 100644 tests/workflows/unit_tests/core_steps/visualizations/test_crop.py create mode 100644 tests/workflows/unit_tests/core_steps/visualizations/test_dot.py create mode 100644 tests/workflows/unit_tests/core_steps/visualizations/test_ellipse.py create mode 100644 tests/workflows/unit_tests/core_steps/visualizations/test_halo.py create mode 100644 tests/workflows/unit_tests/core_steps/visualizations/test_label.py create mode 100644 tests/workflows/unit_tests/core_steps/visualizations/test_mask.py create mode 100644 tests/workflows/unit_tests/core_steps/visualizations/test_pixelate.py create mode 100644 tests/workflows/unit_tests/core_steps/visualizations/test_polygon.py create mode 100644 tests/workflows/unit_tests/core_steps/visualizations/test_triangle.py diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_blur.py b/tests/workflows/unit_tests/core_steps/visualizations/test_blur.py new file mode 100644 index 000000000..0b9266dfb --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_blur.py @@ -0,0 +1,83 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.blur import ( + BlurManifest, + BlurVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_blur_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "BlurVisualization", + "name": "blur1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "kernel_size": 5 + } + + # when + result = BlurManifest.model_validate(data) + + # then + assert result == BlurManifest( + 
type="BlurVisualization", + name="blur1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + kernel_size=5 + ) + + +def test_blur_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "BlurVisualization", + "name": "blur1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "kernel_size": 5 + } + + # when + with pytest.raises(ValidationError): + _ = BlurManifest.model_validate(data) + +@pytest.mark.asyncio +async def test_blur_visualization_block() -> None: + # given + block = BlurVisualizationBlock() + + start_image = np.random.randint(0, 255, (1000, 1000, 3), dtype=np.uint8) + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=start_image, + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + kernel_size=5 + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, start_image) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_bounding_box.py b/tests/workflows/unit_tests/core_steps/visualizations/test_bounding_box.py index 0c7c4655b..392db010a 100644 --- a/tests/workflows/unit_tests/core_steps/visualizations/test_bounding_box.py +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_bounding_box.py @@ -11,6 +11,10 @@ from inference.core.workflows.entities.base import ( WorkflowImageData, ) +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) @pytest.mark.parametrize("images_field_alias", ["images", "image"]) @@ -53,13 +57,14 @@ def 
test_bounding_box_validation_when_invalid_image_is_given() -> None: with pytest.raises(ValidationError): _ = BoundingBoxManifest.model_validate(data) +@pytest.mark.asyncio async def test_bounding_box_visualization_block() -> None: # given block = BoundingBoxVisualizationBlock() output = await block.run( image=WorkflowImageData( - parent_metadata=None, + parent_metadata=ImageParentMetadata(parent_id="some"), numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), ), predictions=sv.Detections( @@ -77,12 +82,14 @@ async def test_bounding_box_visualization_block() -> None: roundness=0, ) + print("output", output) + assert output is not None - assert hasattr(output, "image") - assert hasattr(output.image, "numpy_image") + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") # dimensions of output match input - assert output.image.numpy_image.shape == (1000, 1000, 3) + assert output.get("image").numpy_image.shape == (1000, 1000, 3) # check if the image is modified - assert not np.array_equal(output.image.numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_circle.py b/tests/workflows/unit_tests/core_steps/visualizations/test_circle.py new file mode 100644 index 000000000..4c3f07b86 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_circle.py @@ -0,0 +1,86 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.circle import ( + CircleManifest, + CircleVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_circle_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given 
+ data = { + "type": "CircleVisualization", + "name": "circle1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "thickness": 10 + } + + # when + result = CircleManifest.model_validate(data) + + # then + assert result == CircleManifest( + type="CircleVisualization", + name="circle1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + thickness=10 + ) + + +def test_circle_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "CircleVisualization", + "name": "circle1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "thickness": 10 + } + + # when + with pytest.raises(ValidationError): + _ = CircleManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_circle_visualization_block() -> None: + # given + block = CircleVisualizationBlock() + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="DEFAULT", + palette_size=10, + custom_colors=None, + color_axis="CLASS", + thickness=10, + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_color.py b/tests/workflows/unit_tests/core_steps/visualizations/test_color.py new file mode 100644 index 000000000..e4b057fbb --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_color.py @@ -0,0 +1,86 
@@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.color import ( + ColorManifest, + ColorVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_color_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "ColorVisualization", + "name": "color1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "opacity": 0.5 + } + + # when + result = ColorManifest.model_validate(data) + + # then + assert result == ColorManifest( + type="ColorVisualization", + name="color1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + opacity=0.5 + ) + + +def test_color_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "ColorVisualization", + "name": "color1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "opacity": 0.5 + } + + # when + with pytest.raises(ValidationError): + _ = ColorManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_color_visualization_block() -> None: + # given + block = ColorVisualizationBlock() + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="DEFAULT", + palette_size=10, + custom_colors=None, + color_axis="CLASS", + opacity=0.5, + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert 
output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_corner.py b/tests/workflows/unit_tests/core_steps/visualizations/test_corner.py new file mode 100644 index 000000000..1104e544a --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_corner.py @@ -0,0 +1,87 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.corner import ( + CornerManifest, + CornerVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_corner_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "CornerVisualization", + "name": "corner1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "corner_length": 5 + } + + # when + result = CornerManifest.model_validate(data) + + # then + assert result == CornerManifest( + type="CornerVisualization", + name="corner1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + corner_length=5 + ) + + +def test_corner_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "CornerVisualization", + "name": "corner1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "corner_length": 5 + } + + # when + with pytest.raises(ValidationError): + _ = CornerManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_corner_visualization_block() -> None: + # given + block = CornerVisualizationBlock() + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + 
numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="DEFAULT", + palette_size=10, + custom_colors=None, + color_axis="CLASS", + thickness=2, + corner_length=5, + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_crop.py b/tests/workflows/unit_tests/core_steps/visualizations/test_crop.py new file mode 100644 index 000000000..24a5c3e87 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_crop.py @@ -0,0 +1,94 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.crop import ( + CropManifest, + CropVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_crop_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "CropVisualization", + "name": "crop1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "position": 'TOP_CENTER', + "scale_factor": 2.0, + "border_thickness": 2 + } + + # when + result = CropManifest.model_validate(data) + + # then + assert result == CropManifest( + type="CropVisualization", + name="crop1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + position='TOP_CENTER', + 
scale_factor=2.0, + border_thickness=2 + ) + + +def test_crop_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "CropVisualization", + "name": "crop1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "position": 'TOP_CENTER', + "scale_factor": 2.0, + "border_thickness": 2 + } + + # when + with pytest.raises(ValidationError): + _ = CropManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_crop_visualization_block() -> None: + # given + block = CropVisualizationBlock() + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="DEFAULT", + palette_size=10, + custom_colors=None, + color_axis="CLASS", + position='TOP_CENTER', + scale_factor=2.0, + border_thickness=2 + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_dot.py b/tests/workflows/unit_tests/core_steps/visualizations/test_dot.py new file mode 100644 index 000000000..be614cb16 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_dot.py @@ -0,0 +1,94 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.dot import ( + DotManifest, + DotVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + 
ImageParentMetadata, + WorkflowImageData, +) + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_dot_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "DotVisualization", + "name": "dot1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "position": "CENTER", + "radius": 5, + "outline_thickness": 1 + } + + # when + result = DotManifest.model_validate(data) + + # then + assert result == DotManifest( + type="DotVisualization", + name="dot1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + position="CENTER", + radius=5, + outline_thickness=1 + ) + + +def test_dot_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "DotVisualization", + "name": "dot1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "position": "CENTER", + "radius": 5, + "outline_thickness": 1 + } + + # when + with pytest.raises(ValidationError): + _ = DotManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_dot_visualization_block() -> None: + # given + block = DotVisualizationBlock() + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="viridis", + palette_size=10, + custom_colors=None, + color_axis="CLASS", + position="CENTER", + radius=5, + outline_thickness=1 + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not 
np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_ellipse.py b/tests/workflows/unit_tests/core_steps/visualizations/test_ellipse.py new file mode 100644 index 000000000..a9a37a4a4 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_ellipse.py @@ -0,0 +1,94 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.ellipse import ( + EllipseManifest, + EllipseVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_ellipse_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "EllipseVisualization", + "name": "ellipse1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "thickness": 2, + "start_angle": -45, + "end_angle": 235 + } + + # when + result = EllipseManifest.model_validate(data) + + # then + assert result == EllipseManifest( + type="EllipseVisualization", + name="ellipse1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + thickness=2, + start_angle=-45, + end_angle=235 + ) + + +def test_ellipse_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "EllipseVisualization", + "name": "ellipse1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "thickness": 2, + "start_angle": -45, + "end_angle": 235 + } + + # when + with pytest.raises(ValidationError): + _ = EllipseManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_ellipse_visualization_block() -> None: + # given + block = EllipseVisualizationBlock() + + output = await block.run( + image=WorkflowImageData( + 
parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="CUSTOM", + palette_size=1, + custom_colors=["#FF0000"], + color_axis="CLASS", + thickness=2, + start_angle=-45, + end_angle=235 + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_halo.py b/tests/workflows/unit_tests/core_steps/visualizations/test_halo.py new file mode 100644 index 000000000..1c5b3b92f --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_halo.py @@ -0,0 +1,96 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.halo import ( + HaloManifest, + HaloVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_halo_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "HaloVisualization", + "name": "halo1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "opacity": 0.8, + "kernel_size": 40 + } + + # when + result = HaloManifest.model_validate(data) + + # then + assert result == HaloManifest( + type="HaloVisualization", + name="halo1", + images="$inputs.image", + predictions="$steps.od_model.predictions", 
+ opacity=0.8, + kernel_size=40 + ) + + +def test_halo_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "HaloVisualization", + "name": "halo1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "opacity": 0.8, + "kernel_size": 40 + } + + # when + with pytest.raises(ValidationError): + _ = HaloManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_halo_visualization_block() -> None: + # given + block = HaloVisualizationBlock() + + mask = np.zeros((3, 1000, 1000), dtype=np.bool_) + mask[0, 0:20, 0:20] = True + mask[1, 80:120, 80:120] = True + mask[2, 450:550, 450:550] = True + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + mask=mask, + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="DEFAULT", + palette_size=10, + custom_colors=[], + color_axis="CLASS", + opacity=0.8, + kernel_size=40 + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_label.py b/tests/workflows/unit_tests/core_steps/visualizations/test_label.py new file mode 100644 index 000000000..29bf42d11 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_label.py @@ -0,0 +1,110 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.label import ( + LabelManifest, + 
LabelVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_label_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "LabelVisualization", + "name": "label1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "text": "Class", + "text_position": "TOP_LEFT", + "text_color": "WHITE", + "text_scale": 1.0, + "text_thickness": 1, + "text_padding": 10, + "border_radius": 0 + } + + # when + result = LabelManifest.model_validate(data) + + # then + assert result == LabelManifest( + type="LabelVisualization", + name="label1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + text='Class', + text_position='TOP_LEFT', + text_color='WHITE', + text_scale=1.0, + text_thickness=1, + text_padding=10, + border_radius=0 + ) + + +def test_label_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "LabelVisualization", + "name": "label1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "text": "Class", + "text_position": "TOP_LEFT", + "text_color": "WHITE", + "text_scale": 1.0, + "text_thickness": 1, + "text_padding": 10, + "border_radius": 0 + } + + # when + with pytest.raises(ValidationError): + _ = LabelManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_label_visualization_block() -> None: + # given + block = LabelVisualizationBlock() + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="DEFAULT", + palette_size=10, + 
custom_colors=None, + color_axis="CLASS", + text='Class', + text_position='TOP_LEFT', + text_color='WHITE', + text_scale=1.0, + text_thickness=1, + text_padding=10, + border_radius=0 + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_mask.py b/tests/workflows/unit_tests/core_steps/visualizations/test_mask.py new file mode 100644 index 000000000..97778a714 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_mask.py @@ -0,0 +1,93 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.mask import ( + MaskManifest, + MaskVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_mask_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "MaskVisualization", + "name": "mask1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "opacity": 0.5 + } + + # when + result = MaskManifest.model_validate(data) + + # then + assert result == MaskManifest( + type="MaskVisualization", + name="mask1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + opacity=0.5 + ) + + +def test_mask_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "MaskVisualization", + "name": "mask1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "opacity": 0.5 + } + + # when + with 
pytest.raises(ValidationError): + _ = MaskManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_mask_visualization_block() -> None: + # given + block = MaskVisualizationBlock() + + mask = np.zeros((3, 1000, 1000), dtype=np.bool_) + mask[0, 0:20, 0:20] = True + mask[1, 80:120, 80:120] = True + mask[2, 450:550, 450:550] = True + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + mask=mask, + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="viridis", + palette_size=10, + custom_colors=["#000000", "#FFFFFF"], + color_axis="CLASS", + opacity=0.5 + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_pixelate.py b/tests/workflows/unit_tests/core_steps/visualizations/test_pixelate.py new file mode 100644 index 000000000..433371739 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_pixelate.py @@ -0,0 +1,84 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.pixelate import ( + PixelateManifest, + PixelateVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + WorkflowImageData, + ImageParentMetadata, +) + + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_pixelate_validation_when_valid_manifest_is_given(images_field_alias: str) 
-> None: + # given + data = { + "type": "PixelateVisualization", + "name": "pixelate1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "pixel_size": 10 + } + + # when + result = PixelateManifest.model_validate(data) + + # then + assert result == PixelateManifest( + type="PixelateVisualization", + name="pixelate1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + pixel_size=10 + ) + + +def test_pixelate_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "PixelateVisualization", + "name": "pixelate1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "pixel_size": 10 + } + + # when + with pytest.raises(ValidationError): + _ = PixelateManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_pixelate_visualization_block() -> None: + # given + block = PixelateVisualizationBlock() + + start_image = np.random.randint(0, 255, (1000, 1000, 3), dtype=np.uint8) + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=start_image, + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + pixel_size=10, + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, start_image) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_polygon.py b/tests/workflows/unit_tests/core_steps/visualizations/test_polygon.py new file mode 100644 index 000000000..a1cdd0afd --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_polygon.py @@ -0,0 +1,93 @@ +import numpy as np 
+import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.polygon import ( + PolygonManifest, + PolygonVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_polygon_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "PolygonVisualization", + "name": "polygon1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "thickness": 2 + } + + # when + result = PolygonManifest.model_validate(data) + + # then + assert result == PolygonManifest( + type="PolygonVisualization", + name="polygon1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + thickness=2 + ) + + +def test_polygon_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "PolygonVisualization", + "name": "polygon1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "thickness": 2 + } + + # when + with pytest.raises(ValidationError): + _ = PolygonManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_polygon_visualization_block() -> None: + # given + block = PolygonVisualizationBlock() + + mask = np.zeros((3, 1000, 1000), dtype=np.bool_) + mask[0, 0:20, 0:20] = True + mask[1, 80:120, 80:120] = True + mask[2, 450:550, 450:550] = True + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + mask=mask, + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="tab10", + palette_size=10, + custom_colors=["#FF0000", "#00FF00", "#0000FF"], + 
color_axis="CLASS", + thickness=2 + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_triangle.py b/tests/workflows/unit_tests/core_steps/visualizations/test_triangle.py new file mode 100644 index 000000000..4bec9deb7 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_triangle.py @@ -0,0 +1,99 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.triangle import ( + TriangleManifest, + TriangleVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_triangle_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "TriangleVisualization", + "name": "triangle1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "position": "TOP_CENTER", + "base": 30, + "height": 30, + "outline_thickness": 1 + } + + # when + result = TriangleManifest.model_validate(data) + + # then + assert result == TriangleManifest( + type="TriangleVisualization", + name="triangle1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + position="TOP_CENTER", + base=30, + height=30, + outline_thickness=1 + ) + + +def test_triangle_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "TriangleVisualization", + "name": "triangle1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "position": 
"TOP_CENTER", + "base": 30, + "height": 30, + "outline_thickness": 1 + } + + # when + with pytest.raises(ValidationError): + _ = TriangleManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_triangle_visualization_block() -> None: + # given + block = TriangleVisualizationBlock() + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="tab10", + palette_size=10, + custom_colors=["#FF0000", "#00FF00", "#0000FF"], + color_axis="CLASS", + position="TOP_CENTER", + base=30, + height=30, + outline_thickness=1 + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) From c4a5eaff7e9221ef62ed10c83aea03382c6aafea Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Wed, 17 Jul 2024 15:32:58 -0700 Subject: [PATCH 28/36] Add test of copy=False --- .../visualizations/test_bounding_box.py | 45 ++++++++++++++++++- 1 file changed, 44 insertions(+), 1 deletion(-) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_bounding_box.py b/tests/workflows/unit_tests/core_steps/visualizations/test_bounding_box.py index 392db010a..87a77b795 100644 --- a/tests/workflows/unit_tests/core_steps/visualizations/test_bounding_box.py +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_bounding_box.py @@ -62,10 +62,11 @@ async def test_bounding_box_visualization_block() -> None: # given block = BoundingBoxVisualizationBlock() + start_image = 
np.zeros((1000, 1000, 3), dtype=np.uint8) output = await block.run( image=WorkflowImageData( parent_metadata=ImageParentMetadata(parent_id="some"), - numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + numpy_image=start_image, ), predictions=sv.Detections( xyxy=np.array( @@ -92,4 +93,46 @@ async def test_bounding_box_visualization_block() -> None: assert output.get("image").numpy_image.shape == (1000, 1000, 3) # check if the image is modified assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) + + # check that the image is copied + assert output.get("image").numpy_image.__array_interface__['data'][0] != start_image.__array_interface__['data'][0] + +@pytest.mark.asyncio +async def test_bounding_box_visualization_block_nocopy() -> None: + # given + block = BoundingBoxVisualizationBlock() + + start_image = np.zeros((1000, 1000, 3), dtype=np.uint8) + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=start_image, + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=False, + color_palette="DEFAULT", + palette_size=10, + custom_colors=None, + color_axis="CLASS", + thickness=1, + roundness=0, + ) + + print("output", output) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) + + # check if the image reference references the same memory space as the start_image + assert output.get("image").numpy_image.__array_interface__['data'][0] == start_image.__array_interface__['data'][0] \ No newline at end of file From 
ef6184c0f4377db8835f5234a490ec82916c10e3 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Thu, 18 Jul 2024 09:19:32 -0700 Subject: [PATCH 29/36] Add init.py --- inference/core/workflows/core_steps/visualizations/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 inference/core/workflows/core_steps/visualizations/__init__.py diff --git a/inference/core/workflows/core_steps/visualizations/__init__.py b/inference/core/workflows/core_steps/visualizations/__init__.py new file mode 100644 index 000000000..e69de29bb From bac72d39864d69c3ee5402e0d5ff453d79ae52e2 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Thu, 18 Jul 2024 09:22:08 -0700 Subject: [PATCH 30/36] Clean up comments --- .../core/workflows/core_steps/visualizations/base.py | 8 +++----- .../core/workflows/core_steps/visualizations/blur.py | 2 +- .../core/workflows/core_steps/visualizations/pixelate.py | 2 +- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/inference/core/workflows/core_steps/visualizations/base.py b/inference/core/workflows/core_steps/visualizations/base.py index df7818af2..7877ea02c 100644 --- a/inference/core/workflows/core_steps/visualizations/base.py +++ b/inference/core/workflows/core_steps/visualizations/base.py @@ -6,7 +6,7 @@ from inference.core.workflows.core_steps.visualizations.utils import strToColor from inference.core.workflows.entities.base import OutputDefinition, WorkflowImageData -from inference.core.workflows.entities.types import ( # IMAGE_KIND,; OBJECT_DETECTION_PREDICTION_KIND,; INSTANCE_SEGMENTATION_PREDICTION_KIND,; KEYPOINT_DETECTION_PREDICTION_KIND, +from inference.core.workflows.entities.types import ( BATCH_OF_IMAGES_KIND, BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, @@ -68,10 +68,9 @@ class VisualizationManifest(WorkflowBlockManifest, ABC): "Matplotlib Inferno", "Matplotlib Magma", "Matplotlib Cividis", - # 'LinearSegmentedColormap' object has no attribute 'colors' + # TODO: 
Re-enable once supervision 0.23 is released with a fix # "Matplotlib Twilight", # "Matplotlib Twilight_Shifted", - # 'LinearSegmentedColormap' object has no attribute 'colors' # "Matplotlib HSV", # "Matplotlib Jet", # "Matplotlib Turbo", @@ -91,14 +90,13 @@ class VisualizationManifest(WorkflowBlockManifest, ABC): "Matplotlib Tab20", "Matplotlib Tab20b", "Matplotlib Tab20c", - # 'LinearSegmentedColormap' object has no attribute 'colors' + # TODO: Re-enable once supervision 0.23 is released with a fix # "Matplotlib Ocean", # "Matplotlib Gist_Earth", # "Matplotlib Terrain", # "Matplotlib Stern", # "Matplotlib gnuplot", # "Matplotlib gnuplot2", - # 'LinearSegmentedColormap' object has no attribute 'colors' # "Matplotlib Spring", # "Matplotlib Summer", # "Matplotlib Autumn", diff --git a/inference/core/workflows/core_steps/visualizations/blur.py b/inference/core/workflows/core_steps/visualizations/blur.py index 7efa1aa39..45dc31647 100644 --- a/inference/core/workflows/core_steps/visualizations/blur.py +++ b/inference/core/workflows/core_steps/visualizations/blur.py @@ -4,7 +4,7 @@ from pydantic import AliasChoices, ConfigDict, Field from inference.core.workflows.entities.base import OutputDefinition, WorkflowImageData -from inference.core.workflows.entities.types import ( # IMAGE_KIND,; OBJECT_DETECTION_PREDICTION_KIND,; INSTANCE_SEGMENTATION_PREDICTION_KIND,; KEYPOINT_DETECTION_PREDICTION_KIND, +from inference.core.workflows.entities.types import ( BATCH_OF_IMAGES_KIND, BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/visualizations/pixelate.py b/inference/core/workflows/core_steps/visualizations/pixelate.py index 8114f5448..4e4be9e97 100644 --- a/inference/core/workflows/core_steps/visualizations/pixelate.py +++ b/inference/core/workflows/core_steps/visualizations/pixelate.py @@ -4,7 +4,7 @@ from pydantic import AliasChoices, ConfigDict, Field from 
inference.core.workflows.entities.base import OutputDefinition, WorkflowImageData -from inference.core.workflows.entities.types import ( # IMAGE_KIND,; OBJECT_DETECTION_PREDICTION_KIND,; INSTANCE_SEGMENTATION_PREDICTION_KIND,; KEYPOINT_DETECTION_PREDICTION_KIND, +from inference.core.workflows.entities.types import ( BATCH_OF_IMAGES_KIND, BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, From 91bf77507cd8b795ce1b1c409e5802ac85442bb9 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Thu, 18 Jul 2024 09:24:32 -0700 Subject: [PATCH 31/36] Add *args & **kwargs to @abstractmethod signatures --- inference/core/workflows/core_steps/visualizations/base.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/inference/core/workflows/core_steps/visualizations/base.py b/inference/core/workflows/core_steps/visualizations/base.py index 7877ea02c..79f93dbda 100644 --- a/inference/core/workflows/core_steps/visualizations/base.py +++ b/inference/core/workflows/core_steps/visualizations/base.py @@ -167,7 +167,7 @@ def get_manifest(cls) -> Type[VisualizationManifest]: pass @abstractmethod - def getAnnotator(self) -> sv.annotators.base.BaseAnnotator: + def getAnnotator(self, *args, **kwargs) -> sv.annotators.base.BaseAnnotator: pass @classmethod @@ -214,5 +214,7 @@ async def run( palette_size: Optional[int], custom_colors: Optional[List[str]], color_axis: Optional[str], + *args, + **kwargs ) -> BlockResult: pass From b787fc85e344ac488a5b9b07a69c6fe9b0a323e8 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Thu, 18 Jul 2024 09:44:03 -0700 Subject: [PATCH 32/36] Call superclass in init --- inference/core/workflows/core_steps/visualizations/base.py | 4 ++-- inference/core/workflows/core_steps/visualizations/blur.py | 3 ++- .../core/workflows/core_steps/visualizations/bounding_box.py | 3 ++- inference/core/workflows/core_steps/visualizations/circle.py | 3 ++- inference/core/workflows/core_steps/visualizations/color.py | 3 ++- 
inference/core/workflows/core_steps/visualizations/corner.py | 3 ++- inference/core/workflows/core_steps/visualizations/crop.py | 3 ++- inference/core/workflows/core_steps/visualizations/dot.py | 3 ++- inference/core/workflows/core_steps/visualizations/ellipse.py | 3 ++- inference/core/workflows/core_steps/visualizations/halo.py | 3 ++- inference/core/workflows/core_steps/visualizations/label.py | 3 ++- inference/core/workflows/core_steps/visualizations/mask.py | 3 ++- .../core/workflows/core_steps/visualizations/pixelate.py | 3 ++- inference/core/workflows/core_steps/visualizations/polygon.py | 3 ++- .../core/workflows/core_steps/visualizations/triangle.py | 3 ++- 15 files changed, 30 insertions(+), 16 deletions(-) diff --git a/inference/core/workflows/core_steps/visualizations/base.py b/inference/core/workflows/core_steps/visualizations/base.py index 79f93dbda..f442b7be7 100644 --- a/inference/core/workflows/core_steps/visualizations/base.py +++ b/inference/core/workflows/core_steps/visualizations/base.py @@ -158,8 +158,8 @@ def describe_outputs(cls) -> List[OutputDefinition]: class VisualizationBlock(WorkflowBlock, ABC): - def __init__(self): - pass + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) @classmethod @abstractmethod diff --git a/inference/core/workflows/core_steps/visualizations/blur.py b/inference/core/workflows/core_steps/visualizations/blur.py index 45dc31647..ee09598d1 100644 --- a/inference/core/workflows/core_steps/visualizations/blur.py +++ b/inference/core/workflows/core_steps/visualizations/blur.py @@ -84,7 +84,8 @@ def describe_outputs(cls) -> List[OutputDefinition]: class BlurVisualizationBlock(WorkflowBlock): - def __init__(self): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) self.annotatorCache = {} @classmethod diff --git a/inference/core/workflows/core_steps/visualizations/bounding_box.py b/inference/core/workflows/core_steps/visualizations/bounding_box.py index caf226776..343ea4807 
100644 --- a/inference/core/workflows/core_steps/visualizations/bounding_box.py +++ b/inference/core/workflows/core_steps/visualizations/bounding_box.py @@ -51,7 +51,8 @@ class BoundingBoxManifest(VisualizationManifest): class BoundingBoxVisualizationBlock(VisualizationBlock): - def __init__(self): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) self.annotatorCache = {} @classmethod diff --git a/inference/core/workflows/core_steps/visualizations/circle.py b/inference/core/workflows/core_steps/visualizations/circle.py index b41828212..bd5b40ada 100644 --- a/inference/core/workflows/core_steps/visualizations/circle.py +++ b/inference/core/workflows/core_steps/visualizations/circle.py @@ -43,7 +43,8 @@ class CircleManifest(VisualizationManifest): class CircleVisualizationBlock(VisualizationBlock): - def __init__(self): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) self.annotatorCache = {} @classmethod diff --git a/inference/core/workflows/core_steps/visualizations/color.py b/inference/core/workflows/core_steps/visualizations/color.py index 6ebeaa9da..28355448b 100644 --- a/inference/core/workflows/core_steps/visualizations/color.py +++ b/inference/core/workflows/core_steps/visualizations/color.py @@ -44,7 +44,8 @@ class ColorManifest(VisualizationManifest): class ColorVisualizationBlock(VisualizationBlock): - def __init__(self): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) self.annotatorCache = {} @classmethod diff --git a/inference/core/workflows/core_steps/visualizations/corner.py b/inference/core/workflows/core_steps/visualizations/corner.py index f6b41ffde..2f7bbf772 100644 --- a/inference/core/workflows/core_steps/visualizations/corner.py +++ b/inference/core/workflows/core_steps/visualizations/corner.py @@ -49,7 +49,8 @@ class CornerManifest(VisualizationManifest): class CornerVisualizationBlock(VisualizationBlock): - def __init__(self): + def __init__(self, *args, **kwargs): + 
super().__init__(*args, **kwargs) self.annotatorCache = {} @classmethod diff --git a/inference/core/workflows/core_steps/visualizations/crop.py b/inference/core/workflows/core_steps/visualizations/crop.py index 78cadc3ba..52ac6baac 100644 --- a/inference/core/workflows/core_steps/visualizations/crop.py +++ b/inference/core/workflows/core_steps/visualizations/crop.py @@ -71,7 +71,8 @@ class CropManifest(VisualizationManifest): class CropVisualizationBlock(VisualizationBlock): - def __init__(self): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) self.annotatorCache = {} @classmethod diff --git a/inference/core/workflows/core_steps/visualizations/dot.py b/inference/core/workflows/core_steps/visualizations/dot.py index e6b7039fd..57a3d8d24 100644 --- a/inference/core/workflows/core_steps/visualizations/dot.py +++ b/inference/core/workflows/core_steps/visualizations/dot.py @@ -72,7 +72,8 @@ class DotManifest(VisualizationManifest): class DotVisualizationBlock(VisualizationBlock): - def __init__(self): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) self.annotatorCache = {} @classmethod diff --git a/inference/core/workflows/core_steps/visualizations/ellipse.py b/inference/core/workflows/core_steps/visualizations/ellipse.py index b4a7a3a85..3cd41a5ca 100644 --- a/inference/core/workflows/core_steps/visualizations/ellipse.py +++ b/inference/core/workflows/core_steps/visualizations/ellipse.py @@ -55,7 +55,8 @@ class EllipseManifest(VisualizationManifest): class EllipseVisualizationBlock(VisualizationBlock): - def __init__(self): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) self.annotatorCache = {} @classmethod diff --git a/inference/core/workflows/core_steps/visualizations/halo.py b/inference/core/workflows/core_steps/visualizations/halo.py index cfbc86439..173824e9e 100644 --- a/inference/core/workflows/core_steps/visualizations/halo.py +++ 
b/inference/core/workflows/core_steps/visualizations/halo.py @@ -63,7 +63,8 @@ class HaloManifest(VisualizationManifest): class HaloVisualizationBlock(VisualizationBlock): - def __init__(self): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) self.annotatorCache = {} @classmethod diff --git a/inference/core/workflows/core_steps/visualizations/label.py b/inference/core/workflows/core_steps/visualizations/label.py index c110819fb..15d87b5b3 100644 --- a/inference/core/workflows/core_steps/visualizations/label.py +++ b/inference/core/workflows/core_steps/visualizations/label.py @@ -103,7 +103,8 @@ class LabelManifest(VisualizationManifest): class LabelVisualizationBlock(VisualizationBlock): - def __init__(self): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) self.annotatorCache = {} @classmethod diff --git a/inference/core/workflows/core_steps/visualizations/mask.py b/inference/core/workflows/core_steps/visualizations/mask.py index 124063197..d569781fd 100644 --- a/inference/core/workflows/core_steps/visualizations/mask.py +++ b/inference/core/workflows/core_steps/visualizations/mask.py @@ -56,7 +56,8 @@ class MaskManifest(VisualizationManifest): class MaskVisualizationBlock(VisualizationBlock): - def __init__(self): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) self.annotatorCache = {} @classmethod diff --git a/inference/core/workflows/core_steps/visualizations/pixelate.py b/inference/core/workflows/core_steps/visualizations/pixelate.py index 4e4be9e97..228c438b5 100644 --- a/inference/core/workflows/core_steps/visualizations/pixelate.py +++ b/inference/core/workflows/core_steps/visualizations/pixelate.py @@ -84,7 +84,8 @@ def describe_outputs(cls) -> List[OutputDefinition]: class PixelateVisualizationBlock(WorkflowBlock): - def __init__(self): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) self.annotatorCache = {} @classmethod diff --git 
a/inference/core/workflows/core_steps/visualizations/polygon.py b/inference/core/workflows/core_steps/visualizations/polygon.py index becda5adb..e6f80821f 100644 --- a/inference/core/workflows/core_steps/visualizations/polygon.py +++ b/inference/core/workflows/core_steps/visualizations/polygon.py @@ -55,7 +55,8 @@ class PolygonManifest(VisualizationManifest): class PolygonVisualizationBlock(VisualizationBlock): - def __init__(self): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) self.annotatorCache = {} @classmethod diff --git a/inference/core/workflows/core_steps/visualizations/triangle.py b/inference/core/workflows/core_steps/visualizations/triangle.py index aae99aa49..e4475ade1 100644 --- a/inference/core/workflows/core_steps/visualizations/triangle.py +++ b/inference/core/workflows/core_steps/visualizations/triangle.py @@ -76,7 +76,8 @@ class TriangleManifest(VisualizationManifest): class TriangleVisualizationBlock(VisualizationBlock): - def __init__(self): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) self.annotatorCache = {} @classmethod From 7036bef01788620e736c7ddfdf96f0dfec8cd841 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Thu, 18 Jul 2024 09:45:10 -0700 Subject: [PATCH 33/36] Update ColorLookup reference --- .../core/workflows/core_steps/visualizations/bounding_box.py | 4 ++-- inference/core/workflows/core_steps/visualizations/circle.py | 2 +- inference/core/workflows/core_steps/visualizations/color.py | 2 +- inference/core/workflows/core_steps/visualizations/corner.py | 2 +- inference/core/workflows/core_steps/visualizations/crop.py | 4 +--- inference/core/workflows/core_steps/visualizations/dot.py | 2 +- inference/core/workflows/core_steps/visualizations/ellipse.py | 2 +- inference/core/workflows/core_steps/visualizations/halo.py | 2 +- inference/core/workflows/core_steps/visualizations/label.py | 2 +- inference/core/workflows/core_steps/visualizations/mask.py | 2 +- 
inference/core/workflows/core_steps/visualizations/polygon.py | 2 +- .../core/workflows/core_steps/visualizations/triangle.py | 2 +- 12 files changed, 13 insertions(+), 15 deletions(-) diff --git a/inference/core/workflows/core_steps/visualizations/bounding_box.py b/inference/core/workflows/core_steps/visualizations/bounding_box.py index 343ea4807..97f515a8c 100644 --- a/inference/core/workflows/core_steps/visualizations/bounding_box.py +++ b/inference/core/workflows/core_steps/visualizations/bounding_box.py @@ -78,13 +78,13 @@ def getAnnotator( if roundness == 0: self.annotatorCache[key] = sv.BoxAnnotator( color=palette, - color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + color_lookup=getattr(sv.ColorLookup, color_axis), thickness=thickness, ) else: self.annotatorCache[key] = sv.RoundBoxAnnotator( color=palette, - color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + color_lookup=getattr(sv.ColorLookup, color_axis), thickness=thickness, roundness=roundness, ) diff --git a/inference/core/workflows/core_steps/visualizations/circle.py b/inference/core/workflows/core_steps/visualizations/circle.py index bd5b40ada..b215531dc 100644 --- a/inference/core/workflows/core_steps/visualizations/circle.py +++ b/inference/core/workflows/core_steps/visualizations/circle.py @@ -76,7 +76,7 @@ def getAnnotator( self.annotatorCache[key] = sv.CircleAnnotator( color=palette, - color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + color_lookup=getattr(sv.ColorLookup, color_axis), thickness=thickness, ) diff --git a/inference/core/workflows/core_steps/visualizations/color.py b/inference/core/workflows/core_steps/visualizations/color.py index 28355448b..769febd60 100644 --- a/inference/core/workflows/core_steps/visualizations/color.py +++ b/inference/core/workflows/core_steps/visualizations/color.py @@ -77,7 +77,7 @@ def getAnnotator( self.annotatorCache[key] = sv.ColorAnnotator( color=palette, - 
color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + color_lookup=getattr(sv.ColorLookup, color_axis), opacity=opacity, ) diff --git a/inference/core/workflows/core_steps/visualizations/corner.py b/inference/core/workflows/core_steps/visualizations/corner.py index 2f7bbf772..8bf19cf60 100644 --- a/inference/core/workflows/core_steps/visualizations/corner.py +++ b/inference/core/workflows/core_steps/visualizations/corner.py @@ -84,7 +84,7 @@ def getAnnotator( self.annotatorCache[key] = sv.BoxCornerAnnotator( color=palette, - color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + color_lookup=getattr(sv.ColorLookup, color_axis), thickness=thickness, corner_length=corner_length, ) diff --git a/inference/core/workflows/core_steps/visualizations/crop.py b/inference/core/workflows/core_steps/visualizations/crop.py index 52ac6baac..7f307aec9 100644 --- a/inference/core/workflows/core_steps/visualizations/crop.py +++ b/inference/core/workflows/core_steps/visualizations/crop.py @@ -108,9 +108,7 @@ def getAnnotator( self.annotatorCache[key] = sv.CropAnnotator( border_color=palette, - border_color_lookup=getattr( - sv.annotators.utils.ColorLookup, color_axis - ), + border_color_lookup=getattr(sv.ColorLookup, color_axis), position=getattr(sv.Position, position), scale_factor=scale_factor, border_thickness=border_thickness, diff --git a/inference/core/workflows/core_steps/visualizations/dot.py b/inference/core/workflows/core_steps/visualizations/dot.py index 57a3d8d24..b91e5be37 100644 --- a/inference/core/workflows/core_steps/visualizations/dot.py +++ b/inference/core/workflows/core_steps/visualizations/dot.py @@ -109,7 +109,7 @@ def getAnnotator( self.annotatorCache[key] = sv.DotAnnotator( color=palette, - color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + color_lookup=getattr(sv.ColorLookup, color_axis), position=getattr(sv.Position, position), radius=radius, outline_thickness=outline_thickness, diff --git 
a/inference/core/workflows/core_steps/visualizations/ellipse.py b/inference/core/workflows/core_steps/visualizations/ellipse.py index 3cd41a5ca..568949508 100644 --- a/inference/core/workflows/core_steps/visualizations/ellipse.py +++ b/inference/core/workflows/core_steps/visualizations/ellipse.py @@ -92,7 +92,7 @@ def getAnnotator( self.annotatorCache[key] = sv.EllipseAnnotator( color=palette, - color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + color_lookup=getattr(sv.ColorLookup, color_axis), thickness=thickness, start_angle=start_angle, end_angle=end_angle, diff --git a/inference/core/workflows/core_steps/visualizations/halo.py b/inference/core/workflows/core_steps/visualizations/halo.py index 173824e9e..46f61f199 100644 --- a/inference/core/workflows/core_steps/visualizations/halo.py +++ b/inference/core/workflows/core_steps/visualizations/halo.py @@ -98,7 +98,7 @@ def getAnnotator( self.annotatorCache[key] = sv.HaloAnnotator( color=palette, - color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + color_lookup=getattr(sv.ColorLookup, color_axis), opacity=opacity, ) diff --git a/inference/core/workflows/core_steps/visualizations/label.py b/inference/core/workflows/core_steps/visualizations/label.py index 15d87b5b3..db91f8fed 100644 --- a/inference/core/workflows/core_steps/visualizations/label.py +++ b/inference/core/workflows/core_steps/visualizations/label.py @@ -148,7 +148,7 @@ def getAnnotator( self.annotatorCache[key] = sv.LabelAnnotator( color=palette, - color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + color_lookup=getattr(sv.ColorLookup, color_axis), text_position=getattr(sv.Position, text_position), text_color=text_color, text_scale=text_scale, diff --git a/inference/core/workflows/core_steps/visualizations/mask.py b/inference/core/workflows/core_steps/visualizations/mask.py index d569781fd..4eaa82d15 100644 --- a/inference/core/workflows/core_steps/visualizations/mask.py +++ 
b/inference/core/workflows/core_steps/visualizations/mask.py @@ -89,7 +89,7 @@ def getAnnotator( self.annotatorCache[key] = sv.MaskAnnotator( color=palette, - color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + color_lookup=getattr(sv.ColorLookup, color_axis), opacity=opacity, ) diff --git a/inference/core/workflows/core_steps/visualizations/polygon.py b/inference/core/workflows/core_steps/visualizations/polygon.py index e6f80821f..9c798d585 100644 --- a/inference/core/workflows/core_steps/visualizations/polygon.py +++ b/inference/core/workflows/core_steps/visualizations/polygon.py @@ -88,7 +88,7 @@ def getAnnotator( self.annotatorCache[key] = sv.PolygonAnnotator( color=palette, - color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + color_lookup=getattr(sv.ColorLookup, color_axis), thickness=thickness, ) diff --git a/inference/core/workflows/core_steps/visualizations/triangle.py b/inference/core/workflows/core_steps/visualizations/triangle.py index e4475ade1..21a55bfdb 100644 --- a/inference/core/workflows/core_steps/visualizations/triangle.py +++ b/inference/core/workflows/core_steps/visualizations/triangle.py @@ -115,7 +115,7 @@ def getAnnotator( self.annotatorCache[key] = sv.TriangleAnnotator( color=palette, - color_lookup=getattr(sv.annotators.utils.ColorLookup, color_axis), + color_lookup=getattr(sv.ColorLookup, color_axis), position=getattr(sv.Position, position), base=base, height=height, From 2ea750b46d45718e4e31321da2faec6cd4d88010 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Thu, 18 Jul 2024 09:46:33 -0700 Subject: [PATCH 34/36] Rename strToColor -> str_to_color --- inference/core/workflows/core_steps/visualizations/base.py | 4 ++-- inference/core/workflows/core_steps/visualizations/label.py | 4 ++-- inference/core/workflows/core_steps/visualizations/utils.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/inference/core/workflows/core_steps/visualizations/base.py 
b/inference/core/workflows/core_steps/visualizations/base.py index f442b7be7..463ad8bc5 100644 --- a/inference/core/workflows/core_steps/visualizations/base.py +++ b/inference/core/workflows/core_steps/visualizations/base.py @@ -4,7 +4,7 @@ import supervision as sv from pydantic import AliasChoices, ConfigDict, Field -from inference.core.workflows.core_steps.visualizations.utils import strToColor +from inference.core.workflows.core_steps.visualizations.utils import str_to_color from inference.core.workflows.entities.base import OutputDefinition, WorkflowImageData from inference.core.workflows.entities.types import ( BATCH_OF_IMAGES_KIND, @@ -174,7 +174,7 @@ def getAnnotator(self, *args, **kwargs) -> sv.annotators.base.BaseAnnotator: def getPalette(self, color_palette, palette_size, custom_colors): if color_palette == "CUSTOM": return sv.ColorPalette( - colors=[strToColor(color) for color in custom_colors] + colors=[str_to_color(color) for color in custom_colors] ) elif hasattr(sv.ColorPalette, color_palette): return getattr(sv.ColorPalette, color_palette) diff --git a/inference/core/workflows/core_steps/visualizations/label.py b/inference/core/workflows/core_steps/visualizations/label.py index db91f8fed..ee6bcef24 100644 --- a/inference/core/workflows/core_steps/visualizations/label.py +++ b/inference/core/workflows/core_steps/visualizations/label.py @@ -7,7 +7,7 @@ VisualizationBlock, VisualizationManifest, ) -from inference.core.workflows.core_steps.visualizations.utils import strToColor +from inference.core.workflows.core_steps.visualizations.utils import str_to_color from inference.core.workflows.entities.base import WorkflowImageData from inference.core.workflows.entities.types import ( FLOAT_KIND, @@ -144,7 +144,7 @@ def getAnnotator( if key not in self.annotatorCache: palette = self.getPalette(color_palette, palette_size, custom_colors) - text_color = strToColor(text_color) + text_color = str_to_color(text_color) self.annotatorCache[key] = sv.LabelAnnotator( 
color=palette, diff --git a/inference/core/workflows/core_steps/visualizations/utils.py b/inference/core/workflows/core_steps/visualizations/utils.py index b548db60f..812e2c833 100644 --- a/inference/core/workflows/core_steps/visualizations/utils.py +++ b/inference/core/workflows/core_steps/visualizations/utils.py @@ -1,7 +1,7 @@ import supervision as sv -def strToColor(color: str) -> sv.Color: +def str_to_color(color: str) -> sv.Color: if color.startswith("#"): return sv.Color.from_hex(color) elif color.startswith("rgb"): From 1c0f234e6134492d2dbc50dd3699177e057a300c Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Thu, 18 Jul 2024 09:49:32 -0700 Subject: [PATCH 35/36] Add tests for str_to_color --- .../core_steps/visualizations/__init__.py | 0 .../visualizations/test_str_to_color.py | 57 +++++++++++++++++++ 2 files changed, 57 insertions(+) create mode 100644 tests/workflows/unit_tests/core_steps/visualizations/__init__.py create mode 100644 tests/workflows/unit_tests/core_steps/visualizations/test_str_to_color.py diff --git a/tests/workflows/unit_tests/core_steps/visualizations/__init__.py b/tests/workflows/unit_tests/core_steps/visualizations/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_str_to_color.py b/tests/workflows/unit_tests/core_steps/visualizations/test_str_to_color.py new file mode 100644 index 000000000..bd6d8b25e --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_str_to_color.py @@ -0,0 +1,57 @@ +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.utils import ( + str_to_color +) + +def test_str_to_color_with_hex_color() -> None: + # given + color = "#FF0000" + + # when + result = str_to_color(color) + + # then + assert result == sv.Color.from_hex(color) + +def test_str_to_color_with_rgb_color() -> None: + # given + color = "rgb(255, 0, 0)" + expected_color 
= sv.Color.from_rgb_tuple((255, 0, 0)) + + # when + result = str_to_color(color) + + # then + assert result == expected_color + +def test_str_to_color_with_bgr_color() -> None: + # given + color = "bgr(0, 0, 255)" + expected_color = sv.Color.from_bgr_tuple((0, 0, 255)) + + # when + result = str_to_color(color) + + # then + assert result == expected_color + +def test_str_to_color_with_color_name() -> None: + # given + color = "WHITE" + + # when + result = str_to_color(color) + + # then + assert result == sv.Color.WHITE + +def test_str_to_color_with_invalid_color() -> None: + # given + color = "invalid" + + # when + with pytest.raises(ValueError): + _ = str_to_color(color) From 6d93d8da5f89ffecbb7137beafcce56143cff9a6 Mon Sep 17 00:00:00 2001 From: Brad Dwyer Date: Thu, 18 Jul 2024 09:49:44 -0700 Subject: [PATCH 36/36] Fix unused import --- .../unit_tests/core_steps/visualizations/test_str_to_color.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_str_to_color.py b/tests/workflows/unit_tests/core_steps/visualizations/test_str_to_color.py index bd6d8b25e..c95efe9de 100644 --- a/tests/workflows/unit_tests/core_steps/visualizations/test_str_to_color.py +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_str_to_color.py @@ -1,6 +1,5 @@ import pytest import supervision as sv -from pydantic import ValidationError from inference.core.workflows.core_steps.visualizations.utils import ( str_to_color