diff --git a/.github/workflows/load_test_hosted_inference.yml b/.github/workflows/load_test_hosted_inference.yml index dc0e5b437..4cde49a98 100644 --- a/.github/workflows/load_test_hosted_inference.yml +++ b/.github/workflows/load_test_hosted_inference.yml @@ -16,6 +16,7 @@ on: - object-detection - instance-segmentation - classification + - workflows jobs: build: @@ -49,6 +50,10 @@ jobs: if: ${{ github.event.inputs.environment == 'production' && github.event.inputs.model_type == 'classification' }} run: | ROBOFLOW_API_KEY=${{ secrets.LOAD_TEST_PRODUCTION_API_KEY }} python -m inference_cli.main benchmark api-speed -m vehicle-classification-eapcd/2 -d coco -rps 5 -br 500 -h https://classify.roboflow.com --yes --output_location test_results.json + - name: 🏋️♂️ Load test 🚨 PRODUCTION 🚨 | workflows 🔥🔥🔥🔥 + if: ${{ github.event.inputs.environment == 'production' && github.event.inputs.model_type == 'workflows' }} + run: | + ROBOFLOW_API_KEY=${{ secrets.LOAD_TEST_PRODUCTION_API_KEY }} python -m inference_cli.main benchmark api-speed -wid workflows-production-test -wn paul-guerrie-tang1 -d coco -rps 5 -br 500 -h https://classify.roboflow.com --yes --output_location test_results.json - name: 🏋️♂️ Load test 😎 STAGING 😎 | object-detection 🔥🔥🔥🔥 if: ${{ github.event.inputs.environment == 'staging' && github.event.inputs.model_type == 'object-detection' }} @@ -62,5 +67,9 @@ jobs: if: ${{ github.event.inputs.environment == 'staging' && github.event.inputs.model_type == 'classification' }} run: | ROBOFLOW_API_KEY=${{ secrets.LOAD_TEST_STAGING_API_KEY }} python -m inference_cli.main benchmark api-speed -m catdog/28 -d coco -rps 5 -br 500 -h https://lambda-classification.staging.roboflow.com --legacy-endpoints --yes --output_location test_results.json + - name: 🏋️♂️ Load test 😎 STAGING 😎 | workflows 🔥🔥🔥🔥 + if: ${{ github.event.inputs.environment == 'staging' && github.event.inputs.model_type == 'workflows' }} + run: | + ROBOFLOW_API_KEY=${{ secrets.LOAD_TEST_STAGING_API_KEY }} python 
-m inference_cli.main benchmark api-speed -wid workflows-staging-test -wn paul-guerrie -d coco -rps 5 -br 500 -h https://lambda-classification.staging.roboflow.com --legacy-endpoints --yes --output_location test_results.json - name: 📈 RESULTS run: cat test_results.json | jq diff --git a/development/docs/build_block_docs.py b/development/docs/build_block_docs.py index 2b64f7763..d5968849b 100644 --- a/development/docs/build_block_docs.py +++ b/development/docs/build_block_docs.py @@ -104,7 +104,7 @@ def main() -> None: token=AUTOGENERATED_BLOCKS_LIST_TOKEN, ) block_card_lines = [] - blocks_description = describe_available_blocks() + blocks_description = describe_available_blocks(dynamic_blocks=[]) block_type2manifest_type_identifier = { block.block_class: block.manifest_type_identifier for block in blocks_description.blocks diff --git a/docker/dockerfiles/Dockerfile.onnx.lambda b/docker/dockerfiles/Dockerfile.onnx.lambda index f877b3a65..7927cb044 100644 --- a/docker/dockerfiles/Dockerfile.onnx.lambda +++ b/docker/dockerfiles/Dockerfile.onnx.lambda @@ -70,6 +70,7 @@ ENV API_LOGGING_ENABLED=True ENV MODEL_VALIDATION_DISABLED=True ENV ALLOW_NON_HTTPS_URL_INPUT=False ENV ALLOW_URL_INPUT_WITHOUT_FQDN=False +ENV ALLOW_CUSTOM_PYTHON_EXECUTION_IN_WORKFLOWS=False WORKDIR ${LAMBDA_TASK_ROOT} RUN rm -rf /build diff --git a/docker/dockerfiles/Dockerfile.onnx.lambda.slim b/docker/dockerfiles/Dockerfile.onnx.lambda.slim index ccbbb5c0f..a31efa51c 100644 --- a/docker/dockerfiles/Dockerfile.onnx.lambda.slim +++ b/docker/dockerfiles/Dockerfile.onnx.lambda.slim @@ -64,6 +64,7 @@ ENV API_LOGGING_ENABLED=True ENV MODEL_VALIDATION_DISABLED=True ENV ALLOW_NON_HTTPS_URL_INPUT=False ENV ALLOW_URL_INPUT_WITHOUT_FQDN=False +ENV ALLOW_CUSTOM_PYTHON_EXECUTION_IN_WORKFLOWS=False WORKDIR ${LAMBDA_TASK_ROOT} diff --git a/docs/workflows/blocks.md b/docs/workflows/blocks.md index adc30dd5a..bf4095caf 100644 --- a/docs/workflows/blocks.md +++ b/docs/workflows/blocks.md @@ -36,6 +36,7 @@ hide:
+ diff --git a/docs/workflows/kinds.md b/docs/workflows/kinds.md index faeef32b6..3dbc94c96 100644 --- a/docs/workflows/kinds.md +++ b/docs/workflows/kinds.md @@ -8,26 +8,26 @@ resolved we need a simple type system - that's what we call `kinds`. ## List of `workflows` kinds -* [`roboflow_project`](/workflows/kinds/roboflow_project): Roboflow project name -* [`dictionary`](/workflows/kinds/dictionary): Dictionary -* [`string`](/workflows/kinds/string): String value +* [`list_of_values`](/workflows/kinds/list_of_values): List of values of any types +* [`*`](/workflows/kinds/*): Equivalent of any element * [`Batch[dictionary]`](/workflows/kinds/batch_dictionary): Batch of dictionaries -* [`Batch[keypoint_detection_prediction]`](/workflows/kinds/batch_keypoint_detection_prediction): `'predictions'` key from Keypoint Detection Model output -* [`Batch[parent_id]`](/workflows/kinds/batch_parent_id): Identifier of parent for step output -* [`Batch[classification_prediction]`](/workflows/kinds/batch_classification_prediction): `'predictions'` key from Classification Model outputs -* [`roboflow_model_id`](/workflows/kinds/roboflow_model_id): Roboflow model id * [`Batch[top_class]`](/workflows/kinds/batch_top_class): Batch of string values representing top class predicted by classification model +* [`integer`](/workflows/kinds/integer): Integer value +* [`dictionary`](/workflows/kinds/dictionary): Dictionary +* [`Batch[classification_prediction]`](/workflows/kinds/batch_classification_prediction): `'predictions'` key from Classification Model outputs +* [`Batch[boolean]`](/workflows/kinds/batch_boolean): Boolean values batch * [`boolean`](/workflows/kinds/boolean): Boolean flag +* [`Batch[prediction_type]`](/workflows/kinds/batch_prediction_type): String value with type of prediction +* [`Batch[parent_id]`](/workflows/kinds/batch_parent_id): Identifier of parent for step output +* [`string`](/workflows/kinds/string): String value * 
[`Batch[instance_segmentation_prediction]`](/workflows/kinds/batch_instance_segmentation_prediction): `'predictions'` key from Instance Segmentation Model outputs -* [`*`](/workflows/kinds/*): Equivalent of any element -* [`integer`](/workflows/kinds/integer): Integer value +* [`float_zero_to_one`](/workflows/kinds/float_zero_to_one): `float` value in range `[0.0, 1.0]` * [`Batch[image_metadata]`](/workflows/kinds/batch_image_metadata): Dictionary with image metadata required by supervision -* [`Batch[bar_code_detection]`](/workflows/kinds/batch_bar_code_detection): Prediction with barcode detection * [`Batch[image]`](/workflows/kinds/batch_image): Image in workflows +* [`roboflow_project`](/workflows/kinds/roboflow_project): Roboflow project name * [`Batch[string]`](/workflows/kinds/batch_string): Batch of string values -* [`list_of_values`](/workflows/kinds/list_of_values): List of values of any types -* [`Batch[boolean]`](/workflows/kinds/batch_boolean): Boolean values batch * [`Batch[object_detection_prediction]`](/workflows/kinds/batch_object_detection_prediction): `'predictions'` key from Object Detection Model output -* [`float_zero_to_one`](/workflows/kinds/float_zero_to_one): `float` value in range `[0.0, 1.0]` -* [`Batch[prediction_type]`](/workflows/kinds/batch_prediction_type): String value with type of prediction +* [`Batch[keypoint_detection_prediction]`](/workflows/kinds/batch_keypoint_detection_prediction): `'predictions'` key from Keypoint Detection Model output +* [`Batch[bar_code_detection]`](/workflows/kinds/batch_bar_code_detection): Prediction with barcode detection +* [`roboflow_model_id`](/workflows/kinds/roboflow_model_id): Roboflow model id diff --git a/inference/core/entities/requests/workflows.py b/inference/core/entities/requests/workflows.py index ab5aa0764..a82a1448f 100644 --- a/inference/core/entities/requests/workflows.py +++ b/inference/core/entities/requests/workflows.py @@ -2,6 +2,10 @@ from pydantic import BaseModel, Field 
+from inference.core.workflows.execution_engine.dynamic_blocks.entities import ( + DynamicBlockDefinition, +) + class WorkflowInferenceRequest(BaseModel): api_key: str = Field( @@ -18,3 +22,9 @@ class WorkflowInferenceRequest(BaseModel): specification: dict + + +class DescribeBlocksRequest(BaseModel): + dynamic_blocks_definitions: List[DynamicBlockDefinition] = Field( + default_factory=list, description="Dynamic blocks to be used." + ) diff --git a/inference/core/entities/responses/workflows.py b/inference/core/entities/responses/workflows.py index 6ecaf4121..95c5b3fc3 100644 --- a/inference/core/entities/responses/workflows.py +++ b/inference/core/entities/responses/workflows.py @@ -146,3 +146,6 @@ class WorkflowsBlocksDescription(BaseModel): universal_query_language_description: UniversalQueryLanguageDescription = Field( description="Definitions of Universal Query Language operations and operators" ) + dynamic_block_definition_schema: dict = Field( + description="Schema for dynamic block definition" + ) diff --git a/inference/core/env.py b/inference/core/env.py index 84beea90f..f0a9e9f7e 100644 --- a/inference/core/env.py +++ b/inference/core/env.py @@ -393,6 +393,9 @@ WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS = int( os.getenv("WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS", "8") ) +ALLOW_CUSTOM_PYTHON_EXECUTION_IN_WORKFLOWS = str2bool( + os.getenv("ALLOW_CUSTOM_PYTHON_EXECUTION_IN_WORKFLOWS", "True") +) MODEL_VALIDATION_DISABLED = str2bool(os.getenv("MODEL_VALIDATION_DISABLED", "False")) diff --git a/inference/core/interfaces/camera/entities.py b/inference/core/interfaces/camera/entities.py index d68b410fb..a43cbd28f 100644 --- a/inference/core/interfaces/camera/entities.py +++ b/inference/core/interfaces/camera/entities.py @@ -59,6 +59,7 @@ class VideoFrame: image: np.ndarray frame_id: FrameID frame_timestamp: FrameTimestamp + fps: float = 0 source_id: Optional[int] = 
None diff --git a/inference/core/interfaces/camera/video_source.py b/inference/core/interfaces/camera/video_source.py index 9f3ef9694..81f4e90a7 100644 --- a/inference/core/interfaces/camera/video_source.py +++ b/inference/core/interfaces/camera/video_source.py @@ -888,6 +888,7 @@ def _consume_stream_frame( buffer=buffer, decoding_pace_monitor=self._decoding_pace_monitor, source_id=source_id, + fps=declared_source_fps, ) if self._buffer_filling_strategy in DROP_OLDEST_STRATEGIES: return self._process_stream_frame_dropping_oldest( @@ -1082,6 +1083,7 @@ def decode_video_frame_to_buffer( buffer: Queue, decoding_pace_monitor: sv.FPSMonitor, source_id: Optional[int], + fps: float = 0, ) -> bool: success, image = video.retrieve() if not success: @@ -1091,6 +1093,7 @@ def decode_video_frame_to_buffer( image=image, frame_id=frame_id, frame_timestamp=frame_timestamp, + fps=fps, source_id=source_id, ) buffer.put(video_frame) diff --git a/inference/core/interfaces/http/handlers/__init__.py b/inference/core/interfaces/http/handlers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/inference/core/interfaces/http/handlers/workflows.py b/inference/core/interfaces/http/handlers/workflows.py new file mode 100644 index 000000000..326e1e04e --- /dev/null +++ b/inference/core/interfaces/http/handlers/workflows.py @@ -0,0 +1,78 @@ +# TODO - for everyone: start migrating other handlers to bring relief to http_api.py +from typing import List, Optional + +from inference.core.entities.responses.workflows import ( + ExternalBlockPropertyPrimitiveDefinition, + ExternalWorkflowsBlockSelectorDefinition, + UniversalQueryLanguageDescription, + WorkflowsBlocksDescription, +) +from inference.core.workflows.core_steps.common.query_language.introspection.core import ( + prepare_operations_descriptions, + prepare_operators_descriptions, +) +from inference.core.workflows.execution_engine.dynamic_blocks.block_assembler import ( + compile_dynamic_blocks, +) +from 
inference.core.workflows.execution_engine.dynamic_blocks.entities import ( + DynamicBlockDefinition, +) +from inference.core.workflows.execution_engine.introspection.blocks_loader import ( + describe_available_blocks, +) +from inference.core.workflows.execution_engine.introspection.connections_discovery import ( + discover_blocks_connections, +) + + +def handle_describe_workflows_blocks_request( + dynamic_blocks_definitions: Optional[List[DynamicBlockDefinition]] = None, +) -> WorkflowsBlocksDescription: + if dynamic_blocks_definitions is None: + dynamic_blocks_definitions = [] + dynamic_blocks = compile_dynamic_blocks( + dynamic_blocks_definitions=dynamic_blocks_definitions, + ) + blocks_description = describe_available_blocks(dynamic_blocks=dynamic_blocks) + blocks_connections = discover_blocks_connections( + blocks_description=blocks_description, + ) + kinds_connections = { + kind_name: [ + ExternalWorkflowsBlockSelectorDefinition( + manifest_type_identifier=c.manifest_type_identifier, + property_name=c.property_name, + property_description=c.property_description, + compatible_element=c.compatible_element, + is_list_element=c.is_list_element, + is_dict_element=c.is_dict_element, + ) + for c in connections + ] + for kind_name, connections in blocks_connections.kinds_connections.items() + } + primitives_connections = [ + ExternalBlockPropertyPrimitiveDefinition( + manifest_type_identifier=primitives_connection.manifest_type_identifier, + property_name=primitives_connection.property_name, + property_description=primitives_connection.property_description, + type_annotation=primitives_connection.type_annotation, + ) + for primitives_connection in blocks_connections.primitives_connections + ] + uql_operations_descriptions = prepare_operations_descriptions() + uql_operators_descriptions = prepare_operators_descriptions() + universal_query_language_description = ( + UniversalQueryLanguageDescription.from_internal_entities( + 
operations_descriptions=uql_operations_descriptions, + operators_descriptions=uql_operators_descriptions, + ) + ) + return WorkflowsBlocksDescription( + blocks=blocks_description.blocks, + declared_kinds=blocks_description.declared_kinds, + kinds_connections=kinds_connections, + primitives_connections=primitives_connections, + universal_query_language_description=universal_query_language_description, + dynamic_block_definition_schema=DynamicBlockDefinition.schema(), + ) diff --git a/inference/core/interfaces/http/http_api.py b/inference/core/interfaces/http/http_api.py index 6663c196f..75c331f4a 100644 --- a/inference/core/interfaces/http/http_api.py +++ b/inference/core/interfaces/http/http_api.py @@ -11,9 +11,9 @@ from fastapi.responses import JSONResponse, RedirectResponse, Response from fastapi.staticfiles import StaticFiles from fastapi_cprofile.profiler import CProfileMiddleware +from starlette.middleware.base import BaseHTTPMiddleware from inference.core import logger -from inference.core.cache import cache from inference.core.devices.utils import GLOBAL_INFERENCE_SERVER_ID from inference.core.entities.requests.clip import ( ClipCompareRequest, @@ -42,6 +42,7 @@ ClearModelRequest, ) from inference.core.entities.requests.workflows import ( + DescribeBlocksRequest, WorkflowInferenceRequest, WorkflowSpecificationInferenceRequest, ) @@ -73,9 +74,6 @@ ServerVersionInfo, ) from inference.core.entities.responses.workflows import ( - ExternalBlockPropertyPrimitiveDefinition, - ExternalWorkflowsBlockSelectorDefinition, - UniversalQueryLanguageDescription, WorkflowInferenceResponse, WorkflowsBlocksDescription, WorkflowValidationStatus, @@ -128,6 +126,9 @@ WorkspaceLoadError, ) from inference.core.interfaces.base import BaseInterface +from inference.core.interfaces.http.handlers.workflows import ( + handle_describe_workflows_blocks_request, +) from inference.core.interfaces.http.orjson_utils import ( orjson_response, serialise_workflow_result, @@ -140,12 +141,9 @@ 
InvalidInputTypeError, OperationTypeNotRecognisedError, ) -from inference.core.workflows.core_steps.common.query_language.introspection.core import ( - prepare_operations_descriptions, - prepare_operators_descriptions, -) from inference.core.workflows.entities.base import OutputDefinition from inference.core.workflows.errors import ( + DynamicBlockError, ExecutionGraphStructureError, InvalidReferenceTargetError, ReferenceTypeError, @@ -157,13 +155,8 @@ parse_workflow_definition, ) from inference.core.workflows.execution_engine.core import ExecutionEngine -from inference.core.workflows.execution_engine.introspection.blocks_loader import ( - describe_available_blocks, -) -from inference.core.workflows.execution_engine.introspection.connections_discovery import ( - discover_blocks_connections, -) from inference.models.aliases import resolve_roboflow_model_alias +from inference.usage_tracking.collector import usage_collector if LAMBDA: from inference.core.usage import trackUsage @@ -243,6 +236,7 @@ async def wrapped_route(*args, **kwargs): RuntimeInputError, InvalidInputTypeError, OperationTypeNotRecognisedError, + DynamicBlockError, ) as error: resp = JSONResponse( status_code=400, @@ -346,6 +340,14 @@ async def wrapped_route(*args, **kwargs): return wrapped_route +class LambdaMiddleware(BaseHTTPMiddleware): + async def dispatch(self, request, call_next): + response = await call_next(request) + logger.info("Lambda is terminating, handle unsent usage payloads.") + await usage_collector.async_push_usage_payloads() + return response + + class HttpInterface(BaseInterface): """Roboflow defined HTTP interface for a general-purpose inference server. 
@@ -393,6 +395,8 @@ def __init__( app.add_middleware( ASGIMiddleware, host="https://app.metlo.com", api_key=METLO_KEY ) + if LAMBDA: + app.add_middleware(LambdaMiddleware) if len(ALLOW_ORIGINS) > 0: app.add_middleware( @@ -461,13 +465,10 @@ async def process_workflow_inference_request( workflow_specification: dict, background_tasks: Optional[BackgroundTasks], ) -> WorkflowInferenceResponse: - step_execution_mode = StepExecutionMode(WORKFLOWS_STEP_EXECUTION_MODE) workflow_init_parameters = { "workflows_core.model_manager": model_manager, "workflows_core.api_key": workflow_request.api_key, "workflows_core.background_tasks": background_tasks, - "workflows_core.cache": cache, - "workflows_core.step_execution_mode": step_execution_mode, } execution_engine = ExecutionEngine.init( workflow_definition=workflow_specification, @@ -892,54 +893,35 @@ async def infer_from_workflow( @app.get( "/workflows/blocks/describe", response_model=WorkflowsBlocksDescription, - summary="[EXPERIMENTAL] Endpoint to get definition of workflows blocks that are accessible", + summary="[LEGACY] Endpoint to get definition of workflows blocks that are accessible", description="Endpoint provides detailed information about workflows building blocks that are " "accessible in the inference server. 
This information could be used to programmatically " "build / display workflows.", + deprecated=True, ) @with_route_exceptions async def describe_workflows_blocks() -> WorkflowsBlocksDescription: - blocks_description = describe_available_blocks() - blocks_connections = discover_blocks_connections( - blocks_description=blocks_description, - ) - kinds_connections = { - kind_name: [ - ExternalWorkflowsBlockSelectorDefinition( - manifest_type_identifier=c.manifest_type_identifier, - property_name=c.property_name, - property_description=c.property_description, - compatible_element=c.compatible_element, - is_list_element=c.is_list_element, - is_dict_element=c.is_dict_element, - ) - for c in connections - ] - for kind_name, connections in blocks_connections.kinds_connections.items() - } - primitives_connections = [ - ExternalBlockPropertyPrimitiveDefinition( - manifest_type_identifier=primitives_connection.manifest_type_identifier, - property_name=primitives_connection.property_name, - property_description=primitives_connection.property_description, - type_annotation=primitives_connection.type_annotation, - ) - for primitives_connection in blocks_connections.primitives_connections - ] - uql_operations_descriptions = prepare_operations_descriptions() - uql_operators_descriptions = prepare_operators_descriptions() - universal_query_language_description = ( - UniversalQueryLanguageDescription.from_internal_entities( - operations_descriptions=uql_operations_descriptions, - operators_descriptions=uql_operators_descriptions, - ) - ) - return WorkflowsBlocksDescription( - blocks=blocks_description.blocks, - declared_kinds=blocks_description.declared_kinds, - kinds_connections=kinds_connections, - primitives_connections=primitives_connections, - universal_query_language_description=universal_query_language_description, + return handle_describe_workflows_blocks_request() + + @app.post( + "/workflows/blocks/describe", + response_model=WorkflowsBlocksDescription, + 
summary="[EXPERIMENTAL] Endpoint to get definition of workflows blocks that are accessible", + description="Endpoint provides detailed information about workflows building blocks that are " + "accessible in the inference server. This information could be used to programmatically " + "build / display workflows. Additionally - in request body one can specify list of " + "dynamic blocks definitions which will be transformed into blocks and used to generate " + "schemas and definitions of connections", + ) + @with_route_exceptions + async def describe_workflows_blocks( + request: Optional[DescribeBlocksRequest] = None, + ) -> WorkflowsBlocksDescription: + dynamic_blocks_definitions = None + if request is not None: + dynamic_blocks_definitions = request.dynamic_blocks_definitions + return handle_describe_workflows_blocks_request( + dynamic_blocks_definitions=dynamic_blocks_definitions ) @app.post( @@ -953,6 +935,8 @@ async def describe_workflows_blocks() -> WorkflowsBlocksDescription: async def get_dynamic_block_outputs( step_manifest: Dict[str, Any] ) -> List[OutputDefinition]: + # Potentially TODO: dynamic blocks do not support dynamic outputs, but if it changes + # we need to provide dynamic blocks manifests here dummy_workflow_definition = { "version": "1.0", "inputs": [], @@ -960,7 +944,8 @@ async def get_dynamic_block_outputs( "outputs": [], } parsed_definition = parse_workflow_definition( - raw_workflow_definition=dummy_workflow_definition + raw_workflow_definition=dummy_workflow_definition, + dynamic_blocks=[], ) parsed_manifest = parsed_definition.steps[0] return parsed_manifest.get_actual_outputs() diff --git a/inference/core/interfaces/stream/inference_pipeline.py b/inference/core/interfaces/stream/inference_pipeline.py index 7e85939a9..6808fd57a 100644 --- a/inference/core/interfaces/stream/inference_pipeline.py +++ b/inference/core/interfaces/stream/inference_pipeline.py @@ -55,6 +55,7 @@ from inference.core.workflows.core_steps.common.entities import 
StepExecutionMode from inference.models.aliases import resolve_roboflow_model_alias from inference.models.utils import ROBOFLOW_MODEL_TYPES, get_model +from inference.usage_tracking.collector import usage_collector INFERENCE_PIPELINE_CONTEXT = "inference_pipeline" SOURCE_CONNECTION_ATTEMPT_FAILED_EVENT = "SOURCE_CONNECTION_ATTEMPT_FAILED" @@ -555,13 +556,10 @@ def init_with_workflow( workflow_init_parameters["workflows_core.background_tasks"] = ( background_tasks ) - workflow_init_parameters["workflows_core.cache"] = cache - workflow_init_parameters["workflows_core.step_execution_mode"] = ( - StepExecutionMode.LOCAL - ) execution_engine = ExecutionEngine.init( workflow_definition=workflow_specification, init_parameters=workflow_init_parameters, + workflow_id=workflow_id, ) workflow_runner = WorkflowRunner() on_video_frame = partial( diff --git a/inference/core/interfaces/stream/model_handlers/workflows.py b/inference/core/interfaces/stream/model_handlers/workflows.py index 44728a3ad..66f0a9942 100644 --- a/inference/core/interfaces/stream/model_handlers/workflows.py +++ b/inference/core/interfaces/stream/model_handlers/workflows.py @@ -27,9 +27,13 @@ def run_workflow( self._event_loop = event_loop if workflows_parameters is None: workflows_parameters = {} + # TODO: pass fps reflecting each stream to workflows_parameters + fps = video_frames[0].fps workflows_parameters[image_input_name] = [ video_frame.image for video_frame in video_frames ] return execution_engine.run( - runtime_parameters=workflows_parameters, event_loop=self._event_loop + runtime_parameters=workflows_parameters, + event_loop=self._event_loop, + fps=fps, ) diff --git a/inference/core/interfaces/stream/sinks.py b/inference/core/interfaces/stream/sinks.py index 827c737cb..3e9affbef 100644 --- a/inference/core/interfaces/stream/sinks.py +++ b/inference/core/interfaces/stream/sinks.py @@ -80,8 +80,8 @@ def render_boxes( (for sequential input) or position in the batch (from 0 to batch_size-1). 
Returns: None - Side effects: on_frame_rendered() is called against the np.ndarray produced from video frame - and predictions. + Side effects: on_frame_rendered() is called against the tuple (stream_id, np.ndarray) produced from video + frame and predictions. Example: ```python @@ -92,7 +92,11 @@ def render_boxes( output_size = (640, 480) video_sink = cv2.VideoWriter("output.avi", cv2.VideoWriter_fourcc(*"MJPG"), 25.0, output_size) - on_prediction = partial(render_boxes, display_size=output_size, on_frame_rendered=video_sink.write) + on_prediction = partial( + render_boxes, + display_size=output_size, + on_frame_rendered=lambda frame_data: video_sink.write(frame_data[1]) + ) pipeline = InferencePipeline.init( model_id="your-model/3", @@ -105,7 +109,8 @@ def render_boxes( ``` In this example, `render_boxes()` is used as a sink for `InferencePipeline` predictions - making frames with - predictions displayed to be saved into video file. + predictions displayed to be saved into video file. Please note that this is oversimplified example of usage + which will not be robust against multiple streams - better implementation available in `VideoFileSink` class. 
""" sequential_input_provided = False if not isinstance(video_frame, list): diff --git a/inference/core/nms.py b/inference/core/nms.py index ffc1b64cc..529d6a47b 100644 --- a/inference/core/nms.py +++ b/inference/core/nms.py @@ -52,7 +52,7 @@ def w_np_non_max_suppression( batch_predictions = [] for np_image_i, np_image_pred in enumerate(prediction): filtered_predictions = [] - np_conf_mask = (np_image_pred[:, 4] >= conf_thresh).squeeze() + np_conf_mask = np_image_pred[:, 4] >= conf_thresh np_image_pred = np_image_pred[np_conf_mask] cls_confs = np_image_pred[:, 5 : num_classes + 5] diff --git a/inference/core/version.py b/inference/core/version.py index 9a9b0d4a7..82410ed6d 100644 --- a/inference/core/version.py +++ b/inference/core/version.py @@ -1,4 +1,4 @@ -__version__ = "0.14.1" +__version__ = "0.15.0" if __name__ == "__main__": diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index 37fc716a8..58d49e175 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -1,5 +1,8 @@ -from typing import List, Type +from typing import Callable, List, Tuple, Type, Union +from inference.core.cache import cache +from inference.core.env import API_KEY, WORKFLOWS_STEP_EXECUTION_MODE +from inference.core.workflows.core_steps.common.entities import StepExecutionMode from inference.core.workflows.core_steps.flow_control.continue_if import ContinueIfBlock from inference.core.workflows.core_steps.formatters.expression import ExpressionBlock from inference.core.workflows.core_steps.formatters.first_non_empty_or_default import ( @@ -76,10 +79,105 @@ from inference.core.workflows.core_steps.transformations.relative_static_crop import ( RelativeStaticCropBlock, ) -from inference.core.workflows.prototypes.block import WorkflowBlock + +# Visualizers +from inference.core.workflows.core_steps.visualizations.blur import ( + BlurVisualizationBlock, +) +from 
inference.core.workflows.core_steps.visualizations.bounding_box import ( + BoundingBoxVisualizationBlock, +) +from inference.core.workflows.core_steps.visualizations.circle import ( + CircleVisualizationBlock, +) +from inference.core.workflows.core_steps.visualizations.color import ( + ColorVisualizationBlock, +) +from inference.core.workflows.core_steps.visualizations.corner import ( + CornerVisualizationBlock, +) +from inference.core.workflows.core_steps.visualizations.crop import ( + CropVisualizationBlock, +) +from inference.core.workflows.core_steps.visualizations.dot import DotVisualizationBlock +from inference.core.workflows.core_steps.visualizations.ellipse import ( + EllipseVisualizationBlock, +) +from inference.core.workflows.core_steps.visualizations.halo import ( + HaloVisualizationBlock, +) +from inference.core.workflows.core_steps.visualizations.label import ( + LabelVisualizationBlock, +) +from inference.core.workflows.core_steps.visualizations.mask import ( + MaskVisualizationBlock, +) +from inference.core.workflows.core_steps.visualizations.pixelate import ( + PixelateVisualizationBlock, +) +from inference.core.workflows.core_steps.visualizations.polygon import ( + PolygonVisualizationBlock, +) +from inference.core.workflows.core_steps.visualizations.triangle import ( + TriangleVisualizationBlock, +) +from inference.core.workflows.entities.types import ( + BATCH_OF_BAR_CODE_DETECTION_KIND, + BATCH_OF_BOOLEAN_KIND, + BATCH_OF_CLASSIFICATION_PREDICTION_KIND, + BATCH_OF_DICTIONARY_KIND, + BATCH_OF_IMAGE_METADATA_KIND, + BATCH_OF_IMAGES_KIND, + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + BATCH_OF_PARENT_ID_KIND, + BATCH_OF_PREDICTION_TYPE_KIND, + BATCH_OF_QR_CODE_DETECTION_KIND, + BATCH_OF_SERIALISED_PAYLOADS_KIND, + BATCH_OF_STRING_KIND, + BATCH_OF_TOP_CLASS_KIND, + BOOLEAN_KIND, + DETECTION_KIND, + DICTIONARY_KIND, + FLOAT_KIND, + FLOAT_ZERO_TO_ONE_KIND, 
+ IMAGE_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + INTEGER_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + LIST_OF_VALUES_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + POINT_KIND, + ROBOFLOW_API_KEY_KIND, + ROBOFLOW_MODEL_ID_KIND, + ROBOFLOW_PROJECT_KIND, + STRING_KIND, + WILDCARD_KIND, + ZONE_KIND, + Kind, +) +from inference.core.workflows.prototypes.block import ( + WorkflowBlock, + WorkflowBlockManifest, +) + +REGISTERED_INITIALIZERS = { + "api_key": API_KEY, + "cache": cache, + "step_execution_mode": StepExecutionMode(WORKFLOWS_STEP_EXECUTION_MODE), +} -def load_blocks() -> List[Type[WorkflowBlock]]: +def load_blocks() -> List[ + Union[ + Type[WorkflowBlock], + Tuple[ + Type[WorkflowBlockManifest], + Callable[[Type[WorkflowBlockManifest]], WorkflowBlock], + ], + ] +]: return [ DetectionsConsensusBlock, ClipComparisonBlock, @@ -109,4 +207,56 @@ def load_blocks() -> List[Type[WorkflowBlock]]: PropertyDefinitionBlock, DimensionCollapseBlock, FirstNonEmptyOrDefaultBlock, + BlurVisualizationBlock, + BoundingBoxVisualizationBlock, + CircleVisualizationBlock, + ColorVisualizationBlock, + CornerVisualizationBlock, + CropVisualizationBlock, + DotVisualizationBlock, + EllipseVisualizationBlock, + HaloVisualizationBlock, + LabelVisualizationBlock, + MaskVisualizationBlock, + PixelateVisualizationBlock, + PolygonVisualizationBlock, + TriangleVisualizationBlock, + ] + + +def load_kinds() -> List[Kind]: + return [ + WILDCARD_KIND, + IMAGE_KIND, + BATCH_OF_IMAGES_KIND, + ROBOFLOW_MODEL_ID_KIND, + ROBOFLOW_PROJECT_KIND, + ROBOFLOW_API_KEY_KIND, + FLOAT_ZERO_TO_ONE_KIND, + LIST_OF_VALUES_KIND, + BATCH_OF_SERIALISED_PAYLOADS_KIND, + BOOLEAN_KIND, + BATCH_OF_BOOLEAN_KIND, + INTEGER_KIND, + STRING_KIND, + BATCH_OF_STRING_KIND, + BATCH_OF_TOP_CLASS_KIND, + FLOAT_KIND, + DICTIONARY_KIND, + BATCH_OF_DICTIONARY_KIND, + BATCH_OF_CLASSIFICATION_PREDICTION_KIND, + DETECTION_KIND, + POINT_KIND, + ZONE_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, 
+ INSTANCE_SEGMENTATION_PREDICTION_KIND, + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + BATCH_OF_QR_CODE_DETECTION_KIND, + BATCH_OF_BAR_CODE_DETECTION_KIND, + BATCH_OF_PREDICTION_TYPE_KIND, + BATCH_OF_PARENT_ID_KIND, + BATCH_OF_IMAGE_METADATA_KIND, ] diff --git a/inference/core/workflows/core_steps/visualizations/__init__.py b/inference/core/workflows/core_steps/visualizations/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/inference/core/workflows/core_steps/visualizations/base.py b/inference/core/workflows/core_steps/visualizations/base.py new file mode 100644 index 000000000..8dadaaa7a --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/base.py @@ -0,0 +1,95 @@ +from abc import ABC, abstractmethod +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import AliasChoices, ConfigDict, Field + +from inference.core.workflows.core_steps.visualizations.utils import str_to_color +from inference.core.workflows.entities.base import OutputDefinition, WorkflowImageData +from inference.core.workflows.entities.types import ( + BATCH_OF_IMAGES_KIND, + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + BOOLEAN_KIND, + INTEGER_KIND, + LIST_OF_VALUES_KIND, + STRING_KIND, + StepOutputImageSelector, + StepOutputSelector, + WorkflowImageSelector, + WorkflowParameterSelector, +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, +) + +OUTPUT_IMAGE_KEY: str = "image" + + +class VisualizationManifest(WorkflowBlockManifest, ABC): + model_config = ConfigDict( + json_schema_extra={ + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + predictions: StepOutputSelector( + kind=[ + BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + 
BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + ] + ) = Field( # type: ignore + description="Predictions", + examples=["$steps.object_detection_model.predictions"], + ) + image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + title="Input Image", + description="The input image for this step.", + examples=["$inputs.image", "$steps.cropping.crops"], + validation_alias=AliasChoices("image", "images"), + ) + + copy_image: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + description="Duplicate the image contents (vs overwriting the image in place). Deselect for chained visualizations that should stack on previous ones where the intermediate state is not needed.", + default=True, + ) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition( + name=OUTPUT_IMAGE_KEY, + kind=[ + BATCH_OF_IMAGES_KIND, + ], + ), + ] + + +class VisualizationBlock(WorkflowBlock, ABC): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @classmethod + @abstractmethod + def get_manifest(cls) -> Type[VisualizationManifest]: + pass + + @abstractmethod + def getAnnotator(self, *args, **kwargs) -> sv.annotators.base.BaseAnnotator: + pass + + @abstractmethod + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + *args, + **kwargs + ) -> BlockResult: + pass diff --git a/inference/core/workflows/core_steps/visualizations/base_colorable.py b/inference/core/workflows/core_steps/visualizations/base_colorable.py new file mode 100644 index 000000000..bc1a438cb --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/base_colorable.py @@ -0,0 +1,159 @@ +from abc import ABC, abstractmethod +from typing import List, Literal, Optional, Union + +import supervision as sv +from pydantic import Field + +from inference.core.workflows.core_steps.visualizations.base import ( + 
VisualizationBlock, + VisualizationManifest, +) +from inference.core.workflows.core_steps.visualizations.utils import str_to_color +from inference.core.workflows.entities.base import WorkflowImageData +from inference.core.workflows.entities.types import ( + INTEGER_KIND, + LIST_OF_VALUES_KIND, + STRING_KIND, + WorkflowParameterSelector, +) +from inference.core.workflows.prototypes.block import BlockResult + + +class ColorableVisualizationManifest(VisualizationManifest, ABC): + color_palette: Union[ + Literal[ + "DEFAULT", + "CUSTOM", + "ROBOFLOW", + "Matplotlib Viridis", + "Matplotlib Plasma", + "Matplotlib Inferno", + "Matplotlib Magma", + "Matplotlib Cividis", + # TODO: Re-enable once supervision 0.23 is released with a fix + # "Matplotlib Twilight", + # "Matplotlib Twilight_Shifted", + # "Matplotlib HSV", + # "Matplotlib Jet", + # "Matplotlib Turbo", + # "Matplotlib Rainbow", + # "Matplotlib gist_rainbow", + # "Matplotlib nipy_spectral", + # "Matplotlib gist_ncar", + "Matplotlib Pastel1", + "Matplotlib Pastel2", + "Matplotlib Paired", + "Matplotlib Accent", + "Matplotlib Dark2", + "Matplotlib Set1", + "Matplotlib Set2", + "Matplotlib Set3", + "Matplotlib Tab10", + "Matplotlib Tab20", + "Matplotlib Tab20b", + "Matplotlib Tab20c", + # TODO: Re-enable once supervision 0.23 is released with a fix + # "Matplotlib Ocean", + # "Matplotlib Gist_Earth", + # "Matplotlib Terrain", + # "Matplotlib Stern", + # "Matplotlib gnuplot", + # "Matplotlib gnuplot2", + # "Matplotlib Spring", + # "Matplotlib Summer", + # "Matplotlib Autumn", + # "Matplotlib Winter", + # "Matplotlib Cool", + # "Matplotlib Hot", + # "Matplotlib Copper", + # "Matplotlib Bone", + # "Matplotlib Greys_R", + # "Matplotlib Purples_R", + # "Matplotlib Blues_R", + # "Matplotlib Greens_R", + # "Matplotlib Oranges_R", + # "Matplotlib Reds_R", + ], + WorkflowParameterSelector(kind=[STRING_KIND]), + ] = Field( # type: ignore + default="DEFAULT", + description="Color palette to use for annotations.", + 
examples=["DEFAULT", "$inputs.color_palette"],
+    )
+
+    palette_size: Union[
+        int,
+        WorkflowParameterSelector(kind=[INTEGER_KIND]),
+    ] = Field(  # type: ignore
+        default=10,
+        description="Number of colors in the color palette. Applies when using a matplotlib `color_palette`.",
+        examples=[10, "$inputs.palette_size"],
+    )
+
+    custom_colors: Union[
+        List[str], WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])
+    ] = Field(  # type: ignore
+        default=[],
+        description='List of colors to use for annotations when `color_palette` is set to "CUSTOM".',
+        examples=[["#FF0000", "#00FF00", "#0000FF"], "$inputs.custom_colors"],
+    )
+
+    color_axis: Union[
+        Literal["INDEX", "CLASS", "TRACK"],
+        WorkflowParameterSelector(kind=[STRING_KIND]),
+    ] = Field(  # type: ignore
+        default="CLASS",
+        description="Strategy to use for mapping colors to annotations.",
+        examples=["CLASS", "$inputs.color_axis"],
+    )
+
+
+class ColorableVisualizationBlock(VisualizationBlock, ABC):
+    @classmethod
+    def getPalette(cls, color_palette, palette_size, custom_colors):
+        if color_palette == "CUSTOM":
+            return sv.ColorPalette(
+                colors=[str_to_color(color) for color in custom_colors]
+            )
+        elif hasattr(sv.ColorPalette, color_palette):
+            return getattr(sv.ColorPalette, color_palette)
+        else:
+            palette_name = color_palette.replace("Matplotlib ", "")
+
+            if palette_name in [
+                "Greys_R",
+                "Purples_R",
+                "Blues_R",
+                "Greens_R",
+                "Oranges_R",
+                "Reds_R",
+                "Wistia",
+                "Pastel1",
+                "Pastel2",
+                "Paired",
+                "Accent",
+                "Dark2",
+                "Set1",
+                "Set2",
+                "Set3",
+            ]:
+                palette_name = palette_name.capitalize()
+            else:
+                palette_name = palette_name.lower()
+
+            return sv.ColorPalette.from_matplotlib(palette_name, int(palette_size))
+
+    @abstractmethod
+    async def run(
+        self,
+        image: WorkflowImageData,
+        predictions: sv.Detections,
+        copy_image: bool,
+        color_palette: Optional[str],
+        palette_size: Optional[int],
+        custom_colors: Optional[List[str]],
+        color_axis: Optional[str],
+        *args,
+        **kwargs
+    ) -> 
BlockResult: + pass diff --git a/inference/core/workflows/core_steps/visualizations/blur.py b/inference/core/workflows/core_steps/visualizations/blur.py new file mode 100644 index 000000000..d64568acc --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/blur.py @@ -0,0 +1,85 @@ +from typing import Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.core_steps.visualizations.base import ( + OUTPUT_IMAGE_KEY, + VisualizationBlock, + VisualizationManifest, +) +from inference.core.workflows.entities.base import WorkflowImageData +from inference.core.workflows.entities.types import ( + INTEGER_KIND, + WorkflowParameterSelector, +) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest + +TYPE: str = "BlurVisualization" +SHORT_DESCRIPTION = "Blurs detected objects in an image." +LONG_DESCRIPTION = """ +The `BlurVisualization` block blurs detected +objects in an image using Supervision's `sv.BlurAnnotator`. 
+""" + + +class BlurManifest(VisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + kernel_size: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Size of the average pooling kernel used for blurring.", + default=15, + examples=[15, "$inputs.kernel_size"], + ) + + +class BlurVisualizationBlock(VisualizationBlock): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlurManifest + + def getAnnotator( + self, + kernel_size: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join(map(str, [kernel_size])) + + if key not in self.annotatorCache: + self.annotatorCache[key] = sv.BlurAnnotator(kernel_size=kernel_size) + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + kernel_size: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + kernel_size, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions, + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/bounding_box.py b/inference/core/workflows/core_steps/visualizations/bounding_box.py new file mode 100644 index 000000000..375d5f5b9 --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/bounding_box.py @@ -0,0 +1,124 @@ +from typing import List, Literal, Optional, Type, Union + +import supervision as sv 
+from pydantic import ConfigDict, Field
+
+from inference.core.workflows.core_steps.visualizations.base import OUTPUT_IMAGE_KEY
+from inference.core.workflows.core_steps.visualizations.base_colorable import (
+    ColorableVisualizationBlock,
+    ColorableVisualizationManifest,
+)
+from inference.core.workflows.entities.base import WorkflowImageData
+from inference.core.workflows.entities.types import (
+    FLOAT_ZERO_TO_ONE_KIND,
+    INTEGER_KIND,
+    FloatZeroToOne,
+    WorkflowParameterSelector,
+)
+from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest
+
+TYPE: str = "BoundingBoxVisualization"
+SHORT_DESCRIPTION = "Draws a box around detected objects in an image."
+LONG_DESCRIPTION = """
+The `BoundingBoxVisualization` block draws a box around detected
+objects in an image using Supervision's `sv.BoxAnnotator` or `sv.RoundBoxAnnotator`.
+"""
+
+
+class BoundingBoxManifest(ColorableVisualizationManifest):
+    type: Literal[f"{TYPE}"]
+    model_config = ConfigDict(
+        json_schema_extra={
+            "short_description": SHORT_DESCRIPTION,
+            "long_description": LONG_DESCRIPTION,
+            "license": "Apache-2.0",
+            "block_type": "visualization",
+        }
+    )
+
+    thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field(  # type: ignore
+        description="Thickness of the bounding box in pixels.",
+        default=2,
+        examples=[2, "$inputs.thickness"],
+    )
+
+    roundness: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field(  # type: ignore
+        description="Roundness of the corners of the bounding box.",
+        default=0.0,
+        examples=[0.0, "$inputs.roundness"],
+    )
+
+
+class BoundingBoxVisualizationBlock(ColorableVisualizationBlock):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.annotatorCache = {}
+
+    @classmethod
+    def get_manifest(cls) -> Type[WorkflowBlockManifest]:
+        return BoundingBoxManifest
+
+    def getAnnotator(
+        self,
+        color_palette: str,
+        palette_size: int,
+        custom_colors: List[str],
+        color_axis: str,
+        
thickness: int,
+        roundness: float,
+    ) -> sv.annotators.base.BaseAnnotator:
+        key = "_".join(
+            map(str, [color_palette, palette_size, custom_colors, color_axis, thickness, roundness])
+        )
+
+        if key not in self.annotatorCache:
+            palette = self.getPalette(color_palette, palette_size, custom_colors)
+
+            if roundness == 0:
+                self.annotatorCache[key] = sv.BoxAnnotator(
+                    color=palette,
+                    color_lookup=getattr(sv.ColorLookup, color_axis),
+                    thickness=thickness,
+                )
+            else:
+                self.annotatorCache[key] = sv.RoundBoxAnnotator(
+                    color=palette,
+                    color_lookup=getattr(sv.ColorLookup, color_axis),
+                    thickness=thickness,
+                    roundness=roundness,
+                )
+        return self.annotatorCache[key]
+
+    async def run(
+        self,
+        image: WorkflowImageData,
+        predictions: sv.Detections,
+        copy_image: bool,
+        color_palette: Optional[str],
+        palette_size: Optional[int],
+        custom_colors: Optional[List[str]],
+        color_axis: Optional[str],
+        thickness: Optional[int],
+        roundness: Optional[float],
+    ) -> BlockResult:
+        annotator = self.getAnnotator(
+            color_palette,
+            palette_size,
+            custom_colors,
+            color_axis,
+            thickness,
+            roundness,
+        )
+
+        annotated_image = annotator.annotate(
+            scene=image.numpy_image.copy() if copy_image else image.numpy_image,
+            detections=predictions,
+        )
+
+        output = WorkflowImageData(
+            parent_metadata=image.parent_metadata,
+            workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata,
+            numpy_image=annotated_image,
+        )
+
+        return {OUTPUT_IMAGE_KEY: output}
diff --git a/inference/core/workflows/core_steps/visualizations/circle.py b/inference/core/workflows/core_steps/visualizations/circle.py
new file mode 100644
index 000000000..ede5e3875
--- /dev/null
+++ b/inference/core/workflows/core_steps/visualizations/circle.py
@@ -0,0 +1,114 @@
+from typing import List, Literal, Optional, Type, Union
+
+import supervision as sv
+from pydantic import ConfigDict, Field
+
+from inference.core.workflows.core_steps.visualizations.base import OUTPUT_IMAGE_KEY
+from 
inference.core.workflows.core_steps.visualizations.base_colorable import ( + ColorableVisualizationBlock, + ColorableVisualizationManifest, +) +from inference.core.workflows.entities.base import WorkflowImageData +from inference.core.workflows.entities.types import ( + INTEGER_KIND, + WorkflowParameterSelector, +) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest + +TYPE: str = "CircleVisualization" +SHORT_DESCRIPTION = "Draws a circle around detected objects in an image." +LONG_DESCRIPTION = """ +The `CircleVisualization` block draws a circle around detected +objects in an image using Supervision's `sv.CircleAnnotator`. +""" + + +class CircleManifest(ColorableVisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Thickness of the lines in pixels.", + default=2, + examples=[2, "$inputs.thickness"], + ) + + +class CircleVisualizationBlock(ColorableVisualizationBlock): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return CircleManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + thickness: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + thickness, + ], + ) + ) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + self.annotatorCache[key] = sv.CircleAnnotator( + color=palette, + color_lookup=getattr(sv.ColorLookup, color_axis), + thickness=thickness, + ) + + return 
self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + thickness: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + thickness, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions, + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/color.py b/inference/core/workflows/core_steps/visualizations/color.py new file mode 100644 index 000000000..2e247544c --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/color.py @@ -0,0 +1,115 @@ +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.core_steps.visualizations.base import OUTPUT_IMAGE_KEY +from inference.core.workflows.core_steps.visualizations.base_colorable import ( + ColorableVisualizationBlock, + ColorableVisualizationManifest, +) +from inference.core.workflows.entities.base import WorkflowImageData +from inference.core.workflows.entities.types import ( + FLOAT_ZERO_TO_ONE_KIND, + FloatZeroToOne, + WorkflowParameterSelector, +) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest + +TYPE: str = "ColorVisualization" +SHORT_DESCRIPTION = "Paints a solid color on detected objects in an image." +LONG_DESCRIPTION = """ +The `ColorVisualization` block paints a solid color on detected +objects in an image using Supervision's `sv.ColorAnnotator`. 
+""" + + +class ColorManifest(ColorableVisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + description="Transparency of the color overlay.", + default=0.5, + examples=[0.5, "$inputs.opacity"], + ) + + +class ColorVisualizationBlock(ColorableVisualizationBlock): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return ColorManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + opacity: float, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + opacity, + ], + ) + ) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + self.annotatorCache[key] = sv.ColorAnnotator( + color=palette, + color_lookup=getattr(sv.ColorLookup, color_axis), + opacity=opacity, + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + opacity: Optional[float], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + opacity, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions, + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + 
workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/corner.py b/inference/core/workflows/core_steps/visualizations/corner.py new file mode 100644 index 000000000..aee47eadc --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/corner.py @@ -0,0 +1,125 @@ +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.core_steps.visualizations.base import OUTPUT_IMAGE_KEY +from inference.core.workflows.core_steps.visualizations.base_colorable import ( + ColorableVisualizationBlock, + ColorableVisualizationManifest, +) +from inference.core.workflows.entities.base import WorkflowImageData +from inference.core.workflows.entities.types import ( + INTEGER_KIND, + WorkflowParameterSelector, +) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest + +TYPE: str = "CornerVisualization" +SHORT_DESCRIPTION = "Draws the corners of detected objects in an image." +LONG_DESCRIPTION = """ +The `CornerVisualization` block draws the corners of detected +objects in an image using Supervision's `sv.BoxCornerAnnotator`. 
+""" + + +class CornerManifest(ColorableVisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Thickness of the lines in pixels.", + default=4, + examples=[4, "$inputs.thickness"], + ) + + corner_length: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Length of the corner lines in pixels.", + default=15, + examples=[15, "$inputs.corner_length"], + ) + + +class CornerVisualizationBlock(ColorableVisualizationBlock): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return CornerManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + thickness: int, + corner_length: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + thickness, + corner_length, + ], + ) + ) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + self.annotatorCache[key] = sv.BoxCornerAnnotator( + color=palette, + color_lookup=getattr(sv.ColorLookup, color_axis), + thickness=thickness, + corner_length=corner_length, + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + thickness: Optional[int], + corner_length: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + 
palette_size, + custom_colors, + color_axis, + thickness, + corner_length, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions, + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/crop.py b/inference/core/workflows/core_steps/visualizations/crop.py new file mode 100644 index 000000000..7dd2c6ddf --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/crop.py @@ -0,0 +1,152 @@ +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.core_steps.visualizations.base import OUTPUT_IMAGE_KEY +from inference.core.workflows.core_steps.visualizations.base_colorable import ( + ColorableVisualizationBlock, + ColorableVisualizationManifest, +) +from inference.core.workflows.entities.base import WorkflowImageData +from inference.core.workflows.entities.types import ( + FLOAT_KIND, + INTEGER_KIND, + STRING_KIND, + WorkflowParameterSelector, +) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest + +TYPE: str = "CropVisualization" +SHORT_DESCRIPTION = "Draws scaled up crops of detections on the scene." +LONG_DESCRIPTION = """ +The `CropVisualization` block draws scaled up crops of detections +on the scene using Supervision's `sv.CropAnnotator`. 
+""" + + +class CropManifest(ColorableVisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + position: Union[ + Literal[ + "CENTER", + "CENTER_LEFT", + "CENTER_RIGHT", + "TOP_CENTER", + "TOP_LEFT", + "TOP_RIGHT", + "BOTTOM_LEFT", + "BOTTOM_CENTER", + "BOTTOM_RIGHT", + "CENTER_OF_MASS", + ], + WorkflowParameterSelector(kind=[STRING_KIND]), + ] = Field( # type: ignore + default="TOP_CENTER", + description="The anchor position for placing the crop.", + examples=["CENTER", "$inputs.position"], + ) + + scale_factor: Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] = Field( # type: ignore + description="The factor by which to scale the cropped image part. A factor of 2, for example, would double the size of the cropped area, allowing for a closer view of the detection.", + default=2.0, + examples=[2.0, "$inputs.scale_factor"], + ) + + border_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Thickness of the outline in pixels.", + default=2, + examples=[2, "$inputs.border_thickness"], + ) + + +class CropVisualizationBlock(ColorableVisualizationBlock): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return CropManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + position: str, + scale_factor: float, + border_thickness: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + position, + scale_factor, + border_thickness, + ], + ) + ) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, 
custom_colors) + + self.annotatorCache[key] = sv.CropAnnotator( + border_color=palette, + border_color_lookup=getattr(sv.ColorLookup, color_axis), + position=getattr(sv.Position, position), + scale_factor=scale_factor, + border_thickness=border_thickness, + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + position: Optional[str], + scale_factor: Optional[float], + border_thickness: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + position, + scale_factor, + border_thickness, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions, + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/dot.py b/inference/core/workflows/core_steps/visualizations/dot.py new file mode 100644 index 000000000..565400fea --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/dot.py @@ -0,0 +1,153 @@ +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.core_steps.visualizations.base import OUTPUT_IMAGE_KEY +from inference.core.workflows.core_steps.visualizations.base_colorable import ( + ColorableVisualizationBlock, + ColorableVisualizationManifest, +) +from inference.core.workflows.entities.base import WorkflowImageData +from inference.core.workflows.entities.types import ( + INTEGER_KIND, + STRING_KIND, + WorkflowParameterSelector, +) +from 
inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest + +TYPE: str = "DotVisualization" +SHORT_DESCRIPTION = ( + "Draws dots on an image at specific coordinates based on provided detections." +) +LONG_DESCRIPTION = """ +The `DotVisualization` block draws dots on an image at specific coordinates +based on provided detections using Supervision's `sv.DotAnnotator`. +""" + + +class DotManifest(ColorableVisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + position: Union[ + Literal[ + "CENTER", + "CENTER_LEFT", + "CENTER_RIGHT", + "TOP_CENTER", + "TOP_LEFT", + "TOP_RIGHT", + "BOTTOM_LEFT", + "BOTTOM_CENTER", + "BOTTOM_RIGHT", + "CENTER_OF_MASS", + ], + WorkflowParameterSelector(kind=[STRING_KIND]), + ] = Field( # type: ignore + default="CENTER", + description="The anchor position for placing the dot.", + examples=["CENTER", "$inputs.position"], + ) + + radius: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Radius of the dot in pixels.", + default=4, + examples=[4, "$inputs.radius"], + ) + + outline_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Thickness of the outline of the dot in pixels.", + default=0, + examples=[2, "$inputs.outline_thickness"], + ) + + +class DotVisualizationBlock(ColorableVisualizationBlock): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return DotManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + position: str, + radius: int, + outline_thickness: int, + ) -> sv.annotators.base.BaseAnnotator: + key = 
"_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + position, + radius, + outline_thickness, + ], + ) + ) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + self.annotatorCache[key] = sv.DotAnnotator( + color=palette, + color_lookup=getattr(sv.ColorLookup, color_axis), + position=getattr(sv.Position, position), + radius=radius, + outline_thickness=outline_thickness, + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + position: Optional[str], + radius: Optional[int], + outline_thickness: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + position, + radius, + outline_thickness, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions, + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/ellipse.py b/inference/core/workflows/core_steps/visualizations/ellipse.py new file mode 100644 index 000000000..1692b548b --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/ellipse.py @@ -0,0 +1,136 @@ +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.core_steps.visualizations.base import OUTPUT_IMAGE_KEY +from inference.core.workflows.core_steps.visualizations.base_colorable import ( + ColorableVisualizationBlock, + ColorableVisualizationManifest, +) 
+from inference.core.workflows.entities.base import WorkflowImageData +from inference.core.workflows.entities.types import ( + INTEGER_KIND, + WorkflowParameterSelector, +) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest + +TYPE: str = "EllipseVisualization" +SHORT_DESCRIPTION = "Draws ellipses that highlight detected objects in an image." +LONG_DESCRIPTION = """ +The `EllipseVisualization` block draws ellipses that highlight detected +objects in an image using Supervision's `sv.EllipseAnnotator`. +""" + + +class EllipseManifest(ColorableVisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Thickness of the lines in pixels.", + default=2, + examples=[2, "$inputs.thickness"], + ) + + start_angle: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Starting angle of the ellipse in degrees.", + default=-45, + examples=[-45, "$inputs.start_angle"], + ) + + end_angle: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Ending angle of the ellipse in degrees.", + default=235, + examples=[235, "$inputs.end_angle"], + ) + + +class EllipseVisualizationBlock(ColorableVisualizationBlock): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return EllipseManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + thickness: int, + start_angle: int, + end_angle: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join( + map( + str, + [ + 
color_palette, + palette_size, + color_axis, + thickness, + start_angle, + end_angle, + ], + ) + ) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + self.annotatorCache[key] = sv.EllipseAnnotator( + color=palette, + color_lookup=getattr(sv.ColorLookup, color_axis), + thickness=thickness, + start_angle=start_angle, + end_angle=end_angle, + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + thickness: Optional[int], + start_angle: Optional[int], + end_angle: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + thickness, + start_angle, + end_angle, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions, + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/halo.py b/inference/core/workflows/core_steps/visualizations/halo.py new file mode 100644 index 000000000..f58b3d119 --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/halo.py @@ -0,0 +1,138 @@ +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.core_steps.visualizations.base import OUTPUT_IMAGE_KEY +from inference.core.workflows.core_steps.visualizations.base_colorable import ( + ColorableVisualizationBlock, + ColorableVisualizationManifest, +) +from inference.core.workflows.entities.base import WorkflowImageData 
+from inference.core.workflows.entities.types import ( + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + FLOAT_ZERO_TO_ONE_KIND, + INTEGER_KIND, + FloatZeroToOne, + StepOutputSelector, + WorkflowParameterSelector, +) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest + +TYPE: str = "HaloVisualization" +SHORT_DESCRIPTION = "Paints a halo around detected objects in an image." +LONG_DESCRIPTION = """ +The `HaloVisualization` block uses a detected polygon +from an instance segmentation to draw a halo using +`sv.HaloAnnotator`. +""" + + +class HaloManifest(ColorableVisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + predictions: StepOutputSelector( + kind=[ + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + ] + ) = Field( # type: ignore + description="Predictions", + examples=["$steps.instance_segmentation_model.predictions"], + ) + + opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + description="Transparency of the halo overlay.", + default=0.8, + examples=[0.8, "$inputs.opacity"], + ) + + kernel_size: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Size of the average pooling kernel used for creating the halo.", + default=40, + examples=[40, "$inputs.kernel_size"], + ) + + +class HaloVisualizationBlock(ColorableVisualizationBlock): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return HaloManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + opacity: float, + kernel_size: int, + ) -> 
sv.annotators.base.BaseAnnotator: + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + opacity, + kernel_size, + ], + ) + ) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + self.annotatorCache[key] = sv.HaloAnnotator( + color=palette, + color_lookup=getattr(sv.ColorLookup, color_axis), + opacity=opacity, kernel_size=kernel_size, + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + opacity: Optional[float], + kernel_size: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + opacity, + kernel_size, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions, + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/label.py b/inference/core/workflows/core_steps/visualizations/label.py new file mode 100644 index 000000000..9d3ffe205 --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/label.py @@ -0,0 +1,229 @@ +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.core_steps.visualizations.base import OUTPUT_IMAGE_KEY +from inference.core.workflows.core_steps.visualizations.base_colorable import ( + ColorableVisualizationBlock, + ColorableVisualizationManifest, +) +from inference.core.workflows.core_steps.visualizations.utils import str_to_color +from
inference.core.workflows.entities.base import WorkflowImageData +from inference.core.workflows.entities.types import ( + FLOAT_KIND, + INTEGER_KIND, + STRING_KIND, + WorkflowParameterSelector, +) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest + +TYPE: str = "LabelVisualization" +SHORT_DESCRIPTION = ( + "Draws labels on an image at specific coordinates based on provided detections." +) +LONG_DESCRIPTION = """ +The `LabelVisualization` block draws labels on an image at specific coordinates +based on provided detections using Supervision's `sv.LabelAnnotator`. +""" + + +class LabelManifest(ColorableVisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + text: Union[ + Literal[ + "Class", "Confidence", "Class and Confidence", "Index", "Dimensions", "Area" + ], + WorkflowParameterSelector(kind=[STRING_KIND]), + ] = Field( # type: ignore + default="Class", + description="The type of text to display.", + examples=["LABEL", "$inputs.text"], + ) + + text_position: Union[ + Literal[ + "CENTER", + "CENTER_LEFT", + "CENTER_RIGHT", + "TOP_CENTER", + "TOP_LEFT", + "TOP_RIGHT", + "BOTTOM_LEFT", + "BOTTOM_CENTER", + "BOTTOM_RIGHT", + "CENTER_OF_MASS", + ], + WorkflowParameterSelector(kind=[STRING_KIND]), + ] = Field( # type: ignore + default="TOP_LEFT", + description="The anchor position for placing the label.", + examples=["CENTER", "$inputs.text_position"], + ) + + text_color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore + description="Color of the text.", + default="WHITE", + examples=["WHITE", "#FFFFFF", "rgb(255, 255, 255)", "$inputs.text_color"], + ) + + text_scale: Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] = Field( # type: ignore + description="Scale of the text.", + default=1.0, +
examples=[1.0, "$inputs.text_scale"], + ) + + text_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Thickness of the text characters.", + default=1, + examples=[1, "$inputs.text_thickness"], + ) + + text_padding: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Padding around the text in pixels.", + default=10, + examples=[10, "$inputs.text_padding"], + ) + + border_radius: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Radius of the label in pixels.", + default=0, + examples=[0, "$inputs.border_radius"], + ) + + +class LabelVisualizationBlock(ColorableVisualizationBlock): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return LabelManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + text_position: str, + text_color: str, + text_scale: float, + text_thickness: int, + text_padding: int, + border_radius: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + text_position, + text_color, + text_scale, + text_thickness, + text_padding, + border_radius, + ], + ) + ) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + text_color = str_to_color(text_color) + + self.annotatorCache[key] = sv.LabelAnnotator( + color=palette, + color_lookup=getattr(sv.ColorLookup, color_axis), + text_position=getattr(sv.Position, text_position), + text_color=text_color, + text_scale=text_scale, + text_thickness=text_thickness, + text_padding=text_padding, + border_radius=border_radius, + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: 
sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + text: Optional[str], + text_position: Optional[str], + text_color: Optional[str], + text_scale: Optional[float], + text_thickness: Optional[int], + text_padding: Optional[int], + border_radius: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + text_position, + text_color, + text_scale, + text_thickness, + text_padding, + border_radius, + ) + + if text == "Class": + labels = predictions["class_name"] + elif text == "Confidence": + labels = [f"{confidence:.2f}" for confidence in predictions.confidence] + elif text == "Class and Confidence": + labels = [ + f"{class_name} {confidence:.2f}" + for class_name, confidence in zip( + predictions["class_name"], predictions.confidence + ) + ] + elif text == "Index": + labels = [str(i) for i in range(len(predictions))] + elif text == "Dimensions": + # rounded ints: center x, center y wxh from predictions[i].xyxy + labels = [] + for i in range(len(predictions)): + x1, y1, x2, y2 = predictions.xyxy[i] + cx, cy = (x1 + x2) / 2, (y1 + y2) / 2 + w, h = x2 - x1, y2 - y1 + labels.append(f"{int(cx)}, {int(cy)} {int(w)}x{int(h)}") + elif text == "Area": + labels = [str(int(area)) for area in predictions.area] + else: + raise ValueError(f"Invalid text type: {text}") + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions, + labels=labels, + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/mask.py b/inference/core/workflows/core_steps/visualizations/mask.py new file mode 100644 
index 000000000..40571f63c --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/mask.py @@ -0,0 +1,127 @@ +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.core_steps.visualizations.base import OUTPUT_IMAGE_KEY +from inference.core.workflows.core_steps.visualizations.base_colorable import ( + ColorableVisualizationBlock, + ColorableVisualizationManifest, +) +from inference.core.workflows.entities.base import WorkflowImageData +from inference.core.workflows.entities.types import ( + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + FLOAT_ZERO_TO_ONE_KIND, + FloatZeroToOne, + StepOutputSelector, + WorkflowParameterSelector, +) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest + +TYPE: str = "MaskVisualization" +SHORT_DESCRIPTION = "Paints a mask over detected objects in an image." +LONG_DESCRIPTION = """ +The `MaskVisualization` block uses a detected polygon +from an instance segmentation to draw a mask using +`sv.MaskAnnotator`. 
+""" + + +class MaskManifest(ColorableVisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + predictions: StepOutputSelector( + kind=[ + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + ] + ) = Field( # type: ignore + description="Predictions", + examples=["$steps.instance_segmentation_model.predictions"], + ) + + opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + description="Transparency of the Mask overlay.", + default=0.5, + examples=[0.5, "$inputs.opacity"], + ) + + +class MaskVisualizationBlock(ColorableVisualizationBlock): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return MaskManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + opacity: float, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + opacity, + ], + ) + ) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + self.annotatorCache[key] = sv.MaskAnnotator( + color=palette, + color_lookup=getattr(sv.ColorLookup, color_axis), + opacity=opacity, + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + opacity: Optional[float], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + opacity, + ) + + annotated_image = 
annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions, + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/pixelate.py b/inference/core/workflows/core_steps/visualizations/pixelate.py new file mode 100644 index 000000000..5a6a1a6e5 --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/pixelate.py @@ -0,0 +1,89 @@ +from typing import Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.core_steps.visualizations.base import ( + OUTPUT_IMAGE_KEY, + VisualizationBlock, + VisualizationManifest, +) +from inference.core.workflows.entities.base import WorkflowImageData +from inference.core.workflows.entities.types import ( + INTEGER_KIND, + WorkflowParameterSelector, +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, +) + +TYPE: str = "PixelateVisualization" +SHORT_DESCRIPTION = "Pixelates detected objects in an image." +LONG_DESCRIPTION = """ +The `PixelateVisualization` block pixelates detected +objects in an image using Supervision's `sv.PixelateAnnotator`. 
+""" + + +class PixelateManifest(VisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + pixel_size: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Size of the pixelation.", + default=20, + examples=[20, "$inputs.pixel_size"], + ) + + +class PixelateVisualizationBlock(VisualizationBlock): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return PixelateManifest + + def getAnnotator( + self, + pixel_size: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join(map(str, [pixel_size])) + + if key not in self.annotatorCache: + self.annotatorCache[key] = sv.PixelateAnnotator(pixel_size=pixel_size) + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + pixel_size: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + pixel_size, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions, + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/polygon.py b/inference/core/workflows/core_steps/visualizations/polygon.py new file mode 100644 index 000000000..826e9bfa8 --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/polygon.py @@ -0,0 +1,126 @@ +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + 
+from inference.core.workflows.core_steps.visualizations.base import OUTPUT_IMAGE_KEY +from inference.core.workflows.core_steps.visualizations.base_colorable import ( + ColorableVisualizationBlock, + ColorableVisualizationManifest, +) +from inference.core.workflows.entities.base import WorkflowImageData +from inference.core.workflows.entities.types import ( + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + INTEGER_KIND, + StepOutputSelector, + WorkflowParameterSelector, +) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest + +TYPE: str = "PolygonVisualization" +SHORT_DESCRIPTION = "Draws a polygon around detected objects in an image." +LONG_DESCRIPTION = """ +The `PolygonVisualization` block uses a detections from an +instance segmentation to draw polygons around objects using +`sv.PolygonAnnotator`. +""" + + +class PolygonManifest(ColorableVisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + predictions: StepOutputSelector( + kind=[ + BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + ] + ) = Field( # type: ignore + description="Predictions", + examples=["$steps.instance_segmentation_model.predictions"], + ) + + thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Thickness of the outline in pixels.", + default=2, + examples=[2, "$inputs.thickness"], + ) + + +class PolygonVisualizationBlock(ColorableVisualizationBlock): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return PolygonManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + thickness: int, + ) -> 
sv.annotators.base.BaseAnnotator: + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + thickness, + ], + ) + ) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + self.annotatorCache[key] = sv.PolygonAnnotator( + color=palette, + color_lookup=getattr(sv.ColorLookup, color_axis), + thickness=thickness, + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + thickness: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + thickness, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions, + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/triangle.py b/inference/core/workflows/core_steps/visualizations/triangle.py new file mode 100644 index 000000000..760944b67 --- /dev/null +++ b/inference/core/workflows/core_steps/visualizations/triangle.py @@ -0,0 +1,162 @@ +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + +from inference.core.workflows.core_steps.visualizations.base import OUTPUT_IMAGE_KEY +from inference.core.workflows.core_steps.visualizations.base_colorable import ( + ColorableVisualizationBlock, + ColorableVisualizationManifest, +) +from inference.core.workflows.entities.base import WorkflowImageData +from inference.core.workflows.entities.types import ( + INTEGER_KIND, + 
STRING_KIND, + WorkflowParameterSelector, +) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest + +TYPE: str = "TriangleVisualization" +SHORT_DESCRIPTION = "Draws triangle markers on an image at specific coordinates based on provided detections." +LONG_DESCRIPTION = """ +The `TriangleVisualization` block draws triangle markers on an image at specific coordinates +based on provided detections using Supervision's `sv.TriangleAnnotator`. +""" + + +class TriangleManifest(ColorableVisualizationManifest): + type: Literal[f"{TYPE}"] + model_config = ConfigDict( + json_schema_extra={ + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + } + ) + + position: Union[ + Literal[ + "CENTER", + "CENTER_LEFT", + "CENTER_RIGHT", + "TOP_CENTER", + "TOP_LEFT", + "TOP_RIGHT", + "BOTTOM_LEFT", + "BOTTOM_CENTER", + "BOTTOM_RIGHT", + "CENTER_OF_MASS", + ], + WorkflowParameterSelector(kind=[STRING_KIND]), + ] = Field( # type: ignore + default="TOP_CENTER", + description="The anchor position for placing the triangle.", + examples=["CENTER", "$inputs.position"], + ) + + base: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Base width of the triangle in pixels.", + default=10, + examples=[10, "$inputs.base"], + ) + + height: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Height of the triangle in pixels.", + default=10, + examples=[10, "$inputs.height"], + ) + + outline_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Thickness of the outline of the triangle in pixels.", + default=0, + examples=[2, "$inputs.outline_thickness"], + ) + + +class TriangleVisualizationBlock(ColorableVisualizationBlock): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.annotatorCache = {} + + 
@classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return TriangleManifest + + def getAnnotator( + self, + color_palette: str, + palette_size: int, + custom_colors: List[str], + color_axis: str, + position: str, + base: int, + height: int, + outline_thickness: int, + ) -> sv.annotators.base.BaseAnnotator: + key = "_".join( + map( + str, + [ + color_palette, + palette_size, + color_axis, + position, + base, + height, + outline_thickness, + ], + ) + ) + + if key not in self.annotatorCache: + palette = self.getPalette(color_palette, palette_size, custom_colors) + + self.annotatorCache[key] = sv.TriangleAnnotator( + color=palette, + color_lookup=getattr(sv.ColorLookup, color_axis), + position=getattr(sv.Position, position), + base=base, + height=height, + outline_thickness=outline_thickness, + ) + + return self.annotatorCache[key] + + async def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + color_palette: Optional[str], + palette_size: Optional[int], + custom_colors: Optional[List[str]], + color_axis: Optional[str], + position: Optional[str], + base: Optional[int], + height: Optional[int], + outline_thickness: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + color_palette, + palette_size, + custom_colors, + color_axis, + position, + base, + height, + outline_thickness, + ) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + detections=predictions, + ) + + output = WorkflowImageData( + parent_metadata=image.parent_metadata, + workflow_root_ancestor_metadata=image.workflow_root_ancestor_metadata, + numpy_image=annotated_image, + ) + + return {OUTPUT_IMAGE_KEY: output} diff --git a/inference/core/workflows/core_steps/visualizations/utils.py b/inference/core/workflows/core_steps/visualizations/utils.py new file mode 100644 index 000000000..812e2c833 --- /dev/null +++ 
b/inference/core/workflows/core_steps/visualizations/utils.py @@ -0,0 +1,18 @@ +import supervision as sv + + +def str_to_color(color: str) -> sv.Color: + if color.startswith("#"): + return sv.Color.from_hex(color) + elif color.startswith("rgb"): + r, g, b = map(int, color[4:-1].split(",")) + return sv.Color.from_rgb_tuple((r, g, b)) + elif color.startswith("bgr"): + b, g, r = map(int, color[4:-1].split(",")) + return sv.Color.from_bgr_tuple((b, g, r)) + elif hasattr(sv.Color, color.upper()): + return getattr(sv.Color, color.upper()) + else: + raise ValueError( + f"Invalid text color: {color}; valid formats are #RRGGBB, rgb(R, G, B), bgr(B, G, R), or a valid color name (like WHITE, BLACK, or BLUE)." + ) diff --git a/inference/core/workflows/errors.py b/inference/core/workflows/errors.py index 1d5290510..1a7b9bda6 100644 --- a/inference/core/workflows/errors.py +++ b/inference/core/workflows/errors.py @@ -33,6 +33,10 @@ def inner_error(self) -> Optional[Exception]: return self._inner_error +class WorkflowEnvironmentConfigurationError(WorkflowError): + pass + + class WorkflowCompilerError(WorkflowError): pass @@ -53,6 +57,10 @@ class BlockInterfaceError(WorkflowCompilerError): pass +class DynamicBlockError(WorkflowCompilerError): + pass + + class WorkflowDefinitionError(WorkflowCompilerError): pass diff --git a/inference/core/workflows/execution_engine/compiler/core.py b/inference/core/workflows/execution_engine/compiler/core.py index 2744cb88c..399eb4b1b 100644 --- a/inference/core/workflows/execution_engine/compiler/core.py +++ b/inference/core/workflows/execution_engine/compiler/core.py @@ -22,6 +22,9 @@ validate_workflow_specification, ) from inference.core.workflows.execution_engine.debugger.core import dump_execution_graph +from inference.core.workflows.execution_engine.dynamic_blocks.block_assembler import ( + compile_dynamic_blocks, +) from inference.core.workflows.execution_engine.introspection.blocks_loader import ( load_initializers, load_workflow_blocks, 
@@ -33,10 +36,16 @@ def compile_workflow( workflow_definition: dict, init_parameters: Dict[str, Union[Any, Callable[[None], Any]]], ) -> CompiledWorkflow: - available_blocks = load_workflow_blocks() + statically_defined_blocks = load_workflow_blocks() initializers = load_initializers() + dynamic_blocks = compile_dynamic_blocks( + dynamic_blocks_definitions=workflow_definition.get( + "dynamic_blocks_definitions", [] + ) + ) parsed_workflow_definition = parse_workflow_definition( raw_workflow_definition=workflow_definition, + dynamic_blocks=dynamic_blocks, ) validate_workflow_specification(workflow_definition=parsed_workflow_definition) execution_graph = prepare_execution_graph( @@ -44,7 +53,7 @@ def compile_workflow( ) steps = initialise_steps( steps_manifest=parsed_workflow_definition.steps, - available_bocks=available_blocks, + available_bocks=statically_defined_blocks + dynamic_blocks, explicit_init_parameters=init_parameters, initializers=initializers, ) @@ -55,6 +64,8 @@ def compile_workflow( dump_execution_graph(execution_graph=execution_graph) return CompiledWorkflow( workflow_definition=parsed_workflow_definition, + workflow_json=workflow_definition, + init_parameters=init_parameters, execution_graph=execution_graph, steps=steps_by_name, input_substitutions=input_substitutions, diff --git a/inference/core/workflows/execution_engine/compiler/entities.py b/inference/core/workflows/execution_engine/compiler/entities.py index 4bb52d7dd..9752993ad 100644 --- a/inference/core/workflows/execution_engine/compiler/entities.py +++ b/inference/core/workflows/execution_engine/compiler/entities.py @@ -1,7 +1,7 @@ from abc import abstractmethod from dataclasses import dataclass, field from enum import Enum -from typing import Any, Dict, Generator, List, Optional, Set, Type, Union +from typing import Any, Callable, Dict, Generator, List, Optional, Set, Type, Union import networkx as nx @@ -51,6 +51,8 @@ class CompiledWorkflow: execution_graph: nx.DiGraph steps: Dict[str, 
InitialisedStep] input_substitutions: List[InputSubstitution] + workflow_json: Dict[str, Any] + init_parameters: Dict[str, Any] class NodeCategory(Enum): diff --git a/inference/core/workflows/execution_engine/compiler/steps_initialiser.py b/inference/core/workflows/execution_engine/compiler/steps_initialiser.py index 8603081a5..b7ce38c3a 100644 --- a/inference/core/workflows/execution_engine/compiler/steps_initialiser.py +++ b/inference/core/workflows/execution_engine/compiler/steps_initialiser.py @@ -1,4 +1,5 @@ -from typing import Any, Callable, Dict, List, Union +from dataclasses import replace +from typing import Any, Callable, Dict, List, Tuple, Type, Union from inference.core.workflows.errors import ( BlockInitParameterNotProvidedError, @@ -8,6 +9,7 @@ from inference.core.workflows.execution_engine.compiler.entities import ( BlockSpecification, InitialisedStep, + ParsedWorkflowDefinition, ) from inference.core.workflows.prototypes.block import WorkflowBlockManifest diff --git a/inference/core/workflows/execution_engine/compiler/syntactic_parser.py b/inference/core/workflows/execution_engine/compiler/syntactic_parser.py index 7a19799b5..17898d7cc 100644 --- a/inference/core/workflows/execution_engine/compiler/syntactic_parser.py +++ b/inference/core/workflows/execution_engine/compiler/syntactic_parser.py @@ -7,17 +7,24 @@ from inference.core.workflows.entities.base import InputType, JsonField from inference.core.workflows.errors import WorkflowSyntaxError from inference.core.workflows.execution_engine.compiler.entities import ( + BlockSpecification, ParsedWorkflowDefinition, ) +from inference.core.workflows.execution_engine.dynamic_blocks.entities import ( + DynamicBlockDefinition, +) from inference.core.workflows.execution_engine.introspection.blocks_loader import ( + load_all_defined_kinds, load_workflow_blocks, ) def parse_workflow_definition( - raw_workflow_definition: dict, + raw_workflow_definition: dict, dynamic_blocks: List[BlockSpecification] ) -> 
ParsedWorkflowDefinition: - workflow_definition_class = build_workflow_definition_entity() + workflow_definition_class = build_workflow_definition_entity( + dynamic_blocks=dynamic_blocks, + ) try: workflow_definition = workflow_definition_class.model_validate( raw_workflow_definition @@ -36,8 +43,10 @@ def parse_workflow_definition( ) from e -def build_workflow_definition_entity() -> Type[BaseModel]: - blocks = load_workflow_blocks() +def build_workflow_definition_entity( + dynamic_blocks: List[BlockSpecification], +) -> Type[BaseModel]: + blocks = load_workflow_blocks() + dynamic_blocks steps_manifests = tuple(block.manifest_class for block in blocks) block_manifest_types_union = Union[steps_manifests] block_type = Annotated[block_manifest_types_union, Field(discriminator="type")] diff --git a/inference/core/workflows/execution_engine/core.py b/inference/core/workflows/execution_engine/core.py index ff06043c9..76bdc24dd 100644 --- a/inference/core/workflows/execution_engine/core.py +++ b/inference/core/workflows/execution_engine/core.py @@ -2,6 +2,7 @@ from asyncio import AbstractEventLoop from typing import Any, Dict, List, Optional +from inference.core.env import API_KEY from inference.core.workflows.execution_engine.compiler.core import compile_workflow from inference.core.workflows.execution_engine.compiler.entities import CompiledWorkflow from inference.core.workflows.execution_engine.executor.core import run_workflow @@ -22,6 +23,7 @@ def init( init_parameters: Optional[Dict[str, Any]] = None, max_concurrent_steps: int = 1, prevent_local_images_loading: bool = False, + workflow_id: Optional[str] = None, ) -> "ExecutionEngine": if init_parameters is None: init_parameters = {} @@ -33,6 +35,7 @@ def init( compiled_workflow=compiled_workflow, max_concurrent_steps=max_concurrent_steps, prevent_local_images_loading=prevent_local_images_loading, + workflow_id=workflow_id, ) def __init__( @@ -40,15 +43,18 @@ def __init__( compiled_workflow: CompiledWorkflow, 
max_concurrent_steps: int, prevent_local_images_loading: bool, + workflow_id: Optional[str] = None, ): self._compiled_workflow = compiled_workflow self._max_concurrent_steps = max_concurrent_steps self._prevent_local_images_loading = prevent_local_images_loading + self._workflow_id = workflow_id def run( self, runtime_parameters: Dict[str, Any], event_loop: Optional[AbstractEventLoop] = None, + fps: float = 0, ) -> List[Dict[str, Any]]: if event_loop is None: try: @@ -56,11 +62,13 @@ def run( except: event_loop = asyncio.new_event_loop() return event_loop.run_until_complete( - self.run_async(runtime_parameters=runtime_parameters) + self.run_async(runtime_parameters=runtime_parameters, fps=fps) ) async def run_async( - self, runtime_parameters: Dict[str, Any] + self, + runtime_parameters: Dict[str, Any], + fps: float = 0, ) -> List[Dict[str, Any]]: runtime_parameters = assembly_runtime_parameters( runtime_parameters=runtime_parameters, @@ -75,4 +83,6 @@ async def run_async( workflow=self._compiled_workflow, runtime_parameters=runtime_parameters, max_concurrent_steps=self._max_concurrent_steps, + usage_fps=fps, + usage_workflow_id=self._workflow_id, ) diff --git a/inference/core/workflows/execution_engine/dynamic_blocks/__init__.py b/inference/core/workflows/execution_engine/dynamic_blocks/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/inference/core/workflows/execution_engine/dynamic_blocks/block_assembler.py b/inference/core/workflows/execution_engine/dynamic_blocks/block_assembler.py new file mode 100644 index 000000000..07c55e739 --- /dev/null +++ b/inference/core/workflows/execution_engine/dynamic_blocks/block_assembler.py @@ -0,0 +1,396 @@ +from copy import deepcopy +from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union +from uuid import uuid4 + +from pydantic import BaseModel, ConfigDict, Field, create_model + +from inference.core.env import ALLOW_CUSTOM_PYTHON_EXECUTION_IN_WORKFLOWS +from 
inference.core.workflows.entities.base import OutputDefinition +from inference.core.workflows.entities.types import ( + WILDCARD_KIND, + Kind, + StepOutputImageSelector, + StepOutputSelector, + WorkflowImageSelector, + WorkflowParameterSelector, +) +from inference.core.workflows.errors import ( + DynamicBlockError, + WorkflowEnvironmentConfigurationError, +) +from inference.core.workflows.execution_engine.compiler.entities import ( + BlockSpecification, +) +from inference.core.workflows.execution_engine.dynamic_blocks.block_scaffolding import ( + assembly_custom_python_block, +) +from inference.core.workflows.execution_engine.dynamic_blocks.entities import ( + BLOCK_SOURCE, + DynamicBlockDefinition, + DynamicInputDefinition, + DynamicOutputDefinition, + ManifestDescription, + SelectorType, + ValueType, +) +from inference.core.workflows.execution_engine.introspection.blocks_loader import ( + load_all_defined_kinds, +) +from inference.core.workflows.execution_engine.introspection.utils import ( + build_human_friendly_block_name, + get_full_type_name, +) +from inference.core.workflows.prototypes.block import WorkflowBlockManifest + + +def compile_dynamic_blocks( + dynamic_blocks_definitions: List[dict], +) -> List[BlockSpecification]: + if not dynamic_blocks_definitions: + return [] + if not ALLOW_CUSTOM_PYTHON_EXECUTION_IN_WORKFLOWS: + raise WorkflowEnvironmentConfigurationError( + public_message="Cannot use dynamic blocks with custom Python code in this installation of `workflows`. 
" + "This can be changed by setting environmental variable " + "`ALLOW_CUSTOM_PYTHON_EXECUTION_IN_WORKFLOWS=True`", + context="workflow_compilation | dynamic_blocks_compilation", + ) + all_defined_kinds = load_all_defined_kinds() + kinds_lookup = {kind.name: kind for kind in all_defined_kinds} + dynamic_blocks = [ + DynamicBlockDefinition.model_validate(dynamic_block) + for dynamic_block in dynamic_blocks_definitions + ] + compiled_blocks = [] + for dynamic_block in dynamic_blocks: + block_specification = create_dynamic_block_specification( + dynamic_block_definition=dynamic_block, + kinds_lookup=kinds_lookup, + ) + compiled_blocks.append(block_specification) + return compiled_blocks + + +def create_dynamic_block_specification( + dynamic_block_definition: DynamicBlockDefinition, + kinds_lookup: Dict[str, Kind], +) -> BlockSpecification: + unique_identifier = str(uuid4()) + block_manifest = assembly_dynamic_block_manifest( + unique_identifier=unique_identifier, + manifest_description=dynamic_block_definition.manifest, + kinds_lookup=kinds_lookup, + ) + block_class = assembly_custom_python_block( + block_type_name=dynamic_block_definition.manifest.block_type, + unique_identifier=unique_identifier, + manifest=block_manifest, + python_code=dynamic_block_definition.code, + ) + return BlockSpecification( + block_source=BLOCK_SOURCE, + identifier=get_full_type_name(selected_type=block_class), + block_class=block_class, + manifest_class=block_manifest, + ) + + +def assembly_dynamic_block_manifest( + unique_identifier: str, + manifest_description: ManifestDescription, + kinds_lookup: Dict[str, Kind], +) -> Type[WorkflowBlockManifest]: + inputs_definitions = build_inputs( + block_type=manifest_description.block_type, + inputs=manifest_description.inputs, + kinds_lookup=kinds_lookup, + ) + manifest_class = create_model( + f"DynamicBlockManifest[{unique_identifier}]", + __config__=ConfigDict( + extra="allow", + json_schema_extra={ + "name": build_human_friendly_block_name( + 
fully_qualified_name=manifest_description.block_type + ) + }, + ), + name=(str, ...), + type=(Literal[manifest_description.block_type], ...), + **inputs_definitions, + ) + outputs_definitions = build_outputs_definitions( + block_type=manifest_description.block_type, + outputs=manifest_description.outputs, + kinds_lookup=kinds_lookup, + ) + return assembly_manifest_class_methods( + block_type=manifest_description.block_type, + manifest_class=manifest_class, + outputs_definitions=outputs_definitions, + manifest_description=manifest_description, + ) + + +PYTHON_TYPES_MAPPING = { + ValueType.ANY: Any, + ValueType.INTEGER: int, + ValueType.FLOAT: float, + ValueType.BOOLEAN: bool, + ValueType.DICT: dict, + ValueType.LIST: list, + ValueType.STRING: str, +} + + +def build_inputs( + block_type: str, + inputs: Dict[str, DynamicInputDefinition], + kinds_lookup: Dict[str, Kind], +) -> Dict[str, Tuple[type, Field]]: + result = {} + for input_name, input_definition in inputs.items(): + result[input_name] = build_input( + block_type=block_type, + input_name=input_name, + input_definition=input_definition, + kinds_lookup=kinds_lookup, + ) + return result + + +def build_input( + block_type: str, + input_name: str, + input_definition: DynamicInputDefinition, + kinds_lookup: Dict[str, Kind], +) -> Tuple[type, Field]: + input_type = build_input_field_type( + block_type=block_type, + input_name=input_name, + input_definition=input_definition, + kinds_lookup=kinds_lookup, + ) + field_metadata = build_input_field_metadata(input_definition=input_definition) + return input_type, field_metadata + + +def build_input_field_type( + block_type: str, + input_name: str, + input_definition: DynamicInputDefinition, + kinds_lookup: Dict[str, Kind], +) -> type: + input_type_union_elements = collect_python_types_for_selectors( + block_type=block_type, + input_name=input_name, + input_definition=input_definition, + kinds_lookup=kinds_lookup, + ) + input_type_union_elements += 
collect_python_types_for_values( + block_type=block_type, + input_name=input_name, + input_definition=input_definition, + ) + if not input_type_union_elements: + raise DynamicBlockError( + public_message=f"There is no definition of input type found for property: {input_name} of " + f"dynamic block {block_type}.", + context="workflow_compilation | dynamic_block_compilation | manifest_compilation", + ) + if len(input_type_union_elements) > 1: + input_type = Union[tuple(input_type_union_elements)] + else: + input_type = input_type_union_elements[0] + if input_definition.is_optional: + input_type = Optional[input_type] + return input_type + + +def collect_python_types_for_selectors( + block_type: str, + input_name: str, + input_definition: DynamicInputDefinition, + kinds_lookup: Dict[str, Kind], +) -> List[type]: + result = [] + for selector_type in input_definition.selector_types: + selector_kind_names = input_definition.selector_data_kind.get( + selector_type, ["*"] + ) + selector_kind = [] + for kind_name in selector_kind_names: + if kind_name not in kinds_lookup: + raise DynamicBlockError( + public_message=f"Could not find kind with name `{kind_name}` declared for input `{input_name}` " + f"of dynamic block `{block_type}` within kinds that would be recognised by Execution " + f"Engine knowing the following kinds: {list(kinds_lookup.keys())}.", + context="workflow_compilation | dynamic_block_compilation | manifest_compilation", + ) + selector_kind.append(kinds_lookup[kind_name]) + if selector_type is SelectorType.INPUT_IMAGE: + result.append(WorkflowImageSelector) + elif selector_type is SelectorType.STEP_OUTPUT_IMAGE: + result.append(StepOutputImageSelector) + elif selector_type is SelectorType.INPUT_PARAMETER: + result.append(WorkflowParameterSelector(kind=selector_kind)) + elif selector_type is SelectorType.STEP_OUTPUT: + result.append(StepOutputSelector(kind=selector_kind)) + else: + raise DynamicBlockError( + public_message=f"Could not recognise selector type 
`{selector_type}` declared for input `{input_name}` " + f"of dynamic block `{block_type}`.", + context="workflow_compilation | dynamic_block_compilation | manifest_compilation", + ) + return result + + +def collect_python_types_for_values( + block_type: str, + input_name: str, + input_definition: DynamicInputDefinition, +) -> List[type]: + result = [] + for value_type_name in input_definition.value_types: + if value_type_name not in PYTHON_TYPES_MAPPING: + raise DynamicBlockError( + public_message=f"Could not resolve Python type `{value_type_name}` declared for input `{input_name}` " + f"of dynamic block `{block_type}` within types that would be recognised by Execution " + f"Engine knowing the following types: {list(PYTHON_TYPES_MAPPING.keys())}.", + context="workflow_compilation | dynamic_block_compilation | manifest_compilation", + ) + value_type = PYTHON_TYPES_MAPPING[value_type_name] + result.append(value_type) + return result + + +def build_input_field_metadata(input_definition: DynamicInputDefinition) -> Field: + if not input_definition.has_default_value: + return Field() + default_value = input_definition.default_value + field_metadata_params = {} + if default_holds_compound_object(default_value=default_value): + field_metadata_params["default_factory"] = lambda: deepcopy(default_value) + else: + field_metadata_params["default"] = default_value + field_metadata = Field(**field_metadata_params) + return field_metadata + + +def default_holds_compound_object(default_value: Any) -> bool: + return ( + isinstance(default_value, list) + or isinstance(default_value, dict) + or isinstance(default_value, set) + ) + + +def build_outputs_definitions( + block_type: str, + outputs: Dict[str, DynamicOutputDefinition], + kinds_lookup: Dict[str, Kind], +) -> List[OutputDefinition]: + result = [] + for name, definition in outputs.items(): + if not definition.kind: + result.append(OutputDefinition(name=name, kind=[WILDCARD_KIND])) + else: + actual_kinds = 
collect_actual_kinds_for_output( + block_type=block_type, + output_name=name, + output=definition, + kinds_lookup=kinds_lookup, + ) + result.append(OutputDefinition(name=name, kind=actual_kinds)) + return result + + +def collect_actual_kinds_for_output( + block_type: str, + output_name: str, + output: DynamicOutputDefinition, + kinds_lookup: Dict[str, Kind], +) -> List[Kind]: + actual_kinds = [] + for kind_name in output.kind: + if kind_name not in kinds_lookup: + raise DynamicBlockError( + public_message=f"Could not find kind with name `{kind_name}` declared for output `{output_name}` " + f"of dynamic block `{block_type}` within kinds that would be recognised by Execution " + f"Engine knowing the following kinds: {list(kinds_lookup.keys())}.", + context="workflow_compilation | dynamic_block_compilation | manifest_compilation", + ) + actual_kinds.append(kinds_lookup[kind_name]) + return actual_kinds + + +def collect_input_dimensionality_offsets( + inputs: Dict[str, DynamicInputDefinition], +) -> Dict[str, int]: + result = {} + for name, definition in inputs.items(): + if definition.dimensionality_offset != 0: + result[name] = definition.dimensionality_offset + return result + + +def assembly_manifest_class_methods( + block_type: str, + manifest_class: Type[BaseModel], + outputs_definitions: List[OutputDefinition], + manifest_description: ManifestDescription, +) -> Type[WorkflowBlockManifest]: + describe_outputs = lambda cls: outputs_definitions + setattr(manifest_class, "describe_outputs", classmethod(describe_outputs)) + setattr(manifest_class, "get_actual_outputs", describe_outputs) + accepts_batch_input = lambda cls: manifest_description.accepts_batch_input + setattr(manifest_class, "accepts_batch_input", classmethod(accepts_batch_input)) + input_dimensionality_offsets = collect_input_dimensionality_offsets( + inputs=manifest_description.inputs + ) + get_input_dimensionality_offsets = lambda cls: input_dimensionality_offsets + setattr( + manifest_class, + 
"get_input_dimensionality_offsets", + classmethod(get_input_dimensionality_offsets), + ) + dimensionality_reference = pick_dimensionality_reference_property( + block_type=block_type, + inputs=manifest_description.inputs, + ) + get_dimensionality_reference_property = lambda cls: dimensionality_reference + setattr( + manifest_class, + "get_dimensionality_reference_property", + classmethod(get_dimensionality_reference_property), + ) + get_output_dimensionality_offset = ( + lambda cls: manifest_description.output_dimensionality_offset + ) + setattr( + manifest_class, + "get_output_dimensionality_offset", + classmethod(get_output_dimensionality_offset), + ) + accepts_empty_values = lambda cls: manifest_description.accepts_empty_values + setattr(manifest_class, "accepts_empty_values", classmethod(accepts_empty_values)) + return manifest_class + + +def pick_dimensionality_reference_property( + block_type: str, inputs: Dict[str, DynamicInputDefinition] +) -> Optional[str]: + references = [] + for name, definition in inputs.items(): + if definition.is_dimensionality_reference: + references.append(name) + if not references: + return None + if len(references) == 1: + return references[0] + raise DynamicBlockError( + public_message=f"For dynamic block {block_type} detected multiple inputs declared to be " + f"dimensionality reference: {references}, whereas at max one should be declared " + f"to be reference.", + context="workflow_compilation | dynamic_block_compilation | manifest_compilation", + ) diff --git a/inference/core/workflows/execution_engine/dynamic_blocks/block_scaffolding.py b/inference/core/workflows/execution_engine/dynamic_blocks/block_scaffolding.py new file mode 100644 index 000000000..1b1c8477c --- /dev/null +++ b/inference/core/workflows/execution_engine/dynamic_blocks/block_scaffolding.py @@ -0,0 +1,113 @@ +import types +from typing import List, Type + +from inference.core.env import ALLOW_CUSTOM_PYTHON_EXECUTION_IN_WORKFLOWS +from 
inference.core.workflows.errors import ( + DynamicBlockError, + WorkflowEnvironmentConfigurationError, +) +from inference.core.workflows.execution_engine.dynamic_blocks.entities import PythonCode +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, +) + +IMPORTS_LINES = [ + "from typing import Any, List, Dict, Set, Optional", + "import supervision as sv", + "import numpy as np", + "import math", + "import time", + "import json", + "import os", + "import requests", + "import cv2", + "import shapely", + "from inference.core.workflows.entities.base import Batch, WorkflowImageData", + "from inference.core.workflows.prototypes.block import BlockResult", +] + + +def assembly_custom_python_block( + block_type_name: str, + unique_identifier: str, + manifest: Type[WorkflowBlockManifest], + python_code: PythonCode, +) -> Type[WorkflowBlock]: + code_module = create_dynamic_module( + block_type_name=block_type_name, + python_code=python_code, + module_name=f"dynamic_module_{unique_identifier}", + ) + if not hasattr(code_module, python_code.run_function_name): + raise DynamicBlockError( + public_message=f"Cannot find function: {python_code.run_function_name} in declared code for " + f"dynamic block: `{block_type_name}`", + context="workflow_compilation | dynamic_block_compilation | declared_symbols_fetching", + ) + run_function = getattr(code_module, python_code.run_function_name) + + async def run(self, *args, **kwargs) -> BlockResult: + if not ALLOW_CUSTOM_PYTHON_EXECUTION_IN_WORKFLOWS: + raise WorkflowEnvironmentConfigurationError( + public_message="Cannot use dynamic blocks with custom Python code in this installation of `workflows`. 
" + "This can be changed by setting environmental variable " + "`ALLOW_CUSTOM_PYTHON_EXECUTION_IN_WORKFLOWS=True`", + context="workflow_execution | step_execution | dynamic_step", + ) + return run_function(self, *args, **kwargs) + + if python_code.init_function_code is not None and not hasattr( + code_module, python_code.init_function_name + ): + raise DynamicBlockError( + public_message=f"Cannot find function: {python_code.init_function_name} in declared code for " + f"dynamic block: `{block_type_name}`", + context="workflow_compilation | dynamic_block_compilation | declared_symbols_fetching", + ) + + init_function = getattr(code_module, python_code.init_function_name, dict) + + def constructor(self): + self._init_results = init_function() + + @classmethod + def get_init_parameters(cls) -> List[str]: + return [] + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return manifest + + return type( + f"DynamicBlock[{unique_identifier}]", + (WorkflowBlock,), + { + "__init__": constructor, + "get_init_parameters": get_init_parameters, + "get_manifest": get_manifest, + "run": run, + }, + ) + + +def create_dynamic_module( + block_type_name: str, python_code: PythonCode, module_name: str +) -> types.ModuleType: + imports = "\n".join(IMPORTS_LINES) + "\n" + "\n".join(python_code.imports) + "\n\n" + code = python_code.run_function_code + if python_code.init_function_code: + code += "\n\n" + python_code.init_function_code + code = imports + code + try: + dynamic_module = types.ModuleType(module_name) + exec(code, dynamic_module.__dict__) + return dynamic_module + except Exception as error: + raise DynamicBlockError( + public_message=f"Error of type `{error.__class__.__name__}` encountered while attempting to " + f"create Python module with code for block: {block_type_name}. Error message: {error}. 
Full code:\n{code}", + context="workflow_compilation | dynamic_block_compilation | dynamic_module_creation", + inner_error=error, + ) from error diff --git a/inference/core/workflows/execution_engine/dynamic_blocks/entities.py b/inference/core/workflows/execution_engine/dynamic_blocks/entities.py new file mode 100644 index 000000000..7daee0ae3 --- /dev/null +++ b/inference/core/workflows/execution_engine/dynamic_blocks/entities.py @@ -0,0 +1,147 @@ +from enum import Enum +from typing import Any, Dict, List, Literal, Optional + +from pydantic import BaseModel, Field + + +class SelectorType(Enum): + INPUT_IMAGE = "input_image" + STEP_OUTPUT_IMAGE = "step_output_image" + INPUT_PARAMETER = "input_parameter" + STEP_OUTPUT = "step_output" + + +class ValueType(Enum): + ANY = "any" + INTEGER = "integer" + FLOAT = "float" + BOOLEAN = "boolean" + DICT = "dict" + LIST = "list" + STRING = "string" + + +class DynamicInputDefinition(BaseModel): + type: Literal["DynamicInputDefinition"] + has_default_value: bool = Field( + default=False, + description="Flag to decide if default value is provided for input", + ) + default_value: Any = Field( + description="Definition of default value for a field. Use in combination with, " + "`has_default_value` to decide on default value if field is optional.", + default=None, + ) + is_optional: bool = Field( + description="Flag deciding if `default_value` will be added for manifest field annotation.", + default=False, + ) + is_dimensionality_reference: bool = Field( + default=False, + description="Flag deciding if declared property holds dimensionality reference - see how " + "dimensionality works for statically defined blocks to discover meaning of the " + "parameter.", + ) + dimensionality_offset: int = Field( + default=0, + ge=-1, + le=1, + description="Accepted dimensionality offset for parameter. 
Dimensionality works the same as for " + "traditional workflows blocks.", + ) + selector_types: List[SelectorType] = Field( + default_factory=list, + description="Union of selector types accepted by input. Should be empty if field does not accept " + "selectors.", + ) + selector_data_kind: Dict[SelectorType, List[str]] = Field( + default_factory=dict, + description="Mapping of `selector_types` into names of kinds to be compatible. " + "Empty dict (default value) means wildcard kind for all selectors. If name of kind given - " + "must be valid kind, known for workflow execution engine.", + ) + value_types: List[ValueType] = Field( + default_factory=list, + description="List of types representing union of types for static values (non selectors) " + "that shall be accepted for input field. Empty list represents no value types allowed.", + ) + + +class DynamicOutputDefinition(BaseModel): + type: Literal["DynamicOutputDefinition"] + kind: List[str] = Field( + default_factory=list, + description="List representing union of kinds for defined output", + ) + + +class ManifestDescription(BaseModel): + type: Literal["ManifestDescription"] + block_type: str = Field( + description="Field holds type of the block to be dynamically created. Block can be initialised " + "as step using the type declared in the field."
+ ) + inputs: Dict[str, DynamicInputDefinition] = Field( + description="Mapping name -> input definition for block inputs (parameters for run() function of " + "dynamic block)" + ) + outputs: Dict[str, DynamicOutputDefinition] = Field( + default_factory=dict, + description="Mapping name -> output kind for block outputs.", + ) + output_dimensionality_offset: int = Field( + default=0, ge=-1, le=1, description="Definition of output dimensionality offset" + ) + accepts_batch_input: bool = Field( + default=False, + description="Flag to decide if function will be provided with batch data as whole or with singular " + "batch elements during execution", + ) + accepts_empty_values: bool = Field( + default=False, + description="Flag to decide if empty (optional) values will be shipped as run() function parameters", + ) + + +class PythonCode(BaseModel): + type: Literal["PythonCode"] + run_function_code: str = Field( + description="Code of python function. Content should be properly formatted including indentations. " + "Workflows execution engine is to create dynamic module with provided function - ensuring " + "imports of the following symbols: [Any, List, Dict, Set, sv, np, math, time, json, os, " + "requests, cv2, shapely, Batch, WorkflowImageData, BlockResult]. Expected signature is: " + "def run(self, ... # parameters of manifest apart from name and type). Through self, " + "one may access self._init_results which is dict returned by `init_code` if given." + ) + run_function_name: str = Field( + default="run", description="Name of the function shipped in `function_code`." + ) + init_function_code: Optional[str] = Field( + description="Code of the function to perform initialisation of the block. 
It must be " + "parameter-free function with signature `def init() -> Dict[str, Any]` setting " + "self._init_results on dynamic class initialisation", + default=None, + ) + init_function_name: str = Field( + default="init", + description="Name of init_code function.", + ) + imports: List[str] = Field( + default_factory=list, + description="List of additional imports required to run the code", + ) + + +class DynamicBlockDefinition(BaseModel): + type: Literal["DynamicBlockDefinition"] + manifest: ManifestDescription = Field( + description="Definition of manifest for dynamic block to be created in runtime by " + "workflows execution engine." + ) + code: PythonCode = Field( + description="Code to be executed in run(...) method of block that will be dynamically " + "created." + ) + + +BLOCK_SOURCE = "dynamic_workflows_blocks" diff --git a/inference/core/workflows/execution_engine/executor/core.py b/inference/core/workflows/execution_engine/executor/core.py index 68c3f380c..b59b084a0 100644 --- a/inference/core/workflows/execution_engine/executor/core.py +++ b/inference/core/workflows/execution_engine/executor/core.py @@ -22,9 +22,11 @@ construct_workflow_output, ) from inference.core.workflows.prototypes.block import WorkflowBlock +from inference.usage_tracking.collector import usage_collector from inference_sdk.http.utils.iterables import make_batches +@usage_collector async def run_workflow( workflow: CompiledWorkflow, runtime_parameters: Dict[str, Any], diff --git a/inference/core/workflows/execution_engine/introspection/blocks_loader.py b/inference/core/workflows/execution_engine/introspection/blocks_loader.py index ca29730a5..f8b45e643 100644 --- a/inference/core/workflows/execution_engine/introspection/blocks_loader.py +++ b/inference/core/workflows/execution_engine/introspection/blocks_loader.py @@ -4,7 +4,11 @@ from collections import Counter from typing import Any, Callable, Dict, List, Union -from inference.core.workflows.core_steps.loader import load_blocks 
+from inference.core.workflows.core_steps.loader import ( + REGISTERED_INITIALIZERS, + load_blocks, + load_kinds, +) from inference.core.workflows.entities.types import Kind from inference.core.workflows.errors import PluginInterfaceError, PluginLoadingError from inference.core.workflows.execution_engine.compiler.entities import ( @@ -14,38 +18,24 @@ BlockDescription, BlocksDescription, ) -from inference.core.workflows.execution_engine.introspection.schema_parser import ( - retrieve_selectors_from_schema, -) from inference.core.workflows.execution_engine.introspection.utils import ( build_human_friendly_block_name, get_full_type_name, ) +from inference.core.workflows.prototypes.block import WorkflowBlock WORKFLOWS_PLUGINS_ENV = "WORKFLOWS_PLUGINS" +WORKFLOWS_CORE_PLUGIN_NAME = "workflows_core" -def describe_available_blocks() -> BlocksDescription: - blocks = load_workflow_blocks() - declared_kinds = [] +def describe_available_blocks( + dynamic_blocks: List[BlockSpecification], +) -> BlocksDescription: + blocks = load_workflow_blocks() + dynamic_blocks result = [] for block in blocks: block_schema = block.manifest_class.model_json_schema() outputs_manifest = block.manifest_class.describe_outputs() - schema_selectors = retrieve_selectors_from_schema( - schema=block_schema, - inputs_dimensionality_offsets=block.manifest_class.get_input_dimensionality_offsets(), - dimensionality_reference_property=block.manifest_class.get_dimensionality_reference_property(), - ) - block_kinds = [ - k - for s in schema_selectors.values() - for r in s.allowed_references - for k in r.kind - ] - declared_kinds.extend(block_kinds) - for output in outputs_manifest: - declared_kinds.extend(output.kind) manifest_type_identifiers = get_manifest_type_identifiers( block_schema=block_schema, block_source=block.block_source, @@ -68,8 +58,7 @@ def describe_available_blocks() -> BlocksDescription: ) _validate_loaded_blocks_names_uniqueness(blocks=result) 
_validate_loaded_blocks_manifest_type_identifiers(blocks=result) - declared_kinds = list(set(declared_kinds)) - _validate_used_kinds_uniqueness(declared_kinds=declared_kinds) + declared_kinds = load_all_defined_kinds() return BlocksDescription(blocks=result, declared_kinds=declared_kinds) @@ -110,14 +99,16 @@ def load_core_workflow_blocks() -> List[BlockSpecification]: already_spotted_blocks = set() result = [] for block in core_blocks: + manifest_class = block.get_manifest() + identifier = get_full_type_name(selected_type=block) if block in already_spotted_blocks: continue result.append( BlockSpecification( - block_source="workflows_core", - identifier=get_full_type_name(selected_type=block), + block_source=WORKFLOWS_CORE_PLUGIN_NAME, + identifier=identifier, block_class=block, - manifest_class=block.get_manifest(), + manifest_class=manifest_class, ) ) already_spotted_blocks.add(block) @@ -132,13 +123,6 @@ def load_plugins_blocks() -> List[BlockSpecification]: return custom_blocks -def get_plugin_modules() -> List[str]: - plugins_to_load = os.environ.get(WORKFLOWS_PLUGINS_ENV) - if plugins_to_load is None: - return [] - return plugins_to_load.split(",") - - def load_blocks_from_plugin(plugin_name: str) -> List[BlockSpecification]: try: return _load_blocks_from_plugin(plugin_name=plugin_name) @@ -163,7 +147,21 @@ def _load_blocks_from_plugin(plugin_name: str) -> List[BlockSpecification]: blocks = module.load_blocks() already_spotted_blocks = set() result = [] - for block in blocks: + if not isinstance(blocks, list): + raise PluginInterfaceError( + public_message=f"Provided workflow plugin `{plugin_name}` implement `load_blocks()` function " + f"incorrectly. 
Expected to return list of entries being subclass of `WorkflowBlock`, " + f"but entry of different characteristics found: {type(blocks)}.", + context="workflow_compilation | blocks_loading", + ) + for i, block in enumerate(blocks): + if not isinstance(block, type) or not issubclass(block, WorkflowBlock): + raise PluginInterfaceError( + public_message=f"Provided workflow plugin `{plugin_name}` implement `load_blocks()` function " + f"incorrectly. Expected to return list of entries being subclass of `WorkflowBlock`, " + f"but entry of different characteristics found: {block} at position: {i}.", + context="workflow_compilation | blocks_loading", + ) if block in already_spotted_blocks: continue result.append( @@ -179,15 +177,20 @@ def _load_blocks_from_plugin(plugin_name: str) -> List[BlockSpecification]: def load_initializers() -> Dict[str, Union[Any, Callable[[None], Any]]]: - plugins_to_load = os.environ.get(WORKFLOWS_PLUGINS_ENV) - if plugins_to_load is None: - return {} - result = {} - for plugin_name in plugins_to_load.split(","): + plugins_to_load = get_plugin_modules() + result = load_core_blocks_initializers() + for plugin_name in plugins_to_load: result.update(load_initializers_from_plugin(plugin_name=plugin_name)) return result +def load_core_blocks_initializers() -> Dict[str, Union[Any, Callable[[None], Any]]]: + return { + f"{WORKFLOWS_CORE_PLUGIN_NAME}.{parameter_name}": initializer + for parameter_name, initializer in REGISTERED_INITIALIZERS.items() + } + + def load_initializers_from_plugin( plugin_name: str, ) -> Dict[str, Union[Any, Callable[[None], Any]]]: @@ -265,3 +268,68 @@ def _validate_used_kinds_uniqueness(declared_kinds: List[Kind]) -> None: f"the same name.", context="workflow_compilation | blocks_loading", ) + + +def load_all_defined_kinds() -> List[Kind]: + core_blocks_kinds = load_kinds() + plugins_kinds = load_plugins_kinds() + declared_kinds = core_blocks_kinds + plugins_kinds + declared_kinds = list(set(declared_kinds)) + 
_validate_used_kinds_uniqueness(declared_kinds=declared_kinds) + return declared_kinds + + +def load_plugins_kinds() -> List[Kind]: + plugins_to_load = get_plugin_modules() + result = [] + for plugin_name in plugins_to_load: + result.extend(load_plugin_kinds(plugin_name=plugin_name)) + return result + + +def load_plugin_kinds(plugin_name: str) -> List[Kind]: + try: + return _load_plugin_kinds(plugin_name=plugin_name) + except ImportError as e: + raise PluginLoadingError( + public_message=f"It is not possible to load kinds from workflow plugin `{plugin_name}`. " + f"Make sure the library providing custom step is correctly installed in Python environment.", + context="workflow_compilation | blocks_loading", + inner_error=e, + ) from e + except AttributeError as e: + raise PluginInterfaceError( + public_message=f"Provided workflow plugin `{plugin_name}` do not implement blocks loading " + f"interface correctly and cannot be loaded.", + context="workflow_compilation | blocks_loading", + inner_error=e, + ) from e + + +def _load_plugin_kinds(plugin_name: str) -> List[Kind]: + module = importlib.import_module(plugin_name) + if not hasattr(module, "load_kinds"): + return [] + kinds_extractor = getattr(module, "load_kinds") + if not callable(kinds_extractor): + logging.warning( + f"Found `load_kinds` symbol in plugin `{plugin_name}` module init, but it is not callable. " + f"Not importing kinds from that plugin." + ) + return [] + kinds = kinds_extractor() + if not isinstance(kinds, list) or not all(isinstance(e, Kind) for e in kinds): + raise PluginInterfaceError( + public_message=f"Provided workflow plugin `{plugin_name}` do not implement blocks loading " + f"interface correctly and cannot be loaded. 
Return value of `load_kinds()` " + f"is not list of objects `Kind`.", + context="workflow_compilation | blocks_loading", + ) + return kinds + + +def get_plugin_modules() -> List[str]: + plugins_to_load = os.environ.get(WORKFLOWS_PLUGINS_ENV) + if plugins_to_load is None: + return [] + return plugins_to_load.split(",") diff --git a/inference/core/workflows/execution_engine/introspection/entities.py b/inference/core/workflows/execution_engine/introspection/entities.py index f906d9514..d86ae00e8 100644 --- a/inference/core/workflows/execution_engine/introspection/entities.py +++ b/inference/core/workflows/execution_engine/introspection/entities.py @@ -1,5 +1,5 @@ from dataclasses import dataclass -from typing import Dict, List, Optional, Set, Type +from typing import Callable, Dict, List, Optional, Set, Type, Union from pydantic import BaseModel, Field @@ -90,7 +90,12 @@ class DiscoveredConnections: class BlockDescription(BaseModel): - manifest_class: Type[WorkflowBlockManifest] = Field(exclude=True) + manifest_class: Union[Type[WorkflowBlockManifest], Type[BaseModel]] = Field( + exclude=True + ) + # Type[BaseModel] here is to let dynamic blocks being BaseModel to pass validation - but that should be + # the only case for using this type in this field. 
Dynamic blocks implements the same interface, yet due + # to dynamic nature of creation - cannot be initialised as abstract class WorkflowBlockManifest block_class: Type[WorkflowBlock] = Field(exclude=True) block_schema: dict = Field( description="OpenAPI specification of block manifest that " diff --git a/inference/core/workflows/prototypes/block.py b/inference/core/workflows/prototypes/block.py index d143465d7..667c55c3d 100644 --- a/inference/core/workflows/prototypes/block.py +++ b/inference/core/workflows/prototypes/block.py @@ -1,10 +1,9 @@ from abc import ABC, abstractmethod -from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union +from typing import Any, Dict, List, Optional, Type, Union from openai import BaseModel from pydantic import ConfigDict, Field -from inference.core import logger from inference.core.workflows.entities.base import OutputDefinition from inference.core.workflows.entities.types import FlowControl from inference.core.workflows.errors import BlockInterfaceError diff --git a/inference/models/yolo_world/yolo_world.py b/inference/models/yolo_world/yolo_world.py index 268519406..a5d790440 100644 --- a/inference/models/yolo_world/yolo_world.py +++ b/inference/models/yolo_world/yolo_world.py @@ -1,6 +1,6 @@ import os.path from time import perf_counter -from typing import Any, Optional +from typing import Any, List, Optional import clip import numpy as np @@ -127,7 +127,13 @@ def infer( bbox_array = np.array([box.xywh.tolist()[0] for box in results.boxes]) conf_array = np.array([[float(box.conf)] for box in results.boxes]) cls_array = np.array( - [self.get_cls_conf_array(int(box.cls)) for box in results.boxes] + [ + self.get_cls_conf_array( + max_class_id=int(box.cls), + max_class_confidence=float(box.conf), + ) + for box in results.boxes + ] ) pred_array = np.concatenate([bbox_array, conf_array, cls_array], axis=1) @@ -224,7 +230,9 @@ def get_infer_bucket_file_list(self) -> list: """ return ["yolo-world.pt"] - def 
get_cls_conf_array(self, class_id) -> list: - arr = [0] * len(self.class_names) - arr[class_id] = 1 + def get_cls_conf_array( + self, max_class_id: int, max_class_confidence: float + ) -> List[float]: + arr = [0.0] * len(self.class_names) + arr[max_class_id] = max_class_confidence return arr diff --git a/inference/usage_tracking/__init__.py b/inference/usage_tracking/__init__.py new file mode 100644 index 000000000..23462463a --- /dev/null +++ b/inference/usage_tracking/__init__.py @@ -0,0 +1,5 @@ +""" +Inference utilizes Roboflow's cloud services and requires telemetry during deployment. +Customers with an offline deployment can turn the telemetry off by setting TELEMETRY_OPT_OUT environment variable to True. +For more information please consult our licensing page [roboflow.com/licensing] or contact sales [roboflow.com/sales]. +""" diff --git a/inference/usage_tracking/collector.py b/inference/usage_tracking/collector.py new file mode 100644 index 000000000..1a73f1fe5 --- /dev/null +++ b/inference/usage_tracking/collector.py @@ -0,0 +1,673 @@ +import asyncio +import atexit +import hashlib +import json +import mimetypes +import socket +import sys +import time +from collections import defaultdict +from functools import wraps +from queue import Queue +from threading import Event, Lock, Thread +from typing import Any, Callable, DefaultDict, Dict, List, Optional, Tuple, Union +from uuid import uuid4 + +import requests + +from inference.core.env import API_KEY, LAMBDA +from inference.core.logger import logger +from inference.core.version import __version__ as inference_version +from inference.core.workflows.execution_engine.compiler.entities import CompiledWorkflow +from inference.usage_tracking.utils import collect_func_params + +from .config import TelemetrySettings, get_telemetry_settings + +ResourceID = str +Usage = Union[DefaultDict[str, Any], Dict[str, Any]] +ResourceUsage = Union[DefaultDict[ResourceID, Usage], Dict[ResourceID, Usage]] +APIKey = str +APIKeyUsage 
= Union[DefaultDict[APIKey, ResourceUsage], Dict[APIKey, ResourceUsage]] +ResourceDetails = Dict[str, Any] +SystemDetails = Dict[str, Any] +UsagePayload = Union[APIKeyUsage, ResourceDetails, SystemDetails] + + +class UsageCollector: + _lock = Lock() + _async_lock = asyncio.Lock() + + def __new__(cls, *args, **kwargs): + with UsageCollector._lock: + if not hasattr(cls, "_instance"): + cls._instance = super().__new__(cls) + cls._instance._queue = None + return cls._instance + + def __init__(self): + with UsageCollector._lock: + if self._queue: + return + + self._exec_session_id = f"{time.time_ns()}_{uuid4().hex[:4]}" + + self._settings: TelemetrySettings = get_telemetry_settings() + self._usage: APIKeyUsage = self.empty_usage_dict( + exec_session_id=self._exec_session_id + ) + + # TODO: use persistent queue, i.e. https://pypi.org/project/persist-queue/ + self._queue: "Queue[UsagePayload]" = Queue(maxsize=self._settings.queue_size) + self._queue_lock = Lock() + + self._system_info_sent: bool = False + self._resource_details_lock = Lock() + self._resource_details: DefaultDict[APIKey, Dict[ResourceID, bool]] = ( + defaultdict(dict) + ) + + self._terminate_collector_thread = Event() + self._collector_thread = Thread(target=self._usage_collector, daemon=True) + self._collector_thread.start() + + self._terminate_sender_thread = Event() + self._sender_thread = Thread(target=self._usage_sender, daemon=True) + self._sender_thread.start() + + atexit.register(self._cleanup) + + @staticmethod + def empty_usage_dict(exec_session_id: str) -> APIKeyUsage: + return defaultdict( # api_key + lambda: defaultdict( # category:resource_id + lambda: { + "timestamp_start": None, + "timestamp_stop": None, + "exec_session_id": exec_session_id, + "processed_frames": 0, + "fps": 0, + "source_duration": 0, + "category": "", + "resource_id": "", + "hosted": LAMBDA, + "api_key": None, + "enterprise": False, + } + ) + ) + + @staticmethod + def _merge_usage_dicts(d1: UsagePayload, d2: UsagePayload): 
+ merged = {} + if d1 and d2 and d1.get("resource_id") != d2.get("resource_id"): + raise ValueError("Cannot merge usage for different resource IDs") + if "timestamp_start" in d1 and "timestamp_start" in d2: + merged["timestamp_start"] = min( + d1["timestamp_start"], d2["timestamp_start"] + ) + if "timestamp_stop" in d1 and "timestamp_stop" in d2: + merged["timestamp_stop"] = max(d1["timestamp_stop"], d2["timestamp_stop"]) + if "processed_frames" in d1 and "processed_frames" in d2: + merged["processed_frames"] = d1["processed_frames"] + d2["processed_frames"] + if "source_duration" in d1 and "source_duration" in d2: + merged["source_duration"] = d1["source_duration"] + d2["source_duration"] + return {**d1, **d2, **merged} + + def _dump_usage_queue_no_lock(self) -> List[APIKeyUsage]: + usage_payloads: List[APIKeyUsage] = [] + while self._queue: + if self._queue.empty(): + break + usage_payloads.append(self._queue.get_nowait()) + return usage_payloads + + def _dump_usage_queue_with_lock(self) -> List[APIKeyUsage]: + with self._queue_lock: + usage_payloads = self._dump_usage_queue_no_lock() + return usage_payloads + + @staticmethod + def _get_api_key_usage_containing_resource( + api_key: APIKey, usage_payloads: List[APIKeyUsage] + ) -> Optional[ResourceUsage]: + for usage_payload in usage_payloads: + for other_api_key, resource_payloads in usage_payload.items(): + if api_key and other_api_key != api_key: + continue + if other_api_key is None: + continue + for resource_id, resource_usage in resource_payloads.items(): + if not resource_id: + continue + if not resource_usage or "resource_id" not in resource_usage: + continue + return resource_usage + return None + + @staticmethod + def _zip_usage_payloads(usage_payloads: List[APIKeyUsage]) -> List[APIKeyUsage]: + merged_api_key_usage_payloads: APIKeyUsage = {} + system_info_payload = None + for usage_payload in usage_payloads: + for api_key, resource_payloads in usage_payload.items(): + if api_key is None: + if ( + 
resource_payloads + and len(resource_payloads) > 1 + or list(resource_payloads.keys()) != [None] + ): + logger.debug( + "Dropping usage payload %s due to missing API key", + resource_payloads, + ) + continue + api_key_usage_with_resource = ( + UsageCollector._get_api_key_usage_containing_resource( + api_key=api_key, + usage_payloads=usage_payloads, + ) + ) + if not api_key_usage_with_resource: + system_info_payload = resource_payloads + continue + api_key = api_key_usage_with_resource["api_key"] + resource_id = api_key_usage_with_resource["resource_id"] + category = api_key_usage_with_resource.get("category") + for v in resource_payloads.values(): + v["api_key"] = api_key + if "resource_id" not in v or not v["resource_id"]: + v["resource_id"] = resource_id + if "category" not in v or not v["category"]: + v["category"] = category + for ( + resource_usage_key, + resource_usage_payload, + ) in resource_payloads.items(): + if resource_usage_key is None: + api_key_usage_with_resource = ( + UsageCollector._get_api_key_usage_containing_resource( + api_key=api_key, + usage_payloads=usage_payloads, + ) + ) + if not api_key_usage_with_resource: + system_info_payload = {None: resource_usage_payload} + continue + resource_id = api_key_usage_with_resource["resource_id"] + category = api_key_usage_with_resource.get("category") + resource_usage_key = f"{category}:{resource_id}" + resource_usage_payload["api_key"] = api_key + resource_usage_payload["resource_id"] = resource_id + resource_usage_payload["category"] = category + merged_api_key_payload = merged_api_key_usage_payloads.setdefault( + api_key, {} + ) + merged_resource_payload = merged_api_key_payload.setdefault( + resource_usage_key, {} + ) + merged_api_key_payload[resource_usage_key] = ( + UsageCollector._merge_usage_dicts( + merged_resource_payload, + resource_usage_payload, + ) + ) + + zipped_payloads = [merged_api_key_usage_payloads] + if system_info_payload: + system_info_api_key = 
next(iter(system_info_payload.values()))["api_key"] + zipped_payloads.append({system_info_api_key: system_info_payload}) + return zipped_payloads + + @staticmethod + def _hash(payload: str, length=5): + payload_hash = hashlib.sha256(payload.encode()) + return payload_hash.hexdigest()[:length] + + def _enqueue_payload(self, payload: UsagePayload): + logger.debug("Enqueuing usage payload %s", payload) + if not payload: + return + with self._queue_lock: + if not self._queue.full(): + self._queue.put(payload) + else: + usage_payloads = self._dump_usage_queue_no_lock() + usage_payloads.append(payload) + merged_usage_payloads = self._zip_usage_payloads( + usage_payloads=usage_payloads, + ) + for usage_payload in merged_usage_payloads: + self._queue.put(usage_payload) + + @staticmethod + def _calculate_resource_hash(resource_details: Dict[str, Any]) -> str: + return UsageCollector._hash(json.dumps(resource_details, sort_keys=True)) + + def record_resource_details( + self, + category: str, + resource_details: Dict[str, Any], + resource_id: Optional[str] = None, + api_key: Optional[str] = None, + enterprise: bool = False, + ): + if not category: + raise ValueError("Category is compulsory when recording resource details.") + if not resource_details and not resource_id: + return + if not isinstance(resource_details, dict) and not resource_id: + return + + if not api_key: + api_key = API_KEY + if not resource_id: + resource_id = UsageCollector._calculate_resource_hash( + resource_details=resource_details + ) + + with self._resource_details_lock: + api_key_specifications = self._resource_details[api_key] + if resource_id in api_key_specifications: + return + api_key_specifications[resource_id] = True + + resource_details_payload: ResourceDetails = { + api_key: { + f"{category}:{resource_id}": { + "timestamp_start": time.time_ns(), + "category": category, + "resource_id": resource_id, + "hosted": LAMBDA, + "resource_details": json.dumps(resource_details), + "api_key": api_key, + 
"enterprise": enterprise, + } + } + } + logger.debug("Usage (%s details): %s", category, resource_details_payload) + self._enqueue_payload(payload=resource_details_payload) + + @staticmethod + def system_info( + exec_session_id: str, + api_key: Optional[str] = None, + ip_address: Optional[str] = None, + time_ns: Optional[int] = None, + enterprise: bool = False, + ) -> SystemDetails: + if ip_address: + ip_address_hash_hex = UsageCollector._hash(ip_address) + else: + ip_address: str = socket.gethostbyname(socket.gethostname()) + ip_address_hash_hex = UsageCollector._hash(ip_address) + + if not time_ns: + time_ns = time.time_ns() + + if not api_key: + api_key = API_KEY + + return { + "timestamp_start": time_ns, + "exec_session_id": exec_session_id, + "ip_address_hash": ip_address_hash_hex, + "api_key": api_key, + "hosted": LAMBDA, + "is_gpu_available": False, # TODO + "python_version": sys.version.split()[0], + "inference_version": inference_version, + "enterprise": enterprise, + } + + def record_system_info( + self, + api_key: str, + ip_address: Optional[str] = None, + enterprise: bool = False, + ): + if self._system_info_sent: + return + if not api_key: + api_key = API_KEY + system_info_payload = { + api_key: { + None: self.system_info( + exec_session_id=self._exec_session_id, + api_key=api_key, + ip_address=ip_address, + enterprise=enterprise, + ) + } + } + logger.debug("Usage (system info): %s", system_info_payload) + self._enqueue_payload(payload=system_info_payload) + self._system_info_sent = True + + @staticmethod + def _guess_source_type(source: str) -> str: + mime_type, _ = mimetypes.guess_type(source) + stream_schemes = ["rtsp", "rtmp"] + source_type = None + if mime_type and mime_type.startswith("video"): + source_type = "video" + elif mime_type and mime_type.startswith("image"): + source_type = "image" + elif mime_type: + logger.debug("Unhandled mime type") + source_type = mime_type.split("/")[0] + elif not mime_type and str.isnumeric(source): + 
source_type = "camera" + elif not mime_type and any( + source.lower().startswith(s) for s in stream_schemes + ): + source_type = "stream" + return source_type + + def _update_usage_payload( + self, + source: str, + category: str, + frames: int = 1, + api_key: Optional[str] = None, + resource_details: Optional[Dict[str, Any]] = None, + resource_id: Optional[str] = None, + fps: float = 0, + enterprise: bool = False, + ): + source = str(source) if source else "" + if not api_key: + api_key = API_KEY + if not resource_id and resource_details: + resource_id = UsageCollector._calculate_resource_hash(resource_details) + with UsageCollector._lock: + source_usage = self._usage[api_key][f"{category}:{resource_id}"] + if not source_usage["timestamp_start"]: + source_usage["timestamp_start"] = time.time_ns() + source_usage["timestamp_stop"] = time.time_ns() + source_usage["processed_frames"] += frames + source_usage["fps"] = round(fps, 2) + source_usage["source_duration"] += frames / fps if fps else 0 + source_usage["category"] = category + source_usage["resource_id"] = resource_id + source_usage["api_key"] = api_key + source_usage["enterprise"] = enterprise + logger.debug("Updated usage: %s", source_usage) + + def record_usage( + self, + source: str, + category: str, + enterprise: bool, + frames: int = 1, + api_key: Optional[str] = None, + resource_details: Optional[Dict[str, Any]] = None, + resource_id: Optional[str] = None, + fps: float = 0, + ) -> DefaultDict[str, Any]: + if self._settings.opt_out and not enterprise: + return + self.record_system_info( + api_key=api_key, + enterprise=enterprise, + ) + self.record_resource_details( + category=category, + resource_details=resource_details, + resource_id=resource_id, + api_key=api_key, + enterprise=enterprise, + ) + self._update_usage_payload( + source=source, + category=category, + frames=frames, + api_key=api_key, + resource_details=resource_details, + resource_id=resource_id, + fps=fps, + enterprise=enterprise, + ) + + 
async def async_record_usage( + self, + source: str, + category: str, + enterprise: bool, + frames: int = 1, + api_key: Optional[str] = None, + resource_details: Optional[Dict[str, Any]] = None, + resource_id: Optional[str] = None, + fps: float = 0, + ) -> DefaultDict[str, Any]: + async with UsageCollector._async_lock: + self.record_usage( + source=source, + category=category, + frames=frames, + enterprise=enterprise, + api_key=api_key, + resource_details=resource_details, + resource_id=resource_id, + fps=fps, + ) + + def _usage_collector(self): + while True: + if self._terminate_collector_thread.wait(self._settings.flush_interval): + break + self._enqueue_usage_payload() + logger.debug("Terminating collector thread") + self._enqueue_usage_payload() + + def _enqueue_usage_payload(self): + if not self._usage: + return + with UsageCollector._lock: + self._enqueue_payload(payload=self._usage) + self._usage = self.empty_usage_dict(exec_session_id=self._exec_session_id) + + def _usage_sender(self): + while True: + if self._terminate_sender_thread.wait(self._settings.flush_interval): + break + self._flush_queue() + logger.debug("Terminating sender thread") + self._flush_queue() + + def _flush_queue(self): + usage_payloads = self._dump_usage_queue_with_lock() + if not usage_payloads: + return + merged_payloads: APIKeyUsage = self._zip_usage_payloads( + usage_payloads=usage_payloads, + ) + self._offload_to_api(payloads=merged_payloads) + + def _offload_to_api(self, payloads: List[APIKeyUsage]): + ssl_verify = True + if "localhost" in self._settings.api_usage_endpoint_url.lower(): + ssl_verify = False + if "127.0.0.1" in self._settings.api_usage_endpoint_url.lower(): + ssl_verify = False + + api_keys_failed = set() + for payload in payloads: + for api_key, workflow_payloads in payload.items(): + if any("processed_frames" not in w for w in workflow_payloads.values()): + api_keys_failed.add(api_key) + continue + enterprise = any( + w.get("enterprise") for w in 
workflow_payloads.values() + ) + try: + logger.debug( + "Offloading usage to %s, payload: %s", + self._settings.api_usage_endpoint_url, + workflow_payloads, + ) + response = requests.post( + self._settings.api_usage_endpoint_url, + json=list(workflow_payloads.values()), + verify=ssl_verify, + headers={"Authorization": f"Bearer {api_key}"}, + timeout=1, + ) + except Exception as exc: + logger.debug("Failed to send usage - %s", exc) + api_keys_failed.add(api_key) + continue + if response.status_code != 200: + logger.debug( + "Failed to send usage - got %s status code (%s)", + response.status_code, + response.raw, + ) + api_keys_failed.add(api_key) + continue + for api_key in list(payload.keys()): + if api_key not in api_keys_failed: + del payload[api_key] + if payload: + logger.warning("Enqueuing back unsent payload") + self._enqueue_payload(payload=payload) + + def push_usage_payloads(self): + self._enqueue_usage_payload() + self._flush_queue() + + async def async_push_usage_payloads(self): + async with UsageCollector._async_lock: + self.push_usage_payloads() + + @staticmethod + def _resource_details_from_workflow_json( + workflow_json: Dict[str, Any] + ) -> Tuple[ResourceID, ResourceDetails]: + if not isinstance(workflow_json, dict): + raise ValueError("workflow_json must be dict") + return { + "steps": [ + f"{step.get('type', 'unknown')}:{step.get('name', 'unknown')}" + for step in workflow_json.get("steps", []) + if isinstance(step, dict) + ] + } + + @staticmethod + def _extract_usage_params_from_func_kwargs( + usage_fps: float, + usage_api_key: str, + usage_workflow_id: str, + func: Callable[[Any], Any], + args: List[Any], + kwargs: Dict[str, Any], + ) -> Dict[str, Any]: + if not usage_api_key: + usage_api_key = API_KEY + func_kwargs = collect_func_params(func, args, kwargs) + resource_details = {} + resource_id = None + category = None + if "workflow" in func_kwargs: + workflow: CompiledWorkflow = func_kwargs["workflow"] + if hasattr(workflow, 
"workflow_definition"): + # TODO: handle enterprise blocks here + workflow_definition = workflow.workflow_definition + enterprise = False + if hasattr(workflow, "init_parameters"): + init_parameters = workflow.init_parameters + if "workflows_core.api_key" in init_parameters: + usage_api_key = init_parameters["workflows_core.api_key"] + workflow_json = {} + if hasattr(workflow, "workflow_json"): + workflow_json = workflow.workflow_json + resource_details = UsageCollector._resource_details_from_workflow_json( + workflow_json=workflow_json, + ) + resource_id = usage_workflow_id + if not resource_id and resource_details: + usage_workflow_id = UsageCollector._calculate_resource_hash( + resource_details=resource_details + ) + category = "workflows" + elif "model_id" in func_kwargs: + # TODO: handle model + pass + source = None + runtime_parameters = func_kwargs.get("runtime_parameters") + if ( + isinstance(runtime_parameters, dict) + and "image" in func_kwargs["runtime_parameters"] + ): + images = runtime_parameters["image"] + if not isinstance(images, list): + images = [images] + image = images[0] + if isinstance(image, dict): + source = image.get("value") + elif hasattr(image, "_image_reference"): + source = image._image_reference + return { + "source": source, + "api_key": usage_api_key, + "category": category, + "resource_details": resource_details, + "resource_id": resource_id, + "fps": usage_fps, + "enterprise": enterprise, + } + + def __call__(self, func: Callable[[Any], Any]): + @wraps(func) + def sync_wrapper( + *args, + usage_fps: float = 0, + usage_api_key: Optional[str] = None, + usage_workflow_id: Optional[str] = None, + **kwargs, + ): + self.record_usage( + **self._extract_usage_params_from_func_kwargs( + usage_fps=usage_fps, + usage_api_key=usage_api_key, + usage_workflow_id=usage_workflow_id, + func=func, + args=args, + kwargs=kwargs, + ) + ) + return func(*args, **kwargs) + + @wraps(func) + async def async_wrapper( + *args, + usage_fps: float = 0, + 
usage_api_key: Optional[str] = None, + usage_workflow_id: Optional[str] = None, + **kwargs, + ): + await self.async_record_usage( + **self._extract_usage_params_from_func_kwargs( + usage_fps=usage_fps, + usage_api_key=usage_api_key, + usage_workflow_id=usage_workflow_id, + func=func, + args=args, + kwargs=kwargs, + ) + ) + return await func(*args, **kwargs) + + if asyncio.iscoroutinefunction(func): + return async_wrapper + else: + return sync_wrapper + + def _cleanup(self): + self._terminate_collector_thread.set() + self._collector_thread.join() + self._terminate_sender_thread.set() + self._sender_thread.join() + + +usage_collector = UsageCollector() diff --git a/inference/usage_tracking/config.py b/inference/usage_tracking/config.py new file mode 100644 index 000000000..59f446fdc --- /dev/null +++ b/inference/usage_tracking/config.py @@ -0,0 +1,39 @@ +from __future__ import annotations + +from functools import lru_cache +from urllib.parse import urljoin + +from pydantic import Field, model_validator +from pydantic_settings import BaseSettings, SettingsConfigDict +from typing_extensions import Optional + +from inference.core.env import PROJECT +from inference.core.utils.url_utils import wrap_url + + +class TelemetrySettings(BaseSettings): + model_config = SettingsConfigDict(env_prefix="telemetry_") + + api_usage_endpoint_url: str = "https://api.roboflow.one/usage/inference" + flush_interval: int = Field(default=10, ge=10, le=300) + opt_out: Optional[bool] = False + queue_size: int = Field(default=10, ge=10, le=10000) + + @model_validator(mode="after") + def check_values(cls, inst: TelemetrySettings): + if PROJECT == "roboflow-platform": + inst.api_usage_endpoint_url = wrap_url( + "https://api.roboflow.com/usage/inference" + ) + else: + inst.api_usage_endpoint_url = wrap_url( + "https://api.roboflow.one/usage/inference" + ) + inst.flush_interval = min(max(inst.flush_interval, 10), 300) + inst.queue_size = min(max(inst.queue_size, 10), 10000) + return inst + + 
+@lru_cache +def get_telemetry_settings() -> TelemetrySettings: + return TelemetrySettings() diff --git a/inference/usage_tracking/utils.py b/inference/usage_tracking/utils.py new file mode 100644 index 000000000..ff9960e01 --- /dev/null +++ b/inference/usage_tracking/utils.py @@ -0,0 +1,25 @@ +import inspect +from typing import Any, Callable, Dict, Iterable + +from inference.core.logger import logger + + +def collect_func_params( + func: Callable[[Any], Any], args: Iterable[Any], kwargs: Dict[Any, Any] +) -> Dict[str, Any]: + signature = inspect.signature(func) + + params = {} + if args: + for param, arg_value in zip(signature.parameters.keys(), args): + params[param] = arg_value + if kwargs: + params = {**params, **kwargs} + defaults = set(signature.parameters.keys()).difference(set(params.keys())) + for default_arg in defaults: + params[default_arg] = signature.parameters[default_arg].default + + if set(params) != set(signature.parameters): + logger.error("Params mismatch for %s.%s", func.__module__, func.__name__) + + return params diff --git a/inference_cli/benchmark.py b/inference_cli/benchmark.py index 328f446b7..24a4fbbb3 100644 --- a/inference_cli/benchmark.py +++ b/inference_cli/benchmark.py @@ -1,3 +1,4 @@ +import json from typing import Optional import typer @@ -5,8 +6,9 @@ from inference_cli.lib.benchmark.dataset import PREDEFINED_DATASETS from inference_cli.lib.benchmark_adapter import ( - run_api_speed_benchmark, + run_infer_api_speed_benchmark, run_python_package_speed_benchmark, + run_workflow_api_speed_benchmark, ) benchmark_app = typer.Typer(help="Commands for running inference benchmarks.") @@ -15,13 +17,45 @@ @benchmark_app.command() def api_speed( model_id: Annotated[ - str, + Optional[str], typer.Option( "--model_id", "-m", help="Model ID in format project/version.", ), - ], + ] = None, + workflow_id: Annotated[ + Optional[str], + typer.Option( + "--workflow-id", + "-wid", + help="Workflow ID.", + ), + ] = None, + workspace_name: Annotated[ + 
Optional[str], + typer.Option( + "--workspace-name", + "-wn", + help="Workspace Name.", + ), + ] = None, + workflow_specification: Annotated[ + Optional[str], + typer.Option( + "--workflow-specification", + "-ws", + help="Workflow specification.", + ), + ] = None, + workflow_parameters: Annotated[ + Optional[str], + typer.Option( + "--workflow-parameters", + "-wp", + help="Workflow parameters (JSON-encoded dictionary).", + ), + ] = None, dataset_reference: Annotated[ str, typer.Option( @@ -110,20 +144,42 @@ def api_speed( if proceed.lower() != "y": return None try: - run_api_speed_benchmark( - model_id=model_id, - dataset_reference=dataset_reference, - host=host, - warm_up_requests=warm_up_requests, - benchmark_requests=benchmark_requests, - request_batch_size=request_batch_size, - number_of_clients=number_of_clients, - requests_per_second=requests_per_second, - api_key=api_key, - model_configuration=model_configuration, - output_location=output_location, - enforce_legacy_endpoints=enforce_legacy_endpoints, - ) + if model_id: + run_infer_api_speed_benchmark( + model_id=model_id, + dataset_reference=dataset_reference, + host=host, + warm_up_requests=warm_up_requests, + benchmark_requests=benchmark_requests, + request_batch_size=request_batch_size, + number_of_clients=number_of_clients, + requests_per_second=requests_per_second, + api_key=api_key, + model_configuration=model_configuration, + output_location=output_location, + enforce_legacy_endpoints=enforce_legacy_endpoints, + ) + else: + if workflow_specification: + workflow_specification = json.loads(workflow_specification) + if workflow_parameters: + workflow_parameters = json.loads(workflow_parameters) + run_workflow_api_speed_benchmark( + workflow_id=workflow_id, + workspace_name=workspace_name, + workflow_specification=workflow_specification, + workflow_parameters=workflow_parameters, + dataset_reference=dataset_reference, + host=host, + warm_up_requests=warm_up_requests, + benchmark_requests=benchmark_requests, + 
request_batch_size=request_batch_size, + number_of_clients=number_of_clients, + requests_per_second=requests_per_second, + api_key=api_key, + model_configuration=model_configuration, + output_location=output_location, + ) except Exception as error: typer.echo(f"Command failed. Cause: {error}") raise typer.Exit(code=1) diff --git a/inference_cli/lib/benchmark/api_speed.py b/inference_cli/lib/benchmark/api_speed.py index 8b80cf479..df3d5e1f1 100644 --- a/inference_cli/lib/benchmark/api_speed.py +++ b/inference_cli/lib/benchmark/api_speed.py @@ -3,7 +3,7 @@ import time from functools import partial from threading import Thread -from typing import Callable, List, Optional +from typing import Any, Callable, Dict, List, Optional import numpy as np import requests @@ -28,7 +28,7 @@ def run_api_warm_up( _ = client.infer(inference_input=image) -def coordinate_api_speed_benchmark( +def coordinate_infer_api_speed_benchmark( client: InferenceHTTPClient, images: List[np.ndarray], model_id: str, @@ -52,7 +52,7 @@ def coordinate_api_speed_benchmark( target=display_benchmark_statistics, args=(results_collector,) ) statistics_display_thread.start() - execute_api_speed_benchmark( + execute_infer_api_speed_benchmark( results_collector=results_collector, client=client, images=images, @@ -66,7 +66,96 @@ def coordinate_api_speed_benchmark( return statistics -def execute_api_speed_benchmark( +def coordinate_workflow_api_speed_benchmark( + client: InferenceHTTPClient, + images: List[np.ndarray], + workspace_name: Optional[str], + workflow_id: Optional[str], + workflow_specification: Optional[str], + workflow_parameters: Optional[Dict[str, Any]], + benchmark_requests: int, + request_batch_size: int, + number_of_clients: int, + requests_per_second: Optional[int], +) -> InferenceStatistics: + image_sizes = {i.shape[:2] for i in images} + print(f"Detected images dimensions: {image_sizes}") + results_collector = ResultsCollector() + statistics_display_thread = Thread( + 
target=display_benchmark_statistics, args=(results_collector,) + ) + statistics_display_thread.start() + execute_workflow_api_speed_benchmark( + workspace_name=workspace_name, + workflow_id=workflow_id, + workflow_specification=workflow_specification, + workflow_parameters=workflow_parameters, + results_collector=results_collector, + client=client, + images=images, + benchmark_requests=benchmark_requests, + request_batch_size=request_batch_size, + number_of_clients=number_of_clients, + requests_per_second=requests_per_second, + ) + statistics = results_collector.get_statistics() + statistics_display_thread.join() + return statistics + + +def execute_infer_api_speed_benchmark( + results_collector: ResultsCollector, + client: InferenceHTTPClient, + images: List[np.ndarray], + benchmark_requests: int, + request_batch_size: int, + number_of_clients: int, + requests_per_second: Optional[int], +) -> None: + while len(images) < request_batch_size: + images = images + images + api_request_executor = partial( + execute_infer_api_request, + results_collector=results_collector, + client=client, + images=images, + request_batch_size=request_batch_size, + delay=requests_per_second is not None, + ) + if requests_per_second is not None: + if number_of_clients is not None: + print( + "Parameter specifying `number_of_clients` is ignored when number of " + "RPS to maintain is specified." 
+ ) + results_collector.start_benchmark() + execute_given_rps_sequentially( + executor=api_request_executor, + benchmark_requests=benchmark_requests, + requests_per_second=requests_per_second, + ) + results_collector.stop_benchmark() + return None + client_threads = [] + results_collector.start_benchmark() + for _ in range(number_of_clients): + client_thread = Thread( + target=execute_requests_sequentially, + args=(api_request_executor, benchmark_requests), + ) + client_thread.start() + client_threads.append(client_thread) + for thread in client_threads: + thread.join() + results_collector.stop_benchmark() + return None + + +def execute_workflow_api_speed_benchmark( + workspace_name: Optional[str], + workflow_id: Optional[str], + workflow_specification: Optional[str], + workflow_parameters: Optional[Dict[str, Any]], results_collector: ResultsCollector, client: InferenceHTTPClient, images: List[np.ndarray], @@ -78,7 +167,11 @@ def execute_api_speed_benchmark( while len(images) < request_batch_size: images = images + images api_request_executor = partial( - execute_api_request, + execute_workflow_api_request, + workspace_name=workspace_name, + workflow_id=workflow_id, + workflow_specification=workflow_specification, + workflow_parameters=workflow_parameters, results_collector=results_collector, client=client, images=images, @@ -147,7 +240,7 @@ def execute_given_rps_sequentially( thread.join() -def execute_api_request( +def execute_infer_api_request( results_collector: ResultsCollector, client: InferenceHTTPClient, images: List[np.ndarray], @@ -179,6 +272,52 @@ def execute_api_request( ) +def execute_workflow_api_request( + workspace_name: Optional[str], + workflow_id: Optional[str], + workflow_specification: Optional[str], + workflow_parameters: Optional[Dict[str, Any]], + results_collector: ResultsCollector, + client: InferenceHTTPClient, + images: List[np.ndarray], + request_batch_size: int, + delay: bool = False, +) -> None: + if delay: + 
time.sleep(random.random()) + random.shuffle(images) + images = {"image": images[:request_batch_size]} + start = time.time() + try: + kwargs = { + "images": images, + } + if workflow_parameters: + kwargs["parameters"] = workflow_parameters + if workspace_name and workflow_id: + kwargs["workspace_name"] = workspace_name + kwargs["workflow_id"] = workflow_id + else: + kwargs["specification"] = workflow_specification + _ = client.run_workflow(**kwargs) + duration = time.time() - start + results_collector.register_inference_duration( + batch_size=request_batch_size, duration=duration + ) + except Exception as exc: + duration = time.time() - start + results_collector.register_inference_duration( + batch_size=request_batch_size, duration=duration + ) + status_code = exc.__class__.__name__ + if isinstance(exc, requests.exceptions.HTTPError): + status_code = str(exc.response.status_code) + + results_collector.register_error( + batch_size=request_batch_size, status_code=status_code + ) + + + def display_benchmark_statistics( results_collector: ResultsCollector, sleep_time: float = 5.0, diff --git a/inference_cli/lib/benchmark_adapter.py b/inference_cli/lib/benchmark_adapter.py index cde0e4c59..b238bd2f7 100644 --- a/inference_cli/lib/benchmark_adapter.py +++ b/inference_cli/lib/benchmark_adapter.py @@ -2,10 +2,11 @@ from dataclasses import asdict from datetime import datetime from threading import Thread -from typing import Optional +from typing import Any, Dict, Optional from inference_cli.lib.benchmark.api_speed import ( - coordinate_api_speed_benchmark, + coordinate_infer_api_speed_benchmark, + coordinate_workflow_api_speed_benchmark, display_benchmark_statistics, ) from inference_cli.lib.benchmark.dataset import load_dataset_images @@ -17,7 +18,7 @@ from inference_cli.lib.utils import dump_json, initialise_client -def run_api_speed_benchmark( +def 
run_api_speed_benchmark( client.select_model(model_id=model_id) if enforce_legacy_endpoints: client.select_api_v0() - benchmark_results = coordinate_api_speed_benchmark( + benchmark_results = coordinate_infer_api_speed_benchmark( client=client, images=dataset_images, model_id=model_id, @@ -75,6 +76,69 @@ def run_api_speed_benchmark( ) +def run_workflow_api_speed_benchmark( + workspace_name: Optional[str], + workflow_id: Optional[str], + workflow_specification: Optional[str], + workflow_parameters: Optional[Dict[str, Any]], + dataset_reference: str, + host: str, + warm_up_requests: int = 10, + benchmark_requests: int = 1000, + request_batch_size: int = 1, + number_of_clients: int = 1, + requests_per_second: Optional[int] = None, + api_key: Optional[str] = None, + model_configuration: Optional[str] = None, + output_location: Optional[str] = None, +) -> None: + dataset_images = load_dataset_images( + dataset_reference=dataset_reference, + ) + client = initialise_client( + host=host, + api_key=api_key, + model_configuration=model_configuration, + disable_active_learning=True, + max_concurrent_requests=1, + max_batch_size=request_batch_size, + ) + benchmark_results = coordinate_workflow_api_speed_benchmark( + client=client, + images=dataset_images, + workspace_name=workspace_name, + workflow_id=workflow_id, + workflow_specification=workflow_specification, + workflow_parameters=workflow_parameters, + benchmark_requests=benchmark_requests, + request_batch_size=request_batch_size, + number_of_clients=number_of_clients, + requests_per_second=requests_per_second, + ) + if output_location is None: + return None + benchmark_parameters = { + "datetime": datetime.now().isoformat(), + "dataset_reference": dataset_reference, + "host": host, + "benchmark_inferences": benchmark_requests, + "batch_size": request_batch_size, + "number_of_clients": number_of_clients, + "requests_per_second": requests_per_second, + "model_configuration": model_configuration, + } + if workflow_id and 
workspace_name: + benchmark_parameters["workflow_id"] = workflow_id + benchmark_parameters["workspace_name"] = workspace_name + else: + benchmark_parameters["workflow_id"] = "locally defined" + dump_benchmark_results( + output_location=output_location, + benchmark_parameters=benchmark_parameters, + benchmark_results=benchmark_results, + ) + + def run_python_package_speed_benchmark( model_id: str, dataset_reference: str, diff --git a/requirements/_requirements.txt b/requirements/_requirements.txt index 0b32eb120..71f34b825 100644 --- a/requirements/_requirements.txt +++ b/requirements/_requirements.txt @@ -19,6 +19,7 @@ setuptools>=65.5.1,<70.0.0 pytest-asyncio<=0.21.1 networkx>=3.1 pydantic~=2.6 +pydantic-settings~=2.2 openai>=1.12.0 structlog>=24.1.0 zxing-cpp>=2.2.0 diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 000000000..0c40096e3 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,3 @@ +import os + +os.environ["TELEMETRY_OPT_OUT"] = "True" diff --git a/tests/inference/hosted_platform_tests/conftest.py b/tests/inference/hosted_platform_tests/conftest.py index 0d9ef9af0..43bc04359 100644 --- a/tests/inference/hosted_platform_tests/conftest.py +++ b/tests/inference/hosted_platform_tests/conftest.py @@ -249,6 +249,3 @@ def retry_at_max_n_times(function: callable, n: int, function_description: str) return None attempts += 1 raise Exception(f"Could not achieve success of {function_description}") - - - diff --git a/tests/inference/hosted_platform_tests/test_workflows.py b/tests/inference/hosted_platform_tests/test_workflows.py index f5beeb401..0a34381f2 100644 --- a/tests/inference/hosted_platform_tests/test_workflows.py +++ b/tests/inference/hosted_platform_tests/test_workflows.py @@ -5,7 +5,9 @@ @pytest.mark.flaky(retries=4, delay=1) -def test_getting_schemas(object_detection_service_url: str) -> None: +def test_getting_schemas_from_legacy_get_endpoint( + object_detection_service_url: str, +) -> None: # when response = 
requests.get(f"{object_detection_service_url}/workflows/blocks/describe") @@ -18,6 +20,37 @@ def test_getting_schemas(object_detection_service_url: str) -> None: "kinds_connections", "primitives_connections", "universal_query_language_description", + "dynamic_block_definition_schema", + } + assert len(response_data["blocks"]) > 0, "Some blocs expected to be added" + assert len(response_data["declared_kinds"]) > 0, "Some kinds must be declared" + assert len(response_data["declared_kinds"]) >= len( + response_data["kinds_connections"] + ), "Kinds connections declared as inputs for blocks must be at most in number of all declared kinds" + assert ( + len(response_data["primitives_connections"]) > 0 + ), "Expected some primitive parameters for steps to be declared" + + +@pytest.mark.flaky(retries=4, delay=1) +def test_getting_schemas_from_new_post_endpoint( + object_detection_service_url: str, +) -> None: + # when + response = requests.post( + f"{object_detection_service_url}/workflows/blocks/describe" + ) + + # then + response.raise_for_status() + response_data = response.json() + assert set(response_data.keys()) == { + "blocks", + "declared_kinds", + "kinds_connections", + "primitives_connections", + "universal_query_language_description", + "dynamic_block_definition_schema", } assert len(response_data["blocks"]) > 0, "Some blocs expected to be added" assert len(response_data["declared_kinds"]) > 0, "Some kinds must be declared" @@ -29,6 +62,80 @@ def test_getting_schemas(object_detection_service_url: str) -> None: ), "Expected some primitive parameters for steps to be declared" +FUNCTION = """ +def my_function(self, prediction: sv.Detections, crops: Batch[WorkflowImageData]) -> BlockResult: + detection_id2bbox = { + detection_id.item(): i for i, detection_id in enumerate(prediction.data["detection_id"]) + } + results = [] + for crop in crops: + parent_id = crop.parent_metadata.parent_id + results.append({"associated_detections": 
prediction[detection_id2bbox[parent_id]]}) + return results + """ +DYNAMIC_BLOCKS_DEFINITION = [ + { + "type": "DynamicBlockDefinition", + "manifest": { + "type": "ManifestDescription", + "block_type": "DetectionsToCropsAssociation", + "inputs": { + "prediction": { + "type": "DynamicInputDefinition", + "selector_types": ["step_output"], + "selector_data_kind": { + "step_output": [ + "Batch[object_detection_prediction]", + "Batch[instance_segmentation_prediction]", + "Batch[keypoint_detection_prediction]", + ] + }, + }, + "crops": { + "type": "DynamicInputDefinition", + "selector_types": ["step_output_image"], + "is_dimensionality_reference": True, + "dimensionality_offset": 1, + }, + }, + "outputs": { + "associated_detections": { + "type": "DynamicOutputDefinition", + "kind": [ + "Batch[object_detection_prediction]", + "Batch[instance_segmentation_prediction]", + "Batch[keypoint_detection_prediction]", + ], + } + }, + }, + "code": { + "type": "PythonCode", + "run_function_code": FUNCTION, + "run_function_name": "my_function", + }, + }, +] + + +@pytest.mark.flaky(retries=4, delay=1) +def test_getting_schemas_from_new_post_endpoint_with_dynamic_blocks( + object_detection_service_url: str, +) -> None: + # when + response = requests.post( + f"{object_detection_service_url}/workflows/blocks/describe", + json={"dynamic_blocks_definitions": DYNAMIC_BLOCKS_DEFINITION}, + ) + + # then + assert response.status_code == 500 + response_data = response.json() + assert ( + "Cannot use dynamic blocks with custom Python code" in response_data["message"] + ), "Expected execution to be prevented" + + @pytest.mark.flaky(retries=4, delay=1) def test_getting_dynamic_outputs(object_detection_service_url: str) -> None: # when @@ -410,7 +517,7 @@ def test_ocr_workflow_run_when_run_expected_to_succeed( @pytest.mark.flaky(retries=4, delay=1) def test_yolo_world_workflow_run_when_run_expected_to_succeed( - object_detection_service_url: str, detection_model_id: str + 
object_detection_service_url: str, ) -> None: # when response = requests.post( @@ -440,3 +547,115 @@ def test_yolo_world_workflow_run_when_run_expected_to_succeed( assert ( len(response_data["outputs"]) == 2 ), "Two images submitted - two response expected" + + +FUNCTION_TO_GET_MAXIMUM_CONFIDENCE_FROM_BATCH_OF_DETECTIONS = """ +def run(self, predictions: Batch[sv.Detections]) -> BlockResult: + result = [] + for prediction in predictions: + result.append({"max_confidence": np.max(prediction.confidence).item()}) + return result +""" + +WORKFLOW_WITH_PYTHON_BLOCK_RUNNING_ON_BATCH = { + "version": "1.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + {"type": "WorkflowParameter", "name": "model_id"}, + ], + "dynamic_blocks_definitions": [ + { + "type": "DynamicBlockDefinition", + "manifest": { + "type": "ManifestDescription", + "block_type": "MaxConfidence", + "inputs": { + "predictions": { + "type": "DynamicInputDefinition", + "selector_types": ["step_output"], + }, + }, + "outputs": { + "max_confidence": { + "type": "DynamicOutputDefinition", + "kind": ["float_zero_to_one"], + } + }, + "accepts_batch_input": True, + }, + "code": { + "type": "PythonCode", + "run_function_code": FUNCTION_TO_GET_MAXIMUM_CONFIDENCE_FROM_BATCH_OF_DETECTIONS, + }, + }, + ], + "steps": [ + { + "type": "RoboflowObjectDetectionModel", + "name": "model", + "image": "$inputs.image", + "model_id": "$inputs.model_id", + }, + { + "type": "MaxConfidence", + "name": "confidence_aggregation", + "predictions": "$steps.model.predictions", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "max_confidence", + "selector": "$steps.confidence_aggregation.max_confidence", + }, + ], +} + + +@pytest.mark.flaky(retries=4, delay=1) +def test_workflow_run_with_dynamic_blocks( + object_detection_service_url: str, detection_model_id: str +) -> None: + # when + response = requests.post( + f"{object_detection_service_url}/workflows/run", + json={ + "specification": 
WORKFLOW_WITH_PYTHON_BLOCK_RUNNING_ON_BATCH, + "api_key": ROBOFLOW_API_KEY, + "inputs": { + "image": [ + { + "type": "url", + "value": "https://media.roboflow.com/fruit.png", + } + ] + * 2, + "model_id": detection_model_id, + }, + }, + ) + + # then + assert response.status_code == 500 + response_data = response.json() + assert ( + "Cannot use dynamic blocks with custom Python code" in response_data["message"] + ), "Expected execution to be prevented" + + +@pytest.mark.flaky(retries=4, delay=1) +def test_workflow_validate_with_dynamic_blocks( + object_detection_service_url: str, detection_model_id: str +) -> None: + # when + response = requests.post( + f"{object_detection_service_url}/workflows/validate", + json=WORKFLOW_WITH_PYTHON_BLOCK_RUNNING_ON_BATCH, + ) + + # then + assert response.status_code == 500 + response_data = response.json() + assert ( + "Cannot use dynamic blocks with custom Python code" in response_data["message"] + ), "Expected execution to be prevented" diff --git a/tests/inference/integration_tests/test_workflow_endpoints.py b/tests/inference/integration_tests/test_workflow_endpoints.py index e1d8213c8..fe402c98b 100644 --- a/tests/inference/integration_tests/test_workflow_endpoints.py +++ b/tests/inference/integration_tests/test_workflow_endpoints.py @@ -5,7 +5,7 @@ API_KEY = os.environ.get("API_KEY") -def test_getting_blocks_descriptions(server_url) -> None: +def test_getting_blocks_descriptions_using_legacy_get_endpoint(server_url) -> None: # when response = requests.get(f"{server_url}/workflows/blocks/describe") @@ -30,6 +30,216 @@ def test_getting_blocks_descriptions(server_url) -> None: assert ( len(response_data["primitives_connections"]) > 0 ), "Expected some primitive parameters for steps to be declared" + assert ( + "universal_query_language_description" in response_data + ), "Expected universal_query_language_description key to be present in response" + assert ( + "dynamic_block_definition_schema" in response_data + ), "Expected key 
`dynamic_block_definition_schema` to be present in response" + + +def test_getting_blocks_descriptions_using_new_post_endpoint(server_url) -> None: + # when + response = requests.post(f"{server_url}/workflows/blocks/describe") + + # then + response.raise_for_status() + response_data = response.json() + assert "blocks" in response_data, "Response expected to define blocks" + assert len(response_data["blocks"]) > 0, "Some blocs expected to be added" + assert ( + "declared_kinds" in response_data + ), "Declared kinds must be provided in output" + assert len(response_data["declared_kinds"]) > 0, "Some kinds must be declared" + assert ( + "kinds_connections" in response_data + ), "Kinds connections expected to be declared" + assert len(response_data["declared_kinds"]) >= len( + response_data["kinds_connections"] + ), "Kinds connections declared as inputs for blocks must be at most in number of all declared kinds" + assert ( + "primitives_connections" in response_data + ), "Primitives connections expected to be in response" + assert ( + len(response_data["primitives_connections"]) > 0 + ), "Expected some primitive parameters for steps to be declared" + assert ( + "universal_query_language_description" in response_data + ), "Expected universal_query_language_description key to be present in response" + assert ( + "dynamic_block_definition_schema" in response_data + ), "Expected key `dynamic_block_definition_schema` to be present in response" + + +def test_getting_blocks_descriptions_using_new_post_endpoint_with_dynamic_steps( + server_url, +) -> None: + # given + function_code = """ +def my_function(self, prediction: sv.Detections, crops: Batch[WorkflowImageData]) -> BlockResult: + detection_id2bbox = { + detection_id.item(): i for i, detection_id in enumerate(prediction.data["detection_id"]) + } + results = [] + for crop in crops: + parent_id = crop.parent_metadata.parent_id + results.append({"associated_detections": prediction[detection_id2bbox[parent_id]]}) + return 
results + """ + dynamic_blocks_definitions = [ + { + "type": "DynamicBlockDefinition", + "manifest": { + "type": "ManifestDescription", + "block_type": "DetectionsToCropsAssociation", + "inputs": { + "prediction": { + "type": "DynamicInputDefinition", + "selector_types": ["step_output"], + "selector_data_kind": { + "step_output": [ + "Batch[object_detection_prediction]", + "Batch[instance_segmentation_prediction]", + "Batch[keypoint_detection_prediction]", + ] + }, + }, + "crops": { + "type": "DynamicInputDefinition", + "selector_types": ["step_output_image"], + "is_dimensionality_reference": True, + "dimensionality_offset": 1, + }, + }, + "outputs": { + "associated_detections": { + "type": "DynamicOutputDefinition", + "kind": [ + "Batch[object_detection_prediction]", + "Batch[instance_segmentation_prediction]", + "Batch[keypoint_detection_prediction]", + ], + } + }, + }, + "code": { + "type": "PythonCode", + "run_function_code": function_code, + "run_function_name": "my_function", + }, + }, + ] + + # when + response = requests.post( + f"{server_url}/workflows/blocks/describe", + json={"dynamic_blocks_definitions": dynamic_blocks_definitions}, + ) + + # then + response.raise_for_status() + response_data = response.json() + assert "blocks" in response_data, "Response expected to define blocks" + assert len(response_data["blocks"]) > 0, "Some blocs expected to be added" + assert ( + "declared_kinds" in response_data + ), "Declared kinds must be provided in output" + assert len(response_data["declared_kinds"]) > 0, "Some kinds must be declared" + assert ( + "kinds_connections" in response_data + ), "Kinds connections expected to be declared" + assert len(response_data["declared_kinds"]) >= len( + response_data["kinds_connections"] + ), "Kinds connections declared as inputs for blocks must be at most in number of all declared kinds" + assert ( + "primitives_connections" in response_data + ), "Primitives connections expected to be in response" + assert ( + 
len(response_data["primitives_connections"]) > 0 + ), "Expected some primitive parameters for steps to be declared" + assert ( + "universal_query_language_description" in response_data + ), "Expected universal_query_language_description key to be present in response" + assert ( + "dynamic_block_definition_schema" in response_data + ), "Expected key `dynamic_block_definition_schema` to be present in response" + types_compatible_with_object_detection_predictions = { + e["manifest_type_identifier"] + for e in response_data["kinds_connections"][ + "Batch[object_detection_prediction]" + ] + } + assert ( + "DetectionsToCropsAssociation" + in types_compatible_with_object_detection_predictions + ), "Expected dynamic block to be manifested in connections" + + +def test_getting_blocks_descriptions_using_new_post_endpoint_with_dynamic_steps_when_steps_are_malformed( + server_url, +) -> None: + # given + function_code = """ +def my_function(self, prediction: sv.Detections, crops: Batch[WorkflowImageData]) -> BlockResult: + pass + """ + dynamic_blocks_definitions = [ + { + "type": "DynamicBlockDefinition", + "manifest": { + "type": "ManifestDescription", + "block_type": "DetectionsToCropsAssociation", + "inputs": { + "prediction": { + "type": "DynamicInputDefinition", + "selector_types": ["step_output"], + "is_dimensionality_reference": True, + "selector_data_kind": { + "step_output": [ + "Batch[object_detection_prediction]", + "Batch[instance_segmentation_prediction]", + "Batch[keypoint_detection_prediction]", + ] + }, + }, + "crops": { + "type": "DynamicInputDefinition", + "selector_types": ["step_output_image"], + "is_dimensionality_reference": True, + "dimensionality_offset": 1, + }, + }, + "outputs": { + "associated_detections": { + "type": "DynamicOutputDefinition", + "kind": [ + "Batch[object_detection_prediction]", + "Batch[instance_segmentation_prediction]", + "Batch[keypoint_detection_prediction]", + ], + } + }, + }, + "code": { + "type": "PythonCode", + 
"run_function_code": function_code, + "run_function_name": "my_function", + }, + }, + ] + + # when + response = requests.post( + f"{server_url}/workflows/blocks/describe", + json={"dynamic_blocks_definitions": dynamic_blocks_definitions}, + ) + + # then + assert response.status_code == 400, "Expected bad request to be manifested" + response_data = response.json() + assert ( + "dimensionality reference" in response_data["message"] + ), "Expected the cause of problem being dimensionality reference declaration" def test_getting_dynamic_outputs(server_url: str) -> None: @@ -100,6 +310,86 @@ def test_compilation_endpoint_when_compilation_succeeds( assert response_data["status"] == "ok" +def test_compilation_endpoint_when_compilation_succeeds_with_custom_block( + server_url: str, +) -> None: + # given + init_function = """ +def init_model() -> Dict[str, Any]: + model = YOLOv8ObjectDetection(model_id="yolov8n-640") + return {"model": model} +""" + infer_function = """ +def infer(self, image: WorkflowImageData) -> BlockResult: + predictions = self._init_results["model"].infer(image.numpy_image) + return {"predictions": sv.Detections.from_inference(predictions[0].model_dump(by_alias=True, exclude_none=True))} +""" + valid_workflow_definition = { + "version": "1.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + ], + "dynamic_blocks_definitions": [ + { + "type": "DynamicBlockDefinition", + "manifest": { + "type": "ManifestDescription", + "block_type": "CustomModel", + "inputs": { + "image": { + "type": "DynamicInputDefinition", + "selector_types": ["input_image"], + }, + }, + "outputs": { + "predictions": { + "type": "DynamicOutputDefinition", + "kind": [ + "Batch[object_detection_prediction]", + ], + } + }, + }, + "code": { + "type": "PythonCode", + "run_function_code": infer_function, + "run_function_name": "infer", + "init_function_code": init_function, + "init_function_name": "init_model", + "imports": [ + "from inference.models.yolov8 import 
YOLOv8ObjectDetection", + ], + }, + }, + ], + "steps": [ + { + "type": "CustomModel", + "name": "model", + "image": "$inputs.image", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "predictions", + "selector": "$steps.model.predictions", + }, + ], + } + + # when + response = requests.post( + f"{server_url}/workflows/validate", + json=valid_workflow_definition, + ) + + # then + response.raise_for_status() + response_data = response.json() + assert response_data["status"] == "ok" + + def test_compilation_endpoint_when_compilation_fails( server_url: str, ) -> None: @@ -142,7 +432,7 @@ def test_compilation_endpoint_when_compilation_fails( def test_workflow_run( server_url: str, - clean_loaded_models_fixture + clean_loaded_models_fixture, ) -> None: # given valid_workflow_definition = { @@ -204,3 +494,104 @@ def test_workflow_run( assert ( len(response_data["outputs"][1]["result"]["predictions"]) == 6 ), "Expected to see 6 predictions" + + +FUNCTION_TO_GET_MAXIMUM_CONFIDENCE_FROM_BATCH_OF_DETECTIONS = """ +def run(self, predictions: Batch[sv.Detections]) -> BlockResult: + result = [] + for prediction in predictions: + result.append({"max_confidence": np.max(prediction.confidence).item()}) + return result +""" + +WORKFLOW_WITH_PYTHON_BLOCK_RUNNING_ON_BATCH = { + "version": "1.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + ], + "dynamic_blocks_definitions": [ + { + "type": "DynamicBlockDefinition", + "manifest": { + "type": "ManifestDescription", + "block_type": "MaxConfidence", + "inputs": { + "predictions": { + "type": "DynamicInputDefinition", + "selector_types": ["step_output"], + }, + }, + "outputs": { + "max_confidence": { + "type": "DynamicOutputDefinition", + "kind": ["float_zero_to_one"], + } + }, + "accepts_batch_input": True, + }, + "code": { + "type": "PythonCode", + "run_function_code": FUNCTION_TO_GET_MAXIMUM_CONFIDENCE_FROM_BATCH_OF_DETECTIONS, + }, + }, + ], + "steps": [ + { + "type": "RoboflowObjectDetectionModel", + 
"name": "model", + "image": "$inputs.image", + "model_id": "yolov8n-640", + }, + { + "type": "MaxConfidence", + "name": "confidence_aggregation", + "predictions": "$steps.model.predictions", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "max_confidence", + "selector": "$steps.confidence_aggregation.max_confidence", + }, + ], +} + + +def test_workflow_run_when_dynamic_block_is_in_use( + server_url: str, + clean_loaded_models_fixture, +) -> None: + # when + response = requests.post( + f"{server_url}/workflows/run", + json={ + "specification": WORKFLOW_WITH_PYTHON_BLOCK_RUNNING_ON_BATCH, + "api_key": API_KEY, + "inputs": { + "image": [ + { + "type": "url", + "value": "https://media.roboflow.com/fruit.png", + } + ] + * 2, + }, + }, + ) + + # then + response.raise_for_status() + response_data = response.json() + assert isinstance( + response_data["outputs"], list + ), "Expected list of elements to be returned" + assert ( + len(response_data["outputs"]) == 2 + ), "Two images submitted - two responses expected" + assert set(response_data["outputs"][0].keys()) == { + "max_confidence" + }, "Expected only `max_confidence` output" + assert set(response_data["outputs"][1].keys()) == { + "max_confidence" + }, "Expected only `max_confidence` output" diff --git a/tests/inference/models_predictions_tests/test_yolo_world.py b/tests/inference/models_predictions_tests/test_yolo_world.py index c3136b069..6d2223f0f 100644 --- a/tests/inference/models_predictions_tests/test_yolo_world.py +++ b/tests/inference/models_predictions_tests/test_yolo_world.py @@ -1,7 +1,7 @@ import numpy as np +import supervision as sv from inference.models import YOLOWorld -import supervision as sv def test_yolo_world_v1_s_against_single_image(person_image: np.ndarray) -> None: @@ -29,6 +29,46 @@ def test_yolo_world_v1_s_against_single_image(person_image: np.ndarray) -> None: detection_results = sv.Detections.from_inference(results) # then + assert len(detection_results) == len( + 
expected_detections + ), "Expected the same number of boxes" + assert np.allclose( + detection_results.xyxy, expected_detections.xyxy, atol=0.05 + ), "Boxes coordinates detection differ" + assert np.allclose( + detection_results.confidence, expected_detections.confidence, atol=1e-4 + ), "Confidences differ" + assert np.allclose( + detection_results.class_id, expected_detections.class_id + ), "Classes id differ" + + +def test_yolo_world_v1_s_against_single_image_with_only_one_detected_box( + person_image: np.ndarray, +) -> None: + # given + model = YOLOWorld(model_id="yolo_world/s") + model.set_classes(["person"]) + expected_detections = sv.Detections( + xyxy=np.array( + [ + [273.18, 160.18, 358.61, 378.85], + ] + ), + confidence=np.array([0.9503]), + class_id=np.array([0]), + ) + + # when + results = model.infer(person_image, confidence=0.03).dict( + by_alias=True, exclude_none=True + ) + detection_results = sv.Detections.from_inference(results) + + # then + assert len(detection_results) == len( + expected_detections + ), "Expected the same number of boxes" assert np.allclose( detection_results.xyxy, expected_detections.xyxy, atol=0.05 ), "Boxes coordinates detection differ" @@ -62,7 +102,11 @@ def test_yolo_world_v1_m_against_single_image(person_image: np.ndarray) -> None: by_alias=True, exclude_none=True ) detection_results = sv.Detections.from_inference(results) + # then + assert len(detection_results) == len( + expected_detections + ), "Expected the same number of boxes" assert np.allclose( detection_results.xyxy, expected_detections.xyxy, @@ -98,7 +142,11 @@ def test_yolo_world_v1_l_against_single_image(person_image: np.ndarray) -> None: by_alias=True, exclude_none=True ) detection_results = sv.Detections.from_inference(results) + # then + assert len(detection_results) == len( + expected_detections + ), "Expected the same number of boxes" assert np.allclose( detection_results.xyxy, expected_detections.xyxy, atol=0.05 ), "Boxes coordinates detection differ" @@ 
-132,7 +180,11 @@ def test_yolo_world_v1_x_against_single_image(person_image: np.ndarray) -> None: by_alias=True, exclude_none=True ) detection_results = sv.Detections.from_inference(results) + # then + assert len(detection_results) == len( + expected_detections + ), "Expected the same number of boxes" assert np.allclose( detection_results.xyxy, expected_detections.xyxy, atol=0.05 ), "Boxes coordinates detection differ" @@ -168,6 +220,9 @@ def test_yolo_world_v2_s_against_single_image(person_image: np.ndarray) -> None: detection_results = sv.Detections.from_inference(results) # then + assert len(detection_results) == len( + expected_detections + ), "Expected the same number of boxes" assert np.allclose( detection_results.xyxy, expected_detections.xyxy, atol=0.05 ), "Boxes coordinates detection differ" @@ -202,6 +257,9 @@ def test_yolo_world_v2_m_against_single_image(person_image: np.ndarray) -> None: ) detection_results = sv.Detections.from_inference(results) # then + assert len(detection_results) == len( + expected_detections + ), "Expected the same number of boxes" assert np.allclose( detection_results.xyxy, expected_detections.xyxy, atol=0.05 ), "Boxes coordinates detection differ" @@ -235,7 +293,11 @@ def test_yolo_world_v2_l_against_single_image(person_image: np.ndarray) -> None: by_alias=True, exclude_none=True ) detection_results = sv.Detections.from_inference(results) + # then + assert len(detection_results) == len( + expected_detections + ), "Expected the same number of boxes" assert np.allclose( detection_results.xyxy, expected_detections.xyxy, atol=0.05 ), "Boxes coordinates detection differ" @@ -269,7 +331,11 @@ def test_yolo_world_v2_x_against_single_image(person_image: np.ndarray) -> None: by_alias=True, exclude_none=True ) detection_results = sv.Detections.from_inference(results) + # then + assert len(detection_results) == len( + expected_detections + ), "Expected the same number of boxes" assert np.allclose( detection_results.xyxy, 
expected_detections.xyxy, atol=0.05 ), "Boxes coordinates detection differ" diff --git a/tests/inference/models_predictions_tests/test_yolonas.py b/tests/inference/models_predictions_tests/test_yolonas.py index 144666058..c553da336 100644 --- a/tests/inference/models_predictions_tests/test_yolonas.py +++ b/tests/inference/models_predictions_tests/test_yolonas.py @@ -1,9 +1,7 @@ import numpy as np import pytest -from inference.core.entities.responses.inference import ( - ObjectDetectionInferenceResponse, -) +from inference.core.entities.responses.inference import ObjectDetectionInferenceResponse from inference.core.env import MAX_BATCH_SIZE from inference.models import YOLONASObjectDetection diff --git a/tests/inference/models_predictions_tests/test_yolov10.py b/tests/inference/models_predictions_tests/test_yolov10.py index 05b80329e..f682ea1ce 100644 --- a/tests/inference/models_predictions_tests/test_yolov10.py +++ b/tests/inference/models_predictions_tests/test_yolov10.py @@ -1,13 +1,9 @@ import numpy as np import pytest -from inference.core.entities.responses.inference import ( - ObjectDetectionInferenceResponse, -) +from inference.core.entities.responses.inference import ObjectDetectionInferenceResponse from inference.core.env import MAX_BATCH_SIZE -from inference.models import ( - YOLOv10ObjectDetection, -) +from inference.models import YOLOv10ObjectDetection @pytest.mark.slow @@ -86,4 +82,4 @@ def assert_yolov10_detection_prediction_matches_reference( ] assert np.allclose( xywh, [314.5, 217.0, 597.0, 414.0], atol=0.6 - ), "while test creation, box coordinates was [314.5, 217.0, 597.0, 414.0]" \ No newline at end of file + ), "while test creation, box coordinates was [314.5, 217.0, 597.0, 414.0]" diff --git a/tests/inference/unit_tests/usage_tracking/__init__.py b/tests/inference/unit_tests/usage_tracking/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/inference/unit_tests/usage_tracking/test_collector.py 
b/tests/inference/unit_tests/usage_tracking/test_collector.py new file mode 100644 index 000000000..0f78d17d2 --- /dev/null +++ b/tests/inference/unit_tests/usage_tracking/test_collector.py @@ -0,0 +1,522 @@ +import hashlib +import json +import sys + +import pytest + +from inference.core.env import LAMBDA +from inference.usage_tracking.collector import UsageCollector + + +def test_create_empty_usage_dict(): + # given + usage_default_dict = UsageCollector.empty_usage_dict(exec_session_id="exec_session_id") + + # when + usage_default_dict["fake_api_key"]["category:fake_id"] + + # then + assert json.dumps(usage_default_dict) == json.dumps({ + "fake_api_key": { + "category:fake_id": { + "timestamp_start": None, + "timestamp_stop": None, + "exec_session_id": "exec_session_id", + "processed_frames": 0, + "fps": 0, + "source_duration": 0, + "category": "", + "resource_id": "", + "hosted": LAMBDA, + "api_key": None, + "enterprise": False, + } + } + }) + + +def test_merge_usage_dicts_raises_on_mismatched_resource_id(): + # given + usage_payload_1 = {"resource_id": "some"} + usage_payload_2 = {"resource_id": "other"} + + with pytest.raises(ValueError): + UsageCollector._merge_usage_dicts(d1=usage_payload_1, d2=usage_payload_2) + + +def test_merge_usage_dicts_merge_with_empty(): + # given + usage_payload_1 = { + "resource_id": "some", + "api_key": "some", + "timestamp_start": 1721032989934855000, + "timestamp_stop": 1721032989934855001, + "processed_frames": 1, + "source_duration": 1, + } + usage_payload_2 = { + "resource_id": "some", + "api_key": "some" + } + + assert UsageCollector._merge_usage_dicts(d1=usage_payload_1, d2=usage_payload_2) == usage_payload_1 + assert UsageCollector._merge_usage_dicts(d1=usage_payload_2, d2=usage_payload_1) == usage_payload_1 + + +def test_merge_usage_dicts(): + # given + usage_payload_1 = { + "resource_id": "some", + "api_key": "some", + "timestamp_start": 1721032989934855000, + "timestamp_stop": 1721032989934855001, + "processed_frames": 
1, + "source_duration": 1, + } + usage_payload_2 = { + "resource_id": "some", + "api_key": "some", + "timestamp_start": 1721032989934855002, + "timestamp_stop": 1721032989934855003, + "processed_frames": 1, + "source_duration": 1, + } + + assert UsageCollector._merge_usage_dicts(d1=usage_payload_1, d2=usage_payload_2) == { + "resource_id": "some", + "api_key": "some", + "timestamp_start": 1721032989934855000, + "timestamp_stop": 1721032989934855003, + "processed_frames": 2, + "source_duration": 2, + } + + +def test_get_api_key_usage_containing_resource_with_no_payload_containing_api_key(): + # given + usage_payloads = [ + { + None: { + None: { + "api_key": None, + "resource_id": None, + "timestamp_start": 1721032989934855000, + "timestamp_stop": 1721032989934855001, + "processed_frames": 1, + "source_duration": 1, + }, + }, + }, + ] + + # when + api_key_usage_with_resource = UsageCollector._get_api_key_usage_containing_resource(api_key="api1", usage_payloads=usage_payloads) + + # then + assert api_key_usage_with_resource is None + + +def test_get_api_key_usage_containing_resource_with_no_payload_containing_resource_for_given_api_key(): + # given + usage_payloads = [ + { + "api1": { + "resource1": { + "api_key": "api1", + "resource_id": "resource1", + "timestamp_start": 1721032989934855000, + "timestamp_stop": 1721032989934855001, + "processed_frames": 1, + "source_duration": 1, + }, + }, + }, + { + "api1": { + "resource2": { + "api_key": "api1", + "resource_id": "resource2", + "timestamp_start": 1721032989934855002, + "timestamp_stop": 1721032989934855003, + "processed_frames": 1, + "source_duration": 1, + }, + }, + None: { + None: { + "api_key": None, + "resource_id": None, + "timestamp_start": 1721032989934855002, + "timestamp_stop": 1721032989934855003, + "processed_frames": 1, + "source_duration": 1, + }, + }, + } + ] + + # when + api_key_usage_with_resource = UsageCollector._get_api_key_usage_containing_resource(api_key="api2", usage_payloads=usage_payloads) + 
+ # then + assert api_key_usage_with_resource is None + + +def test_get_api_key_usage_containing_resource(): + # given + usage_payloads = [ + { + "api1": { + "resource1": { + "api_key": "api1", + "resource_id": "resource1", + "timestamp_start": 1721032989934855000, + "timestamp_stop": 1721032989934855001, + "processed_frames": 1, + "source_duration": 1, + }, + }, + }, + { + "api2": { + "resource1": { + "api_key": "api2", + "resource_id": "resource1", + "timestamp_start": 1721032989934855002, + "timestamp_stop": 1721032989934855003, + "processed_frames": 1, + "source_duration": 1, + }, + }, + } + ] + + # when + api_key_usage_with_resource = UsageCollector._get_api_key_usage_containing_resource(api_key="api2", usage_payloads=usage_payloads) + + # then + assert api_key_usage_with_resource == { + "api_key": "api2", + "resource_id": "resource1", + "timestamp_start": 1721032989934855002, + "timestamp_stop": 1721032989934855003, + "processed_frames": 1, + "source_duration": 1, + } + + +def test_zip_usage_payloads(): + dumped_usage_payloads = [ + { + "api1": { + "resource1": { + "api_key": "api1", + "resource_id": "resource1", + "timestamp_start": 1721032989934855000, + "timestamp_stop": 1721032989934855001, + "processed_frames": 1, + "source_duration": 1, + }, + "resource2": { + "api_key": "api1", + "resource_id": "resource2", + "timestamp_start": 1721032989934855000, + "timestamp_stop": 1721032989934855001, + "processed_frames": 1, + "source_duration": 1, + }, + }, + "api2": { + "resource1": { + "api_key": "api2", + "resource_id": "resource1", + "timestamp_start": 1721032989934856000, + "timestamp_stop": 1721032989934856001, + "processed_frames": 1, + "source_duration": 1, + }, + "resource2": { + "api_key": "api2", + "resource_id": "resource2", + "timestamp_start": 1721032989934856000, + "timestamp_stop": 1721032989934856001, + "processed_frames": 1, + "source_duration": 1, + }, + }, + }, + { + "api1": { + "resource1": { + "api_key": "api1", + "resource_id": "resource1", 
+ "timestamp_start": 1721032989934855002, + "timestamp_stop": 1721032989934855003, + "processed_frames": 1, + "source_duration": 1, + }, + "resource3": { + "api_key": "api1", + "resource_id": "resource3", + "timestamp_start": 1721032989934855000, + "timestamp_stop": 1721032989934855001, + "processed_frames": 1, + "source_duration": 1, + }, + }, + }, + { + "api2": { + "resource1": { + "api_key": "api2", + "resource_id": "resource1", + "timestamp_start": 1721032989934856002, + "timestamp_stop": 1721032989934856003, + "processed_frames": 1, + "source_duration": 1, + }, + "resource3": { + "api_key": "api2", + "resource_id": "resource3", + "timestamp_start": 1721032989934856000, + "timestamp_stop": 1721032989934856001, + "processed_frames": 1, + "source_duration": 1, + }, + }, + } + ] + + # when + zipped_usage_payloads = UsageCollector._zip_usage_payloads(usage_payloads=dumped_usage_payloads) + + # then + assert zipped_usage_payloads == [{ + "api1": { + "resource1": { + "api_key": "api1", + "resource_id": "resource1", + "timestamp_start": 1721032989934855000, + "timestamp_stop": 1721032989934855003, + "processed_frames": 2, + "source_duration": 2, + }, + "resource2": { + "api_key": "api1", + "resource_id": "resource2", + "timestamp_start": 1721032989934855000, + "timestamp_stop": 1721032989934855001, + "processed_frames": 1, + "source_duration": 1, + }, + "resource3": { + "api_key": "api1", + "resource_id": "resource3", + "timestamp_start": 1721032989934855000, + "timestamp_stop": 1721032989934855001, + "processed_frames": 1, + "source_duration": 1, + }, + }, + "api2": { + "resource1": { + "api_key": "api2", + "resource_id": "resource1", + "timestamp_start": 1721032989934856000, + "timestamp_stop": 1721032989934856003, + "processed_frames": 2, + "source_duration": 2, + }, + "resource2": { + "api_key": "api2", + "resource_id": "resource2", + "timestamp_start": 1721032989934856000, + "timestamp_stop": 1721032989934856001, + "processed_frames": 1, + "source_duration": 1, + 
}, + "resource3": { + "api_key": "api2", + "resource_id": "resource3", + "timestamp_start": 1721032989934856000, + "timestamp_stop": 1721032989934856001, + "processed_frames": 1, + "source_duration": 1, + }, + }, + },] + + +def test_zip_usage_payloads_with_system_info_missing_resource_id_and_no_resource_id_was_collected(): + dumped_usage_payloads = [ + { + "api1": { + None: { + "api_key": "api1", + "resource_id": None, + "timestamp_start": 1721032989934855000, + "is_gpu_available": False, + "python_version": "3.10.0", + "inference_version": "10.10.10", + }, + }, + }, + { + "api2": { + "resource1": { + "api_key": "api2", + "resource_id": "resource1", + "timestamp_start": 1721032989934856002, + "timestamp_stop": 1721032989934856003, + "processed_frames": 1, + "source_duration": 1, + }, + }, + } + ] + + # when + zipped_usage_payloads = UsageCollector._zip_usage_payloads(usage_payloads=dumped_usage_payloads) + + # then + assert zipped_usage_payloads == [{ + "api2": { + "resource1": { + "api_key": "api2", + "resource_id": "resource1", + "timestamp_start": 1721032989934856002, + "timestamp_stop": 1721032989934856003, + "processed_frames": 1, + "source_duration": 1, + }, + }, + },{ + "api1": { + None: { + "api_key": "api1", + "resource_id": None, + "timestamp_start": 1721032989934855000, + "is_gpu_available": False, + "python_version": "3.10.0", + "inference_version": "10.10.10", + }, + }, + }] + + +def test_zip_usage_payloads_with_system_info_missing_resource_id(): + dumped_usage_payloads = [ + { + "api2": { + None: { + "api_key": "api2", + "resource_id": None, + "timestamp_start": 1721032989934855000, + "is_gpu_available": False, + "python_version": "3.10.0", + "inference_version": "10.10.10", + }, + }, + }, + { + "api2": { + "fake:resource1": { + "api_key": "api2", + "resource_id": "resource1", + "category": "fake", + "timestamp_start": 1721032989934856002, + "timestamp_stop": 1721032989934856003, + "processed_frames": 1, + "source_duration": 1, + }, + }, + } + ] + + # 
when + zipped_usage_payloads = UsageCollector._zip_usage_payloads(usage_payloads=dumped_usage_payloads) + + # then + assert zipped_usage_payloads == [{ + "api2": { + "fake:resource1": { + "api_key": "api2", + "resource_id": "resource1", + "category": "fake", + "timestamp_start": 1721032989934855000, + "timestamp_stop": 1721032989934856003, + "processed_frames": 1, + "source_duration": 1, + "is_gpu_available": False, + "python_version": "3.10.0", + "inference_version": "10.10.10", + }, + }, + },] + + +def test_zip_usage_payloads_with_system_info_missing_resource_id_and_api_key(): + dumped_usage_payloads = [ + { + None: { + None: { + "api_key": None, + "resource_id": None, + "timestamp_start": 1721032989934855000, + "is_gpu_available": False, + "python_version": "3.10.0", + "inference_version": "10.10.10", + }, + }, + }, + { + "api2": { + "fake:resource1": { + "api_key": "api2", + "resource_id": "resource1", + "category": "fake", + "timestamp_start": 1721032989934856002, + "timestamp_stop": 1721032989934856003, + "processed_frames": 1, + "source_duration": 1, + }, + }, + } + ] + + # when + zipped_usage_payloads = UsageCollector._zip_usage_payloads(usage_payloads=dumped_usage_payloads) + + # then + assert zipped_usage_payloads == [{ + "api2": { + "fake:resource1": { + "api_key": "api2", + "resource_id": "resource1", + "category": "fake", + "timestamp_start": 1721032989934855000, + "timestamp_stop": 1721032989934856003, + "processed_frames": 1, + "source_duration": 1, + "is_gpu_available": False, + "python_version": "3.10.0", + "inference_version": "10.10.10", + }, + }, + },] + + +def test_system_info(): + # given + system_info = UsageCollector.system_info(exec_session_id="exec_session_id", time_ns=1, ip_address="w.x.y.z") + + # then + expected_system_info = { + "timestamp_start": 1, + "exec_session_id": "exec_session_id", + "ip_address_hash": hashlib.sha256("w.x.y.z".encode()).hexdigest()[:5], + "api_key": None, + "is_gpu_available": False, + } + for k, v in 
expected_system_info.items(): + assert system_info[k] == v diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_custom_python_block.py b/tests/workflows/integration_tests/execution/test_workflow_with_custom_python_block.py new file mode 100644 index 000000000..d74642e2d --- /dev/null +++ b/tests/workflows/integration_tests/execution/test_workflow_with_custom_python_block.py @@ -0,0 +1,926 @@ +from unittest import mock + +import numpy as np +import pytest + +from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS +from inference.core.managers.base import ModelManager +from inference.core.workflows.core_steps.common.entities import StepExecutionMode +from inference.core.workflows.errors import ( + DynamicBlockError, + WorkflowEnvironmentConfigurationError, +) +from inference.core.workflows.execution_engine.core import ExecutionEngine +from inference.core.workflows.execution_engine.dynamic_blocks import block_assembler + +FUNCTION_TO_GET_OVERLAP_OF_BBOXES = """ +def run(self, predictions: sv.Detections, class_x: str, class_y: str) -> BlockResult: + bboxes_class_x = predictions[predictions.data["class_name"] == class_x] + bboxes_class_y = predictions[predictions.data["class_name"] == class_y] + overlap = [] + for bbox_x in bboxes_class_x: + bbox_x_coords = bbox_x[0] + bbox_overlaps = [] + for bbox_y in bboxes_class_y: + if bbox_y[-1]["detection_id"] == bbox_x[-1]["detection_id"]: + continue + bbox_y_coords = bbox_y[0] + x_min = max(bbox_x_coords[0], bbox_y_coords[0]) + y_min = max(bbox_x_coords[1], bbox_y_coords[1]) + x_max = min(bbox_x_coords[2], bbox_y_coords[2]) + y_max = min(bbox_x_coords[3], bbox_y_coords[3]) + # compute the area of intersection rectangle + intersection_area = max(0, x_max - x_min + 1) * max(0, y_max - y_min + 1) + box_x_area = (bbox_x_coords[2] - bbox_x_coords[0] + 1) * (bbox_x_coords[3] - bbox_x_coords[1] + 1) + local_overlap = intersection_area / (box_x_area + 1e-5) + bbox_overlaps.append(local_overlap) + 
overlap.append(bbox_overlaps) + return {"overlap": overlap} +""" + + +FUNCTION_TO_GET_MAXIMUM_OVERLAP = """ +def run(self, overlaps: List[List[float]]) -> BlockResult: + max_value = -1 + for overlap in overlaps: + for overlap_value in overlap: + if not max_value: + max_value = overlap_value + else: + max_value = max(max_value, overlap_value) + return {"max_value": max_value} +""" + +WORKFLOW_WITH_OVERLAP_MEASUREMENT = { + "version": "1.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + ], + "dynamic_blocks_definitions": [ + { + "type": "DynamicBlockDefinition", + "manifest": { + "type": "ManifestDescription", + "block_type": "OverlapMeasurement", + "inputs": { + "predictions": { + "type": "DynamicInputDefinition", + "selector_types": ["step_output"], + }, + "class_x": { + "type": "DynamicInputDefinition", + "value_types": ["string"], + }, + "class_y": { + "type": "DynamicInputDefinition", + "value_types": ["string"], + }, + }, + "outputs": {"overlap": {"type": "DynamicOutputDefinition", "kind": []}}, + }, + "code": { + "type": "PythonCode", + "run_function_code": FUNCTION_TO_GET_OVERLAP_OF_BBOXES, + }, + }, + { + "type": "DynamicBlockDefinition", + "manifest": { + "type": "ManifestDescription", + "block_type": "MaximumOverlap", + "inputs": { + "overlaps": { + "type": "DynamicInputDefinition", + "selector_types": ["step_output"], + }, + }, + "outputs": { + "max_value": {"type": "DynamicOutputDefinition", "kind": []} + }, + }, + "code": { + "type": "PythonCode", + "run_function_code": FUNCTION_TO_GET_MAXIMUM_OVERLAP, + }, + }, + ], + "steps": [ + { + "type": "RoboflowObjectDetectionModel", + "name": "model", + "image": "$inputs.image", + "model_id": "yolov8n-640", + }, + { + "type": "OverlapMeasurement", + "name": "overlap_measurement", + "predictions": "$steps.model.predictions", + "class_x": "dog", + "class_y": "dog", + }, + { + "type": "ContinueIf", + "name": "continue_if", + "condition_statement": { + "type": "StatementGroup", + "statements": [ + { 
+ "type": "BinaryStatement", + "left_operand": { + "type": "DynamicOperand", + "operand_name": "overlaps", + "operations": [{"type": "SequenceLength"}], + }, + "comparator": {"type": "(Number) >="}, + "right_operand": { + "type": "StaticOperand", + "value": 1, + }, + } + ], + }, + "evaluation_parameters": {"overlaps": "$steps.overlap_measurement.overlap"}, + "next_steps": ["$steps.maximum_overlap"], + }, + { + "type": "MaximumOverlap", + "name": "maximum_overlap", + "overlaps": "$steps.overlap_measurement.overlap", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "overlaps", + "selector": "$steps.overlap_measurement.overlap", + }, + { + "type": "JsonField", + "name": "max_overlap", + "selector": "$steps.maximum_overlap.max_value", + }, + ], +} + + +@pytest.mark.asyncio +async def test_workflow_with_custom_python_blocks_measuring_overlap( + model_manager: ModelManager, + dogs_image: np.ndarray, + crowd_image: np.ndarray, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_OVERLAP_MEASUREMENT, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = await execution_engine.run_async( + runtime_parameters={ + "image": [dogs_image, crowd_image], + } + ) + + # then + assert isinstance(result, list), "Expected list to be delivered" + assert len(result) == 2, "Expected 2 elements in the output for two input images" + assert set(result[0].keys()) == { + "overlaps", + "max_overlap", + }, "Expected all declared outputs to be delivered" + assert set(result[1].keys()) == { + "overlaps", + "max_overlap", + }, "Expected all declared outputs to be delivered" + assert ( + len(result[0]["overlaps"]) == 2 + ), "Expected 2 instances of dogs found, each overlap with another for 
first image" + assert ( + abs(result[0]["max_overlap"] - 0.177946) < 1e-3 + ), "Expected max overlap to be calculated properly" + assert ( + len(result[1]["overlaps"]) == 0 + ), "Expected no instances of dogs found for second image" + assert ( + result[1]["max_overlap"] is None + ), "Expected `max_overlap` not to be calculated for second image due to conditional execution" + + +FUNCTION_TO_GET_MAXIMUM_CONFIDENCE_FROM_BATCH_OF_DETECTIONS = """ +def run(self, predictions: Batch[sv.Detections]) -> BlockResult: + result = [] + for prediction in predictions: + result.append({"max_confidence": np.max(prediction.confidence).item()}) + return result +""" + +WORKFLOW_WITH_PYTHON_BLOCK_RUNNING_ON_BATCH = { + "version": "1.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + ], + "dynamic_blocks_definitions": [ + { + "type": "DynamicBlockDefinition", + "manifest": { + "type": "ManifestDescription", + "block_type": "MaxConfidence", + "inputs": { + "predictions": { + "type": "DynamicInputDefinition", + "selector_types": ["step_output"], + }, + }, + "outputs": { + "max_confidence": { + "type": "DynamicOutputDefinition", + "kind": ["float_zero_to_one"], + } + }, + "accepts_batch_input": True, + }, + "code": { + "type": "PythonCode", + "run_function_code": FUNCTION_TO_GET_MAXIMUM_CONFIDENCE_FROM_BATCH_OF_DETECTIONS, + }, + }, + ], + "steps": [ + { + "type": "RoboflowObjectDetectionModel", + "name": "model", + "image": "$inputs.image", + "model_id": "yolov8n-640", + }, + { + "type": "MaxConfidence", + "name": "confidence_aggregation", + "predictions": "$steps.model.predictions", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "max_confidence", + "selector": "$steps.confidence_aggregation.max_confidence", + }, + ], +} + + +@pytest.mark.asyncio +async def test_workflow_with_custom_python_block_operating_on_batch( + model_manager: ModelManager, + dogs_image: np.ndarray, + crowd_image: np.ndarray, +) -> None: + # given + workflow_init_parameters = { + 
"workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_PYTHON_BLOCK_RUNNING_ON_BATCH, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = await execution_engine.run_async( + runtime_parameters={ + "image": [dogs_image, crowd_image], + } + ) + + # then + assert isinstance(result, list), "Expected list to be delivered" + assert len(result) == 2, "Expected 2 elements in the output for two input images" + assert set(result[0].keys()) == { + "max_confidence", + }, "Expected all declared outputs to be delivered" + assert set(result[1].keys()) == { + "max_confidence", + }, "Expected all declared outputs to be delivered" + assert ( + abs(result[0]["max_confidence"] - 0.85599) < 1e-3 + ), "Expected max confidence to be extracted" + assert ( + abs(result[1]["max_confidence"] - 0.84284) < 1e-3 + ), "Expected max confidence to be extracted" + + +FUNCTION_TO_ASSOCIATE_DETECTIONS_FOR_CROPS = """ +def my_function(self, prediction: sv.Detections, crops: Batch[WorkflowImageData]) -> BlockResult: + detection_id2bbox = { + detection_id.item(): i for i, detection_id in enumerate(prediction.data["detection_id"]) + } + results = [] + for crop in crops: + parent_id = crop.parent_metadata.parent_id + results.append({"associated_detections": prediction[detection_id2bbox[parent_id]]}) + return results +""" + + +WORKFLOW_WITH_PYTHON_BLOCK_RUNNING_CROSS_DIMENSIONS = { + "version": "1.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + ], + "dynamic_blocks_definitions": [ + { + "type": "DynamicBlockDefinition", + "manifest": { + "type": "ManifestDescription", + "block_type": "DetectionsToCropsAssociation", + "inputs": { + "prediction": { + "type": "DynamicInputDefinition", + "selector_types": ["step_output"], + "selector_data_kind": { + 
"step_output": [ + "Batch[object_detection_prediction]", + "Batch[instance_segmentation_prediction]", + "Batch[keypoint_detection_prediction]", + ] + }, + }, + "crops": { + "type": "DynamicInputDefinition", + "selector_types": ["step_output_image"], + "is_dimensionality_reference": True, + "dimensionality_offset": 1, + }, + }, + "outputs": { + "associated_detections": { + "type": "DynamicOutputDefinition", + "kind": [ + "Batch[object_detection_prediction]", + "Batch[instance_segmentation_prediction]", + "Batch[keypoint_detection_prediction]", + ], + } + }, + }, + "code": { + "type": "PythonCode", + "run_function_code": FUNCTION_TO_ASSOCIATE_DETECTIONS_FOR_CROPS, + "run_function_name": "my_function", + }, + }, + ], + "steps": [ + { + "type": "RoboflowObjectDetectionModel", + "name": "model", + "image": "$inputs.image", + "model_id": "yolov8n-640", + }, + { + "type": "Crop", + "name": "crop", + "image": "$inputs.image", + "predictions": "$steps.model.predictions", + }, + { + "type": "DetectionsToCropsAssociation", + "name": "detections_associations", + "prediction": "$steps.model.predictions", + "crops": "$steps.crop.crops", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "associated_detections", + "selector": "$steps.detections_associations.associated_detections", + }, + ], +} + + +@pytest.mark.asyncio +async def test_workflow_with_custom_python_block_operating_cross_dimensions( + model_manager: ModelManager, + dogs_image: np.ndarray, + crowd_image: np.ndarray, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_PYTHON_BLOCK_RUNNING_CROSS_DIMENSIONS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = await execution_engine.run_async( + 
runtime_parameters={ + "image": [dogs_image, crowd_image], + } + ) + + # then + assert isinstance(result, list), "Expected list to be delivered" + assert len(result) == 2, "Expected 2 elements in the output for two input images" + assert set(result[0].keys()) == { + "associated_detections", + }, "Expected all declared outputs to be delivered" + assert set(result[1].keys()) == { + "associated_detections", + }, "Expected all declared outputs to be delivered" + assert len(result[1]["associated_detections"]) == 12 + class_names_first_image_crops = [ + e["class_name"].tolist() for e in result[0]["associated_detections"] + ] + for class_names in class_names_first_image_crops: + assert len(class_names) == 1, "Expected single bbox to be associated" + assert len(class_names_first_image_crops) == 2, "Expected 2 crops for first image" + class_names_second_image_crops = [ + e["class_name"].tolist() for e in result[1]["associated_detections"] + ] + for class_names in class_names_second_image_crops: + assert len(class_names) == 1, "Expected single bbox to be associated" + assert ( + len(class_names_second_image_crops) == 12 + ), "Expected 12 crops for second image" + + +@pytest.mark.asyncio +@mock.patch.object(block_assembler, "ALLOW_CUSTOM_PYTHON_EXECUTION_IN_WORKFLOWS", False) +async def test_workflow_with_custom_python_block_when_custom_python_execution_forbidden( + model_manager: ModelManager, + dogs_image: np.ndarray, + crowd_image: np.ndarray, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # when + with pytest.raises(WorkflowEnvironmentConfigurationError): + _ = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_PYTHON_BLOCK_RUNNING_CROSS_DIMENSIONS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + +FUNCTION_TO_MERGE_CROPS_INTO_TILES = """ +def 
run(self, crops: Optional[Batch[Optional[WorkflowImageData]]]) -> BlockResult: + if crops is None: + return {"tiles": None} + black_image = np.zeros((192, 168, 3), dtype=np.uint8) + images = [crop.numpy_image if crop is not None else black_image for crop in crops] + return {"tiles": sv.create_tiles(images)} +""" + + +WORKFLOW_WITH_PYTHON_BLOCK_RUNNING_DIMENSIONALITY_REDUCTION = { + "version": "1.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + ], + "dynamic_blocks_definitions": [ + { + "type": "DynamicBlockDefinition", + "manifest": { + "type": "ManifestDescription", + "block_type": "DimensionalityReduction", + "inputs": { + "crops": { + "type": "DynamicInputDefinition", + "selector_types": ["step_output_image"], + }, + }, + "outputs": {"tiles": {"type": "DynamicOutputDefinition", "kind": []}}, + "output_dimensionality_offset": -1, + "accepts_empty_values": True, + }, + "code": { + "type": "PythonCode", + "run_function_code": FUNCTION_TO_MERGE_CROPS_INTO_TILES, + }, + }, + ], + "steps": [ + { + "type": "RoboflowObjectDetectionModel", + "name": "model", + "image": "$inputs.image", + "model_id": "yolov8n-640", + "class_filter": ["person"], + }, + { + "type": "Crop", + "name": "crop", + "image": "$inputs.image", + "predictions": "$steps.model.predictions", + }, + { + "type": "DimensionalityReduction", + "name": "tile_creation", + "crops": "$steps.crop.crops", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "tiles", + "selector": "$steps.tile_creation.tiles", + }, + ], +} + + +@pytest.mark.asyncio +async def test_workflow_with_custom_python_block_reducing_dimensionality( + model_manager: ModelManager, + dogs_image: np.ndarray, + crowd_image: np.ndarray, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + 
workflow_definition=WORKFLOW_WITH_PYTHON_BLOCK_RUNNING_DIMENSIONALITY_REDUCTION, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = await execution_engine.run_async( + runtime_parameters={ + "image": [dogs_image, crowd_image], + } + ) + + # then + assert isinstance(result, list), "Expected list to be delivered" + assert len(result) == 2, "Expected 2 elements in the output for two input images" + assert set(result[0].keys()) == { + "tiles", + }, "Expected all declared outputs to be delivered" + assert set(result[1].keys()) == { + "tiles", + }, "Expected all declared outputs to be delivered" + assert result[0]["tiles"] is None, "Expected no crops - hence empty output" + assert isinstance(result[1]["tiles"], np.ndarray), "Expected np array with tile" + + +MODEL_INIT_FUNCTION = """ +def init_model() -> Dict[str, Any]: + model = YOLOv8ObjectDetection(model_id="yolov8n-640") + return {"model": model} +""" + +MODEL_INFER_FUNCTION = """ +def infer(self, image: WorkflowImageData) -> BlockResult: + predictions = self._init_results["model"].infer(image.numpy_image) + return {"predictions": sv.Detections.from_inference(predictions[0].model_dump(by_alias=True, exclude_none=True))} +""" + +WORKFLOW_WITH_PYTHON_BLOCK_HOSTING_MODEL = { + "version": "1.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + ], + "dynamic_blocks_definitions": [ + { + "type": "DynamicBlockDefinition", + "manifest": { + "type": "ManifestDescription", + "block_type": "CustomModel", + "inputs": { + "image": { + "type": "DynamicInputDefinition", + "selector_types": ["input_image"], + }, + }, + "outputs": { + "predictions": { + "type": "DynamicOutputDefinition", + "kind": [ + "Batch[object_detection_prediction]", + ], + } + }, + }, + "code": { + "type": "PythonCode", + "run_function_code": MODEL_INFER_FUNCTION, + "run_function_name": "infer", + "init_function_code": MODEL_INIT_FUNCTION, + "init_function_name": 
"init_model", + "imports": [ + "from inference.models.yolov8 import YOLOv8ObjectDetection", + ], + }, + }, + ], + "steps": [ + { + "type": "CustomModel", + "name": "model", + "image": "$inputs.image", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "predictions", + "selector": "$steps.model.predictions", + }, + ], +} + + +@pytest.mark.asyncio +async def test_workflow_with_custom_python_block_running_custom_model( + model_manager: ModelManager, + dogs_image: np.ndarray, + crowd_image: np.ndarray, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_PYTHON_BLOCK_HOSTING_MODEL, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = await execution_engine.run_async( + runtime_parameters={ + "image": [dogs_image, crowd_image], + } + ) + + # then + assert isinstance(result, list), "Expected list to be delivered" + assert len(result) == 2, "Expected 2 elements in the output for two input images" + assert set(result[0].keys()) == { + "predictions", + }, "Expected all declared outputs to be delivered" + assert set(result[1].keys()) == { + "predictions", + }, "Expected all declared outputs to be delivered" + assert np.allclose( + result[0]["predictions"].confidence, + [0.85599, 0.50392], + atol=1e-3, + ), "Expected reproducible predictions for first image" + assert np.allclose( + result[1]["predictions"].confidence, + [ + 0.84284, + 0.83957, + 0.81555, + 0.80455, + 0.75804, + 0.75794, + 0.71715, + 0.71408, + 0.71003, + 0.56938, + 0.54092, + 0.43511, + ], + atol=1e-3, + ), "Expected reproducible predictions for second image" + + +BROKEN_RUN_FUNCTION = """ +def run(some: InvalidType): + pass +""" + + +WORKFLOW_WITH_CODE_THAT_DOES_NOT_COMPILE = { + "version": "1.0", + 
"inputs": [ + {"type": "WorkflowImage", "name": "image"}, + ], + "dynamic_blocks_definitions": [ + { + "type": "DynamicBlockDefinition", + "manifest": { + "type": "ManifestDescription", + "block_type": "CustomModel", + "inputs": { + "image": { + "type": "DynamicInputDefinition", + "selector_types": ["input_image"], + }, + }, + "outputs": { + "predictions": { + "type": "DynamicOutputDefinition", + "kind": [], + } + }, + }, + "code": { + "type": "PythonCode", + "run_function_code": BROKEN_RUN_FUNCTION, + }, + }, + ], + "steps": [ + { + "type": "CustomModel", + "name": "model", + "image": "$inputs.image", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "predictions", + "selector": "$steps.model.predictions", + }, + ], +} + + +@pytest.mark.asyncio +async def test_workflow_with_custom_python_block_when_code_cannot_be_compiled( + model_manager: ModelManager, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # when + with pytest.raises(DynamicBlockError): + _ = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_CODE_THAT_DOES_NOT_COMPILE, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + +WORKFLOW_WITHOUT_RUN_FUNCTION = { + "version": "1.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + ], + "dynamic_blocks_definitions": [ + { + "type": "DynamicBlockDefinition", + "manifest": { + "type": "ManifestDescription", + "block_type": "CustomModel", + "inputs": { + "image": { + "type": "DynamicInputDefinition", + "selector_types": ["input_image"], + }, + }, + "outputs": { + "predictions": { + "type": "DynamicOutputDefinition", + "kind": [], + } + }, + }, + "code": { + "type": "PythonCode", + "run_function_code": "", + }, + }, + ], + "steps": [ + { + "type": "CustomModel", + "name": "model", + "image": "$inputs.image", + }, + ], + 
"outputs": [ + { + "type": "JsonField", + "name": "predictions", + "selector": "$steps.model.predictions", + }, + ], +} + + +@pytest.mark.asyncio +async def test_workflow_with_custom_python_block_when_code_does_not_define_declared_run_function( + model_manager: ModelManager, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # when + with pytest.raises(DynamicBlockError): + _ = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITHOUT_RUN_FUNCTION, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + +WORKFLOW_WITHOUT_DECLARED_INIT_FUNCTION = { + "version": "1.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + ], + "dynamic_blocks_definitions": [ + { + "type": "DynamicBlockDefinition", + "manifest": { + "type": "ManifestDescription", + "block_type": "CustomModel", + "inputs": { + "image": { + "type": "DynamicInputDefinition", + "selector_types": ["input_image"], + }, + }, + "outputs": { + "predictions": { + "type": "DynamicOutputDefinition", + "kind": [], + } + }, + }, + "code": { + "type": "PythonCode", + "run_function_code": MODEL_INFER_FUNCTION, + "run_function_name": "infer", + "init_function_code": "", + "init_function_name": "init_model", + "imports": [ + "from inference.models.yolov8 import YOLOv8ObjectDetection", + ], + }, + }, + ], + "steps": [ + { + "type": "CustomModel", + "name": "model", + "image": "$inputs.image", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "predictions", + "selector": "$steps.model.predictions", + }, + ], +} + + +@pytest.mark.asyncio +async def test_workflow_with_custom_python_block_when_code_does_not_define_declared_init_function( + model_manager: ModelManager, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": 
None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # when + with pytest.raises(DynamicBlockError): + _ = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITHOUT_DECLARED_INIT_FUNCTION, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) diff --git a/tests/workflows/unit_tests/core_steps/formatters/test_property_extraction.py b/tests/workflows/unit_tests/core_steps/formatters/test_property_extraction.py index 013bdb7eb..db08c2cb9 100644 --- a/tests/workflows/unit_tests/core_steps/formatters/test_property_extraction.py +++ b/tests/workflows/unit_tests/core_steps/formatters/test_property_extraction.py @@ -2,8 +2,8 @@ from inference.core.entities.responses.inference import ( ClassificationInferenceResponse, - InferenceResponseImage, ClassificationPrediction, + InferenceResponseImage, ) from inference.core.workflows.core_steps.common.query_language.entities.operations import ( OperationsChain, diff --git a/tests/workflows/unit_tests/core_steps/fusion/test_detections_classes_replacement.py b/tests/workflows/unit_tests/core_steps/fusion/test_detections_classes_replacement.py index 944a9ec1d..a4d9a9367 100644 --- a/tests/workflows/unit_tests/core_steps/fusion/test_detections_classes_replacement.py +++ b/tests/workflows/unit_tests/core_steps/fusion/test_detections_classes_replacement.py @@ -1,15 +1,20 @@ import numpy as np import pytest - import supervision as sv from supervision.config import CLASS_NAME_DATA_FIELD -from inference.core.entities.responses.inference import MultiLabelClassificationInferenceResponse, \ - InferenceResponseImage, MultiLabelClassificationPrediction, ClassificationInferenceResponse, \ - ClassificationPrediction +from inference.core.entities.responses.inference import ( + ClassificationInferenceResponse, + ClassificationPrediction, + InferenceResponseImage, + MultiLabelClassificationInferenceResponse, + MultiLabelClassificationPrediction, +) from 
inference.core.workflows.constants import DETECTION_ID_KEY -from inference.core.workflows.core_steps.fusion.detections_classes_replacement import DetectionsClassesReplacementBlock, \ - extract_leading_class_from_prediction +from inference.core.workflows.core_steps.fusion.detections_classes_replacement import ( + DetectionsClassesReplacementBlock, + extract_leading_class_from_prediction, +) from inference.core.workflows.entities.base import Batch @@ -25,7 +30,9 @@ async def test_classes_replacement_when_object_detection_object_is_none() -> Non ) # then - assert result == {"predictions": None}, "object_detection_predictions is superior object so lack of value means lack of output" + assert result == { + "predictions": None + }, "object_detection_predictions is superior object so lack of value means lack of output" @pytest.mark.asyncio @@ -43,24 +50,30 @@ async def test_classes_replacement_when_there_are_no_predictions_is_none() -> No ) # then - assert result == {"predictions": sv.Detections.empty()}, "classification_predictions is inferior object so lack of value means empty output" + assert result == { + "predictions": sv.Detections.empty() + }, "classification_predictions is inferior object so lack of value means empty output" @pytest.mark.asyncio -async def test_classes_replacement_when_replacement_to_happen_without_filtering_for_multi_label_results() -> None: +async def test_classes_replacement_when_replacement_to_happen_without_filtering_for_multi_label_results() -> ( + None +): # given step = DetectionsClassesReplacementBlock() detections = sv.Detections( - xyxy=np.array([ - [10, 20, 30, 40], - [11, 21, 31, 41], - ]), + xyxy=np.array( + [ + [10, 20, 30, 40], + [11, 21, 31, 41], + ] + ), class_id=np.array([7, 7]), confidence=np.array([0.36, 0.91]), data={ "class_name": np.array(["animal", "animal"]), - "detection_id": np.array(["zero", "one"]) - } + "detection_id": np.array(["zero", "one"]), + }, ) first_cls_prediction = 
MultiLabelClassificationInferenceResponse( image=InferenceResponseImage(width=128, height=256), @@ -85,7 +98,7 @@ async def test_classes_replacement_when_replacement_to_happen_without_filtering_ first_cls_prediction, second_cls_prediction, ], - indices=[(0, 0), (0, 1)] + indices=[(0, 0), (0, 1)], ) # when @@ -95,28 +108,44 @@ async def test_classes_replacement_when_replacement_to_happen_without_filtering_ ) # then - assert np.allclose(result["predictions"].xyxy, np.array([[10, 20, 30, 40], [11, 21, 31, 41]])), "Expected coordinates not to be touched" - assert np.allclose(result["predictions"].confidence, np.array([0.6, 0.4])), "Expected to choose [cat, dog] confidences" - assert np.allclose(result["predictions"].class_id, np.array([0, 1])), "Expected to choose [cat, dog] class ids" - assert result["predictions"].data["class_name"].tolist() == ["cat", "dog"], "Expected cat class to be assigned" - assert result["predictions"].data["detection_id"].tolist() != ["zero", "one"], "Expected to generate new detection id" + assert np.allclose( + result["predictions"].xyxy, np.array([[10, 20, 30, 40], [11, 21, 31, 41]]) + ), "Expected coordinates not to be touched" + assert np.allclose( + result["predictions"].confidence, np.array([0.6, 0.4]) + ), "Expected to choose [cat, dog] confidences" + assert np.allclose( + result["predictions"].class_id, np.array([0, 1]) + ), "Expected to choose [cat, dog] class ids" + assert result["predictions"].data["class_name"].tolist() == [ + "cat", + "dog", + ], "Expected cat class to be assigned" + assert result["predictions"].data["detection_id"].tolist() != [ + "zero", + "one", + ], "Expected to generate new detection id" @pytest.mark.asyncio -async def test_classes_replacement_when_replacement_to_happen_without_filtering_for_multi_class_results() -> None: +async def test_classes_replacement_when_replacement_to_happen_without_filtering_for_multi_class_results() -> ( + None +): # given step = DetectionsClassesReplacementBlock() detections = 
sv.Detections( - xyxy=np.array([ - [10, 20, 30, 40], - [11, 21, 31, 41], - ]), + xyxy=np.array( + [ + [10, 20, 30, 40], + [11, 21, 31, 41], + ] + ), class_id=np.array([7, 7]), confidence=np.array([0.36, 0.91]), data={ "class_name": np.array(["animal", "animal"]), - "detection_id": np.array(["zero", "one"]) - } + "detection_id": np.array(["zero", "one"]), + }, ) first_cls_prediction = ClassificationInferenceResponse( image=InferenceResponseImage(width=128, height=256), @@ -153,7 +182,7 @@ async def test_classes_replacement_when_replacement_to_happen_without_filtering_ first_cls_prediction, second_cls_prediction, ], - indices=[(0, 0), (0, 1)] + indices=[(0, 0), (0, 1)], ) # when @@ -163,28 +192,44 @@ async def test_classes_replacement_when_replacement_to_happen_without_filtering_ ) # then - assert np.allclose(result["predictions"].xyxy, np.array([[10, 20, 30, 40], [11, 21, 31, 41]])), "Expected coordinates not to be touched" - assert np.allclose(result["predictions"].confidence, np.array([0.6, 0.6])), "Expected to choose [cat, dog] confidences" - assert np.allclose(result["predictions"].class_id, np.array([0, 1])), "Expected to choose [cat, dog] class ids" - assert result["predictions"].data["class_name"].tolist() == ["cat", "dog"], "Expected cat class to be assigned" - assert result["predictions"].data["detection_id"].tolist() != ["zero", "one"], "Expected to generate new detection id" + assert np.allclose( + result["predictions"].xyxy, np.array([[10, 20, 30, 40], [11, 21, 31, 41]]) + ), "Expected coordinates not to be touched" + assert np.allclose( + result["predictions"].confidence, np.array([0.6, 0.6]) + ), "Expected to choose [cat, dog] confidences" + assert np.allclose( + result["predictions"].class_id, np.array([0, 1]) + ), "Expected to choose [cat, dog] class ids" + assert result["predictions"].data["class_name"].tolist() == [ + "cat", + "dog", + ], "Expected cat class to be assigned" + assert result["predictions"].data["detection_id"].tolist() != [ + "zero", 
+ "one", + ], "Expected to generate new detection id" @pytest.mark.asyncio -async def test_classes_replacement_when_replacement_to_happen_and_one_result_to_be_filtered_out() -> None: +async def test_classes_replacement_when_replacement_to_happen_and_one_result_to_be_filtered_out() -> ( + None +): # given step = DetectionsClassesReplacementBlock() detections = sv.Detections( - xyxy=np.array([ - [10, 20, 30, 40], - [11, 21, 31, 41], - ]), + xyxy=np.array( + [ + [10, 20, 30, 40], + [11, 21, 31, 41], + ] + ), class_id=np.array([7, 7]), confidence=np.array([0.36, 0.91]), data={ "class_name": np.array(["animal", "animal"]), - "detection_id": np.array(["zero", "one"]) - } + "detection_id": np.array(["zero", "one"]), + }, ) first_cls_prediction = MultiLabelClassificationInferenceResponse( image=InferenceResponseImage(width=128, height=256), @@ -200,7 +245,7 @@ async def test_classes_replacement_when_replacement_to_happen_and_one_result_to_ first_cls_prediction, None, ], - indices=[(0, 0), (0, 1)] + indices=[(0, 0), (0, 1)], ) # when @@ -210,13 +255,27 @@ async def test_classes_replacement_when_replacement_to_happen_and_one_result_to_ ) # then - assert len(result["predictions"]) == 1, "Expected only one bbox left, as there was mo cls result for second bbox" - assert np.allclose(result["predictions"].xyxy, np.array([[10, 20, 30, 40]])), "Expected first bbox to be left" - assert np.allclose(result["predictions"].confidence, np.array([0.6])), "Expected to choose cat confidence" - assert np.allclose(result["predictions"].class_id, np.array([0])), "Expected to choose cat class id" - assert result["predictions"].data["class_name"].tolist() == ["cat"], "Expected cat class to be assigned" - assert len(result["predictions"].data["detection_id"]) == 1, "Expected only single detection_id" - assert result["predictions"].data["detection_id"].tolist() != ["zero"], "Expected to generate new detection id" + assert ( + len(result["predictions"]) == 1 + ), "Expected only one bbox left, as 
there was mo cls result for second bbox" + assert np.allclose( + result["predictions"].xyxy, np.array([[10, 20, 30, 40]]) + ), "Expected first bbox to be left" + assert np.allclose( + result["predictions"].confidence, np.array([0.6]) + ), "Expected to choose cat confidence" + assert np.allclose( + result["predictions"].class_id, np.array([0]) + ), "Expected to choose cat class id" + assert result["predictions"].data["class_name"].tolist() == [ + "cat" + ], "Expected cat class to be assigned" + assert ( + len(result["predictions"].data["detection_id"]) == 1 + ), "Expected only single detection_id" + assert result["predictions"].data["detection_id"].tolist() != [ + "zero" + ], "Expected to generate new detection id" def test_extract_leading_class_from_prediction_when_prediction_is_multi_label() -> None: @@ -243,7 +302,9 @@ def test_extract_leading_class_from_prediction_when_prediction_is_multi_label() assert result == ("cat", 0, 0.6) -def test_extract_leading_class_from_prediction_when_prediction_is_faulty_multi_label() -> None: +def test_extract_leading_class_from_prediction_when_prediction_is_faulty_multi_label() -> ( + None +): # given prediction = ClassificationInferenceResponse( image=InferenceResponseImage(width=128, height=256), @@ -265,7 +326,9 @@ def test_extract_leading_class_from_prediction_when_prediction_is_faulty_multi_l _ = extract_leading_class_from_prediction(prediction=prediction) -def test_extract_leading_class_from_prediction_when_prediction_is_multi_class_with_predicted_classes() -> None: +def test_extract_leading_class_from_prediction_when_prediction_is_multi_class_with_predicted_classes() -> ( + None +): # given prediction = MultiLabelClassificationInferenceResponse( image=InferenceResponseImage(width=128, height=256), @@ -283,7 +346,9 @@ def test_extract_leading_class_from_prediction_when_prediction_is_multi_class_wi assert result == ("cat", 0, 0.6) -def 
test_extract_leading_class_from_prediction_when_prediction_is_multi_class_without_predicted_classes() -> None: +def test_extract_leading_class_from_prediction_when_prediction_is_multi_class_without_predicted_classes() -> ( + None +): # given prediction = MultiLabelClassificationInferenceResponse( image=InferenceResponseImage(width=128, height=256), @@ -301,12 +366,13 @@ def test_extract_leading_class_from_prediction_when_prediction_is_multi_class_wi assert result is None -def test_extract_leading_class_from_prediction_when_prediction_is_multi_class_without_classes_defined() -> None: +def test_extract_leading_class_from_prediction_when_prediction_is_multi_class_without_classes_defined() -> ( + None +): # given prediction = MultiLabelClassificationInferenceResponse( image=InferenceResponseImage(width=128, height=256), - predictions={ - }, + predictions={}, predicted_classes=[], ).dict(by_alias=True, exclude_none=True) diff --git a/tests/workflows/unit_tests/core_steps/fusion/test_domension_collapse.py b/tests/workflows/unit_tests/core_steps/fusion/test_domension_collapse.py index 1b7907a82..209d6e4be 100644 --- a/tests/workflows/unit_tests/core_steps/fusion/test_domension_collapse.py +++ b/tests/workflows/unit_tests/core_steps/fusion/test_domension_collapse.py @@ -1,6 +1,8 @@ import pytest -from inference.core.workflows.core_steps.fusion.dimension_collapse import DimensionCollapseBlock +from inference.core.workflows.core_steps.fusion.dimension_collapse import ( + DimensionCollapseBlock, +) from inference.core.workflows.entities.base import Batch @@ -8,10 +10,7 @@ async def test_dimension_collapse() -> None: # given step = DimensionCollapseBlock() - data = Batch( - content=[1, 2, 3, 4], - indices=[(0, 1), (0, 2), (0, 3), (0, 4)] - ) + data = Batch(content=[1, 2, 3, 4], indices=[(0, 1), (0, 2), (0, 3), (0, 4)]) # when result = await step.run(data=data) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/__init__.py 
b/tests/workflows/unit_tests/core_steps/visualizations/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_blur.py b/tests/workflows/unit_tests/core_steps/visualizations/test_blur.py new file mode 100644 index 000000000..0b9266dfb --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_blur.py @@ -0,0 +1,83 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.blur import ( + BlurManifest, + BlurVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_blur_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "BlurVisualization", + "name": "blur1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "kernel_size": 5 + } + + # when + result = BlurManifest.model_validate(data) + + # then + assert result == BlurManifest( + type="BlurVisualization", + name="blur1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + kernel_size=5 + ) + + +def test_blur_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "BlurVisualization", + "name": "blur1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "kernel_size": 5 + } + + # when + with pytest.raises(ValidationError): + _ = BlurManifest.model_validate(data) + +@pytest.mark.asyncio +async def test_blur_visualization_block() -> None: + # given + block = BlurVisualizationBlock() + + start_image = np.random.randint(0, 255, (1000, 1000, 3), dtype=np.uint8) + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=start_image, + ), + 
predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + kernel_size=5 + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, start_image) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_bounding_box.py b/tests/workflows/unit_tests/core_steps/visualizations/test_bounding_box.py new file mode 100644 index 000000000..87a77b795 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_bounding_box.py @@ -0,0 +1,138 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.bounding_box import ( + BoundingBoxManifest, + BoundingBoxVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + WorkflowImageData, +) +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_bounding_box_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "BoundingBoxVisualization", + "name": "square1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "thickness": 1, + "roundness": 0 + } + + # when + result = BoundingBoxManifest.model_validate(data) + + # then + assert result == BoundingBoxManifest( + type="BoundingBoxVisualization", + name="square1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + thickness=1, + roundness=0 + ) + +def test_bounding_box_validation_when_invalid_image_is_given() -> None: + # 
given + data = { + "type": "BoundingBoxVisualization", + "name": "square1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "thickness": 1, + "roundness": 0 + } + + # when + with pytest.raises(ValidationError): + _ = BoundingBoxManifest.model_validate(data) + +@pytest.mark.asyncio +async def test_bounding_box_visualization_block() -> None: + # given + block = BoundingBoxVisualizationBlock() + + start_image = np.zeros((1000, 1000, 3), dtype=np.uint8) + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=start_image, + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="DEFAULT", + palette_size=10, + custom_colors=None, + color_axis="CLASS", + thickness=1, + roundness=0, + ) + + print("output", output) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) + + # check that the image is copied + assert output.get("image").numpy_image.__array_interface__['data'][0] != start_image.__array_interface__['data'][0] + +@pytest.mark.asyncio +async def test_bounding_box_visualization_block_nocopy() -> None: + # given + block = BoundingBoxVisualizationBlock() + + start_image = np.zeros((1000, 1000, 3), dtype=np.uint8) + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=start_image, + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + 
copy_image=False, + color_palette="DEFAULT", + palette_size=10, + custom_colors=None, + color_axis="CLASS", + thickness=1, + roundness=0, + ) + + print("output", output) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) + + # check if the image reference references the same memory space as the start_image + assert output.get("image").numpy_image.__array_interface__['data'][0] == start_image.__array_interface__['data'][0] \ No newline at end of file diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_circle.py b/tests/workflows/unit_tests/core_steps/visualizations/test_circle.py new file mode 100644 index 000000000..4c3f07b86 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_circle.py @@ -0,0 +1,86 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.circle import ( + CircleManifest, + CircleVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_circle_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "CircleVisualization", + "name": "circle1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "thickness": 10 + } + + # when + result = CircleManifest.model_validate(data) + + # then + assert result == CircleManifest( + type="CircleVisualization", + name="circle1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + thickness=10 + ) + + +def 
test_circle_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "CircleVisualization", + "name": "circle1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "thickness": 10 + } + + # when + with pytest.raises(ValidationError): + _ = CircleManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_circle_visualization_block() -> None: + # given + block = CircleVisualizationBlock() + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="DEFAULT", + palette_size=10, + custom_colors=None, + color_axis="CLASS", + thickness=10, + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_color.py b/tests/workflows/unit_tests/core_steps/visualizations/test_color.py new file mode 100644 index 000000000..e4b057fbb --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_color.py @@ -0,0 +1,86 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.color import ( + ColorManifest, + ColorVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def 
test_color_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "ColorVisualization", + "name": "color1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "opacity": 0.5 + } + + # when + result = ColorManifest.model_validate(data) + + # then + assert result == ColorManifest( + type="ColorVisualization", + name="color1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + opacity=0.5 + ) + + +def test_color_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "ColorVisualization", + "name": "color1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "opacity": 0.5 + } + + # when + with pytest.raises(ValidationError): + _ = ColorManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_color_visualization_block() -> None: + # given + block = ColorVisualizationBlock() + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="DEFAULT", + palette_size=10, + custom_colors=None, + color_axis="CLASS", + opacity=0.5, + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_corner.py b/tests/workflows/unit_tests/core_steps/visualizations/test_corner.py new file mode 100644 index 000000000..1104e544a --- /dev/null +++ 
b/tests/workflows/unit_tests/core_steps/visualizations/test_corner.py @@ -0,0 +1,87 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.corner import ( + CornerManifest, + CornerVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_corner_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "CornerVisualization", + "name": "corner1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "corner_length": 5 + } + + # when + result = CornerManifest.model_validate(data) + + # then + assert result == CornerManifest( + type="CornerVisualization", + name="corner1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + corner_length=5 + ) + + +def test_corner_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "CornerVisualization", + "name": "corner1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "corner_length": 5 + } + + # when + with pytest.raises(ValidationError): + _ = CornerManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_corner_visualization_block() -> None: + # given + block = CornerVisualizationBlock() + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="DEFAULT", + palette_size=10, + custom_colors=None, + color_axis="CLASS", + thickness=2, + corner_length=5, + ) + + assert output is not None + assert 
"image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_crop.py b/tests/workflows/unit_tests/core_steps/visualizations/test_crop.py new file mode 100644 index 000000000..24a5c3e87 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_crop.py @@ -0,0 +1,94 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.crop import ( + CropManifest, + CropVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_crop_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "CropVisualization", + "name": "crop1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "position": 'TOP_CENTER', + "scale_factor": 2.0, + "border_thickness": 2 + } + + # when + result = CropManifest.model_validate(data) + + # then + assert result == CropManifest( + type="CropVisualization", + name="crop1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + position='TOP_CENTER', + scale_factor=2.0, + border_thickness=2 + ) + + +def test_crop_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "CropVisualization", + "name": "crop1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "position": 'TOP_CENTER', + "scale_factor": 2.0, + "border_thickness": 2 + } + + # when + with pytest.raises(ValidationError): + _ = CropManifest.model_validate(data) + + 
+@pytest.mark.asyncio +async def test_crop_visualization_block() -> None: + # given + block = CropVisualizationBlock() + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="DEFAULT", + palette_size=10, + custom_colors=None, + color_axis="CLASS", + position='TOP_CENTER', + scale_factor=2.0, + border_thickness=2 + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_dot.py b/tests/workflows/unit_tests/core_steps/visualizations/test_dot.py new file mode 100644 index 000000000..be614cb16 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_dot.py @@ -0,0 +1,94 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.dot import ( + DotManifest, + DotVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_dot_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "DotVisualization", + "name": "dot1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "position": "CENTER", + "radius": 5, + "outline_thickness": 1 + } + + # 
when + result = DotManifest.model_validate(data) + + # then + assert result == DotManifest( + type="DotVisualization", + name="dot1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + position="CENTER", + radius=5, + outline_thickness=1 + ) + + +def test_dot_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "DotVisualization", + "name": "dot1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "position": "CENTER", + "radius": 5, + "outline_thickness": 1 + } + + # when + with pytest.raises(ValidationError): + _ = DotManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_dot_visualization_block() -> None: + # given + block = DotVisualizationBlock() + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="viridis", + palette_size=10, + custom_colors=None, + color_axis="CLASS", + position="CENTER", + radius=5, + outline_thickness=1 + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_ellipse.py b/tests/workflows/unit_tests/core_steps/visualizations/test_ellipse.py new file mode 100644 index 000000000..a9a37a4a4 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_ellipse.py @@ -0,0 +1,94 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic 
import ValidationError + +from inference.core.workflows.core_steps.visualizations.ellipse import ( + EllipseManifest, + EllipseVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_ellipse_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "EllipseVisualization", + "name": "ellipse1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "thickness": 2, + "start_angle": -45, + "end_angle": 235 + } + + # when + result = EllipseManifest.model_validate(data) + + # then + assert result == EllipseManifest( + type="EllipseVisualization", + name="ellipse1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + thickness=2, + start_angle=-45, + end_angle=235 + ) + + +def test_ellipse_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "EllipseVisualization", + "name": "ellipse1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "thickness": 2, + "start_angle": -45, + "end_angle": 235 + } + + # when + with pytest.raises(ValidationError): + _ = EllipseManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_ellipse_visualization_block() -> None: + # given + block = EllipseVisualizationBlock() + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="CUSTOM", + palette_size=1, + custom_colors=["#FF0000"], + color_axis="CLASS", + thickness=2, + start_angle=-45, + end_angle=235 + ) + + assert output is not None + assert "image" in output + 
assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_halo.py b/tests/workflows/unit_tests/core_steps/visualizations/test_halo.py new file mode 100644 index 000000000..1c5b3b92f --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_halo.py @@ -0,0 +1,96 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.halo import ( + HaloManifest, + HaloVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_halo_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "HaloVisualization", + "name": "halo1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "opacity": 0.8, + "kernel_size": 40 + } + + # when + result = HaloManifest.model_validate(data) + + # then + assert result == HaloManifest( + type="HaloVisualization", + name="halo1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + opacity=0.8, + kernel_size=40 + ) + + +def test_halo_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "HaloVisualization", + "name": "halo1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "opacity": 0.8, + "kernel_size": 40 + } + + # when + with pytest.raises(ValidationError): + _ = HaloManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_halo_visualization_block() -> None: + # given + block = HaloVisualizationBlock() + + mask 
= np.zeros((3, 1000, 1000), dtype=np.bool_) + mask[0, 0:20, 0:20] = True + mask[1, 80:120, 80:120] = True + mask[2, 450:550, 450:550] = True + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + mask=mask, + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="DEFAULT", + palette_size=10, + custom_colors=[], + color_axis="CLASS", + opacity=0.8, + kernel_size=40 + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_label.py b/tests/workflows/unit_tests/core_steps/visualizations/test_label.py new file mode 100644 index 000000000..29bf42d11 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_label.py @@ -0,0 +1,110 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.label import ( + LabelManifest, + LabelVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_label_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "LabelVisualization", + "name": "label1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "text": "Class", + "text_position": "TOP_LEFT", + 
"text_color": "WHITE", + "text_scale": 1.0, + "text_thickness": 1, + "text_padding": 10, + "border_radius": 0 + } + + # when + result = LabelManifest.model_validate(data) + + # then + assert result == LabelManifest( + type="LabelVisualization", + name="label1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + text='Class', + text_position='TOP_LEFT', + text_color='WHITE', + text_scale=1.0, + text_thickness=1, + text_padding=10, + border_radius=0 + ) + + +def test_label_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "LabelVisualization", + "name": "label1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "text": "Class", + "text_position": "TOP_LEFT", + "text_color": "WHITE", + "text_scale": 1.0, + "text_thickness": 1, + "text_padding": 10, + "border_radius": 0 + } + + # when + with pytest.raises(ValidationError): + _ = LabelManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_label_visualization_block() -> None: + # given + block = LabelVisualizationBlock() + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="DEFAULT", + palette_size=10, + custom_colors=None, + color_axis="CLASS", + text='Class', + text_position='TOP_LEFT', + text_color='WHITE', + text_scale=1.0, + text_thickness=1, + text_padding=10, + border_radius=0 + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 
1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_mask.py b/tests/workflows/unit_tests/core_steps/visualizations/test_mask.py new file mode 100644 index 000000000..97778a714 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_mask.py @@ -0,0 +1,93 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.mask import ( + MaskManifest, + MaskVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_mask_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "MaskVisualization", + "name": "mask1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "opacity": 0.5 + } + + # when + result = MaskManifest.model_validate(data) + + # then + assert result == MaskManifest( + type="MaskVisualization", + name="mask1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + opacity=0.5 + ) + + +def test_mask_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "MaskVisualization", + "name": "mask1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "opacity": 0.5 + } + + # when + with pytest.raises(ValidationError): + _ = MaskManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_mask_visualization_block() -> None: + # given + block = MaskVisualizationBlock() + + mask = np.zeros((3, 1000, 1000), dtype=np.bool_) + mask[0, 0:20, 0:20] = True + mask[1, 80:120, 80:120] = True + mask[2, 450:550, 450:550] = True + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + 
predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + mask=mask, + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="viridis", + palette_size=10, + custom_colors=["#000000", "#FFFFFF"], + color_axis="CLASS", + opacity=0.5 + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_pixelate.py b/tests/workflows/unit_tests/core_steps/visualizations/test_pixelate.py new file mode 100644 index 000000000..433371739 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_pixelate.py @@ -0,0 +1,84 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.pixelate import ( + PixelateManifest, + PixelateVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + WorkflowImageData, + ImageParentMetadata, +) + + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_pixelate_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "PixelateVisualization", + "name": "pixelate1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "pixel_size": 10 + } + + # when + result = PixelateManifest.model_validate(data) + + # then + assert result == PixelateManifest( + type="PixelateVisualization", + name="pixelate1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + pixel_size=10 + ) + + +def test_pixelate_validation_when_invalid_image_is_given() -> None: + 
# given + data = { + "type": "PixelateVisualization", + "name": "pixelate1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "pixel_size": 10 + } + + # when + with pytest.raises(ValidationError): + _ = PixelateManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_pixelate_visualization_block() -> None: + # given + block = PixelateVisualizationBlock() + + start_image = np.random.randint(0, 255, (1000, 1000, 3), dtype=np.uint8) + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=start_image, + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + pixel_size=10, + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, start_image) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_polygon.py b/tests/workflows/unit_tests/core_steps/visualizations/test_polygon.py new file mode 100644 index 000000000..a1cdd0afd --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_polygon.py @@ -0,0 +1,93 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.polygon import ( + PolygonManifest, + PolygonVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_polygon_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": 
"PolygonVisualization", + "name": "polygon1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "thickness": 2 + } + + # when + result = PolygonManifest.model_validate(data) + + # then + assert result == PolygonManifest( + type="PolygonVisualization", + name="polygon1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + thickness=2 + ) + + +def test_polygon_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "PolygonVisualization", + "name": "polygon1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "thickness": 2 + } + + # when + with pytest.raises(ValidationError): + _ = PolygonManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_polygon_visualization_block() -> None: + # given + block = PolygonVisualizationBlock() + + mask = np.zeros((3, 1000, 1000), dtype=np.bool_) + mask[0, 0:20, 0:20] = True + mask[1, 80:120, 80:120] = True + mask[2, 450:550, 450:550] = True + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + mask=mask, + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="tab10", + palette_size=10, + custom_colors=["#FF0000", "#00FF00", "#0000FF"], + color_axis="CLASS", + thickness=2 + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_str_to_color.py 
b/tests/workflows/unit_tests/core_steps/visualizations/test_str_to_color.py new file mode 100644 index 000000000..c95efe9de --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_str_to_color.py @@ -0,0 +1,56 @@ +import pytest +import supervision as sv + +from inference.core.workflows.core_steps.visualizations.utils import ( + str_to_color +) + +def test_str_to_color_with_hex_color() -> None: + # given + color = "#FF0000" + + # when + result = str_to_color(color) + + # then + assert result == sv.Color.from_hex(color) + +def test_str_to_color_with_rgb_color() -> None: + # given + color = "rgb(255, 0, 0)" + expected_color = sv.Color.from_rgb_tuple((255, 0, 0)) + + # when + result = str_to_color(color) + + # then + assert result == expected_color + +def test_str_to_color_with_bgr_color() -> None: + # given + color = "bgr(0, 0, 255)" + expected_color = sv.Color.from_bgr_tuple((0, 0, 255)) + + # when + result = str_to_color(color) + + # then + assert result == expected_color + +def test_str_to_color_with_color_name() -> None: + # given + color = "WHITE" + + # when + result = str_to_color(color) + + # then + assert result == sv.Color.WHITE + +def test_str_to_color_with_invalid_color() -> None: + # given + color = "invalid" + + # when + with pytest.raises(ValueError): + _ = str_to_color(color) diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_triangle.py b/tests/workflows/unit_tests/core_steps/visualizations/test_triangle.py new file mode 100644 index 000000000..4bec9deb7 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_triangle.py @@ -0,0 +1,99 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.triangle import ( + TriangleManifest, + TriangleVisualizationBlock, +) + +from inference.core.workflows.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + + 
+@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_triangle_validation_when_valid_manifest_is_given(images_field_alias: str) -> None: + # given + data = { + "type": "TriangleVisualization", + "name": "triangle1", + "predictions": "$steps.od_model.predictions", + images_field_alias: "$inputs.image", + "position": "TOP_CENTER", + "base": 30, + "height": 30, + "outline_thickness": 1 + } + + # when + result = TriangleManifest.model_validate(data) + + # then + assert result == TriangleManifest( + type="TriangleVisualization", + name="triangle1", + images="$inputs.image", + predictions="$steps.od_model.predictions", + position="TOP_CENTER", + base=30, + height=30, + outline_thickness=1 + ) + + +def test_triangle_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "TriangleVisualization", + "name": "triangle1", + "images": "invalid", + "predictions": "$steps.od_model.predictions", + "position": "TOP_CENTER", + "base": 30, + "height": 30, + "outline_thickness": 1 + } + + # when + with pytest.raises(ValidationError): + _ = TriangleManifest.model_validate(data) + + +@pytest.mark.asyncio +async def test_triangle_visualization_block() -> None: + # given + block = TriangleVisualizationBlock() + + output = await block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), + ), + predictions=sv.Detections( + xyxy=np.array( + [[0, 0, 20, 20], [80, 80, 120, 120], [450, 450, 550, 550]], dtype=np.float64 + ), + class_id=np.array([1, 1, 1]), + ), + copy_image=True, + color_palette="tab10", + palette_size=10, + custom_colors=["#FF0000", "#00FF00", "#0000FF"], + color_axis="CLASS", + position="TOP_CENTER", + base=30, + height=30, + outline_thickness=1 + ) + + assert output is not None + assert "image" in output + assert hasattr(output.get("image"), "numpy_image") + + # dimensions of output match input + assert 
output.get("image").numpy_image.shape == (1000, 1000, 3) + # check if the image is modified + assert not np.array_equal(output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)) diff --git a/tests/workflows/unit_tests/execution_engine/dynamic_blocs/__init__.py b/tests/workflows/unit_tests/execution_engine/dynamic_blocs/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/workflows/unit_tests/execution_engine/dynamic_blocs/test_block_assembler.py b/tests/workflows/unit_tests/execution_engine/dynamic_blocs/test_block_assembler.py new file mode 100644 index 000000000..1c76126b9 --- /dev/null +++ b/tests/workflows/unit_tests/execution_engine/dynamic_blocs/test_block_assembler.py @@ -0,0 +1,505 @@ +from typing import Type, Union +from unittest import mock + +import pytest +from pydantic import ValidationError +from pydantic_core import PydanticUndefinedType + +from inference.core.workflows.entities.base import OutputDefinition +from inference.core.workflows.entities.types import ( + WILDCARD_KIND, + Kind, + StepOutputImageSelector, + StepOutputSelector, + WorkflowImageSelector, + WorkflowParameterSelector, +) +from inference.core.workflows.errors import DynamicBlockError +from inference.core.workflows.execution_engine.dynamic_blocks import block_assembler +from inference.core.workflows.execution_engine.dynamic_blocks.block_assembler import ( + build_input_field_metadata, + build_outputs_definitions, + collect_input_dimensionality_offsets, + collect_python_types_for_selectors, + collect_python_types_for_values, + create_dynamic_block_specification, + pick_dimensionality_reference_property, +) +from inference.core.workflows.execution_engine.dynamic_blocks.entities import ( + DynamicBlockDefinition, + DynamicInputDefinition, + DynamicOutputDefinition, + ManifestDescription, + PythonCode, + SelectorType, + ValueType, +) + + +def test_pick_dimensionality_reference_property_when_there_is_no_reference_property() -> ( + None +): + # 
given + inputs = { + "a": DynamicInputDefinition( + type="DynamicInputDefinition", selector_types=[SelectorType.INPUT_PARAMETER] + ), + "b": DynamicInputDefinition( + type="DynamicInputDefinition", value_types=[ValueType.INTEGER] + ), + } + + # when + result = pick_dimensionality_reference_property( + block_type="some", + inputs=inputs, + ) + + # then + assert result is None + + +def test_pick_dimensionality_reference_property_when_there_is_single_reference_property() -> ( + None +): + # given + inputs = { + "a": DynamicInputDefinition( + type="DynamicInputDefinition", + selector_types=[SelectorType.INPUT_PARAMETER], + is_dimensionality_reference=True, + ), + "b": DynamicInputDefinition( + type="DynamicInputDefinition", value_types=[ValueType.INTEGER] + ), + } + + # when + result = pick_dimensionality_reference_property( + block_type="some", + inputs=inputs, + ) + + # then + assert result == "a", "Expected `a` to be picked as dimensionality reference" + + +def test_pick_dimensionality_reference_property_when_there_are_multiple_reference_properties() -> ( + None +): + # given + inputs = { + "a": DynamicInputDefinition( + type="DynamicInputDefinition", + selector_types=[SelectorType.INPUT_PARAMETER], + is_dimensionality_reference=True, + ), + "b": DynamicInputDefinition( + type="DynamicInputDefinition", + value_types=[ValueType.INTEGER], + is_dimensionality_reference=True, + ), + } + + # when + with pytest.raises(DynamicBlockError): + _ = pick_dimensionality_reference_property( + block_type="some", + inputs=inputs, + ) + + +def test_build_outputs_definitions_when_build_should_succeed() -> None: + # given + outputs = { + "a": DynamicOutputDefinition(type="DynamicOutputDefinition"), + "b": DynamicOutputDefinition( + type="DynamicOutputDefinition", kind=["string", "integer"] + ), + } + kinds_lookup = { + "*": WILDCARD_KIND, + "string": Kind(name="string"), + "integer": Kind(name="integer"), + } + + # when + result = build_outputs_definitions( + block_type="some", + 
outputs=outputs, + kinds_lookup=kinds_lookup, + ) + + # then + assert result == [ + OutputDefinition(name="a", kind=[WILDCARD_KIND]), + OutputDefinition( + name="b", kind=[kinds_lookup["string"], kinds_lookup["integer"]] + ), + ], "Expected outputs to be built such that `a` has * kind and `b` has exactly the kinds that were defined" + + +def test_build_outputs_definitions_when_build_should_fail_on_not_recognised_kind() -> ( + None +): + # given + outputs = { + "a": DynamicOutputDefinition(type="DynamicOutputDefinition"), + "b": DynamicOutputDefinition( + type="DynamicOutputDefinition", kind=["string", "integer"] + ), + } + kinds_lookup = { + "*": WILDCARD_KIND, + "string": Kind(name="string"), + } + + # when + with pytest.raises(DynamicBlockError): + _ = build_outputs_definitions( + block_type="some", + outputs=outputs, + kinds_lookup=kinds_lookup, + ) + + +def test_collect_input_dimensionality_offsets() -> None: + # given + inputs = { + "a": DynamicInputDefinition( + type="DynamicInputDefinition", + selector_types=[SelectorType.INPUT_PARAMETER], + dimensionality_offset=1, + ), + "b": DynamicInputDefinition( + type="DynamicInputDefinition", + selector_types=[SelectorType.INPUT_PARAMETER], + ), + "c": DynamicInputDefinition( + type="DynamicInputDefinition", + selector_types=[SelectorType.INPUT_PARAMETER], + dimensionality_offset=-1, + ), + } + + # when + result = collect_input_dimensionality_offsets(inputs=inputs) + + # then + assert result == { + "a": 1, + "c": -1, + }, "Expected only entries with non-default value be given in results" + + +def test_build_input_field_metadata_for_field_without_default_value() -> None: + # given + input_definition = DynamicInputDefinition( + type="DynamicInputDefinition", + selector_types=[SelectorType.INPUT_PARAMETER], + dimensionality_offset=1, + ) + + # when + result = build_input_field_metadata(input_definition=input_definition) + + # then + assert isinstance(result.default, PydanticUndefinedType) + + +def 
test_build_input_field_metadata_for_field_without_default_being_none() -> None: + # given + input_definition = DynamicInputDefinition( + type="DynamicInputDefinition", + value_types=[ValueType.INTEGER], + is_optional=True, + has_default_value=True, + ) + + # when + result = build_input_field_metadata(input_definition=input_definition) + + # then + assert result.default is None + + +def test_build_input_field_metadata_for_field_without_default_being_primitive() -> None: + # given + input_definition = DynamicInputDefinition( + type="DynamicInputDefinition", + value_types=[ValueType.INTEGER], + is_optional=True, + has_default_value=True, + default_value=3.0, + ) + + # when + result = build_input_field_metadata(input_definition=input_definition) + + # then + assert result.default == 3 + + +@pytest.mark.parametrize("default_type", [list, set, dict]) +def test_build_input_field_metadata_for_field_without_default_being_compound( + default_type: Union[Type[list], Type[set], Type[dict]], +) -> None: + # given + input_definition = DynamicInputDefinition( + type="DynamicInputDefinition", + value_types=[ValueType.LIST], + has_default_value=True, + default_value=default_type(), + ) + + # when + result = build_input_field_metadata(input_definition=input_definition) + + # then + assert ( + result.default_factory() == default_type() + ), "Expected default_factory used creates new instance of compound element" + + +@pytest.mark.parametrize( + "default_value", [[2, 3, 4], {"a", "b", "c"}, {"a": 1, "b": 2}] +) +def test_build_input_field_metadata_for_field_without_default_being_non_empty_compound( + default_value: Union[set, list, dict], +) -> None: + # given + input_definition = DynamicInputDefinition( + type="DynamicInputDefinition", + value_types=[ValueType.LIST], + has_default_value=True, + default_value=default_value, + ) + + # when + result = build_input_field_metadata(input_definition=input_definition) + + # then + assert ( + result.default_factory() == default_value + ), 
"Expected default_factory to create identical instance of compound data" + assert id(result.default_factory()) != id( + default_value + ), "Expected default_factory to create new instance of compound data" + + +def test_collect_python_types_for_values_when_types_can_be_resolved() -> None: + # given + input_definition = DynamicInputDefinition( + type="DynamicInputDefinition", + value_types=[ValueType.LIST, ValueType.INTEGER], + ) + + # when + result = collect_python_types_for_values( + block_type="some", + input_name="a", + input_definition=input_definition, + ) + + # then + assert result == [list, int], "Expected python types to be resolved properly" + + +@mock.patch.object(block_assembler, "PYTHON_TYPES_MAPPING", {}) +def test_collect_python_types_for_values_when_type_cannot_be_resolved() -> None: + # given + input_definition = DynamicInputDefinition( + type="DynamicInputDefinition", + value_types=[ValueType.LIST, ValueType.INTEGER], + ) + + # when + with pytest.raises(DynamicBlockError): + _ = collect_python_types_for_values( + block_type="some", + input_name="a", + input_definition=input_definition, + ) + + +def test_collect_python_types_for_selectors_when_collection_should_succeed() -> None: + # given + kinds_lookup = { + "*": WILDCARD_KIND, + "string": Kind(name="string"), + "integer": Kind(name="integer"), + } + input_definition = DynamicInputDefinition( + type="DynamicInputDefinition", + selector_types=[ + SelectorType.INPUT_PARAMETER, + SelectorType.INPUT_IMAGE, + SelectorType.STEP_OUTPUT_IMAGE, + SelectorType.STEP_OUTPUT, + ], + selector_data_kind={SelectorType.STEP_OUTPUT: ["string", "integer"]}, + ) + + # when + result = collect_python_types_for_selectors( + block_type="some", + input_name="a", + input_definition=input_definition, + kinds_lookup=kinds_lookup, + ) + + # then + + assert len(result) == 4, "Expected union of 4 types" + assert repr(result[0]) == repr( + WorkflowParameterSelector(kind=[WILDCARD_KIND]) + ), "First element of union is to be 
input param of kind *" + assert repr(result[1]) == repr( + WorkflowImageSelector + ), "Second element of union is to be input image selector" + assert repr(result[2]) == repr( + StepOutputImageSelector + ), "Third element of union is to be step output image selector" + assert repr(result[3]) == repr( + StepOutputSelector(kind=[kinds_lookup["string"], kinds_lookup["integer"]]) + ), "Last element of union is to be step output selector of kinds string integer" + + +def test_collect_python_types_for_selectors_when_collection_should_fail_on_unknown_kind() -> ( + None +): + # given + kinds_lookup = { + "*": WILDCARD_KIND, + "string": Kind(name="string"), + } + input_definition = DynamicInputDefinition( + type="DynamicInputDefinition", + selector_types=[ + SelectorType.INPUT_PARAMETER, + SelectorType.INPUT_IMAGE, + SelectorType.STEP_OUTPUT_IMAGE, + SelectorType.STEP_OUTPUT, + ], + selector_data_kind={SelectorType.STEP_OUTPUT: ["string", "integer"]}, + ) + + # when + with pytest.raises(DynamicBlockError): + _ = collect_python_types_for_selectors( + block_type="some", + input_name="a", + input_definition=input_definition, + kinds_lookup=kinds_lookup, + ) + + +PYTHON_CODE = """ +def run(self, a, b): + return {"output": b[::-1]} +""" + + +@pytest.mark.asyncio +async def test_create_dynamic_block_specification() -> None: + # given + kinds_lookup = { + "*": WILDCARD_KIND, + "string": Kind(name="string"), + "integer": Kind(name="integer"), + } + dynamic_block_definition = DynamicBlockDefinition( + type="DynamicBlockDefinition", + manifest=ManifestDescription( + type="ManifestDescription", + block_type="MyBlock", + inputs={ + "a": DynamicInputDefinition( + type="DynamicInputDefinition", + selector_types=[ + SelectorType.INPUT_PARAMETER, + SelectorType.STEP_OUTPUT, + ], + selector_data_kind={ + SelectorType.STEP_OUTPUT: ["string", "integer"] + }, + ), + "b": DynamicInputDefinition( + type="DynamicInputDefinition", + value_types=[ValueType.LIST], + has_default_value=True, + 
default_value=[1, 2, 3], + ), + }, + outputs={ + "a": DynamicOutputDefinition(type="DynamicOutputDefinition"), + "b": DynamicOutputDefinition( + type="DynamicOutputDefinition", kind=["string", "integer"] + ), + }, + output_dimensionality_offset=1, + accepts_batch_input=True, + ), + code=PythonCode( + type="PythonCode", + run_function_code=PYTHON_CODE, + ), + ) + + # when + result = create_dynamic_block_specification( + dynamic_block_definition=dynamic_block_definition, + kinds_lookup=kinds_lookup, + ) + + # then + assert result.block_source == "dynamic_workflows_blocks" + assert result.manifest_class.describe_outputs() == [ + OutputDefinition(name="a", kind=[WILDCARD_KIND]), + OutputDefinition( + name="b", kind=[kinds_lookup["string"], kinds_lookup["integer"]] + ), + ], "Expected outputs to be built such that `a` has * kind and `b` has exactly the kinds that were defined" + assert ( + result.manifest_class.accepts_batch_input() is True + ), "Manifest defined to accept batch input" + assert ( + result.manifest_class.accepts_empty_values() is False + ), "Manifest defined not to accept empty input" + assert ( + result.manifest_class.get_input_dimensionality_offsets() == {} + ), "No explicit offsets defined" + assert ( + result.manifest_class.get_dimensionality_reference_property() is None + ), "No dimensionality reference property expected" + assert ( + result.manifest_class.get_output_dimensionality_offset() == 1 + ), "Expected output dimensionality offset announced" + + block_instance = result.block_class() + code_run_result = await block_instance.run(a="some", b=[1, 2, 3]) + assert code_run_result == { + "output": [3, 2, 1] + }, "Expected code to work properly and revert second param" + + _ = result.manifest_class.model_validate( + {"name": "some", "type": "MyBlock", "a": "$steps.some.a", "b": [1, 2, 3, 4, 5]} + ) # no error expected + + _ = result.manifest_class.model_validate( + { + "name": "some", + "type": "MyBlock", + "a": "$steps.some.a", + } + ) # no error 
expected, default value for "b" defined + + with pytest.raises(ValidationError): + _ = result.manifest_class.model_validate( + {"name": "some", "type": "MyBlock", "a": "some", "b": [1, 2, 3, 4, 5]} + ) # error expected - value "a" without selector + + with pytest.raises(ValidationError): + _ = result.manifest_class.model_validate( + {"name": "some", "type": "MyBlock", "a": "$steps.some.a", "b": 1} + ) # error expected - value "b" not a list diff --git a/tests/workflows/unit_tests/execution_engine/dynamic_blocs/test_block_scaffolding.py b/tests/workflows/unit_tests/execution_engine/dynamic_blocs/test_block_scaffolding.py new file mode 100644 index 000000000..4411be733 --- /dev/null +++ b/tests/workflows/unit_tests/execution_engine/dynamic_blocs/test_block_scaffolding.py @@ -0,0 +1,211 @@ +from unittest import mock + +import pytest + +from inference.core.workflows.core_steps.formatters.expression import BlockManifest +from inference.core.workflows.errors import ( + DynamicBlockError, + WorkflowEnvironmentConfigurationError, +) +from inference.core.workflows.execution_engine.dynamic_blocks import block_scaffolding +from inference.core.workflows.execution_engine.dynamic_blocks.block_scaffolding import ( + assembly_custom_python_block, + create_dynamic_module, +) +from inference.core.workflows.execution_engine.dynamic_blocks.entities import PythonCode + + +def test_create_dynamic_module_when_syntax_error_happens() -> None: + # given + init_function = """ +def init_fun() -> Dict[str, Any]: + return {"a": 35} +""" + run_function = """ +def run_function( -> BlockResult: + return {"result": a + b} +""" + python_code = PythonCode( + type="PythonCode", + run_function_code=run_function, + run_function_name="run_function", + init_function_code=init_function, + init_function_name="init_fun", + imports=["import math"], + ) + + # when + with pytest.raises(DynamicBlockError): + _ = create_dynamic_module( + block_type_name="some", python_code=python_code, module_name="my_module" + ) 
+ + +def test_create_dynamic_module_when_creation_should_succeed() -> None: + # given + init_function = """ +def init_fun() -> Dict[str, Any]: + return {"a": 35} +""" + run_function = """ +def run_function(a, b) -> BlockResult: + return {"result": a + b} +""" + python_code = PythonCode( + type="PythonCode", + run_function_code=run_function, + run_function_name="run_function", + init_function_code=init_function, + init_function_name="init_fun", + imports=["import math"], + ) + + # when + module = create_dynamic_module( + block_type_name="some", python_code=python_code, module_name="my_module" + ) + + # then + assert module.init_fun() == {"a": 35} + assert module.run_function(3, 5) == {"result": 8} + + +@pytest.mark.asyncio +async def test_assembly_custom_python_block() -> None: + # given + manifest = BlockManifest + init_function = """ +def init_fun() -> Dict[str, Any]: + return {"a": 6} +""" + run_function = """ +def run_function(self, a, b) -> BlockResult: + return {"result": a + b + self._init_results["a"]} + """ + python_code = PythonCode( + type="PythonCode", + run_function_code=run_function, + run_function_name="run_function", + init_function_code=init_function, + init_function_name="init_fun", + imports=["import math"], + ) + + # when + workflow_block_class = assembly_custom_python_block( + block_type_name="some", + unique_identifier="unique-id", + manifest=manifest, + python_code=python_code, + ) + workflow_block_instance = workflow_block_class() + execution_result = await workflow_block_instance.run(a=3, b=5) + + # then + assert ( + workflow_block_class.get_init_parameters() == [] + ), "Expected no init parameters defined" + assert ( + workflow_block_class.get_manifest() == BlockManifest + ), "Expected manifest to be returned" + assert execution_result == { + "result": 14 + }, "Expected result of 3 + 5 + 6 (last value from init)" + + +@pytest.mark.asyncio +async def test_assembly_custom_python_block_when_run_function_not_found() -> None: + # given + 
manifest = BlockManifest + init_function = """ +def init_fun() -> Dict[str, Any]: + return {"a": 6} +""" + run_function = """ +def run_function(self, a, b) -> BlockResult: + return {"result": a + b + self._init_results["a"]} + """ + python_code = PythonCode( + type="PythonCode", + run_function_code=run_function, + run_function_name="invalid", + init_function_code=init_function, + init_function_name="init_fun", + imports=["import math"], + ) + + # when + with pytest.raises(DynamicBlockError): + _ = assembly_custom_python_block( + block_type_name="some", + unique_identifier="unique-id", + manifest=manifest, + python_code=python_code, + ) + + +@pytest.mark.asyncio +async def test_assembly_custom_python_block_when_init_function_not_found() -> None: + # given + manifest = BlockManifest + init_function = """ +def init_fun() -> Dict[str, Any]: + return {"a": 6} +""" + run_function = """ +def run_function(self, a, b) -> BlockResult: + return {"result": a + b + self._init_results["a"]} + """ + python_code = PythonCode( + type="PythonCode", + run_function_code=run_function, + run_function_name="run_function", + init_function_code=init_function, + init_function_name="invalid", + imports=["import math"], + ) + + # when + with pytest.raises(DynamicBlockError): + _ = assembly_custom_python_block( + block_type_name="some", + unique_identifier="unique-id", + manifest=manifest, + python_code=python_code, + ) + + +@pytest.mark.asyncio +@mock.patch.object( + block_scaffolding, "ALLOW_CUSTOM_PYTHON_EXECUTION_IN_WORKFLOWS", False +) +async def test_run_assembled_custom_python_block_when_custom_python_forbidden() -> None: + # given + manifest = BlockManifest + init_function = """ +def init_fun() -> Dict[str, Any]: + return {"a": 6} +""" + run_function = """ +def run_function(self, a, b) -> BlockResult: + return {"result": a + b + self._init_results["a"]} + """ + python_code = PythonCode( + type="PythonCode", + run_function_code=run_function, + run_function_name="run_function", + 
init_function_code=init_function, + init_function_name="init_fun", + imports=["import math"], + ) + + # when + workflow_block_class = assembly_custom_python_block( + block_type_name="some", + unique_identifier="unique-id", + manifest=manifest, + python_code=python_code, + ) + workflow_block_instance = workflow_block_class() + with pytest.raises(WorkflowEnvironmentConfigurationError): + _ = await workflow_block_instance.run(a=3, b=5) diff --git a/tests/workflows/unit_tests/execution_engine/introspection/test_blocks_loader.py b/tests/workflows/unit_tests/execution_engine/introspection/test_blocks_loader.py index 662e2a9fc..68dd7202c 100644 --- a/tests/workflows/unit_tests/execution_engine/introspection/test_blocks_loader.py +++ b/tests/workflows/unit_tests/execution_engine/introspection/test_blocks_loader.py @@ -174,7 +174,7 @@ def test_load_initializers_when_plugin_exists_and_initializers_provided() -> Non result = load_initializers() # then - assert len(result) == 2 + assert len(result) == 5 assert ( result[ "tests.workflows.unit_tests.execution_engine.introspection.plugin_with_initializers.a" @@ -199,7 +199,7 @@ def test_describe_available_blocks_when_valid_plugins_are_loaded( ) # when - result = describe_available_blocks() + result = describe_available_blocks(dynamic_blocks=[]) # then assert len(result.blocks) == 2, "Expected 2 blocks to be loaded" @@ -207,7 +207,7 @@ def test_describe_available_blocks_when_valid_plugins_are_loaded( assert result.blocks[0].manifest_class == plugin_with_valid_blocks.Block1Manifest assert result.blocks[1].block_class == plugin_with_valid_blocks.Block2 assert result.blocks[1].manifest_class == plugin_with_valid_blocks.Block2Manifest - assert len(result.declared_kinds) == 3 + assert len(result.declared_kinds) == 33 @mock.patch.object(blocks_loader, "load_workflow_blocks") @@ -224,7 +224,7 @@ def test_describe_available_blocks_when_plugins_duplicate_class_names( # when with pytest.raises(PluginLoadingError): - _ = 
describe_available_blocks() + _ = describe_available_blocks(dynamic_blocks=[]) @mock.patch.object(blocks_loader, "load_workflow_blocks") @@ -238,4 +238,4 @@ def test_describe_available_blocks_when_plugins_duplicate_type_identifiers( # when with pytest.raises(PluginLoadingError): - _ = describe_available_blocks() + _ = describe_available_blocks(dynamic_blocks=[])