Backend python save img and inference #7863

Open
davy-blavette opened this issue Dec 10, 2024 · 0 comments

I'm trying to implement a method with the Python backend to save my inference images, and it works. I also return the result of my inference:

import triton_python_backend_utils as pb_utils
import numpy as np
from PIL import Image
import os
import time
import json

class TritonPythonModel:
    def initialize(self, args):
        print('Initializing...')
        self.model_config = json.loads(args['model_config'])
        self.model_dir = args['model_repository']
        self.model_version = args['model_version']

        # Create a logs directory specific to this model version
        self.log_dir = os.path.join(self.model_dir, self.model_version, 'logs')
        os.makedirs(self.log_dir, exist_ok=True)
        print(f'Log directory initialized to: {self.log_dir}')

    def execute(self, requests):
        responses = []
        for request in requests:
            try:
                input_tensor = pb_utils.get_input_tensor_by_name(request, "images")
                input_image = input_tensor.as_numpy()

                print(f"Original input shape: {input_image.shape}, dtype: {input_image.dtype}", flush=True)
                print(f"Min value: {input_image.min()}, Max value: {input_image.max()}", flush=True)

                if input_image.ndim == 4:
                    input_image = input_image.squeeze(0)  # Remove batch dimension
                input_image = input_image.transpose(1, 2, 0)  # Change from CHW to HWC format

                # Normalize the image if necessary
                if input_image.max() <= 1.0:
                    input_image = (input_image * 255).astype(np.uint8)
                else:
                    input_image = input_image.astype(np.uint8)

                print(f"Processed input shape: {input_image.shape}, dtype: {input_image.dtype}", flush=True)
                print(f"Min value: {input_image.min()}, Max value: {input_image.max()}", flush=True)

                timestamp = int(time.time() * 1000)

                # Save the image in its own try/except so a failed save
                # does not stop a response from being returned
                try:
                    img = Image.fromarray(input_image, 'RGB')
                    unique_filename = f"image_{timestamp}.png"
                    save_path = os.path.join(self.log_dir, unique_filename)
                    img.save(save_path, format='PNG')
                    print(f"Image saved successfully to: {save_path}", flush=True)
                except Exception as e:
                    print(f"Error saving image: {str(e)}", flush=True)

                # Echo the input tensor back as the output
                output_tensor = pb_utils.Tensor("output0", input_tensor.as_numpy())

                # Build the inference response (Triton expects exactly one
                # response per request)
                inference_response = pb_utils.InferenceResponse(output_tensors=[output_tensor])
                responses.append(inference_response)

            except Exception as e:
                print(f"Error processing image: {str(e)}", flush=True)
                error = pb_utils.TritonError(f"An error occurred while processing the image: {str(e)}")
                responses.append(pb_utils.InferenceResponse(error=error))

        return responses

    def finalize(self):
        print('Cleaning up...', flush=True)
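
To check the CHW-to-HWC conversion and save logic outside of Triton, a quick standalone test can be run (a minimal sketch; the 640x640 shape and the /tmp path are placeholder assumptions):

# Offline sanity check for the CHW -> HWC conversion and PNG save logic.
# The 640x640 shape and /tmp output path are placeholders.
import numpy as np
from PIL import Image

dummy = np.random.rand(1, 3, 640, 640).astype(np.float32)  # NCHW float in [0, 1]
img = dummy.squeeze(0).transpose(1, 2, 0)                  # CHW -> HWC
img = (img * 255).astype(np.uint8)                         # scale to 8-bit RGB
Image.fromarray(img, 'RGB').save('/tmp/sanity_check.png')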

With my client, I have to adapt my code depending on whether I use the Python backend or not:

def process_triton_results(output_data, image_name, labels, log_mode=False):
    if log_mode:
        # Method for log mode (Python backend)
        image_data = output_data[0]
        mean_value = np.mean(image_data)

        if mean_value > 0.5:
            predicted_class = 1  # GOOD
            confidence = mean_value
        else:
            predicted_class = 0  # BAD
            confidence = 1 - mean_value
    else:
        # Standard method (without custom Python backend)
        class_probabilities = output_data[0][0]
        predicted_class = np.argmax(class_probabilities)
        confidence = class_probabilities[predicted_class]

    processed_results = {
        "file_name": image_name,
        "predicted_label": labels[int(predicted_class)] if labels else f"Class_{predicted_class}",
        "confidence": round(float(confidence), 4)
    }
    return processed_results
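
For context, output_data comes from a client call along these lines (a sketch; the URL, model name, and input shape are placeholder assumptions):

import numpy as np
import tritonclient.http as httpclient

# Placeholder values: adjust the URL, model name, and input shape to your setup
client = httpclient.InferenceServerClient(url="localhost:8000")

image = np.random.rand(1, 3, 640, 640).astype(np.float32)  # preprocessed input

infer_input = httpclient.InferInput("images", list(image.shape), "FP32")
infer_input.set_data_from_numpy(image)

result = client.infer(
    model_name="my_model",  # placeholder model name
    inputs=[infer_input],
    outputs=[httpclient.InferRequestedOutput("output0")],
)
output_data = result.as_numpy("output0")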

But depending on whether I'm using the model with the Python backend or the model without it, for the same image and the same (ONNX) model, I don't get the same inference score... I must have an error in my code but I don't know where. Any help would be appreciated.
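
If the backend path is supposed to run the real model rather than echo the input, I assume the Python model would need to forward the request via BLS, something like this (a sketch; the model name "model_onnx" is a placeholder):

# Inside execute(), forward the request to the ONNX model via BLS
# ("model_onnx" is a placeholder for the deployed ONNX model's name)
infer_request = pb_utils.InferenceRequest(
    model_name="model_onnx",
    requested_output_names=["output0"],
    inputs=[input_tensor])
infer_response = infer_request.exec()
if infer_response.has_error():
    raise pb_utils.TritonModelException(infer_response.error().message())
output_tensor = pb_utils.get_output_tensor_by_name(infer_response, "output0")
responses.append(pb_utils.InferenceResponse(output_tensors=[output_tensor]))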
