remove rocm from diffusers tests
IlyasMoutawwakil committed Jan 13, 2025
1 parent eb25460 commit 0a7a23d
Showing 1 changed file with 12 additions and 6 deletions.
18 changes: 12 additions & 6 deletions tests/onnxruntime/test_diffusion.py
@@ -281,16 +281,18 @@ def test_negative_prompt(self, model_arch: str):
         grid_parameters(
             {
                 "model_arch": SUPPORTED_ARCHITECTURES,
-                "provider": ["CUDAExecutionProvider", "ROCMExecutionProvider", "TensorrtExecutionProvider"],
+                "provider": ["CUDAExecutionProvider", "TensorrtExecutionProvider"],
             }
         )
     )
-    @pytest.mark.rocm_ep_test
     @pytest.mark.cuda_ep_test
     @pytest.mark.trt_ep_test
     @require_torch_gpu
     @require_diffusers
     def test_pipeline_on_gpu(self, test_name: str, model_arch: str, provider: str):
+        if provider == "TensorrtExecutionProvider" and model_arch != self.__class__.SUPPORTED_ARCHITECTURES[0]:
+            self.skipTest("Testing a single arch for TensorrtExecutionProvider")
+
         model_args = {"test_name": test_name, "model_arch": model_arch}
         self._setup(model_args)

@@ -519,16 +521,18 @@ def test_image_reproducibility(self, model_arch: str):
         grid_parameters(
             {
                 "model_arch": SUPPORTED_ARCHITECTURES,
-                "provider": ["CUDAExecutionProvider", "ROCMExecutionProvider", "TensorrtExecutionProvider"],
+                "provider": ["CUDAExecutionProvider", "TensorrtExecutionProvider"],
             }
         )
     )
-    @pytest.mark.rocm_ep_test
     @pytest.mark.cuda_ep_test
     @pytest.mark.trt_ep_test
     @require_torch_gpu
     @require_diffusers
     def test_pipeline_on_gpu(self, test_name: str, model_arch: str, provider: str):
+        if provider == "TensorrtExecutionProvider" and model_arch != self.__class__.SUPPORTED_ARCHITECTURES[0]:
+            self.skipTest("Testing a single arch for TensorrtExecutionProvider")
+
         model_args = {"test_name": test_name, "model_arch": model_arch}
         self._setup(model_args)

@@ -759,16 +763,18 @@ def test_image_reproducibility(self, model_arch: str):
         grid_parameters(
             {
                 "model_arch": SUPPORTED_ARCHITECTURES,
-                "provider": ["CUDAExecutionProvider", "ROCMExecutionProvider", "TensorrtExecutionProvider"],
+                "provider": ["CUDAExecutionProvider", "TensorrtExecutionProvider"],
            }
         )
     )
-    @pytest.mark.rocm_ep_test
     @pytest.mark.cuda_ep_test
     @pytest.mark.trt_ep_test
     @require_torch_gpu
     @require_diffusers
     def test_pipeline_on_gpu(self, test_name: str, model_arch: str, provider: str):
+        if provider == "TensorrtExecutionProvider" and model_arch != self.__class__.SUPPORTED_ARCHITECTURES[0]:
+            self.skipTest("Testing a single arch for TensorrtExecutionProvider")
+
         model_args = {"test_name": test_name, "model_arch": model_arch}
         self._setup(model_args)

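Note: `@parameterized.expand(grid_parameters({...}))` turns the dictionary above into one test case per (model_arch, provider) combination, so dropping "ROCMExecutionProvider" from the provider list removes every ROCm test case while leaving the CUDA and TensorRT ones untouched. Below is a minimal sketch of that expansion; the architecture names and the `grid_parameters_sketch` helper are illustrative only, not optimum's actual implementation.

    from itertools import product

    SUPPORTED_ARCHITECTURES = ["stable-diffusion", "stable-diffusion-xl"]  # hypothetical list
    PROVIDERS = ["CUDAExecutionProvider", "TensorrtExecutionProvider"]  # ROCMExecutionProvider no longer included

    def grid_parameters_sketch(grid):
        """Yield one (test_name, *values) tuple per combination of the grid's values."""
        keys = list(grid)
        for values in product(*(grid[k] for k in keys)):
            test_name = "_".join(str(v) for v in values)
            yield (test_name, *values)

    for case in grid_parameters_sketch({"model_arch": SUPPORTED_ARCHITECTURES, "provider": PROVIDERS}):
        print(case)
        # e.g. ('stable-diffusion_CUDAExecutionProvider', 'stable-diffusion', 'CUDAExecutionProvider')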
