make pipelines tests device-agnostic (part1) #9399

Status: Open. faaany wants to merge 67 commits into main from the enable_xpu branch.

Commits (67)
275b6af  enable on xpu (faaany, Sep 9, 2024)
f422d55  add 1 more (faaany, Sep 9, 2024)
84540d9  add one more (faaany, Sep 9, 2024)
b94f751  enable more (faaany, Sep 9, 2024)
fa508a4  add 1 more (faaany, Sep 9, 2024)
a04fedf  add more (faaany, Sep 9, 2024)
7442dc1  enable 1 (faaany, Sep 9, 2024)
658789a  enable more cases (faaany, Sep 9, 2024)
06c77c7  enable (faaany, Sep 9, 2024)
1bf9b0c  enable (faaany, Sep 10, 2024)
6296e88  update comment (faaany, Sep 10, 2024)
6d0b378  one more (faaany, Sep 10, 2024)
0d9f975  enable 1 (faaany, Sep 10, 2024)
732ff3a  add more cases (faaany, Sep 10, 2024)
3803d93  enable xpu (faaany, Sep 10, 2024)
1825fd5  Merge branch 'main' into enable_xpu (faaany, Sep 12, 2024)
0666a53  Merge branch 'main' into enable_xpu (faaany, Sep 13, 2024)
e00bcca  add one more caswe (faaany, Sep 13, 2024)
6513201  add more cases (faaany, Sep 13, 2024)
3cb0f96  add 1 (faaany, Sep 13, 2024)
314e76d  add more (faaany, Sep 13, 2024)
65c95e9  add more cases (faaany, Sep 13, 2024)
2882ee4  add case (faaany, Sep 13, 2024)
f604bce  enable (faaany, Sep 13, 2024)
0c66254  add more (faaany, Sep 13, 2024)
c699d7a  add more (faaany, Sep 13, 2024)
961a8e5  add more (faaany, Sep 13, 2024)
266f5f9  enbale more (faaany, Sep 13, 2024)
405bc16  add more (faaany, Sep 13, 2024)
48b67d6  update code (faaany, Sep 13, 2024)
1117427  update test marker (faaany, Sep 13, 2024)
88d289a  add skip back (faaany, Sep 13, 2024)
b534c50  update comment (faaany, Sep 13, 2024)
496e0be  Merge branch 'main' into enable_xpu (faaany, Sep 14, 2024)
1da965b  Merge branch 'main' into xpu_more3 (faaany, Sep 14, 2024)
ab4bc3a  remove single files (faaany, Sep 14, 2024)
769d713  remove (faaany, Sep 14, 2024)
bf5e0b2  Merge branch 'xpu_more3' into enable_xpu (faaany, Sep 14, 2024)
daeb966  style (faaany, Sep 14, 2024)
28a73ac  add (faaany, Sep 14, 2024)
3052847  revert (faaany, Sep 14, 2024)
a5384ff  reformat (faaany, Sep 14, 2024)
2089e26  Merge branch 'huggingface:main' into enable_xpu (faaany, Sep 17, 2024)
563124e  Merge branch 'main' into enable_xpu (faaany, Sep 18, 2024)
feb9fd2  Merge branch 'main' into enable_xpu (faaany, Sep 19, 2024)
0e39938  Merge branch 'main' into enable_xpu (faaany, Sep 20, 2024)
58de480  Merge branch 'main' into enable_xpu (faaany, Sep 23, 2024)
ac90301  Merge branch 'main' into enable_xpu (faaany, Sep 26, 2024)
2d08678  Merge branch 'main' into enable_xpu (sayakpaul, Oct 14, 2024)
d1b81ec  Merge branch 'huggingface:main' into enable_xpu (faaany, Oct 15, 2024)
da9c5c9  Merge branch 'huggingface:main' into enable_xpu (faaany, Oct 23, 2024)
34a0a7b  update decorator (faaany, Oct 23, 2024)
3e7490e  Merge branch 'huggingface:main' into enable_xpu (faaany, Oct 23, 2024)
bad2a3e  update (faaany, Oct 23, 2024)
f157e98  update (faaany, Oct 23, 2024)
359754e  update (faaany, Oct 23, 2024)
9052f83  Merge branch 'main' into enable_xpu (faaany, Oct 31, 2024)
ed55b90  Merge branch 'main' into enable_xpu (faaany, Nov 6, 2024)
f73a38a  Merge branch 'huggingface:main' into enable_xpu (faaany, Nov 11, 2024)
7ef5dab  Update tests/pipelines/deepfloyd_if/test_if.py (faaany, Nov 11, 2024)
915c741  Update src/diffusers/utils/testing_utils.py (faaany, Nov 11, 2024)
e0507b5  Update tests/pipelines/animatediff/test_animatediff_controlnet.py (faaany, Nov 11, 2024)
97f0e9e  Update tests/pipelines/animatediff/test_animatediff.py (faaany, Nov 11, 2024)
451790f  Update tests/pipelines/animatediff/test_animatediff_controlnet.py (faaany, Nov 11, 2024)
b671e25  update float16 (faaany, Nov 11, 2024)
35de6d3  no unitest.skipt (faaany, Nov 11, 2024)
64ef84a  update (faaany, Nov 11, 2024)
Files changed
src/diffusers/utils/testing_utils.py (8 additions, 0 deletions)

@@ -373,6 +373,14 @@ def require_note_seq(test_case):
     return unittest.skipUnless(is_note_seq_available(), "test requires note_seq")(test_case)


+def require_accelerator(test_case):
+    """
+    Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when no hardware
+    accelerator is available.
+    """
+    return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case)
+
+
 def require_torchsde(test_case):
     """
     Decorator marking a test that requires torchsde. These tests are skipped when torchsde isn't installed.
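The new decorator composes like the existing require_* markers in testing_utils. A minimal usage sketch, assuming the PR is merged so require_accelerator is importable; the ExampleAcceleratorTests class and its tensor check are illustrative, not part of the diff:

import unittest

import torch

from diffusers.utils.testing_utils import require_accelerator, torch_device


class ExampleAcceleratorTests(unittest.TestCase):
    @require_accelerator
    def test_runs_on_any_accelerator(self):
        # torch_device resolves to "cuda", "xpu", "mps", ... on the host machine;
        # the decorator skips this test when it resolves to "cpu".
        x = torch.ones(2, 2, device=torch_device)
        self.assertEqual(x.device.type, torch_device)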
tests/pipelines/amused/test_amused.py (2 additions, 2 deletions)

@@ -22,7 +22,7 @@
 from diffusers import AmusedPipeline, AmusedScheduler, UVit2DModel, VQModel
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
@@ -129,7 +129,7 @@ def test_inference_batch_single_identical(self):


 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class AmusedPipelineSlowTests(unittest.TestCase):
     def test_amused_256(self):
         pipe = AmusedPipeline.from_pretrained("amused/amused-256")
tests/pipelines/amused/test_amused_img2img.py (2 additions, 2 deletions)

@@ -23,7 +23,7 @@
 from diffusers.utils import load_image
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
@@ -131,7 +131,7 @@ def test_inference_batch_single_identical(self):


 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class AmusedImg2ImgPipelineSlowTests(unittest.TestCase):
     def test_amused_256(self):
         pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-256")
tests/pipelines/amused/test_amused_inpaint.py (2 additions, 2 deletions)

@@ -23,7 +23,7 @@
 from diffusers.utils import load_image
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
@@ -135,7 +135,7 @@ def test_inference_batch_single_identical(self):


 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class AmusedInpaintPipelineSlowTests(unittest.TestCase):
     def test_amused_256(self):
         pipe = AmusedInpaintPipeline.from_pretrained("amused/amused-256")
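Note that the amused tests switch to require_torch_accelerator, a marker that already exists in testing_utils, rather than the new require_accelerator. As I read it, the two differ only in that the former also checks that torch is importable; a hedged sketch of that reading (require_torch_accelerator_sketch is illustrative, not code from the diff):

import unittest

from diffusers.utils.import_utils import is_torch_available
from diffusers.utils.testing_utils import torch_device


# Assumed behavior of the pre-existing marker: skip unless torch is installed
# AND the resolved torch_device is a non-CPU backend.
def require_torch_accelerator_sketch(test_case):
    return unittest.skipUnless(
        is_torch_available() and torch_device != "cpu",
        "test requires an accelerator backend",
    )(test_case)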
tests/pipelines/animatediff/test_animatediff.py (12 additions, 6 deletions)

@@ -19,7 +19,13 @@
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import is_xformers_available, logging
-from diffusers.utils.testing_utils import numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import (
+    numpy_cosine_similarity_distance,
+    require_accelerator,
+    require_torch_gpu,
+    slow,
+    torch_device,
+)

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
@@ -272,7 +278,7 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -288,14 +294,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)

     def test_to_dtype(self):
         components = self.get_dummy_components()
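The same substitution repeats through the rest of the PR: every hard-coded "cuda" becomes torch_device, and the CUDA-only skipIf becomes @require_accelerator. Distilled from the test above, the device-agnostic round trip looks roughly like this (check_to_device and its arguments are an illustrative sketch, not a helper added by the diff):

import numpy as np

from diffusers.utils.testing_utils import torch_device


def check_to_device(pipe, get_dummy_inputs):
    # Run once on CPU and confirm the output contains no NaNs.
    pipe.to("cpu")
    output_cpu = pipe(**get_dummy_inputs("cpu"))[0]
    assert np.isnan(output_cpu).sum() == 0

    # Move to whatever accelerator the host exposes (CUDA, XPU, MPS, ...)
    # instead of a hard-coded "cuda", then verify every component followed.
    pipe.to(torch_device)
    model_devices = [
        c.device.type for c in pipe.components.values() if hasattr(c, "device")
    ]
    assert all(d == torch_device for d in model_devices)

    # Run again on the accelerator and repeat the NaN check.
    output_device = pipe(**get_dummy_inputs(torch_device))[0]
    assert np.isnan(output_device).sum() == 0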
tests/pipelines/animatediff/test_animatediff_controlnet.py (6 additions, 6 deletions)

@@ -21,7 +21,7 @@
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import torch_device
+from diffusers.utils.testing_utils import require_accelerator, torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
@@ -281,7 +281,7 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -297,14 +297,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)

     def test_to_dtype(self):
         components = self.get_dummy_components()
tests/pipelines/animatediff/test_animatediff_sdxl.py (6 additions, 6 deletions)

@@ -14,7 +14,7 @@
     UNetMotionModel,
 )
 from diffusers.utils import is_xformers_available, logging
-from diffusers.utils.testing_utils import torch_device
+from diffusers.utils.testing_utils import require_accelerator, torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
@@ -212,7 +212,7 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -228,14 +228,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)

     def test_to_dtype(self):
         components = self.get_dummy_components()
tests/pipelines/animatediff/test_animatediff_sparsectrl.py (5 additions, 5 deletions)

@@ -20,7 +20,7 @@
 )
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import torch_device
+from diffusers.utils.testing_utils import require_accelerator, torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (
@@ -345,7 +345,7 @@ def test_inference_batch_single_identical_use_simplified_condition_embedding_tru
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -361,13 +361,13 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
+        output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0]
         self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)

     def test_to_dtype(self):
tests/pipelines/animatediff/test_animatediff_video2video.py (6 additions, 6 deletions)

@@ -19,7 +19,7 @@
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import is_xformers_available, logging
-from diffusers.utils.testing_utils import torch_device
+from diffusers.utils.testing_utils import require_accelerator, torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS
 from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin
@@ -258,7 +258,7 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -274,14 +274,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)

     def test_to_dtype(self):
         components = self.get_dummy_components()
(file name missing from the captured diff; the changes follow the same pattern)

@@ -20,7 +20,7 @@
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import is_xformers_available, logging
-from diffusers.utils.testing_utils import torch_device
+from diffusers.utils.testing_utils import require_accelerator, torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS
 from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin
@@ -274,7 +274,7 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -290,13 +290,13 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
+        output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0]
         self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)

     def test_to_dtype(self):
tests/pipelines/controlnet_xs/test_controlnetxs.py (6 additions, 5 deletions)

@@ -38,6 +38,7 @@
     is_torch_compile,
     load_image,
     load_numpy,
+    require_accelerator,
     require_torch_2,
     require_torch_gpu,
     run_test_in_subprocess,
@@ -306,7 +307,7 @@ def test_multi_vae(self):

         assert out_vae_np.shape == out_np.shape

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -322,14 +323,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)

-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))

-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)


 @slow
tests/pipelines/deepfloyd_if/test_if.py (3 additions, 2 deletions)

@@ -23,7 +23,7 @@
 )
 from diffusers.models.attention_processor import AttnAddedKVProcessor
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import load_numpy, require_torch_gpu, skip_mps, slow, torch_device
+from diffusers.utils.testing_utils import load_numpy, require_accelerator, require_torch_gpu, skip_mps, slow, torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
@@ -58,7 +58,8 @@ def get_dummy_inputs(self, device, seed=0):
     def test_save_load_optional_components(self):
         self._test_save_load_optional_components()

-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
+    @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
+    @require_accelerator
     def test_save_load_float16(self):
         # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
         super().test_save_load_float16(expected_max_diff=1e-1)
tests/pipelines/deepfloyd_if/test_if_img2img.py (13 additions, 3 deletions)

@@ -22,7 +22,15 @@
 from diffusers import IFImg2ImgPipeline
 from diffusers.models.attention_processor import AttnAddedKVProcessor
 from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
+from diffusers.utils.testing_utils import (
+    floats_tensor,
+    load_numpy,
+    require_accelerator,
+    require_torch_gpu,
+    skip_mps,
+    slow,
+    torch_device,
+)

 from ..pipeline_params import (
     TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
@@ -70,12 +78,14 @@ def test_save_load_optional_components(self):
     def test_xformers_attention_forwardGenerator_pass(self):
         self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
+    @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
+    @require_accelerator
    def test_save_load_float16(self):
         # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
         super().test_save_load_float16(expected_max_diff=1e-1)

-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
+    @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
+    @require_accelerator
     def test_float16_inference(self):
         super().test_float16_inference(expected_max_diff=1e-1)
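The DeepFloyd IF float16 tests keep an explicit device allow-list because the concern is float16 kernel support, not merely having an accelerator. A sketch of the predicate behind the updated skips, assuming (as the diff does) that only CUDA and XPU are trusted for float16 here; supports_float16 is an illustrative helper, not part of the PR:

import torch

from diffusers.utils.testing_utils import torch_device


def supports_float16(device: str = torch_device) -> bool:
    # Mirrors the skip condition in the diff: float16 paths are exercised
    # only on CUDA and XPU; other backends (including MPS) are skipped.
    return device in ("cuda", "xpu")


if supports_float16():
    half = torch.ones(4, dtype=torch.float16, device=torch_device)
    print(half.dtype, half.device)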