Support bfloat16 for Upsample2D
darhsu committed Sep 20, 2024
1 parent 14a1b86 commit 9ebb411
Showing 1 changed file with 9 additions and 6 deletions.
15 changes: 9 additions & 6 deletions src/diffusers/models/upsampling.py
@@ -17,11 +17,15 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
+from packaging import version
 
 from ..utils import deprecate
 from .normalization import RMSNorm
 
 
+is_torch_less_than_2_1 = version.parse(version.parse(torch.__version__).base_version) < version.parse("2.1")
+
+
 class Upsample1D(nn.Module):
     """A 1D upsampling layer with an optional convolution.
@@ -151,11 +155,10 @@ def forward(self, hidden_states: torch.Tensor, output_size: Optional[int] = None
         if self.use_conv_transpose:
             return self.conv(hidden_states)
 
-        # Cast to float32 to as 'upsample_nearest2d_out_frame' op does not support bfloat16
-        # TODO(Suraj): Remove this cast once the issue is fixed in PyTorch
-        # https://github.com/pytorch/pytorch/issues/86679
+        # Cast to float32 as the 'upsample_nearest2d_out_frame' op does not support bfloat16 until PyTorch 2.1
+        # https://github.com/pytorch/pytorch/issues/86679#issuecomment-1783978767
         dtype = hidden_states.dtype
-        if dtype == torch.bfloat16:
+        if dtype == torch.bfloat16 and is_torch_less_than_2_1:
             hidden_states = hidden_states.to(torch.float32)
 
         # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
@@ -170,8 +173,8 @@ def forward(self, hidden_states: torch.Tensor, output_size: Optional[int] = None
         else:
             hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest")
 
-        # If the input is bfloat16, we cast back to bfloat16
-        if dtype == torch.bfloat16:
+        # Cast back to original dtype
+        if dtype == torch.bfloat16 and is_torch_less_than_2_1:
             hidden_states = hidden_states.to(dtype)
 
         # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
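The version gate added at the top of the file compares against PyTorch 2.1 via packaging's base_version, which strips pre-release and local build suffixes before comparing. A small illustrative sketch of why that matters follows; it is not part of the commit, and the version strings are made up:

# Illustrative sketch (not from the commit): base_version drops dev/local suffixes,
# so a nightly such as "2.1.0.dev20230815+cu121" is treated as 2.1 and does not
# trigger the float32 fallback.
from packaging import version

for v in ("2.0.1", "2.1.0", "2.1.0.dev20230815+cu121"):
    is_less_than_2_1 = version.parse(version.parse(v).base_version) < version.parse("2.1")
    print(v, is_less_than_2_1)

# Output:
# 2.0.1 True
# 2.1.0 False
# 2.1.0.dev20230815+cu121 False  (comparing version.parse(v) directly would say True)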

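For context, here is a minimal usage sketch of what the change enables. It is not part of the commit and assumes the Upsample2D constructor arguments channels and use_conv plus a bfloat16-capable PyTorch build:

# Minimal sketch: on PyTorch >= 2.1 the bfloat16 input is upsampled directly,
# while on older PyTorch the layer still round-trips through float32 internally.
import torch

from diffusers.models.upsampling import Upsample2D

upsampler = Upsample2D(channels=64, use_conv=True).to(torch.bfloat16)
hidden_states = torch.randn(1, 64, 32, 32, dtype=torch.bfloat16)

output = upsampler(hidden_states)
print(output.shape)  # torch.Size([1, 64, 64, 64]) -- spatial dims doubled by nearest upsampling
print(output.dtype)  # torch.bfloat16 in both cases; only the intermediate dtype differs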