6 changes: 5 additions & 1 deletion deepspeed/module_inject/containers/vae.py
@@ -9,7 +9,11 @@ def __init__(self):
         super().__init__()
         try:
             import diffusers
-            self._orig_layer_class = diffusers.models.vae.AutoencoderKL
+            if hasattr(diffusers.models.vae, "AutoencoderKL"):
+                self._orig_layer_class = diffusers.models.vae.AutoencoderKL
+            else:
+                # Diffusers >= 0.12.0 changes location of AutoencoderKL
+                self._orig_layer_class = diffusers.models.autoencoder_kl.AutoencoderKL
         except ImportError:
             self._orig_layer_class = None

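The hunk above is an instance of a general import-compatibility pattern: probe for the class at its old location and fall back to the new one. A minimal runnable sketch of that pattern, assuming only that diffusers may or may not be installed; the helper name resolve_autoencoder_kl is illustrative and not part of DeepSpeed:

    # Illustrative helper (not part of DeepSpeed): resolve AutoencoderKL
    # across diffusers versions, mirroring the hunk above.
    def resolve_autoencoder_kl():
        try:
            import diffusers
        except ImportError:
            return None
        # diffusers < 0.12.0 exposes the class in diffusers.models.vae
        if hasattr(diffusers.models.vae, "AutoencoderKL"):
            return diffusers.models.vae.AutoencoderKL
        # diffusers >= 0.12.0 moves it to diffusers.models.autoencoder_kl
        return diffusers.models.autoencoder_kl.AutoencoderKL

Returning None on ImportError matches the container's behavior of leaving _orig_layer_class unset when diffusers is absent.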
@@ -90,15 +90,14 @@ def __init__(self,
         self.transformer_cuda_module = load_transformer_module()
         load_spatial_module()

-    def forward(self,
-                hidden_states,
-                context=None,
-                encoder_hidden_states=None,
-                timestep=None):
+    def forward(self, hidden_states, context=None, timestep=None, **kwargs):
+        # In v0.12.0 of diffusers, several new kwargs were added. Capturing
+        # those with kwargs to maintain backward compatibility
+
         # In v0.11.0 of diffusers, the kwarg was changed from 'context' to 'encoder_hidden_states'
         # This is so we can support older and newer versions of diffusers
-        if context == None and encoder_hidden_states != None:
-            context = encoder_hidden_states
+        if "encoder_hidden_states" in kwargs and kwargs["encoder_hidden_states"] != None:
+            context = kwargs["encoder_hidden_states"]

         out_norm_1 = self.transformer_cuda_module.layer_norm(hidden_states,
                                                              self.norm1_g,
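This second hunk applies a complementary compatibility pattern: absorb renamed and newly added keyword arguments through **kwargs so both older and newer diffusers call signatures reach the same code path. A minimal sketch of just that remapping, with forward_compat as an illustrative stand-in for the block's forward method, not DeepSpeed's API:

    # Illustrative stand-in (not DeepSpeed's API) for the kwarg remapping
    # performed at the top of forward in the hunk above.
    def forward_compat(hidden_states, context=None, timestep=None, **kwargs):
        # diffusers >= 0.11.0 passes 'encoder_hidden_states' where older
        # versions passed 'context'; map the new name onto the old one.
        if kwargs.get("encoder_hidden_states") is not None:
            context = kwargs["encoder_hidden_states"]
        # Kwargs introduced in diffusers >= 0.12.0 that this block does not
        # use are silently absorbed, keeping older call paths unchanged.
        return hidden_states, context, timestep

Either call style then supplies the same value for context:

    forward_compat(h, context=c)                  # diffusers < 0.11.0 style
    forward_compat(h, encoder_hidden_states=c)    # diffusers >= 0.11.0 style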