More fixes for callback_outputs on_step_end outputting NoneType.
Skquark committed Jun 18, 2024
1 parent 5fb7c24 commit efd8d3b
Showing 14 changed files with 67 additions and 67 deletions.
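The same guard is applied in every file below: a user-supplied callback_on_step_end that only inspects or mutates its arguments and has no return statement implicitly returns None, so chaining .pop() onto its result raises AttributeError: 'NoneType' object has no attribute 'pop'. A minimal, self-contained sketch of the failure mode and the guarded unpacking (the callback names, the scalar stand-in for latents, and the simplified signature are illustrative, not taken from this commit):

def logging_callback(step, timestep, callback_kwargs):
    # Inspects values but has no return statement, so it returns None.
    print(f"step={step} timestep={timestep} keys={list(callback_kwargs)}")

def override_callback(step, timestep, callback_kwargs):
    # Returns a dict of values to override, the behavior the pipelines expect.
    return {"latents": callback_kwargs["latents"] * 0.5}

latents = 1.0  # stand-in for the latent tensor
for callback_on_step_end in (logging_callback, override_callback):
    callback_outputs = callback_on_step_end(0, 999, {"latents": latents})
    # The fix: only unpack overrides when the callback actually returned a dict.
    if callback_outputs is not None:
        latents = callback_outputs.pop("latents", latents)

Without the is-not-None check, the first callback would crash every denoising step even though it never intended to override anything.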
8 changes: 4 additions & 4 deletions examples/community/fresco_v2v.py
@@ -2467,10 +2467,10 @@ def __call__(
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
if callback_outputs is not None:
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
12 changes: 6 additions & 6 deletions examples/community/hd_painter.py
@@ -841,12 +841,12 @@ def __call__(
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
mask = callback_outputs.pop("mask", mask)
masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents)
if callback_outputs is not None:
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
mask = callback_outputs.pop("mask", mask)
masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents)

# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
8 changes: 4 additions & 4 deletions examples/community/ip_adapter_face_id.py
@@ -1088,10 +1088,10 @@ def __call__(
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
if callback_outputs is not None:
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
8 changes: 4 additions & 4 deletions examples/community/pipeline_animatediff_controlnet.py
@@ -1098,10 +1098,10 @@ def __call__(
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
if callback_outputs is not None:
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
8 changes: 4 additions & 4 deletions src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py
@@ -287,10 +287,10 @@ def __call__(
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

latents = callback_outputs.pop("latents", latents)
image_embeds = callback_outputs.pop("image_embeds", image_embeds)
negative_image_embeds = callback_outputs.pop("negative_image_embeds", negative_image_embeds)
if callback_outputs is not None:
latents = callback_outputs.pop("latents", latents)
image_embeds = callback_outputs.pop("image_embeds", image_embeds)
negative_image_embeds = callback_outputs.pop("negative_image_embeds", negative_image_embeds)

if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
@@ -363,10 +363,10 @@ def __call__(
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

latents = callback_outputs.pop("latents", latents)
image_embeds = callback_outputs.pop("image_embeds", image_embeds)
negative_image_embeds = callback_outputs.pop("negative_image_embeds", negative_image_embeds)
if callback_outputs is not None:
latents = callback_outputs.pop("latents", latents)
image_embeds = callback_outputs.pop("image_embeds", image_embeds)
negative_image_embeds = callback_outputs.pop("negative_image_embeds", negative_image_embeds)

if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
@@ -515,12 +515,12 @@ def __call__(
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

latents = callback_outputs.pop("latents", latents)
image_embeds = callback_outputs.pop("image_embeds", image_embeds)
negative_image_embeds = callback_outputs.pop("negative_image_embeds", negative_image_embeds)
masked_image = callback_outputs.pop("masked_image", masked_image)
mask_image = callback_outputs.pop("mask_image", mask_image)
if callback_outputs is not None:
latents = callback_outputs.pop("latents", latents)
image_embeds = callback_outputs.pop("image_embeds", image_embeds)
negative_image_embeds = callback_outputs.pop("negative_image_embeds", negative_image_embeds)
masked_image = callback_outputs.pop("masked_image", masked_image)
mask_image = callback_outputs.pop("mask_image", mask_image)

if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
@@ -516,11 +516,11 @@ def __call__(
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
text_encoder_hidden_states = callback_outputs.pop(
"text_encoder_hidden_states", text_encoder_hidden_states
if callback_outputs is not None:
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
text_encoder_hidden_states = callback_outputs.pop(
"text_encoder_hidden_states", text_encoder_hidden_states
)
text_mask = callback_outputs.pop("text_mask", text_mask)

12 changes: 6 additions & 6 deletions src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py
@@ -536,12 +536,12 @@ def __call__(
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
attention_mask = callback_outputs.pop("attention_mask", attention_mask)
negative_attention_mask = callback_outputs.pop("negative_attention_mask", negative_attention_mask)
if callback_outputs is not None:
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
attention_mask = callback_outputs.pop("attention_mask", attention_mask)
negative_attention_mask = callback_outputs.pop("negative_attention_mask", negative_attention_mask)

if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
@@ -604,12 +604,12 @@ def __call__(
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
attention_mask = callback_outputs.pop("attention_mask", attention_mask)
negative_attention_mask = callback_outputs.pop("negative_attention_mask", negative_attention_mask)
if callback_outputs is not None:
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
attention_mask = callback_outputs.pop("attention_mask", attention_mask)
negative_attention_mask = callback_outputs.pop("negative_attention_mask", negative_attention_mask)

if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
@@ -1161,10 +1161,10 @@ def __call__(
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

latents = callback_outputs.pop("latents", latents)
# prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
if callback_outputs is not None:
latents = callback_outputs.pop("latents", latents)
# prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
@@ -594,8 +594,8 @@ def __call__(
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

latents = callback_outputs.pop("latents", latents)
if callback_outputs is not None:
latents = callback_outputs.pop("latents", latents)

if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
12 changes: 6 additions & 6 deletions src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py
@@ -402,12 +402,12 @@ def __call__(
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

latents = callback_outputs.pop("latents", latents)
image_embeddings = callback_outputs.pop("image_embeddings", image_embeddings)
text_encoder_hidden_states = callback_outputs.pop(
"text_encoder_hidden_states", text_encoder_hidden_states
)
if callback_outputs is not None:
latents = callback_outputs.pop("latents", latents)
image_embeddings = callback_outputs.pop("image_embeddings", image_embeddings)
text_encoder_hidden_states = callback_outputs.pop(
"text_encoder_hidden_states", text_encoder_hidden_states
)

if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
12 changes: 6 additions & 6 deletions src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py
@@ -490,12 +490,12 @@ def __call__(
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

latents = callback_outputs.pop("latents", latents)
text_encoder_hidden_states = callback_outputs.pop(
"text_encoder_hidden_states", text_encoder_hidden_states
)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
if callback_outputs is not None:
latents = callback_outputs.pop("latents", latents)
text_encoder_hidden_states = callback_outputs.pop(
"text_encoder_hidden_states", text_encoder_hidden_states
)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)