Skip to content

Commit 556a1c8

Browse files
committed
inputs
1 parent 74cbf4d commit 556a1c8

File tree

1 file changed

+8
-8
lines changed

1 file changed

+8
-8
lines changed

docker_images/latent-to-image/app/pipelines/latent_to_image.py

Lines changed: 8 additions & 8 deletions
Original file line number · Diff line number · Diff line change
@@ -77,9 +77,9 @@ def _process_req(self, inputs, **kwargs):
         )
         if needs_upcasting:
             self.vae = self.vae.to(torch.float32)
-            latents = latents.to(self.device, torch.float32)
+            inputs = inputs.to(self.device, torch.float32)
         else:
-            latents = inputs.to(self.device, self.dtype)
+            inputs = inputs.to(self.device, self.dtype)

         # unscale/denormalize the latents
         # denormalize with the mean and std if available and not None
@@ -95,21 +95,21 @@ def _process_req(self, inputs, **kwargs):
             latents_mean = (
                 torch.tensor(self.vae.config.latents_mean)
                 .view(1, 4, 1, 1)
-                .to(latents.device, latents.dtype)
+                .to(inputs.device, inputs.dtype)
             )
             latents_std = (
                 torch.tensor(self.vae.config.latents_std)
                 .view(1, 4, 1, 1)
-                .to(latents.device, latents.dtype)
+                .to(inputs.device, inputs.dtype)
             )
-            latents = (
-                latents * latents_std / self.vae.config.scaling_factor + latents_mean
+            inputs = (
+                inputs * latents_std / self.vae.config.scaling_factor + latents_mean
             )
         else:
-            latents = latents / self.vae.config.scaling_factor
+            inputs = inputs / self.vae.config.scaling_factor

         with torch.no_grad():
-            image = self.vae.decode(latents, return_dict=False)[0]
+            image = self.vae.decode(inputs, return_dict=False)[0]

         if needs_upcasting:
             self.vae.to(dtype=torch.float16)

0 commit comments

Comments (0)