There are various use cases for wanting a sample to be seamless or "tile-able". The best way to achieve that is to let the convolutions in the U-Net wrap around at the image edges (circular padding) rather than pad with zeros. PyTorch supports this fairly easily via `padding_mode='circular'`; it just needs to be integrated into the gRPC server:
```python
import torch

def patch_conv(cls):
    # Wrap the original constructor so every Conv2d created afterwards
    # is built with circular (wrap-around) padding instead of zero padding.
    init = cls.__init__
    def __init__(self, *args, **kwargs):
        return init(self, *args, **kwargs, padding_mode='circular')
    cls.__init__ = __init__

# The patch must be applied before the pipeline is constructed.
patch_conv(torch.nn.Conv2d)

from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=YOUR_TOKEN)
pipe.to("cuda")
pipe("a photograph of an astronaut riding a horse").images[0].save("output.png")
```