Skip to content

Commit b7828a5

Browse files
committed
Replace deprecated stabilityai/stable-diffusion-2-* models with ones maintained by sd2-community.
Signed-off-by: Artur Kloniecki <arturx.kloniecki@intel.com>
1 parent 1d8504c commit b7828a5

File tree

6 files changed

+14
-14
lines changed

6 files changed

+14
-14
lines changed

docs/source/tutorials/stable_diffusion.mdx

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,7 @@ Here is an example:
7373
```python
7474
from optimum.habana.diffusers import GaudiDDIMScheduler, GaudiStableDiffusionPipeline
7575

76 - model_name = "stabilityai/stable-diffusion-2-1"
76 + model_name = "sd2-community/stable-diffusion-2-1"
7777

7878
scheduler = GaudiDDIMScheduler.from_pretrained(model_name, subfolder="scheduler")
7979

@@ -98,8 +98,8 @@ outputs = pipeline(
9898

9999
There are two different checkpoints for Stable Diffusion 2:
100100

101 - - use [stabilityai/stable-diffusion-2-1](https://huggingface.co/stabilityai/stable-diffusion-2-1) for generating 768x768 images
102 - - use [stabilityai/stable-diffusion-2-1-base](https://huggingface.co/stabilityai/stable-diffusion-2-1-base) for generating 512x512 images
101 + - use [sd2-community/stable-diffusion-2-1](https://huggingface.co/sd2-community/stable-diffusion-2-1) for generating 768x768 images
102 + - use [sd2-community/stable-diffusion-2-1-base](https://huggingface.co/sd2-community/stable-diffusion-2-1-base) for generating 512x512 images
103103

104104
</Tip>
105105

examples/stable-diffusion/depth_to_image_generation.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ def main():
5252

5353
parser.add_argument(
5454
"--model_name_or_path",
55 - default="stabilityai/stable-diffusion-2-depth",
55 + default="sd2-community/stable-diffusion-2-depth",
5656
type=str,
5757
help="Path to pre-trained model",
5858
)

examples/stable-diffusion/training/README.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,7 @@ Then proceed to training with command:
9595

9696
```bash
9797
PT_HPU_LAZY_MODE=1 python train_controlnet.py \
98 - --pretrained_model_name_or_path=stabilityai/stable-diffusion-2-1 \
98 + --pretrained_model_name_or_path=sd2-community/stable-diffusion-2-1 \
9999
--output_dir=/tmp/stable_diffusion2_1 \
100100
--dataset_name=fusing/fill50k \
101101
--resolution=512 \
@@ -120,7 +120,7 @@ After training completes, you can use `text_to_image_generation.py` sample to ru
120120

121121
```bash
122122
PT_HPU_LAZY_MODE=1 python ../text_to_image_generation.py \
123 - --model_name_or_path stabilityai/stable-diffusion-2-1 \
123 + --model_name_or_path sd2-community/stable-diffusion-2-1 \
124124
--controlnet_model_name_or_path /tmp/stable_diffusion2_1 \
125125
--prompts "pale golden rod circle with old lace background" \
126126
--control_image "./cnet/conditioning_image_1.png" \
@@ -226,7 +226,7 @@ To launch the multi-card Stable Diffusion training, use:
226226

227227
```bash
228228
PT_HPU_LAZY_MODE=1 python ../../gaudi_spawn.py --world_size 8 --use_mpi train_dreambooth.py \
229 - --pretrained_model_name_or_path="stabilityai/stable-diffusion-2-1" \
229 + --pretrained_model_name_or_path="sd2-community/stable-diffusion-2-1" \
230230
--instance_data_dir="dog" \
231231
--output_dir="dog_sd" \
232232
--class_data_dir="path-to-class-images" \
@@ -265,7 +265,7 @@ To run the multi-card training, use:
265265

266266
```bash
267267
PT_HPU_LAZY_MODE=1 python ../../gaudi_spawn.py --world_size 8 --use_mpi train_dreambooth.py \
268 - --pretrained_model_name_or_path="stabilityai/stable-diffusion-2-1" \
268 + --pretrained_model_name_or_path="sd2-community/stable-diffusion-2-1" \
269269
--instance_data_dir="dog" \
270270
--output_dir="dog_sd" \
271271
--class_data_dir="path-to-class-images" \
@@ -310,7 +310,7 @@ After training completes, you can use `text_to_image_generation.py` sample for i
310310

311311
```bash
312312
PT_HPU_LAZY_MODE=1 python ../text_to_image_generation.py \
313 - --model_name_or_path stabilityai/stable-diffusion-2-1 \
313 + --model_name_or_path sd2-community/stable-diffusion-2-1 \
314314
--unet_adapter_name_or_path dog_sd/unet \
315315
--prompts "a sks dog" \
316316
--num_images_per_prompt 5 \

optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -368,7 +368,7 @@ def __call__(
368368
>>> from diffusers import StableDiffusionDepth2ImgPipeline
369369
370370
>>> pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
371 - ... "stabilityai/stable-diffusion-2-depth",
371 + ... "sd2-community/stable-diffusion-2-depth",
372372
... torch_dtype=torch.float16,
373373
... )
374374
>>> pipe.to("cuda")

optimum/habana/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -339,7 +339,7 @@ def __call__(
339339
>>> mask_image = download_image(mask_url).resize((512, 512))
340340
341341
>>> pipe = StableDiffusionInpaintPipeline.from_pretrained(
342 - ... "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
342 + ... "sd2-community/stable-diffusion-2-inpainting", torch_dtype=torch.float16
343343
... )
344344
>>> pipe = pipe.to("cuda")
345345

tests/test_diffusers.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -793,7 +793,7 @@ def test_no_throughput_regression_autocast(self):
793793
]
794794
num_images_per_prompt = 28
795795
batch_size = 7
796 - model_name = "stabilityai/stable-diffusion-2-1"
796 + model_name = "sd2-community/stable-diffusion-2-1"
797797
scheduler = GaudiDDIMScheduler.from_pretrained(model_name, subfolder="scheduler")
798798
pipeline = GaudiStableDiffusionPipeline.from_pretrained(
799799
model_name,
@@ -2635,7 +2635,7 @@ def test_depth2img_pipeline_hpu_graphs(self):
26352635
@legacy
26362636
def test_depth2img_pipeline(self):
26372637
gaudi_config = GaudiConfig(use_torch_autocast=True)
2638 - model_name = "stabilityai/stable-diffusion-2-depth"
2638 + model_name = "sd2-community/stable-diffusion-2-depth"
26392639
scheduler = GaudiDDIMScheduler.from_pretrained(model_name, subfolder="scheduler")
26402640

26412641
pipe = GaudiStableDiffusionDepth2ImgPipeline.from_pretrained(
@@ -5687,7 +5687,7 @@ def test_stable_diffusion_inpaint_no_throughput_regression(self):
56875687
prompts = [
56885688
"concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k",
56895689
]
5690 - model_name = "stabilityai/stable-diffusion-2-inpainting"
5690 + model_name = "sd2-community/stable-diffusion-2-inpainting"
56915691
num_images_per_prompt = 12
56925692
batch_size = 4
56935693
pipeline = GaudiStableDiffusionInpaintPipeline.from_pretrained(

0 commit comments

Comments (0)