Skip to content

Commit eb23a2f

Browse files
authored
Update dependencies for use with torch 2.4.1 and composer 0.25.0 (#176)
1 parent 588147f commit eb23a2f

File tree

7 files changed

+35
-12
lines changed

7 files changed

+35
-12
lines changed

.github/workflows/code-quality.yaml

+1-1
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,8 @@ jobs:
2424
strategy:
2525
matrix:
2626
python_version:
27-
- "3.9"
2827
- "3.10"
28+
- "3.11"
2929
pip_deps:
3030
- "[dev]"
3131
steps:

.github/workflows/docker.yaml

+6
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,12 @@ jobs:
2929
- name: "2.4.0_cu124_aws"
3030
base_image: mosaicml/pytorch:2.4.0_cu124-python3.11-ubuntu20.04-aws
3131
dep_groups: "[all]"
32+
- name: "2.4.1_cu124"
33+
base_image: mosaicml/pytorch:2.4.1_cu124-python3.11-ubuntu20.04
34+
dep_groups: "[all]"
35+
- name: "2.4.1_cu124_aws"
36+
base_image: mosaicml/pytorch:2.4.1_cu124-python3.11-ubuntu20.04-aws
37+
dep_groups: "[all]"
3238
steps:
3339

3440
- name: Checkout

.github/workflows/pr-cpu.yaml

+4-4
Original file line numberDiff line numberDiff line change
@@ -19,12 +19,12 @@ jobs:
1919
strategy:
2020
matrix:
2121
include:
22-
- name: 'cpu-3.9-1.12'
23-
container: mosaicml/pytorch:1.12.1_cpu-python3.9-ubuntu20.04
22+
- name: 'cpu-3.10-2.1'
23+
container: mosaicml/pytorch:2.1.2_cpu-python3.10-ubuntu20.04
2424
markers: 'not gpu'
2525
pytest_command: 'coverage run -m pytest'
26-
- name: 'cpu-3.10-1.13'
27-
container: mosaicml/pytorch:1.13.1_cpu-python3.10-ubuntu20.04
26+
- name: 'cpu-3.11-2.4'
27+
container: mosaicml/pytorch:2.4.1_cpu-python3.11-ubuntu20.04
2828
markers: 'not gpu'
2929
pytest_command: 'coverage run -m pytest'
3030
name: ${{ matrix.name }}

diffusion/datasets/image_caption_latents.py

+1
Original file line numberDiff line numberDiff line change
@@ -273,6 +273,7 @@ def build_streaming_image_caption_latents_dataloader(
273273
text_latent_shapes=text_latent_shapes,
274274
attention_mask_keys=attention_mask_keys,
275275
latent_dtype=dtype,
276+
batch_size=batch_size,
276277
**streaming_kwargs,
277278
)
278279

diffusion/models/models.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -125,7 +125,7 @@ def stable_diffusion_2(
125125
precision = torch.float16 if encode_latents_in_fp16 else None
126126
# Make the text encoder
127127
text_encoder = CLIPTextModel.from_pretrained(model_name, subfolder='text_encoder', torch_dtype=precision)
128-
tokenizer = CLIPTokenizer.from_pretrained(model_name, subfolder='tokenizer')
128+
tokenizer = CLIPTokenizer.from_pretrained(model_name, subfolder='tokenizer', clean_up_tokenization_spaces=True)
129129

130130
# Make the autoencoder
131131
if autoencoder_path is None:

diffusion/models/precomputed_text_latent_diffusion.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -189,7 +189,7 @@ def set_rng_generator(self, rng_generator: torch.Generator):
189189
self.rng_generator = rng_generator
190190

191191
def encode_images(self, inputs, dtype=torch.bfloat16):
192-
with torch.amp.autocast('cuda', enabled=False):
192+
with torch.autocast(device_type='cuda', enabled=False):
193193
latents = self.vae.encode(inputs.to(dtype))['latent_dist'].sample().data
194194
latents = (latents - self.latent_mean) / self.latent_std # scale latents
195195
return latents

setup.py

+21-5
Original file line numberDiff line numberDiff line change
@@ -6,11 +6,27 @@
66
from setuptools import find_packages, setup
77

88
install_requires = [
9-
'mosaicml==0.20.1', 'mosaicml-streaming==0.7.4', 'hydra-core>=1.2', 'hydra-colorlog>=1.1.0',
10-
'diffusers[torch]==0.26.3', 'transformers[torch]==4.38.2', 'huggingface_hub==0.21.2', 'wandb==0.16.3',
11-
'xformers==0.0.23.post1', 'triton==2.1.0', 'torchmetrics[image]==1.3.1', 'lpips==0.1.4', 'clean-fid==0.1.35',
12-
'clip@git+https://github.com/openai/CLIP.git@a1d071733d7111c9c014f024669f959182114e33', 'gradio==4.19.2',
13-
'datasets==2.19.2', 'peft==0.12.0'
9+
'mosaicml==0.25.0',
10+
'mosaicml-streaming==0.9.0',
11+
'hydra-core>=1.2',
12+
'hydra-colorlog>=1.1.0',
13+
'diffusers[torch]==0.30.3',
14+
'transformers[torch]==4.44.2',
15+
'huggingface-hub[hf_transfer]>=0.23.2',
16+
'wandb>=0.18.1',
17+
'xformers==0.0.28.post1',
18+
'triton>=2.1.0',
19+
'torchmetrics[image]>=1.4.0.post0',
20+
'lpips==0.1.4',
21+
'clean-fid==0.1.35',
22+
'clip@git+https://github.com/openai/CLIP.git@a1d071733d7111c9c014f024669f959182114e33',
23+
'gradio==4.44.0',
24+
'datasets==2.19.2',
25+
'peft==0.12.0',
26+
'numpy<2.0.0',
27+
'sentencepiece',
28+
'mlflow',
29+
'pynvml',
1430
]
1531

1632
extras_require = {}

0 comments on commit eb23a2f

Comments (0)