Skip to content

Commit 02f59c7

Browse files
committed
{megapaks} Update PyTorch 2.9.0 to 2.9.1
1 parent 7a5fc3b commit 02f59c7

File tree

2 files changed

+8
-8
lines changed

2 files changed

+8
-8
lines changed

cu126-megapak/Dockerfile

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -235,7 +235,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
235235
&& pip install \
236236
--upgrade pip wheel setuptools \
237237
&& pip install \
238-
--dry-run xformers==0.0.33.post1 torchvision torchaudio \
238+
--dry-run xformers==0.0.33.post2 torchvision torchaudio \
239239
--index-url https://download.pytorch.org/whl/cu126
240240

241241
# Here's some hack. To reduce image size & single layer size, we:
@@ -246,14 +246,14 @@ RUN --mount=type=cache,target=/root/.cache/pip \
246246
# Note: xFormers is not always released with latest PyTorch, so using `pip install --no-deps` is not a good idea.
247247
RUN --mount=type=cache,target=/root/.cache/pip \
248248
pip install \
249-
xformers==0.0.33.post1 torchvision torchaudio \
249+
xformers==0.0.33.post2 torchvision torchaudio \
250250
--index-url https://download.pytorch.org/whl/cu126 \
251251
&& pip uninstall --yes torch \
252252
&& find /usr/local/lib/python3.12/site-packages/nvidia/ -mindepth 1 -maxdepth 1 ! -name 'nccl' ! -name 'nvshmem' -exec rm -rfv {} +
253253

254254
RUN --mount=type=cache,target=/root/.cache/pip \
255255
pip install \
256-
xformers==0.0.33.post1 torchvision torchaudio \
256+
xformers==0.0.33.post2 torchvision torchaudio \
257257
--index-url https://download.pytorch.org/whl/cu126
258258

259259
ENV LD_LIBRARY_PATH="${LD_LIBRARY_PATH}\
@@ -313,7 +313,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
313313
https://github.com/nunchaku-tech/nunchaku/releases/download/v1.0.2/nunchaku-1.0.2+torch2.9-cp312-cp312-linux_x86_64.whl \
314314
# FlashAttention (version pair with xFormers, binary pair with PyTorch & CUDA)
315315
&& pip install \
316-
https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.4.17/flash_attn-2.8.3+cu126torch2.9-cp312-cp312-linux_x86_64.whl
316+
https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.5.4/flash_attn-2.8.3+cu126torch2.9-cp312-cp312-linux_x86_64.whl
317317

318318
################################################################################
319319
# Bundle ComfyUI in the image

cu128-megapak/Dockerfile

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -235,7 +235,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
235235
&& pip install \
236236
--upgrade pip wheel setuptools \
237237
&& pip install \
238-
--dry-run xformers==0.0.33.post1 torchvision torchaudio \
238+
--dry-run xformers==0.0.33.post2 torchvision torchaudio \
239239
--index-url https://download.pytorch.org/whl/cu128
240240

241241
# Here's some hack. To reduce image size & single layer size, we:
@@ -246,14 +246,14 @@ RUN --mount=type=cache,target=/root/.cache/pip \
246246
# Note: xFormers is not always released with latest PyTorch, so using `pip install --no-deps` is not a good idea.
247247
RUN --mount=type=cache,target=/root/.cache/pip \
248248
pip install \
249-
xformers==0.0.33.post1 torchvision torchaudio \
249+
xformers==0.0.33.post2 torchvision torchaudio \
250250
--index-url https://download.pytorch.org/whl/cu128 \
251251
&& pip uninstall --yes torch \
252252
&& find /usr/local/lib/python3.12/site-packages/nvidia/ -mindepth 1 -maxdepth 1 ! -name 'nccl' ! -name 'nvshmem' -exec rm -rfv {} +
253253

254254
RUN --mount=type=cache,target=/root/.cache/pip \
255255
pip install \
256-
xformers==0.0.33.post1 torchvision torchaudio \
256+
xformers==0.0.33.post2 torchvision torchaudio \
257257
--index-url https://download.pytorch.org/whl/cu128
258258

259259
ENV LD_LIBRARY_PATH="${LD_LIBRARY_PATH}\
@@ -313,7 +313,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
313313
https://github.com/nunchaku-tech/nunchaku/releases/download/v1.0.2/nunchaku-1.0.2+torch2.9-cp312-cp312-linux_x86_64.whl \
314314
# FlashAttention (version pair with xFormers, binary pair with PyTorch & CUDA)
315315
&& pip install \
316-
https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.4.17/flash_attn-2.8.3+cu128torch2.9-cp312-cp312-linux_x86_64.whl \
316+
https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.5.4/flash_attn-2.8.3+cu128torch2.9-cp312-cp312-linux_x86_64.whl \
317317
# SageAttention
318318
&& pip install \
319319
https://github.com/YanWenKun/ComfyUI-Containerfiles/releases/download/sageattn2/sageattention-2.2.0-cp312-cp312-linux_x86_64.whl \

0 commit comments

Comments (0)