@@ -53,8 +53,6 @@ ENV PYTHONUNBUFFERED=1
 ENV PYTHONIOENCODING=UTF-8
 ENV LANG=C.UTF-8
 ENV LC_ALL=C.UTF-8
-ENV UV_VERSION=0.8.22
-ENV UV_SYSTEM_PYTHON=1
 
 ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
 
@@ -82,15 +80,8 @@ ENV LD_LIBRARY_PATH="/usr/local/lib:/opt/amazon/ofi-nccl/lib/x86_64-linux-gnu:/o
 # Python Path
 ENV PATH="/usr/local/bin:${PATH}"
 
-
-ENV UV_CACHE_DIR=/root/.cache/uv \
-    UV_SYSTEM_PYTHON=1
-
-RUN curl -LsSf https://astral.sh/uv/install.sh | sh \
-    && ln -s /root/.local/bin/uv /usr/local/bin/uv
-
 # Install common conda packages
-RUN uv pip install --no-cache-dir \
+RUN pip install --no-cache-dir \
     cython \
     cryptography \
     pyOpenSSL \
@@ -123,11 +114,11 @@ RUN uv pip install --no-cache-dir \
     tornado>=6.5.1
 
 # Install PyTorch
-RUN uv pip install --no-cache-dir -U torch==${PYTORCH_VERSION} \
+RUN pip install --no-cache-dir -U torch==${PYTORCH_VERSION} \
     torchvision==${TORCHVISION_VERSION} \
     torchaudio==${TORCHAUDIO_VERSION} \
     --index-url https://download.pytorch.org/whl/cu129 \
-    && uv pip install --no-cache-dir -U torchtnt==${TORCHTNT_VERSION} \
+    && pip install --no-cache-dir -U torchtnt==${TORCHTNT_VERSION} \
     torchdata==${TORCHDATA_VERSION} \
     triton \
     s3torchconnector \
@@ -140,18 +131,18 @@ RUN uv pip install --no-cache-dir -U torch==${PYTORCH_VERSION} \
     thinc==8.3.4 \
     blis \
     numpy \
-    && uv pip uninstall dataclasses
+    && pip uninstall -y dataclasses
 
 # Install flash attn and NVIDIA transformer engine.
 # Optionally set NVTE_FRAMEWORK to avoid bringing in additional frameworks during TE install
 ENV NVTE_FRAMEWORK=pytorch
 
 RUN curl -LO https://github.com/Dao-AILab/flash-attention/releases/download/v${FLASH_ATTN_VERSION}/flash_attn-${FLASH_ATTN_VERSION}+cu12torch2.8cxx11abiTRUE-cp312-cp312-linux_x86_64.whl \
-    && uv pip install flash_attn-${FLASH_ATTN_VERSION}+cu12torch2.8cxx11abiTRUE-cp312-cp312-linux_x86_64.whl --no-build-isolation \
+    && pip install flash_attn-${FLASH_ATTN_VERSION}+cu12torch2.8cxx11abiTRUE-cp312-cp312-linux_x86_64.whl --no-build-isolation \
     && rm flash_attn-${FLASH_ATTN_VERSION}+cu12torch2.8cxx11abiTRUE-cp312-cp312-linux_x86_64.whl
 
 # Install TE using instructions from https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/installation.html
-RUN uv pip install --no-cache-dir git+https://github.com/NVIDIA/TransformerEngine.git@release_v${TE_VERSION} --no-build-isolation
+RUN pip install --no-cache-dir git+https://github.com/NVIDIA/TransformerEngine.git@release_v${TE_VERSION} --no-build-isolation
 
 RUN curl -o /license.txt https://aws-dlc-licenses.s3.amazonaws.com/pytorch-2.8/license.txt
 
@@ -254,15 +245,15 @@ ARG PYTHON
 WORKDIR /
 
 # Install SM packages
-RUN uv pip install --no-cache-dir -U \
+RUN pip install --no-cache-dir -U \
     smclarify \
     "sagemaker>=2" \
     sagemaker-experiments \
     sagemaker-pytorch-training \
     sagemaker-training
 
 # Install extra packages
-RUN uv pip install --no-cache-dir -U \
+RUN pip install --no-cache-dir -U \
     bokeh \
     imageio \
     numba \