Skip to content

Commit 18e1b16

Browse files
authored
Merge branch 'master' into py27
2 parents 8d7b449 + 362173a commit 18e1b16

File tree

22 files changed

+1812
-66
lines changed

22 files changed

+1812
-66
lines changed

base/buildspec-cu128-ubuntu24.yml

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,54 @@
1+
account_id: &ACCOUNT_ID <set-$ACCOUNT_ID-in-environment>
2+
prod_account_id: &PROD_ACCOUNT_ID 763104351884
3+
region: &REGION <set-$REGION-in-environment>
4+
framework: &FRAMEWORK base
5+
version: &VERSION 12.8.1
6+
short_version: &SHORT_VERSION "12.8"
7+
arch_type: &ARCH_TYPE x86_64
8+
autopatch_build: "False"
9+
10+
repository_info:
11+
base_repository: &BASE_REPOSITORY
12+
image_type: &IMAGE_TYPE gpu
13+
root: .
14+
repository_name: &REPOSITORY_NAME !join [ pr, "-", *FRAMEWORK ]
15+
repository: &REPOSITORY !join [ *ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *REPOSITORY_NAME ]
16+
release_repository_name: &RELEASE_REPOSITORY_NAME !join [ *FRAMEWORK ]
17+
release_repository: &RELEASE_REPOSITORY !join [ *PROD_ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *RELEASE_REPOSITORY_NAME ]
18+
19+
context:
20+
base_context: &BASE_CONTEXT
21+
deep_learning_container:
22+
source: src/deep_learning_container.py
23+
target: deep_learning_container.py
24+
install_python:
25+
source: scripts/install_python.sh
26+
target: install_python.sh
27+
install_cuda:
28+
source: scripts/install_cuda.sh
29+
target: install_cuda.sh
30+
install_efa:
31+
source: scripts/install_efa.sh
32+
target: install_efa.sh
33+
34+
images:
35+
base_x86_64_gpu_cuda128:
36+
<<: *BASE_REPOSITORY
37+
context:
38+
<<: *BASE_CONTEXT
39+
image_size_baseline: 11000
40+
device_type: &DEVICE_TYPE gpu
41+
cuda_version: &CUDA_VERSION cu128
42+
python_version: &DOCKER_PYTHON_VERSION py3
43+
tag_python_version: &TAG_PYTHON_VERSION py312
44+
os_version: &OS_VERSION ubuntu24.04
45+
tag: !join [ *VERSION, "-", *DEVICE_TYPE, "-", *TAG_PYTHON_VERSION, "-", *CUDA_VERSION, "-", *OS_VERSION, "-ec2" ]
46+
latest_release_tag: !join [ *VERSION, "-", *DEVICE_TYPE, "-", *TAG_PYTHON_VERSION, "-", *CUDA_VERSION, "-", *OS_VERSION, "-ec2" ]
47+
docker_file: !join [ *FRAMEWORK, /, *ARCH_TYPE, /, *DEVICE_TYPE, /, *CUDA_VERSION, /, *OS_VERSION, /Dockerfile ]
48+
target: final
49+
build: true
50+
enable_common_stage_build: false
51+
test_configs:
52+
test_platforms:
53+
- sanity
54+
- security

base/buildspec-cu129-ubuntu22.yml

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,54 @@
1+
account_id: &ACCOUNT_ID <set-$ACCOUNT_ID-in-environment>
2+
prod_account_id: &PROD_ACCOUNT_ID 763104351884
3+
region: &REGION <set-$REGION-in-environment>
4+
framework: &FRAMEWORK base
5+
version: &VERSION 12.9.1
6+
short_version: &SHORT_VERSION "12.9"
7+
arch_type: &ARCH_TYPE x86_64
8+
autopatch_build: "False"
9+
10+
repository_info:
11+
base_repository: &BASE_REPOSITORY
12+
image_type: &IMAGE_TYPE gpu
13+
root: .
14+
repository_name: &REPOSITORY_NAME !join [ pr, "-", *FRAMEWORK ]
15+
repository: &REPOSITORY !join [ *ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *REPOSITORY_NAME ]
16+
release_repository_name: &RELEASE_REPOSITORY_NAME !join [ *FRAMEWORK ]
17+
release_repository: &RELEASE_REPOSITORY !join [ *PROD_ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *RELEASE_REPOSITORY_NAME ]
18+
19+
context:
20+
base_context: &BASE_CONTEXT
21+
deep_learning_container:
22+
source: src/deep_learning_container.py
23+
target: deep_learning_container.py
24+
install_python:
25+
source: scripts/install_python.sh
26+
target: install_python.sh
27+
install_cuda:
28+
source: scripts/install_cuda.sh
29+
target: install_cuda.sh
30+
install_efa:
31+
source: scripts/install_efa.sh
32+
target: install_efa.sh
33+
34+
images:
35+
base_x86_64_gpu_cuda129_ubuntu22:
36+
<<: *BASE_REPOSITORY
37+
context:
38+
<<: *BASE_CONTEXT
39+
image_size_baseline: 11000
40+
device_type: &DEVICE_TYPE gpu
41+
cuda_version: &CUDA_VERSION cu129
42+
python_version: &DOCKER_PYTHON_VERSION py3
43+
tag_python_version: &TAG_PYTHON_VERSION py312
44+
os_version: &OS_VERSION ubuntu22.04
45+
tag: !join [ *VERSION, "-", *DEVICE_TYPE, "-", *TAG_PYTHON_VERSION, "-", *CUDA_VERSION, "-", *OS_VERSION, "-ec2" ]
46+
latest_release_tag: !join [ *VERSION, "-", *DEVICE_TYPE, "-", *TAG_PYTHON_VERSION, "-", *CUDA_VERSION, "-", *OS_VERSION, "-ec2" ]
47+
docker_file: !join [ *FRAMEWORK, /, *ARCH_TYPE, /, *DEVICE_TYPE, /, *CUDA_VERSION, /, *OS_VERSION, /Dockerfile ]
48+
target: final
49+
build: true
50+
enable_common_stage_build: false
51+
test_configs:
52+
test_platforms:
53+
- sanity
54+
- security

base/buildspec.yml

Lines changed: 1 addition & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -1,54 +1 @@
1-
account_id: &ACCOUNT_ID <set-$ACCOUNT_ID-in-environment>
2-
prod_account_id: &PROD_ACCOUNT_ID 763104351884
3-
region: &REGION <set-$REGION-in-environment>
4-
framework: &FRAMEWORK base
5-
version: &VERSION 12.8.1
6-
short_version: &SHORT_VERSION "12.8"
7-
arch_type: &ARCH_TYPE x86_64
8-
autopatch_build: "False"
9-
10-
repository_info:
11-
base_repository: &BASE_REPOSITORY
12-
image_type: &IMAGE_TYPE gpu
13-
root: .
14-
repository_name: &REPOSITORY_NAME !join [ pr, "-", *FRAMEWORK ]
15-
repository: &REPOSITORY !join [ *ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *REPOSITORY_NAME ]
16-
release_repository_name: &RELEASE_REPOSITORY_NAME !join [ *FRAMEWORK ]
17-
release_repository: &RELEASE_REPOSITORY !join [ *PROD_ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *RELEASE_REPOSITORY_NAME ]
18-
19-
context:
20-
base_context: &BASE_CONTEXT
21-
deep_learning_container:
22-
source: src/deep_learning_container.py
23-
target: deep_learning_container.py
24-
install_python:
25-
source: scripts/install_python.sh
26-
target: install_python.sh
27-
install_cuda:
28-
source: scripts/install_cuda.sh
29-
target: install_cuda.sh
30-
install_efa:
31-
source: scripts/install_efa.sh
32-
target: install_efa.sh
33-
34-
images:
35-
base_x86_64_gpu_cuda128:
36-
<<: *BASE_REPOSITORY
37-
context:
38-
<<: *BASE_CONTEXT
39-
image_size_baseline: 11000
40-
device_type: &DEVICE_TYPE gpu
41-
cuda_version: &CUDA_VERSION cu128
42-
python_version: &DOCKER_PYTHON_VERSION py3
43-
tag_python_version: &TAG_PYTHON_VERSION py312
44-
os_version: &OS_VERSION ubuntu24.04
45-
tag: !join [ *VERSION, "-", *DEVICE_TYPE, "-", *TAG_PYTHON_VERSION, "-", *CUDA_VERSION, "-", *OS_VERSION, "-ec2" ]
46-
latest_release_tag: !join [ *VERSION, "-", *DEVICE_TYPE, "-", *TAG_PYTHON_VERSION, "-", *CUDA_VERSION, "-", *OS_VERSION, "-ec2" ]
47-
docker_file: !join [ *FRAMEWORK, /, *ARCH_TYPE, /, *DEVICE_TYPE, /, *CUDA_VERSION, /Dockerfile ]
48-
target: final
49-
build: true
50-
enable_common_stage_build: false
51-
test_configs:
52-
test_platforms:
53-
- sanity
54-
- security
1+
buildspec_pointer: buildspec-cu129-ubuntu22.yml
File renamed without changes.
Lines changed: 125 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,125 @@
1+
ARG PYTHON="python3"
2+
ARG PYTHON_VERSION="3.12.10"
3+
ARG PYTHON_SHORT_VERSION="3.12"
4+
ARG CUDA_MAJOR="12"
5+
ARG CUDA_MINOR="9"
6+
ARG EFA_VERSION="1.43.1"
7+
FROM nvidia/cuda:12.9.1-base-ubuntu22.04 AS base-builder
8+
9+
10+
RUN mv /usr/local/cuda/compat /usr/local \
11+
&& apt-get update \
12+
&& apt-get -y upgrade --only-upgrade systemd \
13+
&& apt-get install -y --allow-change-held-packages --no-install-recommends \
14+
automake \
15+
build-essential \
16+
ca-certificates \
17+
cmake \
18+
curl \
19+
emacs \
20+
git \
21+
jq \
22+
libcurl4-openssl-dev \
23+
libglib2.0-0 \
24+
libegl1 \
25+
libgl1 \
26+
libsm6 \
27+
libssl-dev \
28+
libxext6 \
29+
libxrender-dev \
30+
zlib1g-dev \
31+
unzip \
32+
vim \
33+
wget \
34+
libhwloc-dev \
35+
libgomp1 \
36+
libibverbs-dev \
37+
libnuma1 \
38+
libnuma-dev \
39+
libtool \
40+
openssl \
41+
python3-dev \
42+
autoconf \
43+
pkg-config \
44+
check \
45+
libsubunit0 \
46+
libsubunit-dev \
47+
libffi-dev \
48+
libbz2-dev \
49+
liblzma-dev \
50+
&& apt-get autoremove -y \
51+
&& apt-get clean \
52+
&& rm -rf /var/lib/apt/lists/*
53+
54+
##############################################################################
55+
FROM base-builder AS python-builder
56+
ARG PYTHON_VERSION
57+
COPY install_python.sh install_python.sh
58+
RUN bash install_python.sh ${PYTHON_VERSION} && rm install_python.sh
59+
60+
##############################################################################
61+
FROM base-builder AS cuda-builder
62+
ARG CUDA_MAJOR
63+
ARG CUDA_MINOR
64+
COPY install_cuda.sh install_cuda.sh
65+
RUN bash install_cuda.sh "${CUDA_MAJOR}.${CUDA_MINOR}" && rm install_cuda.sh
66+
67+
##############################################################################
68+
FROM nvidia/cuda:12.9.1-base-ubuntu22.04 AS final
69+
ARG PYTHON
70+
ARG PYTHON_SHORT_VERSION
71+
ARG CUDA_MAJOR
72+
ARG CUDA_MINOR
73+
ARG EFA_VERSION
74+
LABEL maintainer="Amazon AI"
75+
LABEL dlc_major_version="1"
76+
ENV DEBIAN_FRONTEND=noninteractive \
77+
LANG=C.UTF-8 \
78+
LC_ALL=C.UTF-8 \
79+
DLC_CONTAINER_TYPE=base \
80+
# Python won't try to write .pyc or .pyo files on the import of source modules
81+
# Force stdin, stdout and stderr to be totally unbuffered. Good for logging
82+
PYTHONDONTWRITEBYTECODE=1 \
83+
PYTHONUNBUFFERED=1 \
84+
PYTHONIOENCODING=UTF-8 \
85+
CUDA_HOME="/usr/local/cuda" \
86+
PATH="/opt/amazon/openmpi/bin:/opt/amazon/efa/bin:/usr/local/cuda/bin:${PATH}" \
87+
LD_LIBRARY_PATH="/usr/local/lib:/usr/local/cuda/lib64:/opt/amazon/ofi-nccl/lib:/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:${LD_LIBRARY_PATH}"
88+
89+
WORKDIR /
90+
91+
# + python and pip packages (awscli, boto3, requests)
92+
COPY --from=python-builder /usr/local/lib/python${PYTHON_SHORT_VERSION} /usr/local/lib/python${PYTHON_SHORT_VERSION}
93+
COPY --from=python-builder /usr/local/include/python${PYTHON_SHORT_VERSION} /usr/local/include/python${PYTHON_SHORT_VERSION}
94+
COPY --from=python-builder /usr/local/bin /usr/local/bin
95+
# + cuda-toolkit, cudnn, nccl
96+
COPY --from=cuda-builder /usr/local/cuda-${CUDA_MAJOR}.${CUDA_MINOR} /usr/local/cuda-${CUDA_MAJOR}.${CUDA_MINOR}
97+
COPY install_efa.sh install_efa.sh
98+
COPY deep_learning_container.py /usr/local/bin/deep_learning_container.py
99+
COPY bash_telemetry.sh /usr/local/bin/bash_telemetry.sh
100+
RUN chmod +x /usr/local/bin/deep_learning_container.py && \
101+
chmod +x /usr/local/bin/bash_telemetry.sh && \
102+
echo 'source /usr/local/bin/bash_telemetry.sh' >> /etc/bash.bashrc && \
103+
# Install EFA
104+
bash install_efa.sh ${EFA_VERSION} && \
105+
rm install_efa.sh && \
106+
# OSS compliance
107+
apt-get update && \
108+
apt-get upgrade -y && \
109+
apt-get install -y --allow-change-held-packages --no-install-recommends \
110+
unzip \
111+
wget && \
112+
apt-get clean && \
113+
HOME_DIR=/root && \
114+
curl -o ${HOME_DIR}/oss_compliance.zip https://aws-dlinfra-utilities.s3.amazonaws.com/oss_compliance.zip && \
115+
unzip ${HOME_DIR}/oss_compliance.zip -d ${HOME_DIR}/ && \
116+
cp ${HOME_DIR}/oss_compliance/test/testOSSCompliance /usr/local/bin/testOSSCompliance && \
117+
chmod +x /usr/local/bin/testOSSCompliance && \
118+
chmod +x ${HOME_DIR}/oss_compliance/generate_oss_compliance.sh && \
119+
${HOME_DIR}/oss_compliance/generate_oss_compliance.sh ${HOME_DIR} ${PYTHON} && \
120+
rm -rf ${HOME_DIR}/oss_compliance* && \
121+
rm -rf /tmp/tmp* && \
122+
rm -rf /var/lib/apt/lists/* && \
123+
rm -rf /root/.cache | true
124+
125+
CMD ["/bin/bash"]

dlc_developer_config.toml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -118,6 +118,9 @@ use_scheduler = false
118118

119119
### TRAINING PR JOBS ###
120120

121+
# Base
122+
dlc-pr-base = ""
123+
121124
# Standard Framework Training
122125
dlc-pr-pytorch-training = "pytorch/training/buildspec-2-7-ec2.yml"
123126
dlc-pr-tensorflow-2-training = ""

scripts/install_cuda.sh

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -87,12 +87,53 @@ function install_cuda128_stack {
8787
ldconfig
8888
}
8989

90+
function install_cuda129_stack {
91+
CUDNN_VERSION="9.10.2.21"
92+
NCCL_VERSION="v2.27.3-1"
93+
CUDA_HOME="/usr/local/cuda"
94+
95+
# move cuda-compt and remove existing cuda dir from nvidia/cuda:**.*.*-base-*
96+
rm -rf /usr/local/cuda-*
97+
rm -rf /usr/local/cuda
98+
99+
# install CUDA
100+
wget -q https://developer.download.nvidia.com/compute/cuda/12.9.1/local_installers/cuda_12.9.1_575.57.08_linux.run
101+
chmod +x cuda_12.9.1_575.57.08_linux.run
102+
./cuda_12.9.1_575.57.08_linux.run --toolkit --silent
103+
rm -f cuda_12.9.1_575.57.08_linux.run
104+
ln -s /usr/local/cuda-12.9 /usr/local/cuda
105+
# bring back cuda-compat
106+
mv /usr/local/compat /usr/local/cuda/compat
107+
108+
# install cudnn
109+
mkdir -p /tmp/cudnn
110+
cd /tmp/cudnn
111+
wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz
112+
tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz
113+
cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/
114+
cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/
115+
116+
# install nccl
117+
mkdir -p /tmp/nccl
118+
cd /tmp/nccl
119+
git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git
120+
cd nccl
121+
make -j src.build
122+
cp -a build/include/* /usr/local/cuda/include/
123+
cp -a build/lib/* /usr/local/cuda/lib64/
124+
125+
prune_cuda
126+
ldconfig
127+
}
128+
90129
# idiomatic parameter and option handling in sh
91130
while test $# -gt 0
92131
do
93132
case "$1" in
94133
12.8) install_cuda128_stack;
95134
;;
135+
12.9) install_cuda129_stack;
136+
;;
96137
*) echo "bad argument $1"; exit 1
97138
;;
98139
esac

test/dlc_tests/container_tests/bin/efa/testEFA

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ validate_all_reduce_performance_logs(){
3636
# EFA 1.37.0 using "Using network Libfabric" instead of "Using network AWS Libfabric"
3737
grep -E "Using network (AWS )?Libfabric" ${TRAINING_LOG} || { echo "efa is not working, please check if it is installed correctly"; exit 1; }
3838
if [[ ${INSTANCE_TYPE} == p4d* || ${INSTANCE_TYPE} == p5* ]]; then
39-
grep "Setting NCCL_TOPO_FILE environment variable to" ${TRAINING_LOG}
39+
grep "NCCL_TOPO_FILE set by environment to" ${TRAINING_LOG}
4040
# EFA 1.37.0 change from NET/AWS Libfabric/0/GDRDMA to NET/Libfabric/0/GDRDMA
4141
grep -E "NET/(AWS )?Libfabric/0/GDRDMA" ${TRAINING_LOG}
4242
fi

test/dlc_tests/ec2/test_efa.py

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -294,10 +294,16 @@ def _setup_container(connection, docker_image, container_name):
294294
# using SSH on a pre-defined port (as decided by sshd_config on server-side).
295295
# Allow instance to share all memory with container using memlock=-1:-1.
296296
# Share all EFA devices with container using --device <device_location> for all EFA devices.
297-
connection.run(
298-
f"docker run --runtime=nvidia --gpus all -id --name {container_name} --network host --ulimit memlock=-1:-1 "
299-
f"{docker_all_devices_arg} -v $HOME/container_tests:/test -v /dev/shm:/dev/shm {docker_image} bash"
300-
)
297+
if "vllm" in docker_image:
298+
connection.run(
299+
f"docker run --entrypoint=/bin/bash -e CUDA_HOME=/usr/local/cuda --runtime=nvidia --gpus all -id --name {container_name} --network host --ulimit memlock=-1:-1 "
300+
f"{docker_all_devices_arg} -v $HOME/container_tests:/test -v /dev/shm:/dev/shm {docker_image}"
301+
)
302+
else:
303+
connection.run(
304+
f"docker run --runtime=nvidia --gpus all -id --name {container_name} --network host --ulimit memlock=-1:-1 "
305+
f"{docker_all_devices_arg} -v $HOME/container_tests:/test -v /dev/shm:/dev/shm {docker_image} bash"
306+
)
301307

302308

303309
def _setup_master_efa_ssh_config(connection):

test/sagemaker_tests/pytorch/training/integration/sagemaker/test_distributed_operations.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -664,6 +664,7 @@ def test_smmodelparallel_gpt2_sdp_multinode_efa(
664664
)
665665

666666

667+
@pytest.mark.skip(reason="Sagemaker efa test is a duplicate of ec2 efa test on p4d instances")
667668
@pytest.mark.integration("smmodelparallel")
668669
@pytest.mark.model("mnist")
669670
@pytest.mark.processor("gpu")

0 commit comments

Comments
 (0)