# =============================================================================
# ODH Base Containers - CUDA 13.1 Build Arguments
# =============================================================================
#
# Build arguments for CUDA 13.1.x base image (GPU workloads).
# This file is passed directly to podman/buildah via --build-arg-file.
#
# Source: https://gitlab.com/nvidia/container-images/cuda/-/tree/master/dist/13.1.1
#
# Usage:
# ./scripts/build.sh cuda-13.1
#
# =============================================================================
# -----------------------------------------------------------------------------
# Image Tag (used by build.sh)
# -----------------------------------------------------------------------------
IMAGE_TAG=13.1-py312
# Python version bundled with this CUDA image (used in container labels)
PYTHON_VERSION=3.12
# -----------------------------------------------------------------------------
# Base Image
# -----------------------------------------------------------------------------
# Use sclorg Python image on CentOS Stream 9 (UBI 9 lacks CUDA dependencies)
# Digest is pinned for reproducible builds and auto-updated by Renovate
# Source: https://quay.io/repository/sclorg/python-312-c9s
BASE_IMAGE=quay.io/sclorg/python-312-c9s:c9s@sha256:6fec4978f2f3851ca63a477963c1d6cbc8927b651e49c63fddf354cd36723db6
# -----------------------------------------------------------------------------
# CUDA Versions
# Source: https://gitlab.com/nvidia/container-images/cuda/-/tree/master/dist/13.1.1
# -----------------------------------------------------------------------------
CUDA_MAJOR=13
CUDA_MAJOR_MINOR=13-1
CUDA_MAJOR_MINOR_DOT=13.1
CUDA_VERSION=13.1.1
NV_CUDA_CUDART_VERSION=13.1.80-1
NV_CUDA_LIB_VERSION=13.1.1-1
NV_NVTX_VERSION=13.1.115-1
# -----------------------------------------------------------------------------
# NVIDIA Driver Requirements
# Defines compatible driver versions for nvidia-container-runtime
# Source: https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/13.1.1/ubi9/base/Dockerfile
# -----------------------------------------------------------------------------
NVIDIA_REQUIRE_CUDA="cuda>=13.1 brand=unknown,driver>=535,driver<536 brand=grid,driver>=535,driver<536 brand=tesla,driver>=535,driver<536 brand=nvidia,driver>=535,driver<536 brand=quadro,driver>=535,driver<536 brand=quadrortx,driver>=535,driver<536 brand=nvidiartx,driver>=535,driver<536 brand=vapps,driver>=535,driver<536 brand=vpc,driver>=535,driver<536 brand=vcs,driver>=535,driver<536 brand=vws,driver>=535,driver<536 brand=cloudgaming,driver>=535,driver<536 brand=unknown,driver>=550,driver<551 brand=grid,driver>=550,driver<551 brand=tesla,driver>=550,driver<551 brand=nvidia,driver>=550,driver<551 brand=quadro,driver>=550,driver<551 brand=quadrortx,driver>=550,driver<551 brand=nvidiartx,driver>=550,driver<551 brand=vapps,driver>=550,driver<551 brand=vpc,driver>=550,driver<551 brand=vcs,driver>=550,driver<551 brand=vws,driver>=550,driver<551 brand=cloudgaming,driver>=550,driver<551 brand=unknown,driver>=570,driver<571 brand=grid,driver>=570,driver<571 brand=tesla,driver>=570,driver<571 brand=nvidia,driver>=570,driver<571 brand=quadro,driver>=570,driver<571 brand=quadrortx,driver>=570,driver<571 brand=nvidiartx,driver>=570,driver<571 brand=vapps,driver>=570,driver<571 brand=vpc,driver>=570,driver<571 brand=vcs,driver>=570,driver<571 brand=vws,driver>=570,driver<571 brand=cloudgaming,driver>=570,driver<571 brand=unknown,driver>=575,driver<576 brand=grid,driver>=575,driver<576 brand=tesla,driver>=575,driver<576 brand=nvidia,driver>=575,driver<576 brand=quadro,driver>=575,driver<576 brand=quadrortx,driver>=575,driver<576 brand=nvidiartx,driver>=575,driver<576 brand=vapps,driver>=575,driver<576 brand=vpc,driver>=575,driver<576 brand=vcs,driver>=575,driver<576 brand=vws,driver>=575,driver<576 brand=cloudgaming,driver>=575,driver<576 brand=unknown,driver>=580,driver<581 brand=grid,driver>=580,driver<581 brand=tesla,driver>=580,driver<581 brand=nvidia,driver>=580,driver<581 brand=quadro,driver>=580,driver<581 brand=quadrortx,driver>=580,driver<581 brand=nvidiartx,driver>=580,driver<581 brand=vapps,driver>=580,driver<581 brand=vpc,driver>=580,driver<581 brand=vcs,driver>=580,driver<581 brand=vws,driver>=580,driver<581 brand=cloudgaming,driver>=580,driver<581"
# -----------------------------------------------------------------------------
# cuBLAS
# Source: https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/13.1.1/ubi9/runtime/Dockerfile
# -----------------------------------------------------------------------------
NV_LIBCUBLAS_VERSION=13.2.1.1-1
# -----------------------------------------------------------------------------
# NPP (NVIDIA Performance Primitives)
# Source: https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/13.1.1/ubi9/runtime/Dockerfile
# -----------------------------------------------------------------------------
NV_LIBNPP_VERSION=13.0.3.3-1
# -----------------------------------------------------------------------------
# NCCL (NVIDIA Collective Communications Library)
# Source: https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/13.1.1/ubi9/runtime/Dockerfile
# -----------------------------------------------------------------------------
NV_LIBNCCL_VERSION=2.29.2
NV_LIBNCCL_PACKAGE_VERSION=2.29.2-1
# -----------------------------------------------------------------------------
# cuDNN (CUDA Deep Neural Network library)
# Source: https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/13.1.1/ubi9/runtime/cudnn/Dockerfile
# -----------------------------------------------------------------------------
NV_CUDNN_VERSION=9.17.1.4-1
# -----------------------------------------------------------------------------
# CUPTI (CUDA Profiling Tools Interface)
# Required by PyTorch profiler; not present in NVIDIA images (ODH-specific).
# Source: https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/
# -----------------------------------------------------------------------------
NV_CUDA_CUPTI_VERSION=13.1.115-1
# -----------------------------------------------------------------------------
# cuSPARSELt (structured sparsity)
# Required by PyTorch sparse operations; not present in NVIDIA images (ODH-specific).
# Source: https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/
# -----------------------------------------------------------------------------
NV_LIBCUSPARSELT_VERSION=0.7.1.0-1
# -----------------------------------------------------------------------------
# cuDSS (direct sparse solver)
# Required by scientific/ML solvers; not present in NVIDIA images (ODH-specific).
# Source: https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64/
# -----------------------------------------------------------------------------
NV_LIBCUDSS_VERSION=0.7.1.4-1
# -----------------------------------------------------------------------------
# PyPI Indexes
# Note: Using cu130 for CUDA 13.1 - PyTorch doesn't publish cu131 wheels.
# CUDA minor versions within the same major are compatible.
# -----------------------------------------------------------------------------
PIP_INDEX_URL=https://pypi.org/simple
PIP_EXTRA_INDEX_URL=https://download.pytorch.org/whl/cu130
# -----------------------------------------------------------------------------
# uv PyTorch Backend
# Tells uv which CUDA variant to select for PyTorch wheels (uv pip install).
# https://docs.astral.sh/uv/guides/integration/pytorch/
# -----------------------------------------------------------------------------
UV_TORCH_BACKEND=cu130