Commit d2ec3c3

containerd-1.7: add support for CDI
The GPU Operator provides additional runtime classes that allow selecting the NVIDIA stack for pods. Provide the same runtimes the GPU Operator provides for compatibility, and provide them in a separate config file to prevent broken runtimes in non-CDI variants.

Signed-off-by: Jingwei Wang <[email protected]>
1 parent 6f014eb commit d2ec3c3
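
For context on how pods end up on these runtimes: the commit message notes that the GPU Operator provides runtime classes for selecting the NVIDIA stack, and the handler names those runtime classes reference correspond to the runtime tables this commit adds (nvidia, nvidia-cdi, nvidia-legacy). A pod that sets runtimeClassName: nvidia-cdi is dispatched by containerd's CRI plugin to the matching table, excerpted here from the new template below:

    # The handler name a RuntimeClass references is the last segment of the
    # table key ("nvidia-cdi"); BinaryName is the OCI runtime binary that the
    # runc shim executes in place of plain runc.
    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia-cdi]
    runtime_type = "io.containerd.runc.v2"

    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia-cdi.options]
    BinaryName = "nvidia-container-runtime.cdi"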

File tree: 2 files changed (+104, -1 lines)

packages/containerd-1.7/containerd-1.7.spec (2 additions, 1 deletion)

@@ -23,6 +23,7 @@ Source3: containerd-config-toml_basic
 Source4: containerd-config-toml_k8s_nvidia_containerd_sock
 Source5: containerd-tmpfiles.conf
 Source6: containerd-cri-base-json
+Source7: containerd-config-toml_k8s_nvidia_cdi_containerd_sock
 
 # Mount for writing containerd configuration
 Source100: etc-containerd.mount
@@ -141,7 +142,7 @@ install -p -m 0644 %{S:1} %{S:100} %{S:110} %{buildroot}%{_cross_unitdir}
 
 install -d %{buildroot}%{_cross_templatedir}
 install -d %{buildroot}%{_cross_factorydir}%{_cross_sysconfdir}/containerd
-install -p -m 0644 %{S:2} %{S:3} %{S:4} %{S:6} %{buildroot}%{_cross_templatedir}
+install -p -m 0644 %{S:2} %{S:3} %{S:4} %{S:6} %{S:7} %{buildroot}%{_cross_templatedir}
 
 install -d %{buildroot}%{_cross_tmpfilesdir}
 install -p -m 0644 %{S:5} %{buildroot}%{_cross_tmpfilesdir}/containerd.conf
packages/containerd-1.7/containerd-config-toml_k8s_nvidia_cdi_containerd_sock (new file, 102 additions)

@@ -0,0 +1,102 @@
+[required-extensions]
+container-registry = "v1"
+container-runtime = "v1"
+kubernetes = "v1"
+std = { version = "v1", helpers = ["join_array", "default"]}
++++
+version = 2
+root = "/var/lib/containerd"
+state = "/run/containerd"
+disabled_plugins = [
+    "io.containerd.internal.v1.opt",
+    "io.containerd.snapshotter.v1.aufs",
+    "io.containerd.snapshotter.v1.devmapper",
+    "io.containerd.snapshotter.v1.native",
+    "io.containerd.snapshotter.v1.zfs",
+]
+
+[grpc]
+address = "/run/containerd/containerd.sock"
+
+[plugins."io.containerd.grpc.v1.cri"]
+device_ownership_from_security_context = {{default false settings.kubernetes.device-ownership-from-security-context}}
+enable_selinux = true
+enable_cdi = true
+# The pause container image is specified here; it shares the same image as kubelet's pod-infra-container-image
+sandbox_image = "localhost/kubernetes/pause:0.1.0"
+{{#if settings.container-runtime.max-container-log-line-size}}
+max_container_log_line_size = {{settings.container-runtime.max-container-log-line-size}}
+{{/if}}
+{{#if settings.container-runtime.max-concurrent-downloads}}
+max_concurrent_downloads = {{settings.container-runtime.max-concurrent-downloads}}
+{{/if}}
+{{#if settings.container-runtime.enable-unprivileged-ports}}
+enable_unprivileged_ports = {{settings.container-runtime.enable-unprivileged-ports}}
+{{/if}}
+{{#if settings.container-runtime.enable-unprivileged-icmp}}
+enable_unprivileged_icmp = {{settings.container-runtime.enable-unprivileged-icmp}}
+{{/if}}
+
+[plugins."io.containerd.grpc.v1.cri".containerd]
+default_runtime_name = "nvidia"
+
+[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia]
+runtime_type = "io.containerd.runc.v2"
+base_runtime_spec = "/etc/containerd/cri-base.json"
+
+# CDI-only NVIDIA container runtime
+[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia-cdi]
+runtime_type = "io.containerd.runc.v2"
+base_runtime_spec = "/etc/containerd/cri-base.json"
+
+# legacy-only NVIDIA container runtime
+[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia-legacy]
+runtime_type = "io.containerd.runc.v2"
+base_runtime_spec = "/etc/containerd/cri-base.json"
+
+[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia.options]
+SystemdCgroup = true
+BinaryName = "nvidia-container-runtime"
+
+# CDI-only NVIDIA container runtime
+[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia-cdi.options]
+SystemdCgroup = true
+BinaryName = "nvidia-container-runtime.cdi"
+
+# legacy-only NVIDIA container runtime
+[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia-legacy.options]
+SystemdCgroup = true
+BinaryName = "nvidia-container-runtime.legacy"
+
+[plugins."io.containerd.grpc.v1.cri".cni]
+bin_dir = "/opt/cni/bin"
+conf_dir = "/etc/cni/net.d"
+
+{{#if settings.container-registry.mirrors}}
+{{#each settings.container-registry.mirrors}}
+[plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{registry}}"]
+endpoint = [{{join_array ", " endpoint }}]
+{{/each}}
+{{/if}}
+
+{{#if settings.container-registry.credentials}}
+{{#each settings.container-registry.credentials}}
+{{#if (eq registry "docker.io" )~}}
+[plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth]
+{{else}}
+[plugins."io.containerd.grpc.v1.cri".registry.configs."{{registry}}".auth]
+{{/if}}
+{{#if username}}
+username = "{{{username}}}"
+{{/if}}
+{{#if password}}
+password = "{{{password}}}"
+{{/if}}
+{{#if auth}}
+auth = "{{{auth}}}"
+{{/if}}
+{{#if identitytoken}}
+identitytoken = "{{{identitytoken}}}"
+{{/if}}
+{{/each}}
+{{/if}}
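
A note on enable_cdi = true in the template above: it turns on Container Device Interface (CDI) support in containerd's CRI plugin, which resolves CDI device names requested for a container (for example, nvidia.com/gpu=0) against CDI spec files present on the node. The template leaves the spec search path at containerd's defaults; purely as an illustration (not something this commit sets), containerd 1.7 exposes that path as cdi_spec_dirs:

    [plugins."io.containerd.grpc.v1.cri"]
    enable_cdi = true
    # Default CDI spec locations in containerd 1.7, shown for illustration only;
    # the template relies on these defaults rather than setting them.
    cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"]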
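
As a worked illustration of the Handlebars sections at the end of the template, using hypothetical registry values that are not part of this commit: a settings.container-registry tree with one docker.io mirror and matching credentials would render roughly to:

    # Hypothetical rendered output; the endpoint and credentials are examples only.
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
    endpoint = ["https://registry.example.com"]

    [plugins."io.containerd.grpc.v1.cri".registry.configs."registry-1.docker.io".auth]
    username = "example-user"
    password = "example-password"

The join_array helper expands the configured endpoint list into the TOML array shown, and the (eq registry "docker.io") branch rewrites the auth table key to registry-1.docker.io, the host Docker Hub clients actually authenticate against, while the mirror table keeps the docker.io name.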
