driver.go
/*
* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"context"
"errors"
"fmt"
"path/filepath"
"sync"
"time"
resourceapi "k8s.io/api/resource/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
coreclientset "k8s.io/client-go/kubernetes"
"k8s.io/dynamic-resource-allocation/kubeletplugin"
"k8s.io/dynamic-resource-allocation/resourceslice"
"k8s.io/klog/v2"
"github.com/NVIDIA/k8s-dra-driver-gpu/pkg/flock"
"github.com/NVIDIA/k8s-dra-driver-gpu/pkg/workqueue"
)
const (
// ErrorRetryMaxTimeout limits the amount of time spent in the request
// handlers UnprepareResourceClaims() and PrepareResourceClaims(), so that
// we send a response to the kubelet in a predictable amount of time. Within
// that deadline, retryable errors are retried (with backoff) via the
// workqueue abstraction.
ErrorRetryMaxTimeout = 45 * time.Second
// DriverPrepUprepFlockFileName is the name of a lock file (in the driver
// plugin directory) used to make sure that calls to nodePrepareResource() /
// nodeUnprepareResource() never interleave, node-globally.
DriverPrepUprepFlockFileName = "pu.lock"
)
// permanentError wraps an error to mark it as permanent. By default, every
// error is retried up to ErrorRetryMaxTimeout; errors marked as permanent
// are not retried.
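// A hypothetical call site would mark a non-retryable failure by wrapping it,
// e.g. `return permanentError{fmt.Errorf("unsupported request: %w", err)}`.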
type permanentError struct{ error }
func isPermanentError(err error) bool {
return errors.As(err, &permanentError{})
}
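// driver is the node-local DRA driver: it bundles device state, the
// prepare/unprepare file lock, the health check, and the kubeletplugin
// helper through which PrepareResourceClaims() / UnprepareResourceClaims()
// are invoked on behalf of the kubelet.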
type driver struct {
client coreclientset.Interface
pluginhelper *kubeletplugin.Helper
state *DeviceState
pulock *flock.Flock
healthcheck *healthcheck
}
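// NewDriver initializes device state, registers the driver with the kubelet
// via the kubeletplugin helper, starts the ComputeDomain manager and the
// health check, and publishes this node's devices as a ResourceSlice.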
func NewDriver(ctx context.Context, config *Config) (*driver, error) {
state, err := NewDeviceState(ctx, config)
if err != nil {
return nil, err
}
puLockPath := filepath.Join(config.DriverPluginPath(), DriverPrepUprepFlockFileName)
driver := &driver{
client: config.clientsets.Core,
state: state,
pulock: flock.NewFlock(puLockPath),
}
helper, err := kubeletplugin.Start(
ctx,
driver,
kubeletplugin.KubeClient(driver.client),
kubeletplugin.NodeName(config.flags.nodeName),
kubeletplugin.DriverName(DriverName),
// By default, the DRA library serializes (un)prepare calls. That is, at
// most one such call is exposed to the driver at any given time.
// Disable that behavior: this driver has codependent prepare() actions
// (the first prepare() can only complete once a second prepare() comes
// in). Concurrency management for incoming requests is done with this
// driver's work queue abstraction.
kubeletplugin.Serialize(false),
kubeletplugin.RegistrarDirectoryPath(config.flags.kubeletRegistrarDirectoryPath),
kubeletplugin.PluginDataDirectoryPath(config.DriverPluginPath()),
)
if err != nil {
return nil, err
}
driver.pluginhelper = helper
// Enumerate the set of ComputeDomain daemon devices and publish them
var resourceSlice resourceslice.Slice
for _, device := range state.allocatable {
// Explicitly exclude ComputeDomain channels (other than channel 0) from
// being advertised here. They are instead advertised as a network resource
// from the control plane.
if device.Type() == ComputeDomainChannelType && device.Channel.ID != 0 {
continue
}
resourceSlice.Devices = append(resourceSlice.Devices, device.GetDevice())
}
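// Publish all of this node's devices in a single pool (named after the node)
// containing a single slice.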
resources := resourceslice.DriverResources{
Pools: map[string]resourceslice.Pool{
config.flags.nodeName: {Slices: []resourceslice.Slice{resourceSlice}},
},
}
if err := state.computeDomainManager.Start(ctx); err != nil {
return nil, err
}
healthcheck, err := startHealthcheck(ctx, config)
if err != nil {
return nil, fmt.Errorf("start healthcheck: %w", err)
}
driver.healthcheck = healthcheck
if err := driver.pluginhelper.PublishResources(ctx, resources); err != nil {
return nil, err
}
return driver, nil
}
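// Shutdown stops the ComputeDomain manager, the health check, and the
// kubeletplugin helper. Calling it on a nil receiver is a no-op.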
func (d *driver) Shutdown() error {
if d == nil {
return nil
}
if err := d.state.computeDomainManager.Stop(); err != nil {
return fmt.Errorf("error stopping ComputeDomainManager: %w", err)
}
if d.healthcheck != nil {
d.healthcheck.Stop()
}
d.pluginhelper.Stop()
return nil
}
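// PrepareResourceClaims prepares the given claims via the work queue: each
// claim is retried with backoff until it reaches a terminal result or the
// ErrorRetryMaxTimeout deadline expires. Terminal results are collected in
// the returned map, keyed by claim UID.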
func (d *driver) PrepareResourceClaims(ctx context.Context, claims []*resourceapi.ResourceClaim) (map[types.UID]kubeletplugin.PrepareResult, error) {
klog.V(6).Infof("PrepareResourceClaims called with %d claim(s)", len(claims))
var wg sync.WaitGroup
ctx, cancel := context.WithTimeout(ctx, ErrorRetryMaxTimeout)
workQueue := workqueue.New(workqueue.DefaultControllerRateLimiter())
results := make(map[types.UID]kubeletplugin.PrepareResult)
for _, claim := range claims {
wg.Add(1)
key := fmt.Sprintf("%v", claim.UID)
workQueue.EnqueueRawWithKey(claim, key, func(ctx context.Context, obj any) error {
done, res := d.nodePrepareResource(ctx, claim)
if done {
results[claim.UID] = res
wg.Done()
return nil
}
return fmt.Errorf("%w", res.Err)
})
}
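// Once every claim has reached a terminal result, cancel the context so that
// workQueue.Run() returns early; otherwise Run() returns when the
// ErrorRetryMaxTimeout deadline expires, and claims without a terminal result
// have no entry in `results`.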
go func() {
wg.Wait()
cancel()
}()
workQueue.Run(ctx)
return results, nil
}
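// UnprepareResourceClaims is the unprepare counterpart to
// PrepareResourceClaims(): each claim reference is retried with backoff via
// the work queue until it is unprepared, fails permanently, or the
// ErrorRetryMaxTimeout deadline expires.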
func (d *driver) UnprepareResourceClaims(ctx context.Context, claimRefs []kubeletplugin.NamespacedObject) (map[types.UID]error, error) {
klog.V(6).Infof("UnprepareResourceClaims called with %d claim(s)", len(claimRefs))
var wg sync.WaitGroup
ctx, cancel := context.WithTimeout(ctx, ErrorRetryMaxTimeout)
workQueue := workqueue.New(workqueue.DefaultControllerRateLimiter())
results := make(map[types.UID]error)
for _, claim := range claimRefs {
wg.Add(1)
key := fmt.Sprintf("%v", claim.UID)
workQueue.EnqueueRawWithKey(claim, key, func(ctx context.Context, obj any) error {
done, err := d.nodeUnprepareResource(ctx, claim)
if done {
results[claim.UID] = err
wg.Done()
return nil
}
return fmt.Errorf("%w", err)
})
}
go func() {
wg.Wait()
cancel()
}()
workQueue.Run(ctx)
return results, nil
}
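// HandleError is invoked by the kubeletplugin helper for errors that occur in
// background activity and cannot be returned to a caller.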
func (d *driver) HandleError(ctx context.Context, err error, msg string) {
// For now we just follow the advice documented in the DRAPlugin API docs.
// See: https://pkg.go.dev/k8s.io/apimachinery/pkg/util/runtime#HandleErrorWithContext
runtime.HandleErrorWithContext(ctx, err, msg)
}
// nodePrepareResource() returns a 2-tuple; the first value is a boolean
// indicating whether the work is 'done', the second value is a result which can
// also reflect an error. Set the boolean to `true` for any result wrapping a
// non-retryable error.
func (d *driver) nodePrepareResource(ctx context.Context, claim *resourceapi.ResourceClaim) (bool, kubeletplugin.PrepareResult) {
release, err := d.pulock.Acquire(ctx, flock.WithTimeout(10*time.Second))
if err != nil {
res := kubeletplugin.PrepareResult{
Err: fmt.Errorf("error acquiring prep/unprep lock: %w", err),
}
return false, res
}
defer release()
if claim.Status.Allocation == nil {
res := kubeletplugin.PrepareResult{
Err: fmt.Errorf("no allocation set in ResourceClaim %s in namespace %s", claim.Name, claim.Namespace),
}
return true, res
}
devs, err := d.state.Prepare(ctx, claim)
if err != nil {
res := kubeletplugin.PrepareResult{
Err: fmt.Errorf("error preparing devices for claim %s/%s:%s: %w", claim.Namespace, claim.Name, claim.UID, err),
}
if isPermanentError(err) {
klog.V(6).Infof("Permanent error preparing devices for claim %v: %v", claim.UID, err)
return true, res
}
return false, res
}
klog.Infof("Prepared devices for claim '%s/%s:%s': %v", claim.Namespace, claim.Name, claim.UID, devs)
return true, kubeletplugin.PrepareResult{Devices: devs}
}
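// nodeUnprepareResource() mirrors nodePrepareResource(): it returns a boolean
// indicating whether the work is 'done' (and must not be retried) plus an
// error. Only success and permanent errors count as done.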
func (d *driver) nodeUnprepareResource(ctx context.Context, claimRef kubeletplugin.NamespacedObject) (bool, error) {
release, err := d.pulock.Acquire(ctx, flock.WithTimeout(10*time.Second))
if err != nil {
return false, fmt.Errorf("error acquiring prep/unprep lock: %w", err)
}
defer release()
if err := d.state.Unprepare(ctx, claimRef); err != nil {
return isPermanentError(err), fmt.Errorf("error unpreparing devices for claim '%v': %w", claimRef.String(), err)
}
klog.Infof("Unprepared devices for claim '%v'", claimRef.String())
return true, nil
}
// TODO: implement loop to remove CDI files from the CDI path for claimUIDs
// that have been removed from the AllocatedClaims map.
// func (d *driver) cleanupCDIFiles(wg *sync.WaitGroup) chan error {
// errors := make(chan error)
// return errors
// }