-
Notifications
You must be signed in to change notification settings - Fork 2.1k
Expand file tree
/
Copy pathtemplate_hook.go
More file actions
320 lines (258 loc) · 8.83 KB
/
template_hook.go
File metadata and controls
320 lines (258 loc) · 8.83 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
// Copyright IBM Corp. 2015, 2025
// SPDX-License-Identifier: BUSL-1.1
package taskrunner
import (
"context"
"fmt"
"sync"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/nomad/client/allocrunner/interfaces"
ti "github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces"
"github.com/hashicorp/nomad/client/allocrunner/taskrunner/template"
"github.com/hashicorp/nomad/client/config"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/client/taskenv"
"github.com/hashicorp/nomad/nomad/structs"
)
// templateHookName is the name this hook reports through the task hook
// interface and uses for its named logger.
const templateHookName = "template"
// templateHookConfig carries the dependencies and settings used to build a
// templateHook via newTemplateHook.
type templateHookConfig struct {
	// alloc is the allocation whose templates this hook manages
	alloc *structs.Allocation

	// logger is used to log; newTemplateHook derives a named sub-logger from it
	logger log.Logger

	// lifecycle is used to interact with the task's lifecycle (e.g. killing
	// the task when template re-rendering fails on a token update)
	lifecycle ti.TaskLifecycle

	// events is used to emit events
	events ti.EventEmitter

	// templates is the set of templates we are managing
	templates []*structs.Template

	// clientConfig is the Nomad Client configuration; it supplies the
	// per-cluster Consul and Vault configs looked up in newManager
	clientConfig *config.Config

	// envBuilder is the environment variable builder for the task.
	envBuilder *taskenv.Builder

	// consulNamespace is the current Consul namespace
	consulNamespace string

	// nomadNamespace is the job's Nomad namespace
	nomadNamespace string

	// renderOnTaskRestart is a flag to explicitly re-render templates when
	// Prestart runs again for a task restart
	renderOnTaskRestart bool

	// hookResources are used to fetch Consul tokens obtained via workload
	// identity by an earlier hook
	hookResources *cstructs.AllocHookResources
}
// templateHook is a task runner hook that renders a task's templates before
// the task starts and keeps the long-lived ("watched") templates re-rendering
// as their upstream data changes.
type templateHook struct {
	config *templateHookConfig

	// logger is used to log
	logger log.Logger

	// templateManager is used to manage any consul-templates this task may
	// have; it holds only the "watched" templates after the first render
	templateManager *template.TaskTemplateManager

	// managerLock serializes all hook entry points that touch templateManager
	managerLock sync.Mutex

	// consulNamespace is the current Consul namespace
	consulNamespace string

	// vaultToken is the current Vault token
	vaultToken string

	// vaultNamespace is the current Vault namespace
	vaultNamespace string

	// nomadToken is the current Nomad token
	nomadToken string

	// consulToken is the Consul ACL token obtained from consul_hook via
	// workload identity
	consulToken string

	// task is the task that defines these templates; captured in Prestart
	task *structs.Task

	// taskDir is the task directory
	taskDir string

	// taskID is a unique identifier for this templateHook, for use in
	// downstream platform-specific template runner consumers
	taskID string
}
// newTemplateHook builds a templateHook from the given configuration. The
// hook's logger is derived from the configured logger and named after the
// hook.
func newTemplateHook(config *templateHookConfig) *templateHook {
	h := &templateHook{
		config:          config,
		consulNamespace: config.consulNamespace,
	}
	h.logger = config.logger.Named(templateHookName)
	return h
}
// Name returns the name of the hook as reported in task lifecycle events.
func (*templateHook) Name() string {
	return templateHookName
}
// Prestart implements interfaces.TaskPrestartHook. It captures request state
// for use by the other hooks, resolves the task's Consul and Vault
// credentials, and blocks until every template has completed its first
// render.
func (h *templateHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error {
	h.managerLock.Lock()
	defer h.managerLock.Unlock()

	// A non-nil manager means prestart already ran; bail out unless the
	// caller explicitly asked for a re-render on task restart.
	if h.templateManager != nil {
		if !h.config.renderOnTaskRestart {
			return nil
		}
		h.logger.Info("re-rendering templates on task restart")
		h.templateManager.Stop()
		h.templateManager = nil
	}

	// Capture request state so it can be used in other hooks.
	h.task = req.Task
	h.taskDir = req.TaskDir.Dir
	h.vaultToken = req.VaultToken
	h.nomadToken = req.NomadToken
	h.taskID = req.Alloc.ID + "-" + req.Task.Name

	// Resolve the Consul block that applies: a task-level block overrides
	// the group-level one.
	tg := h.config.alloc.Job.LookupTaskGroup(h.config.alloc.TaskGroup)
	consulBlock := tg.Consul
	if req.Task.Consul != nil {
		consulBlock = req.Task.Consul
	}
	consulWIDName := consulBlock.IdentityName()

	// Check if task has an identity for Consul and assume WI flow if it does.
	// COMPAT simplify this logic and assume WI flow in 1.9+
	var hasConsulIdentity bool
	for _, wid := range req.Task.Identities {
		if wid.Name == consulWIDName {
			hasConsulIdentity = true
			break
		}
	}
	if hasConsulIdentity {
		// Pull the token that the consul hook stashed for this task's
		// identity in the shared alloc hook resources.
		consulCluster := req.Task.GetConsulClusterName(tg)
		clusterTokens := h.config.hookResources.GetConsulTokens()[consulCluster]
		if clusterTokens == nil {
			return fmt.Errorf(
				"consul tokens for cluster %s requested by task %s not found",
				consulCluster, req.Task.Name,
			)
		}
		token := clusterTokens[consulWIDName+"/"+req.Task.Name]
		if token == nil {
			return fmt.Errorf(
				"consul tokens for cluster %s and identity %s requested by task %s not found",
				consulCluster, consulWIDName, req.Task.Name,
			)
		}
		h.consulToken = token.SecretID
	}

	// Set vault namespace if specified.
	if req.Task.Vault != nil {
		h.vaultNamespace = req.Task.Vault.Namespace
	}

	// Partition templates into run-once and continuously watched sets.
	once, watch := []*structs.Template{}, []*structs.Template{}
	for _, tmpl := range h.config.templates {
		switch {
		case tmpl.Once:
			once = append(once, tmpl)
		default:
			watch = append(watch, tmpl)
		}
	}

	return h.renderTemplates(ctx, once, watch)
}
// newManager builds a TaskTemplateManager for the given templates along with
// the channel the manager closes once every template has rendered at least
// once. The caller is responsible for running and stopping the manager.
func (h *templateHook) newManager(tmpls []*structs.Template) (manager *template.TaskTemplateManager, unblock chan struct{}, err error) {
	// Look up the per-cluster Vault client configuration.
	vaultCluster := h.task.GetVaultClusterName()
	vaultConfig := h.config.clientConfig.GetVaultConfigs(h.logger)[vaultCluster]

	// Fail if task has a vault block but no client config was found.
	if h.task.Vault != nil && vaultConfig == nil {
		return nil, nil, fmt.Errorf("Vault cluster %q is disabled or not configured", vaultCluster)
	}

	// Look up the per-cluster Consul client configuration.
	tg := h.config.alloc.Job.LookupTaskGroup(h.config.alloc.TaskGroup)
	consulCluster := h.task.GetConsulClusterName(tg)
	consulConfig := h.config.clientConfig.GetConsulConfigs(h.logger)[consulCluster]

	unblock = make(chan struct{})
	mgr, err := template.NewTaskTemplateManager(&template.TaskTemplateManagerConfig{
		UnblockCh:            unblock,
		Lifecycle:            h.config.lifecycle,
		Events:               h.config.events,
		Templates:            tmpls,
		ClientConfig:         h.config.clientConfig,
		ConsulNamespace:      h.config.consulNamespace,
		ConsulToken:          h.consulToken,
		ConsulConfig:         consulConfig,
		VaultToken:           h.vaultToken,
		VaultConfig:          vaultConfig,
		VaultNamespace:       h.vaultNamespace,
		TaskDir:              h.taskDir,
		EnvBuilder:           h.config.envBuilder,
		MaxTemplateEventRate: template.DefaultMaxTemplateEventRate,
		NomadNamespace:       h.config.nomadNamespace,
		NomadToken:           h.nomadToken,
		TaskID:               h.taskID,
		Logger:               h.logger,
	})
	if err != nil {
		h.logger.Error("failed to create template manager", "error", err)
		return nil, nil, err
	}

	return mgr, unblock, nil
}
// Poststart implements interfaces.TaskPoststartHook. Once the task is up, it
// triggers any scripts registered to run after the first render.
func (h *templateHook) Poststart(_ context.Context, _ *interfaces.TaskPoststartRequest, _ *interfaces.TaskPoststartResponse) error {
	h.managerLock.Lock()
	defer h.managerLock.Unlock()

	if mgr := h.templateManager; mgr != nil {
		mgr.RunFirstRenderScripts()
	}
	return nil
}
// Stop implements interfaces.TaskStopHook and shuts down the template
// manager, if one was created.
func (h *templateHook) Stop(_ context.Context, req *interfaces.TaskStopRequest, resp *interfaces.TaskStopResponse) error {
	h.managerLock.Lock()
	defer h.managerLock.Unlock()

	if mgr := h.templateManager; mgr != nil {
		mgr.Stop()
	}
	return nil
}
// Update is used to handle updates to vault and/or nomad tokens. When either
// token changes, the running template manager is torn down and rebuilt so
// templates are re-rendered with the new credentials. If the rebuild fails
// the task is killed; Update itself still returns nil in that case, matching
// the original contract.
func (h *templateHook) Update(ctx context.Context, req *interfaces.TaskUpdateRequest, resp *interfaces.TaskUpdateResponse) error {
	h.managerLock.Lock()
	defer h.managerLock.Unlock()

	// no template manager to manage
	if h.templateManager == nil {
		return nil
	}

	// neither vault nor nomad token has been updated, nothing to do
	if req.VaultToken == h.vaultToken && req.NomadToken == h.nomadToken {
		return nil
	}
	h.vaultToken = req.VaultToken
	h.nomadToken = req.NomadToken

	// shutdown the old template manager and rebuild with the new tokens
	tmpls := h.templateManager.Templates()
	h.templateManager.Stop()
	h.templateManager = nil

	if err := h.renderTemplates(ctx, nil, tmpls); err != nil {
		// Wrap with %w (not %v) so the cause stays inspectable, and log the
		// underlying error rather than the wrapped one to avoid the message
		// prefix appearing twice in the log line.
		h.logger.Error("failed to build template manager", "error", err)
		err = fmt.Errorf("failed to build template manager: %w", err)

		// Best-effort kill; a background context is used because ctx may
		// already be canceled.
		_ = h.config.lifecycle.Kill(context.Background(),
			structs.NewTaskEvent(structs.TaskKilling).
				SetFailsTask().
				SetDisplayMessage(fmt.Sprintf("Template update %v", err)))
	}
	return nil
}
// renderTemplates creates the template managers and waits until each template
// has rendered, setting the watch templateManager on the hook when complete
// so it can be referenced during token updates.
func (h *templateHook) renderTemplates(ctx context.Context, once []*structs.Template, watch []*structs.Template) error {
	onceMgr, onceRendered, err := h.newManager(once)
	if err != nil {
		return err
	}
	watchMgr, watchRendered, err := h.newManager(watch)
	if err != nil {
		return err
	}

	go onceMgr.Run()
	go watchMgr.Run()

	// Wait for the run-once templates, stopping early on cancellation.
	select {
	case <-ctx.Done():
		onceMgr.Stop()
	case <-onceRendered:
	}

	// Then wait for the watched templates the same way.
	select {
	case <-ctx.Done():
		watchMgr.Stop()
	case <-watchRendered:
	}

	// The template hook only needs to manage "watched" templates.
	// We can ignore the "once" manager after its templates render.
	h.templateManager = watchMgr
	return nil
}