---
# Default values for Nublado.
image:
# -- Nublado image to use
repository: "ghcr.io/lsst-sqre/nublado"
# -- Pull policy for the Nublado image
pullPolicy: "IfNotPresent"
# -- Tag of Nublado image to use
# @default -- The appVersion of the chart
tag: null
controller:
# -- Affinity rules for the Nublado controller
affinity: {}
# -- If Google Artifact Registry is used as the image source, the Google
# service account that has an IAM binding to the `nublado-controller`
# Kubernetes service account and has the Artifact Registry reader role
# @default -- None, must be set when using Google Artifact Registry
googleServiceAccount: null
ingress:
# -- Additional annotations to add for the Nublado controller ingress
annotations: {}
# -- Node selector rules for the Nublado controller
nodeSelector: {}
# -- Annotations for the Nublado controller
podAnnotations: {}
# -- Resource limits and requests for the Nublado controller
# @default -- See `values.yaml`
resources:
limits:
cpu: "1"
memory: "1Gi"
requests:
cpu: "0.05"
memory: "200Mi"
# -- Whether to enable Slack alerts. If set to true, `slack_webhook` must be
# set in the corresponding Nublado Vault secret.
slackAlerts: false
# -- Tolerations for the Nublado controller
# @default -- Tolerate GKE arm64 taint
tolerations:
- key: "kubernetes.io/arch"
operator: "Equal"
value: "arm64"
effect: "NoSchedule"
# Passed as YAML to the lab controller.
config:
# -- Level of Python logging
logLevel: "INFO"
# -- Path prefix that will be routed to the controller
pathPrefix: "/nublado"
fileserver:
# -- Enable user file servers
enabled: false
# -- Affinity rules for user file server pods
affinity: {}
# -- Argo CD application in which to collect user file servers
application: "nublado-fileservers"
# -- Timeout to wait for Kubernetes to create file servers, in Safir
# `parse_timedelta` format
creationTimeout: "3m"
# -- Timeout for deleting a user's file server from Kubernetes, in Safir
# `parse_timedelta` format
deleteTimeout: "2m"
# -- Timeout for idle user fileservers, in Safir `parse_timedelta`
# format
idleTimeout: "1d"
# -- Namespace for user file servers
namespace: "fileservers"
# -- Node selector rules for user file server pods
nodeSelector: {}
# -- Path prefix for user file servers
pathPrefix: "/files"
# -- How frequently to reconcile file server state against Kubernetes to
# catch deletions from outside Nublado, in Safir `parse_timedelta`
# format
reconcileInterval: "1h"
# -- Resource requests and limits for user file servers
# @default -- See `values.yaml`
resources:
requests:
cpu: 0.1
memory: "1Gi"
limits:
cpu: 1
memory: "10Gi"
# -- Tolerations for user file server pods
# @default -- Tolerate GKE arm64 taint
tolerations:
- key: "kubernetes.io/arch"
operator: "Equal"
value: "arm64"
effect: "NoSchedule"
# -- Volumes that should be made available via WebDAV
volumeMounts: []
# volumeMounts:
# - containerPath: "/project"
# readOnly: true
# volumeName: "project"
fsadmin:
# -- Affinity rules for fsadmin pods
affinity: {}
# -- Argo CD application in which to collect fsadmins
application: "nublado-fileservers"
# -- Extra volumes that should be made available to fsadmin
extraVolumes: []
# -- Extra volumes that should be mounted to fsadmin
extraVolumeMounts: []
# -- Mount prefix, to be prepended to mountpoints in order to
# collect them in one place
mountPrefix: null
# -- Node selector rules for fsadmin pods
nodeSelector: {}
# -- Resource requests and limits for fsadmin
# @default -- See `values.yaml`
resources:
requests:
cpu: 0.1
memory: "250Mi"
limits:
cpu: 1
memory: "10Gi"
# -- Timeout to wait for Kubernetes to create/destroy fsadmin, in Safir
# `parse_timedelta` format
timeout: "2m"
# -- Tolerations for fsadmin pod
# @default -- Tolerate GKE arm64 taint
tolerations:
- key: "kubernetes.io/arch"
operator: "Equal"
value: "arm64"
effect: "NoSchedule"
images:
# -- How long to wait for a prepull of a pod to finish before deciding
# it has failed, in Safir `parse_timedelta` format.
prepullTimeout: "10m"
# -- How frequently to refresh the list of available images and compare
# it to the cached images on nodes to prepull new images, in Safir
# `parse_timedelta` format. Newly-available images will not appear in
# the menu for up to this interval.
refreshInterval: "5m"
# -- Source for prepulled images. For Docker, set `type` to `docker`,
# `registry` to the hostname and `repository` to the name of the
# repository. For Google Artifact Repository, set `type` to `google`,
# `location` to the region, `projectId` to the Google project,
# `repository` to the name of the repository, and `image` to the name of
# the image.
# @default -- None, must be specified
source: {}
# -- Tag marking the recommended image (shown first in the menu)
recommendedTag: "recommended"
# -- Number of most-recent releases to prepull.
numReleases: 1
# -- Number of most-recent weeklies to prepull.
numWeeklies: 2
# -- Number of most-recent dailies to prepull.
numDailies: 3
# -- Restrict images to this SAL cycle, if given.
cycle: null
# -- List of additional image tags to prepull. Listing the image tagged
# as recommended here is recommended when using a Docker image source to
# ensure its name can be expanded properly in the menu.
pin: []
# -- Additional tags besides `recommendedTag` that should be recognized
# as aliases.
aliasTags: []
lab:
# -- How frequently the lab should report activity to JupyterHub in
# Safir `parse_timedelta` format
activityInterval: "1h"
# -- Affinity rules for user lab pods
affinity: {}
# -- Argo CD application in which to collect user lab objects
application: "nublado-users"
# -- Default size selected on the spawner form. This must be either
# `null` or the name of one of the sizes listed in `sizes`. If `null`,
# the first listed size will be the default.
defaultSize: "large"
# -- Timeout for deleting a user's lab resources from Kubernetes in
# Safir `parse_timedelta` format
deleteTimeout: "1m"
# -- Select where `/tmp` and `/lab_startup` in the lab will come
# from. Choose between `disk` (node-local ephemeral storage) and
# `memory` (tmpfs capped at 25% of the available memory for `/tmp`).
emptyDirSource: "memory"
# -- Environment variables to set for every user lab
# @default -- See `values.yaml`
env:
API_ROUTE: "/api"
ARROW_DEFAULT_MEMORY_POOL: "jemalloc" # Arrow >17 memory leak hack
AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod"
FIREFLY_ROUTE: "/portal/app"
HUB_ROUTE: "/nb/hub"
RSP_SITE_TYPE: "science"
TAP_ROUTE: "/api/tap"
TUTORIAL_NOTEBOOKS_URL: "https://github.com/lsst/tutorial-notebooks@main"
TUTORIAL_NOTEBOOKS_DIR: "/opt/lsst/software/notebooks-at-build-time/tutorial-notebooks"
# -- Extra annotations to add to user lab pods
extraAnnotations: {}
# -- Files to be mounted as ConfigMaps inside the user lab pod.
# `contents` contains the file contents. Set `modify` to true to make
# the file writable in the pod.
# @default -- See `values.yaml`
files:
/opt/lsst/software/jupyterlab/lsst_dask.yml: |
# No longer used, but preserves compatibility with runlab.sh
dask_worker.yml: |
enabled: false
/opt/lsst/software/jupyterlab/panda/idds.cfg.client.template: |
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Wen Guan, <wen.guan@cern.ch>, 2020
[common]
# if logdir is configured, idds will write to idds.log in this
# directory, else idds will go to stdout/stderr. With supervisord,
# it's good to write to stdout/stderr, then supervisord can manage
# and rotate logs.
# logdir = /var/log/idds
loglevel = INFO
[rest]
host = https://iddsserver.cern.ch:443/idds
#url_prefix = /idds
#cacher_dir = /tmp
cacher_dir = /data/idds
# -- Path inside the lab container where custom JupyterLab configuration
# is stored
jupyterlabConfigDir: "/opt/lsst/software/jupyterlab"
# -- Prefix of home directory path to add before the username. This is
# the path inside the container, not the path of the volume.
homedirPrefix: "/home"
# -- Schema for home directory construction. Choose between `username`
# (paths like `/home/rachel`) and `initialThenUsername` (paths like
# `/home/r/rachel`).
homedirSchema: "username"
# -- Portion of the home directory path after the username. This is
# intended for environments that want the JupyterLab home directory to
# be a subdirectory of the user's home directory in some external
# environment.
homedirSuffix: ""
# -- Home volume name. The controller needs to know which volume
# contains user homes.
homeVolumeName: "home"
# -- Containers run as init containers with each user pod. Each should
# set `name`, `image` (a Docker image and pull policy specification),
# and `privileged`, and may contain `volumeMounts` (similar to the main
# `volumeMounts` configuration). If `privileged` is true, the container
# will run as root with all capabilities. Otherwise it will run as the
# user.
initContainers: []
# -- Command executed in the container to start the lab
labStartCommand:
- "/opt/lsst/software/jupyterlab/runlab.sh"
# -- Prefix for namespaces for user labs. To this will be added a dash
# (`-`) and the user's username.
namespacePrefix: "nublado"
# -- Node selector rules for user lab pods
nodeSelector: {}
nss:
# -- Base `/etc/passwd` file for lab containers
# @default -- See `values.yaml`
basePasswd: |
root:x:0:0:root:/root:/bin/bash
daemon:x:1:1:daemon:/usr/sbin:/usr/sbin/nologin
bin:x:2:2:bin:/bin:/usr/sbin/nologin
sys:x:3:3:sys:/dev:/usr/sbin/nologin
sync:x:4:65534:sync:/bin:/bin/sync
games:x:5:60:games:/usr/games:/usr/sbin/nologin
man:x:6:12:man:/var/cache/man:/usr/sbin/nologin
lp:x:7:7:lp:/var/spool/lpd:/usr/sbin/nologin
mail:x:8:8:mail:/var/mail:/usr/sbin/nologin
news:x:9:9:news:/var/spool/news:/usr/sbin/nologin
uucp:x:10:10:uucp:/var/spool/uucp:/usr/sbin/nologin
proxy:x:13:13:proxy:/bin:/usr/sbin/nologin
www-data:x:33:33:www-data:/var/www:/usr/sbin/nologin
backup:x:34:34:backup:/var/backups:/usr/sbin/nologin
list:x:38:38:Mailing List Manager:/var/list:/usr/sbin/nologin
irc:x:39:39:ircd:/run/ircd:/usr/sbin/nologin
_apt:x:42:65534::/nonexistent:/usr/sbin/nologin
nobody:x:65534:65534:nobody:/nonexistent:/usr/sbin/nologin
# -- Base `/etc/group` file for lab containers
# @default -- See `values.yaml`
baseGroup: |
root:x:0:
daemon:x:1:
bin:x:2:
sys:x:3:
adm:x:4:
tty:x:5:
disk:x:6:
lp:x:7:
mail:x:8:
news:x:9:
uucp:x:10:
man:x:12:
proxy:x:13:
kmem:x:15:
dialout:x:20:
fax:x:21:
voice:x:22:
cdrom:x:24:
floppy:x:25:
tape:x:26:
sudo:x:27:
audio:x:29:
dip:x:30:
www-data:x:33:
backup:x:34:
operator:x:37:
list:x:38:
irc:x:39:
src:x:40:
shadow:x:42:
utmp:x:43:
video:x:44:
sasl:x:45:
plugdev:x:46:
staff:x:50:
games:x:60:
users:x:100:
_ssh:x:101:
nogroup:x:65534:
# -- Pull secret to use for labs. Set to the string `pull-secret` to use
# the normal pull secret from Vault.
# @default -- Do not use a pull secret
pullSecret: null
# -- How frequently to reconcile lab state against Kubernetes to catch
# deletions from outside Nublado, in Safir `parse_timedelta` format. If
# a lab is deleted by a node replacement or upgrade, or manually with
# `kubectl`, that deletion will not be noticed, and the user will not be
# able to spawn a new lab, for up to this interval.
reconcileInterval: "5m"
# -- Directory in the lab under which runtime information such as
# tokens, environment variables, and container information will be
# mounted
runtimeMountsDir: "/opt/lsst/software/jupyterlab"
# -- Secrets to set in the user pods. Each should have a `secretKey` key
# pointing to a secret in the same namespace as the controller
# (generally `nublado-secret`) and `secretRef` pointing to a field in
# that key.
secrets: []
# -- Available lab sizes. Sizes must be chosen from `fine`,
# `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`,
# `gargantuan`, and `colossal` in that order. Each should specify the
# maximum CPU equivalents and memory. SI suffixes for memory are
# supported. Sizes will be shown in the order defined here, and the
# first defined size will be the default.
# @default -- See `values.yaml`
sizes:
- size: "small"
resources:
limits:
cpu: 1.0
memory: "4Gi"
requests:
cpu: 0.25
memory: "0.75Gi"
- size: "large"
resources:
limits:
cpu: 4.0
memory: "16Gi"
requests:
cpu: 1.0
memory: "4Gi"
# -- How long to wait for Kubernetes to spawn a lab in seconds. This
# should generally be shorter than the spawn timeout set in JupyterHub.
spawnTimeout: 600
# -- Whether to use standard inithome container (requires
# administrative access to home volume) or not.
standardInithome: true
# -- Tolerations for user lab pods
# @default -- Tolerate GKE arm64 taint
tolerations:
- key: "kubernetes.io/arch"
operator: "Equal"
value: "arm64"
effect: "NoSchedule"
# -- Volumes that will be in lab pods or init containers. This supports
# NFS, HostPath, and PVC volume types (differentiated in source.type).
volumes: []
# volumes:
# - name: "project"
# source:
# type: nfs
# readOnly: true
# serverPath: "/share1/project"
# server: "10.87.86.26"
# -- Volumes that should be mounted in lab pods.
volumeMounts: []
# volumeMounts:
# - containerPath: "/project"
# readOnly: true
# volumeName: "project"
# -- How frequently to restart a Kubernetes watch request. These
# connections can be dropped and throw a 400 error, or even be silently
# dropped in different Kubernetes environments. Setting this value can
# help prevent those things from happening.
watchReconnectTimeout: "3m"
metrics:
# -- Whether to enable sending metrics
enabled: false
# -- Name under which to log metrics. Generally there is no reason to
# change this.
application: "nublado"
events:
# -- Topic prefix for events. It may sometimes be useful to change this
# in development environments.
topicPrefix: "lsst.square.metrics.events"
schemaManager:
# -- URL of the Confluent-compatible schema registry server
# @default -- Sasquatch in the local cluster
registryUrl: "http://sasquatch-schema-registry.sasquatch.svc.cluster.local:8081"
# -- Suffix to add to all registered subjects. This is sometimes useful
# for experimentation during development.
suffix: ""
# JupyterHub configuration handled directly by this chart rather than by Zero
# to JupyterHub.
hub:
# -- Whether to use the cluster-internal PostgreSQL server instead of an
# external server. This is not used directly by the Nublado chart, but
# controls how the database password is managed.
internalDatabase: false
# -- Default spawn page. Usually '/lab', but can be overridden in order
# to specify a custom landing page.
landingPage: "/lab"
# -- Minimum remaining token lifetime when spawning a lab. The token cannot
# be renewed, so it should ideally live as long as the lab does. If the
# token has less remaining lifetime, the user will be redirected to
# reauthenticate before spawning a lab.
# @default -- `jupyterhub.cull.maxAge` if lab culling is enabled, else none
minimumTokenLifetime: null
# -- Resource limits and requests for the Hub
# @default -- See `values.yaml`
resources:
limits:
cpu: "1"
memory: "512Mi"
requests:
cpu: "6m"
memory: "130Mi"
timeout:
# -- Timeout for JupyterLab to start in seconds. Currently this sometimes
# takes over 60 seconds for reasons we don't understand.
startup: 90
# -- Whether to put each user's lab in a separate domain. This is strongly
# recommended for security, but requires wildcard DNS and cert-manager
# support and requires subdomain support be enabled in Gafaelfawr.
useSubdomains: false
# Configuration for the Zero to JupyterHub subchart.
jupyterhub:
cull:
# -- Enable the lab culler.
enabled: true
# -- Default idle timeout before the lab is automatically deleted in
# seconds
# @default -- 432000 (5 days)
timeout: 432000
# -- How frequently to check for idle labs in seconds
# @default -- 3600 (1 hour)
every: 3600
# -- Whether to log out the user (from JupyterHub) when culling their lab
users: false
# -- Whether to remove named servers when culling their lab
removeNamedServers: true
# -- Maximum age of a lab regardless of activity
# @default -- 864000 (10 days)
maxAge: 864000
hub:
# -- Whether to require metrics requests to be authenticated
authenticatePrometheus: false
image:
# -- Image to use for JupyterHub
name: "ghcr.io/lsst-sqre/nublado-jupyterhub"
# -- Tag of image to use for JupyterHub
tag: "12.1.0"
# -- Resource limits and requests
# @default -- See `values.yaml`
resources:
limits:
cpu: "900m"
memory: "1Gi" # Should support about 200 users
requests:
cpu: "100m"
memory: "128Mi"
# -- Tolerations for Hub pod
# @default -- Tolerate GKE arm64 taint
tolerations:
- key: "kubernetes.io/arch"
operator: "Equal"
value: "arm64"
effect: "NoSchedule"
db:
# -- Type of database to use
type: "postgres"
# -- Database password (not used)
# @default -- Comes from nublado-secret
password: "true"
# -- Whether to automatically update DB schema at Hub start
upgrade: false
# -- URL of PostgreSQL server
# @default -- Use the in-cluster PostgreSQL installed by Phalanx
url: "postgresql://nublado3@postgres.postgres/jupyterhub"
# -- Security context for JupyterHub container
# @default -- See `values.yaml`
containerSecurityContext:
runAsUser: 1000
runAsGroup: 1000
allowPrivilegeEscalation: false
# -- Base URL on which JupyterHub listens
baseUrl: "/nb"
# -- Existing secret to use for private keys
existingSecret: "nublado-secret"
# -- Additional environment variables to set
# @default -- Gets `JUPYTERHUB_CRYPT_KEY` from `nublado-secret`
extraEnv:
JUPYTERHUB_CRYPT_KEY:
valueFrom:
secretKeyRef:
name: "nublado-secret"
key: "hub.config.CryptKeeper.keys"
# -- Additional volumes to make available to JupyterHub
# @default -- The `hub-config` `ConfigMap` and the Gafaelfawr token
extraVolumes:
- name: "hub-config"
configMap:
name: "hub-config"
- name: "hub-gafaelfawr-token"
secret:
secretName: "hub-gafaelfawr-token"
# -- Additional volume mounts for JupyterHub
# @default -- `hub-config` and the Gafaelfawr token
extraVolumeMounts:
- name: "hub-config"
mountPath: "/usr/local/etc/jupyterhub/jupyterhub_config.d"
- name: "hub-gafaelfawr-token"
mountPath: "/etc/gafaelfawr"
networkPolicy:
# -- Whether to enable the default `NetworkPolicy` (currently, the
# upstream one does not work correctly)
enabled: false
loadRoles:
server:
# -- Default scopes for the user's lab, overridden to allow the lab to
# delete itself (which we use for our added menu items)
# @default -- See `values.yaml`
scopes:
- "access:servers!user"
- "delete:servers!user"
- "users:activity!user"
ingress:
# -- Whether to enable the default ingress. Should always be disabled
# since we install our own `GafaelfawrIngress` to avoid repeating the
# global hostname and manually configuring authentication
enabled: false
prePuller:
continuous:
# -- Whether to run the JupyterHub continuous prepuller (the Nublado
# controller does its own prepulling)
enabled: false
hook:
# -- Whether to run the JupyterHub hook prepuller (the Nublado
# controller does its own prepulling)
enabled: false
proxy:
service:
# -- Only expose the proxy to the cluster, overriding the default of
# exposing the proxy directly to the Internet
type: "ClusterIP"
chp:
networkPolicy:
# -- Enable access to the proxy from other namespaces, since we put
# each user's lab environment in its own namespace
interNamespaceAccessLabels: "accept"
# -- Enable the proxy to send traffic to any pod in any namespace with
# the `nublado.lsst.io/category: lab` label.
egress:
- to:
- namespaceSelector: {}
podSelector:
matchLabels:
nublado.lsst.io/category: lab
# -- Extra CLI options to pass to the proxy. The most up-to-date list is
# [here](https://github.com/jupyterhub/configurable-http-proxy/blob/main/bin/configurable-http-proxy)
# (not the docs, unfortunately)
extraCommandLineFlags:
# Keepalive timeout, in milliseconds. This should be longer than any
# keepalive timeouts on any downstream proxies. The keepalive timeout
# for ingress-nginx connections is 60s.
- --keep-alive-timeout=61000
# -- Resource limits and requests for proxy pod
# @default -- See `values.yaml`
resources:
limits:
cpu: "1"
memory: "3Gi"
requests:
cpu: "250m"
memory: "200Mi"
# -- Tolerations for proxy pod
# @default -- Tolerate GKE arm64 taint
tolerations:
- key: "kubernetes.io/arch"
operator: "Equal"
value: "arm64"
effect: "NoSchedule"
scheduling:
userScheduler:
# -- Whether the user scheduler should be enabled
enabled: false
userPlaceholder:
# -- Whether to spawn placeholder pods representing fake users to force
# autoscaling in advance of running out of resources
enabled: false
cloudsql:
# -- Enable the Cloud SQL Auth Proxy, used with Cloud SQL databases on
# Google Cloud
enabled: false
# -- Affinity rules for the Cloud SQL Auth Proxy pod
affinity: {}
image:
# -- Cloud SQL Auth Proxy image to use
repository: "gcr.io/cloudsql-docker/gce-proxy"
# -- Pull policy for Cloud SQL Auth Proxy images
pullPolicy: "IfNotPresent"
# -- Cloud SQL Auth Proxy tag to use
tag: "1.37.14"
# -- Instance connection name for a Cloud SQL PostgreSQL instance
# @default -- None, must be set if Cloud SQL Auth Proxy is enabled
instanceConnectionName: ""
# -- Resource limits and requests for the Cloud SQL Proxy pod
# @default -- See `values.yaml`
resources:
limits:
cpu: "100m"
memory: "30Mi"
requests:
cpu: "5m"
memory: "15Mi"
# -- Annotations for the Cloud SQL Auth Proxy pod
podAnnotations: {}
# -- Node selection rules for the Cloud SQL Auth Proxy pod
nodeSelector: {}
# -- The Google service account that has an IAM binding to the
# `cloud-sql-proxy` Kubernetes service account and has the `cloudsql.client`
# role
# @default -- None, must be set if Cloud SQL Auth Proxy is enabled
serviceAccount: null
# -- Tolerations for the Cloud SQL Auth Proxy pod
# @default -- Tolerate GKE arm64 taint
tolerations:
- key: "kubernetes.io/arch"
operator: "Equal"
value: "arm64"
effect: "NoSchedule"
# JupyterHub proxy configuration handled directly by this chart rather than by
# Zero to JupyterHub.
proxy:
ingress:
# -- Additional annotations to add to the proxy ingress (also used to talk
# to JupyterHub and all user labs)
# @default -- See `values.yaml`
annotations:
nginx.ingress.kubernetes.io/proxy-read-timeout: "300" # 5 minutes
nginx.ingress.kubernetes.io/proxy-send-timeout: "300" # 5 minutes
# Configuration for Nublado secrets management.
secrets:
# -- Whether to use the new secrets management mechanism. If enabled, the
# Vault nublado secret will be split into a nublado secret for JupyterHub
# and a nublado-lab-secret secret used as a source for secret values for the
# user's lab.
templateSecrets: true
purger:
# -- Purge scratch space?
enabled: false
# -- Affinity rules for purger
affinity: {}
# -- Node selector rules for purger
nodeSelector: {}
# -- Annotations for the purger pod
podAnnotations: {}
# -- Tolerations for purger
# @default -- Tolerate GKE arm64 taint
tolerations:
- key: "kubernetes.io/arch"
operator: "Equal"
value: "arm64"
effect: "NoSchedule"
# -- Resource limits and requests for the filesystem purger
# @default -- See `values.yaml`
resources:
limits:
cpu: "1"
memory: "1Gi"
requests:
cpu: "0.05"
memory: "120Mi"
# -- Crontab entry for when to run.
schedule: "05 03 * * *"
config:
# -- Add timestamps to log messages
addTimestamp: false
# -- File holding purge policy
policyFile: "/etc/purger/policy.yaml"
# -- Report only; do not purge
dryRun: false
# -- Level at which to log
logLevel: info
# -- "production" (JSON logs) or "development" (human-friendly)
logProfile: production
policy:
# -- Per-directory pruning policy.
# @default -- See `values.yaml`
directories:
- path: /scratch
# Files this large or larger will be subject to the "large" interval
# set
threshold: 1GiB
# If any of these times are older than specified, remove the file.
# Zero means "never remove".
intervals:
large:
accessInterval: 0
modificationInterval: 0
creationInterval: 0
small:
accessInterval: 0
modificationInterval: 0
creationInterval: 0
# -- Name of volume to purge (from controller.lab.config.volumes)
# @default -- None, must be set for each environment
volumeName: null
# If we're installing tutorials and artifacts at a given site, this cronjob
# controls their refresh.
cronjob:
tutorials:
# -- Clone the notebooks?
enabled: false
# -- Source for Tutorials repository to clone
gitSource: "https://github.com/lsst/tutorial-notebooks"
# -- Target where Tutorials repository should land
gitTarget: "/rubin/cst_repos/tutorial-notebooks"
# -- Branch of repository to clone
gitBranch: "main"
# -- Where repository volume should be mounted
targetVolumePath: "/rubin"
targetVolume:
# -- Name of volume to mount (from controller.lab.config.volumes)
# @default -- None, must be set for each environment
volumeName: null
# -- Where volume will be mounted in the container
mountPath: "/rubin" # Conventional
# -- Schedule for the cloning cronjob(s).
schedule: "42 * * * *"
# -- UID for the cloning cronjob(s)
uid: 1000
# -- GID for the cloning cronjob(s)
gid: 1000
artifacts:
# -- Clone the artifacts?
enabled: false
# -- Source for Tutorials binary artifact repository to clone
gitSource: "https://github.com/lsst/tutorial-notebooks-data"
# -- Target where Tutorial artifacts repository should land
gitTarget: "/rubin/cst_repos/tutorial-notebooks-data"
# -- Branch of repository to clone
gitBranch: "main"
# -- Where repository volume should be mounted
targetVolumePath: "/rubin"
targetVolume:
# -- Name of volume to mount (from controller.lab.config.volumes)
# @default -- None, must be set for each environment
volumeName: null
# -- Where volume will be mounted in the container
mountPath: "/rubin" # Conventional
# -- Schedule for the cloning cronjob(s).
schedule: "43 * * * *"
# -- UID for the cloning cronjob(s)
uid: 1000
# -- GID for the cloning cronjob(s)
gid: 1000
# -- Affinity rules for the cloning cronjob(s).
affinity: {}
# -- Resource limits and requests for the cloning cronjob(s)
# @default -- See `values.yaml`
resources:
limits:
cpu: "1"
memory: "1Gi"
requests:
cpu: "50m"
memory: "50Mi"
# -- Tolerations for the cloning cronjob(s).
# @default -- Tolerate GKE arm64 taint
tolerations:
- key: "kubernetes.io/arch"
operator: "Equal"
value: "arm64"
effect: "NoSchedule"
sentry:
# -- Whether to report errors to Sentry. Applies to all Nublado components
# that support Sentry.
enabled: false
# The following will be set by parameters injected by Argo CD and should not
# be set in the individual environment values files.
global:
# -- Base URL for the environment
# @default -- Set by Argo CD
baseUrl: null
# -- Name of the Phalanx environment
# @default -- Set by Argo CD Application
environmentName: null
# -- Host name for ingress
# @default -- Set by Argo CD
host: null
# -- Base URL for Repertoire discovery API
# @default -- Set by Argo CD
repertoireUrl: null