diff --git a/.github/workflows/build-and-publish-images.yml b/.github/workflows/build-and-publish-images.yml new file mode 100644 index 0000000000000..381dc88fc6179 --- /dev/null +++ b/.github/workflows/build-and-publish-images.yml @@ -0,0 +1,45 @@ +name: Publish Dev Operator + +on: + push: + tags: + - 'v*.*.*' + - 'v*.*.*-*' +jobs: + publish: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and publish k8s-operator image + env: + REPO: ghcr.io/${{ github.repository_owner }}/tailscale-k8s-operator + TAGS: ${{ github.ref_name }} + run: | + echo "Building and publishing k8s-operator to ${REPO} with tags ${TAGS}" + TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=operator ./build_docker.sh + - name: Build and publish nameserver image + env: + REPO: ghcr.io/${{ github.repository_owner }}/tailscale-k8s-nameserver + TAGS: ${{ github.ref_name }} + run: | + echo "Building and publishing k8s-nameserver to ${REPO} with tags ${TAGS}" + TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-nameserver ./build_docker.sh + - name: Build and publish client image + env: + REPO: ghcr.io/${{ github.repository_owner }}/tailscale + TAGS: ${{ github.ref_name }} + run: | + echo "Building and publishing tailscale client to ${REPO} with tags ${TAGS}" + TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=client ./build_docker.sh diff --git a/.github/workflows/chart.yaml b/.github/workflows/chart.yaml new file mode 100644 index 0000000000000..eb0ffd5f62003 --- /dev/null +++ b/.github/workflows/chart.yaml @@ -0,0 +1,38 @@ +name: package-helm-chart + +on: + push: + tags: + - 'v*.*.*' + - 'v*.*.*-*' + workflow_dispatch: + +jobs: + 
package-and-push-helm-chart: + permissions: + contents: read + packages: write + + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4.2.2 + + - name: Set environment variables + id: set-variables + run: | + echo "REPOSITORY=ghcr.io/$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]')" >> "$GITHUB_OUTPUT" + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3.3.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ github.token }} + + - name: Build, package and push helm chart + run: | + ./tool/go run cmd/k8s-operator/generate/main.go helmcrd + ./tool/helm package --app-version=${{ github.ref_name }} --version=${{ github.ref_name }} './cmd/k8s-operator/deploy/chart' + ./tool/helm push ./tailscale-operator-${{ github.ref_name }}.tgz oci://${{ steps.set-variables.outputs.REPOSITORY }}/charts diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2e80b44dcc4d3..f87c3f6548611 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -208,6 +208,8 @@ jobs: find $(go env GOCACHE) -type f -mmin +90 -delete windows: + # we disable the windows tests as we don't have a big enough runner + if: false # windows-8vpu is a 2022 GitHub-managed runner in our # org with 8 cores and 32 GB of RAM: # https://github.com/organizations/tailscale/settings/actions/github-hosted-runners/1 @@ -314,7 +316,10 @@ jobs: needs: gomod-cache runs-on: ["self-hosted", "linux", "vm"] # VM tests run with some privileges, don't let them run on 3p PRs. 
- if: github.repository == 'tailscale/tailscale' + # if: github.repository == 'tailscale/tailscale' + # + # we disable VM tests + if: false steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -853,8 +858,6 @@ jobs: needs: - android - test - - windows - - vm - cross - ios - wasm @@ -902,8 +905,6 @@ jobs: needs: - check_mergeability_strict - test - - windows - - vm - wasm - fuzz - race-root-integration diff --git a/cmd/k8s-nameserver/main.go b/cmd/k8s-nameserver/main.go index ca4b449358083..76888ebc7f576 100644 --- a/cmd/k8s-nameserver/main.go +++ b/cmd/k8s-nameserver/main.go @@ -11,6 +11,7 @@ package main import ( "context" "encoding/json" + "flag" "fmt" "log" "net" @@ -19,16 +20,38 @@ import ( "path/filepath" "sync" "syscall" + "time" "github.com/fsnotify/fsnotify" "github.com/miekg/dns" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + listersv1 "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" operatorutils "tailscale.com/k8s-operator" "tailscale.com/util/dnsname" ) +var ( + // domain is the DNS domain that this nameserver has registered a handler for. + domain = flag.String("domain", "ts.net", "the DNS domain to serve records for") + updateMode = flag.String( + "update-mode", + mountAccessUpdateMode, + fmt.Sprintf( + "how to detect changes to the configMap which contains the DNS entries.\n"+ + "%q watches the mounted configMap for changes.\n"+ + "%q watches the configMap directly via the Kubernetes API.", + mountAccessUpdateMode, + directAccessUpdateMode, + ), + ) +) + const ( - // tsNetDomain is the domain that this DNS nameserver has registered a handler for. - tsNetDomain = "ts.net" // addr is the the address that the UDP and TCP listeners will listen on. addr = ":1053" @@ -37,10 +60,31 @@ const ( // /config is the only supported way for configuring this nameserver. 
defaultDNSConfigDir = "/config" kubeletMountedConfigLn = "..data" + + // configMapName is the name of the configMap which needs to be watched + // for changes when using the non-mount update mode. + configMapName = "dnsrecords" + // configMapKey is the configMap key which contains the DNS data + configMapKey = "records.json" + + // the update modes define how changes to the configMap are detected. + // Either by watching the mounted file (might be slower due to the time + // needed for syncing) or by watching the configMap directly (needs + // more permissions for the service account the k8s-namesever runs + // with). + directAccessUpdateMode = "direct-access" + mountAccessUpdateMode = "mount" + + // configMapDefaultNamespace sets the default namespace for reading the + // configMap if the env variable POD_NAMESPACE is not set. Otherwise + // the content of the POD_NAMESPACE env variable determines where to + // read the configMap from. This only matters when using direct access + // mode for updates. + configMapDefaultNamespace = "tailscale" ) // nameserver is a simple nameserver that responds to DNS queries for A records -// for ts.net domain names over UDP or TCP. It serves DNS responses from +// for the names of the given domain over UDP or TCP. It serves DNS responses from // in-memory IPv4 host records. It is intended to be deployed on Kubernetes with // a ConfigMap mounted at /config that should contain the host records. It // dynamically reconfigures its in-memory mappings as the contents of the @@ -49,7 +93,7 @@ type nameserver struct { // configReader returns the latest desired configuration (host records) // for the nameserver. By default it gets set to a reader that reads // from a Kubernetes ConfigMap mounted at /config, but this can be - // overridden in tests. + // overridden. 
configReader configReaderFunc // configWatcher is a watcher that returns an event when the desired // configuration has changed and the nameserver should update the @@ -63,26 +107,31 @@ type nameserver struct { } func main() { + flag.Parse() ctx, cancel := context.WithCancel(context.Background()) - // Ensure that we watch the kube Configmap mounted at /config for - // nameserver configuration updates and send events when updates happen. - c := ensureWatcherForKubeConfigMap(ctx) + if !validUpdateMode(*updateMode) { + log.Fatalf("invalid update mode: %q", *updateMode) + } + reader, watcher, err := configMapReaderAndWatcher(ctx, *updateMode) + if err != nil { + log.Fatalf("cannot set up configMap reader: %v", err) + } ns := &nameserver{ - configReader: configMapConfigReader, - configWatcher: c, + configReader: reader, + configWatcher: watcher, } // Ensure that in-memory records get set up to date now and will get // reset when the configuration changes. ns.runRecordsReconciler(ctx) - // Register a DNS server handle for ts.net domain names. Not having a + // Register a DNS server handle for names of the domain. Not having a // handle registered for any other domain names is how we enforce that - // this nameserver can only be used for ts.net domains - querying any + // this nameserver can only be used for the given domain - querying any // other domain names returns Rcode Refused. - dns.HandleFunc(tsNetDomain, ns.handleFunc()) + dns.HandleFunc(*domain, ns.handleFunc()) // Listen for DNS queries over UDP and TCP. 
udpSig := make(chan os.Signal) @@ -112,7 +161,7 @@ func (n *nameserver) handleFunc() func(w dns.ResponseWriter, r *dns.Msg) { h := func(w dns.ResponseWriter, r *dns.Msg) { m := new(dns.Msg) defer func() { - w.WriteMsg(m) + _ = w.WriteMsg(m) }() if len(r.Question) < 1 { log.Print("[unexpected] nameserver received a request with no questions") @@ -135,7 +184,7 @@ func (n *nameserver) handleFunc() func(w dns.ResponseWriter, r *dns.Msg) { m.RecursionAvailable = false ips := n.lookupIP4(fqdn) - if ips == nil || len(ips) == 0 { + if len(ips) == 0 { // As we are the authoritative nameserver for MagicDNS // names, if we do not have a record for this MagicDNS // name, it does not exist. @@ -231,7 +280,7 @@ func (n *nameserver) resetRecords() error { log.Printf("error reading nameserver's configuration: %v", err) return err } - if dnsCfgBytes == nil || len(dnsCfgBytes) < 1 { + if len(dnsCfgBytes) < 1 { log.Print("nameserver's configuration is empty, any in-memory records will be unset") n.mu.Lock() n.ip4 = make(map[dnsname.FQDN][]net.IP) @@ -284,7 +333,7 @@ func listenAndServe(net, addr string, shutdown chan os.Signal) { go func() { <-shutdown log.Printf("shutting down server for %s", net) - s.Shutdown() + _ = s.Shutdown() }() log.Printf("listening for %s queries on %s", net, addr) if err := s.ListenAndServe(); err != nil { @@ -292,10 +341,86 @@ func listenAndServe(net, addr string, shutdown chan os.Signal) { } } -// ensureWatcherForKubeConfigMap sets up a new file watcher for the ConfigMap +func getClientset() (*kubernetes.Clientset, error) { + config, err := rest.InClusterConfig() + if err != nil { + return nil, fmt.Errorf("failed to load in-cluster config: %w", err) + } + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("failed to create clientset: %w", err) + } + return clientset, nil +} + +func getConfigMapNamespace() string { + namespace := configMapDefaultNamespace + if ns := os.Getenv("POD_NAMESPACE"); ns != "" { + 
namespace = ns + } + return namespace +} + +func configMapReaderAndWatcher(ctx context.Context, updateMode string) (configReaderFunc, chan string, error) { + switch updateMode { + case mountAccessUpdateMode: + return configMapMountedReader, watchMountedConfigMap(ctx), nil + case directAccessUpdateMode: + cs, err := getClientset() + if err != nil { + return nil, nil, err + } + watcherChannel, cacheReader, err := watchConfigMap(ctx, cs, configMapName, getConfigMapNamespace()) + if err != nil { + return nil, nil, err + } + return configMapCacheReader(cacheReader), watcherChannel, nil + default: + return nil, nil, fmt.Errorf("no implementation for update mode %q", updateMode) + } +} + +// watchConfigMap watches the configMap identified by the given name and +// namespace. It emits a message in the returned channel whenever the configMap +// updated. It also returns a configMapLister which allows to access the cached objects +// retrieved by the API server. +func watchConfigMap(ctx context.Context, cs kubernetes.Interface, configMapName, configMapNamespace string) (chan string, listersv1.ConfigMapLister, error) { + ch := make(chan string) + + fieldSelector := fields.OneTermEqualSelector("metadata.name", configMapName).String() + factory := informers.NewSharedInformerFactoryWithOptions( + cs, + // we resync every hour to account for missed watches + time.Hour, + informers.WithNamespace(configMapNamespace), + informers.WithTweakListOptions(func(options *metav1.ListOptions) { + options.FieldSelector = fieldSelector + }), + ) + cmFactory := factory.Core().V1().ConfigMaps() + _, _ = cmFactory.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj any) { + ch <- fmt.Sprintf("ConfigMap %s added or synced", configMapName) + }, + UpdateFunc: func(oldObj, newObj any) { + ch <- fmt.Sprintf("ConfigMap %s updated", configMapName) + }, + }) + factory.Start(ctx.Done()) + // Wait for cache sync + log.Println("waiting for configMap cache to sync") + if 
!cache.WaitForCacheSync(ctx.Done(), cmFactory.Informer().HasSynced) { + return nil, nil, fmt.Errorf("configMap cache did not sync successfully") + } + log.Println("configMap cache successfully synced") + + return ch, cmFactory.Lister(), nil +} + +// watchMountedConfigMap sets up a new file watcher for the ConfigMap // that's expected to be mounted at /config. Returns a channel that receives an // event every time the contents get updated. -func ensureWatcherForKubeConfigMap(ctx context.Context) chan string { +func watchMountedConfigMap(ctx context.Context) chan string { c := make(chan string) watcher, err := fsnotify.NewWatcher() if err != nil { @@ -307,7 +432,9 @@ func ensureWatcherForKubeConfigMap(ctx context.Context) chan string { // https://github.com/kubernetes/kubernetes/blob/v1.28.1/pkg/volume/util/atomic_writer.go#L39-L61 toWatch := filepath.Join(defaultDNSConfigDir, kubeletMountedConfigLn) go func() { - defer watcher.Close() + defer func() { + _ = watcher.Close() + }() log.Printf("starting file watch for %s", defaultDNSConfigDir) for { select { @@ -354,9 +481,24 @@ func ensureWatcherForKubeConfigMap(ctx context.Context) chan string { // configReaderFunc is a function that returns the desired nameserver configuration. type configReaderFunc func() ([]byte, error) -// configMapConfigReader reads the desired nameserver configuration from a +func configMapCacheReader(lister listersv1.ConfigMapLister) configReaderFunc { + return func() ([]byte, error) { + cm, err := lister.ConfigMaps(getConfigMapNamespace()).Get(configMapName) + if err != nil { + return nil, fmt.Errorf("cannot read configMap: %w", err) + } + if data, exists := cm.Data[configMapKey]; exists { + return []byte(data), nil + } + // if the configMap is empty we need to return `nil` which will + // be handled by the caller specifically + return nil, nil + } +} + +// configMapMountedReader reads the desired nameserver configuration from a // records.json file in a ConfigMap mounted at /config. 
-var configMapConfigReader configReaderFunc = func() ([]byte, error) { +var configMapMountedReader configReaderFunc = func() ([]byte, error) { if contents, err := os.ReadFile(filepath.Join(defaultDNSConfigDir, operatorutils.DNSRecordsCMKey)); err == nil { return contents, nil } else if os.IsNotExist(err) { @@ -377,3 +519,12 @@ func (n *nameserver) lookupIP4(fqdn dnsname.FQDN) []net.IP { f := n.ip4[fqdn] return f } + +func validUpdateMode(m string) bool { + switch m { + case directAccessUpdateMode, mountAccessUpdateMode: + return true + default: + return false + } +} diff --git a/cmd/k8s-nameserver/main_test.go b/cmd/k8s-nameserver/main_test.go index d9a33c4faffe5..e6ab931996425 100644 --- a/cmd/k8s-nameserver/main_test.go +++ b/cmd/k8s-nameserver/main_test.go @@ -6,11 +6,15 @@ package main import ( + "context" "net" "testing" "github.com/google/go-cmp/cmp" "github.com/miekg/dns" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" "tailscale.com/util/dnsname" ) @@ -195,6 +199,33 @@ func TestResetRecords(t *testing.T) { t.Fatalf("unexpected nameserver.ip4 contents (-got +want): \n%s", diff) } }) + t.Run(tt.name+" (direct access)", func(t *testing.T) { + client := fake.NewSimpleClientset(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + Namespace: configMapDefaultNamespace, + }, + Data: map[string]string{ + configMapKey: string(tt.config), + }, + }) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + _, reader, err := watchConfigMap(ctx, client, configMapName, configMapDefaultNamespace) + if err != nil { + t.Fatal(err) + } + ns := &nameserver{ + ip4: tt.hasIp4, + configReader: configMapCacheReader(reader), + } + if err := ns.resetRecords(); err == nil == tt.wantsErr { + t.Errorf("resetRecords() returned err: %v, wantsErr: %v", err, tt.wantsErr) + } + if diff := cmp.Diff(ns.ip4, tt.wantsIp4); diff != "" { + t.Fatalf("unexpected nameserver.ip4 contents 
(-got +want): \n%s", diff) + } + }) } } diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml index 268d978c15f37..a1e5c51d64ca2 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml @@ -91,6 +91,122 @@ spec: when a DNSConfig is applied. type: object properties: + cmd: + description: Cmd can be used to overwrite the command used when running the nameserver image. + type: array + items: + type: string + env: + description: |- + Env can be used to pass environment variables to the nameserver + container. + type: array + items: + description: EnvVar represents an environment variable present in a Container. + type: object + required: + - name + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + type: object + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + type: object + required: + - key + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + default: "" + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + type: object + required: + - fieldPath + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + type: object + required: + - resource + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + type: object + required: + - key + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + default: "" + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + x-kubernetes-map-type: atomic image: description: Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. type: object @@ -101,6 +217,13 @@ spec: tag: description: Tag defaults to unstable. type: string + podLabels: + description: |- + PodLabels are the labels which will be attached to the nameserver + pod. They can be used to define network policies. + type: object + additionalProperties: + type: string status: description: |- Status describes the status of the DNSConfig. This is set diff --git a/cmd/k8s-operator/deploy/manifests/nameserver/deploy.yaml b/cmd/k8s-operator/deploy/manifests/nameserver/deploy.yaml index c3a16e03e9a42..57ad783755453 100644 --- a/cmd/k8s-operator/deploy/manifests/nameserver/deploy.yaml +++ b/cmd/k8s-operator/deploy/manifests/nameserver/deploy.yaml @@ -18,6 +18,11 @@ spec: containers: - imagePullPolicy: IfNotPresent name: nameserver + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace ports: - name: tcp protocol: TCP diff --git a/cmd/k8s-operator/deploy/manifests/nameserver/role.yaml b/cmd/k8s-operator/deploy/manifests/nameserver/role.yaml new file mode 100644 index 0000000000000..4771ab3115c17 --- /dev/null +++ b/cmd/k8s-operator/deploy/manifests/nameserver/role.yaml @@ -0,0 +1,9 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: dnsrecords-watcher +rules: + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["dnsrecords"] + verbs: ["get", "list", "watch"] diff --git a/cmd/k8s-operator/deploy/manifests/nameserver/rolebinding.yaml 
b/cmd/k8s-operator/deploy/manifests/nameserver/rolebinding.yaml new file mode 100644 index 0000000000000..54498462f8b64 --- /dev/null +++ b/cmd/k8s-operator/deploy/manifests/nameserver/rolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: dnsrecords-watcher-binding +subjects: + - kind: ServiceAccount + name: nameserver + namespace: tailscale +roleRef: + kind: Role + name: dnsrecords-watcher + apiGroup: rbac.authorization.k8s.io diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index cdf301318f923..f47ee3c72b44f 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -379,6 +379,122 @@ spec: Tailscale Ingresses. The operator will always deploy this nameserver when a DNSConfig is applied. properties: + cmd: + description: Cmd can be used to overwrite the command used when running the nameserver image. + items: + type: string + type: array + env: + description: |- + Env can be used to pass environment variables to the nameserver + container. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. 
Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array image: description: Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. properties: @@ -389,6 +505,13 @@ spec: description: Tag defaults to unstable. type: string type: object + podLabels: + additionalProperties: + type: string + description: |- + PodLabels are the labels which will be attached to the nameserver + pod. They can be used to define network policies. 
+ type: object type: object required: - nameserver diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go index ca6562071eba7..2a530041c9db8 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -70,10 +70,11 @@ var gaugeEgressServices = clientmetric.NewGauge(kubetypes.MetricEgressServiceCou // on whose proxies it should be exposed. type egressSvcsReconciler struct { client.Client - logger *zap.SugaredLogger - recorder record.EventRecorder - clock tstime.Clock - tsNamespace string + logger *zap.SugaredLogger + recorder record.EventRecorder + clock tstime.Clock + tsNamespace string + validationOpts validationOpts mu sync.Mutex // protects following svcs set.Slice[types.UID] // UIDs of all currently managed egress Services for ProxyGroup @@ -523,7 +524,8 @@ func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, s tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) return false, err } - if violations := validateEgressService(svc, pg); len(violations) > 0 { + + if violations := validateEgressService(svc, pg, esr.validationOpts); len(violations) > 0 { msg := fmt.Sprintf("invalid egress Service: %s", strings.Join(violations, ", ")) esr.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVICE", msg) l.Info(msg) @@ -559,8 +561,8 @@ func egressSvcCfg(externalNameSvc, clusterIPSvc *corev1.Service, ns string, l *z return cfg } -func validateEgressService(svc *corev1.Service, pg *tsapi.ProxyGroup) []string { - violations := validateService(svc) +func validateEgressService(svc *corev1.Service, pg *tsapi.ProxyGroup, opts validationOpts) []string { + violations := validateService(svc, opts) // We check that only one of these two is set in the earlier validateService function. 
if svc.Annotations[AnnotationTailnetTargetFQDN] == "" && svc.Annotations[AnnotationTailnetTargetIP] == "" { diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go index 20d66f7d0766a..6ce98e1297e9a 100644 --- a/cmd/k8s-operator/nameserver.go +++ b/cmd/k8s-operator/nameserver.go @@ -19,6 +19,7 @@ import ( xslices "golang.org/x/exp/slices" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -168,12 +169,33 @@ func nameserverResourceLabels(name, namespace string) map[string]string { return labels } -func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsapi.DNSConfig, logger *zap.SugaredLogger) error { - labels := nameserverResourceLabels(tsDNSCfg.Name, a.tsNamespace) +// mergeEnvVars merges `source` with `other` while prioritizing the values from +// `other` if there a duplicate environment variables found. 
+func mergeEnvVars(source []corev1.EnvVar, other []corev1.EnvVar) []corev1.EnvVar { + merged := make([]corev1.EnvVar, len(other)) + copy(merged, other) + + // create a map to track existing env var names in `other` + existing := make(map[string]bool, len(other)) + for _, env := range other { + existing[env.Name] = true + } + // now we add the missing env variable names from source if they do not + // already exist + for _, env := range source { + if !existing[env.Name] { + merged = append(merged, env) + } + } + return merged +} + +func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsapi.DNSConfig, _ *zap.SugaredLogger) error { + resourceLabels := nameserverResourceLabels(tsDNSCfg.Name, a.tsNamespace) dCfg := &deployConfig{ ownerRefs: []metav1.OwnerReference{*metav1.NewControllerRef(tsDNSCfg, tsapi.SchemeGroupVersion.WithKind("DNSConfig"))}, namespace: a.tsNamespace, - labels: labels, + labels: resourceLabels, imageRepo: defaultNameserverImageRepo, imageTag: defaultNameserverImageTag, } @@ -183,7 +205,13 @@ func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsa if tsDNSCfg.Spec.Nameserver.Image != nil && tsDNSCfg.Spec.Nameserver.Image.Tag != "" { dCfg.imageTag = tsDNSCfg.Spec.Nameserver.Image.Tag } - for _, deployable := range []deployable{saDeployable, deployDeployable, svcDeployable, cmDeployable} { + if len(tsDNSCfg.Spec.Nameserver.Cmd) > 0 { + dCfg.cmd = tsDNSCfg.Spec.Nameserver.Cmd + } + dCfg.env = tsDNSCfg.Spec.Nameserver.Env + dCfg.podLabels = tsDNSCfg.Spec.Nameserver.PodLabels + + for _, deployable := range []deployable{saDeployable, roleDeployable, rolebindingDeployable, deployDeployable, svcDeployable, cmDeployable} { if err := deployable.updateObj(ctx, dCfg, a.Client); err != nil { return fmt.Errorf("error reconciling %s: %w", deployable.kind, err) } @@ -211,6 +239,9 @@ type deployConfig struct { imageRepo string imageTag string labels map[string]string + podLabels map[string]string + cmd []string + env 
[]corev1.EnvVar ownerRefs []metav1.OwnerReference namespace string } @@ -224,6 +255,10 @@ var ( saYaml []byte //go:embed deploy/manifests/nameserver/svc.yaml svcYaml []byte + //go:embed deploy/manifests/nameserver/role.yaml + roleYaml []byte + //go:embed deploy/manifests/nameserver/rolebinding.yaml + roleBindingYaml []byte deployDeployable = deployable{ kind: "Deployment", @@ -236,6 +271,17 @@ var ( d.ObjectMeta.Namespace = cfg.namespace d.ObjectMeta.Labels = cfg.labels d.ObjectMeta.OwnerReferences = cfg.ownerRefs + if d.Spec.Template.Labels == nil { + d.Spec.Template.Labels = make(map[string]string) + } + for key, value := range cfg.podLabels { + d.Spec.Template.Labels[key] = value + } + if len(cfg.cmd) > 0 { + d.Spec.Template.Spec.Containers[0].Command = cfg.cmd + } + d.Spec.Template.Spec.Containers[0].Env = mergeEnvVars(d.Spec.Template.Spec.Containers[0].Env, cfg.env) + updateF := func(oldD *appsv1.Deployment) { oldD.Spec = d.Spec } @@ -285,4 +331,35 @@ var ( return err }, } + roleDeployable = deployable{ + kind: "Role", + updateObj: func(ctx context.Context, cfg *deployConfig, kubeClient client.Client) error { + role := new(rbacv1.Role) + if err := yaml.Unmarshal(roleYaml, &role); err != nil { + return fmt.Errorf("error unmarshalling role yaml: %w", err) + } + role.ObjectMeta.Labels = cfg.labels + role.ObjectMeta.OwnerReferences = cfg.ownerRefs + role.ObjectMeta.Namespace = cfg.namespace + _, err := createOrUpdate[rbacv1.Role](ctx, kubeClient, cfg.namespace, role, func(*rbacv1.Role) {}) + return err + }, + } + rolebindingDeployable = deployable{ + kind: "RoleBinding", + updateObj: func(ctx context.Context, cfg *deployConfig, kubeClient client.Client) error { + roleBinding := new(rbacv1.RoleBinding) + if err := yaml.Unmarshal(roleBindingYaml, &roleBinding); err != nil { + return fmt.Errorf("error unmarshalling rolebinding yaml: %w", err) + } + roleBinding.ObjectMeta.Labels = cfg.labels + roleBinding.ObjectMeta.OwnerReferences = cfg.ownerRefs + 
roleBinding.ObjectMeta.Namespace = cfg.namespace + if len(roleBinding.Subjects) > 0 { + roleBinding.Subjects[0].Namespace = cfg.namespace + } + _, err := createOrUpdate[rbacv1.RoleBinding](ctx, kubeClient, cfg.namespace, roleBinding, func(*rbacv1.RoleBinding) {}) + return err + }, + } ) diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 96b3b37ad0340..562c3b9d14069 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -68,6 +68,10 @@ import ( // Generate CRD API docs. //go:generate go run github.com/elastic/crd-ref-docs --renderer=markdown --source-path=../../k8s-operator/apis/ --config=../../k8s-operator/api-docs-config.yaml --output-path=../../k8s-operator/api.md +const ( + defaultDomain = "ts.net" +) + func main() { // Required to use our client API. We're fine with the instability since the // client lives in the same repo as this code. @@ -84,6 +88,15 @@ func main() { isDefaultLoadBalancer = defaultBool("OPERATOR_DEFAULT_LOAD_BALANCER", false) loginServer = strings.TrimSuffix(defaultEnv("OPERATOR_LOGIN_SERVER", ""), "/") ingressClassName = defaultEnv("OPERATOR_INGRESS_CLASS_NAME", "tailscale") + baseDomain = defaultEnv("OPERATOR_DOMAIN", defaultDomain) + // relaxedDomainValidation can be used to only validate that the + // base domain and at least 1 sub domain in the FQDN of + // services exists. The default is to make sure that there are + // exactly 2 sub-domains in the FQDN. 
+ relaxedDomainValidation = defaultBool("OPERATOR_RELAXED_DOMAIN_VALIDATION", false) + // noFqdnDotAppend prevents the dot ('.') appending to the FQDN + // of egress proxy services + noFqdnDotAppend = defaultBool("OPERATOR_NO_FQDN_DOT_APPEND", false) ) var opts []kzap.Opts @@ -135,6 +148,11 @@ func main() { defaultProxyClass: defaultProxyClass, loginServer: loginServer, ingressClassName: ingressClassName, + validationOpts: validationOpts{ + baseDomain: baseDomain, + relaxedDomainValidation: relaxedDomainValidation, + }, + noFqdnDotAppend: noFqdnDotAppend, } runReconcilers(rOpts) } @@ -146,6 +164,7 @@ func initTSNet(zlog *zap.SugaredLogger, loginServer string) (*tsnet.Server, tsCl var ( clientIDPath = defaultEnv("CLIENT_ID_FILE", "") clientSecretPath = defaultEnv("CLIENT_SECRET_FILE", "") + customTokenURL = defaultEnv("TOKEN_URL", "") hostname = defaultEnv("OPERATOR_HOSTNAME", "tailscale-operator") kubeSecret = defaultEnv("OPERATOR_SECRET", "") operatorTags = defaultEnv("OPERATOR_INITIAL_TAGS", "tag:k8s-operator") @@ -154,7 +173,7 @@ func initTSNet(zlog *zap.SugaredLogger, loginServer string) (*tsnet.Server, tsCl if clientIDPath == "" || clientSecretPath == "" { startlog.Fatalf("CLIENT_ID_FILE and CLIENT_SECRET_FILE must be set") } - tsc, err := newTSClient(context.Background(), clientIDPath, clientSecretPath, loginServer) + tsc, err := newTSClient(context.Background(), clientIDPath, clientSecretPath, loginServer, customTokenURL) if err != nil { startlog.Fatalf("error creating Tailscale client: %v", err) } @@ -332,6 +351,8 @@ func runReconcilers(opts reconcilerOpts) { tsNamespace: opts.tailscaleNamespace, clock: tstime.DefaultClock{}, defaultProxyClass: opts.defaultProxyClass, + validationOpts: opts.validationOpts, + noFqdnDotAppend: opts.noFqdnDotAppend, }) if err != nil { startlog.Fatalf("could not create service reconciler: %v", err) @@ -413,16 +434,17 @@ func runReconcilers(opts reconcilerOpts) { Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter). 
Watches(&discoveryv1.EndpointSlice{}, ingressSvcFromEpsFilter). Complete(&HAServiceReconciler{ - recorder: eventRecorder, - tsClient: opts.tsClient, - tsnetServer: opts.tsServer, - defaultTags: strings.Split(opts.proxyTags, ","), - Client: mgr.GetClient(), - logger: opts.log.Named("service-pg-reconciler"), - lc: lc, - clock: tstime.DefaultClock{}, - operatorID: id, - tsNamespace: opts.tailscaleNamespace, + recorder: eventRecorder, + tsClient: opts.tsClient, + tsnetServer: opts.tsServer, + defaultTags: strings.Split(opts.proxyTags, ","), + Client: mgr.GetClient(), + logger: opts.log.Named("service-pg-reconciler"), + lc: lc, + clock: tstime.DefaultClock{}, + operatorID: id, + tsNamespace: opts.tailscaleNamespace, + validationOpts: opts.validationOpts, }) if err != nil { startlog.Fatalf("could not create service-pg-reconciler: %v", err) @@ -481,11 +503,12 @@ func runReconcilers(opts reconcilerOpts) { Watches(&corev1.Service{}, egressSvcFilter). Watches(&tsapi.ProxyGroup{}, egressProxyGroupFilter). 
Complete(&egressSvcsReconciler{ - Client: mgr.GetClient(), - tsNamespace: opts.tailscaleNamespace, - recorder: eventRecorder, - clock: tstime.DefaultClock{}, - logger: opts.log.Named("egress-svcs-reconciler"), + Client: mgr.GetClient(), + tsNamespace: opts.tailscaleNamespace, + recorder: eventRecorder, + clock: tstime.DefaultClock{}, + logger: opts.log.Named("egress-svcs-reconciler"), + validationOpts: opts.validationOpts, }) if err != nil { startlog.Fatalf("could not create egress Services reconciler: %v", err) } @@ -661,6 +684,18 @@ func runReconcilers(opts reconcilerOpts) { } } +type validationOpts struct { + baseDomain string + relaxedDomainValidation bool +} + +func (v validationOpts) domain() string { + if v.baseDomain == "" { + return defaultDomain + } + return v.baseDomain +} + type reconcilerOpts struct { log *zap.SugaredLogger tsServer *tsnet.Server @@ -704,6 +739,12 @@ type reconcilerOpts struct { // ingressClassName is the name of the ingress class used by reconcilers of Ingress resources. This defaults // to "tailscale" but can be customised. ingressClassName string + // validationOpts are used to control validations happening in the + // reconcilers + validationOpts validationOpts + // noFqdnDotAppend prevents the addition of a dot ('.') to the end of + // destination FQDNs in egress proxies + noFqdnDotAppend bool } // enqueueAllIngressEgressProxySvcsinNS returns a reconcile request for each @@ -1085,9 +1126,13 @@ func serviceHandler(_ context.Context, o client.Object) []reconcile.Request { } // isMagicDNSName reports whether name is a full tailnet node FQDN (with or -// without final dot). -func isMagicDNSName(name string) bool { - validMagicDNSName := regexp.MustCompile(`^[a-zA-Z0-9-]+\.[a-zA-Z0-9-]+\.ts\.net\.?$`) +// without final dot). The behaviour can be controlled with the given opts. 
+func isMagicDNSName(name string, opts validationOpts) bool { + baseDomainEscaped := strings.ReplaceAll(opts.domain(), `.`, `\.`) + validMagicDNSName := regexp.MustCompile(`^[a-zA-Z0-9-]+\.[a-zA-Z0-9-]+\.` + baseDomainEscaped + `\.?$`) + if opts.relaxedDomainValidation { + validMagicDNSName = regexp.MustCompile(`^([a-zA-Z0-9-]+\.)+` + baseDomainEscaped + `\.?$`) + } return validMagicDNSName.MatchString(name) } diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 1f700f13a4fc0..82c9ad9c11c77 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1523,8 +1523,9 @@ func TestProxyFirewallMode(t *testing.T) { func Test_isMagicDNSName(t *testing.T) { tests := []struct { - in string - want bool + in string + want bool + validationOpts *validationOpts }{ { in: "foo.tail4567.ts.net", @@ -1538,10 +1539,33 @@ func Test_isMagicDNSName(t *testing.T) { in: "foo.tail4567", want: false, }, + { + in: "foo.ts.net", + want: false, + }, + { + in: "foo.tail4567.example.com.", + want: true, + validationOpts: &validationOpts{baseDomain: "example.com"}, + }, + { + in: "foo.example.com.", + want: false, + validationOpts: &validationOpts{baseDomain: "example.com"}, + }, + { + in: "foo.example.com.", + want: true, + validationOpts: &validationOpts{baseDomain: "example.com", relaxedDomainValidation: true}, + }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { - if got := isMagicDNSName(tt.in); got != tt.want { + opts := validationOpts{} + if tt.validationOpts != nil { + opts = *tt.validationOpts + } + if got := isMagicDNSName(tt.in, opts); got != tt.want { t.Errorf("isMagicDNSName(%q) = %v, want %v", tt.in, got, tt.want) } }) diff --git a/cmd/k8s-operator/svc-for-pg.go b/cmd/k8s-operator/svc-for-pg.go index 9846513c78d74..279e439b90829 100644 --- a/cmd/k8s-operator/svc-for-pg.go +++ b/cmd/k8s-operator/svc-for-pg.go @@ -66,6 +66,8 @@ type HAServiceReconciler struct { defaultTags []string operatorID string // 
stableID of the operator's Tailscale device + validationOpts validationOpts + clock tstime.Clock mu sync.Mutex // protects following @@ -861,7 +863,7 @@ func (r *HAServiceReconciler) validateService(ctx context.Context, svc *corev1.S errs = append(errs, fmt.Errorf("ProxyGroup %q is of type %q but must be of type %q", pg.Name, pg.Spec.Type, tsapi.ProxyGroupTypeIngress)) } - if violations := validateService(svc); len(violations) > 0 { + if violations := validateService(svc, r.validationOpts); len(violations) > 0 { errs = append(errs, fmt.Errorf("invalid Service: %s", strings.Join(violations, ", "))) } svcList := &corev1.ServiceList{} diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index 52c8bec7ff32a..114e23b1af786 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -68,6 +68,10 @@ type ServiceReconciler struct { clock tstime.Clock defaultProxyClass string + + validationOpts validationOpts + + noFqdnDotAppend bool } var ( @@ -223,7 +227,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga tsoperator.SetServiceCondition(svc, tsapi.ProxyReady, metav1.ConditionFalse, reasonProxyInvalid, msg, a.clock, logger) return nil } - if violations := validateService(svc); len(violations) > 0 { + if violations := validateService(svc, a.validationOpts); len(violations) > 0 { msg := fmt.Sprintf("unable to provision proxy resources: invalid Service: %s", strings.Join(violations, ", ")) a.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVICE", msg) a.logger.Error(msg) @@ -293,7 +297,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga gaugeEgressProxies.Set(int64(a.managedEgressProxies.Len())) } else if fqdn := svc.Annotations[AnnotationTailnetTargetFQDN]; fqdn != "" { fqdn := svc.Annotations[AnnotationTailnetTargetFQDN] - if !strings.HasSuffix(fqdn, ".") { + if !strings.HasSuffix(fqdn, ".") && !a.noFqdnDotAppend { fqdn = fqdn + "." 
} sts.TailnetTargetFQDN = fqdn @@ -369,13 +373,13 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga return nil } -func validateService(svc *corev1.Service) []string { +func validateService(svc *corev1.Service, opts validationOpts) []string { violations := make([]string, 0) if svc.Annotations[AnnotationTailnetTargetFQDN] != "" && svc.Annotations[AnnotationTailnetTargetIP] != "" { violations = append(violations, fmt.Sprintf("only one of annotations %s and %s can be set", AnnotationTailnetTargetIP, AnnotationTailnetTargetFQDN)) } if fqdn := svc.Annotations[AnnotationTailnetTargetFQDN]; fqdn != "" { - if !isMagicDNSName(fqdn) { + if !isMagicDNSName(fqdn, opts) { violations = append(violations, fmt.Sprintf("invalid value of annotation %s: %q does not appear to be a valid MagicDNS name", AnnotationTailnetTargetFQDN, fqdn)) } } diff --git a/cmd/k8s-operator/tsclient.go b/cmd/k8s-operator/tsclient.go index a94d55afed604..fed6329a3ecf4 100644 --- a/cmd/k8s-operator/tsclient.go +++ b/cmd/k8s-operator/tsclient.go @@ -22,7 +22,7 @@ const ( defaultTailnet = "-" ) -func newTSClient(ctx context.Context, clientIDPath, clientSecretPath, loginServer string) (tsClient, error) { +func newTSClient(ctx context.Context, clientIDPath, clientSecretPath, loginServer, customTokenURL string) (tsClient, error) { clientID, err := os.ReadFile(clientIDPath) if err != nil { return nil, fmt.Errorf("error reading client ID %q: %w", clientIDPath, err) @@ -36,6 +36,9 @@ func newTSClient(ctx context.Context, clientIDPath, clientSecretPath, loginServe if loginServer != "" { tokenURL = fmt.Sprintf("%s%s", loginServer, tokenURLPath) } + if customTokenURL != "" { + tokenURL = customTokenURL + } credentials := clientcredentials.Config{ ClientID: string(clientID), ClientSecret: string(clientSecret), diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 18bf1cb50400f..b0cd362a88594 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -390,6 +390,9 @@ _Appears 
in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `image` _[NameserverImage](#nameserverimage)_ | Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. | | | +| `cmd` _string array_ | Cmd can be used to overwrite the command used when running the nameserver image. | | | +| `env` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#envvar-v1-core) array_ | Env can be used to pass environment variables to the nameserver
container. | | | +| `podLabels` _object (keys:string, values:string)_ | PodLabels are the labels which will be attached to the nameserver
pod. They can be used to define network policies. | | | #### NameserverImage diff --git a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go index 0178d60eab606..1eef25d6d41fb 100644 --- a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go +++ b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go @@ -6,6 +6,7 @@ package v1alpha1 import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -82,6 +83,17 @@ type Nameserver struct { // Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. // +optional Image *NameserverImage `json:"image,omitempty"` + // Cmd can be used to overwrite the command used when running the nameserver image. + // +optional + Cmd []string `json:"cmd,omitempty"` + // Env can be used to pass environment variables to the nameserver + // container. + // +optional + Env []corev1.EnvVar `json:"env,omitempty"` + // PodLabels are the labels which will be attached to the nameserver + // pod. They can be used to define network policies. 
+ // +optional + PodLabels map[string]string `json:"podLabels,omitempty"` } type NameserverImage struct { diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index ffc04d3b9dde3..122e519bdc5a0 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -365,6 +365,25 @@ func (in *Nameserver) DeepCopyInto(out *Nameserver) { *out = new(NameserverImage) **out = **in } + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PodLabels != nil { + in, out := &in.PodLabels, &out.PodLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Nameserver.