Feat: Run inside a cluster by evaluating incluster config #3088

Open · wants to merge 6 commits into master

31 changes: 31 additions & 0 deletions README.md
@@ -242,6 +242,37 @@ Binaries for Linux, Windows and Mac are available as tarballs in the [release pa

---

## Running directly inside a Kubernetes cluster

You can run k9s directly inside a Kubernetes cluster by creating a Pod that uses the official image (`quay.io/derailed/k9s`). This works well with tools such as [Telepresence](https://telepresence.io/) or [Engity's Bifröst](https://bifroest.engity.org/), which let you connect to a Kubernetes cluster from your computer as if you were running inside it.

Running k9s this way usually requires a matching Service Account with sufficient permissions, for example:

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: k9s
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: k9s
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: k9s
    namespace: default
```

This allows you to use the Service Account `k9s` within the `default` namespace with full cluster permissions. For more details on how to use and define Service Accounts, please refer to [the official Kubernetes documentation](https://kubernetes.io/docs/concepts/security/service-accounts/#how-to-use).
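
A minimal Pod manifest that runs the official image under this Service Account could look like the sketch below; the Pod name, image tag, and attach command are illustrative and not part of this change:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: k9s
  namespace: default
spec:
  serviceAccountName: k9s
  containers:
    - name: k9s
      image: quay.io/derailed/k9s:latest # illustrative tag; pin a released version
      stdin: true                        # keep STDIN open for the terminal UI
      tty: true                          # allocate a TTY for the full-screen UI
```

Assuming the image's default entrypoint starts k9s, you can then drive the UI with `kubectl attach -it k9s -n default`.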

---

## PreFlight Checks

* K9s uses 256 colors terminal mode. On `Nix system make sure TERM is set accordingly.
60 changes: 59 additions & 1 deletion internal/client/config.go
@@ -12,9 +12,10 @@ import (
    "sync"
    "time"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/cli-runtime/pkg/genericclioptions"
    restclient "k8s.io/client-go/rest"
    clientcmd "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/tools/clientcmd/api"
)

@@ -23,6 +24,8 @@ const (

    // UsePersistentConfig caches client config to avoid reloads.
    UsePersistentConfig = true

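    // inClusterConfig is the synthetic context name used when k9s runs in-cluster without a kubeconfig.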
    inClusterConfig = "incluster"
)

// Config tracks a kubernetes configuration.
@@ -85,6 +88,10 @@ func (c *Config) SwitchContext(name string) error {
    if err != nil {
        return fmt.Errorf("context %q does not exist", name)
    }
    if name == inClusterConfig && ct.LocationOfOrigin == inClusterConfig {
        return nil
    }

    // !!BOZO!! Do you need to reset the flags?
    flags := genericclioptions.NewConfigFlags(UsePersistentConfig)
    flags.Context, flags.ClusterName = &name, &ct.Cluster
@@ -129,6 +136,9 @@ func (c *Config) CurrentClusterName() (string, error) {

    ct, ok := cfg.Contexts[cfg.CurrentContext]
    if !ok {
        if c.isInCluster(cfg) {
            return inClusterConfig, nil
        }
        return "", fmt.Errorf("invalid current context specified: %q", cfg.CurrentContext)
    }
    if isSet(c.flags.Context) {
@@ -152,6 +162,10 @@ func (c *Config) CurrentContextName() (string, error) {
        return "", fmt.Errorf("fail to load rawConfig: %w", err)
    }

    if c.isInCluster(cfg) {
        return inClusterConfig, nil
    }

    return cfg.CurrentContext, nil
}

@@ -160,6 +174,7 @@ func (c *Config) CurrentContextNamespace() (string, error) {
    if err != nil {
        return "", err
    }

    context, err := c.GetContext(name)
    if err != nil {
        return "", err
@@ -183,10 +198,15 @@ func (c *Config) GetContext(n string) (*api.Context, error) {
    if err != nil {
        return nil, err
    }

    if c, ok := cfg.Contexts[n]; ok {
        return c, nil
    }

    if n == inClusterConfig {
        return c.newInclusterContext(), nil
    }

    return nil, fmt.Errorf("getcontext - invalid context specified: %q", n)
}

@@ -202,6 +222,12 @@ func (c *Config) Contexts() (map[string]*api.Context, error) {
        return nil, err
    }

    if len(cfg.Contexts) == 0 && c.isInCluster(cfg) {
        return map[string]*api.Context{
            inClusterConfig: c.newInclusterContext(),
        }, nil
    }

    return cfg.Contexts, nil
}

@@ -314,6 +340,9 @@ func (c *Config) CurrentUserName() (string, error) {
    if ctx, ok := cfg.Contexts[current]; ok {
        return ctx.AuthInfo, nil
    }
    if c.isInCluster(cfg) {
        return inClusterConfig, nil
    }

    return "", errors.New("unable to locate current user")
}
@@ -341,6 +370,31 @@ func (c *Config) ConfigAccess() (clientcmd.ConfigAccess, error) {
    return c.clientConfig().ConfigAccess(), nil
}

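// newInclusterContext builds a synthetic "incluster" context, defaulting the namespace when none can be resolved from the client config.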
func (c *Config) newInclusterContext() *api.Context {
    ns, _, _ := c.clientConfig().Namespace()
    if ns == "" {
        ns = DefaultNamespace
    }
    return &api.Context{
        LocationOfOrigin: inClusterConfig,
        Cluster:          inClusterConfig,
        Namespace:        ns,
        AuthInfo:         inClusterConfig,
        Extensions:       make(map[string]runtime.Object),
    }
}

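// isInCluster reports whether k9s appears to be running inside a cluster: no contexts are defined, the current context is unset or "incluster", and no kubeconfig, cluster, or API server flags were provided.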
func (c *Config) isInCluster(cfg api.Config) bool {
    if (cfg.CurrentContext == "" || cfg.CurrentContext == inClusterConfig) &&
        len(cfg.Contexts) == 0 &&
        isEmptyString(c.flags.KubeConfig) &&
        isEmptyString(c.flags.ClusterName) &&
        isEmptyString(c.flags.APIServer) {
        return true
    }
    return false
}

// ----------------------------------------------------------------------------
// Helpers...

@@ -351,3 +405,7 @@ func isSet(s *string) bool {
func areSet(s *[]string) bool {
    return s != nil && len(*s) != 0
}

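// isEmptyString returns true if s points to an empty string (a nil pointer is not considered empty).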
func isEmptyString(s *string) bool {
    return s != nil && *s == ""
}