From c905d02adf131bbc94a73967e4c78a9c1d5acee8 Mon Sep 17 00:00:00 2001 From: davefu113 <142489013+davefu113@users.noreply.github.com> Date: Sat, 7 Dec 2024 23:32:14 +0800 Subject: [PATCH 1/3] fix(argo-rollouts plugin): resolve improper piping in watch command (#3009) The `-w` or `--watch` flag is designed for continuous monitoring and should not pipe to `less` to avoid unnecessary screen rolling. chore(deps): bump github.com/anchore/grype from 0.84.0 to 0.85.0 (#3008) Bumps [github.com/anchore/grype](https://github.com/anchore/grype) from 0.84.0 to 0.85.0. - [Release notes](https://github.com/anchore/grype/releases) - [Changelog](https://github.com/anchore/grype/blob/main/.goreleaser.yaml) - [Commits](https://github.com/anchore/grype/compare/v0.84.0...v0.85.0) --- updated-dependencies: - dependency-name: github.com/anchore/grype dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> chore(deps): bump k8s.io/kubectl from 0.31.1 to 0.31.3 (#3007) Bumps [k8s.io/kubectl](https://github.com/kubernetes/kubectl) from 0.31.1 to 0.31.3. - [Commits](https://github.com/kubernetes/kubectl/compare/v0.31.1...v0.31.3) --- updated-dependencies: - dependency-name: k8s.io/kubectl dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> chore(deps): bump helm.sh/helm/v3 from 3.16.2 to 3.16.3 (#3006) Bumps [helm.sh/helm/v3](https://github.com/helm/helm) from 3.16.2 to 3.16.3. - [Release notes](https://github.com/helm/helm/releases) - [Commits](https://github.com/helm/helm/compare/v3.16.2...v3.16.3) --- updated-dependencies: - dependency-name: helm.sh/helm/v3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> feat: Adding new config for workload view feat: Add config and add some documentation feat: Refactor getValidity feat: Refactor getValidity feat: refactor the get replicas complexity feat: trigger pipeline feat: refactor workload list feat: delete log feat: unblock unit tests (will come back to it) feat: fix unit tests and add k9s.json new schema feat: add comment on the dao/workload feat: remove todo feat: add header on new file feat: add new view with crud for custom gvr and rollback config feat: add comments and flashing errors feat: add comments feat: update readme feat: add comments feat: cleanup cluster context feat: update order of actions feat: fix unit test --- README.md | 14 ++ internal/config/alias.go | 3 +- internal/config/alias_test.go | 2 +- internal/config/config.go | 15 ++ internal/config/files.go | 10 + internal/config/workload.go | 201 +++++++++++++++++ internal/dao/registry.go | 9 + internal/dao/workload.go | 278 ++++++++++++++++-------- internal/dao/workloadGVR.go | 85 ++++++++ internal/keys.go | 1 + internal/model/registry.go | 4 + internal/render/workloadGVR.go | 66 ++++++ internal/view/registrar.go | 3 + internal/view/workload.go | 41 ++++ internal/view/workloadGVR.go | 386 +++++++++++++++++++++++++++++++++ 15 files changed, 1022 insertions(+), 96 deletions(-) create mode 100644 internal/config/workload.go create mode 100644 internal/dao/workloadGVR.go create mode 100644 internal/render/workloadGVR.go create mode 100644 internal/view/workloadGVR.go diff --git a/README.md b/README.md index f1004cb735..c62621d200 100644 --- a/README.md +++ b/README.md @@ -1056,6 +1056,20 @@ k9s: --- +## Custom Workload View + +You can customize the workload view with CRDs or any resources you want to see on this view. + +To do so, you can go to the `workloadGVR` view, you'll be able to see all your custom GVRs. 
You can also create, edit, delete them. + +You can also describe by pressing `d` or simulate them by pressing `enter`. + +You can create new one from this view, this will ask you for a custom GVR name and will set default values (to comment or uncomment). + +There is a way to add a custom GVR to you cluster context or to delete them, they will be added on top of the default workloads GVRS. + +--- + ## Contributors Without the contributions from these fine folks, this project would be a total dud! diff --git a/internal/config/alias.go b/internal/config/alias.go index 426d41d76f..a1f59f8a99 100644 --- a/internal/config/alias.go +++ b/internal/config/alias.go @@ -31,7 +31,7 @@ type Aliases struct { // NewAliases return a new alias. func NewAliases() *Aliases { return &Aliases{ - Alias: make(Alias, 50), + Alias: make(Alias, 56), } } @@ -190,6 +190,7 @@ func (a *Aliases) loadDefaultAliases() { a.declare("pulses", "pulse", "pu", "hz") a.declare("xrays", "xray", "x") a.declare("workloads", "workload", "wk") + a.declare("workloadgvrs", "workloadgvr", "wkg") } // Save alias to disk. diff --git a/internal/config/alias_test.go b/internal/config/alias_test.go index c67f4f5824..f3fa33eecc 100644 --- a/internal/config/alias_test.go +++ b/internal/config/alias_test.go @@ -111,7 +111,7 @@ func TestAliasesLoad(t *testing.T) { a := config.NewAliases() assert.Nil(t, a.Load(path.Join(config.AppConfigDir, "plain.yaml"))) - assert.Equal(t, 54, len(a.Alias)) + assert.Equal(t, 57, len(a.Alias)) } func TestAliasesSave(t *testing.T) { diff --git a/internal/config/config.go b/internal/config/config.go index a5da1dfa89..110b8da7a3 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -53,6 +53,21 @@ func (c *Config) ContextAliasesPath() string { return AppContextAliasesFile(ct.GetClusterName(), c.K9s.activeContextName) } +// ContextWorkloadPath returns a context specific workload file spec. 
+func (c *Config) ContextWorkloadPath() string { + ct, err := c.K9s.ActiveContext() + if err != nil { + return "" + } + + return AppContextWorkloadFile(ct.GetClusterName(), c.K9s.activeContextName) +} + +// ContextWorkloadDir returns the workload directory (which contains the custom GVRs) +func (c *Config) ContextWorkloadDir() string { + return AppWorkloadsDir() +} + // ContextPluginsPath returns a context specific plugins file spec. func (c *Config) ContextPluginsPath() (string, error) { ct, err := c.K9s.ActiveContext() diff --git a/internal/config/files.go b/internal/config/files.go index 2b246d5172..6aecc3aec8 100644 --- a/internal/config/files.go +++ b/internal/config/files.go @@ -200,11 +200,21 @@ func AppContextDir(cluster, context string) string { return filepath.Join(AppContextsDir, data.SanitizeContextSubpath(cluster, context)) } +// AppWorkloadsDir generates a valid workload folder path +func AppWorkloadsDir() string { + return filepath.Join(AppContextsDir, "workloads") +} + // AppContextAliasesFile generates a valid context specific aliases file path. func AppContextAliasesFile(cluster, context string) string { return filepath.Join(AppContextsDir, data.SanitizeContextSubpath(cluster, context), "aliases.yaml") } +// AppContextWorkloadFile generates a valid context specific workload file path. +func AppContextWorkloadFile(cluster, context string) string { + return filepath.Join(AppContextsDir, data.SanitizeContextSubpath(cluster, context), "workloads.yaml") +} + // AppContextPluginsFile generates a valid context specific plugins file path. 
func AppContextPluginsFile(cluster, context string) string { return filepath.Join(AppContextsDir, data.SanitizeContextSubpath(cluster, context), "plugins.yaml") diff --git a/internal/config/workload.go b/internal/config/workload.go new file mode 100644 index 0000000000..e1c740b824 --- /dev/null +++ b/internal/config/workload.go @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of K9s + +package config + +import ( + "errors" + "fmt" + "os" + "path" + + "github.com/derailed/k9s/internal/client" + "gopkg.in/yaml.v2" +) + +var ( + // Template represents the template of new workload gvr + Template = []byte(`name: "test.com/v1alpha1/myCRD" +status: + cellName: "Status" + # na: true +readiness: + cellName: "Current" + # The cellExtraName will be shown as cellName/cellExtraName + cellExtraName: "Desired" + # na: true +validity: + replicas: + cellCurrentName: "Current" + cellDesiredName: "Desired" + # cellAllName: "Ready" + matchs: + - cellName: "State" + cellValue: "Ready"`) +) + +var ( + // defaultGvr represent the default values uses if a custom gvr is set without status, validity or readiness + defaultGvr = WorkloadGVR{ + Status: &GVRStatus{CellName: "Status"}, + Validity: &GVRValidity{Matchs: []Match{{CellName: "Ready", Value: "True"}}}, + Readiness: &GVRReadiness{CellName: "Ready"}, + } + + // defaultConfigGVRs represents the default configurations + defaultConfigGVRs = map[string]WorkloadGVR{ + "apps/v1/deployments": { + Name: "apps/v1/deployments", + Readiness: &GVRReadiness{CellName: "Ready"}, + Validity: &GVRValidity{ + Replicas: Replicas{CellAllName: "Ready"}, + }, + }, + "apps/v1/daemonsets": { + Name: "apps/v1/daemonsets", + Readiness: &GVRReadiness{CellName: "Ready", CellExtraName: "Desired"}, + Validity: &GVRValidity{ + Replicas: Replicas{CellDesiredName: "Desired", CellCurrentName: "Ready"}, + }, + }, + "apps/v1/replicasets": { + Name: "apps/v1/replicasets", + Readiness: &GVRReadiness{CellName: "Current", CellExtraName: 
"Desired"}, + Validity: &GVRValidity{ + Replicas: Replicas{CellDesiredName: "Desired", CellCurrentName: "Current"}, + }, + }, + "apps/v1/statefulSets": { + Name: "apps/v1/statefulSets", + Status: &GVRStatus{CellName: "Ready"}, + Readiness: &GVRReadiness{CellName: "Ready"}, + Validity: &GVRValidity{ + Replicas: Replicas{CellAllName: "Ready"}, + }, + }, + "v1/pods": { + Name: "v1/pods", + Status: &GVRStatus{CellName: "Status"}, + Readiness: &GVRReadiness{CellName: "Ready"}, + Validity: &GVRValidity{ + Matchs: []Match{ + {CellName: "Status", Value: "Running"}, + }, + Replicas: Replicas{CellAllName: "Ready"}, + }, + }, + } +) + +type CellName string + +type GVRStatus struct { + NA bool `json:"na" yaml:"na"` + CellName CellName `json:"cellName" yaml:"cellName"` +} + +type GVRReadiness struct { + NA bool `json:"na" yaml:"na"` + CellName CellName `json:"cellName" yaml:"cellName"` + CellExtraName CellName `json:"cellExtraName" yaml:"cellExtraName"` +} + +type Match struct { + CellName CellName `json:"cellName" yaml:"cellName"` + Value string `json:"cellValue" yaml:"cellValue"` +} + +type Replicas struct { + CellCurrentName CellName `json:"cellCurrentName" yaml:"cellCurrentName"` + CellDesiredName CellName `json:"cellDesiredName" yaml:"cellDesiredName"` + CellAllName CellName `json:"cellAllName" yaml:"cellAllName"` +} + +type GVRValidity struct { + NA bool `json:"na" yaml:"na"` + Matchs []Match `json:"matchs,omitempty" yaml:"matchs,omitempty"` + Replicas Replicas `json:"replicas" yaml:"replicas"` +} + +type WorkloadGVR struct { + Name string `json:"name" yaml:"name"` + Status *GVRStatus `json:"status,omitempty" yaml:"status,omitempty"` + Readiness *GVRReadiness `json:"readiness,omitempty" yaml:"readiness,omitempty"` + Validity *GVRValidity `json:"validity,omitempty" yaml:"validity,omitempty"` +} + +type WorkloadConfig struct { + GVRFilenames []string `yaml:"wkg"` +} + +// NewWorkloadGVRs returns the default GVRs to use if no custom config is set +// The workloadDir 
represent the directory of the custom workloads, the gvrNames are the custom gvrs names +func NewWorkloadGVRs(workloadDir string, gvrNames []string) ([]WorkloadGVR, error) { + workloadGVRs := make([]WorkloadGVR, 0) + for _, gvr := range defaultConfigGVRs { + workloadGVRs = append(workloadGVRs, gvr) + } + + var errs error + + // Append custom GVRS + if len(gvrNames) != 0 { + for _, filename := range gvrNames { + wkgvr, err := GetWorkloadGVRFromFile(path.Join(workloadDir, fmt.Sprintf("%s.%s", filename, "yaml"))) + if err != nil { + errs = errors.Join(errs, err) + continue + } + workloadGVRs = append(workloadGVRs, wkgvr) + } + } + + return workloadGVRs, errs +} + +// GetWorkloadGVRFromFile returns a gvr from a filepath +func GetWorkloadGVRFromFile(filepath string) (WorkloadGVR, error) { + yamlFile, err := os.ReadFile(filepath) + if err != nil { + return WorkloadGVR{}, err + } + + var wkgvr WorkloadGVR + if err = yaml.Unmarshal(yamlFile, &wkgvr); err != nil { + return WorkloadGVR{}, err + } + + return wkgvr, nil +} + +// GetGVR will return the GVR defined by the WorkloadGVR's name +func (wgvr WorkloadGVR) GetGVR() client.GVR { + return client.NewGVR(wgvr.Name) +} + +// ApplyDefault will complete the GVR with missing values +// If it's an existing GVR's name, it will apply their corresponding default values +// If it's an unknown resources without readiness, status or validity it will use the default ones +func (wkgvr *WorkloadGVR) ApplyDefault() { + // Apply default values + existingGvr, ok := defaultConfigGVRs[wkgvr.Name] + if ok { + wkgvr.applyDefaultValues(existingGvr) + } else { + wkgvr.applyDefaultValues(defaultGvr) + } +} + +func (wkgvr *WorkloadGVR) applyDefaultValues(defaultGVR WorkloadGVR) { + if wkgvr.Status == nil { + wkgvr.Status = defaultGVR.Status + } + + if wkgvr.Readiness == nil { + wkgvr.Readiness = defaultGVR.Readiness + } + + if wkgvr.Validity == nil { + wkgvr.Validity = defaultGVR.Validity + } +} diff --git a/internal/dao/registry.go 
b/internal/dao/registry.go index f512bc60cd..20fbe51a4b 100644 --- a/internal/dao/registry.go +++ b/internal/dao/registry.go @@ -66,6 +66,7 @@ func NewMeta() *Meta { func AccessorFor(f Factory, gvr client.GVR) (Accessor, error) { m := Accessors{ client.NewGVR("workloads"): &Workload{}, + client.NewGVR("workloadgvrs"): &WorkloadGVR{}, client.NewGVR("contexts"): &Context{}, client.NewGVR("containers"): &Container{}, client.NewGVR("scans"): &ImageScan{}, @@ -214,6 +215,14 @@ func loadK9s(m ResourceMetas) { ShortNames: []string{"wk"}, Categories: []string{k9sCat}, } + m[client.NewGVR("workloadgvrs")] = metav1.APIResource{ + Name: "workloadgvrs", + Kind: "Workloadgvr", + SingularName: "workloadgvr", + Namespaced: true, + ShortNames: []string{"wkg"}, + Categories: []string{k9sCat}, + } m[client.NewGVR("pulses")] = metav1.APIResource{ Name: "pulses", Kind: "Pulse", diff --git a/internal/dao/workload.go b/internal/dao/workload.go index 604c6ca9ad..f943185742 100644 --- a/internal/dao/workload.go +++ b/internal/dao/workload.go @@ -13,6 +13,7 @@ import ( "github.com/derailed/k9s/internal" "github.com/derailed/k9s/internal/client" + "github.com/derailed/k9s/internal/config" "github.com/derailed/k9s/internal/render" "github.com/rs/zerolog/log" "k8s.io/apimachinery/pkg/api/meta" @@ -21,23 +22,22 @@ import ( ) const ( - StatusOK = "OK" DegradedStatus = "DEGRADED" + NotAvailable = "n/a" ) var ( - SaGVR = client.NewGVR("v1/serviceaccounts") - PvcGVR = client.NewGVR("v1/persistentvolumeclaims") - PcGVR = client.NewGVR("scheduling.k8s.io/v1/priorityclasses") - CmGVR = client.NewGVR("v1/configmaps") - SecGVR = client.NewGVR("v1/secrets") - PodGVR = client.NewGVR("v1/pods") - SvcGVR = client.NewGVR("v1/services") - DsGVR = client.NewGVR("apps/v1/daemonsets") - StsGVR = client.NewGVR("apps/v1/statefulSets") - DpGVR = client.NewGVR("apps/v1/deployments") - RsGVR = client.NewGVR("apps/v1/replicasets") - resList = []client.GVR{PodGVR, SvcGVR, DsGVR, StsGVR, DpGVR, RsGVR} + SaGVR = 
client.NewGVR("v1/serviceaccounts") + PvcGVR = client.NewGVR("v1/persistentvolumeclaims") + PcGVR = client.NewGVR("scheduling.k8s.io/v1/priorityclasses") + CmGVR = client.NewGVR("v1/configmaps") + SecGVR = client.NewGVR("v1/secrets") + PodGVR = client.NewGVR("v1/pods") + SvcGVR = client.NewGVR("v1/services") + DsGVR = client.NewGVR("apps/v1/daemonsets") + StsGVR = client.NewGVR("apps/v1/statefulSets") + DpGVR = client.NewGVR("apps/v1/deployments") + RsGVR = client.NewGVR("apps/v1/replicasets") ) // Workload tracks a select set of resources in a given namespace. @@ -80,6 +80,62 @@ func (w *Workload) Delete(ctx context.Context, path string, propagation *metav1. return dial.Namespace(ns).Delete(ctx, n, opts) } +// List fetch workloads. +func (a *Workload) List(ctx context.Context, ns string) ([]runtime.Object, error) { + oo := make([]runtime.Object, 0, 100) + + workloadGVRs, _ := ctx.Value(internal.KeyWorkloadGVRs).([]config.WorkloadGVR) + for i, wkgvr := range workloadGVRs { + // Apply default values + workloadGVRs[i].ApplyDefault() + + table, err := a.fetch(ctx, workloadGVRs[i].GetGVR(), ns) + if err != nil { + log.Warn().Msgf("could not fetch gvr %s: %q", workloadGVRs[i].Name, err) + continue + } + + for _, r := range table.Rows { + ns, ts := a.getNamespaceAndTimestamp(r) + + oo = append(oo, &render.WorkloadRes{Row: metav1.TableRow{Cells: []interface{}{ + workloadGVRs[i].GetGVR().String(), + ns, + r.Cells[indexOf("Name", table.ColumnDefinitions)], + a.getStatus(wkgvr, table.ColumnDefinitions, r.Cells), + a.getReadiness(wkgvr, table.ColumnDefinitions, r.Cells), + a.getValidity(wkgvr, table.ColumnDefinitions, r.Cells), + ts, + }}}) + } + } + + return oo, nil +} + +// getNamespaceAndTimestamp will retrieve the namespace and the timestamp of a given resource +func (a *Workload) getNamespaceAndTimestamp(r metav1.TableRow) (string, metav1.Time) { + var ( + ns string + ts metav1.Time + ) + + if obj := r.Object.Object; obj != nil { + if m, err := meta.Accessor(obj); err == 
nil { + ns = m.GetNamespace() + ts = m.GetCreationTimestamp() + } + } else { + var m metav1.PartialObjectMetadata + if err := json.Unmarshal(r.Object.Raw, &m); err == nil { + ns = m.GetNamespace() + ts = m.CreationTimestamp + } + } + + return ns, ts +} + func (a *Workload) fetch(ctx context.Context, gvr client.GVR, ns string) (*metav1.Table, error) { a.Table.gvr = gvr oo, err := a.Table.List(ctx, ns) @@ -97,106 +153,136 @@ func (a *Workload) fetch(ctx context.Context, gvr client.GVR, ns string) (*metav return tt, nil } -// List fetch workloads. -func (a *Workload) List(ctx context.Context, ns string) ([]runtime.Object, error) { - oo := make([]runtime.Object, 0, 100) - for _, gvr := range resList { - table, err := a.fetch(ctx, gvr, ns) - if err != nil { - return nil, err - } - var ( - ns string - ts metav1.Time - ) - for _, r := range table.Rows { - if obj := r.Object.Object; obj != nil { - if m, err := meta.Accessor(obj); err == nil { - ns = m.GetNamespace() - ts = m.GetCreationTimestamp() - } - } else { - var m metav1.PartialObjectMetadata - if err := json.Unmarshal(r.Object.Raw, &m); err == nil { - ns = m.GetNamespace() - ts = m.CreationTimestamp - } - } - stat := status(gvr, r, table.ColumnDefinitions) - oo = append(oo, &render.WorkloadRes{Row: metav1.TableRow{Cells: []interface{}{ - gvr.String(), - ns, - r.Cells[indexOf("Name", table.ColumnDefinitions)], - stat, - readiness(gvr, r, table.ColumnDefinitions), - validity(stat), - ts, - }}}) - } +// getStatus will retrieve the status of the resource depending of it's configuration +func (wk *Workload) getStatus(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { + status := NotAvailable + + if wkgvr.Status == nil || wkgvr.Status.NA { + return status } - return oo, nil + if statusIndex := indexOf(string(wkgvr.Status.CellName), cd); statusIndex != -1 { + status = valueToString(cells[statusIndex]) + + } + + return status } -// Helpers... 
- -func readiness(gvr client.GVR, r metav1.TableRow, h []metav1.TableColumnDefinition) string { - switch gvr { - case PodGVR, DpGVR, StsGVR: - return r.Cells[indexOf("Ready", h)].(string) - case RsGVR, DsGVR: - c := r.Cells[indexOf("Ready", h)].(int64) - d := r.Cells[indexOf("Desired", h)].(int64) - return fmt.Sprintf("%d/%d", c, d) - case SvcGVR: +// getReadiness will retrieve the readiness of the resource depending of it's configuration +func (wk *Workload) getReadiness(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { + ready := NotAvailable + + if wkgvr.Readiness == nil || wkgvr.Readiness.NA { + return ready + } + + if readyIndex := indexOf(string(wkgvr.Readiness.CellName), cd); readyIndex != -1 { + ready = valueToString(cells[readyIndex]) + } + + if extrReadyIndex := indexOf(string(wkgvr.Readiness.CellExtraName), cd); extrReadyIndex != -1 { + ready = fmt.Sprintf("%s/%s", ready, valueToString(cells[extrReadyIndex])) + } + + return ready +} + +// getValidity will retrieve the validity of the resource depending of it's configuration +func (wk *Workload) getValidity(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { + if wkgvr.Validity == nil || wkgvr.Validity.NA { return "" } - return render.NAValue + if validity := getMatchesValidity(wkgvr, cd, cells); validity == DegradedStatus { + return DegradedStatus + } + + if validity := getReplicasValidity(wkgvr, cd, cells); validity == DegradedStatus { + return DegradedStatus + } + + return "" } -func status(gvr client.GVR, r metav1.TableRow, h []metav1.TableColumnDefinition) string { - switch gvr { - case PodGVR: - if status := r.Cells[indexOf("Status", h)]; status == render.PhaseCompleted { - return StatusOK - } else if !isReady(r.Cells[indexOf("Ready", h)].(string)) || status != render.PhaseRunning { - return DegradedStatus +// getMatchesValidity retrieve the validity depending if all the matches are fullfiled or not +func 
getMatchesValidity(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { + for _, m := range wkgvr.Validity.Matchs { + v := "" + if matchCellNameIndex := indexOf(string(m.CellName), cd); matchCellNameIndex != -1 { + v = valueToString(cells[matchCellNameIndex]) } - case DpGVR, StsGVR: - if !isReady(r.Cells[indexOf("Ready", h)].(string)) { + + if v != m.Value { return DegradedStatus } - case RsGVR, DsGVR: - rd, ok1 := r.Cells[indexOf("Ready", h)].(int64) - de, ok2 := r.Cells[indexOf("Desired", h)].(int64) - if ok1 && ok2 { - if !isReady(fmt.Sprintf("%d/%d", rd, de)) { - return DegradedStatus - } - break - } - rds, oks1 := r.Cells[indexOf("Ready", h)].(string) - des, oks2 := r.Cells[indexOf("Desired", h)].(string) - if oks1 && oks2 { - if !isReady(fmt.Sprintf("%s/%s", rds, des)) { - return DegradedStatus - } - } - case SvcGVR: - default: - return render.MissingValue + + } + + return "" +} + +// getReplicasValidity returns the validity corresponding of the replicas from 2 cells or a single one +func getReplicasValidity(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { + if getReplicasGrouped(wkgvr, cd, cells) == DegradedStatus { + return DegradedStatus + } + + if getReplicasSeparated(wkgvr, cd, cells) == DegradedStatus { + return DegradedStatus } - return StatusOK + return "" } -func validity(status string) string { - if status != "DEGRADED" { +// getReplicasGrouped returns the validity corresponding of the replicas from one cell +func getReplicasGrouped(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { + if wkgvr.Validity.Replicas.CellAllName == "" { return "" } - return status + allCellNameIndex := indexOf(string(wkgvr.Validity.Replicas.CellAllName), cd) + if allCellNameIndex < 0 { + return "" + } + + if !isReady(valueToString(cells[allCellNameIndex])) { + return DegradedStatus + } + + return "" +} + +// getReplicasSeparated returns the validity 
corresponding of the replicas from 2 cells (current/desired) +func getReplicasSeparated(wkgvr config.WorkloadGVR, cd []metav1.TableColumnDefinition, cells []interface{}) string { + if wkgvr.Validity.Replicas.CellCurrentName == "" || wkgvr.Validity.Replicas.CellDesiredName == "" { + return "" + } + + currentIndex := indexOf(string(wkgvr.Validity.Replicas.CellCurrentName), cd) + desiredIndex := indexOf(string(wkgvr.Validity.Replicas.CellDesiredName), cd) + + if currentIndex < 0 || desiredIndex < 0 { + return "" + } + + if !isReady(fmt.Sprintf("%s/%s", valueToString(cells[desiredIndex]), valueToString(cells[currentIndex]))) { + return DegradedStatus + } + + return "" +} + +func valueToString(v interface{}) string { + if sv, ok := v.(string); ok { + return sv + } + + if iv, ok := v.(int64); ok { + return strconv.Itoa(int(iv)) + } + + return "" } func isReady(s string) bool { @@ -222,6 +308,10 @@ func isReady(s string) bool { } func indexOf(n string, defs []metav1.TableColumnDefinition) int { + if n == "" { + return -1 + } + for i, d := range defs { if d.Name == n { return i diff --git a/internal/dao/workloadGVR.go b/internal/dao/workloadGVR.go new file mode 100644 index 0000000000..efcd1d6730 --- /dev/null +++ b/internal/dao/workloadGVR.go @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of K9s + +package dao + +import ( + "context" + "errors" + "os" + "path/filepath" + "strings" + + "github.com/derailed/k9s/internal" + "github.com/derailed/k9s/internal/client" + "github.com/derailed/k9s/internal/config" + "github.com/derailed/k9s/internal/render" + "gopkg.in/yaml.v3" + "k8s.io/apimachinery/pkg/runtime" +) + +var _ Accessor = (*WorkloadGVR)(nil) + +type WorkloadGVR struct { + NonResource +} + +func NewWorkloadGVR(f Factory) *WorkloadGVR { + a := WorkloadGVR{} + a.Init(f, client.NewGVR("workloadGVR")) + + return &a +} + +// List returns a collection of aliases. 
+func (a *WorkloadGVR) List(ctx context.Context, _ string) ([]runtime.Object, error) { + workloadsDir, _ := ctx.Value(internal.KeyDir).(string) + clusterContext, _ := ctx.Value(internal.KeyPath).(string) + + // List files from custom workload directory + ff, err := os.ReadDir(workloadsDir) + if err != nil { + return nil, err + } + + // Generate workload list from custom gvrs + oo := make([]runtime.Object, len(ff)) + for i, f := range ff { + if fi, err := f.Info(); err == nil { + oo[i] = render.WorkloadGVRRes{ + Filepath: fi, + InContext: a.isInContext(clusterContext, fi.Name())} + } + } + + return oo, nil +} + +func (a *WorkloadGVR) isInContext(ctxPath, filename string) bool { + // Read cluster context config + content, err := os.ReadFile(ctxPath) + if err != nil { + return false + } + + // Unmarshal cluster config + var config config.WorkloadConfig + if err := yaml.Unmarshal(content, &config); err != nil { + return false + } + + // Check if custom GVR is in context + for _, n := range config.GVRFilenames { + if n == strings.TrimSuffix(filename, filepath.Ext(filename)) { + return true + } + } + + return false + +} + +// Get fetch a resource. +func (a *WorkloadGVR) Get(_ context.Context, _ string) (runtime.Object, error) { + return nil, errors.New("nyi") +} diff --git a/internal/keys.go b/internal/keys.go index d18bc11d36..1728dc66ac 100644 --- a/internal/keys.go +++ b/internal/keys.go @@ -36,4 +36,5 @@ const ( KeyWait ContextKey = "wait" KeyPodCounting ContextKey = "podCounting" KeyEnableImgScan ContextKey = "vulScan" + KeyWorkloadGVRs ContextKey = "workloadGVRs" ) diff --git a/internal/model/registry.go b/internal/model/registry.go index e2129c3bff..9480e68ac0 100644 --- a/internal/model/registry.go +++ b/internal/model/registry.go @@ -17,6 +17,10 @@ var Registry = map[string]ResourceMeta{ DAO: &dao.Workload{}, Renderer: &render.Workload{}, }, + "workloadgvrs": { + DAO: &dao.WorkloadGVR{}, + Renderer: &render.WorkloadGVR{}, + }, // Custom... 
"references": { DAO: &dao.Reference{}, diff --git a/internal/render/workloadGVR.go b/internal/render/workloadGVR.go new file mode 100644 index 0000000000..6296cb7d75 --- /dev/null +++ b/internal/render/workloadGVR.go @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of K9s + +package render + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/derailed/k9s/internal/model1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type WorkloadGVR struct { + Base +} + +// Header returns a header row. +func (WorkloadGVR) Header(ns string) model1.Header { + return model1.Header{ + model1.HeaderColumn{Name: "NANE"}, + model1.HeaderColumn{Name: "INCONTEXT"}, + model1.HeaderColumn{Name: "VALID", Wide: true}, + model1.HeaderColumn{Name: "AGE", Time: true}, + } +} + +// Render renders a K8s resource to screen. +func (wgvr WorkloadGVR) Render(o interface{}, ns string, r *model1.Row) error { + res, ok := o.(WorkloadGVRRes) + if !ok { + return fmt.Errorf("expected WorkloadGVRRes, but got %T", o) + } + + r.ID = res.Filepath.Name() + r.Fields = model1.Fields{ + strings.TrimSuffix(res.Filepath.Name(), filepath.Ext(res.Filepath.Name())), + strconv.FormatBool(res.InContext), + "", + timeToAge(res.Filepath.ModTime()), + } + + return nil +} + +// ---------------------------------------------------------------------------- +// Helpers... + +type WorkloadGVRRes struct { + Filepath os.FileInfo + InContext bool +} + +// GetObjectKind returns a schema object. +func (a WorkloadGVRRes) GetObjectKind() schema.ObjectKind { + return nil +} + +// DeepCopyObject returns a container copy. 
+func (a WorkloadGVRRes) DeepCopyObject() runtime.Object { + return a +} diff --git a/internal/view/registrar.go b/internal/view/registrar.go index d199a44988..830e258f42 100644 --- a/internal/view/registrar.go +++ b/internal/view/registrar.go @@ -63,6 +63,9 @@ func miscViewers(vv MetaViewers) { vv[client.NewGVR("workloads")] = MetaViewer{ viewerFn: NewWorkload, } + vv[client.NewGVR("workloadgvrs")] = MetaViewer{ + viewerFn: NewWorkloadGVR, + } vv[client.NewGVR("contexts")] = MetaViewer{ viewerFn: NewContext, } diff --git a/internal/view/workload.go b/internal/view/workload.go index 78bba38fa2..e1d8009315 100644 --- a/internal/view/workload.go +++ b/internal/view/workload.go @@ -6,16 +6,19 @@ package view import ( "context" "fmt" + "os" "strings" "github.com/derailed/k9s/internal" "github.com/derailed/k9s/internal/client" + "github.com/derailed/k9s/internal/config" "github.com/derailed/k9s/internal/dao" "github.com/derailed/k9s/internal/model" "github.com/derailed/k9s/internal/ui" "github.com/derailed/k9s/internal/ui/dialog" "github.com/derailed/tcell/v2" "github.com/rs/zerolog/log" + "gopkg.in/yaml.v3" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -29,6 +32,7 @@ func NewWorkload(gvr client.GVR) ResourceViewer { w := Workload{ ResourceViewer: NewBrowser(gvr), } + w.SetContextFn(w.workloadContext) w.GetTable().SetEnterFn(w.showRes) w.AddBindKeysFn(w.bindKeys) w.GetTable().SetSortCol("KIND", true) @@ -36,6 +40,32 @@ func NewWorkload(gvr client.GVR) ResourceViewer { return &w } +// workloadContext will set the configuration's values of the workloadGVRs in the context to be used in the dao/workload +func (n *Workload) workloadContext(ctx context.Context) context.Context { + var gvrFilenames []string + + // Retrieve workload cluster context config file + ctxWorkloadPath := n.App().Config.ContextWorkloadPath() + + // Read config file + wkg := config.WorkloadConfig{} + configData, err := os.ReadFile(ctxWorkloadPath) + if err == nil { + if err := 
yaml.Unmarshal(configData, &wkg); err == nil { + gvrFilenames = wkg.GVRFilenames + } + } + + // Init workload GVRs with default values and from cluster context config + wkgvs, err := config.NewWorkloadGVRs(n.App().Config.ContextWorkloadDir(), gvrFilenames) + if err != nil { + n.App().Flash().Errf("unable to find custom workload GVR: %q", err) + } + + // Set in context + return context.WithValue(ctx, internal.KeyWorkloadGVRs, wkgvs) +} + func (w *Workload) bindDangerousKeys(aa *ui.KeyActions) { aa.Bulk(ui.KeyMap{ ui.KeyE: ui.NewKeyActionWithOpts("Edit", w.editCmd, @@ -63,6 +93,7 @@ func (w *Workload) bindKeys(aa *ui.KeyActions) { ui.KeyShiftA: ui.NewKeyAction("Sort Age", w.GetTable().SortColCmd(ageCol, true), false), ui.KeyY: ui.NewKeyAction(yamlAction, w.yamlCmd, true), ui.KeyD: ui.NewKeyAction("Describe", w.describeCmd, true), + ui.KeyG: ui.NewKeyAction("Custom GVRs", w.workloadGVRView, true), }) } @@ -166,6 +197,16 @@ func (w *Workload) describeCmd(evt *tcell.EventKey) *tcell.EventKey { return nil } +func (a *Workload) workloadGVRView(evt *tcell.EventKey) *tcell.EventKey { + co := NewWorkloadGVR(client.NewGVR("workloadgvrs")) + if err := a.App().inject(co, false); err != nil { + a.App().Flash().Err(err) + return nil + } + + return evt +} + func (w *Workload) editCmd(evt *tcell.EventKey) *tcell.EventKey { path := w.GetTable().GetSelectedItem() if path == "" { diff --git a/internal/view/workloadGVR.go b/internal/view/workloadGVR.go new file mode 100644 index 0000000000..4350b6768c --- /dev/null +++ b/internal/view/workloadGVR.go @@ -0,0 +1,386 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of K9s + +package view + +import ( + "context" + "errors" + "fmt" + "os" + "path" + "path/filepath" + "strings" + + "github.com/derailed/k9s/internal" + "github.com/derailed/k9s/internal/client" + "github.com/derailed/k9s/internal/config" + "github.com/derailed/k9s/internal/ui" + "github.com/derailed/k9s/internal/ui/dialog" + "github.com/derailed/tcell/v2" + 
"github.com/derailed/tview" + "gopkg.in/yaml.v3" +) + +const workloadGVRTitle = "workloadGVR" + +type WorkloadGVR struct { + ResourceViewer +} + +func NewWorkloadGVR(gvr client.GVR) ResourceViewer { + a := WorkloadGVR{ + ResourceViewer: NewBrowser(gvr), + } + a.GetTable().SetBorderFocusColor(tcell.ColorAliceBlue) + a.GetTable().SetSelectedStyle(tcell.StyleDefault.Foreground(tcell.ColorWhite).Background(tcell.ColorAliceBlue).Attributes(tcell.AttrNone)) + a.AddBindKeysFn(a.bindKeys) + a.SetContextFn(a.workloadGVRContext) + + return &a +} + +// Init initializes the view. +func (a *WorkloadGVR) Init(ctx context.Context) error { + if err := a.ResourceViewer.Init(ctx); err != nil { + return err + } + a.GetTable().GetModel().SetNamespace(client.NotNamespaced) + + return nil +} + +// workloadGVRContext initializes the workloadGVR context +func (a *WorkloadGVR) workloadGVRContext(ctx context.Context) context.Context { + ctx = context.WithValue(ctx, internal.KeyDir, a.App().Config.ContextWorkloadDir()) + return context.WithValue(ctx, internal.KeyPath, a.App().Config.ContextWorkloadPath()) +} + +func (a *WorkloadGVR) bindKeys(aa *ui.KeyActions) { + aa.Delete(ui.KeyN, ui.KeyD, ui.KeyShiftA, ui.KeyShiftN, tcell.KeyCtrlS, tcell.KeyCtrlSpace, ui.KeySpace, ui.KeyShiftD) + aa.Delete(tcell.KeyCtrlW, tcell.KeyCtrlL, tcell.KeyCtrlD) + aa.Bulk(ui.KeyMap{ + ui.KeyShiftA: ui.NewKeyActionWithOpts("Context: add", a.addtoCurrentCtx, + ui.ActionOpts{ + Visible: true, + Dangerous: true, + }), + ui.KeyShiftD: ui.NewKeyActionWithOpts("Context: delete", a.deletefromCurrentCtx, + ui.ActionOpts{ + Visible: true, + Dangerous: true, + }), + ui.KeyC: ui.NewKeyActionWithOpts("Create custom GVR", a.createCustomCmd, ui.ActionOpts{ + Visible: true, + Dangerous: true, + }), + ui.KeyR: ui.NewKeyActionWithOpts("Delete custom GVR", a.deleteCustomCmd, ui.ActionOpts{ + Visible: true, + Dangerous: true, + }), + ui.KeyE: ui.NewKeyActionWithOpts("Edit custom GVR", a.editCustomCmd, + ui.ActionOpts{ + Visible: true,
+ Dangerous: true, + }), + ui.KeyShiftG: ui.NewKeyAction("Sort GVR", a.GetTable().SortColCmd("NAME", true), false), + tcell.KeyEnter: ui.NewKeyAction("Simulate", a.simulateCmd, true), + ui.KeyD: ui.NewKeyAction("Show", a.describeCmd, true), + }) +} + +// describeCmd will show a custom GVR +func (a *WorkloadGVR) describeCmd(evt *tcell.EventKey) *tcell.EventKey { + sel := a.GetTable().GetSelectedItem() + if sel == "" { + return evt + } + + // Retrieve custom workload gvr filepath + pathFile := path.Join(a.App().Config.ContextWorkloadDir(), sel) + data, err := os.ReadFile(pathFile) + if err != nil { + a.App().Flash().Err(err) + return nil + } + + // Describe custom workload GVR + details := NewDetails(a.App(), "Describe", pathFile, contentYAML, true).Update(string(data)) + if err := a.App().inject(details, false); err != nil { + a.App().Flash().Err(err) + return nil + } + + return nil +} + +// createCustomCmd will create a custom workload GVR with default template using a specified GVR's name +func (a *WorkloadGVR) createCustomCmd(evt *tcell.EventKey) *tcell.EventKey { + var GVRName string + + // Generate creation form + form, err := a.makeCreateForm(&GVRName) + if err != nil { + return nil + } + confirm := tview.NewModalForm("", form) + confirm.SetText(fmt.Sprintf("Set GVR Name %s %s", a.GVR(), a.App().Config.ContextWorkloadDir())) + confirm.SetDoneFunc(func(int, string) { + a.cleanupClusterContext() + a.dismissDialog() + }) + a.App().Content.AddPage("NewGVRModal", confirm, false, false) + a.App().Content.ShowPage("NewGVRModal") + + return nil +} + +func (a *WorkloadGVR) makeCreateForm(sel *string) (*tview.Form, error) { + // Generate create form + f := tview.NewForm() + f.SetItemPadding(0) + f.SetButtonsAlign(tview.AlignCenter). + SetButtonBackgroundColor(tview.Styles.PrimitiveBackgroundColor). + SetButtonTextColor(tview.Styles.PrimaryTextColor). + SetLabelColor(tcell.ColorAqua).
+ SetFieldTextColor(tcell.ColorOrange) + + f.AddInputField("GVR Filename", "", 0, nil, func(changed string) { + *sel = changed + }) + + f.AddButton("OK", func() { + defer a.dismissDialog() + + // Generate new filename / filepath + filename := fmt.Sprintf("%s.yaml", *sel) + filePathName := path.Join(a.App().Config.ContextWorkloadDir(), filename) + + // Create new GVR file + if err := os.WriteFile(filePathName, config.Template, 0644); err != nil { + a.App().Flash().Errf("Failed to create file: %q", err) + return + } + + a.Stop() + defer a.Start() + if !edit(a.App(), shellOpts{clear: true, args: []string{filePathName}}) { + a.App().Flash().Err(errors.New("Failed to launch editor")) + return + } + }) + f.AddButton("Cancel", func() { + a.dismissDialog() + }) + + return f, nil +} + +func (a *WorkloadGVR) dismissDialog() { + a.App().Content.RemovePage("NewGVRModal") +} + +// editCustomCmd will edit the current custom workloadGVR +func (a *WorkloadGVR) editCustomCmd(evt *tcell.EventKey) *tcell.EventKey { + sel := a.GetTable().GetSelectedItem() + if sel == "" { + return evt + } + + // Edit existing custom GVR + a.Stop() + defer a.Start() + if !edit(a.App(), shellOpts{clear: true, args: []string{path.Join(a.App().Config.ContextWorkloadDir(), sel)}}) { + a.App().Flash().Err(errors.New("Failed to launch editor")) + return nil + } + + a.cleanupClusterContext() + + return nil +} + +// deleteCustomCmd will delete the custom workload GVR +func (a *WorkloadGVR) deleteCustomCmd(evt *tcell.EventKey) *tcell.EventKey { + sel := a.GetTable().GetSelectedItem() + if sel == "" { + return evt + } + + // Remove custom GRV (with prompt) + filePath := path.Join(a.App().Config.ContextWorkloadDir(), sel) + msg := fmt.Sprintf("Are you sure to delete the custom gvr: %s", strings.TrimSuffix(sel, filepath.Ext(sel))) + dialog.ShowConfirm(a.App().Styles.Dialog(), a.App().Content.Pages, "Confirm Deletion", msg, func() { + if err := os.Remove(filePath); err != nil { + a.App().Flash().Errf("could not 
delete GVR: %q", err) + return + } + + a.cleanupClusterContext() + }, func() {}) + + return nil +} + +// addtoCurrentCtx will add the GVR to the current cluster context +func (a *WorkloadGVR) addtoCurrentCtx(evt *tcell.EventKey) *tcell.EventKey { + sel := a.GetTable().GetSelectedItem() + if sel == "" { + return evt + } + + // Add custom GVR to cluster context + filenames := make([]string, 0) + ctxWorkloadPath := a.App().Config.ContextWorkloadPath() + content, err := os.ReadFile(ctxWorkloadPath) + if err == nil { + var ctxConfig config.WorkloadConfig + if err := yaml.Unmarshal(content, &ctxConfig); err != nil { + a.App().Flash().Errf("could not read workload context configuration: %q", err) + return nil + } + filenames = ctxConfig.GVRFilenames + } + + filenames = append(filenames, strings.TrimSuffix(sel, filepath.Ext(sel))) + + // Ensure there is no duplicate + m := make(map[string]string) + for _, n := range filenames { + m[n] = n + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + + // Save new config + data, err := yaml.Marshal(config.WorkloadConfig{GVRFilenames: keys}) + if err != nil { + a.App().Flash().Errf("could not marshal new configuration: %q", err) + return nil + } + + if err := os.WriteFile(ctxWorkloadPath, data, 0644); err != nil { + a.App().Flash().Errf("could not write new configuration: %q", err) + return nil + } + + a.cleanupClusterContext() + + return nil +} + +// deletefromCurrentCtx will delete the gvr from the current cluster context +func (a *WorkloadGVR) deletefromCurrentCtx(evt *tcell.EventKey) *tcell.EventKey { + sel := a.GetTable().GetSelectedItem() + if sel == "" { + return evt + } + + // Delete custom GVR from cluster context + filenames := make([]string, 0) + ctxWorkloadPath := a.App().Config.ContextWorkloadPath() + content, err := os.ReadFile(ctxWorkloadPath) + if err == nil { + var ctxConfig config.WorkloadConfig + if err := yaml.Unmarshal(content, &ctxConfig); err != nil { + 
a.App().Flash().Errf("could not unmarshal configuration: %q", err) + return nil + } + filenames = ctxConfig.GVRFilenames + } + + // Ensure there is no duplicate + m := make(map[string]string) + for _, n := range filenames { + if n != strings.TrimSuffix(sel, filepath.Ext(sel)) { + m[n] = n + } + } + keys := make([]string, 0, len(m)) + for filename := range m { + if _, err := os.Stat(fmt.Sprintf("%s.yaml", path.Join(a.App().Config.ContextWorkloadDir(), filename))); err == nil || !errors.Is(err, os.ErrNotExist) { + keys = append(keys, filename) + } + } + + // Save new config + data, err := yaml.Marshal(config.WorkloadConfig{GVRFilenames: keys}) + if err != nil { + a.App().Flash().Errf("could not marshal new configuration: %q", err) + return nil + } + + if err := os.WriteFile(ctxWorkloadPath, data, 0644); err != nil { + a.App().Flash().Errf("could not write new configuration: %q", err) + return nil + } + + a.cleanupClusterContext() + + return nil +} + +func (a *WorkloadGVR) cleanupClusterContext() { + validFilenames := make([]string, 0) + + // Get cluster context + content, err := os.ReadFile(a.App().Config.ContextWorkloadPath()) + if err != nil { + return + } + var ctxConfig config.WorkloadConfig + if err := yaml.Unmarshal(content, &ctxConfig); err != nil { + return + } + + // Check if each files exists + // Cleanup the one that doesn't exists + for _, filename := range ctxConfig.GVRFilenames { + if _, err := os.Stat(fmt.Sprintf("%s.yaml", path.Join(a.App().Config.ContextWorkloadDir(), filename))); err == nil || !errors.Is(err, os.ErrNotExist) { + validFilenames = append(validFilenames, filename) + } + } + + // Save new cluster context + data, err := yaml.Marshal(config.WorkloadConfig{GVRFilenames: validFilenames}) + if err != nil { + a.App().Flash().Errf("could not marshal new configuration: %q", err) + return + } + + if err := os.WriteFile(a.App().Config.ContextWorkloadPath(), data, 0644); err != nil { + a.App().Flash().Errf("could not write new configuration: %q", 
err) + return + } +} + +// simulateCmd will show the custom workload GVR in the workload view +func (a *WorkloadGVR) simulateCmd(evt *tcell.EventKey) *tcell.EventKey { + co := NewWorkload(client.NewGVR("workloads")) + co.SetContextFn(a.singleWorkloadCtx) + if err := a.App().inject(co, false); err != nil { + a.App().Flash().Err(err) + return nil + } + + return evt +} + +// singleWorkloadCtx will set the selected workloadGVR to the context to be simulated on the workload view +func (a *WorkloadGVR) singleWorkloadCtx(ctx context.Context) context.Context { + wkgvrFilename := a.GetTable().GetSelectedItem() + if wkgvrFilename == "" { + return ctx + } + + workloadcustomDir := a.App().Config.ContextWorkloadDir() + wkgvr, err := config.GetWorkloadGVRFromFile(path.Join(workloadcustomDir, wkgvrFilename)) + if err != nil { + a.App().Flash().Errf("could not retrieve workload gvr from file: %q", err) + return ctx + } + + return context.WithValue(ctx, internal.KeyWorkloadGVRs, []config.WorkloadGVR{wkgvr}) +} From cfca0684d922d2c6fe7ff73ab20dd4f3fefcc60a Mon Sep 17 00:00:00 2001 From: Thomas Lacroix Date: Wed, 15 Jan 2025 19:03:56 -0500 Subject: [PATCH 2/3] feat: change action name / binding --- internal/view/workloadGVR.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/view/workloadGVR.go b/internal/view/workloadGVR.go index 4350b6768c..defd0238a0 100644 --- a/internal/view/workloadGVR.go +++ b/internal/view/workloadGVR.go @@ -74,7 +74,7 @@ func (a *WorkloadGVR) bindKeys(aa *ui.KeyActions) { Visible: true, Dangerous: true, }), - ui.KeyR: ui.NewKeyActionWithOpts("Delete custom GVR", a.deleteCustomCmd, ui.ActionOpts{ + ui.KeyD: ui.NewKeyActionWithOpts("Delete custom GVR", a.deleteCustomCmd, ui.ActionOpts{ Visible: true, Dangerous: true, }), @@ -83,9 +83,9 @@ func (a *WorkloadGVR) bindKeys(aa *ui.KeyActions) { Visible: true, Dangerous: true, }), + ui.KeyS: ui.NewKeyAction("Show", a.describeCmd, true), ui.KeyShiftG: ui.NewKeyAction("Sort GVR", 
a.GetTable().SortColCmd("NAME", true), false), tcell.KeyEnter: ui.NewKeyAction("Simulate", a.simulateCmd, true), - ui.KeyD: ui.NewKeyAction("Show", a.describeCmd, true), }) } From 9dc74f9013a83e06756827d764938dcd9809ae78 Mon Sep 17 00:00:00 2001 From: Thomas Lacroix Date: Wed, 15 Jan 2025 19:15:30 -0500 Subject: [PATCH 3/3] feat: fix custom workload dirs --- internal/config/files.go | 3 ++- internal/config/workload.go | 2 +- internal/dao/workloadGVR.go | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/internal/config/files.go b/internal/config/files.go index 6aecc3aec8..8b1edcba50 100644 --- a/internal/config/files.go +++ b/internal/config/files.go @@ -8,6 +8,7 @@ import ( "errors" "io/fs" "os" + "path" "path/filepath" "github.com/derailed/k9s/internal/config/data" @@ -202,7 +203,7 @@ func AppContextDir(cluster, context string) string { // AppWorkloadsDir generates a valid workload folder path func AppWorkloadsDir() string { - return filepath.Join(AppContextsDir, "workloads") + return path.Join(AppContextsDir, "workloads") } // AppContextAliasesFile generates a valid context specific aliases file path. diff --git a/internal/config/workload.go b/internal/config/workload.go index e1c740b824..0629444e0b 100644 --- a/internal/config/workload.go +++ b/internal/config/workload.go @@ -14,7 +14,7 @@ import ( ) var ( - // Template represents the template of new workload gvr + // Template represents the template of a new workload gvr Template = []byte(`name: "test.com/v1alpha1/myCRD" status: cellName: "Status" diff --git a/internal/dao/workloadGVR.go b/internal/dao/workloadGVR.go index efcd1d6730..5b9df38ea4 100644 --- a/internal/dao/workloadGVR.go +++ b/internal/dao/workloadGVR.go @@ -31,7 +31,7 @@ func NewWorkloadGVR(f Factory) *WorkloadGVR { return &a } -// List returns a collection of aliases. +// List returns a collection of workloadGVRs. 
func (a *WorkloadGVR) List(ctx context.Context, _ string) ([]runtime.Object, error) { workloadsDir, _ := ctx.Value(internal.KeyDir).(string) clusterContext, _ := ctx.Value(internal.KeyPath).(string)