Skip to content

Add flag to include api server audit logs in support bundle #9701

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
May 8, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion cmd/eksctl-anywhere/cmd/generatebundleconfig.go
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@
}
defer close(ctx, deps)

return deps.DignosticCollectorFactory.DiagnosticBundleWorkloadCluster(clusterSpec, deps.Provider, kubeconfig.FromClusterName(clusterSpec.Cluster.Name))
return deps.DignosticCollectorFactory.DiagnosticBundleWorkloadCluster(clusterSpec, deps.Provider, kubeconfig.FromClusterName(clusterSpec.Cluster.Name), false)

Check warning on line 87 in cmd/eksctl-anywhere/cmd/generatebundleconfig.go

View check run for this annotation

Codecov / codecov/patch

cmd/eksctl-anywhere/cmd/generatebundleconfig.go#L87

Added line #L87 was not covered by tests
}

func (gsbo *generateSupportBundleOptions) generateDefaultBundleConfig(ctx context.Context) (diagnostics.DiagnosticBundle, error) {
Expand Down
8 changes: 5 additions & 3 deletions cmd/eksctl-anywhere/cmd/supportbundle.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
hardwareFileName string
tinkerbellBootstrapIP string
bundlesManifest string
auditLogs bool
}

var csbo = &createSupportBundleOptions{}
Expand All @@ -38,7 +39,7 @@
if err := csbo.validate(cmd.Context()); err != nil {
return err
}
if err := csbo.createBundle(cmd.Context(), csbo.since, csbo.sinceTime, csbo.bundleConfig, csbo.bundlesManifest); err != nil {
if err := csbo.createBundle(cmd.Context(), csbo.since, csbo.sinceTime, csbo.bundleConfig, csbo.bundlesManifest, csbo.auditLogs); err != nil {

Check warning on line 42 in cmd/eksctl-anywhere/cmd/supportbundle.go

View check run for this annotation

Codecov / codecov/patch

cmd/eksctl-anywhere/cmd/supportbundle.go#L42

Added line #L42 was not covered by tests
return fmt.Errorf("failed to create support bundle: %v", err)
}
return nil
Expand All @@ -53,6 +54,7 @@
supportbundleCmd.Flags().StringVarP(&csbo.fileName, "filename", "f", "", "Filename that contains EKS-A cluster configuration")
supportbundleCmd.Flags().StringVarP(&csbo.wConfig, "w-config", "w", "", "Kubeconfig file to use when creating support bundle for a workload cluster")
supportbundleCmd.Flags().StringVarP(&csbo.bundlesManifest, "bundles-manifest", "", "", "Bundles manifest to use when generating support bundle (required for generating support bundle in airgap environment)")
supportbundleCmd.Flags().BoolVarP(&csbo.auditLogs, "audit-logs", "", false, "Include the latest api server audit log file in the support bundle")
err := supportbundleCmd.MarkFlagRequired("filename")
if err != nil {
log.Fatalf("Error marking flag as required: %v", err)
Expand All @@ -73,7 +75,7 @@
return nil
}

func (csbo *createSupportBundleOptions) createBundle(ctx context.Context, since, sinceTime, bundleConfig string, bundlesManifest string) error {
func (csbo *createSupportBundleOptions) createBundle(ctx context.Context, since, sinceTime, bundleConfig string, bundlesManifest string, auditLogs bool) error {

Check warning on line 78 in cmd/eksctl-anywhere/cmd/supportbundle.go

View check run for this annotation

Codecov / codecov/patch

cmd/eksctl-anywhere/cmd/supportbundle.go#L78

Added line #L78 was not covered by tests
var opts []cluster.FileSpecBuilderOpt
if bundlesManifest != "" {
opts = append(opts, cluster.WithOverrideBundlesManifest(bundlesManifest))
Expand All @@ -92,7 +94,7 @@
}
defer close(ctx, deps)

supportBundle, err := deps.DignosticCollectorFactory.DiagnosticBundle(clusterSpec, deps.Provider, getKubeconfigPath(clusterSpec.Cluster.Name, csbo.wConfig), bundleConfig)
supportBundle, err := deps.DignosticCollectorFactory.DiagnosticBundle(clusterSpec, deps.Provider, getKubeconfigPath(clusterSpec.Cluster.Name, csbo.wConfig), bundleConfig, auditLogs)

Check warning on line 97 in cmd/eksctl-anywhere/cmd/supportbundle.go

View check run for this annotation

Codecov / codecov/patch

cmd/eksctl-anywhere/cmd/supportbundle.go#L97

Added line #L97 was not covered by tests
if err != nil {
return fmt.Errorf("failed to parse collector: %v", err)
}
Expand Down
1 change: 1 addition & 0 deletions docs/content/en/docs/clustermgmt/support/supportbundle.md
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ If you want to generate support bundle in an airgapped environment, the `--bundl
of your eks-a bundles manifest yaml file.
```
Flags:
--audit-logs Include the latest api server audit log file in the support bundle
--bundle-config string Bundle Config file to use when generating support bundle
-f, --filename string Filename that contains EKS-A cluster configuration
-h, --help Help for support-bundle
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ anywhere generate support-bundle -f my-cluster.yaml [flags]
### Options

```
--audit-logs Include the latest api server audit log file in the support bundle
--bundle-config string Bundle Config file to use when generating support bundle
-f, --filename string Filename that contains EKS-A cluster configuration
-h, --help help for support-bundle
Expand Down
2 changes: 1 addition & 1 deletion pkg/clustermanager/cluster_manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -445,7 +445,7 @@ func (c *ClusterManager) SaveLogsWorkloadCluster(ctx context.Context, provider p
return nil
}

bundle, err := c.diagnosticsFactory.DiagnosticBundleWorkloadCluster(spec, provider, cluster.KubeconfigFile)
bundle, err := c.diagnosticsFactory.DiagnosticBundleWorkloadCluster(spec, provider, cluster.KubeconfigFile, false)
if err != nil {
logger.V(5).Info("Error generating support bundle for workload cluster", "error", err)
return nil
Expand Down
2 changes: 1 addition & 1 deletion pkg/clustermanager/cluster_manager_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,7 @@ func TestClusterManagerSaveLogsSuccess(t *testing.T) {
m.diagnosticsFactory.EXPECT().DiagnosticBundleManagementCluster(clusterSpec, bootstrapCluster.KubeconfigFile).Return(b, nil)
b.EXPECT().CollectAndAnalyze(ctx, gomock.AssignableToTypeOf(&time.Time{}))

m.diagnosticsFactory.EXPECT().DiagnosticBundleWorkloadCluster(clusterSpec, m.provider, workloadCluster.KubeconfigFile).Return(b, nil)
m.diagnosticsFactory.EXPECT().DiagnosticBundleWorkloadCluster(clusterSpec, m.provider, workloadCluster.KubeconfigFile, false).Return(b, nil)
b.EXPECT().CollectAndAnalyze(ctx, gomock.AssignableToTypeOf(&time.Time{}))

if err := c.SaveLogsManagementCluster(ctx, clusterSpec, bootstrapCluster); err != nil {
Expand Down
11 changes: 11 additions & 0 deletions pkg/diagnostics/collector_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ type Collect struct {
Exec *exec `json:"exec,omitempty"`
RunPod *runPod `json:"runPod,omitempty"`
Run *Run `json:"run,omitempty"`
RunDaemonSet *RunDaemonSet `json:"runDaemonSet,omitempty"`
}

type clusterResources struct {
Expand Down Expand Up @@ -108,3 +109,13 @@ type Run struct {
Input map[string]string `json:"input,omitempty"`
Timeout string `json:"timeout,omitempty"`
}

// RunDaemonSet is used to define config for a daemonset run on hosts via troubleshoot.
// A DaemonSet-based collector lets troubleshoot gather per-host files (such as API
// server audit logs) from every matching node in a single collection pass.
type RunDaemonSet struct {
	collectorMeta `json:",inline"`
	// Name is the name assigned to the daemonset collector.
	Name string `json:"name,omitempty"`
	// Namespace is the namespace the collector daemonset is created in.
	Namespace string `json:"namespace,omitempty"`
	// PodSpec is the pod spec scheduled on each matching node; it carries the
	// collection command, volume mounts, node selector, and tolerations.
	PodSpec *v1.PodSpec `json:"podSpec"`
	// Timeout bounds how long the collector may run (duration string, e.g. "60s").
	Timeout string `json:"timeout,omitempty"`
	imagePullSecrets `json:",inline"`
}
44 changes: 44 additions & 0 deletions pkg/diagnostics/collectors.go
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,50 @@ func (c *EKSACollectorFactory) HostCollectors(datacenter v1alpha1.Ref) []*Collec
}
}

// AuditLogCollectors returns the audit log collectors that run on all control plane nodes.
// The collector is a troubleshoot daemonset pinned to control-plane nodes; each pod
// mounts the host's /var/log read-only and cats the latest api server audit log file.
func (c *EKSACollectorFactory) AuditLogCollectors() []*Collect {
	// Single name shared by the host-path volume and its mount.
	const hostLogVolume = "host-var-log"

	// Container that reads the audit log from the mounted host path.
	collectorContainer := v1.Container{
		Name:    "audit-logs-collector",
		Image:   c.DiagnosticCollectorImage,
		Command: []string{"/bin/sh", "-c"},
		Args:    []string{"cat /hostlogs/kubernetes/api-audit.log"},
		VolumeMounts: []v1.VolumeMount{{
			Name:      hostLogVolume,
			MountPath: "/hostlogs",
			ReadOnly:  true,
		}},
	}

	// Pod spec restricted to control-plane nodes and tolerating their taint,
	// with /var/log from the host exposed to the container.
	podSpec := &v1.PodSpec{
		Containers: []v1.Container{collectorContainer},
		Volumes: []v1.Volume{{
			Name: hostLogVolume,
			VolumeSource: v1.VolumeSource{
				HostPath: &v1.HostPathVolumeSource{
					Path: "/var/log",
				},
			},
		}},
		NodeSelector: map[string]string{
			"node-role.kubernetes.io/control-plane": "",
		},
		Tolerations: []v1.Toleration{{
			Key:      "node-role.kubernetes.io/control-plane",
			Operator: "Exists",
			Effect:   "NoSchedule",
		}},
	}

	return []*Collect{{
		RunDaemonSet: &RunDaemonSet{
			Name:      "audit-logs",
			Namespace: constants.EksaDiagnosticsNamespace,
			PodSpec:   podSpec,
			Timeout:   "60s",
		},
	}}
}

// DataCenterConfigCollectors returns the collectors for the provider datacenter config in the cluster spec.
func (c *EKSACollectorFactory) DataCenterConfigCollectors(datacenter v1alpha1.Ref, spec *cluster.Spec) []*Collect {
switch datacenter.Kind {
Expand Down
34 changes: 34 additions & 0 deletions pkg/diagnostics/collectors_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -198,3 +198,37 @@ func TestHostCollectors(t *testing.T) {
})
}
}

// TestAuditLogCollectors verifies the shape of the audit-log daemonset collector:
// one RunDaemonSet collector, one container with one mount, one host volume, and
// control-plane targeting via node selector and toleration.
func TestAuditLogCollectors(t *testing.T) {
	testCases := []struct {
		name                     string
		diagnosticCollectorImage string
	}{
		{
			name:                     "audit logs happy case",
			diagnosticCollectorImage: "test-image",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			g := NewGomegaWithT(t)

			factory := diagnostics.NewCollectorFactory(tc.diagnosticCollectorImage, test.NewFileReader())
			got := factory.AuditLogCollectors()

			g.Expect(got).To(HaveLen(1), "AuditLogCollectors() should return exactly one collector")

			daemonSet := got[0].RunDaemonSet
			g.Expect(daemonSet).NotTo(BeNil(), "AuditLogCollectors() should return a RunDaemonSet collector")

			spec := daemonSet.PodSpec
			g.Expect(spec).NotTo(BeNil(), "PodSpec should not be nil")
			g.Expect(spec.Containers).To(HaveLen(1), "PodSpec should have exactly one container")
			g.Expect(spec.Containers[0].VolumeMounts).To(HaveLen(1), "Container should have exactly one volume mount")
			g.Expect(spec.Volumes).To(HaveLen(1), "PodSpec should have exactly one volume")
			g.Expect(spec.NodeSelector).To(HaveKeyWithValue("node-role.kubernetes.io/control-plane", ""), "NodeSelector should target control-plane nodes")
			g.Expect(spec.Tolerations).To(HaveLen(1), "PodSpec should have exactly one toleration")
			g.Expect(spec.Tolerations[0].Key).To(Equal("node-role.kubernetes.io/control-plane"), "Toleration key should be 'node-role.kubernetes.io/control-plane'")
		})
	}
}
12 changes: 11 additions & 1 deletion pkg/diagnostics/diagnostic_bundle.go
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ func newDiagnosticBundleManagementCluster(af AnalyzerFactory, cf CollectorFactor
}

func newDiagnosticBundleFromSpec(af AnalyzerFactory, cf CollectorFactory, spec *cluster.Spec, provider providers.Provider,
client BundleClient, kubectl *executables.Kubectl, kubeconfig string, writer filewriter.FileWriter,
client BundleClient, kubectl *executables.Kubectl, kubeconfig string, writer filewriter.FileWriter, auditLogs bool,
) (*EksaDiagnosticBundle, error) {
b := &EksaDiagnosticBundle{
bundle: &supportBundle{
Expand Down Expand Up @@ -124,6 +124,10 @@ func newDiagnosticBundleFromSpec(af AnalyzerFactory, cf CollectorFactory, spec *
WithPackagesCollectors().
WithLogTextAnalyzers()

if auditLogs {
b = b.WithAuditLogs()
}

err := b.WriteBundleConfig()
if err != nil {
return nil, fmt.Errorf("writing bundle config: %v", err)
Expand Down Expand Up @@ -270,6 +274,12 @@ func (e *EksaDiagnosticBundle) WithHostCollectors(config v1alpha1.Ref) *EksaDiag
return e.WithDefaultHostCollectors(config)
}

// WithAuditLogs configures bundle to collect audit logs from control plane nodes.
// The audit-log collectors produced by the collector factory are appended to the
// bundle's collector list; the receiver is returned to allow chaining.
func (e *EksaDiagnosticBundle) WithAuditLogs() *EksaDiagnosticBundle {
	auditCollectors := e.collectorFactory.AuditLogCollectors()
	e.bundle.Spec.Collectors = append(e.bundle.Spec.Collectors, auditCollectors...)
	return e
}

// WithDefaultHostCollectors collects the default collectors that run on the host machine.
func (e *EksaDiagnosticBundle) WithDefaultHostCollectors(config v1alpha1.Ref) *EksaDiagnosticBundle {
e.hostBundle.Spec.Collectors = append(e.hostBundle.Spec.Collectors, e.collectorFactory.HostCollectors(config)...)
Expand Down
119 changes: 115 additions & 4 deletions pkg/diagnostics/diagnostic_bundle_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import (
"time"

"github.com/golang/mock/gomock"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/aws/eks-anywhere/internal/test"
Expand Down Expand Up @@ -142,7 +143,7 @@ func TestGenerateBundleConfigWithExternalEtcd(t *testing.T) {
}

f := diagnostics.NewFactory(opts)
_, _ = f.DiagnosticBundleWorkloadCluster(spec, p, "")
_, _ = f.DiagnosticBundleWorkloadCluster(spec, p, "", false)
})
}

Expand Down Expand Up @@ -203,7 +204,7 @@ func TestGenerateBundleConfigWithOidc(t *testing.T) {
}

f := diagnostics.NewFactory(opts)
_, _ = f.DiagnosticBundleWorkloadCluster(spec, p, "")
_, _ = f.DiagnosticBundleWorkloadCluster(spec, p, "", false)
})
}

Expand Down Expand Up @@ -264,7 +265,7 @@ func TestGenerateBundleConfigWithGitOps(t *testing.T) {
}

f := diagnostics.NewFactory(opts)
_, _ = f.DiagnosticBundleWorkloadCluster(spec, p, "")
_, _ = f.DiagnosticBundleWorkloadCluster(spec, p, "", false)
})
}

Expand Down Expand Up @@ -390,7 +391,7 @@ func TestBundleFromSpecComplete(t *testing.T) {
}

f := diagnostics.NewFactory(opts)
b, _ := f.DiagnosticBundleWorkloadCluster(spec, p, kubeconfig)
b, _ := f.DiagnosticBundleWorkloadCluster(spec, p, kubeconfig, false)
err = b.CollectAndAnalyze(ctx, sinceTimeValue)
if err != nil {
t.Errorf("CollectAndAnalyze() error = %v, wantErr nil", err)
Expand Down Expand Up @@ -635,3 +636,113 @@ func TestTinkerbellHostCollectors(t *testing.T) {
})
}
}

// TestDiagnosticBundleWithAuditLogsEnabled verifies that creating a workload-cluster
// diagnostic bundle with auditLogs=true pulls collectors from the factory's
// AuditLogCollectors() in addition to the standard collector/analyzer sets.
func TestDiagnosticBundleWithAuditLogsEnabled(t *testing.T) {
	// Create a cluster spec for testing
	spec := test.NewClusterSpec(func(s *cluster.Spec) {
		s.Cluster = &eksav1alpha1.Cluster{
			TypeMeta: metav1.TypeMeta{},
			ObjectMeta: metav1.ObjectMeta{
				Name: "test-cluster-with-audit-logs",
			},
			Spec: eksav1alpha1.ClusterSpec{
				ControlPlaneConfiguration: eksav1alpha1.ControlPlaneConfiguration{
					Endpoint: &eksav1alpha1.Endpoint{
						Host: "1.1.1.1",
					},
				},
				DatacenterRef: eksav1alpha1.Ref{
					Kind: eksav1alpha1.VSphereDatacenterKind,
					Name: "test-datacenter",
				},
			},
			Status: eksav1alpha1.ClusterStatus{},
		}
	})

	t.Run("Generate diagnostic bundle with audit logs enabled", func(t *testing.T) {
		kubeconfig := "test-cluster-with-audit.kubeconfig"

		provider := givenProvider(t)
		provider.EXPECT().MachineConfigs(spec).Return(machineConfigs())

		// Expect the full default analyzer set to be requested once each.
		a := givenMockAnalyzerFactory(t)
		a.EXPECT().DataCenterConfigAnalyzers(spec.Cluster.Spec.DatacenterRef).Return(nil)
		a.EXPECT().DefaultAnalyzers().Return(nil)
		a.EXPECT().EksaLogTextAnalyzers(gomock.Any()).Return(nil)
		a.EXPECT().ManagementClusterAnalyzers().Return(nil)
		a.EXPECT().PackageAnalyzers().Return(nil)

		// Expect the full default collector set to be requested once each.
		c := givenMockCollectorsFactory(t)
		c.EXPECT().DefaultCollectors().Return(nil)
		c.EXPECT().EksaHostCollectors(gomock.Any()).Return(nil)
		c.EXPECT().HostCollectors(spec.Cluster.Spec.DatacenterRef).Return(nil)
		c.EXPECT().ManagementClusterCollectors().Return(nil)
		c.EXPECT().DataCenterConfigCollectors(spec.Cluster.Spec.DatacenterRef, spec).Return(nil)
		c.EXPECT().PackagesCollectors().Return(nil)
		c.EXPECT().FileCollectors(gomock.Any()).Return(nil)

		// Canned return value for the audit-log expectation below; mirrors the
		// RunDaemonSet collector built by EKSACollectorFactory.AuditLogCollectors().
		auditLogCollectors := []*diagnostics.Collect{
			{
				RunDaemonSet: &diagnostics.RunDaemonSet{
					Name:      "audit-logs",
					Namespace: constants.EksaDiagnosticsNamespace,
					PodSpec: &v1.PodSpec{
						Containers: []v1.Container{{
							Name:    "audit-logs-collector",
							Image:   "test-image",
							Command: []string{"/bin/sh", "-c"},
							Args: []string{
								"cat /hostlogs/kubernetes/api-audit.log",
							},
							VolumeMounts: []v1.VolumeMount{{
								Name:      "host-var-log",
								MountPath: "/hostlogs",
								ReadOnly:  true,
							}},
						}},
						Volumes: []v1.Volume{{
							Name: "host-var-log",
							VolumeSource: v1.VolumeSource{
								HostPath: &v1.HostPathVolumeSource{
									Path: "/var/log",
								},
							},
						}},
						NodeSelector: map[string]string{
							"node-role.kubernetes.io/control-plane": "",
						},
						Tolerations: []v1.Toleration{{
							Key:      "node-role.kubernetes.io/control-plane",
							Operator: "Exists",
							Effect:   "NoSchedule",
						}},
					},
					Timeout: "60s",
				},
			},
		}
		// The key expectation: audit-log collectors are requested only because
		// auditLogs=true is passed to DiagnosticBundleWorkloadCluster below.
		c.EXPECT().AuditLogCollectors().Return(auditLogCollectors)

		w := givenWriter(t)
		w.EXPECT().Write(gomock.Any(), gomock.Any()).AnyTimes()

		opts := diagnostics.EksaDiagnosticBundleFactoryOpts{
			AnalyzerFactory:  a,
			CollectorFactory: c,
			Writer:           w,
		}

		factory := diagnostics.NewFactory(opts)
		bundle, err := factory.DiagnosticBundleWorkloadCluster(spec, provider, kubeconfig, true)
		if err != nil {
			t.Errorf("DiagnosticBundleWorkloadCluster() error = %v, wantErr nil", err)
			return
		}

		if bundle == nil {
			t.Errorf("DiagnosticBundleWorkloadCluster() returned nil bundle")
			return
		}
	})
}
Loading