Skip to content

Commit d6341d2

Browse files
committed
Add option to include api server audit logs in support bundle
1 parent 08b3131 commit d6341d2

File tree

14 files changed

+255
-26
lines changed

14 files changed

+255
-26
lines changed

cmd/eksctl-anywhere/cmd/generatebundleconfig.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ func (gsbo *generateSupportBundleOptions) generateBundleConfig(ctx context.Conte
8484
}
8585
defer close(ctx, deps)
8686

87-
return deps.DignosticCollectorFactory.DiagnosticBundleWorkloadCluster(clusterSpec, deps.Provider, kubeconfig.FromClusterName(clusterSpec.Cluster.Name))
87+
return deps.DignosticCollectorFactory.DiagnosticBundleWorkloadCluster(clusterSpec, deps.Provider, kubeconfig.FromClusterName(clusterSpec.Cluster.Name), false)
8888
}
8989

9090
func (gsbo *generateSupportBundleOptions) generateDefaultBundleConfig(ctx context.Context) (diagnostics.DiagnosticBundle, error) {

cmd/eksctl-anywhere/cmd/supportbundle.go

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@ type createSupportBundleOptions struct {
2424
hardwareFileName string
2525
tinkerbellBootstrapIP string
2626
bundlesManifest string
27+
auditLogs bool
2728
}
2829

2930
var csbo = &createSupportBundleOptions{}
@@ -38,7 +39,7 @@ var supportbundleCmd = &cobra.Command{
3839
if err := csbo.validate(cmd.Context()); err != nil {
3940
return err
4041
}
41-
if err := csbo.createBundle(cmd.Context(), csbo.since, csbo.sinceTime, csbo.bundleConfig, csbo.bundlesManifest); err != nil {
42+
if err := csbo.createBundle(cmd.Context(), csbo.since, csbo.sinceTime, csbo.bundleConfig, csbo.bundlesManifest, csbo.auditLogs); err != nil {
4243
return fmt.Errorf("failed to create support bundle: %v", err)
4344
}
4445
return nil
@@ -53,6 +54,7 @@ func init() {
5354
supportbundleCmd.Flags().StringVarP(&csbo.fileName, "filename", "f", "", "Filename that contains EKS-A cluster configuration")
5455
supportbundleCmd.Flags().StringVarP(&csbo.wConfig, "w-config", "w", "", "Kubeconfig file to use when creating support bundle for a workload cluster")
5556
supportbundleCmd.Flags().StringVarP(&csbo.bundlesManifest, "bundles-manifest", "", "", "Bundles manifest to use when generating support bundle (required for generating support bundle in airgap environment)")
57+
supportbundleCmd.Flags().BoolVarP(&csbo.auditLogs, "audit-logs", "", false, "Include the latest api server audit log file in the support bundle")
5658
err := supportbundleCmd.MarkFlagRequired("filename")
5759
if err != nil {
5860
log.Fatalf("Error marking flag as required: %v", err)
@@ -73,7 +75,7 @@ func (csbo *createSupportBundleOptions) validate(ctx context.Context) error {
7375
return nil
7476
}
7577

76-
func (csbo *createSupportBundleOptions) createBundle(ctx context.Context, since, sinceTime, bundleConfig string, bundlesManifest string) error {
78+
func (csbo *createSupportBundleOptions) createBundle(ctx context.Context, since, sinceTime, bundleConfig string, bundlesManifest string, auditLogs bool) error {
7779
var opts []cluster.FileSpecBuilderOpt
7880
if bundlesManifest != "" {
7981
opts = append(opts, cluster.WithOverrideBundlesManifest(bundlesManifest))
@@ -92,7 +94,7 @@ func (csbo *createSupportBundleOptions) createBundle(ctx context.Context, since,
9294
}
9395
defer close(ctx, deps)
9496

95-
supportBundle, err := deps.DignosticCollectorFactory.DiagnosticBundle(clusterSpec, deps.Provider, getKubeconfigPath(clusterSpec.Cluster.Name, csbo.wConfig), bundleConfig)
97+
supportBundle, err := deps.DignosticCollectorFactory.DiagnosticBundle(clusterSpec, deps.Provider, getKubeconfigPath(clusterSpec.Cluster.Name, csbo.wConfig), bundleConfig, auditLogs)
9698
if err != nil {
9799
return fmt.Errorf("failed to parse collector: %v", err)
98100
}

docs/content/en/docs/clustermgmt/support/supportbundle.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,7 @@ If you want to generate support bundle in an airgapped environment, the `--bundl
4343
of your eks-a bundles manifest yaml file.
4444
```
4545
Flags:
46+
--audit-logs Include the latest api server audit log file in the support bundle
4647
--bundle-config string Bundle Config file to use when generating support bundle
4748
-f, --filename string Filename that contains EKS-A cluster configuration
4849
-h, --help Help for support-bundle

docs/content/en/docs/reference/eksctl/anywhere_generate_support-bundle.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ anywhere generate support-bundle -f my-cluster.yaml [flags]
1818
### Options
1919

2020
```
21+
--audit-logs Include the latest api server audit log file in the support bundle
2122
--bundle-config string Bundle Config file to use when generating support bundle
2223
-f, --filename string Filename that contains EKS-A cluster configuration
2324
-h, --help help for support-bundle

pkg/clustermanager/cluster_manager.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -445,7 +445,7 @@ func (c *ClusterManager) SaveLogsWorkloadCluster(ctx context.Context, provider p
445445
return nil
446446
}
447447

448-
bundle, err := c.diagnosticsFactory.DiagnosticBundleWorkloadCluster(spec, provider, cluster.KubeconfigFile)
448+
bundle, err := c.diagnosticsFactory.DiagnosticBundleWorkloadCluster(spec, provider, cluster.KubeconfigFile, false)
449449
if err != nil {
450450
logger.V(5).Info("Error generating support bundle for workload cluster", "error", err)
451451
return nil

pkg/clustermanager/cluster_manager_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -141,7 +141,7 @@ func TestClusterManagerSaveLogsSuccess(t *testing.T) {
141141
m.diagnosticsFactory.EXPECT().DiagnosticBundleManagementCluster(clusterSpec, bootstrapCluster.KubeconfigFile).Return(b, nil)
142142
b.EXPECT().CollectAndAnalyze(ctx, gomock.AssignableToTypeOf(&time.Time{}))
143143

144-
m.diagnosticsFactory.EXPECT().DiagnosticBundleWorkloadCluster(clusterSpec, m.provider, workloadCluster.KubeconfigFile).Return(b, nil)
144+
m.diagnosticsFactory.EXPECT().DiagnosticBundleWorkloadCluster(clusterSpec, m.provider, workloadCluster.KubeconfigFile, false).Return(b, nil)
145145
b.EXPECT().CollectAndAnalyze(ctx, gomock.AssignableToTypeOf(&time.Time{}))
146146

147147
if err := c.SaveLogsManagementCluster(ctx, clusterSpec, bootstrapCluster); err != nil {

pkg/diagnostics/collector_types.go

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ type Collect struct {
1515
Exec *exec `json:"exec,omitempty"`
1616
RunPod *runPod `json:"runPod,omitempty"`
1717
Run *Run `json:"run,omitempty"`
18+
RunDaemonSet *RunDaemonSet `json:"runDaemonSet,omitempty"`
1819
}
1920

2021
type clusterResources struct {
@@ -108,3 +109,13 @@ type Run struct {
108109
Input map[string]string `json:"input,omitempty"`
109110
Timeout string `json:"timeout,omitempty"`
110111
}
112+
113+
// RunDaemonSet is used to define config for a daemonset run on hosts via troubleshoot.
114+
type RunDaemonSet struct {
115+
collectorMeta `json:",inline"`
116+
Name string `json:"name,omitempty"`
117+
Namespace string `json:"namespace,omitempty"`
118+
PodSpec *v1.PodSpec `json:"podSpec"`
119+
Timeout string `json:"timeout,omitempty"`
120+
imagePullSecrets `json:",inline"`
121+
}

pkg/diagnostics/collectors.go

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -87,6 +87,50 @@ func (c *EKSACollectorFactory) HostCollectors(datacenter v1alpha1.Ref) []*Collec
8787
}
8888
}
8989

90+
// AuditLogCollectors returns the audit log collectors that run on all control plane nodes.
91+
func (c *EKSACollectorFactory) AuditLogCollectors() []*Collect {
92+
return []*Collect{
93+
{
94+
RunDaemonSet: &RunDaemonSet{
95+
Name: "audit-logs",
96+
Namespace: constants.EksaDiagnosticsNamespace,
97+
PodSpec: &v1.PodSpec{
98+
Containers: []v1.Container{{
99+
Name: "audit-logs-collector",
100+
Image: c.DiagnosticCollectorImage,
101+
Command: []string{"/bin/sh", "-c"},
102+
Args: []string{
103+
"cat /hostlogs/kubernetes/api-audit.log",
104+
},
105+
VolumeMounts: []v1.VolumeMount{{
106+
Name: "host-var-log",
107+
MountPath: "/hostlogs",
108+
ReadOnly: true,
109+
}},
110+
}},
111+
Volumes: []v1.Volume{{
112+
Name: "host-var-log",
113+
VolumeSource: v1.VolumeSource{
114+
HostPath: &v1.HostPathVolumeSource{
115+
Path: "/var/log",
116+
},
117+
},
118+
}},
119+
NodeSelector: map[string]string{
120+
"node-role.kubernetes.io/control-plane": "",
121+
},
122+
Tolerations: []v1.Toleration{{
123+
Key: "node-role.kubernetes.io/control-plane",
124+
Operator: "Exists",
125+
Effect: "NoSchedule",
126+
}},
127+
},
128+
Timeout: "60s",
129+
},
130+
},
131+
}
132+
}
133+
90134
// DataCenterConfigCollectors returns the collectors for the provider datacenter config in the cluster spec.
91135
func (c *EKSACollectorFactory) DataCenterConfigCollectors(datacenter v1alpha1.Ref, spec *cluster.Spec) []*Collect {
92136
switch datacenter.Kind {

pkg/diagnostics/collectors_test.go

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -198,3 +198,37 @@ func TestHostCollectors(t *testing.T) {
198198
})
199199
}
200200
}
201+
202+
func TestAuditLogCollectors(t *testing.T) {
203+
tests := []struct {
204+
name string
205+
diagnosticCollectorImage string
206+
}{
207+
{
208+
name: "audit logs happy case",
209+
diagnosticCollectorImage: "test-image",
210+
},
211+
}
212+
213+
for _, tt := range tests {
214+
t.Run(tt.name, func(t *testing.T) {
215+
g := NewGomegaWithT(t)
216+
factory := diagnostics.NewCollectorFactory(tt.diagnosticCollectorImage, test.NewFileReader())
217+
collectors := factory.AuditLogCollectors()
218+
219+
g.Expect(collectors).To(HaveLen(1), "AuditLogCollectors() should return exactly one collector")
220+
221+
collector := collectors[0]
222+
g.Expect(collector.RunDaemonSet).NotTo(BeNil(), "AuditLogCollectors() should return a RunDaemonSet collector")
223+
224+
podSpec := collector.RunDaemonSet.PodSpec
225+
g.Expect(podSpec).NotTo(BeNil(), "PodSpec should not be nil")
226+
g.Expect(podSpec.Containers).To(HaveLen(1), "PodSpec should have exactly one container")
227+
g.Expect(podSpec.Containers[0].VolumeMounts).To(HaveLen(1), "Container should have exactly one volume mount")
228+
g.Expect(podSpec.Volumes).To(HaveLen(1), "PodSpec should have exactly one volume")
229+
g.Expect(podSpec.NodeSelector).To(HaveKeyWithValue("node-role.kubernetes.io/control-plane", ""), "NodeSelector should target control-plane nodes")
230+
g.Expect(podSpec.Tolerations).To(HaveLen(1), "PodSpec should have exactly one toleration")
231+
g.Expect(podSpec.Tolerations[0].Key).To(Equal("node-role.kubernetes.io/control-plane"), "Toleration key should be 'node-role.kubernetes.io/control-plane'")
232+
})
233+
}
234+
}

pkg/diagnostics/diagnostic_bundle.go

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,7 @@ func newDiagnosticBundleManagementCluster(af AnalyzerFactory, cf CollectorFactor
8787
}
8888

8989
func newDiagnosticBundleFromSpec(af AnalyzerFactory, cf CollectorFactory, spec *cluster.Spec, provider providers.Provider,
90-
client BundleClient, kubectl *executables.Kubectl, kubeconfig string, writer filewriter.FileWriter,
90+
client BundleClient, kubectl *executables.Kubectl, kubeconfig string, writer filewriter.FileWriter, auditLogs bool,
9191
) (*EksaDiagnosticBundle, error) {
9292
b := &EksaDiagnosticBundle{
9393
bundle: &supportBundle{
@@ -122,7 +122,8 @@ func newDiagnosticBundleFromSpec(af AnalyzerFactory, cf CollectorFactory, spec *
122122
WithDefaultCollectors().
123123
WithFileCollectors([]string{logger.GetOutputFilePath()}).
124124
WithPackagesCollectors().
125-
WithLogTextAnalyzers()
125+
WithLogTextAnalyzers().
126+
WithAuditLogs(auditLogs)
126127

127128
err := b.WriteBundleConfig()
128129
if err != nil {
@@ -270,6 +271,15 @@ func (e *EksaDiagnosticBundle) WithHostCollectors(config v1alpha1.Ref) *EksaDiag
270271
return e.WithDefaultHostCollectors(config)
271272
}
272273

274+
// WithAuditLogs configures bundle to collect audit logs from control plane nodes.
275+
func (e *EksaDiagnosticBundle) WithAuditLogs(auditLogs bool) *EksaDiagnosticBundle {
276+
if !auditLogs {
277+
return e
278+
}
279+
e.bundle.Spec.Collectors = append(e.bundle.Spec.Collectors, e.collectorFactory.AuditLogCollectors()...)
280+
return e
281+
}
282+
273283
// WithDefaultHostCollectors collects the default collectors that run on the host machine.
274284
func (e *EksaDiagnosticBundle) WithDefaultHostCollectors(config v1alpha1.Ref) *EksaDiagnosticBundle {
275285
e.hostBundle.Spec.Collectors = append(e.hostBundle.Spec.Collectors, e.collectorFactory.HostCollectors(config)...)

pkg/diagnostics/diagnostic_bundle_test.go

Lines changed: 115 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ import (
77
"time"
88

99
"github.com/golang/mock/gomock"
10+
v1 "k8s.io/api/core/v1"
1011
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
1112

1213
"github.com/aws/eks-anywhere/internal/test"
@@ -142,7 +143,7 @@ func TestGenerateBundleConfigWithExternalEtcd(t *testing.T) {
142143
}
143144

144145
f := diagnostics.NewFactory(opts)
145-
_, _ = f.DiagnosticBundleWorkloadCluster(spec, p, "")
146+
_, _ = f.DiagnosticBundleWorkloadCluster(spec, p, "", false)
146147
})
147148
}
148149

@@ -203,7 +204,7 @@ func TestGenerateBundleConfigWithOidc(t *testing.T) {
203204
}
204205

205206
f := diagnostics.NewFactory(opts)
206-
_, _ = f.DiagnosticBundleWorkloadCluster(spec, p, "")
207+
_, _ = f.DiagnosticBundleWorkloadCluster(spec, p, "", false)
207208
})
208209
}
209210

@@ -264,7 +265,7 @@ func TestGenerateBundleConfigWithGitOps(t *testing.T) {
264265
}
265266

266267
f := diagnostics.NewFactory(opts)
267-
_, _ = f.DiagnosticBundleWorkloadCluster(spec, p, "")
268+
_, _ = f.DiagnosticBundleWorkloadCluster(spec, p, "", false)
268269
})
269270
}
270271

@@ -390,7 +391,7 @@ func TestBundleFromSpecComplete(t *testing.T) {
390391
}
391392

392393
f := diagnostics.NewFactory(opts)
393-
b, _ := f.DiagnosticBundleWorkloadCluster(spec, p, kubeconfig)
394+
b, _ := f.DiagnosticBundleWorkloadCluster(spec, p, kubeconfig, false)
394395
err = b.CollectAndAnalyze(ctx, sinceTimeValue)
395396
if err != nil {
396397
t.Errorf("CollectAndAnalyze() error = %v, wantErr nil", err)
@@ -635,3 +636,113 @@ func TestTinkerbellHostCollectors(t *testing.T) {
635636
})
636637
}
637638
}
639+
640+
func TestDiagnosticBundleWithAuditLogsEnabled(t *testing.T) {
641+
// Create a cluster spec for testing
642+
spec := test.NewClusterSpec(func(s *cluster.Spec) {
643+
s.Cluster = &eksav1alpha1.Cluster{
644+
TypeMeta: metav1.TypeMeta{},
645+
ObjectMeta: metav1.ObjectMeta{
646+
Name: "test-cluster-with-audit-logs",
647+
},
648+
Spec: eksav1alpha1.ClusterSpec{
649+
ControlPlaneConfiguration: eksav1alpha1.ControlPlaneConfiguration{
650+
Endpoint: &eksav1alpha1.Endpoint{
651+
Host: "1.1.1.1",
652+
},
653+
},
654+
DatacenterRef: eksav1alpha1.Ref{
655+
Kind: eksav1alpha1.VSphereDatacenterKind,
656+
Name: "test-datacenter",
657+
},
658+
},
659+
Status: eksav1alpha1.ClusterStatus{},
660+
}
661+
})
662+
663+
t.Run("Generate diagnostic bundle with audit logs enabled", func(t *testing.T) {
664+
kubeconfig := "test-cluster-with-audit.kubeconfig"
665+
666+
provider := givenProvider(t)
667+
provider.EXPECT().MachineConfigs(spec).Return(machineConfigs())
668+
669+
a := givenMockAnalyzerFactory(t)
670+
a.EXPECT().DataCenterConfigAnalyzers(spec.Cluster.Spec.DatacenterRef).Return(nil)
671+
a.EXPECT().DefaultAnalyzers().Return(nil)
672+
a.EXPECT().EksaLogTextAnalyzers(gomock.Any()).Return(nil)
673+
a.EXPECT().ManagementClusterAnalyzers().Return(nil)
674+
a.EXPECT().PackageAnalyzers().Return(nil)
675+
676+
c := givenMockCollectorsFactory(t)
677+
c.EXPECT().DefaultCollectors().Return(nil)
678+
c.EXPECT().EksaHostCollectors(gomock.Any()).Return(nil)
679+
c.EXPECT().HostCollectors(spec.Cluster.Spec.DatacenterRef).Return(nil)
680+
c.EXPECT().ManagementClusterCollectors().Return(nil)
681+
c.EXPECT().DataCenterConfigCollectors(spec.Cluster.Spec.DatacenterRef, spec).Return(nil)
682+
c.EXPECT().PackagesCollectors().Return(nil)
683+
c.EXPECT().FileCollectors(gomock.Any()).Return(nil)
684+
685+
auditLogCollectors := []*diagnostics.Collect{
686+
{
687+
RunDaemonSet: &diagnostics.RunDaemonSet{
688+
Name: "audit-logs",
689+
Namespace: constants.EksaDiagnosticsNamespace,
690+
PodSpec: &v1.PodSpec{
691+
Containers: []v1.Container{{
692+
Name: "audit-logs-collector",
693+
Image: "test-image",
694+
Command: []string{"/bin/sh", "-c"},
695+
Args: []string{
696+
"cat /hostlogs/kubernetes/api-audit.log",
697+
},
698+
VolumeMounts: []v1.VolumeMount{{
699+
Name: "host-var-log",
700+
MountPath: "/hostlogs",
701+
ReadOnly: true,
702+
}},
703+
}},
704+
Volumes: []v1.Volume{{
705+
Name: "host-var-log",
706+
VolumeSource: v1.VolumeSource{
707+
HostPath: &v1.HostPathVolumeSource{
708+
Path: "/var/log",
709+
},
710+
},
711+
}},
712+
NodeSelector: map[string]string{
713+
"node-role.kubernetes.io/control-plane": "",
714+
},
715+
Tolerations: []v1.Toleration{{
716+
Key: "node-role.kubernetes.io/control-plane",
717+
Operator: "Exists",
718+
Effect: "NoSchedule",
719+
}},
720+
},
721+
Timeout: "60s",
722+
},
723+
},
724+
}
725+
c.EXPECT().AuditLogCollectors().Return(auditLogCollectors)
726+
727+
w := givenWriter(t)
728+
w.EXPECT().Write(gomock.Any(), gomock.Any()).AnyTimes()
729+
730+
opts := diagnostics.EksaDiagnosticBundleFactoryOpts{
731+
AnalyzerFactory: a,
732+
CollectorFactory: c,
733+
Writer: w,
734+
}
735+
736+
factory := diagnostics.NewFactory(opts)
737+
bundle, err := factory.DiagnosticBundleWorkloadCluster(spec, provider, kubeconfig, true)
738+
if err != nil {
739+
t.Errorf("DiagnosticBundleWorkloadCluster() error = %v, wantErr nil", err)
740+
return
741+
}
742+
743+
if bundle == nil {
744+
t.Errorf("DiagnosticBundleWorkloadCluster() returned nil bundle")
745+
return
746+
}
747+
})
748+
}

0 commit comments

Comments
 (0)