diff --git a/examples/configuration/README.md b/examples/configuration/README.md
index 3836185a..e6901571 100644
--- a/examples/configuration/README.md
+++ b/examples/configuration/README.md
@@ -8,6 +8,7 @@ Create a file called `config.yaml` in `/etc/rancher/agent` with the contents lik
 workDirectory: /var/lib/rancher/agent/work
 localPlanDirectory: /var/lib/rancher/agent/plans
 remoteEnabled: true
+remoteSkipAlreadyApplied: false
 connectionInfoFile: /etc/rancher/agent/conninfo.yaml
 preserveWorkDirectory: true
 ```
@@ -49,4 +50,4 @@ data:
   plan: e2luc3RydWN0aW9uczpbbmFtZTppbnN0YWxsLWszc119IHtpbnN0cnVjdGlvbnM6W2ltYWdlOmRvY2tlci5pby9yYW5jaGVyL3N5c3RlbS1hZ2VudC1pbnN0YWxsZXItazNzOnYxLjIxLjAtazNzMV19Cg==
 ```
 
-The above secret is going to install K3s.
\ No newline at end of file
+The above secret is going to install K3s.
diff --git a/main.go b/main.go
index 544b1b1b..1a7a6af5 100644
--- a/main.go
+++ b/main.go
@@ -92,7 +92,7 @@ func run(c *cli.Context) error {
 			return fmt.Errorf("unable to parse connection info file: %w", err)
 		}
 
-		k8splan.Watch(topContext, *applyinator, connInfo)
+		k8splan.Watch(topContext, *applyinator, connInfo, cf.RemoteSkipAlreadyApplied)
 	}
 
 	if cf.LocalEnabled {
diff --git a/pkg/config/config.go b/pkg/config/config.go
index a04e1778..a97e9c55 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -17,6 +17,7 @@ type AgentConfig struct {
 	LocalPlanDir       string `json:"localPlanDirectory,omitempty"`
 	AppliedPlanDir     string `json:"appliedPlanDirectory,omitempty"`
 	RemoteEnabled      bool   `json:"remoteEnabled,omitempty"`
+	RemoteSkipAlreadyApplied bool `json:"remoteSkipAlreadyApplied,omitempty"`
 	ConnectionInfoFile string `json:"connectionInfoFile,omitempty"`
 	PreserveWorkDir    bool   `json:"preserveWorkDirectory,omitempty"`
 	ImagesDir          string `json:"imagesDirectory,omitempty"`
diff --git a/pkg/k8splan/watcher.go b/pkg/k8splan/watcher.go
index 68fb84db..bdaa8327 100644
--- a/pkg/k8splan/watcher.go
+++ b/pkg/k8splan/watcher.go
@@ -54,10 +54,11 @@ const (
 	cooldownTimerDuration = "30s"
 )
 
-func Watch(ctx context.Context, applyinator applyinator.Applyinator, connInfo config.ConnectionInfo) {
+func Watch(ctx context.Context, applyinator applyinator.Applyinator, connInfo config.ConnectionInfo, skipAlreadyApplied bool) {
 	w := &watcher{
-		connInfo:    connInfo,
-		applyinator: applyinator,
+		connInfo:           connInfo,
+		applyinator:        applyinator,
+		skipAlreadyApplied: skipAlreadyApplied,
 	}
 
 	go w.start(ctx)
@@ -68,6 +69,7 @@ type watcher struct {
 	applyinator                applyinator.Applyinator
 	lastAppliedResourceVersion string
 	secretUID                  string
+	skipAlreadyApplied         bool
 }
 
 func toInt(resourceVersion string) int {
@@ -198,12 +200,14 @@ func (w *watcher) start(ctx context.Context) {
 			}
 
 			logrus.Tracef("[K8s] Calculated checksum to be %s", cp.Checksum)
 
+			alreadyApplied := false
 			if secretChecksumData, ok := secret.Data[appliedChecksumKey]; ok {
 				secretChecksum := string(secretChecksumData)
 				logrus.Tracef("[K8s] Remote plan had an applied checksum value of %s", secretChecksum)
 				if secretChecksum == cp.Checksum {
 					logrus.Debugf("[K8s] Applied checksum was the same as the plan from remote. Not applying.")
 					needsApplied = false
+					alreadyApplied = true
 				}
 			}
 
@@ -213,6 +217,11 @@ func (w *watcher) start(ctx context.Context) {
 				hasRunOnce = true
 			}
 
+			if w.skipAlreadyApplied && alreadyApplied {
+				logrus.Debugf("[K8s] Skipping already applied plan.")
+				needsApplied = false
+			}
+
 			// Check to see if we've exceeded our failure count threshold
 			var maxFailureThreshold int
 			if rawMaxFailureThreshold, ok := secret.Data[maxFailuresKey]; ok && len(rawMaxFailureThreshold) > 0 {