diff --git a/vertical-pod-autoscaler/common/flags.go b/vertical-pod-autoscaler/common/flags.go
index 82da351c20bf..b1b301705dc7 100644
--- a/vertical-pod-autoscaler/common/flags.go
+++ b/vertical-pod-autoscaler/common/flags.go
@@ -35,18 +35,38 @@ type CommonFlags struct {
IgnoredVpaObjectNamespaces string
}
+// DefaultCommonConfig returns the default values for common flags
+func DefaultCommonConfig() *CommonFlags {
+ return &CommonFlags{
+ KubeConfig: "",
+ KubeApiQps: 50.0,
+ KubeApiBurst: 100.0,
+ EnableProfiling: false,
+ VpaObjectNamespace: corev1.NamespaceAll,
+ IgnoredVpaObjectNamespaces: "",
+ }
+}
+
// InitCommonFlags initializes the common flags
func InitCommonFlags() *CommonFlags {
- cf := &CommonFlags{}
- flag.StringVar(&cf.KubeConfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
- flag.Float64Var(&cf.KubeApiQps, "kube-api-qps", 50.0, "QPS limit when making requests to Kubernetes apiserver")
- flag.Float64Var(&cf.KubeApiBurst, "kube-api-burst", 100.0, "QPS burst limit when making requests to Kubernetes apiserver")
- flag.BoolVar(&cf.EnableProfiling, "profiling", false, "Is debug/pprof endpoint enabled")
- flag.StringVar(&cf.VpaObjectNamespace, "vpa-object-namespace", corev1.NamespaceAll, "Specifies the namespace to search for VPA objects. Leave empty to include all namespaces. If provided, the garbage collector will only clean this namespace.")
- flag.StringVar(&cf.IgnoredVpaObjectNamespaces, "ignored-vpa-object-namespaces", "", "A comma-separated list of namespaces to ignore when searching for VPA objects. Leave empty to avoid ignoring any namespaces. These namespaces will not be cleaned by the garbage collector.")
+ cf := DefaultCommonConfig()
+ flag.StringVar(&cf.KubeConfig, "kubeconfig", cf.KubeConfig, "Path to a kubeconfig. Only required if out-of-cluster.")
+ flag.Float64Var(&cf.KubeApiQps, "kube-api-qps", cf.KubeApiQps, "QPS limit when making requests to Kubernetes apiserver")
+ flag.Float64Var(&cf.KubeApiBurst, "kube-api-burst", cf.KubeApiBurst, "QPS burst limit when making requests to Kubernetes apiserver")
+ flag.BoolVar(&cf.EnableProfiling, "profiling", cf.EnableProfiling, "Is debug/pprof endpoint enabled")
+ flag.StringVar(&cf.VpaObjectNamespace, "vpa-object-namespace", cf.VpaObjectNamespace, "Specifies the namespace to search for VPA objects. Leave empty to include all namespaces. If provided, the garbage collector will only clean this namespace.")
+ flag.StringVar(&cf.IgnoredVpaObjectNamespaces, "ignored-vpa-object-namespaces", cf.IgnoredVpaObjectNamespaces, "A comma-separated list of namespaces to ignore when searching for VPA objects. Leave empty to avoid ignoring any namespaces. These namespaces will not be cleaned by the garbage collector.")
return cf
}
+// ValidateCommonConfig performs validation of the common flags
+func ValidateCommonConfig(config *CommonFlags) {
+ if len(config.VpaObjectNamespace) > 0 && len(config.IgnoredVpaObjectNamespaces) > 0 {
+ klog.ErrorS(nil, "--vpa-object-namespace and --ignored-vpa-object-namespaces are mutually exclusive and can't be set together.")
+ klog.FlushAndExit(klog.ExitFlushTimeout, 1)
+ }
+}
+
// InitLoggingFlags initializes the logging flags
func InitLoggingFlags() {
// Set the default log level to 4 (info)
diff --git a/vertical-pod-autoscaler/docs/flags.md b/vertical-pod-autoscaler/docs/flags.md
index 29db1ec9605b..569349476ffe 100644
--- a/vertical-pod-autoscaler/docs/flags.md
+++ b/vertical-pod-autoscaler/docs/flags.md
@@ -149,7 +149,7 @@ This document is auto-generated from the flag definitions in the VPA updater cod
| `alsologtostderr` | | | log to standard error as well as files (no effect when -logtostderr=true) |
| `evict-after-oom-threshold` | | 10m0s | duration The default duration to evict pods that have OOMed in less than evict-after-oom-threshold since start. |
| `eviction-rate-burst` | int | 1 | Burst of pods that can be evicted. |
-| `eviction-rate-limit` | float | | Number of pods that can be evicted per seconds. A rate limit set to 0 or -1 will disable
-the rate limiter. (default -1) |
+| `eviction-rate-limit` | float | -1 | Number of pods that can be evicted per seconds. A rate limit set to 0 or -1 will disable the rate limiter. |
| `eviction-tolerance` | float | 0.5 | Fraction of replica count that can be evicted for update, if more than one pod can be evicted. |
| `feature-gates` | mapStringBool | | A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
AllAlpha=true\|false (ALPHA - default=false)
AllBeta=true\|false (BETA - default=false)
CPUStartupBoost=true\|false (ALPHA - default=false)
PerVPAConfig=true\|false (ALPHA - default=false) |
| `ignored-vpa-object-namespaces` | string | | A comma-separated list of namespaces to ignore when searching for VPA objects. Leave empty to avoid ignoring any namespaces. These namespaces will not be cleaned by the garbage collector. |
diff --git a/vertical-pod-autoscaler/pkg/admission-controller/certs.go b/vertical-pod-autoscaler/pkg/admission-controller/certs.go
index 2fe64d88bb30..d15d4211e691 100644
--- a/vertical-pod-autoscaler/pkg/admission-controller/certs.go
+++ b/vertical-pod-autoscaler/pkg/admission-controller/certs.go
@@ -32,11 +32,6 @@ import (
"k8s.io/klog/v2"
)
-type certsConfig struct {
- clientCaFile, tlsCertFile, tlsPrivateKey *string
- reload *bool
-}
-
func readFile(filePath string) []byte {
res, err := os.ReadFile(filePath)
if err != nil {
diff --git a/vertical-pod-autoscaler/pkg/admission-controller/config.go b/vertical-pod-autoscaler/pkg/admission-controller/config.go
index dc89fa268017..bda6b58f67f9 100644
--- a/vertical-pod-autoscaler/pkg/admission-controller/config.go
+++ b/vertical-pod-autoscaler/pkg/admission-controller/config.go
@@ -28,6 +28,8 @@ import (
"k8s.io/client-go/kubernetes"
typedadmregv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1"
"k8s.io/klog/v2"
+
+ "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/config"
)
const (
@@ -36,7 +38,7 @@ const (
)
// MutatingWebhookConfigurationInterface
-func configTLS(cfg certsConfig, minTlsVersion, ciphers string, stop <-chan struct{}, mutatingWebhookClient typedadmregv1.MutatingWebhookConfigurationInterface) *tls.Config {
+func configTLS(cfg config.CertsConfig, minTlsVersion, ciphers string, stop <-chan struct{}, mutatingWebhookClient typedadmregv1.MutatingWebhookConfigurationInterface) *tls.Config {
var tlsVersion uint16
var ciphersuites []uint16
reverseCipherMap := make(map[string]uint16)
@@ -67,11 +69,11 @@ func configTLS(cfg certsConfig, minTlsVersion, ciphers string, stop <-chan struc
MinVersion: tlsVersion,
CipherSuites: ciphersuites,
}
- if *cfg.reload {
+ if cfg.Reload {
cr := certReloader{
- tlsCertPath: *cfg.tlsCertFile,
- tlsKeyPath: *cfg.tlsPrivateKey,
- clientCaPath: *cfg.clientCaFile,
+ tlsCertPath: cfg.TlsCertFile,
+ tlsKeyPath: cfg.TlsPrivateKey,
+ clientCaPath: cfg.ClientCaFile,
mutatingWebhookClient: mutatingWebhookClient,
}
if err := cr.load(); err != nil {
@@ -82,7 +84,7 @@ func configTLS(cfg certsConfig, minTlsVersion, ciphers string, stop <-chan struc
}
config.GetCertificate = cr.getCertificate
} else {
- cert, err := tls.LoadX509KeyPair(*cfg.tlsCertFile, *cfg.tlsPrivateKey)
+ cert, err := tls.LoadX509KeyPair(cfg.TlsCertFile, cfg.TlsPrivateKey)
if err != nil {
klog.Fatal(err)
}
diff --git a/vertical-pod-autoscaler/pkg/admission-controller/config/config.go b/vertical-pod-autoscaler/pkg/admission-controller/config/config.go
new file mode 100644
index 000000000000..47dacbf3e734
--- /dev/null
+++ b/vertical-pod-autoscaler/pkg/admission-controller/config/config.go
@@ -0,0 +1,133 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+ "flag"
+ "os"
+
+ "github.com/spf13/pflag"
+ "k8s.io/apimachinery/pkg/api/resource"
+ kube_flag "k8s.io/component-base/cli/flag"
+ "k8s.io/klog/v2"
+
+ "k8s.io/autoscaler/vertical-pod-autoscaler/common"
+ "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/features"
+)
+
+// CertsConfig holds configuration related to TLS certificates
+type CertsConfig struct {
+ ClientCaFile string
+ TlsCertFile string
+ TlsPrivateKey string
+ Reload bool
+}
+
+// AdmissionControllerConfig holds all configuration for the admission controller component
+type AdmissionControllerConfig struct {
+ // Common flags
+ CommonFlags *common.CommonFlags
+
+ CertsConfiguration *CertsConfig
+
+ Ciphers string
+ MinTlsVersion string
+ Port int
+ Address string
+ Namespace string
+ ServiceName string
+ WebhookAddress string
+ WebhookPort string
+ WebhookTimeout int
+ WebhookFailurePolicy bool
+ RegisterWebhook bool
+ WebhookLabels string
+ RegisterByURL bool
+
+ MaxAllowedCPUBoost resource.QuantityValue
+}
+
+// DefaultAdmissionControllerConfig returns an AdmissionControllerConfig with default values
+func DefaultAdmissionControllerConfig() *AdmissionControllerConfig {
+ return &AdmissionControllerConfig{
+ CommonFlags: common.DefaultCommonConfig(),
+ CertsConfiguration: &CertsConfig{
+ ClientCaFile: "/etc/tls-certs/caCert.pem",
+ TlsCertFile: "/etc/tls-certs/serverCert.pem",
+ TlsPrivateKey: "/etc/tls-certs/serverKey.pem",
+ Reload: false,
+ },
+ Ciphers: "",
+ MinTlsVersion: "tls1_2",
+ Port: 8000,
+ Address: ":8944",
+ Namespace: os.Getenv("NAMESPACE"),
+ ServiceName: "vpa-webhook",
+ WebhookAddress: "",
+ WebhookPort: "",
+ WebhookTimeout: 30,
+ WebhookFailurePolicy: false,
+ RegisterWebhook: true,
+ WebhookLabels: "",
+ RegisterByURL: false,
+
+ MaxAllowedCPUBoost: resource.QuantityValue{},
+ }
+}
+
+// InitAdmissionControllerFlags initializes the flags for the admission controller component
+func InitAdmissionControllerFlags() *AdmissionControllerConfig {
+ config := DefaultAdmissionControllerConfig()
+ config.CommonFlags = common.InitCommonFlags()
+
+ flag.StringVar(&config.CertsConfiguration.ClientCaFile, "client-ca-file", config.CertsConfiguration.ClientCaFile, "Path to CA PEM file.")
+ flag.StringVar(&config.CertsConfiguration.TlsCertFile, "tls-cert-file", config.CertsConfiguration.TlsCertFile, "Path to server certificate PEM file.")
+ flag.StringVar(&config.CertsConfiguration.TlsPrivateKey, "tls-private-key", config.CertsConfiguration.TlsPrivateKey, "Path to server certificate key PEM file.")
+ flag.BoolVar(&config.CertsConfiguration.Reload, "reload-cert", config.CertsConfiguration.Reload, "If set to true, reload leaf and CA certificates when changed.")
+
+ flag.StringVar(&config.Ciphers, "tls-ciphers", config.Ciphers, "A comma-separated or colon-separated list of ciphers to accept. Only works when min-tls-version is set to tls1_2.")
+ flag.StringVar(&config.MinTlsVersion, "min-tls-version", config.MinTlsVersion, "The minimum TLS version to accept. Must be set to either tls1_2 (default) or tls1_3.")
+ flag.IntVar(&config.Port, "port", config.Port, "The port to listen on.")
+ flag.StringVar(&config.Address, "address", config.Address, "The address to expose Prometheus metrics.")
+ flag.StringVar(&config.ServiceName, "webhook-service", config.ServiceName, "Kubernetes service under which webhook is registered. Used when registerByURL is set to false.")
+ flag.StringVar(&config.WebhookAddress, "webhook-address", config.WebhookAddress, "Address under which webhook is registered. Used when registerByURL is set to true.")
+ flag.StringVar(&config.WebhookPort, "webhook-port", config.WebhookPort, "Server Port for Webhook")
+ flag.IntVar(&config.WebhookTimeout, "webhook-timeout-seconds", config.WebhookTimeout, "Timeout in seconds that the API server should wait for this webhook to respond before failing.")
+ flag.BoolVar(&config.WebhookFailurePolicy, "webhook-failure-policy-fail", config.WebhookFailurePolicy, "If set to true, will configure the admission webhook failurePolicy to \"Fail\". Use with caution.")
+ flag.BoolVar(&config.RegisterWebhook, "register-webhook", config.RegisterWebhook, "If set to true, admission webhook object will be created on start up to register with the API server.")
+ flag.StringVar(&config.WebhookLabels, "webhook-labels", config.WebhookLabels, "Comma separated list of labels to add to the webhook object. Format: key1:value1,key2:value2")
+ flag.BoolVar(&config.RegisterByURL, "register-by-url", config.RegisterByURL, "If set to true, admission webhook will be registered by URL (webhookAddress:webhookPort) instead of by service name")
+
+ flag.Var(&config.MaxAllowedCPUBoost, "max-allowed-cpu-boost", "Maximum amount of CPU that will be applied for a container with boost.")
+
+ // These need to happen last. kube_flag.InitFlags() synchronizes and parses
+ // flags from the flag package to pflag, so feature gates must be added to
+ // pflag before InitFlags() is called.
+ klog.InitFlags(nil)
+ common.InitLoggingFlags()
+ features.MutableFeatureGate.AddFlag(pflag.CommandLine)
+ kube_flag.InitFlags()
+
+ ValidateAdmissionControllerConfig(config)
+
+ return config
+}
+
+// ValidateAdmissionControllerConfig performs validation of the admission-controller flags
+func ValidateAdmissionControllerConfig(config *AdmissionControllerConfig) {
+ common.ValidateCommonConfig(config.CommonFlags)
+}
diff --git a/vertical-pod-autoscaler/pkg/admission-controller/main.go b/vertical-pod-autoscaler/pkg/admission-controller/main.go
index 84e075cd1164..6f834891d2d2 100644
--- a/vertical-pod-autoscaler/pkg/admission-controller/main.go
+++ b/vertical-pod-autoscaler/pkg/admission-controller/main.go
@@ -17,29 +17,25 @@ limitations under the License.
package main
import (
- "flag"
"fmt"
"net/http"
"os"
"strings"
"time"
- "github.com/spf13/pflag"
- "k8s.io/apimachinery/pkg/api/resource"
"k8s.io/client-go/informers"
kube_client "k8s.io/client-go/kubernetes"
typedadmregv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1"
- kube_flag "k8s.io/component-base/cli/flag"
"k8s.io/klog/v2"
"k8s.io/autoscaler/vertical-pod-autoscaler/common"
+ admissioncontroller_config "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/config"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/logic"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource/pod"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa"
vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
- "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/features"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target"
controllerfetcher "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target/controller_fetcher"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange"
@@ -59,58 +55,24 @@ const (
webHookDelay = 10 * time.Second
)
-var (
- certsConfiguration = &certsConfig{
- clientCaFile: flag.String("client-ca-file", "/etc/tls-certs/caCert.pem", "Path to CA PEM file."),
- tlsCertFile: flag.String("tls-cert-file", "/etc/tls-certs/serverCert.pem", "Path to server certificate PEM file."),
- tlsPrivateKey: flag.String("tls-private-key", "/etc/tls-certs/serverKey.pem", "Path to server certificate key PEM file."),
- reload: flag.Bool("reload-cert", false, "If set to true, reload leaf and CA certificates when changed."),
- }
- ciphers = flag.String("tls-ciphers", "", "A comma-separated or colon-separated list of ciphers to accept. Only works when min-tls-version is set to tls1_2.")
- minTlsVersion = flag.String("min-tls-version", "tls1_2", "The minimum TLS version to accept. Must be set to either tls1_2 (default) or tls1_3.")
- port = flag.Int("port", 8000, "The port to listen on.")
- address = flag.String("address", ":8944", "The address to expose Prometheus metrics.")
- namespace = os.Getenv("NAMESPACE")
- serviceName = flag.String("webhook-service", "vpa-webhook", "Kubernetes service under which webhook is registered. Used when registerByURL is set to false.")
- webhookAddress = flag.String("webhook-address", "", "Address under which webhook is registered. Used when registerByURL is set to true.")
- webhookPort = flag.String("webhook-port", "", "Server Port for Webhook")
- webhookTimeout = flag.Int("webhook-timeout-seconds", 30, "Timeout in seconds that the API server should wait for this webhook to respond before failing.")
- webHookFailurePolicy = flag.Bool("webhook-failure-policy-fail", false, "If set to true, will configure the admission webhook failurePolicy to \"Fail\". Use with caution.")
- registerWebhook = flag.Bool("register-webhook", true, "If set to true, admission webhook object will be created on start up to register with the API server.")
- webhookLabels = flag.String("webhook-labels", "", "Comma separated list of labels to add to the webhook object. Format: key1:value1,key2:value2")
- registerByURL = flag.Bool("register-by-url", false, "If set to true, admission webhook will be registered by URL (webhookAddress:webhookPort) instead of by service name")
- maxAllowedCPUBoost = resource.QuantityValue{}
-)
-
-func init() {
- flag.Var(&maxAllowedCPUBoost, "max-allowed-cpu-boost", "Maximum amount of CPU that will be applied for a container with boost.")
-}
-
func main() {
- commonFlags := common.InitCommonFlags()
- klog.InitFlags(nil)
- common.InitLoggingFlags()
- features.MutableFeatureGate.AddFlag(pflag.CommandLine)
- kube_flag.InitFlags()
- klog.V(1).InfoS("Starting Vertical Pod Autoscaler Admission Controller", "version", common.VerticalPodAutoscalerVersion())
+ config := admissioncontroller_config.InitAdmissionControllerFlags()
- if len(commonFlags.VpaObjectNamespace) > 0 && len(commonFlags.IgnoredVpaObjectNamespaces) > 0 {
- klog.ErrorS(nil, "--vpa-object-namespace and --ignored-vpa-object-namespaces are mutually exclusive and can't be set together.")
- klog.FlushAndExit(klog.ExitFlushTimeout, 1)
- }
+ klog.V(1).InfoS("Starting Vertical Pod Autoscaler Admission Controller", "version", common.VerticalPodAutoscalerVersion())
healthCheck := metrics.NewHealthCheck(time.Minute)
metrics_admission.Register()
- server.Initialize(&commonFlags.EnableProfiling, healthCheck, address)
+ server.Initialize(&config.CommonFlags.EnableProfiling, healthCheck, &config.Address)
+
+ kubeConfig := common.CreateKubeConfigOrDie(config.CommonFlags.KubeConfig, float32(config.CommonFlags.KubeApiQps), int(config.CommonFlags.KubeApiBurst))
- config := common.CreateKubeConfigOrDie(commonFlags.KubeConfig, float32(commonFlags.KubeApiQps), int(commonFlags.KubeApiBurst))
+ vpaClient := vpa_clientset.NewForConfigOrDie(kubeConfig)
+ vpaLister := vpa_api_util.NewVpasLister(vpaClient, make(chan struct{}), config.CommonFlags.VpaObjectNamespace)
+ kubeClient := kube_client.NewForConfigOrDie(kubeConfig)
+ factory := informers.NewSharedInformerFactoryWithOptions(kubeClient, defaultResyncPeriod, informers.WithNamespace(config.CommonFlags.VpaObjectNamespace))
+ targetSelectorFetcher := target.NewVpaTargetSelectorFetcher(kubeConfig, kubeClient, factory)
+ controllerFetcher := controllerfetcher.NewControllerFetcher(kubeConfig, kubeClient, factory, scaleCacheEntryFreshnessTime, scaleCacheEntryLifetime, scaleCacheEntryJitterFactor)
- vpaClient := vpa_clientset.NewForConfigOrDie(config)
- vpaLister := vpa_api_util.NewVpasLister(vpaClient, make(chan struct{}), commonFlags.VpaObjectNamespace)
- kubeClient := kube_client.NewForConfigOrDie(config)
- factory := informers.NewSharedInformerFactoryWithOptions(kubeClient, defaultResyncPeriod, informers.WithNamespace(commonFlags.VpaObjectNamespace))
- targetSelectorFetcher := target.NewVpaTargetSelectorFetcher(config, kubeClient, factory)
- controllerFetcher := controllerfetcher.NewControllerFetcher(config, kubeClient, factory, scaleCacheEntryFreshnessTime, scaleCacheEntryLifetime, scaleCacheEntryJitterFactor)
podPreprocessor := pod.NewDefaultPreProcessor()
vpaPreprocessor := vpa.NewDefaultPreProcessor()
var limitRangeCalculator limitrange.LimitRangeCalculator
@@ -140,8 +102,8 @@ func main() {
}
statusNamespace := status.AdmissionControllerStatusNamespace
- if namespace != "" {
- statusNamespace = namespace
+ if config.Namespace != "" {
+ statusNamespace = config.Namespace
}
statusUpdater := status.NewUpdater(
kubeClient,
@@ -151,37 +113,37 @@ func main() {
hostname,
)
- calculators := []patch.Calculator{patch.NewResourceUpdatesCalculator(recommendationProvider, maxAllowedCPUBoost), patch.NewObservedContainersCalculator()}
+ calculators := []patch.Calculator{patch.NewResourceUpdatesCalculator(recommendationProvider, config.MaxAllowedCPUBoost), patch.NewObservedContainersCalculator()}
as := logic.NewAdmissionServer(podPreprocessor, vpaPreprocessor, limitRangeCalculator, vpaMatcher, calculators)
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
as.Serve(w, r)
healthCheck.UpdateLastActivity()
})
var mutatingWebhookClient typedadmregv1.MutatingWebhookConfigurationInterface
- if *registerWebhook {
+ if config.RegisterWebhook {
mutatingWebhookClient = kubeClient.AdmissionregistrationV1().MutatingWebhookConfigurations()
}
server := &http.Server{
- Addr: fmt.Sprintf(":%d", *port),
- TLSConfig: configTLS(*certsConfiguration, *minTlsVersion, *ciphers, stopCh, mutatingWebhookClient),
+ Addr: fmt.Sprintf(":%d", config.Port),
+ TLSConfig: configTLS(*config.CertsConfiguration, config.MinTlsVersion, config.Ciphers, stopCh, mutatingWebhookClient),
}
- url := fmt.Sprintf("%v:%v", *webhookAddress, *webhookPort)
- ignoredNamespaces := strings.Split(commonFlags.IgnoredVpaObjectNamespaces, ",")
+ url := fmt.Sprintf("%v:%v", config.WebhookAddress, config.WebhookPort)
+ ignoredNamespaces := strings.Split(config.CommonFlags.IgnoredVpaObjectNamespaces, ",")
go func() {
- if *registerWebhook {
+ if config.RegisterWebhook {
selfRegistration(
kubeClient,
- readFile(*certsConfiguration.clientCaFile),
+ readFile(config.CertsConfiguration.ClientCaFile),
webHookDelay,
- namespace,
- *serviceName,
+ config.Namespace,
+ config.ServiceName,
url,
- *registerByURL,
- int32(*webhookTimeout),
- commonFlags.VpaObjectNamespace,
+ config.RegisterByURL,
+ int32(config.WebhookTimeout),
+ config.CommonFlags.VpaObjectNamespace,
ignoredNamespaces,
- *webHookFailurePolicy,
- *webhookLabels,
+ config.WebhookFailurePolicy,
+ config.WebhookLabels,
)
}
// Start status updates after the webhook is initialized.
diff --git a/vertical-pod-autoscaler/pkg/recommender/config/config.go b/vertical-pod-autoscaler/pkg/recommender/config/config.go
new file mode 100644
index 000000000000..00475b06bf1e
--- /dev/null
+++ b/vertical-pod-autoscaler/pkg/recommender/config/config.go
@@ -0,0 +1,281 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+ "flag"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/spf13/pflag"
+ "k8s.io/apimachinery/pkg/api/resource"
+ kube_flag "k8s.io/component-base/cli/flag"
+ "k8s.io/klog/v2"
+
+ "k8s.io/autoscaler/vertical-pod-autoscaler/common"
+ "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/features"
+ "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/input"
+ "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model"
+)
+
+// RecommenderConfig holds all configuration for the recommender component
+type RecommenderConfig struct {
+ // Common flags
+ CommonFlags *common.CommonFlags
+
+ // Recommender-specific flags
+ RecommenderName string
+ MetricsFetcherInterval time.Duration
+ CheckpointsGCInterval time.Duration
+ CheckpointsWriteTimeout time.Duration
+ Address string
+ Storage string
+ MemorySaver bool
+ UpdateWorkerCount int
+ MinCheckpointsPerRun int
+
+ // Recommendation configuration
+ SafetyMarginFraction float64
+ PodMinCPUMillicores float64
+ PodMinMemoryMb float64
+ TargetCPUPercentile float64
+ LowerBoundCPUPercentile float64
+ UpperBoundCPUPercentile float64
+ ConfidenceIntervalCPU time.Duration
+ TargetMemoryPercentile float64
+ LowerBoundMemoryPercentile float64
+ UpperBoundMemoryPercentile float64
+ ConfidenceIntervalMemory time.Duration
+ HumanizeMemory bool
+ RoundCPUMillicores int
+ RoundMemoryBytes int
+
+ // Prometheus history provider configuration
+ PrometheusAddress string
+ PrometheusInsecure bool
+ PrometheusJobName string
+ HistoryLength string
+ HistoryResolution string
+ QueryTimeout string
+ PodLabelPrefix string
+ PodLabelsMetricName string
+ PodNamespaceLabel string
+ PodNameLabel string
+ CtrNamespaceLabel string
+ CtrPodNameLabel string
+ CtrNameLabel string
+ Username string
+ Password string
+ PrometheusBearerToken string
+ PrometheusBearerTokenFile string
+
+ // External metrics provider configuration
+ UseExternalMetrics bool
+ ExternalCpuMetric string
+ ExternalMemoryMetric string
+
+ // Aggregation configuration
+ MemoryAggregationInterval time.Duration
+ MemoryAggregationIntervalCount int64
+ MemoryHistogramDecayHalfLife time.Duration
+ CpuHistogramDecayHalfLife time.Duration
+ OOMBumpUpRatio float64
+ OOMMinBumpUp float64
+
+ // Post processors configuration
+ PostProcessorCPUasInteger bool
+ MaxAllowedCPU resource.QuantityValue
+ MaxAllowedMemory resource.QuantityValue
+}
+
+// DefaultRecommenderConfig returns a RecommenderConfig with default values
+func DefaultRecommenderConfig() *RecommenderConfig {
+ return &RecommenderConfig{
+ CommonFlags: common.DefaultCommonConfig(),
+
+ // Recommender-specific flags
+ RecommenderName: input.DefaultRecommenderName,
+ MetricsFetcherInterval: 1 * time.Minute,
+ CheckpointsGCInterval: 10 * time.Minute,
+ CheckpointsWriteTimeout: time.Minute,
+ Address: ":8942",
+ Storage: "",
+ MemorySaver: false,
+ UpdateWorkerCount: 10,
+ MinCheckpointsPerRun: 10,
+
+ // Recommendation configuration
+ SafetyMarginFraction: 0.15,
+ PodMinCPUMillicores: 25,
+ PodMinMemoryMb: 250,
+ TargetCPUPercentile: 0.9,
+ LowerBoundCPUPercentile: 0.5,
+ UpperBoundCPUPercentile: 0.95,
+ ConfidenceIntervalCPU: 24 * time.Hour,
+ TargetMemoryPercentile: 0.9,
+ LowerBoundMemoryPercentile: 0.5,
+ UpperBoundMemoryPercentile: 0.95,
+ ConfidenceIntervalMemory: 24 * time.Hour,
+ HumanizeMemory: false,
+ RoundCPUMillicores: 1,
+ RoundMemoryBytes: 1,
+
+ // Prometheus history provider flags
+ PrometheusAddress: "http://prometheus.monitoring.svc",
+ PrometheusInsecure: false,
+ PrometheusJobName: "kubernetes-cadvisor",
+ HistoryLength: "8d",
+ HistoryResolution: "1h",
+ QueryTimeout: "5m",
+ PodLabelPrefix: "pod_label_",
+ PodLabelsMetricName: "up{job=\"kubernetes-pods\"}",
+ PodNamespaceLabel: "kubernetes_namespace",
+ PodNameLabel: "kubernetes_pod_name",
+ CtrNamespaceLabel: "namespace",
+ CtrPodNameLabel: "pod_name",
+ CtrNameLabel: "name",
+ Username: "",
+ Password: "",
+ PrometheusBearerToken: "",
+ PrometheusBearerTokenFile: "",
+
+ // External metrics provider flags
+ UseExternalMetrics: false,
+ ExternalCpuMetric: "",
+ ExternalMemoryMetric: "",
+
+ // Aggregation configuration flags
+ MemoryAggregationInterval: model.DefaultMemoryAggregationInterval,
+ MemoryAggregationIntervalCount: model.DefaultMemoryAggregationIntervalCount,
+ MemoryHistogramDecayHalfLife: model.DefaultMemoryHistogramDecayHalfLife,
+ CpuHistogramDecayHalfLife: model.DefaultCPUHistogramDecayHalfLife,
+ OOMBumpUpRatio: model.DefaultOOMBumpUpRatio,
+ OOMMinBumpUp: model.DefaultOOMMinBumpUp,
+
+ // Post processors flags
+ PostProcessorCPUasInteger: false,
+ MaxAllowedCPU: resource.QuantityValue{},
+ MaxAllowedMemory: resource.QuantityValue{},
+ }
+}
+
+// InitRecommenderFlags initializes flags for the recommender component
+func InitRecommenderFlags() *RecommenderConfig {
+ config := DefaultRecommenderConfig()
+ config.CommonFlags = common.InitCommonFlags()
+
+ flag.StringVar(&config.RecommenderName, "recommender-name", config.RecommenderName, "Set the recommender name. Recommender will generate recommendations for VPAs that configure the same recommender name. If the recommender name is left as default it will also generate recommendations that don't explicitly specify recommender. You shouldn't run two recommenders with the same name in a cluster.")
+ flag.DurationVar(&config.MetricsFetcherInterval, "recommender-interval", config.MetricsFetcherInterval, `How often metrics should be fetched`)
+ flag.DurationVar(&config.CheckpointsGCInterval, "checkpoints-gc-interval", config.CheckpointsGCInterval, `How often orphaned checkpoints should be garbage collected`)
+ flag.DurationVar(&config.CheckpointsWriteTimeout, "checkpoints-timeout", config.CheckpointsWriteTimeout, `Timeout for writing checkpoints since the start of the recommender's main loop`)
+ flag.StringVar(&config.Address, "address", config.Address, "The address to expose Prometheus metrics.")
+ flag.StringVar(&config.Storage, "storage", config.Storage, `Specifies storage mode. Supported values: prometheus, checkpoint (default)`)
+ flag.BoolVar(&config.MemorySaver, "memory-saver", config.MemorySaver, `If true, only track pods which have an associated VPA`)
+ flag.IntVar(&config.UpdateWorkerCount, "update-worker-count", config.UpdateWorkerCount, "Number of concurrent workers to update VPA recommendations and checkpoints. When increasing this setting, make sure the client-side rate limits ('kube-api-qps' and 'kube-api-burst') are either increased or turned off as well. Determines the minimum number of VPA checkpoints written per recommender loop.")
+ // MinCheckpointsPerRun is deprecated but kept for warning/compatibility.
+ flag.IntVar(&config.MinCheckpointsPerRun, "min-checkpoints", config.MinCheckpointsPerRun, "Minimum number of checkpoints to write per recommender's main loop. WARNING: this flag is deprecated and doesn't have any effect. It will be removed in a future release. Refer to update-worker-count to influence the minimum number of checkpoints written per loop.")
+
+ // Recommendation configuration flags
+ flag.Float64Var(&config.SafetyMarginFraction, "recommendation-margin-fraction", config.SafetyMarginFraction, `Fraction of usage added as the safety margin to the recommended request`)
+ flag.Float64Var(&config.PodMinCPUMillicores, "pod-recommendation-min-cpu-millicores", config.PodMinCPUMillicores, `Minimum CPU recommendation for a pod`)
+ flag.Float64Var(&config.PodMinMemoryMb, "pod-recommendation-min-memory-mb", config.PodMinMemoryMb, `Minimum memory recommendation for a pod`)
+ flag.Float64Var(&config.TargetCPUPercentile, "target-cpu-percentile", config.TargetCPUPercentile, "CPU usage percentile that will be used as a base for CPU target recommendation. Doesn't affect CPU lower bound, CPU upper bound nor memory recommendations.")
+ flag.Float64Var(&config.LowerBoundCPUPercentile, "recommendation-lower-bound-cpu-percentile", config.LowerBoundCPUPercentile, `CPU usage percentile that will be used for the lower bound on CPU recommendation.`)
+ flag.Float64Var(&config.UpperBoundCPUPercentile, "recommendation-upper-bound-cpu-percentile", config.UpperBoundCPUPercentile, `CPU usage percentile that will be used for the upper bound on CPU recommendation.`)
+ flag.DurationVar(&config.ConfidenceIntervalCPU, "confidence-interval-cpu", config.ConfidenceIntervalCPU, "The time interval used for computing the confidence multiplier for the CPU lower and upper bound. Default: 24h")
+ flag.Float64Var(&config.TargetMemoryPercentile, "target-memory-percentile", config.TargetMemoryPercentile, "Memory usage percentile that will be used as a base for memory target recommendation. Doesn't affect memory lower bound nor memory upper bound.")
+ flag.Float64Var(&config.LowerBoundMemoryPercentile, "recommendation-lower-bound-memory-percentile", config.LowerBoundMemoryPercentile, `Memory usage percentile that will be used for the lower bound on memory recommendation.`)
+ flag.Float64Var(&config.UpperBoundMemoryPercentile, "recommendation-upper-bound-memory-percentile", config.UpperBoundMemoryPercentile, `Memory usage percentile that will be used for the upper bound on memory recommendation.`)
+ flag.DurationVar(&config.ConfidenceIntervalMemory, "confidence-interval-memory", config.ConfidenceIntervalMemory, "The time interval used for computing the confidence multiplier for the memory lower and upper bound. Default: 24h")
+ flag.BoolVar(&config.HumanizeMemory, "humanize-memory", config.HumanizeMemory, "DEPRECATED: Convert memory values in recommendations to the highest appropriate SI unit with up to 2 decimal places for better readability. This flag is deprecated and will be removed in a future version. Use --round-memory-bytes instead.")
+ flag.IntVar(&config.RoundCPUMillicores, "round-cpu-millicores", config.RoundCPUMillicores, `CPU recommendation rounding factor in millicores. The CPU value will always be rounded up to the nearest multiple of this factor.`)
+ flag.IntVar(&config.RoundMemoryBytes, "round-memory-bytes", config.RoundMemoryBytes, `Memory recommendation rounding factor in bytes. The Memory value will always be rounded up to the nearest multiple of this factor.`)
+
+ // Prometheus history provider flags
+ flag.StringVar(&config.PrometheusAddress, "prometheus-address", config.PrometheusAddress, `Where to reach for Prometheus metrics`)
+ flag.BoolVar(&config.PrometheusInsecure, "prometheus-insecure", config.PrometheusInsecure, `Skip tls verify if https is used in the prometheus-address`)
+ flag.StringVar(&config.PrometheusJobName, "prometheus-cadvisor-job-name", config.PrometheusJobName, `Name of the prometheus job name which scrapes the cAdvisor metrics`)
+ flag.StringVar(&config.HistoryLength, "history-length", config.HistoryLength, `How much time back prometheus have to be queried to get historical metrics`)
+ flag.StringVar(&config.HistoryResolution, "history-resolution", config.HistoryResolution, `Resolution at which Prometheus is queried for historical metrics`)
+ flag.StringVar(&config.QueryTimeout, "prometheus-query-timeout", config.QueryTimeout, `How long to wait before killing long queries`)
+ flag.StringVar(&config.PodLabelPrefix, "pod-label-prefix", config.PodLabelPrefix, `Which prefix to look for pod labels in metrics`)
+ flag.StringVar(&config.PodLabelsMetricName, "metric-for-pod-labels", config.PodLabelsMetricName, `Which metric to look for pod labels in metrics`)
+ flag.StringVar(&config.PodNamespaceLabel, "pod-namespace-label", config.PodNamespaceLabel, `Label name to look for pod namespaces`)
+ flag.StringVar(&config.PodNameLabel, "pod-name-label", config.PodNameLabel, `Label name to look for pod names`)
+ flag.StringVar(&config.CtrNamespaceLabel, "container-namespace-label", config.CtrNamespaceLabel, `Label name to look for container namespaces`)
+ flag.StringVar(&config.CtrPodNameLabel, "container-pod-name-label", config.CtrPodNameLabel, `Label name to look for container pod names`)
+ flag.StringVar(&config.CtrNameLabel, "container-name-label", config.CtrNameLabel, `Label name to look for container names`)
+ flag.StringVar(&config.Username, "username", config.Username, "The username used in the prometheus server basic auth. Can also be set via the PROMETHEUS_USERNAME environment variable")
+ flag.StringVar(&config.Password, "password", config.Password, "The password used in the prometheus server basic auth. Can also be set via the PROMETHEUS_PASSWORD environment variable")
+ flag.StringVar(&config.PrometheusBearerToken, "prometheus-bearer-token", config.PrometheusBearerToken, "The bearer token used in the Prometheus server bearer token auth")
+ flag.StringVar(&config.PrometheusBearerTokenFile, "prometheus-bearer-token-file", config.PrometheusBearerTokenFile, "Path to the bearer token file used for authentication by the Prometheus server")
+
+ // External metrics provider flags
+ flag.BoolVar(&config.UseExternalMetrics, "use-external-metrics", config.UseExternalMetrics, "ALPHA. Use an external metrics provider instead of metrics_server.")
+ flag.StringVar(&config.ExternalCpuMetric, "external-metrics-cpu-metric", config.ExternalCpuMetric, "ALPHA. Metric to use with external metrics provider for CPU usage.")
+ flag.StringVar(&config.ExternalMemoryMetric, "external-metrics-memory-metric", config.ExternalMemoryMetric, "ALPHA. Metric to use with external metrics provider for memory usage.")
+
+ // Aggregation configuration flags
+ flag.DurationVar(&config.MemoryAggregationInterval, "memory-aggregation-interval", config.MemoryAggregationInterval, `The length of a single interval, for which the peak memory usage is computed. Memory usage peaks are aggregated in multiples of this interval. In other words there is one memory usage sample per interval (the maximum usage over that interval)`)
+ flag.Int64Var(&config.MemoryAggregationIntervalCount, "memory-aggregation-interval-count", config.MemoryAggregationIntervalCount, `The number of consecutive memory-aggregation-intervals which make up the MemoryAggregationWindowLength which in turn is the period for memory usage aggregation by VPA. In other words, MemoryAggregationWindowLength = memory-aggregation-interval * memory-aggregation-interval-count.`)
+ flag.DurationVar(&config.MemoryHistogramDecayHalfLife, "memory-histogram-decay-half-life", config.MemoryHistogramDecayHalfLife, `The amount of time it takes a historical memory usage sample to lose half of its weight. In other words, a fresh usage sample is twice as 'important' as one with age equal to the half life period.`)
+ flag.DurationVar(&config.CpuHistogramDecayHalfLife, "cpu-histogram-decay-half-life", config.CpuHistogramDecayHalfLife, `The amount of time it takes a historical CPU usage sample to lose half of its weight.`)
+ flag.Float64Var(&config.OOMBumpUpRatio, "oom-bump-up-ratio", config.OOMBumpUpRatio, `Default memory bump up ratio when OOM occurs. This value applies to all VPAs unless overridden in the VPA spec. Default is 1.2.`)
+ flag.Float64Var(&config.OOMMinBumpUp, "oom-min-bump-up-bytes", config.OOMMinBumpUp, `Default minimal increase of memory (in bytes) when OOM occurs. This value applies to all VPAs unless overridden in the VPA spec. Default is 100 * 1024 * 1024 (100Mi).`)
+
+ // Post processors flags
+ // CPU as integer to benefit for CPU management Static Policy ( https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy )
+ flag.BoolVar(&config.PostProcessorCPUasInteger, "cpu-integer-post-processor-enabled", config.PostProcessorCPUasInteger, "Enable the cpu-integer recommendation post processor. The post processor will round up CPU recommendations to a whole CPU for pods which were opted in by setting an appropriate label on VPA object (experimental)")
+ flag.Var(&config.MaxAllowedCPU, "container-recommendation-max-allowed-cpu", "Maximum amount of CPU that will be recommended for a container. VerticalPodAutoscaler-level maximum allowed takes precedence over the global maximum allowed.")
+ flag.Var(&config.MaxAllowedMemory, "container-recommendation-max-allowed-memory", "Maximum amount of memory that will be recommended for a container. VerticalPodAutoscaler-level maximum allowed takes precedence over the global maximum allowed.")
+
+ // These need to happen last. kube_flag.InitFlags() synchronizes and parses
+ // flags from the flag package to pflag, so feature gates must be added to
+ // pflag before InitFlags() is called.
+ klog.InitFlags(nil)
+ common.InitLoggingFlags()
+ features.MutableFeatureGate.AddFlag(pflag.CommandLine)
+ kube_flag.InitFlags()
+
+ ValidateRecommenderConfig(config)
+
+ return config
+}
+
+// ValidateRecommenderConfig performs validation of the recommender flags
+func ValidateRecommenderConfig(config *RecommenderConfig) {
+ common.ValidateCommonConfig(config.CommonFlags)
+
+ if config.MinCheckpointsPerRun != 10 { // Default value is 10
+ klog.InfoS("DEPRECATION WARNING: The 'min-checkpoints' flag is deprecated and has no effect. It will be removed in a future release.")
+ }
+
+ if config.PrometheusBearerToken != "" && config.PrometheusBearerTokenFile != "" && config.Username != "" {
+ klog.ErrorS(nil, "--prometheus-bearer-token, --prometheus-bearer-token-file and --username are mutually exclusive and can't be set together.")
+ klog.FlushAndExit(klog.ExitFlushTimeout, 1)
+ }
+
+ if config.PrometheusBearerTokenFile != "" {
+ fileContent, err := os.ReadFile(config.PrometheusBearerTokenFile)
+ if err != nil {
+ klog.ErrorS(err, "Unable to read bearer token file", "filename", config.PrometheusBearerTokenFile)
+ klog.FlushAndExit(klog.ExitFlushTimeout, 1)
+ }
+ config.PrometheusBearerToken = strings.TrimSpace(string(fileContent))
+ }
+}
diff --git a/vertical-pod-autoscaler/pkg/recommender/logic/recommender.go b/vertical-pod-autoscaler/pkg/recommender/logic/recommender.go
index b7b6f8a75b21..e3b530448537 100644
--- a/vertical-pod-autoscaler/pkg/recommender/logic/recommender.go
+++ b/vertical-pod-autoscaler/pkg/recommender/logic/recommender.go
@@ -17,7 +17,6 @@ limitations under the License.
package logic
import (
- "flag"
"sort"
"time"
@@ -25,22 +24,27 @@ import (
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model"
)
-var (
- safetyMarginFraction = flag.Float64("recommendation-margin-fraction", 0.15, `Fraction of usage added as the safety margin to the recommended request`)
- podMinCPUMillicores = flag.Float64("pod-recommendation-min-cpu-millicores", 25, `Minimum CPU recommendation for a pod`)
- podMinMemoryMb = flag.Float64("pod-recommendation-min-memory-mb", 250, `Minimum memory recommendation for a pod`)
- targetCPUPercentile = flag.Float64("target-cpu-percentile", 0.9, "CPU usage percentile that will be used as a base for CPU target recommendation. Doesn't affect CPU lower bound, CPU upper bound nor memory recommendations.")
- lowerBoundCPUPercentile = flag.Float64("recommendation-lower-bound-cpu-percentile", 0.5, `CPU usage percentile that will be used for the lower bound on CPU recommendation.`)
- upperBoundCPUPercentile = flag.Float64("recommendation-upper-bound-cpu-percentile", 0.95, `CPU usage percentile that will be used for the upper bound on CPU recommendation.`)
- confidenceIntervalCPU = flag.Duration("confidence-interval-cpu", time.Hour*24, "The time interval used for computing the confidence multiplier for the CPU lower and upper bound. Default: 24h")
- targetMemoryPercentile = flag.Float64("target-memory-percentile", 0.9, "Memory usage percentile that will be used as a base for memory target recommendation. Doesn't affect memory lower bound nor memory upper bound.")
- lowerBoundMemoryPercentile = flag.Float64("recommendation-lower-bound-memory-percentile", 0.5, `Memory usage percentile that will be used for the lower bound on memory recommendation.`)
- upperBoundMemoryPercentile = flag.Float64("recommendation-upper-bound-memory-percentile", 0.95, `Memory usage percentile that will be used for the upper bound on memory recommendation.`)
- confidenceIntervalMemory = flag.Duration("confidence-interval-memory", time.Hour*24, "The time interval used for computing the confidence multiplier for the memory lower and upper bound. Default: 24h")
- humanizeMemory = flag.Bool("humanize-memory", false, "DEPRECATED: Convert memory values in recommendations to the highest appropriate SI unit with up to 2 decimal places for better readability. This flag is deprecated and will be removed in a future version. Use --round-memory-bytes instead.")
- roundCPUMillicores = flag.Int("round-cpu-millicores", 1, `CPU recommendation rounding factor in millicores. The CPU value will always be rounded up to the nearest multiple of this factor.`)
- roundMemoryBytes = flag.Int("round-memory-bytes", 1, `Memory recommendation rounding factor in bytes. The Memory value will always be rounded up to the nearest multiple of this factor.`)
-)
+// RecommendationConfig groups all inputs used for resource estimation.
+type RecommendationConfig struct {
+ SafetyMarginFraction float64
+ PodMinCPUMillicores float64
+ PodMinMemoryMb float64
+ TargetCPUPercentile float64
+ LowerBoundCPUPercentile float64
+ UpperBoundCPUPercentile float64
+ ConfidenceIntervalCPU time.Duration
+ TargetMemoryPercentile float64
+ LowerBoundMemoryPercentile float64
+ UpperBoundMemoryPercentile float64
+ ConfidenceIntervalMemory time.Duration
+}
+
+// RecommendationFormat controls how numeric values are rendered in outputs.
+type RecommendationFormat struct {
+ HumanizeMemory bool
+ RoundCPUMillicores int
+ RoundMemoryBytes int
+}
// PodResourceRecommender computes resource recommendation for a Vpa object.
type PodResourceRecommender interface {
@@ -68,6 +72,8 @@ type podResourceRecommender struct {
lowerBoundMemory MemoryEstimator
upperBoundCPU CPUEstimator
upperBoundMemory MemoryEstimator
+ minCPUMillicores float64
+ minMemoryMb float64
}
func (r *podResourceRecommender) GetRecommendedPodResources(containerNameToAggregateStateMap model.ContainerNameToAggregateStateMap) RecommendedPodResources {
@@ -77,16 +83,18 @@ func (r *podResourceRecommender) GetRecommendedPodResources(containerNameToAggre
}
fraction := 1.0 / float64(len(containerNameToAggregateStateMap))
- minCPU := model.ScaleResource(model.CPUAmountFromCores(*podMinCPUMillicores*0.001), fraction)
- minMemory := model.ScaleResource(model.MemoryAmountFromBytes(*podMinMemoryMb*1024*1024), fraction)
+ minCPU := model.ScaleResource(model.CPUAmountFromCores(r.minCPUMillicores*0.001), fraction)
+ minMemory := model.ScaleResource(model.MemoryAmountFromBytes(r.minMemoryMb*1024*1024), fraction)
recommender := &podResourceRecommender{
- WithCPUMinResource(minCPU, r.targetCPU),
- WithMemoryMinResource(minMemory, r.targetMemory),
- WithCPUMinResource(minCPU, r.lowerBoundCPU),
- WithMemoryMinResource(minMemory, r.lowerBoundMemory),
- WithCPUMinResource(minCPU, r.upperBoundCPU),
- WithMemoryMinResource(minMemory, r.upperBoundMemory),
+ targetCPU: WithCPUMinResource(minCPU, r.targetCPU),
+ targetMemory: WithMemoryMinResource(minMemory, r.targetMemory),
+ lowerBoundCPU: WithCPUMinResource(minCPU, r.lowerBoundCPU),
+ lowerBoundMemory: WithMemoryMinResource(minMemory, r.lowerBoundMemory),
+ upperBoundCPU: WithCPUMinResource(minCPU, r.upperBoundCPU),
+ upperBoundMemory: WithMemoryMinResource(minMemory, r.upperBoundMemory),
+ minCPUMillicores: r.minCPUMillicores,
+ minMemoryMb: r.minMemoryMb,
}
for containerName, aggregatedContainerState := range containerNameToAggregateStateMap {
@@ -120,24 +128,24 @@ func FilterControlledResources(estimation model.Resources, controlledResources [
}
// CreatePodResourceRecommender returns the primary recommender.
-func CreatePodResourceRecommender() PodResourceRecommender {
- targetCPU := NewPercentileCPUEstimator(*targetCPUPercentile)
- lowerBoundCPU := NewPercentileCPUEstimator(*lowerBoundCPUPercentile)
- upperBoundCPU := NewPercentileCPUEstimator(*upperBoundCPUPercentile)
+func CreatePodResourceRecommender(config RecommendationConfig) PodResourceRecommender {
+ targetCPU := NewPercentileCPUEstimator(config.TargetCPUPercentile)
+ lowerBoundCPU := NewPercentileCPUEstimator(config.LowerBoundCPUPercentile)
+ upperBoundCPU := NewPercentileCPUEstimator(config.UpperBoundCPUPercentile)
// Create base memory estimators
- targetMemory := NewPercentileMemoryEstimator(*targetMemoryPercentile)
- lowerBoundMemory := NewPercentileMemoryEstimator(*lowerBoundMemoryPercentile)
- upperBoundMemory := NewPercentileMemoryEstimator(*upperBoundMemoryPercentile)
+ targetMemory := NewPercentileMemoryEstimator(config.TargetMemoryPercentile)
+ lowerBoundMemory := NewPercentileMemoryEstimator(config.LowerBoundMemoryPercentile)
+ upperBoundMemory := NewPercentileMemoryEstimator(config.UpperBoundMemoryPercentile)
// Apply safety margins
- targetCPU = WithCPUMargin(*safetyMarginFraction, targetCPU)
- lowerBoundCPU = WithCPUMargin(*safetyMarginFraction, lowerBoundCPU)
- upperBoundCPU = WithCPUMargin(*safetyMarginFraction, upperBoundCPU)
+ targetCPU = WithCPUMargin(config.SafetyMarginFraction, targetCPU)
+ lowerBoundCPU = WithCPUMargin(config.SafetyMarginFraction, lowerBoundCPU)
+ upperBoundCPU = WithCPUMargin(config.SafetyMarginFraction, upperBoundCPU)
- targetMemory = WithMemoryMargin(*safetyMarginFraction, targetMemory)
- lowerBoundMemory = WithMemoryMargin(*safetyMarginFraction, lowerBoundMemory)
- upperBoundMemory = WithMemoryMargin(*safetyMarginFraction, upperBoundMemory)
+ targetMemory = WithMemoryMargin(config.SafetyMarginFraction, targetMemory)
+ lowerBoundMemory = WithMemoryMargin(config.SafetyMarginFraction, lowerBoundMemory)
+ upperBoundMemory = WithMemoryMargin(config.SafetyMarginFraction, upperBoundMemory)
// Apply confidence multiplier to the upper bound estimator. This means
// that the updater will be less eager to evict pods with short history
@@ -151,8 +159,8 @@ func CreatePodResourceRecommender() PodResourceRecommender {
// 24h history : *2
// 1 week history : *1.14
- upperBoundCPU = WithCPUConfidenceMultiplier(1.0, 1.0, upperBoundCPU, *confidenceIntervalCPU)
- upperBoundMemory = WithMemoryConfidenceMultiplier(1.0, 1.0, upperBoundMemory, *confidenceIntervalMemory)
+ upperBoundCPU = WithCPUConfidenceMultiplier(1.0, 1.0, upperBoundCPU, config.ConfidenceIntervalCPU)
+ upperBoundMemory = WithMemoryConfidenceMultiplier(1.0, 1.0, upperBoundMemory, config.ConfidenceIntervalMemory)
// Apply confidence multiplier to the lower bound estimator. This means
// that the updater will be less eager to evict pods with short history
@@ -166,8 +174,8 @@ func CreatePodResourceRecommender() PodResourceRecommender {
// 5m history : *0.6 (force pod eviction if the request is < 0.6 * lower bound)
// 30m history : *0.9
// 60m history : *0.95
- lowerBoundCPU = WithCPUConfidenceMultiplier(0.001, -2.0, lowerBoundCPU, *confidenceIntervalCPU)
- lowerBoundMemory = WithMemoryConfidenceMultiplier(0.001, -2.0, lowerBoundMemory, *confidenceIntervalMemory)
+ lowerBoundCPU = WithCPUConfidenceMultiplier(0.001, -2.0, lowerBoundCPU, config.ConfidenceIntervalCPU)
+ lowerBoundMemory = WithMemoryConfidenceMultiplier(0.001, -2.0, lowerBoundMemory, config.ConfidenceIntervalMemory)
return &podResourceRecommender{
targetCPU,
targetMemory,
@@ -175,12 +183,14 @@ func CreatePodResourceRecommender() PodResourceRecommender {
lowerBoundMemory,
upperBoundCPU,
upperBoundMemory,
+ config.PodMinCPUMillicores,
+ config.PodMinMemoryMb,
}
}
// MapToListOfRecommendedContainerResources converts the map of RecommendedContainerResources into a stable sorted list
// This can be used to get a stable sequence while ranging on the data
-func MapToListOfRecommendedContainerResources(resources RecommendedPodResources) *vpa_types.RecommendedPodResources {
+func MapToListOfRecommendedContainerResources(resources RecommendedPodResources, format RecommendationFormat) *vpa_types.RecommendedPodResources {
containerResources := make([]vpa_types.RecommendedContainerResources, 0, len(resources))
// Sort the container names from the map. This is because maps are an
// unordered data structure, and iterating through the map will return
@@ -194,10 +204,10 @@ func MapToListOfRecommendedContainerResources(resources RecommendedPodResources)
for _, name := range containerNames {
containerResources = append(containerResources, vpa_types.RecommendedContainerResources{
ContainerName: name,
- Target: model.ResourcesAsResourceList(resources[name].Target, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes),
- LowerBound: model.ResourcesAsResourceList(resources[name].LowerBound, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes),
- UpperBound: model.ResourcesAsResourceList(resources[name].UpperBound, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes),
- UncappedTarget: model.ResourcesAsResourceList(resources[name].Target, *humanizeMemory, *roundCPUMillicores, *roundMemoryBytes),
+ Target: model.ResourcesAsResourceList(resources[name].Target, format.HumanizeMemory, format.RoundCPUMillicores, format.RoundMemoryBytes),
+ LowerBound: model.ResourcesAsResourceList(resources[name].LowerBound, format.HumanizeMemory, format.RoundCPUMillicores, format.RoundMemoryBytes),
+ UpperBound: model.ResourcesAsResourceList(resources[name].UpperBound, format.HumanizeMemory, format.RoundCPUMillicores, format.RoundMemoryBytes),
+ UncappedTarget: model.ResourcesAsResourceList(resources[name].Target, format.HumanizeMemory, format.RoundCPUMillicores, format.RoundMemoryBytes),
})
}
recommendation := &vpa_types.RecommendedPodResources{
diff --git a/vertical-pod-autoscaler/pkg/recommender/logic/recommender_test.go b/vertical-pod-autoscaler/pkg/recommender/logic/recommender_test.go
index 164f6d16334c..9cee460d179d 100644
--- a/vertical-pod-autoscaler/pkg/recommender/logic/recommender_test.go
+++ b/vertical-pod-autoscaler/pkg/recommender/logic/recommender_test.go
@@ -25,6 +25,8 @@ import (
)
func TestMinResourcesApplied(t *testing.T) {
+ minCPUMillicores := 25.
+ minMemoryMb := 250.
constCPUEstimator := NewConstCPUEstimator(model.CPUAmountFromCores(0.001))
constMemoryEstimator := NewConstMemoryEstimator(model.MemoryAmountFromBytes(1e6))
@@ -35,6 +37,8 @@ func TestMinResourcesApplied(t *testing.T) {
lowerBoundMemory: constMemoryEstimator,
upperBoundCPU: constCPUEstimator,
upperBoundMemory: constMemoryEstimator,
+ minCPUMillicores: minCPUMillicores,
+ minMemoryMb: minMemoryMb,
}
containerNameToAggregateStateMap := model.ContainerNameToAggregateStateMap{
@@ -42,11 +46,13 @@ func TestMinResourcesApplied(t *testing.T) {
}
recommendedResources := recommender.GetRecommendedPodResources(containerNameToAggregateStateMap)
- assert.Equal(t, model.CPUAmountFromCores(*podMinCPUMillicores/1000), recommendedResources["container-1"].Target[model.ResourceCPU])
- assert.Equal(t, model.MemoryAmountFromBytes(*podMinMemoryMb*1024*1024), recommendedResources["container-1"].Target[model.ResourceMemory])
+ assert.Equal(t, model.CPUAmountFromCores(minCPUMillicores/1000), recommendedResources["container-1"].Target[model.ResourceCPU])
+ assert.Equal(t, model.MemoryAmountFromBytes(minMemoryMb*1024*1024), recommendedResources["container-1"].Target[model.ResourceMemory])
}
func TestMinResourcesSplitAcrossContainers(t *testing.T) {
+ minCPUMillicores := 25.
+ minMemoryMb := 250.
constCPUEstimator := NewConstCPUEstimator(model.CPUAmountFromCores(0.001))
constMemoryEstimator := NewConstMemoryEstimator(model.MemoryAmountFromBytes(1e6))
@@ -57,6 +63,8 @@ func TestMinResourcesSplitAcrossContainers(t *testing.T) {
lowerBoundMemory: constMemoryEstimator,
upperBoundCPU: constCPUEstimator,
upperBoundMemory: constMemoryEstimator,
+ minCPUMillicores: minCPUMillicores,
+ minMemoryMb: minMemoryMb,
}
containerNameToAggregateStateMap := model.ContainerNameToAggregateStateMap{
@@ -65,10 +73,10 @@ func TestMinResourcesSplitAcrossContainers(t *testing.T) {
}
recommendedResources := recommender.GetRecommendedPodResources(containerNameToAggregateStateMap)
- assert.Equal(t, model.CPUAmountFromCores((*podMinCPUMillicores/1000)/2), recommendedResources["container-1"].Target[model.ResourceCPU])
- assert.Equal(t, model.CPUAmountFromCores((*podMinCPUMillicores/1000)/2), recommendedResources["container-2"].Target[model.ResourceCPU])
- assert.Equal(t, model.MemoryAmountFromBytes((*podMinMemoryMb*1024*1024)/2), recommendedResources["container-1"].Target[model.ResourceMemory])
- assert.Equal(t, model.MemoryAmountFromBytes((*podMinMemoryMb*1024*1024)/2), recommendedResources["container-2"].Target[model.ResourceMemory])
+ assert.Equal(t, model.CPUAmountFromCores((minCPUMillicores/1000)/2), recommendedResources["container-1"].Target[model.ResourceCPU])
+ assert.Equal(t, model.CPUAmountFromCores((minCPUMillicores/1000)/2), recommendedResources["container-2"].Target[model.ResourceCPU])
+ assert.Equal(t, model.MemoryAmountFromBytes((minMemoryMb*1024*1024)/2), recommendedResources["container-1"].Target[model.ResourceMemory])
+ assert.Equal(t, model.MemoryAmountFromBytes((minMemoryMb*1024*1024)/2), recommendedResources["container-2"].Target[model.ResourceMemory])
}
func TestControlledResourcesFiltered(t *testing.T) {
@@ -168,7 +176,7 @@ func TestMapToListOfRecommendedContainerResources(t *testing.T) {
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
- outRecommendations := MapToListOfRecommendedContainerResources(tc.resources)
+ outRecommendations := MapToListOfRecommendedContainerResources(tc.resources, RecommendationFormat{RoundCPUMillicores: 1, RoundMemoryBytes: 1})
for i, outRecommendation := range outRecommendations.ContainerRecommendations {
containerName := tc.expectedLast[i]
assert.Equal(t, containerName, outRecommendation.ContainerName)
diff --git a/vertical-pod-autoscaler/pkg/recommender/main.go b/vertical-pod-autoscaler/pkg/recommender/main.go
index 7f1aba8b2c02..8fbc4c3304fa 100644
--- a/vertical-pod-autoscaler/pkg/recommender/main.go
+++ b/vertical-pod-autoscaler/pkg/recommender/main.go
@@ -18,7 +18,6 @@ package main
import (
"context"
- "flag"
"fmt"
"os"
"strings"
@@ -26,14 +25,12 @@ import (
"github.com/spf13/pflag"
corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/informers"
kube_client "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
- kube_flag "k8s.io/component-base/cli/flag"
componentbaseconfig "k8s.io/component-base/config"
componentbaseoptions "k8s.io/component-base/config/options"
"k8s.io/klog/v2"
@@ -41,8 +38,8 @@ import (
"k8s.io/autoscaler/vertical-pod-autoscaler/common"
vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
- "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/features"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/checkpoint"
+ recommender_config "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/config"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/input"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/input/history"
input_metrics "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/input/metrics"
@@ -59,62 +56,6 @@ import (
vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa"
)
-var (
- recommenderName = flag.String("recommender-name", input.DefaultRecommenderName, "Set the recommender name. Recommender will generate recommendations for VPAs that configure the same recommender name. If the recommender name is left as default it will also generate recommendations that don't explicitly specify recommender. You shouldn't run two recommenders with the same name in a cluster.")
- metricsFetcherInterval = flag.Duration("recommender-interval", 1*time.Minute, `How often metrics should be fetched`)
- checkpointsGCInterval = flag.Duration("checkpoints-gc-interval", 10*time.Minute, `How often orphaned checkpoints should be garbage collected`)
- address = flag.String("address", ":8942", "The address to expose Prometheus metrics.")
- storage = flag.String("storage", "", `Specifies storage mode. Supported values: prometheus, checkpoint (default)`)
- memorySaver = flag.Bool("memory-saver", false, `If true, only track pods which have an associated VPA`)
- updateWorkerCount = flag.Int("update-worker-count", 10, "Number of concurrent workers to update VPA recommendations and checkpoints. When increasing this setting, make sure the client-side rate limits ('kube-api-qps' and 'kube-api-burst') are either increased or turned off as well. Determines the minimum number of VPA checkpoints written per recommender loop.")
-)
-
-// Prometheus history provider flags
-var (
- prometheusAddress = flag.String("prometheus-address", "http://prometheus.monitoring.svc", `Where to reach for Prometheus metrics`)
- prometheusInsecure = flag.Bool("prometheus-insecure", false, `Skip tls verify if https is used in the prometheus-address`)
- prometheusJobName = flag.String("prometheus-cadvisor-job-name", "kubernetes-cadvisor", `Name of the prometheus job name which scrapes the cAdvisor metrics`)
- historyLength = flag.String("history-length", "8d", `How much time back prometheus have to be queried to get historical metrics`)
- historyResolution = flag.String("history-resolution", "1h", `Resolution at which Prometheus is queried for historical metrics`)
- queryTimeout = flag.String("prometheus-query-timeout", "5m", `How long to wait before killing long queries`)
- podLabelPrefix = flag.String("pod-label-prefix", "pod_label_", `Which prefix to look for pod labels in metrics`)
- podLabelsMetricName = flag.String("metric-for-pod-labels", "up{job=\"kubernetes-pods\"}", `Which metric to look for pod labels in metrics`)
- podNamespaceLabel = flag.String("pod-namespace-label", "kubernetes_namespace", `Label name to look for pod namespaces`)
- podNameLabel = flag.String("pod-name-label", "kubernetes_pod_name", `Label name to look for pod names`)
- ctrNamespaceLabel = flag.String("container-namespace-label", "namespace", `Label name to look for container namespaces`)
- ctrPodNameLabel = flag.String("container-pod-name-label", "pod_name", `Label name to look for container pod names`)
- ctrNameLabel = flag.String("container-name-label", "name", `Label name to look for container names`)
- username = flag.String("username", "", "The username used in the prometheus server basic auth. Can also be set via the PROMETHEUS_USERNAME environment variable")
- password = flag.String("password", "", "The password used in the prometheus server basic auth. Can also be set via the PROMETHEUS_PASSWORD environment variable")
- prometheusBearerToken = flag.String("prometheus-bearer-token", "", "The bearer token used in the Prometheus server bearer token auth")
- prometheusBearerTokenFile = flag.String("prometheus-bearer-token-file", "", "Path to the bearer token file used for authentication by the Prometheus server")
-)
-
-// External metrics provider flags
-var (
- useExternalMetrics = flag.Bool("use-external-metrics", false, "ALPHA. Use an external metrics provider instead of metrics_server.")
- externalCpuMetric = flag.String("external-metrics-cpu-metric", "", "ALPHA. Metric to use with external metrics provider for CPU usage.")
- externalMemoryMetric = flag.String("external-metrics-memory-metric", "", "ALPHA. Metric to use with external metrics provider for memory usage.")
-)
-
-// Aggregation configuration flags
-var (
- memoryAggregationInterval = flag.Duration("memory-aggregation-interval", model.DefaultMemoryAggregationInterval, `The length of a single interval, for which the peak memory usage is computed. Memory usage peaks are aggregated in multiples of this interval. In other words there is one memory usage sample per interval (the maximum usage over that interval)`)
- memoryAggregationIntervalCount = flag.Int64("memory-aggregation-interval-count", model.DefaultMemoryAggregationIntervalCount, `The number of consecutive memory-aggregation-intervals which make up the MemoryAggregationWindowLength which in turn is the period for memory usage aggregation by VPA. In other words, MemoryAggregationWindowLength = memory-aggregation-interval * memory-aggregation-interval-count.`)
- memoryHistogramDecayHalfLife = flag.Duration("memory-histogram-decay-half-life", model.DefaultMemoryHistogramDecayHalfLife, `The amount of time it takes a historical memory usage sample to lose half of its weight. In other words, a fresh usage sample is twice as 'important' as one with age equal to the half life period.`)
- cpuHistogramDecayHalfLife = flag.Duration("cpu-histogram-decay-half-life", model.DefaultCPUHistogramDecayHalfLife, `The amount of time it takes a historical CPU usage sample to lose half of its weight.`)
- oomBumpUpRatio = flag.Float64("oom-bump-up-ratio", model.DefaultOOMBumpUpRatio, `Default memory bump up ratio when OOM occurs. This value applies to all VPAs unless overridden in the VPA spec. Default is 1.2.`)
- oomMinBumpUp = flag.Float64("oom-min-bump-up-bytes", model.DefaultOOMMinBumpUp, `Default minimal increase of memory (in bytes) when OOM occurs. This value applies to all VPAs unless overridden in the VPA spec. Default is 100 * 1024 * 1024 (100Mi).`)
-)
-
-// Post processors flags
-var (
- // CPU as integer to benefit for CPU management Static Policy ( https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy )
- postProcessorCPUasInteger = flag.Bool("cpu-integer-post-processor-enabled", false, "Enable the cpu-integer recommendation post processor. The post processor will round up CPU recommendations to a whole CPU for pods which were opted in by setting an appropriate label on VPA object (experimental)")
- maxAllowedCPU = resource.QuantityValue{}
- maxAllowedMemory = resource.QuantityValue{}
-)
-
const (
// aggregateContainerStateGCInterval defines how often expired AggregateContainerStates are garbage collected.
aggregateContainerStateGCInterval = 1 * time.Hour
@@ -125,57 +66,27 @@ const (
defaultResyncPeriod time.Duration = 10 * time.Minute
)
-func init() {
- flag.Var(&maxAllowedCPU, "container-recommendation-max-allowed-cpu", "Maximum amount of CPU that will be recommended for a container. VerticalPodAutoscaler-level maximum allowed takes precedence over the global maximum allowed.")
- flag.Var(&maxAllowedMemory, "container-recommendation-max-allowed-memory", "Maximum amount of memory that will be recommended for a container. VerticalPodAutoscaler-level maximum allowed takes precedence over the global maximum allowed.")
-}
+var config *recommender_config.RecommenderConfig
func main() {
- commonFlags := common.InitCommonFlags()
- klog.InitFlags(nil)
- common.InitLoggingFlags()
-
+ // Leader election needs to be initialized before any other flag, because it may be used in other flags' validation.
leaderElection := defaultLeaderElectionConfiguration()
componentbaseoptions.BindLeaderElectionFlags(&leaderElection, pflag.CommandLine)
- features.MutableFeatureGate.AddFlag(pflag.CommandLine)
-
- kube_flag.InitFlags()
- klog.V(1).InfoS("Vertical Pod Autoscaler Recommender", "version", common.VerticalPodAutoscalerVersion(), "recommenderName", *recommenderName)
+ config = recommender_config.InitRecommenderFlags()
- if len(commonFlags.VpaObjectNamespace) > 0 && len(commonFlags.IgnoredVpaObjectNamespaces) > 0 {
- klog.ErrorS(nil, "--vpa-object-namespace and --ignored-vpa-object-namespaces are mutually exclusive and can't be set together.")
- klog.FlushAndExit(klog.ExitFlushTimeout, 1)
- }
-
- if *routines.MinCheckpointsPerRun != 10 { // Default value is 10
- klog.InfoS("DEPRECATION WARNING: The 'min-checkpoints' flag is deprecated and has no effect. It will be removed in a future release.")
- }
-
- if *prometheusBearerToken != "" && *prometheusBearerTokenFile != "" && *username != "" {
- klog.ErrorS(nil, "--bearer-token, --bearer-token-file and --username are mutually exclusive and can't be set together.")
- klog.FlushAndExit(klog.ExitFlushTimeout, 1)
- }
-
- if *prometheusBearerTokenFile != "" {
- fileContent, err := os.ReadFile(*prometheusBearerTokenFile)
- if err != nil {
- klog.ErrorS(err, "Unable to read bearer token file", "filename", *prometheusBearerTokenFile)
- klog.FlushAndExit(klog.ExitFlushTimeout, 1)
- }
- *prometheusBearerToken = strings.TrimSpace(string(fileContent))
- }
+ klog.V(1).InfoS("Vertical Pod Autoscaler Recommender", "version", common.VerticalPodAutoscalerVersion(), "recommenderName", config.RecommenderName)
ctx := context.Background()
- healthCheck := metrics.NewHealthCheck(*metricsFetcherInterval * 5)
+ healthCheck := metrics.NewHealthCheck(config.MetricsFetcherInterval * 5)
metrics_recommender.Register()
metrics_quality.Register()
metrics_resources.Register()
- server.Initialize(&commonFlags.EnableProfiling, healthCheck, address)
+ server.Initialize(&config.CommonFlags.EnableProfiling, healthCheck, &config.Address)
if !leaderElection.LeaderElect {
- run(ctx, healthCheck, commonFlags)
+ run(ctx, healthCheck, config.CommonFlags)
} else {
id, err := os.Hostname()
if err != nil {
@@ -184,8 +95,8 @@ func main() {
}
id = id + "_" + string(uuid.NewUUID())
- config := common.CreateKubeConfigOrDie(commonFlags.KubeConfig, float32(commonFlags.KubeApiQps), int(commonFlags.KubeApiBurst))
- kubeClient := kube_client.NewForConfigOrDie(config)
+ kubeconfig := common.CreateKubeConfigOrDie(config.CommonFlags.KubeConfig, float32(config.CommonFlags.KubeApiQps), int(config.CommonFlags.KubeApiBurst))
+ kubeClient := kube_client.NewForConfigOrDie(kubeconfig)
lock, err := resourcelock.New(
leaderElection.ResourceLock,
@@ -210,7 +121,7 @@ func main() {
ReleaseOnCancel: true,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(_ context.Context) {
- run(ctx, healthCheck, commonFlags)
+ run(ctx, healthCheck, config.CommonFlags)
},
OnStoppedLeading: func() {
klog.Fatal("lost master")
@@ -243,11 +154,11 @@ func run(ctx context.Context, healthCheck *metrics.HealthCheck, commonFlag *comm
// Create a stop channel that will be used to signal shutdown
stopCh := make(chan struct{})
defer close(stopCh)
- config := common.CreateKubeConfigOrDie(commonFlag.KubeConfig, float32(commonFlag.KubeApiQps), int(commonFlag.KubeApiBurst))
- kubeClient := kube_client.NewForConfigOrDie(config)
+ kubeConfig := common.CreateKubeConfigOrDie(commonFlag.KubeConfig, float32(commonFlag.KubeApiQps), int(commonFlag.KubeApiBurst))
+ kubeClient := kube_client.NewForConfigOrDie(kubeConfig)
clusterState := model.NewClusterState(aggregateContainerStateGCInterval)
factory := informers.NewSharedInformerFactoryWithOptions(kubeClient, defaultResyncPeriod, informers.WithNamespace(commonFlag.VpaObjectNamespace))
- controllerFetcher := controllerfetcher.NewControllerFetcher(config, kubeClient, factory, scaleCacheEntryFreshnessTime, scaleCacheEntryLifetime, scaleCacheEntryJitterFactor)
+ controllerFetcher := controllerfetcher.NewControllerFetcher(kubeConfig, kubeClient, factory, scaleCacheEntryFreshnessTime, scaleCacheEntryLifetime, scaleCacheEntryJitterFactor)
podLister, oomObserver := input.NewPodListerAndOOMObserver(ctx, kubeClient, commonFlag.VpaObjectNamespace, stopCh)
factory.Start(stopCh)
@@ -259,12 +170,12 @@ func run(ctx context.Context, healthCheck *metrics.HealthCheck, commonFlag *comm
}
}
- model.InitializeAggregationsConfig(model.NewAggregationsConfig(*memoryAggregationInterval, *memoryAggregationIntervalCount, *memoryHistogramDecayHalfLife, *cpuHistogramDecayHalfLife, *oomBumpUpRatio, *oomMinBumpUp))
+ model.InitializeAggregationsConfig(model.NewAggregationsConfig(config.MemoryAggregationInterval, config.MemoryAggregationIntervalCount, config.MemoryHistogramDecayHalfLife, config.CpuHistogramDecayHalfLife, config.OOMBumpUpRatio, config.OOMMinBumpUp))
- useCheckpoints := *storage != "prometheus"
+ useCheckpoints := config.Storage != "prometheus"
var postProcessors []routines.RecommendationPostProcessor
- if *postProcessorCPUasInteger {
+ if config.PostProcessorCPUasInteger {
postProcessors = append(postProcessors, &routines.IntegerCPUPostProcessor{})
}
@@ -272,20 +183,20 @@ func run(ctx context.Context, healthCheck *metrics.HealthCheck, commonFlag *comm
// CappingPostProcessor, should always come in the last position for post-processing
postProcessors = append(postProcessors, routines.NewCappingRecommendationProcessor(globalMaxAllowed))
var source input_metrics.PodMetricsLister
- if *useExternalMetrics {
+ if config.UseExternalMetrics {
resourceMetrics := map[corev1.ResourceName]string{}
- if externalCpuMetric != nil && *externalCpuMetric != "" {
- resourceMetrics[corev1.ResourceCPU] = *externalCpuMetric
+ if config.ExternalCpuMetric != "" {
+ resourceMetrics[corev1.ResourceCPU] = config.ExternalCpuMetric
}
- if externalMemoryMetric != nil && *externalMemoryMetric != "" {
- resourceMetrics[corev1.ResourceMemory] = *externalMemoryMetric
+ if config.ExternalMemoryMetric != "" {
+ resourceMetrics[corev1.ResourceMemory] = config.ExternalMemoryMetric
}
- externalClientOptions := &input_metrics.ExternalClientOptions{ResourceMetrics: resourceMetrics, ContainerNameLabel: *ctrNameLabel}
+ externalClientOptions := &input_metrics.ExternalClientOptions{ResourceMetrics: resourceMetrics, ContainerNameLabel: config.CtrNameLabel}
klog.V(1).InfoS("Using External Metrics", "options", externalClientOptions)
- source = input_metrics.NewExternalClient(config, clusterState, *externalClientOptions)
+ source = input_metrics.NewExternalClient(kubeConfig, clusterState, *externalClientOptions)
} else {
klog.V(1).InfoS("Using Metrics Server")
- source = input_metrics.NewPodMetricsesSource(resourceclient.NewForConfigOrDie(config))
+ source = input_metrics.NewPodMetricsesSource(resourceclient.NewForConfigOrDie(kubeConfig))
}
ignoredNamespaces := strings.Split(commonFlag.IgnoredVpaObjectNamespaces, ",")
@@ -295,33 +206,51 @@ func run(ctx context.Context, healthCheck *metrics.HealthCheck, commonFlag *comm
OOMObserver: oomObserver,
KubeClient: kubeClient,
MetricsClient: input_metrics.NewMetricsClient(source, commonFlag.VpaObjectNamespace, "default-metrics-client"),
- VpaCheckpointClient: vpa_clientset.NewForConfigOrDie(config).AutoscalingV1(),
- VpaLister: vpa_api_util.NewVpasLister(vpa_clientset.NewForConfigOrDie(config), make(chan struct{}), commonFlag.VpaObjectNamespace),
- VpaCheckpointLister: vpa_api_util.NewVpaCheckpointLister(vpa_clientset.NewForConfigOrDie(config), make(chan struct{}), commonFlag.VpaObjectNamespace),
+ VpaCheckpointClient: vpa_clientset.NewForConfigOrDie(kubeConfig).AutoscalingV1(),
+ VpaLister: vpa_api_util.NewVpasLister(vpa_clientset.NewForConfigOrDie(kubeConfig), make(chan struct{}), commonFlag.VpaObjectNamespace),
+ VpaCheckpointLister: vpa_api_util.NewVpaCheckpointLister(vpa_clientset.NewForConfigOrDie(kubeConfig), make(chan struct{}), commonFlag.VpaObjectNamespace),
ClusterState: clusterState,
- SelectorFetcher: target.NewVpaTargetSelectorFetcher(config, kubeClient, factory),
- MemorySaveMode: *memorySaver,
+ SelectorFetcher: target.NewVpaTargetSelectorFetcher(kubeConfig, kubeClient, factory),
+ MemorySaveMode: config.MemorySaver,
ControllerFetcher: controllerFetcher,
- RecommenderName: *recommenderName,
+ RecommenderName: config.RecommenderName,
IgnoredNamespaces: ignoredNamespaces,
VpaObjectNamespace: commonFlag.VpaObjectNamespace,
}.Make()
controllerFetcher.Start(ctx, scaleCacheLoopPeriod)
recommender := routines.RecommenderFactory{
- ClusterState: clusterState,
- ClusterStateFeeder: clusterStateFeeder,
- ControllerFetcher: controllerFetcher,
- CheckpointWriter: checkpoint.NewCheckpointWriter(clusterState, vpa_clientset.NewForConfigOrDie(config).AutoscalingV1()),
- VpaClient: vpa_clientset.NewForConfigOrDie(config).AutoscalingV1(),
- PodResourceRecommender: logic.CreatePodResourceRecommender(),
+ ClusterState: clusterState,
+ ClusterStateFeeder: clusterStateFeeder,
+ ControllerFetcher: controllerFetcher,
+ CheckpointWriter: checkpoint.NewCheckpointWriter(clusterState, vpa_clientset.NewForConfigOrDie(kubeConfig).AutoscalingV1()),
+ VpaClient: vpa_clientset.NewForConfigOrDie(kubeConfig).AutoscalingV1(),
+ PodResourceRecommender: logic.CreatePodResourceRecommender(logic.RecommendationConfig{
+ SafetyMarginFraction: config.SafetyMarginFraction,
+ PodMinCPUMillicores: config.PodMinCPUMillicores,
+ PodMinMemoryMb: config.PodMinMemoryMb,
+ TargetCPUPercentile: config.TargetCPUPercentile,
+ LowerBoundCPUPercentile: config.LowerBoundCPUPercentile,
+ UpperBoundCPUPercentile: config.UpperBoundCPUPercentile,
+ ConfidenceIntervalCPU: config.ConfidenceIntervalCPU,
+ TargetMemoryPercentile: config.TargetMemoryPercentile,
+ LowerBoundMemoryPercentile: config.LowerBoundMemoryPercentile,
+ UpperBoundMemoryPercentile: config.UpperBoundMemoryPercentile,
+ ConfidenceIntervalMemory: config.ConfidenceIntervalMemory,
+ }),
+ RecommendationFormat: logic.RecommendationFormat{
+ HumanizeMemory: config.HumanizeMemory,
+ RoundCPUMillicores: config.RoundCPUMillicores,
+ RoundMemoryBytes: config.RoundMemoryBytes,
+ },
RecommendationPostProcessors: postProcessors,
- CheckpointsGCInterval: *checkpointsGCInterval,
+ CheckpointsGCInterval: config.CheckpointsGCInterval,
+ CheckpointsWriteTimeout: config.CheckpointsWriteTimeout,
UseCheckpoints: useCheckpoints,
- UpdateWorkerCount: *updateWorkerCount,
+ UpdateWorkerCount: config.UpdateWorkerCount,
}.Make()
- promQueryTimeout, err := time.ParseDuration(*queryTimeout)
+ promQueryTimeout, err := time.ParseDuration(config.QueryTimeout)
if err != nil {
klog.ErrorS(err, "Could not parse --prometheus-query-timeout as a time.Duration")
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
@@ -331,24 +260,24 @@ func run(ctx context.Context, healthCheck *metrics.HealthCheck, commonFlag *comm
recommender.GetClusterStateFeeder().InitFromCheckpoints(ctx)
} else {
config := history.PrometheusHistoryProviderConfig{
- Address: *prometheusAddress,
- Insecure: *prometheusInsecure,
+ Address: config.PrometheusAddress,
+ Insecure: config.PrometheusInsecure,
QueryTimeout: promQueryTimeout,
- HistoryLength: *historyLength,
- HistoryResolution: *historyResolution,
- PodLabelPrefix: *podLabelPrefix,
- PodLabelsMetricName: *podLabelsMetricName,
- PodNamespaceLabel: *podNamespaceLabel,
- PodNameLabel: *podNameLabel,
- CtrNamespaceLabel: *ctrNamespaceLabel,
- CtrPodNameLabel: *ctrPodNameLabel,
- CtrNameLabel: *ctrNameLabel,
- CadvisorMetricsJobName: *prometheusJobName,
+ HistoryLength: config.HistoryLength,
+ HistoryResolution: config.HistoryResolution,
+ PodLabelPrefix: config.PodLabelPrefix,
+ PodLabelsMetricName: config.PodLabelsMetricName,
+ PodNamespaceLabel: config.PodNamespaceLabel,
+ PodNameLabel: config.PodNameLabel,
+ CtrNamespaceLabel: config.CtrNamespaceLabel,
+ CtrPodNameLabel: config.CtrPodNameLabel,
+ CtrNameLabel: config.CtrNameLabel,
+ CadvisorMetricsJobName: config.PrometheusJobName,
Namespace: commonFlag.VpaObjectNamespace,
Authentication: history.PrometheusCredentials{
- BearerToken: *prometheusBearerToken,
- Username: *username,
- Password: *password,
+ BearerToken: config.PrometheusBearerToken,
+ Username: config.Username,
+ Password: config.Password,
},
}
provider, err := history.NewPrometheusHistoryProvider(config)
@@ -362,7 +291,7 @@ func run(ctx context.Context, healthCheck *metrics.HealthCheck, commonFlag *comm
// Start updating health check endpoint.
healthCheck.StartMonitoring()
- ticker := time.Tick(*metricsFetcherInterval)
+ ticker := time.Tick(config.MetricsFetcherInterval)
for range ticker {
recommender.RunOnce()
healthCheck.UpdateLastActivity()
@@ -371,11 +300,11 @@ func run(ctx context.Context, healthCheck *metrics.HealthCheck, commonFlag *comm
func initGlobalMaxAllowed() corev1.ResourceList {
result := make(corev1.ResourceList)
- if !maxAllowedCPU.IsZero() {
- result[corev1.ResourceCPU] = maxAllowedCPU.Quantity
+ if !config.MaxAllowedCPU.IsZero() {
+ result[corev1.ResourceCPU] = config.MaxAllowedCPU.Quantity
}
- if !maxAllowedMemory.IsZero() {
- result[corev1.ResourceMemory] = maxAllowedMemory.Quantity
+ if !config.MaxAllowedMemory.IsZero() {
+ result[corev1.ResourceMemory] = config.MaxAllowedMemory.Quantity
}
return result
diff --git a/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go b/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go
index 1883096c7fa0..778e06694e70 100644
--- a/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go
+++ b/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go
@@ -18,7 +18,6 @@ package routines
import (
"context"
- "flag"
"sync"
"time"
@@ -35,12 +34,6 @@ import (
vpa_utils "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa"
)
-var (
- checkpointsWriteTimeout = flag.Duration("checkpoints-timeout", time.Minute, `Timeout for writing checkpoints since the start of the recommender's main loop`)
- // MinCheckpointsPerRun is exported to allow displaying a deprecation warning. TODO (voelzmo): remove this flag and the warning in a future release.
- MinCheckpointsPerRun = flag.Int("min-checkpoints", 10, "Minimum number of checkpoints to write per recommender's main loop. WARNING: this flag is deprecated and doesn't have any effect. It will be removed in a future release. Refer to update-worker-count to influence the minimum number of checkpoints written per loop.")
-)
-
// Recommender recommend resources for certain containers, based on utilization periodically got from metrics api.
type Recommender interface {
// RunOnce performs one iteration of recommender duties followed by update of recommendations in VPA objects.
@@ -62,10 +55,12 @@ type recommender struct {
clusterStateFeeder input.ClusterStateFeeder
checkpointWriter checkpoint.CheckpointWriter
checkpointsGCInterval time.Duration
+ checkpointsWriteTimeout time.Duration
controllerFetcher controllerfetcher.ControllerFetcher
lastCheckpointGC time.Time
vpaClient vpa_api.VerticalPodAutoscalersGetter
podResourceRecommender logic.PodResourceRecommender
+ recommendationFormat logic.RecommendationFormat
useCheckpoints bool
lastAggregateContainerStateGC time.Time
recommendationPostProcessor []RecommendationPostProcessor
@@ -84,7 +79,7 @@ func processVPAUpdate(r *recommender, vpa *model.Vpa, observedVpa *vpaautoscalin
resources := r.podResourceRecommender.GetRecommendedPodResources(GetContainerNameToAggregateStateMap(vpa))
had := vpa.HasRecommendation()
- listOfResourceRecommendation := logic.MapToListOfRecommendedContainerResources(resources)
+ listOfResourceRecommendation := logic.MapToListOfRecommendedContainerResources(resources, r.recommendationFormat)
for _, postProcessor := range r.recommendationPostProcessor {
listOfResourceRecommendation = postProcessor.Process(observedVpa, listOfResourceRecommendation)
@@ -188,7 +183,7 @@ func (r *recommender) RunOnce() {
r.UpdateVPAs()
timer.ObserveStep("UpdateVPAs")
- stepCtx, cancelFunc := context.WithDeadline(ctx, time.Now().Add(*checkpointsWriteTimeout))
+ stepCtx, cancelFunc := context.WithDeadline(ctx, time.Now().Add(r.checkpointsWriteTimeout))
defer cancelFunc()
r.MaintainCheckpoints(stepCtx)
timer.ObserveStep("MaintainCheckpoints")
@@ -206,13 +201,15 @@ type RecommenderFactory struct {
ControllerFetcher controllerfetcher.ControllerFetcher
CheckpointWriter checkpoint.CheckpointWriter
PodResourceRecommender logic.PodResourceRecommender
+ RecommendationFormat logic.RecommendationFormat
VpaClient vpa_api.VerticalPodAutoscalersGetter
RecommendationPostProcessors []RecommendationPostProcessor
- CheckpointsGCInterval time.Duration
- UseCheckpoints bool
- UpdateWorkerCount int
+ CheckpointsGCInterval time.Duration
+ CheckpointsWriteTimeout time.Duration
+ UseCheckpoints bool
+ UpdateWorkerCount int
}
// Make creates a new recommender instance,
@@ -223,10 +220,12 @@ func (c RecommenderFactory) Make() Recommender {
clusterStateFeeder: c.ClusterStateFeeder,
checkpointWriter: c.CheckpointWriter,
checkpointsGCInterval: c.CheckpointsGCInterval,
+ checkpointsWriteTimeout: c.CheckpointsWriteTimeout,
controllerFetcher: c.ControllerFetcher,
useCheckpoints: c.UseCheckpoints,
vpaClient: c.VpaClient,
podResourceRecommender: c.PodResourceRecommender,
+ recommendationFormat: c.RecommendationFormat,
recommendationPostProcessor: c.RecommendationPostProcessors,
lastAggregateContainerStateGC: time.Now(),
lastCheckpointGC: time.Now(),
diff --git a/vertical-pod-autoscaler/pkg/updater/config/config.go b/vertical-pod-autoscaler/pkg/updater/config/config.go
new file mode 100644
index 000000000000..6dbc142e00a0
--- /dev/null
+++ b/vertical-pod-autoscaler/pkg/updater/config/config.go
@@ -0,0 +1,106 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+ "flag"
+ "os"
+ "time"
+
+ "github.com/spf13/pflag"
+ kube_flag "k8s.io/component-base/cli/flag"
+ "k8s.io/klog/v2"
+
+ "k8s.io/autoscaler/vertical-pod-autoscaler/common"
+ "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/features"
+)
+
+// UpdaterConfig holds all configuration for the updater component
+type UpdaterConfig struct {
+ // Common flags
+ CommonFlags *common.CommonFlags
+
+ UpdaterInterval time.Duration
+ MinReplicas int
+ EvictionToleranceFraction float64
+ EvictionRateLimit float64
+ EvictionRateBurst int
+ Namespace string
+ Address string
+ UseAdmissionControllerStatus bool
+ InPlaceSkipDisruptionBudget bool
+
+ DefaultUpdateThreshold float64
+ PodLifetimeUpdateThreshold time.Duration
+ EvictAfterOOMThreshold time.Duration
+}
+
+// DefaultUpdaterConfig returns an UpdaterConfig with default values
+func DefaultUpdaterConfig() *UpdaterConfig {
+ return &UpdaterConfig{
+ CommonFlags: common.DefaultCommonConfig(),
+ UpdaterInterval: 1 * time.Minute,
+ MinReplicas: 2,
+ EvictionToleranceFraction: 0.5,
+ EvictionRateLimit: -1,
+ EvictionRateBurst: 1,
+ Namespace: os.Getenv("NAMESPACE"),
+ Address: ":8943",
+ UseAdmissionControllerStatus: true,
+ InPlaceSkipDisruptionBudget: false,
+
+ DefaultUpdateThreshold: 0.1,
+ PodLifetimeUpdateThreshold: time.Hour * 12,
+ EvictAfterOOMThreshold: 10 * time.Minute,
+ }
+}
+
+// InitUpdaterFlags initializes flags for the updater component
+func InitUpdaterFlags() *UpdaterConfig {
+ config := DefaultUpdaterConfig()
+ config.CommonFlags = common.InitCommonFlags()
+
+ flag.DurationVar(&config.UpdaterInterval, "updater-interval", config.UpdaterInterval, "How often updater should run")
+ flag.IntVar(&config.MinReplicas, "min-replicas", config.MinReplicas, "Minimum number of replicas to perform update")
+ flag.Float64Var(&config.EvictionToleranceFraction, "eviction-tolerance", config.EvictionToleranceFraction, "Fraction of replica count that can be evicted for update, if more than one pod can be evicted.")
+ flag.Float64Var(&config.EvictionRateLimit, "eviction-rate-limit", config.EvictionRateLimit, "Number of pods that can be evicted per seconds. A rate limit set to 0 or -1 will disable the rate limiter.")
+ flag.IntVar(&config.EvictionRateBurst, "eviction-rate-burst", config.EvictionRateBurst, "Burst of pods that can be evicted.")
+ flag.StringVar(&config.Address, "address", config.Address, "The address to expose Prometheus metrics.")
+ flag.BoolVar(&config.UseAdmissionControllerStatus, "use-admission-controller-status", config.UseAdmissionControllerStatus, "If true, updater will only evict pods when admission controller status is valid.")
+ flag.BoolVar(&config.InPlaceSkipDisruptionBudget, "in-place-skip-disruption-budget", config.InPlaceSkipDisruptionBudget, "[ALPHA] If true, VPA updater skips disruption budget checks for in-place pod updates when all containers have NotRequired resize policy (or no policy defined) for both CPU and memory resources. Disruption budgets are still respected when any container has RestartContainer resize policy for any resource.")
+
+ flag.Float64Var(&config.DefaultUpdateThreshold, "pod-update-threshold", config.DefaultUpdateThreshold, "Ignore updates that have priority lower than the value of this flag")
+ flag.DurationVar(&config.PodLifetimeUpdateThreshold, "in-recommendation-bounds-eviction-lifetime-threshold", config.PodLifetimeUpdateThreshold, "Pods that live for at least that long can be evicted even if their request is within the [MinRecommended...MaxRecommended] range")
+ flag.DurationVar(&config.EvictAfterOOMThreshold, "evict-after-oom-threshold", config.EvictAfterOOMThreshold, `The default duration to evict pods that have OOMed in less than evict-after-oom-threshold since start.`)
+
+ // These need to happen last. kube_flag.InitFlags() synchronizes and parses
+ // flags from the flag package to pflag, so feature gates must be added to
+ // pflag before InitFlags() is called.
+ klog.InitFlags(nil)
+ common.InitLoggingFlags()
+ features.MutableFeatureGate.AddFlag(pflag.CommandLine)
+ kube_flag.InitFlags()
+
+ ValidateUpdaterConfig(config)
+
+ return config
+}
+
+// ValidateUpdaterConfig performs validation of the updater flags
+func ValidateUpdaterConfig(config *UpdaterConfig) {
+ common.ValidateCommonConfig(config.CommonFlags)
+}
diff --git a/vertical-pod-autoscaler/pkg/updater/logic/updater.go b/vertical-pod-autoscaler/pkg/updater/logic/updater.go
index a73b8f9a77f1..7cca5d72db74 100644
--- a/vertical-pod-autoscaler/pkg/updater/logic/updater.go
+++ b/vertical-pod-autoscaler/pkg/updater/logic/updater.go
@@ -82,6 +82,9 @@ type updater struct {
statusValidator status.Validator
controllerFetcher controllerfetcher.ControllerFetcher
ignoredNamespaces []string
+ defaultUpdateThreshold float64
+ podLifetimeUpdateThreshold time.Duration
+ evictAfterOOMThreshold time.Duration
}
// NewUpdater creates Updater with given configuration
@@ -94,6 +97,9 @@ func NewUpdater(
evictionToleranceFraction float64,
useAdmissionControllerStatus bool,
inPlaceSkipDisruptionBudget bool,
+ defaultUpdateThreshold float64,
+ podLifetimeUpdateThreshold time.Duration,
+ evictAfterOOMThreshold time.Duration,
statusNamespace string,
recommendationProcessor vpa_api_util.RecommendationProcessor,
evictionAdmission priority.PodEvictionAdmission,
@@ -136,7 +142,10 @@ func NewUpdater(
status.AdmissionControllerStatusName,
statusNamespace,
),
- ignoredNamespaces: ignoredNamespaces,
+ ignoredNamespaces: ignoredNamespaces,
+ defaultUpdateThreshold: defaultUpdateThreshold,
+ podLifetimeUpdateThreshold: podLifetimeUpdateThreshold,
+ evictAfterOOMThreshold: evictAfterOOMThreshold,
}, nil
}
@@ -409,9 +418,14 @@ func getRateLimiter(rateLimit float64, rateLimitBurst int) *rate.Limiter {
// getPodsUpdateOrder returns list of pods that should be updated ordered by update priority
func (u *updater) getPodsUpdateOrder(pods []*corev1.Pod, vpa *vpa_types.VerticalPodAutoscaler) []*corev1.Pod {
+ updateconfig := priority.UpdateConfig{
+ MinChangePriority: u.defaultUpdateThreshold,
+ PodLifetimeUpdateThreshold: u.podLifetimeUpdateThreshold,
+ EvictAfterOOMThreshold: u.evictAfterOOMThreshold,
+ }
priorityCalculator := priority.NewUpdatePriorityCalculator(
vpa,
- nil,
+ updateconfig,
u.recommendationProcessor,
u.priorityProcessor)
diff --git a/vertical-pod-autoscaler/pkg/updater/main.go b/vertical-pod-autoscaler/pkg/updater/main.go
index 2148e021bd43..2bc5acaae3f6 100644
--- a/vertical-pod-autoscaler/pkg/updater/main.go
+++ b/vertical-pod-autoscaler/pkg/updater/main.go
@@ -18,7 +18,6 @@ package main
import (
"context"
- "flag"
"fmt"
"os"
"strings"
@@ -31,7 +30,6 @@ import (
kube_client "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
- kube_flag "k8s.io/component-base/cli/flag"
componentbaseconfig "k8s.io/component-base/config"
componentbaseoptions "k8s.io/component-base/config/options"
"k8s.io/klog/v2"
@@ -40,9 +38,9 @@ import (
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation"
vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
- "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/features"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target"
controllerfetcher "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target/controller_fetcher"
+ updater_config "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/config"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/inplace"
updater "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/logic"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/priority"
@@ -54,37 +52,6 @@ import (
vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa"
)
-var (
- updaterInterval = flag.Duration("updater-interval", 1*time.Minute,
- `How often updater should run`)
-
- minReplicas = flag.Int("min-replicas", 2,
- `Minimum number of replicas to perform update`)
-
- evictionToleranceFraction = flag.Float64("eviction-tolerance", 0.5,
- `Fraction of replica count that can be evicted for update, if more than one pod can be evicted.`)
-
- evictionRateLimit = flag.Float64("eviction-rate-limit", -1,
- `Number of pods that can be evicted per seconds. A rate limit set to 0 or -1 will disable
- the rate limiter.`)
-
- evictionRateBurst = flag.Int("eviction-rate-burst", 1, `Burst of pods that can be evicted.`)
-
- address = flag.String("address", ":8943", "The address to expose Prometheus metrics.")
-
- useAdmissionControllerStatus = flag.Bool("use-admission-controller-status", true,
- "If true, updater will only evict pods when admission controller status is valid.")
-
- inPlaceSkipDisruptionBudget = flag.Bool(
- "in-place-skip-disruption-budget",
- false,
- "[ALPHA] If true, VPA updater skips disruption budget checks for in-place pod updates when all containers have NotRequired resize policy (or no policy defined) for both CPU and memory resources. "+
- "Disruption budgets are still respected when any container has RestartContainer resize policy for any resource.",
- )
-
- namespace = os.Getenv("NAMESPACE")
-)
-
const (
defaultResyncPeriod time.Duration = 10 * time.Minute
scaleCacheEntryLifetime time.Duration = time.Hour
@@ -92,31 +59,24 @@ const (
scaleCacheEntryJitterFactor float64 = 1.
)
-func main() {
- commonFlags := common.InitCommonFlags()
- klog.InitFlags(nil)
- common.InitLoggingFlags()
+var config *updater_config.UpdaterConfig
+func main() {
+ // Leader election needs to be initialized before any other flag, because it may be used in other flags' validation.
leaderElection := defaultLeaderElectionConfiguration()
componentbaseoptions.BindLeaderElectionFlags(&leaderElection, pflag.CommandLine)
- features.MutableFeatureGate.AddFlag(pflag.CommandLine)
+ config = updater_config.InitUpdaterFlags()
- kube_flag.InitFlags()
klog.V(1).InfoS("Vertical Pod Autoscaler Updater", "version", common.VerticalPodAutoscalerVersion())
- if len(commonFlags.VpaObjectNamespace) > 0 && len(commonFlags.IgnoredVpaObjectNamespaces) > 0 {
- klog.ErrorS(nil, "--vpa-object-namespace and --ignored-vpa-object-namespaces are mutually exclusive and can't be set together.")
- klog.FlushAndExit(klog.ExitFlushTimeout, 1)
- }
-
- healthCheck := metrics.NewHealthCheck(*updaterInterval * 5)
- server.Initialize(&commonFlags.EnableProfiling, healthCheck, address)
+ healthCheck := metrics.NewHealthCheck(config.UpdaterInterval * 5)
+ server.Initialize(&config.CommonFlags.EnableProfiling, healthCheck, &config.Address)
metrics_updater.Register()
if !leaderElection.LeaderElect {
- run(healthCheck, commonFlags)
+ run(healthCheck, config.CommonFlags)
} else {
id, err := os.Hostname()
if err != nil {
@@ -125,8 +85,8 @@ func main() {
}
id = id + "_" + string(uuid.NewUUID())
- config := common.CreateKubeConfigOrDie(commonFlags.KubeConfig, float32(commonFlags.KubeApiQps), int(commonFlags.KubeApiBurst))
- kubeClient := kube_client.NewForConfigOrDie(config)
+ kubeConfig := common.CreateKubeConfigOrDie(config.CommonFlags.KubeConfig, float32(config.CommonFlags.KubeApiQps), int(config.CommonFlags.KubeApiBurst))
+ kubeClient := kube_client.NewForConfigOrDie(kubeConfig)
lock, err := resourcelock.New(
leaderElection.ResourceLock,
@@ -151,7 +111,7 @@ func main() {
ReleaseOnCancel: true,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(_ context.Context) {
- run(healthCheck, commonFlags)
+ run(healthCheck, config.CommonFlags)
},
OnStoppedLeading: func() {
klog.Fatal("lost master")
@@ -182,12 +142,14 @@ func defaultLeaderElectionConfiguration() componentbaseconfig.LeaderElectionConf
func run(healthCheck *metrics.HealthCheck, commonFlag *common.CommonFlags) {
stopCh := make(chan struct{})
defer close(stopCh)
- config := common.CreateKubeConfigOrDie(commonFlag.KubeConfig, float32(commonFlag.KubeApiQps), int(commonFlag.KubeApiBurst))
- kubeClient := kube_client.NewForConfigOrDie(config)
- vpaClient := vpa_clientset.NewForConfigOrDie(config)
+
+ kubeConfig := common.CreateKubeConfigOrDie(commonFlag.KubeConfig, float32(commonFlag.KubeApiQps), int(commonFlag.KubeApiBurst))
+ kubeClient := kube_client.NewForConfigOrDie(kubeConfig)
+ vpaClient := vpa_clientset.NewForConfigOrDie(kubeConfig)
factory := informers.NewSharedInformerFactoryWithOptions(kubeClient, defaultResyncPeriod, informers.WithNamespace(commonFlag.VpaObjectNamespace))
- targetSelectorFetcher := target.NewVpaTargetSelectorFetcher(config, kubeClient, factory)
- controllerFetcher := controllerfetcher.NewControllerFetcher(config, kubeClient, factory, scaleCacheEntryFreshnessTime, scaleCacheEntryLifetime, scaleCacheEntryJitterFactor)
+ targetSelectorFetcher := target.NewVpaTargetSelectorFetcher(kubeConfig, kubeClient, factory)
+ controllerFetcher := controllerfetcher.NewControllerFetcher(kubeConfig, kubeClient, factory, scaleCacheEntryFreshnessTime, scaleCacheEntryLifetime, scaleCacheEntryJitterFactor)
+
var limitRangeCalculator limitrange.LimitRangeCalculator
limitRangeCalculator, err := limitrange.NewLimitsRangeCalculator(factory)
if err != nil {
@@ -205,8 +167,8 @@ func run(healthCheck *metrics.HealthCheck, commonFlag *common.CommonFlags) {
}
admissionControllerStatusNamespace := status.AdmissionControllerStatusNamespace
- if namespace != "" {
- admissionControllerStatusNamespace = namespace
+ if config.Namespace != "" {
+ admissionControllerStatusNamespace = config.Namespace
}
ignoredNamespaces := strings.Split(commonFlag.IgnoredVpaObjectNamespaces, ",")
@@ -219,12 +181,15 @@ func run(healthCheck *metrics.HealthCheck, commonFlag *common.CommonFlags) {
updater, err := updater.NewUpdater(
kubeClient,
vpaClient,
- *minReplicas,
- *evictionRateLimit,
- *evictionRateBurst,
- *evictionToleranceFraction,
- *useAdmissionControllerStatus,
- *inPlaceSkipDisruptionBudget,
+ config.MinReplicas,
+ config.EvictionRateLimit,
+ config.EvictionRateBurst,
+ config.EvictionToleranceFraction,
+ config.UseAdmissionControllerStatus,
+ config.InPlaceSkipDisruptionBudget,
+ config.DefaultUpdateThreshold,
+ config.PodLifetimeUpdateThreshold,
+ config.EvictAfterOOMThreshold,
admissionControllerStatusNamespace,
vpa_api_util.NewCappingRecommendationProcessor(limitRangeCalculator),
priority.NewScalingDirectionPodEvictionAdmission(),
@@ -243,9 +208,9 @@ func run(healthCheck *metrics.HealthCheck, commonFlag *common.CommonFlags) {
// Start updating health check endpoint.
healthCheck.StartMonitoring()
- ticker := time.Tick(*updaterInterval)
+ ticker := time.Tick(config.UpdaterInterval)
for range ticker {
- ctx, cancel := context.WithTimeout(context.Background(), *updaterInterval)
+ ctx, cancel := context.WithTimeout(context.Background(), config.UpdaterInterval)
updater.RunOnce(ctx)
healthCheck.UpdateLastActivity()
cancel()
diff --git a/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator.go b/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator.go
index 6a723cf2d293..77aa5c2a9eef 100644
--- a/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator.go
+++ b/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator.go
@@ -17,7 +17,6 @@ limitations under the License.
package priority
import (
- "flag"
"sort"
"strconv"
"strings"
@@ -34,15 +33,6 @@ import (
vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa"
)
-var (
- defaultUpdateThreshold = flag.Float64("pod-update-threshold", 0.1, "Ignore updates that have priority lower than the value of this flag")
-
- podLifetimeUpdateThreshold = flag.Duration("in-recommendation-bounds-eviction-lifetime-threshold", time.Hour*12, "Pods that live for at least that long can be evicted even if their request is within the [MinRecommended...MaxRecommended] range")
-
- evictAfterOOMThreshold = flag.Duration("evict-after-oom-threshold", 10*time.Minute,
- `The default duration to evict pods that have OOMed in less than evict-after-oom-threshold since start.`)
-)
-
// UpdatePriorityCalculator is responsible for prioritizing updates on pods.
- // It can returns a sorted list of pods in order of update priority.
+ // It can return a sorted list of pods in order of update priority.
// Update priority is proportional to fraction by which resources should be increased / decreased.
@@ -51,7 +41,7 @@ var (
type UpdatePriorityCalculator struct {
vpa *vpa_types.VerticalPodAutoscaler
pods []prioritizedPod
- config *UpdateConfig
+ config UpdateConfig
recommendationProcessor vpa_api_util.RecommendationProcessor
priorityProcessor PriorityProcessor
}
@@ -60,7 +50,9 @@ type UpdatePriorityCalculator struct {
type UpdateConfig struct {
// MinChangePriority is the minimum change priority that will trigger a update.
// TODO: should have separate for Mem and CPU?
- MinChangePriority float64
+ MinChangePriority float64
+ PodLifetimeUpdateThreshold time.Duration
+ EvictAfterOOMThreshold time.Duration
}
// NewUpdatePriorityCalculator creates new UpdatePriorityCalculator for the given VPA object
@@ -68,12 +60,9 @@ type UpdateConfig struct {
// If the vpa resource policy is nil, there will be no policy restriction on update.
- // If the given update config is nil, default values are used.
+ // The given update config supplies the thresholds used when prioritizing updates.
func NewUpdatePriorityCalculator(vpa *vpa_types.VerticalPodAutoscaler,
- config *UpdateConfig,
+ config UpdateConfig,
recommendationProcessor vpa_api_util.RecommendationProcessor,
priorityProcessor PriorityProcessor) UpdatePriorityCalculator {
- if config == nil {
- config = &UpdateConfig{MinChangePriority: *defaultUpdateThreshold}
- }
return UpdatePriorityCalculator{
vpa: vpa,
config: config,
@@ -129,7 +118,7 @@ func (calc *UpdatePriorityCalculator) AddPod(pod *corev1.Pod, now time.Time) {
klog.V(4).InfoS("Not updating pod, missing field pod.Status.StartTime", "pod", klog.KObj(pod))
return
}
- if now.Before(pod.Status.StartTime.Add(*podLifetimeUpdateThreshold)) {
+ if now.Before(pod.Status.StartTime.Add(calc.config.PodLifetimeUpdateThreshold)) {
klog.V(4).InfoS("Not updating a short-lived pod, request within recommended range", "pod", klog.KObj(pod))
return
}
@@ -205,7 +194,7 @@ func (calc *UpdatePriorityCalculator) GetProcessedRecommendationTargets(r *vpa_t
// if the PerVPAConfig feature flag is enabled and the value is set, otherwise
- // falls back to the global evictAfterOOMThreshold flag.
+ // falls back to the configured EvictAfterOOMThreshold value.
func (calc *UpdatePriorityCalculator) getEvictOOMThreshold() time.Duration {
- evictOOMThreshold := *evictAfterOOMThreshold
+ evictOOMThreshold := calc.config.EvictAfterOOMThreshold
if calc.vpa.Spec.UpdatePolicy == nil || calc.vpa.Spec.UpdatePolicy.EvictAfterOOMSeconds == nil {
return evictOOMThreshold
diff --git a/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator_test.go b/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator_test.go
index c8d582e0c6e5..e9ae6db34925 100644
--- a/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator_test.go
+++ b/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator_test.go
@@ -35,6 +35,14 @@ const (
containerName = "container1"
)
+var (
+ updateconfig = UpdateConfig{
+ MinChangePriority: 0.1,
+ PodLifetimeUpdateThreshold: time.Hour * 12,
+ EvictAfterOOMThreshold: 10 * time.Minute,
+ }
+)
+
// TODO(bskiba): Refactor the SortPriority tests as a testcase list test.
func TestSortPriority(t *testing.T) {
pod1 := test.Pod().WithName("POD1").AddContainer(test.Container().WithName(containerName).WithCPURequest(resource.MustParse("2")).Get()).Get()
@@ -50,7 +58,7 @@ func TestSortPriority(t *testing.T) {
"POD3": {ResourceDiff: 9.0},
"POD4": {ResourceDiff: 2.33},
})
- calculator := NewUpdatePriorityCalculator(vpa, nil, &test.FakeRecommendationProcessor{}, priorityProcessor)
+ calculator := NewUpdatePriorityCalculator(vpa, updateconfig, &test.FakeRecommendationProcessor{}, priorityProcessor)
timestampNow := pod1.Status.StartTime.Add(time.Hour * 24)
calculator.AddPod(pod1, timestampNow)
@@ -74,7 +82,7 @@ func TestSortPriorityResourcesDecrease(t *testing.T) {
"POD2": {ScaleUp: false, ResourceDiff: 0.25},
"POD3": {ScaleUp: false, ResourceDiff: 0.5},
})
- calculator := NewUpdatePriorityCalculator(vpa, nil, &test.FakeRecommendationProcessor{}, priorityProcessor)
+ calculator := NewUpdatePriorityCalculator(vpa, updateconfig, &test.FakeRecommendationProcessor{}, priorityProcessor)
timestampNow := pod1.Status.StartTime.Add(time.Hour * 24)
calculator.AddPod(pod1, timestampNow)
@@ -96,7 +104,7 @@ func TestUpdateNotRequired(t *testing.T) {
priorityProcessor := NewFakeProcessor(map[string]PodPriority{"POD1": {
ResourceDiff: 0.0,
}})
- calculator := NewUpdatePriorityCalculator(vpa, nil, &test.FakeRecommendationProcessor{},
+ calculator := NewUpdatePriorityCalculator(vpa, updateconfig, &test.FakeRecommendationProcessor{},
priorityProcessor)
timestampNow := pod1.Status.StartTime.Add(time.Hour * 24)
@@ -119,7 +127,7 @@ func TestUseProcessor(t *testing.T) {
"POD1": {ResourceDiff: 0.0},
})
calculator := NewUpdatePriorityCalculator(
- vpa, nil, recommendationProcessor, priorityProcessor)
+ vpa, updateconfig, recommendationProcessor, priorityProcessor)
timestampNow := pod1.Status.StartTime.Add(time.Hour * 24)
calculator.AddPod(pod1, timestampNow)
@@ -152,7 +160,7 @@ func TestUpdateLonglivedPods(t *testing.T) {
})
calculator := NewUpdatePriorityCalculator(
- vpa, &UpdateConfig{MinChangePriority: 0.5}, &test.FakeRecommendationProcessor{}, priorityProcessor)
+ vpa, UpdateConfig{MinChangePriority: 0.5, PodLifetimeUpdateThreshold: time.Hour * 12, EvictAfterOOMThreshold: 10 * time.Minute}, &test.FakeRecommendationProcessor{}, priorityProcessor)
// Pretend that the test pods started 13 hours ago.
timestampNow := pods[0].Status.StartTime.Add(time.Hour * 13)
@@ -185,8 +193,10 @@ func TestUpdateShortlivedPods(t *testing.T) {
"POD3": {OutsideRecommendedRange: true, ScaleUp: false, ResourceDiff: 0.9},
})
+ updateconfig := UpdateConfig{MinChangePriority: 0.5, PodLifetimeUpdateThreshold: time.Hour * 12, EvictAfterOOMThreshold: 10 * time.Minute}
+
calculator := NewUpdatePriorityCalculator(
- vpa, &UpdateConfig{MinChangePriority: 0.5}, &test.FakeRecommendationProcessor{}, priorityProcessor)
+ vpa, updateconfig, &test.FakeRecommendationProcessor{}, priorityProcessor)
// Pretend that the test pods started 11 hours ago.
timestampNow := pods[0].Status.StartTime.Add(time.Hour * 11)
@@ -225,8 +235,10 @@ func TestUpdatePodWithQuickOOM(t *testing.T) {
"POD1": {ScaleUp: true, ResourceDiff: 0.25},
})
+ updateconfig := UpdateConfig{MinChangePriority: 0.5, PodLifetimeUpdateThreshold: time.Hour * 12, EvictAfterOOMThreshold: 10 * time.Minute}
+
calculator := NewUpdatePriorityCalculator(
- vpa, &UpdateConfig{MinChangePriority: 0.5}, &test.FakeRecommendationProcessor{}, priorityProcessor)
+ vpa, updateconfig, &test.FakeRecommendationProcessor{}, priorityProcessor)
calculator.AddPod(pod, timestampNow)
result := calculator.GetSortedPods(NewDefaultPodEvictionAdmission())
@@ -261,8 +273,10 @@ func TestDontUpdatePodWithQuickOOMNoResourceChange(t *testing.T) {
"POD1": {ScaleUp: true, ResourceDiff: 0.0},
})
+ updateconfig := UpdateConfig{MinChangePriority: 0.1, PodLifetimeUpdateThreshold: time.Hour * 12, EvictAfterOOMThreshold: 10 * time.Minute}
+
calculator := NewUpdatePriorityCalculator(
- vpa, &UpdateConfig{MinChangePriority: 0.1}, &test.FakeRecommendationProcessor{}, priorityProcessor)
+ vpa, updateconfig, &test.FakeRecommendationProcessor{}, priorityProcessor)
calculator.AddPod(pod, timestampNow)
result := calculator.GetSortedPods(NewDefaultPodEvictionAdmission())
@@ -296,8 +310,9 @@ func TestDontUpdatePodWithOOMAfterLongRun(t *testing.T) {
priorityProcessor := NewFakeProcessor(map[string]PodPriority{
"POD1": {ScaleUp: true, ResourceDiff: 0.0},
})
+ updateconfig := UpdateConfig{MinChangePriority: 0.5, PodLifetimeUpdateThreshold: time.Hour * 12, EvictAfterOOMThreshold: 10 * time.Minute}
calculator := NewUpdatePriorityCalculator(
- vpa, &UpdateConfig{MinChangePriority: 0.5}, &test.FakeRecommendationProcessor{}, priorityProcessor)
+ vpa, updateconfig, &test.FakeRecommendationProcessor{}, priorityProcessor)
calculator.AddPod(pod, timestampNow)
result := calculator.GetSortedPods(NewDefaultPodEvictionAdmission())
@@ -357,8 +372,9 @@ func TestQuickOOM_VpaOvservedContainers(t *testing.T) {
priorityProcessor := NewFakeProcessor(map[string]PodPriority{
"POD1": {ScaleUp: true, ResourceDiff: 0.25}})
+ updateconfig := UpdateConfig{MinChangePriority: 0.5, PodLifetimeUpdateThreshold: time.Hour * 12, EvictAfterOOMThreshold: 10 * time.Minute}
calculator := NewUpdatePriorityCalculator(
- vpa, &UpdateConfig{MinChangePriority: 0.5}, &test.FakeRecommendationProcessor{}, priorityProcessor)
+ vpa, updateconfig, &test.FakeRecommendationProcessor{}, priorityProcessor)
calculator.AddPod(pod, timestampNow)
result := calculator.GetSortedPods(NewDefaultPodEvictionAdmission())
@@ -447,8 +463,9 @@ func TestQuickOOM_ContainerResourcePolicy(t *testing.T) {
}
priorityProcessor := NewFakeProcessor(map[string]PodPriority{
"POD1": {ScaleUp: true, ResourceDiff: 0.25}})
+ updateconfig := UpdateConfig{MinChangePriority: 0.5, PodLifetimeUpdateThreshold: time.Hour * 12, EvictAfterOOMThreshold: 10 * time.Minute}
calculator := NewUpdatePriorityCalculator(
- vpa, &UpdateConfig{MinChangePriority: 0.5}, &test.FakeRecommendationProcessor{}, priorityProcessor)
+ vpa, updateconfig, &test.FakeRecommendationProcessor{}, priorityProcessor)
calculator.AddPod(pod, timestampNow)
result := calculator.GetSortedPods(NewDefaultPodEvictionAdmission())
@@ -459,7 +476,8 @@ func TestQuickOOM_ContainerResourcePolicy(t *testing.T) {
}
func TestNoPods(t *testing.T) {
- calculator := NewUpdatePriorityCalculator(nil, nil, &test.FakeRecommendationProcessor{},
+ updateconfig := UpdateConfig{MinChangePriority: 0.5, PodLifetimeUpdateThreshold: time.Hour * 12, EvictAfterOOMThreshold: 10 * time.Minute}
+ calculator := NewUpdatePriorityCalculator(nil, updateconfig, &test.FakeRecommendationProcessor{},
NewFakeProcessor(map[string]PodPriority{}))
result := calculator.GetSortedPods(NewDefaultPodEvictionAdmission())
assert.Exactly(t, []*corev1.Pod{}, result)
@@ -486,7 +504,7 @@ func TestAdmission(t *testing.T) {
"POD2": {ScaleUp: true, ResourceDiff: 1.5},
"POD3": {ScaleUp: true, ResourceDiff: 9.0},
"POD4": {ScaleUp: true, ResourceDiff: 2.33}})
- calculator := NewUpdatePriorityCalculator(vpa, nil,
+ calculator := NewUpdatePriorityCalculator(vpa, updateconfig,
&test.FakeRecommendationProcessor{}, priorityProcessor)
timestampNow := pod1.Status.StartTime.Add(time.Hour * 24)
@@ -623,7 +641,7 @@ func TestAddPodLogs(t *testing.T) {
vpa := test.VerticalPodAutoscaler().WithContainer(containerName).WithTarget("10", "").Get()
priorityProcessor := NewFakeProcessor(map[string]PodPriority{
"POD1": {ScaleUp: true, ResourceDiff: 4.0}})
- calculator := NewUpdatePriorityCalculator(vpa, nil,
+ calculator := NewUpdatePriorityCalculator(vpa, updateconfig,
&test.FakeRecommendationProcessor{}, priorityProcessor)
actualLog := calculator.GetProcessedRecommendationTargets(tc.givenRec)