From 19510a8af36954f6bcf8eb2d91baad6df4da923a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Jan 2025 16:34:42 -0800 Subject: [PATCH 01/18] chore(deps): bump the go-deps group across 1 directory with 15 updates (#7567) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 60 +++++++++++------------ go.sum | 151 +++++++++++++++++++++++++++++++++------------------------ 2 files changed, 118 insertions(+), 93 deletions(-) diff --git a/go.mod b/go.mod index 74dab8736af0..245ff1c46ced 100644 --- a/go.mod +++ b/go.mod @@ -4,20 +4,20 @@ go 1.23.2 require ( github.com/Pallinder/go-randomdata v1.2.0 - github.com/PuerkitoBio/goquery v1.10.0 + github.com/PuerkitoBio/goquery v1.10.1 github.com/avast/retry-go v3.0.0+incompatible - github.com/aws/aws-sdk-go-v2 v1.32.6 - github.com/aws/aws-sdk-go-v2/config v1.28.6 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 - github.com/aws/aws-sdk-go-v2/service/ec2 v1.197.0 - github.com/aws/aws-sdk-go-v2/service/eks v1.54.0 - github.com/aws/aws-sdk-go-v2/service/fis v1.31.2 - github.com/aws/aws-sdk-go-v2/service/iam v1.38.2 - github.com/aws/aws-sdk-go-v2/service/pricing v1.32.7 - github.com/aws/aws-sdk-go-v2/service/sqs v1.37.2 - github.com/aws/aws-sdk-go-v2/service/ssm v1.56.1 - github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 - github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.29.8 + github.com/aws/aws-sdk-go-v2 v1.32.7 + github.com/aws/aws-sdk-go-v2/config v1.28.7 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.198.1 + github.com/aws/aws-sdk-go-v2/service/eks v1.56.0 + github.com/aws/aws-sdk-go-v2/service/fis v1.31.3 + github.com/aws/aws-sdk-go-v2/service/iam v1.38.3 + github.com/aws/aws-sdk-go-v2/service/pricing v1.32.8 + github.com/aws/aws-sdk-go-v2/service/sqs v1.37.4 + github.com/aws/aws-sdk-go-v2/service/ssm v1.56.2 + github.com/aws/aws-sdk-go-v2/service/sts v1.33.3 + github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.29.9 github.com/aws/karpenter-provider-aws/tools/kompat v0.0.0-20240410220356-6b868db24881 github.com/aws/smithy-go v1.22.1 github.com/awslabs/amazon-eks-ami/nodeadm v0.0.0-20240229193347-cfab22a10647 @@ -26,8 +26,8 @@ require ( github.com/imdario/mergo v0.3.16 github.com/jonathan-innis/aws-sdk-go-prometheus v0.1.1 github.com/mitchellh/hashstructure/v2 v2.0.2 - github.com/onsi/ginkgo/v2 v2.22.0 - github.com/onsi/gomega v1.36.1 + github.com/onsi/ginkgo/v2 v2.22.2 + github.com/onsi/gomega v1.36.2 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pelletier/go-toml/v2 v2.2.3 github.com/prometheus/client_golang v1.20.5 @@ -49,16 +49,16 @@ require ( require ( github.com/Masterminds/semver/v3 v3.2.1 // indirect - github.com/andybalholm/cascadia v1.3.2 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.47 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect + github.com/andybalholm/cascadia v1.3.3 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.48 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect - 
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.6 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.7 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.8 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.7 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -76,7 +76,7 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect + github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect github.com/google/uuid v1.6.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect @@ -99,15 +99,15 @@ require ( github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect - golang.org/x/net v0.30.0 // indirect + golang.org/x/net v0.33.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.20.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.8.0 // indirect - golang.org/x/tools v0.26.0 // indirect + golang.org/x/tools v0.28.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/protobuf v1.35.1 // indirect + google.golang.org/protobuf v1.36.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 5df86c3e428f..e75e2847827a 100644 --- a/go.sum +++ b/go.sum @@ -2,54 +2,54 @@ github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0 github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Pallinder/go-randomdata v1.2.0 h1:DZ41wBchNRb/0GfsePLiSwb0PHZmT67XY00lCDlaYPg= github.com/Pallinder/go-randomdata v1.2.0/go.mod h1:yHmJgulpD2Nfrm0cR9tI/+oAgRqCQQixsA8HyRZfV9Y= -github.com/PuerkitoBio/goquery v1.10.0 h1:6fiXdLuUvYs2OJSvNRqlNPoBm6YABE226xrbavY5Wv4= -github.com/PuerkitoBio/goquery v1.10.0/go.mod h1:TjZZl68Q3eGHNBA8CWaxAN7rOU1EbDz3CWuolcO5Yu4= -github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss= -github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= +github.com/PuerkitoBio/goquery v1.10.1 h1:Y8JGYUkXWTGRB6Ars3+j3kN0xg1YqqlwvdTV8WTFQcU= +github.com/PuerkitoBio/goquery v1.10.1/go.mod h1:IYiHrOMps66ag56LEH7QYDDupKXyo5A8qrjIx3ZtujY= +github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM= +github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA= github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= 
-github.com/aws/aws-sdk-go-v2 v1.32.6 h1:7BokKRgRPuGmKkFMhEg/jSul+tB9VvXhcViILtfG8b4= -github.com/aws/aws-sdk-go-v2 v1.32.6/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= -github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo= -github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko= -github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= -github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE= +github.com/aws/aws-sdk-go-v2 v1.32.7 h1:ky5o35oENWi0JYWUZkB7WYvVPP+bcRF5/Iq7JWSb5Rw= +github.com/aws/aws-sdk-go-v2 v1.32.7/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= +github.com/aws/aws-sdk-go-v2/config v1.28.7 h1:GduUnoTXlhkgnxTD93g1nv4tVPILbdNQOzav+Wpg7AE= +github.com/aws/aws-sdk-go-v2/config v1.28.7/go.mod h1:vZGX6GVkIE8uECSUHB6MWAUsd4ZcG2Yq/dMa4refR3M= +github.com/aws/aws-sdk-go-v2/credentials v1.17.48 h1:IYdLD1qTJ0zanRavulofmqut4afs45mOWEI+MzZtTfQ= +github.com/aws/aws-sdk-go-v2/credentials v1.17.48/go.mod h1:tOscxHN3CGmuX9idQ3+qbkzrjVIx32lqDSU1/0d/qXs= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 h1:kqOrpojG71DxJm/KDPO+Z/y1phm1JlC8/iT+5XRmAn8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22/go.mod h1:NtSFajXVVL8TA2QNngagVZmUtXciyrHOt7xgz4faS/M= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26 h1:I/5wmGMffY4happ8NOCuIUEWGUvvFp5NSeQcXl9RHcI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26/go.mod h1:FR8f4turZtNy6baO0KJ5FJUmXH/cSkI9fOngs0yl6mA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26 h1:zXFLuEuMMUOvEARXFUVJdfqZ4bvvSgdGRq/ATcrQxzM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26/go.mod h1:3o2Wpy0bogG1kyOPrgkXA8pgIfEEv0+m19O9D5+W8y8= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.197.0 h1:Bo20e0LV3Qbkr7yZVGuOxvWbf9Vf3nqss5WyerHr6Ic= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.197.0/go.mod h1:00zqVNJFK6UASrTnuvjJHJuaqUdkVz5tW8Ip+VhzuNg= -github.com/aws/aws-sdk-go-v2/service/eks v1.54.0 h1:78/Za9/4c5boz78pcKvJV4WfzVHcFwebpfAUzS6XYUg= -github.com/aws/aws-sdk-go-v2/service/eks v1.54.0/go.mod h1:ZzOjZXGGUQxOq+T3xmfPLKCZe4OaB5vm1LdGaC8IPn4= -github.com/aws/aws-sdk-go-v2/service/fis v1.31.2 h1:sgNhe7x7r4SffGdtbZteb0AHqCmw5ZHDIiyMCEl6BWs= -github.com/aws/aws-sdk-go-v2/service/fis v1.31.2/go.mod h1:MDGBuQGY9Y4zvv6Bi5tDF4Am+D7fRCvk+nUVndGr0l0= -github.com/aws/aws-sdk-go-v2/service/iam v1.38.2 h1:8iFKuRj/FJipy/aDZ2lbq0DYuEHdrxp0qVsdi+ZEwnE= -github.com/aws/aws-sdk-go-v2/service/iam v1.38.2/go.mod h1:UBe4z0VZnbXGp6xaCW1ulE9pndjfpsnrU206rWZcR0Y= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.198.1 
h1:YbNopxjd9baM83YEEmkaYHi+NuJt0AszeaSLqo0CVr0= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.198.1/go.mod h1:mwr3iRm8u1+kkEx4ftDM2Q6Yr0XQFBKrP036ng+k5Lk= +github.com/aws/aws-sdk-go-v2/service/eks v1.56.0 h1:x31cGGE/t/QkrHVh5m2uWvYwDiaDXpj88nh6OdnI5r0= +github.com/aws/aws-sdk-go-v2/service/eks v1.56.0/go.mod h1:kNUWaiotRWCnfQlprrxSMg8ALqbZyA9xLCwKXuLumSk= +github.com/aws/aws-sdk-go-v2/service/fis v1.31.3 h1:Pyde+VIhO71j5j+BXiwA2civiljvIRLkKFpCSEpw29E= +github.com/aws/aws-sdk-go-v2/service/fis v1.31.3/go.mod h1:lMzi+Vbnzlq6fPfIvHPWoX2LHKM2S2EOn5z6Vx71nmw= +github.com/aws/aws-sdk-go-v2/service/iam v1.38.3 h1:2sFIoFzU1IEL9epJWubJm9Dhrn45aTNEJuwsesaCGnk= +github.com/aws/aws-sdk-go-v2/service/iam v1.38.3/go.mod h1:KzlNINwfr/47tKkEhgk0r10/OZq3rjtyWy0txL3lM+I= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.6 h1:nbmKXZzXPJn41CcD4HsHsGWqvKjLKz9kWu6XxvLmf1s= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.6/go.mod h1:SJhcisfKfAawsdNQoZMBEjg+vyN2lH6rO6fP+T94z5Y= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug= -github.com/aws/aws-sdk-go-v2/service/pricing v1.32.7 h1:9UDHX1ZgcXUTAGcyxmw04r/6OVG/aUpQ7dZUziR+vTM= -github.com/aws/aws-sdk-go-v2/service/pricing v1.32.7/go.mod h1:68s1DYctoo30LibzEY6gLajXbQEhxpn49+zYFy+Q5Xs= -github.com/aws/aws-sdk-go-v2/service/sqs v1.37.2 h1:mFLfxLZB/TVQwNJAYox4WaxpIu+dFVIcExrmRmRCOhw= -github.com/aws/aws-sdk-go-v2/service/sqs v1.37.2/go.mod h1:GnvfTdlvcpD+or3oslHPOn4Mu6KaCwlCp+0p0oqWnrM= -github.com/aws/aws-sdk-go-v2/service/ssm v1.56.1 h1:cfVjoEwOMOJOI6VoRQua0nI0KjZV9EAnR8bKaMeSppE= -github.com/aws/aws-sdk-go-v2/service/ssm v1.56.1/go.mod h1:fGHwAnTdNrLKhgl+UEeq9uEL4n3Ng4MJucA+7Xi3sC4= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= -github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.29.8 h1:chzp64fl/hknlRR9jlstQDB4bYaf848v7KmzUB13omA= -github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.29.8/go.mod h1:6r72p62vXJL+0VTgk9rVV7i9+C0qTcx+HuL56XT9Pus= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.7 h1:EqGlayejoCRXmnVC6lXl6phCm9R2+k35e0gWsO9G5DI= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.7/go.mod h1:BTw+t+/E5F3ZnDai/wSOYM54WUVjSdewE7Jvwtb7o+w= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.7 h1:8eUsivBQzZHqe/3FE+cqwfH+0p5Jo8PFM/QYQSmeZ+M= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.7/go.mod h1:kLPQvGUmxn/fqiCrDeohwG33bq2pQpGeY62yRO6Nrh0= +github.com/aws/aws-sdk-go-v2/service/pricing v1.32.8 h1:R3X3UwwZKYLCNVVeJ+WLefvrjI5HonYCMlf40BYvJ8E= 
+github.com/aws/aws-sdk-go-v2/service/pricing v1.32.8/go.mod h1:4kkTK4zhY31emmt9VGgq3S+ElECNsiI5h6bqSBt71b0= +github.com/aws/aws-sdk-go-v2/service/sqs v1.37.4 h1:WpoMCoS4+qOkkuWQommvDRboKYzK91En6eXO/k5dXr0= +github.com/aws/aws-sdk-go-v2/service/sqs v1.37.4/go.mod h1:171mrsbgz6DahPMnLJzQiH3bXXrdsWhpE9USZiM19Lk= +github.com/aws/aws-sdk-go-v2/service/ssm v1.56.2 h1:MOxvXH2kRP5exvqJxAZ0/H9Ar51VmADJh95SgZE8u60= +github.com/aws/aws-sdk-go-v2/service/ssm v1.56.2/go.mod h1:RKWoqC9FlgMCkrfVOtgfqfwdaUIaq8H93UAt4xNaR0A= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.8 h1:CvuUmnXI7ebaUAhbJcDy9YQx8wHR69eZ9I7q5hszt/g= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.8/go.mod h1:XDeGv1opzwm8ubxddF0cgqkZWsyOtw4lr6dxwmb6YQg= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.7 h1:F2rBfNAL5UyswqoeWv9zs74N/NanhK16ydHW1pahX6E= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.7/go.mod h1:JfyQ0g2JG8+Krq0EuZNnRwX0mU0HrwY/tG6JNfcqh4k= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.3 h1:Xgv/hyNgvLda/M9l9qxXc4UFSgppnRczLxlMs5Ae/QY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.3/go.mod h1:5Gn+d+VaaRgsjewpMvGazt0WfcFO+Md4wLOuBfGR9Bc= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.29.9 h1:9zoIQ/6NA9b70dDvhYvi4IA3jcLDEu2UEALXLsvmQkI= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.29.9/go.mod h1:otxD6AyG1ABYxxhFX6eua+C4vntFe45igc3ake0mkuE= github.com/aws/karpenter-provider-aws/tools/kompat v0.0.0-20240410220356-6b868db24881 h1:m9rhsGhdepdQV96tZgfy68oU75AWAjOH8u65OefTjwA= github.com/aws/karpenter-provider-aws/tools/kompat v0.0.0-20240410220356-6b868db24881/go.mod h1:+Mk5k0b6HpKobxNq+B56DOhZ+I/NiPhd5MIBhQMSTSs= github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= @@ -102,8 +102,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= @@ -151,10 +151,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= -github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= -github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/ginkgo/v2 v2.22.2 
h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= +github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= +github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= +github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= @@ -210,12 +210,19 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -223,9 +230,12 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod 
h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -233,6 +243,9 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -243,22 +256,32 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -267,16 +290,18 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= +google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= From 7131b10752c417c0d748290948dff84b391c172d Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 7 Jan 2025 09:43:07 -0800 Subject: [PATCH 02/18] chore: Update data from 
AWS APIs (#7532) Co-authored-by: APICodeGen Co-authored-by: Amanuel Engeda <74629455+engedaam@users.noreply.github.com> --- .../instancetype/zz_generated.bandwidth.go | 1661 +++++++++-------- .../instancetype/zz_generated.vpclimits.go | 242 ++- .../pricing/zz_generated.pricing_aws.go | 8 +- .../zz_generated.pricing_aws_us_gov.go | 10 +- 4 files changed, 1090 insertions(+), 831 deletions(-) diff --git a/pkg/providers/instancetype/zz_generated.bandwidth.go b/pkg/providers/instancetype/zz_generated.bandwidth.go index 18cf57c36ce8..1e7aa1a07cb0 100644 --- a/pkg/providers/instancetype/zz_generated.bandwidth.go +++ b/pkg/providers/instancetype/zz_generated.bandwidth.go @@ -67,833 +67,838 @@ var ( // t2.nano has vague bandwidth information, bandwidth is Low to Moderate // t2.small has vague bandwidth information, bandwidth is Low to Moderate // t2.xlarge has vague bandwidth information, bandwidth is Moderate - "t3.nano": 32, - "t3a.nano": 32, - "t4g.nano": 32, - "t3.micro": 64, - "t3a.micro": 64, - "t4g.micro": 64, - "t3.small": 128, - "t3a.small": 128, - "t4g.small": 128, - "t3.medium": 256, - "t3a.medium": 256, - "t4g.medium": 256, - "c7a.medium": 390, - "c7i-flex.large": 390, - "m7a.medium": 390, - "m7i-flex.large": 390, - "r7a.medium": 390, - "a1.medium": 500, - "c6g.medium": 500, - "c6gd.medium": 500, - "m6g.medium": 500, - "m6gd.medium": 500, - "r6g.medium": 500, - "r6gd.medium": 500, - "x2gd.medium": 500, - "t3.large": 512, - "t3a.large": 512, - "t4g.large": 512, - "c7g.medium": 520, - "c7gd.medium": 520, - "c8g.medium": 520, - "m7g.medium": 520, - "m7gd.medium": 520, - "m8g.medium": 520, - "r7g.medium": 520, - "r7gd.medium": 520, - "r8g.medium": 520, - "x8g.medium": 520, - "x1e.xlarge": 625, - "a1.large": 750, - "c5.large": 750, - "c5a.large": 750, - "c5ad.large": 750, - "c5d.large": 750, - "c6g.large": 750, - "c6gd.large": 750, - "i3.large": 750, - "m5.large": 750, - "m5a.large": 750, - "m5ad.large": 750, - "m5d.large": 750, - "m6g.large": 750, - "m6gd.large": 750, - "r4.large": 750, - "r5.large": 750, - "r5a.large": 750, - "r5ad.large": 750, - "r5b.large": 750, - "r5d.large": 750, - "r6g.large": 750, - "r6gd.large": 750, - "x2gd.large": 750, - "z1d.large": 750, - "c6a.large": 781, - "c6i.large": 781, - "c6id.large": 781, - "c7a.large": 781, - "c7i-flex.xlarge": 781, - "c7i.large": 781, - "i4g.large": 781, - "i4i.large": 781, - "m6a.large": 781, - "m6i.large": 781, - "m6id.large": 781, - "m7a.large": 781, - "m7i-flex.xlarge": 781, - "m7i.large": 781, - "r6a.large": 781, - "r6i.large": 781, - "r6id.large": 781, - "r7a.large": 781, - "r7i.large": 781, - "r7iz.large": 781, - "c7g.large": 937, - "c7gd.large": 937, - "c8g.large": 937, - "m7g.large": 937, - "m7gd.large": 937, - "m8g.large": 937, - "r7g.large": 937, - "r7gd.large": 937, - "r8g.large": 937, - "x8g.large": 937, - "t3.xlarge": 1024, - "t3a.xlarge": 1024, - "t4g.xlarge": 1024, - "i8g.large": 1172, - "a1.xlarge": 1250, - "c5.xlarge": 1250, - "c5a.xlarge": 1250, - "c5ad.xlarge": 1250, - "c5d.xlarge": 1250, - "c6g.xlarge": 1250, - "c6gd.xlarge": 1250, - "g5g.xlarge": 1250, - "i3.xlarge": 1250, - "m5.xlarge": 1250, - "m5a.xlarge": 1250, - "m5ad.xlarge": 1250, - "m5d.xlarge": 1250, - "m6g.xlarge": 1250, - "m6gd.xlarge": 1250, - "r4.xlarge": 1250, - "r5.xlarge": 1250, - "r5a.xlarge": 1250, - "r5ad.xlarge": 1250, - "r5b.xlarge": 1250, - "r5d.xlarge": 1250, - "r6g.xlarge": 1250, - "r6gd.xlarge": 1250, - "x1e.2xlarge": 1250, - "x2gd.xlarge": 1250, - "z1d.xlarge": 1250, - "c6a.xlarge": 1562, - "c6i.xlarge": 1562, - "c6id.xlarge": 1562, - 
"c7a.xlarge": 1562, - "c7i-flex.2xlarge": 1562, - "c7i.xlarge": 1562, - "is4gen.medium": 1562, - "m6a.xlarge": 1562, - "m6i.xlarge": 1562, - "m6id.xlarge": 1562, - "m7a.xlarge": 1562, - "m7i-flex.2xlarge": 1562, - "m7i.xlarge": 1562, - "r6a.xlarge": 1562, - "r6i.xlarge": 1562, - "r6id.xlarge": 1562, - "r7a.xlarge": 1562, - "r7i.xlarge": 1562, - "r7iz.xlarge": 1562, - "c6gn.medium": 1600, - "i4g.xlarge": 1875, - "i4i.xlarge": 1875, - "x2iedn.xlarge": 1875, - "c7g.xlarge": 1876, - "c7gd.xlarge": 1876, - "c8g.xlarge": 1876, - "m7g.xlarge": 1876, - "m7gd.xlarge": 1876, - "m8g.xlarge": 1876, - "r7g.xlarge": 1876, - "r7gd.xlarge": 1876, - "r8g.xlarge": 1876, - "x8g.xlarge": 1876, - "g4ad.xlarge": 2000, - "t3.2xlarge": 2048, - "t3a.2xlarge": 2048, - "t4g.2xlarge": 2048, - "i7ie.large": 2083, - "inf2.xlarge": 2083, - "i3en.large": 2100, - "m5dn.large": 2100, - "m5n.large": 2100, - "r5dn.large": 2100, - "r5n.large": 2100, - "i8g.xlarge": 2344, - "a1.2xlarge": 2500, - "c5.2xlarge": 2500, - "c5a.2xlarge": 2500, - "c5ad.2xlarge": 2500, - "c5d.2xlarge": 2500, - "c6g.2xlarge": 2500, - "c6gd.2xlarge": 2500, - "g5.xlarge": 2500, - "g5g.2xlarge": 2500, - "g6.xlarge": 2500, - "g6e.xlarge": 2500, - "h1.2xlarge": 2500, - "i3.2xlarge": 2500, - "m5.2xlarge": 2500, - "m5a.2xlarge": 2500, - "m5ad.2xlarge": 2500, - "m5d.2xlarge": 2500, - "m6g.2xlarge": 2500, - "m6gd.2xlarge": 2500, - "r4.2xlarge": 2500, - "r5.2xlarge": 2500, - "r5a.2xlarge": 2500, - "r5ad.2xlarge": 2500, - "r5b.2xlarge": 2500, - "r5d.2xlarge": 2500, - "r6g.2xlarge": 2500, - "r6gd.2xlarge": 2500, - "x1e.4xlarge": 2500, - "x2gd.2xlarge": 2500, - "z1d.2xlarge": 2500, - "c5n.large": 3000, - "c6gn.large": 3000, - "d3.xlarge": 3000, - "m5zn.large": 3000, - "vt1.3xlarge": 3120, - "c6a.2xlarge": 3125, - "c6i.2xlarge": 3125, - "c6id.2xlarge": 3125, - "c6in.large": 3125, - "c7a.2xlarge": 3125, - "c7gn.medium": 3125, - "c7i-flex.4xlarge": 3125, - "c7i.2xlarge": 3125, - "im4gn.large": 3125, - "is4gen.large": 3125, - "m6a.2xlarge": 3125, - "m6i.2xlarge": 3125, - "m6id.2xlarge": 3125, - "m6idn.large": 3125, - "m6in.large": 3125, - "m7a.2xlarge": 3125, - "m7i-flex.4xlarge": 3125, - "m7i.2xlarge": 3125, - "r6a.2xlarge": 3125, - "r6i.2xlarge": 3125, - "r6id.2xlarge": 3125, - "r6idn.large": 3125, - "r6in.large": 3125, - "r7a.2xlarge": 3125, - "r7i.2xlarge": 3125, - "r7iz.2xlarge": 3125, - "trn1.2xlarge": 3125, - "c7g.2xlarge": 3750, - "c7gd.2xlarge": 3750, - "c8g.2xlarge": 3750, - "m7g.2xlarge": 3750, - "m7gd.2xlarge": 3750, - "m8g.2xlarge": 3750, - "r7g.2xlarge": 3750, - "r7gd.2xlarge": 3750, - "r8g.2xlarge": 3750, - "x8g.2xlarge": 3750, - "m5dn.xlarge": 4100, - "m5n.xlarge": 4100, - "r5dn.xlarge": 4100, - "r5n.xlarge": 4100, - "i7ie.xlarge": 4166, - "g4ad.2xlarge": 4167, - "i3en.xlarge": 4200, - "i4g.2xlarge": 4687, - "i4i.2xlarge": 4687, - "i8g.2xlarge": 4688, - "a1.4xlarge": 5000, - "a1.metal": 5000, - "c5.4xlarge": 5000, - "c5a.4xlarge": 5000, - "c5ad.4xlarge": 5000, - "c5d.4xlarge": 5000, - "c5n.xlarge": 5000, - "c6g.4xlarge": 5000, - "c6gd.4xlarge": 5000, - "g4dn.xlarge": 5000, - "g5.2xlarge": 5000, - "g5g.4xlarge": 5000, - "g6.2xlarge": 5000, - "g6e.2xlarge": 5000, - "h1.4xlarge": 5000, - "i3.4xlarge": 5000, - "inf1.2xlarge": 5000, - "inf1.xlarge": 5000, - "m5.4xlarge": 5000, - "m5a.4xlarge": 5000, - "m5ad.4xlarge": 5000, - "m5d.4xlarge": 5000, - "m5zn.xlarge": 5000, - "m6g.4xlarge": 5000, - "m6gd.4xlarge": 5000, - "r4.4xlarge": 5000, - "r5.4xlarge": 5000, - "r5a.4xlarge": 5000, - "r5ad.4xlarge": 5000, - "r5b.4xlarge": 5000, - "r5d.4xlarge": 5000, - 
"r6g.4xlarge": 5000, - "r6gd.4xlarge": 5000, - "x1e.8xlarge": 5000, - "x2gd.4xlarge": 5000, - "x2iedn.2xlarge": 5000, - "z1d.3xlarge": 5000, - "d3.2xlarge": 6000, - "d3en.xlarge": 6000, - "c6a.4xlarge": 6250, - "c6i.4xlarge": 6250, - "c6id.4xlarge": 6250, - "c6in.xlarge": 6250, - "c7a.4xlarge": 6250, - "c7gn.large": 6250, - "c7i-flex.8xlarge": 6250, - "c7i.4xlarge": 6250, - "im4gn.xlarge": 6250, - "is4gen.xlarge": 6250, - "m6a.4xlarge": 6250, - "m6i.4xlarge": 6250, - "m6id.4xlarge": 6250, - "m6idn.xlarge": 6250, - "m6in.xlarge": 6250, - "m7a.4xlarge": 6250, - "m7i-flex.8xlarge": 6250, - "m7i.4xlarge": 6250, - "r6a.4xlarge": 6250, - "r6i.4xlarge": 6250, - "r6id.4xlarge": 6250, - "r6idn.xlarge": 6250, - "r6in.xlarge": 6250, - "r7a.4xlarge": 6250, - "r7i.4xlarge": 6250, - "r7iz.4xlarge": 6250, - "vt1.6xlarge": 6250, - "c6gn.xlarge": 6300, - "c7g.4xlarge": 7500, - "c7gd.4xlarge": 7500, - "c8g.4xlarge": 7500, - "m5a.8xlarge": 7500, - "m5ad.8xlarge": 7500, - "m7g.4xlarge": 7500, - "m7gd.4xlarge": 7500, - "m8g.4xlarge": 7500, - "r5a.8xlarge": 7500, - "r5ad.8xlarge": 7500, - "r7g.4xlarge": 7500, - "r7gd.4xlarge": 7500, - "r8g.4xlarge": 7500, - "x8g.4xlarge": 7500, - "m5dn.2xlarge": 8125, - "m5n.2xlarge": 8125, - "r5dn.2xlarge": 8125, - "r5n.2xlarge": 8125, - "g4ad.4xlarge": 8333, - "i7ie.2xlarge": 8333, - "i3en.2xlarge": 8400, - "i4g.4xlarge": 9375, - "i4i.4xlarge": 9375, - "i8g.4xlarge": 9375, - "c3.8xlarge": 10000, - "c4.8xlarge": 10000, - "c5a.8xlarge": 10000, - "c5ad.8xlarge": 10000, - "c5n.2xlarge": 10000, - "d2.8xlarge": 10000, - "g3.8xlarge": 10000, - "g4dn.2xlarge": 10000, - "g5.4xlarge": 10000, - "g6.4xlarge": 10000, - "gr6.4xlarge": 10000, - "h1.8xlarge": 10000, - "i2.8xlarge": 10000, - "i3.8xlarge": 10000, - "m4.10xlarge": 10000, - "m5.8xlarge": 10000, - "m5a.12xlarge": 10000, - "m5ad.12xlarge": 10000, - "m5d.8xlarge": 10000, - "m5zn.2xlarge": 10000, - "mac2-m1ultra.metal": 10000, - "mac2-m2.metal": 10000, - "mac2-m2pro.metal": 10000, - "mac2.metal": 10000, - "p2.8xlarge": 10000, - "p3.8xlarge": 10000, - "r3.8xlarge": 10000, - "r4.8xlarge": 10000, - "r5.8xlarge": 10000, - "r5a.12xlarge": 10000, - "r5ad.12xlarge": 10000, - "r5b.8xlarge": 10000, - "r5d.8xlarge": 10000, - "x1.16xlarge": 10000, - "x1e.16xlarge": 10000, - "c5.12xlarge": 12000, - "c5.9xlarge": 12000, - "c5a.12xlarge": 12000, - "c5ad.12xlarge": 12000, - "c5d.12xlarge": 12000, - "c5d.9xlarge": 12000, - "c6g.8xlarge": 12000, - "c6gd.8xlarge": 12000, - "g5g.8xlarge": 12000, - "m5.12xlarge": 12000, - "m5a.16xlarge": 12000, - "m5ad.16xlarge": 12000, - "m5d.12xlarge": 12000, - "m6g.8xlarge": 12000, - "m6gd.8xlarge": 12000, - "r5.12xlarge": 12000, - "r5a.16xlarge": 12000, - "r5ad.16xlarge": 12000, - "r5b.12xlarge": 12000, - "r5d.12xlarge": 12000, - "r6g.8xlarge": 12000, - "r6gd.8xlarge": 12000, - "x2gd.8xlarge": 12000, - "z1d.6xlarge": 12000, - "c6a.8xlarge": 12500, - "c6gn.2xlarge": 12500, - "c6i.8xlarge": 12500, - "c6id.8xlarge": 12500, - "c6in.2xlarge": 12500, - "c7a.8xlarge": 12500, - "c7gn.xlarge": 12500, - "c7i.8xlarge": 12500, - "d3.4xlarge": 12500, - "d3en.2xlarge": 12500, - "i3en.3xlarge": 12500, - "i7ie.3xlarge": 12500, - "i7ie.6xlarge": 12500, - "i8g.8xlarge": 12500, - "im4gn.2xlarge": 12500, - "is4gen.2xlarge": 12500, - "m6a.8xlarge": 12500, - "m6i.8xlarge": 12500, - "m6id.8xlarge": 12500, - "m6idn.2xlarge": 12500, - "m6in.2xlarge": 12500, - "m7a.8xlarge": 12500, - "m7i.8xlarge": 12500, - "r6a.8xlarge": 12500, - "r6i.8xlarge": 12500, - "r6id.8xlarge": 12500, - "r6idn.2xlarge": 12500, - "r6in.2xlarge": 12500, - 
"r7a.8xlarge": 12500, - "r7i.8xlarge": 12500, - "r7iz.8xlarge": 12500, - "x2iedn.4xlarge": 12500, - "x2iezn.2xlarge": 12500, - "i8g.12xlarge": 14063, - "c5n.4xlarge": 15000, - "c7g.8xlarge": 15000, - "c7gd.8xlarge": 15000, - "c8g.8xlarge": 15000, - "g4ad.8xlarge": 15000, - "m5zn.3xlarge": 15000, - "m7g.8xlarge": 15000, - "m7gd.8xlarge": 15000, - "m8g.8xlarge": 15000, - "r7g.8xlarge": 15000, - "r7gd.8xlarge": 15000, - "r8g.8xlarge": 15000, - "x2iezn.4xlarge": 15000, - "x8g.8xlarge": 15000, - "m5dn.4xlarge": 16250, - "m5n.4xlarge": 16250, - "r5dn.4xlarge": 16250, - "r5n.4xlarge": 16250, - "inf2.8xlarge": 16667, - "c6a.12xlarge": 18750, - "c6i.12xlarge": 18750, - "c6id.12xlarge": 18750, - "c7a.12xlarge": 18750, - "c7i.12xlarge": 18750, - "i4g.8xlarge": 18750, - "i4i.8xlarge": 18750, - "i8g.16xlarge": 18750, - "m6a.12xlarge": 18750, - "m6i.12xlarge": 18750, - "m6id.12xlarge": 18750, - "m7a.12xlarge": 18750, - "m7i.12xlarge": 18750, - "r6a.12xlarge": 18750, - "r6i.12xlarge": 18750, - "r6id.12xlarge": 18750, - "r7a.12xlarge": 18750, - "r7i.12xlarge": 18750, - "c5a.16xlarge": 20000, - "c5a.24xlarge": 20000, - "c5ad.16xlarge": 20000, - "c5ad.24xlarge": 20000, - "c6g.12xlarge": 20000, - "c6gd.12xlarge": 20000, - "g4dn.4xlarge": 20000, - "g6e.4xlarge": 20000, - "m5.16xlarge": 20000, - "m5a.24xlarge": 20000, - "m5ad.24xlarge": 20000, - "m5d.16xlarge": 20000, - "m6g.12xlarge": 20000, - "m6gd.12xlarge": 20000, - "r5.16xlarge": 20000, - "r5a.24xlarge": 20000, - "r5ad.24xlarge": 20000, - "r5b.16xlarge": 20000, - "r5d.16xlarge": 20000, - "r6g.12xlarge": 20000, - "r6gd.12xlarge": 20000, - "x2gd.12xlarge": 20000, - "c7g.12xlarge": 22500, - "c7gd.12xlarge": 22500, - "c8g.12xlarge": 22500, - "m7g.12xlarge": 22500, - "m7gd.12xlarge": 22500, - "m8g.12xlarge": 22500, - "r7g.12xlarge": 22500, - "r7gd.12xlarge": 22500, - "r8g.12xlarge": 22500, - "x8g.12xlarge": 22500, - "c5.18xlarge": 25000, - "c5.24xlarge": 25000, - "c5.metal": 25000, - "c5d.18xlarge": 25000, - "c5d.24xlarge": 25000, - "c5d.metal": 25000, - "c6a.16xlarge": 25000, - "c6g.16xlarge": 25000, - "c6g.metal": 25000, - "c6gd.16xlarge": 25000, - "c6gd.metal": 25000, - "c6gn.4xlarge": 25000, - "c6i.16xlarge": 25000, - "c6id.16xlarge": 25000, - "c6in.4xlarge": 25000, - "c7a.16xlarge": 25000, - "c7gn.2xlarge": 25000, - "c7i.16xlarge": 25000, - "d3.8xlarge": 25000, - "d3en.4xlarge": 25000, - "f1.16xlarge": 25000, - "g3.16xlarge": 25000, - "g4ad.16xlarge": 25000, - "g5.16xlarge": 25000, - "g5.8xlarge": 25000, - "g5g.16xlarge": 25000, - "g5g.metal": 25000, - "g6.16xlarge": 25000, - "g6.8xlarge": 25000, - "g6e.8xlarge": 25000, - "gr6.8xlarge": 25000, - "h1.16xlarge": 25000, - "i3.16xlarge": 25000, - "i3.metal": 25000, - "i3en.6xlarge": 25000, - "i7ie.12xlarge": 25000, - "im4gn.4xlarge": 25000, - "inf1.6xlarge": 25000, - "is4gen.4xlarge": 25000, - "m4.16xlarge": 25000, - "m5.24xlarge": 25000, - "m5.metal": 25000, - "m5d.24xlarge": 25000, - "m5d.metal": 25000, - "m5dn.8xlarge": 25000, - "m5n.8xlarge": 25000, - "m6a.16xlarge": 25000, - "m6g.16xlarge": 25000, - "m6g.metal": 25000, - "m6gd.16xlarge": 25000, - "m6gd.metal": 25000, - "m6i.16xlarge": 25000, - "m6id.16xlarge": 25000, - "m6idn.4xlarge": 25000, - "m6in.4xlarge": 25000, - "m7a.16xlarge": 25000, - "m7i.16xlarge": 25000, - "mac1.metal": 25000, - "p2.16xlarge": 25000, - "p3.16xlarge": 25000, - "r4.16xlarge": 25000, - "r5.24xlarge": 25000, - "r5.metal": 25000, - "r5b.24xlarge": 25000, - "r5b.metal": 25000, - "r5d.24xlarge": 25000, - "r5d.metal": 25000, - "r5dn.8xlarge": 25000, - "r5n.8xlarge": 25000, - 
"r6a.16xlarge": 25000, - "r6g.16xlarge": 25000, - "r6g.metal": 25000, - "r6gd.16xlarge": 25000, - "r6gd.metal": 25000, - "r6i.16xlarge": 25000, - "r6id.16xlarge": 25000, - "r6idn.4xlarge": 25000, - "r6in.4xlarge": 25000, - "r7a.16xlarge": 25000, - "r7i.16xlarge": 25000, - "r7iz.12xlarge": 25000, - "r7iz.16xlarge": 25000, - "r7iz.metal-16xl": 25000, - "vt1.24xlarge": 25000, - "x1.32xlarge": 25000, - "x1e.32xlarge": 25000, - "x2gd.16xlarge": 25000, - "x2gd.metal": 25000, - "x2iedn.8xlarge": 25000, - "z1d.12xlarge": 25000, - "z1d.metal": 25000, - "i4i.12xlarge": 28120, - "i8g.24xlarge": 28125, - "i8g.metal-24xl": 28125, - "c7g.16xlarge": 30000, - "c7g.metal": 30000, - "c7gd.16xlarge": 30000, - "c7gd.metal": 30000, - "c8g.16xlarge": 30000, - "m7g.16xlarge": 30000, - "m7g.metal": 30000, - "m7gd.16xlarge": 30000, - "m7gd.metal": 30000, - "m8g.16xlarge": 30000, - "r7g.16xlarge": 30000, - "r7g.metal": 30000, - "r7gd.16xlarge": 30000, - "r7gd.metal": 30000, - "r8g.16xlarge": 30000, - "x8g.16xlarge": 30000, - "g6e.16xlarge": 35000, - "c6a.24xlarge": 37500, - "c6i.24xlarge": 37500, - "c6id.24xlarge": 37500, - "c7a.24xlarge": 37500, - "c7i.24xlarge": 37500, - "c7i.metal-24xl": 37500, - "i4g.16xlarge": 37500, - "i4i.16xlarge": 37500, - "i7ie.18xlarge": 37500, - "m6a.24xlarge": 37500, - "m6i.24xlarge": 37500, - "m6id.24xlarge": 37500, - "m7a.24xlarge": 37500, - "m7i.24xlarge": 37500, - "m7i.metal-24xl": 37500, - "r6a.24xlarge": 37500, - "r6i.24xlarge": 37500, - "r6id.24xlarge": 37500, - "r7a.24xlarge": 37500, - "r7i.24xlarge": 37500, - "r7i.metal-24xl": 37500, - "c8g.24xlarge": 40000, - "c8g.metal-24xl": 40000, - "d3en.6xlarge": 40000, - "g5.12xlarge": 40000, - "g6.12xlarge": 40000, - "m8g.24xlarge": 40000, - "m8g.metal-24xl": 40000, - "r8g.24xlarge": 40000, - "r8g.metal-24xl": 40000, - "x8g.24xlarge": 40000, - "x8g.metal-24xl": 40000, - "c5n.9xlarge": 50000, - "c6a.32xlarge": 50000, - "c6a.48xlarge": 50000, - "c6a.metal": 50000, - "c6gn.8xlarge": 50000, - "c6i.32xlarge": 50000, - "c6i.metal": 50000, - "c6id.32xlarge": 50000, - "c6id.metal": 50000, - "c6in.8xlarge": 50000, - "c7a.32xlarge": 50000, - "c7a.48xlarge": 50000, - "c7a.metal-48xl": 50000, - "c7gn.4xlarge": 50000, - "c7i.48xlarge": 50000, - "c7i.metal-48xl": 50000, - "c8g.48xlarge": 50000, - "c8g.metal-48xl": 50000, - "d3en.8xlarge": 50000, - "g4dn.12xlarge": 50000, - "g4dn.16xlarge": 50000, - "g4dn.8xlarge": 50000, - "g5.24xlarge": 50000, - "g6.24xlarge": 50000, - "i3en.12xlarge": 50000, - "i7ie.24xlarge": 50000, - "im4gn.8xlarge": 50000, - "inf2.24xlarge": 50000, - "is4gen.8xlarge": 50000, - "m5dn.12xlarge": 50000, - "m5n.12xlarge": 50000, - "m5zn.6xlarge": 50000, - "m6a.32xlarge": 50000, - "m6a.48xlarge": 50000, - "m6a.metal": 50000, - "m6i.32xlarge": 50000, - "m6i.metal": 50000, - "m6id.32xlarge": 50000, - "m6id.metal": 50000, - "m6idn.8xlarge": 50000, - "m6in.8xlarge": 50000, - "m7a.32xlarge": 50000, - "m7a.48xlarge": 50000, - "m7a.metal-48xl": 50000, - "m7i.48xlarge": 50000, - "m7i.metal-48xl": 50000, - "m8g.48xlarge": 50000, - "m8g.metal-48xl": 50000, - "r5dn.12xlarge": 50000, - "r5n.12xlarge": 50000, - "r6a.32xlarge": 50000, - "r6a.48xlarge": 50000, - "r6a.metal": 50000, - "r6i.32xlarge": 50000, - "r6i.metal": 50000, - "r6id.32xlarge": 50000, - "r6id.metal": 50000, - "r6idn.8xlarge": 50000, - "r6in.8xlarge": 50000, - "r7a.32xlarge": 50000, - "r7a.48xlarge": 50000, - "r7a.metal-48xl": 50000, - "r7i.48xlarge": 50000, - "r7i.metal-48xl": 50000, - "r7iz.32xlarge": 50000, - "r7iz.metal-32xl": 50000, - "r8g.48xlarge": 50000, - 
"r8g.metal-48xl": 50000, - "u-3tb1.56xlarge": 50000, - "x2idn.16xlarge": 50000, - "x2iedn.16xlarge": 50000, - "x2iezn.6xlarge": 50000, - "x8g.48xlarge": 50000, - "x8g.metal-48xl": 50000, - "i4i.24xlarge": 56250, - "c6gn.12xlarge": 75000, - "c6in.12xlarge": 75000, - "d3en.12xlarge": 75000, - "i4i.32xlarge": 75000, - "i4i.metal": 75000, - "m5dn.16xlarge": 75000, - "m5n.16xlarge": 75000, - "m6idn.12xlarge": 75000, - "m6in.12xlarge": 75000, - "r5dn.16xlarge": 75000, - "r5n.16xlarge": 75000, - "r6idn.12xlarge": 75000, - "r6in.12xlarge": 75000, - "x2idn.24xlarge": 75000, - "x2iedn.24xlarge": 75000, - "x2iezn.8xlarge": 75000, - "c5n.18xlarge": 100000, - "c5n.metal": 100000, - "c6gn.16xlarge": 100000, - "c6in.16xlarge": 100000, - "c7gn.8xlarge": 100000, - "dl2q.24xlarge": 100000, - "g4dn.metal": 100000, - "g5.48xlarge": 100000, - "g6.48xlarge": 100000, - "g6e.12xlarge": 100000, - "hpc6a.48xlarge": 100000, - "i3en.24xlarge": 100000, - "i3en.metal": 100000, - "i7ie.48xlarge": 100000, - "im4gn.16xlarge": 100000, - "inf1.24xlarge": 100000, - "inf2.48xlarge": 100000, - "m5dn.24xlarge": 100000, - "m5dn.metal": 100000, - "m5n.24xlarge": 100000, - "m5n.metal": 100000, - "m5zn.12xlarge": 100000, - "m5zn.metal": 100000, - "m6idn.16xlarge": 100000, - "m6in.16xlarge": 100000, - "p3dn.24xlarge": 100000, - "r5dn.24xlarge": 100000, - "r5dn.metal": 100000, - "r5n.24xlarge": 100000, - "r5n.metal": 100000, - "r6idn.16xlarge": 100000, - "r6in.16xlarge": 100000, - "u-12tb1.112xlarge": 100000, - "u-12tb1.metal": 100000, - "u-18tb1.112xlarge": 100000, - "u-18tb1.metal": 100000, - "u-24tb1.112xlarge": 100000, - "u-24tb1.metal": 100000, - "u-6tb1.112xlarge": 100000, - "u-6tb1.56xlarge": 100000, - "u-6tb1.metal": 100000, - "u-9tb1.112xlarge": 100000, - "u-9tb1.metal": 100000, - "u7i-12tb.224xlarge": 100000, - "x2idn.32xlarge": 100000, - "x2idn.metal": 100000, - "x2iedn.32xlarge": 100000, - "x2iedn.metal": 100000, - "x2iezn.12xlarge": 100000, - "x2iezn.metal": 100000, - "c6in.24xlarge": 150000, - "c7gn.12xlarge": 150000, - "m6idn.24xlarge": 150000, - "m6in.24xlarge": 150000, - "r6idn.24xlarge": 150000, - "r6in.24xlarge": 150000, - "c6in.32xlarge": 200000, - "c6in.metal": 200000, - "c7gn.16xlarge": 200000, - "c7gn.metal": 200000, - "g6e.24xlarge": 200000, - "hpc6id.32xlarge": 200000, - "hpc7g.16xlarge": 200000, - "hpc7g.4xlarge": 200000, - "hpc7g.8xlarge": 200000, - "m6idn.32xlarge": 200000, - "m6idn.metal": 200000, - "m6in.32xlarge": 200000, - "m6in.metal": 200000, - "r6idn.32xlarge": 200000, - "r6idn.metal": 200000, - "r6in.32xlarge": 200000, - "r6in.metal": 200000, - "u7in-16tb.224xlarge": 200000, - "u7in-24tb.224xlarge": 200000, - "u7in-32tb.224xlarge": 200000, - "hpc7a.12xlarge": 300000, - "hpc7a.24xlarge": 300000, - "hpc7a.48xlarge": 300000, - "hpc7a.96xlarge": 300000, - "dl1.24xlarge": 400000, - "g6e.48xlarge": 400000, - "p4d.24xlarge": 400000, - "p4de.24xlarge": 400000, - "trn1.32xlarge": 800000, - "trn1n.32xlarge": 1600000, - "p5.48xlarge": 3200000, - "p5e.48xlarge": 3200000, - "p5en.48xlarge": 3200000, - "trn2.48xlarge": 3200000, - "trn2u.48xlarge": 3200000, + "t3.nano": 32, + "t3a.nano": 32, + "t4g.nano": 32, + "t3.micro": 64, + "t3a.micro": 64, + "t4g.micro": 64, + "t3.small": 128, + "t3a.small": 128, + "t4g.small": 128, + "t3.medium": 256, + "t3a.medium": 256, + "t4g.medium": 256, + "c7a.medium": 390, + "c7i-flex.large": 390, + "m7a.medium": 390, + "m7i-flex.large": 390, + "r7a.medium": 390, + "a1.medium": 500, + "c6g.medium": 500, + "c6gd.medium": 500, + "m6g.medium": 500, + "m6gd.medium": 500, + "r6g.medium": 
500, + "r6gd.medium": 500, + "x2gd.medium": 500, + "t3.large": 512, + "t3a.large": 512, + "t4g.large": 512, + "c7g.medium": 520, + "c7gd.medium": 520, + "c8g.medium": 520, + "m7g.medium": 520, + "m7gd.medium": 520, + "m8g.medium": 520, + "r7g.medium": 520, + "r7gd.medium": 520, + "r8g.medium": 520, + "x8g.medium": 520, + "x1e.xlarge": 625, + "a1.large": 750, + "c5.large": 750, + "c5a.large": 750, + "c5ad.large": 750, + "c5d.large": 750, + "c6g.large": 750, + "c6gd.large": 750, + "i3.large": 750, + "m5.large": 750, + "m5a.large": 750, + "m5ad.large": 750, + "m5d.large": 750, + "m6g.large": 750, + "m6gd.large": 750, + "r4.large": 750, + "r5.large": 750, + "r5a.large": 750, + "r5ad.large": 750, + "r5b.large": 750, + "r5d.large": 750, + "r6g.large": 750, + "r6gd.large": 750, + "x2gd.large": 750, + "z1d.large": 750, + "c6a.large": 781, + "c6i.large": 781, + "c6id.large": 781, + "c7a.large": 781, + "c7i-flex.xlarge": 781, + "c7i.large": 781, + "i4g.large": 781, + "i4i.large": 781, + "m6a.large": 781, + "m6i.large": 781, + "m6id.large": 781, + "m7a.large": 781, + "m7i-flex.xlarge": 781, + "m7i.large": 781, + "r6a.large": 781, + "r6i.large": 781, + "r6id.large": 781, + "r7a.large": 781, + "r7i.large": 781, + "r7iz.large": 781, + "c7g.large": 937, + "c7gd.large": 937, + "c8g.large": 937, + "m7g.large": 937, + "m7gd.large": 937, + "m8g.large": 937, + "r7g.large": 937, + "r7gd.large": 937, + "r8g.large": 937, + "x8g.large": 937, + "t3.xlarge": 1024, + "t3a.xlarge": 1024, + "t4g.xlarge": 1024, + "i8g.large": 1172, + "a1.xlarge": 1250, + "c5.xlarge": 1250, + "c5a.xlarge": 1250, + "c5ad.xlarge": 1250, + "c5d.xlarge": 1250, + "c6g.xlarge": 1250, + "c6gd.xlarge": 1250, + "g5g.xlarge": 1250, + "i3.xlarge": 1250, + "m5.xlarge": 1250, + "m5a.xlarge": 1250, + "m5ad.xlarge": 1250, + "m5d.xlarge": 1250, + "m6g.xlarge": 1250, + "m6gd.xlarge": 1250, + "r4.xlarge": 1250, + "r5.xlarge": 1250, + "r5a.xlarge": 1250, + "r5ad.xlarge": 1250, + "r5b.xlarge": 1250, + "r5d.xlarge": 1250, + "r6g.xlarge": 1250, + "r6gd.xlarge": 1250, + "x1e.2xlarge": 1250, + "x2gd.xlarge": 1250, + "z1d.xlarge": 1250, + "c6a.xlarge": 1562, + "c6i.xlarge": 1562, + "c6id.xlarge": 1562, + "c7a.xlarge": 1562, + "c7i-flex.2xlarge": 1562, + "c7i.xlarge": 1562, + "is4gen.medium": 1562, + "m6a.xlarge": 1562, + "m6i.xlarge": 1562, + "m6id.xlarge": 1562, + "m7a.xlarge": 1562, + "m7i-flex.2xlarge": 1562, + "m7i.xlarge": 1562, + "r6a.xlarge": 1562, + "r6i.xlarge": 1562, + "r6id.xlarge": 1562, + "r7a.xlarge": 1562, + "r7i.xlarge": 1562, + "r7iz.xlarge": 1562, + "c6gn.medium": 1600, + "i4g.xlarge": 1875, + "i4i.xlarge": 1875, + "x2iedn.xlarge": 1875, + "c7g.xlarge": 1876, + "c7gd.xlarge": 1876, + "c8g.xlarge": 1876, + "m7g.xlarge": 1876, + "m7gd.xlarge": 1876, + "m8g.xlarge": 1876, + "r7g.xlarge": 1876, + "r7gd.xlarge": 1876, + "r8g.xlarge": 1876, + "x8g.xlarge": 1876, + "g4ad.xlarge": 2000, + "t3.2xlarge": 2048, + "t3a.2xlarge": 2048, + "t4g.2xlarge": 2048, + "i7ie.large": 2083, + "inf2.xlarge": 2083, + "i3en.large": 2100, + "m5dn.large": 2100, + "m5n.large": 2100, + "r5dn.large": 2100, + "r5n.large": 2100, + "i8g.xlarge": 2344, + "a1.2xlarge": 2500, + "c5.2xlarge": 2500, + "c5a.2xlarge": 2500, + "c5ad.2xlarge": 2500, + "c5d.2xlarge": 2500, + "c6g.2xlarge": 2500, + "c6gd.2xlarge": 2500, + "g5.xlarge": 2500, + "g5g.2xlarge": 2500, + "g6.xlarge": 2500, + "g6e.xlarge": 2500, + "h1.2xlarge": 2500, + "i3.2xlarge": 2500, + "m5.2xlarge": 2500, + "m5a.2xlarge": 2500, + "m5ad.2xlarge": 2500, + "m5d.2xlarge": 2500, + "m6g.2xlarge": 2500, + "m6gd.2xlarge": 2500, + 
"r4.2xlarge": 2500, + "r5.2xlarge": 2500, + "r5a.2xlarge": 2500, + "r5ad.2xlarge": 2500, + "r5b.2xlarge": 2500, + "r5d.2xlarge": 2500, + "r6g.2xlarge": 2500, + "r6gd.2xlarge": 2500, + "x1e.4xlarge": 2500, + "x2gd.2xlarge": 2500, + "z1d.2xlarge": 2500, + "c5n.large": 3000, + "c6gn.large": 3000, + "d3.xlarge": 3000, + "m5zn.large": 3000, + "vt1.3xlarge": 3120, + "c6a.2xlarge": 3125, + "c6i.2xlarge": 3125, + "c6id.2xlarge": 3125, + "c6in.large": 3125, + "c7a.2xlarge": 3125, + "c7gn.medium": 3125, + "c7i-flex.4xlarge": 3125, + "c7i.2xlarge": 3125, + "im4gn.large": 3125, + "is4gen.large": 3125, + "m6a.2xlarge": 3125, + "m6i.2xlarge": 3125, + "m6id.2xlarge": 3125, + "m6idn.large": 3125, + "m6in.large": 3125, + "m7a.2xlarge": 3125, + "m7i-flex.4xlarge": 3125, + "m7i.2xlarge": 3125, + "r6a.2xlarge": 3125, + "r6i.2xlarge": 3125, + "r6id.2xlarge": 3125, + "r6idn.large": 3125, + "r6in.large": 3125, + "r7a.2xlarge": 3125, + "r7i.2xlarge": 3125, + "r7iz.2xlarge": 3125, + "trn1.2xlarge": 3125, + "c7g.2xlarge": 3750, + "c7gd.2xlarge": 3750, + "c8g.2xlarge": 3750, + "m7g.2xlarge": 3750, + "m7gd.2xlarge": 3750, + "m8g.2xlarge": 3750, + "r7g.2xlarge": 3750, + "r7gd.2xlarge": 3750, + "r8g.2xlarge": 3750, + "x8g.2xlarge": 3750, + "m5dn.xlarge": 4100, + "m5n.xlarge": 4100, + "r5dn.xlarge": 4100, + "r5n.xlarge": 4100, + "i7ie.xlarge": 4166, + "g4ad.2xlarge": 4167, + "i3en.xlarge": 4200, + "i4g.2xlarge": 4687, + "i4i.2xlarge": 4687, + "i8g.2xlarge": 4688, + "a1.4xlarge": 5000, + "a1.metal": 5000, + "c5.4xlarge": 5000, + "c5a.4xlarge": 5000, + "c5ad.4xlarge": 5000, + "c5d.4xlarge": 5000, + "c5n.xlarge": 5000, + "c6g.4xlarge": 5000, + "c6gd.4xlarge": 5000, + "g4dn.xlarge": 5000, + "g5.2xlarge": 5000, + "g5g.4xlarge": 5000, + "g6.2xlarge": 5000, + "g6e.2xlarge": 5000, + "h1.4xlarge": 5000, + "i3.4xlarge": 5000, + "inf1.2xlarge": 5000, + "inf1.xlarge": 5000, + "m5.4xlarge": 5000, + "m5a.4xlarge": 5000, + "m5ad.4xlarge": 5000, + "m5d.4xlarge": 5000, + "m5zn.xlarge": 5000, + "m6g.4xlarge": 5000, + "m6gd.4xlarge": 5000, + "r4.4xlarge": 5000, + "r5.4xlarge": 5000, + "r5a.4xlarge": 5000, + "r5ad.4xlarge": 5000, + "r5b.4xlarge": 5000, + "r5d.4xlarge": 5000, + "r6g.4xlarge": 5000, + "r6gd.4xlarge": 5000, + "x1e.8xlarge": 5000, + "x2gd.4xlarge": 5000, + "x2iedn.2xlarge": 5000, + "z1d.3xlarge": 5000, + "d3.2xlarge": 6000, + "d3en.xlarge": 6000, + "c6a.4xlarge": 6250, + "c6i.4xlarge": 6250, + "c6id.4xlarge": 6250, + "c6in.xlarge": 6250, + "c7a.4xlarge": 6250, + "c7gn.large": 6250, + "c7i-flex.8xlarge": 6250, + "c7i.4xlarge": 6250, + "im4gn.xlarge": 6250, + "is4gen.xlarge": 6250, + "m6a.4xlarge": 6250, + "m6i.4xlarge": 6250, + "m6id.4xlarge": 6250, + "m6idn.xlarge": 6250, + "m6in.xlarge": 6250, + "m7a.4xlarge": 6250, + "m7i-flex.8xlarge": 6250, + "m7i.4xlarge": 6250, + "r6a.4xlarge": 6250, + "r6i.4xlarge": 6250, + "r6id.4xlarge": 6250, + "r6idn.xlarge": 6250, + "r6in.xlarge": 6250, + "r7a.4xlarge": 6250, + "r7i.4xlarge": 6250, + "r7iz.4xlarge": 6250, + "vt1.6xlarge": 6250, + "c6gn.xlarge": 6300, + "c7g.4xlarge": 7500, + "c7gd.4xlarge": 7500, + "c8g.4xlarge": 7500, + "m5a.8xlarge": 7500, + "m5ad.8xlarge": 7500, + "m7g.4xlarge": 7500, + "m7gd.4xlarge": 7500, + "m8g.4xlarge": 7500, + "r5a.8xlarge": 7500, + "r5ad.8xlarge": 7500, + "r7g.4xlarge": 7500, + "r7gd.4xlarge": 7500, + "r8g.4xlarge": 7500, + "x8g.4xlarge": 7500, + "m5dn.2xlarge": 8125, + "m5n.2xlarge": 8125, + "r5dn.2xlarge": 8125, + "r5n.2xlarge": 8125, + "g4ad.4xlarge": 8333, + "i7ie.2xlarge": 8333, + "i3en.2xlarge": 8400, + "i4g.4xlarge": 9375, + "i4i.4xlarge": 9375, + 
"i8g.4xlarge": 9375, + "c3.8xlarge": 10000, + "c4.8xlarge": 10000, + "c5a.8xlarge": 10000, + "c5ad.8xlarge": 10000, + "c5n.2xlarge": 10000, + "d2.8xlarge": 10000, + "g3.8xlarge": 10000, + "g4dn.2xlarge": 10000, + "g5.4xlarge": 10000, + "g6.4xlarge": 10000, + "gr6.4xlarge": 10000, + "h1.8xlarge": 10000, + "i2.8xlarge": 10000, + "i3.8xlarge": 10000, + "m4.10xlarge": 10000, + "m5.8xlarge": 10000, + "m5a.12xlarge": 10000, + "m5ad.12xlarge": 10000, + "m5d.8xlarge": 10000, + "m5zn.2xlarge": 10000, + "mac2-m1ultra.metal": 10000, + "mac2-m2.metal": 10000, + "mac2-m2pro.metal": 10000, + "mac2.metal": 10000, + "p2.8xlarge": 10000, + "p3.8xlarge": 10000, + "r3.8xlarge": 10000, + "r4.8xlarge": 10000, + "r5.8xlarge": 10000, + "r5a.12xlarge": 10000, + "r5ad.12xlarge": 10000, + "r5b.8xlarge": 10000, + "r5d.8xlarge": 10000, + "x1.16xlarge": 10000, + "x1e.16xlarge": 10000, + "c5.12xlarge": 12000, + "c5.9xlarge": 12000, + "c5a.12xlarge": 12000, + "c5ad.12xlarge": 12000, + "c5d.12xlarge": 12000, + "c5d.9xlarge": 12000, + "c6g.8xlarge": 12000, + "c6gd.8xlarge": 12000, + "g5g.8xlarge": 12000, + "m5.12xlarge": 12000, + "m5a.16xlarge": 12000, + "m5ad.16xlarge": 12000, + "m5d.12xlarge": 12000, + "m6g.8xlarge": 12000, + "m6gd.8xlarge": 12000, + "r5.12xlarge": 12000, + "r5a.16xlarge": 12000, + "r5ad.16xlarge": 12000, + "r5b.12xlarge": 12000, + "r5d.12xlarge": 12000, + "r6g.8xlarge": 12000, + "r6gd.8xlarge": 12000, + "x2gd.8xlarge": 12000, + "z1d.6xlarge": 12000, + "c6a.8xlarge": 12500, + "c6gn.2xlarge": 12500, + "c6i.8xlarge": 12500, + "c6id.8xlarge": 12500, + "c6in.2xlarge": 12500, + "c7a.8xlarge": 12500, + "c7gn.xlarge": 12500, + "c7i.8xlarge": 12500, + "d3.4xlarge": 12500, + "d3en.2xlarge": 12500, + "i3en.3xlarge": 12500, + "i7ie.3xlarge": 12500, + "i7ie.6xlarge": 12500, + "i8g.8xlarge": 12500, + "im4gn.2xlarge": 12500, + "is4gen.2xlarge": 12500, + "m6a.8xlarge": 12500, + "m6i.8xlarge": 12500, + "m6id.8xlarge": 12500, + "m6idn.2xlarge": 12500, + "m6in.2xlarge": 12500, + "m7a.8xlarge": 12500, + "m7i.8xlarge": 12500, + "r6a.8xlarge": 12500, + "r6i.8xlarge": 12500, + "r6id.8xlarge": 12500, + "r6idn.2xlarge": 12500, + "r6in.2xlarge": 12500, + "r7a.8xlarge": 12500, + "r7i.8xlarge": 12500, + "r7iz.8xlarge": 12500, + "x2iedn.4xlarge": 12500, + "x2iezn.2xlarge": 12500, + "i8g.12xlarge": 14063, + "c5n.4xlarge": 15000, + "c7g.8xlarge": 15000, + "c7gd.8xlarge": 15000, + "c8g.8xlarge": 15000, + "g4ad.8xlarge": 15000, + "m5zn.3xlarge": 15000, + "m7g.8xlarge": 15000, + "m7gd.8xlarge": 15000, + "m8g.8xlarge": 15000, + "r7g.8xlarge": 15000, + "r7gd.8xlarge": 15000, + "r8g.8xlarge": 15000, + "x2iezn.4xlarge": 15000, + "x8g.8xlarge": 15000, + "m5dn.4xlarge": 16250, + "m5n.4xlarge": 16250, + "r5dn.4xlarge": 16250, + "r5n.4xlarge": 16250, + "inf2.8xlarge": 16667, + "c6a.12xlarge": 18750, + "c6i.12xlarge": 18750, + "c6id.12xlarge": 18750, + "c7a.12xlarge": 18750, + "c7i.12xlarge": 18750, + "i4g.8xlarge": 18750, + "i4i.8xlarge": 18750, + "i8g.16xlarge": 18750, + "m6a.12xlarge": 18750, + "m6i.12xlarge": 18750, + "m6id.12xlarge": 18750, + "m7a.12xlarge": 18750, + "m7i.12xlarge": 18750, + "r6a.12xlarge": 18750, + "r6i.12xlarge": 18750, + "r6id.12xlarge": 18750, + "r7a.12xlarge": 18750, + "r7i.12xlarge": 18750, + "c5a.16xlarge": 20000, + "c5a.24xlarge": 20000, + "c5ad.16xlarge": 20000, + "c5ad.24xlarge": 20000, + "c6g.12xlarge": 20000, + "c6gd.12xlarge": 20000, + "g4dn.4xlarge": 20000, + "g6e.4xlarge": 20000, + "m5.16xlarge": 20000, + "m5a.24xlarge": 20000, + "m5ad.24xlarge": 20000, + "m5d.16xlarge": 20000, + "m6g.12xlarge": 20000, + 
"m6gd.12xlarge": 20000, + "r5.16xlarge": 20000, + "r5a.24xlarge": 20000, + "r5ad.24xlarge": 20000, + "r5b.16xlarge": 20000, + "r5d.16xlarge": 20000, + "r6g.12xlarge": 20000, + "r6gd.12xlarge": 20000, + "x2gd.12xlarge": 20000, + "c7g.12xlarge": 22500, + "c7gd.12xlarge": 22500, + "c8g.12xlarge": 22500, + "m7g.12xlarge": 22500, + "m7gd.12xlarge": 22500, + "m8g.12xlarge": 22500, + "r7g.12xlarge": 22500, + "r7gd.12xlarge": 22500, + "r8g.12xlarge": 22500, + "x8g.12xlarge": 22500, + "c5.18xlarge": 25000, + "c5.24xlarge": 25000, + "c5.metal": 25000, + "c5d.18xlarge": 25000, + "c5d.24xlarge": 25000, + "c5d.metal": 25000, + "c6a.16xlarge": 25000, + "c6g.16xlarge": 25000, + "c6g.metal": 25000, + "c6gd.16xlarge": 25000, + "c6gd.metal": 25000, + "c6gn.4xlarge": 25000, + "c6i.16xlarge": 25000, + "c6id.16xlarge": 25000, + "c6in.4xlarge": 25000, + "c7a.16xlarge": 25000, + "c7gn.2xlarge": 25000, + "c7i.16xlarge": 25000, + "d3.8xlarge": 25000, + "d3en.4xlarge": 25000, + "f1.16xlarge": 25000, + "f2.12xlarge": 25000, + "g3.16xlarge": 25000, + "g4ad.16xlarge": 25000, + "g5.16xlarge": 25000, + "g5.8xlarge": 25000, + "g5g.16xlarge": 25000, + "g5g.metal": 25000, + "g6.16xlarge": 25000, + "g6.8xlarge": 25000, + "g6e.8xlarge": 25000, + "gr6.8xlarge": 25000, + "h1.16xlarge": 25000, + "i3.16xlarge": 25000, + "i3.metal": 25000, + "i3en.6xlarge": 25000, + "i7ie.12xlarge": 25000, + "im4gn.4xlarge": 25000, + "inf1.6xlarge": 25000, + "is4gen.4xlarge": 25000, + "m4.16xlarge": 25000, + "m5.24xlarge": 25000, + "m5.metal": 25000, + "m5d.24xlarge": 25000, + "m5d.metal": 25000, + "m5dn.8xlarge": 25000, + "m5n.8xlarge": 25000, + "m6a.16xlarge": 25000, + "m6g.16xlarge": 25000, + "m6g.metal": 25000, + "m6gd.16xlarge": 25000, + "m6gd.metal": 25000, + "m6i.16xlarge": 25000, + "m6id.16xlarge": 25000, + "m6idn.4xlarge": 25000, + "m6in.4xlarge": 25000, + "m7a.16xlarge": 25000, + "m7i.16xlarge": 25000, + "mac1.metal": 25000, + "p2.16xlarge": 25000, + "p3.16xlarge": 25000, + "r4.16xlarge": 25000, + "r5.24xlarge": 25000, + "r5.metal": 25000, + "r5b.24xlarge": 25000, + "r5b.metal": 25000, + "r5d.24xlarge": 25000, + "r5d.metal": 25000, + "r5dn.8xlarge": 25000, + "r5n.8xlarge": 25000, + "r6a.16xlarge": 25000, + "r6g.16xlarge": 25000, + "r6g.metal": 25000, + "r6gd.16xlarge": 25000, + "r6gd.metal": 25000, + "r6i.16xlarge": 25000, + "r6id.16xlarge": 25000, + "r6idn.4xlarge": 25000, + "r6in.4xlarge": 25000, + "r7a.16xlarge": 25000, + "r7i.16xlarge": 25000, + "r7iz.12xlarge": 25000, + "r7iz.16xlarge": 25000, + "r7iz.metal-16xl": 25000, + "vt1.24xlarge": 25000, + "x1.32xlarge": 25000, + "x1e.32xlarge": 25000, + "x2gd.16xlarge": 25000, + "x2gd.metal": 25000, + "x2iedn.8xlarge": 25000, + "z1d.12xlarge": 25000, + "z1d.metal": 25000, + "i4i.12xlarge": 28120, + "i8g.24xlarge": 28125, + "i8g.metal-24xl": 28125, + "c7g.16xlarge": 30000, + "c7g.metal": 30000, + "c7gd.16xlarge": 30000, + "c7gd.metal": 30000, + "c8g.16xlarge": 30000, + "m7g.16xlarge": 30000, + "m7g.metal": 30000, + "m7gd.16xlarge": 30000, + "m7gd.metal": 30000, + "m8g.16xlarge": 30000, + "r7g.16xlarge": 30000, + "r7g.metal": 30000, + "r7gd.16xlarge": 30000, + "r7gd.metal": 30000, + "r8g.16xlarge": 30000, + "x8g.16xlarge": 30000, + "g6e.16xlarge": 35000, + "c6a.24xlarge": 37500, + "c6i.24xlarge": 37500, + "c6id.24xlarge": 37500, + "c7a.24xlarge": 37500, + "c7i.24xlarge": 37500, + "c7i.metal-24xl": 37500, + "i4g.16xlarge": 37500, + "i4i.16xlarge": 37500, + "i7ie.18xlarge": 37500, + "m6a.24xlarge": 37500, + "m6i.24xlarge": 37500, + "m6id.24xlarge": 37500, + "m7a.24xlarge": 37500, + 
"m7i.24xlarge": 37500, + "m7i.metal-24xl": 37500, + "r6a.24xlarge": 37500, + "r6i.24xlarge": 37500, + "r6id.24xlarge": 37500, + "r7a.24xlarge": 37500, + "r7i.24xlarge": 37500, + "r7i.metal-24xl": 37500, + "c8g.24xlarge": 40000, + "c8g.metal-24xl": 40000, + "d3en.6xlarge": 40000, + "g5.12xlarge": 40000, + "g6.12xlarge": 40000, + "m8g.24xlarge": 40000, + "m8g.metal-24xl": 40000, + "r8g.24xlarge": 40000, + "r8g.metal-24xl": 40000, + "x8g.24xlarge": 40000, + "x8g.metal-24xl": 40000, + "c5n.9xlarge": 50000, + "c6a.32xlarge": 50000, + "c6a.48xlarge": 50000, + "c6a.metal": 50000, + "c6gn.8xlarge": 50000, + "c6i.32xlarge": 50000, + "c6i.metal": 50000, + "c6id.32xlarge": 50000, + "c6id.metal": 50000, + "c6in.8xlarge": 50000, + "c7a.32xlarge": 50000, + "c7a.48xlarge": 50000, + "c7a.metal-48xl": 50000, + "c7gn.4xlarge": 50000, + "c7i.48xlarge": 50000, + "c7i.metal-48xl": 50000, + "c8g.48xlarge": 50000, + "c8g.metal-48xl": 50000, + "d3en.8xlarge": 50000, + "g4dn.12xlarge": 50000, + "g4dn.16xlarge": 50000, + "g4dn.8xlarge": 50000, + "g5.24xlarge": 50000, + "g6.24xlarge": 50000, + "i3en.12xlarge": 50000, + "i7ie.24xlarge": 50000, + "im4gn.8xlarge": 50000, + "inf2.24xlarge": 50000, + "is4gen.8xlarge": 50000, + "m5dn.12xlarge": 50000, + "m5n.12xlarge": 50000, + "m5zn.6xlarge": 50000, + "m6a.32xlarge": 50000, + "m6a.48xlarge": 50000, + "m6a.metal": 50000, + "m6i.32xlarge": 50000, + "m6i.metal": 50000, + "m6id.32xlarge": 50000, + "m6id.metal": 50000, + "m6idn.8xlarge": 50000, + "m6in.8xlarge": 50000, + "m7a.32xlarge": 50000, + "m7a.48xlarge": 50000, + "m7a.metal-48xl": 50000, + "m7i.48xlarge": 50000, + "m7i.metal-48xl": 50000, + "m8g.48xlarge": 50000, + "m8g.metal-48xl": 50000, + "r5dn.12xlarge": 50000, + "r5n.12xlarge": 50000, + "r6a.32xlarge": 50000, + "r6a.48xlarge": 50000, + "r6a.metal": 50000, + "r6i.32xlarge": 50000, + "r6i.metal": 50000, + "r6id.32xlarge": 50000, + "r6id.metal": 50000, + "r6idn.8xlarge": 50000, + "r6in.8xlarge": 50000, + "r7a.32xlarge": 50000, + "r7a.48xlarge": 50000, + "r7a.metal-48xl": 50000, + "r7i.48xlarge": 50000, + "r7i.metal-48xl": 50000, + "r7iz.32xlarge": 50000, + "r7iz.metal-32xl": 50000, + "r8g.48xlarge": 50000, + "r8g.metal-48xl": 50000, + "u-3tb1.56xlarge": 50000, + "x2idn.16xlarge": 50000, + "x2iedn.16xlarge": 50000, + "x2iezn.6xlarge": 50000, + "x8g.48xlarge": 50000, + "x8g.metal-48xl": 50000, + "i4i.24xlarge": 56250, + "c6gn.12xlarge": 75000, + "c6in.12xlarge": 75000, + "d3en.12xlarge": 75000, + "i4i.32xlarge": 75000, + "i4i.metal": 75000, + "m5dn.16xlarge": 75000, + "m5n.16xlarge": 75000, + "m6idn.12xlarge": 75000, + "m6in.12xlarge": 75000, + "r5dn.16xlarge": 75000, + "r5n.16xlarge": 75000, + "r6idn.12xlarge": 75000, + "r6in.12xlarge": 75000, + "x2idn.24xlarge": 75000, + "x2iedn.24xlarge": 75000, + "x2iezn.8xlarge": 75000, + "c5n.18xlarge": 100000, + "c5n.metal": 100000, + "c6gn.16xlarge": 100000, + "c6in.16xlarge": 100000, + "c7gn.8xlarge": 100000, + "dl2q.24xlarge": 100000, + "f2.48xlarge": 100000, + "g4dn.metal": 100000, + "g5.48xlarge": 100000, + "g6.48xlarge": 100000, + "g6e.12xlarge": 100000, + "hpc6a.48xlarge": 100000, + "i3en.24xlarge": 100000, + "i3en.metal": 100000, + "i7ie.48xlarge": 100000, + "im4gn.16xlarge": 100000, + "inf1.24xlarge": 100000, + "inf2.48xlarge": 100000, + "m5dn.24xlarge": 100000, + "m5dn.metal": 100000, + "m5n.24xlarge": 100000, + "m5n.metal": 100000, + "m5zn.12xlarge": 100000, + "m5zn.metal": 100000, + "m6idn.16xlarge": 100000, + "m6in.16xlarge": 100000, + "p3dn.24xlarge": 100000, + "r5dn.24xlarge": 100000, + "r5dn.metal": 100000, + 
"r5n.24xlarge": 100000, + "r5n.metal": 100000, + "r6idn.16xlarge": 100000, + "r6in.16xlarge": 100000, + "u-12tb1.112xlarge": 100000, + "u-12tb1.metal": 100000, + "u-18tb1.112xlarge": 100000, + "u-18tb1.metal": 100000, + "u-24tb1.112xlarge": 100000, + "u-24tb1.metal": 100000, + "u-6tb1.112xlarge": 100000, + "u-6tb1.56xlarge": 100000, + "u-6tb1.metal": 100000, + "u-9tb1.112xlarge": 100000, + "u-9tb1.metal": 100000, + "u7i-12tb.224xlarge": 100000, + "u7i-6tb.112xlarge": 100000, + "u7i-8tb.112xlarge": 100000, + "x2idn.32xlarge": 100000, + "x2idn.metal": 100000, + "x2iedn.32xlarge": 100000, + "x2iedn.metal": 100000, + "x2iezn.12xlarge": 100000, + "x2iezn.metal": 100000, + "c6in.24xlarge": 150000, + "c7gn.12xlarge": 150000, + "m6idn.24xlarge": 150000, + "m6in.24xlarge": 150000, + "r6idn.24xlarge": 150000, + "r6in.24xlarge": 150000, + "c6in.32xlarge": 200000, + "c6in.metal": 200000, + "c7gn.16xlarge": 200000, + "c7gn.metal": 200000, + "g6e.24xlarge": 200000, + "hpc6id.32xlarge": 200000, + "hpc7g.16xlarge": 200000, + "hpc7g.4xlarge": 200000, + "hpc7g.8xlarge": 200000, + "m6idn.32xlarge": 200000, + "m6idn.metal": 200000, + "m6in.32xlarge": 200000, + "m6in.metal": 200000, + "r6idn.32xlarge": 200000, + "r6idn.metal": 200000, + "r6in.32xlarge": 200000, + "r6in.metal": 200000, + "u7in-16tb.224xlarge": 200000, + "u7in-24tb.224xlarge": 200000, + "u7in-32tb.224xlarge": 200000, + "u7inh-32tb.480xlarge": 200000, + "hpc7a.12xlarge": 300000, + "hpc7a.24xlarge": 300000, + "hpc7a.48xlarge": 300000, + "hpc7a.96xlarge": 300000, + "dl1.24xlarge": 400000, + "g6e.48xlarge": 400000, + "p4d.24xlarge": 400000, + "p4de.24xlarge": 400000, + "trn1.32xlarge": 800000, + "trn1n.32xlarge": 1600000, + "p5.48xlarge": 3200000, + "p5e.48xlarge": 3200000, + "p5en.48xlarge": 3200000, + "trn2.48xlarge": 3200000, + "trn2u.48xlarge": 3200000, } ) diff --git a/pkg/providers/instancetype/zz_generated.vpclimits.go b/pkg/providers/instancetype/zz_generated.vpclimits.go index 1315d01fd33f..efc8521c4597 100644 --- a/pkg/providers/instancetype/zz_generated.vpclimits.go +++ b/pkg/providers/instancetype/zz_generated.vpclimits.go @@ -17,7 +17,7 @@ // so we can get this information at runtime. // Code generated by go generate; DO NOT EDIT. -// This file was generated at 2024-11-12T06:00:34Z +// This file was generated at 2024-12-13T19:39:12Z // WARNING: please add @ellistarn, @bwagner5, or @jonathan-innis from aws/karpenter to reviewers // if you are updating this file since Karpenter is depending on this file to calculate max pods. 
@@ -4575,6 +4575,141 @@ var Limits = map[string]*VPCLimits{ Hypervisor: "nitro", IsBareMetal: false, }, + "i8g.12xlarge": { + Interface: 8, + IPv4PerInterface: 30, + IsTrunkingCompatible: true, + BranchInterface: 54, + DefaultNetworkCardIndex: 0, + NetworkCards: []NetworkCard{ + { + MaximumNetworkInterfaces: 8, + NetworkCardIndex: 0, + }, + }, + Hypervisor: "nitro", + IsBareMetal: false, + }, + "i8g.16xlarge": { + Interface: 15, + IPv4PerInterface: 50, + IsTrunkingCompatible: true, + BranchInterface: 107, + DefaultNetworkCardIndex: 0, + NetworkCards: []NetworkCard{ + { + MaximumNetworkInterfaces: 15, + NetworkCardIndex: 0, + }, + }, + Hypervisor: "nitro", + IsBareMetal: false, + }, + "i8g.24xlarge": { + Interface: 15, + IPv4PerInterface: 50, + IsTrunkingCompatible: true, + BranchInterface: 107, + DefaultNetworkCardIndex: 0, + NetworkCards: []NetworkCard{ + { + MaximumNetworkInterfaces: 15, + NetworkCardIndex: 0, + }, + }, + Hypervisor: "nitro", + IsBareMetal: false, + }, + "i8g.2xlarge": { + Interface: 4, + IPv4PerInterface: 15, + IsTrunkingCompatible: true, + BranchInterface: 38, + DefaultNetworkCardIndex: 0, + NetworkCards: []NetworkCard{ + { + MaximumNetworkInterfaces: 4, + NetworkCardIndex: 0, + }, + }, + Hypervisor: "nitro", + IsBareMetal: false, + }, + "i8g.4xlarge": { + Interface: 8, + IPv4PerInterface: 30, + IsTrunkingCompatible: true, + BranchInterface: 54, + DefaultNetworkCardIndex: 0, + NetworkCards: []NetworkCard{ + { + MaximumNetworkInterfaces: 8, + NetworkCardIndex: 0, + }, + }, + Hypervisor: "nitro", + IsBareMetal: false, + }, + "i8g.8xlarge": { + Interface: 8, + IPv4PerInterface: 30, + IsTrunkingCompatible: true, + BranchInterface: 54, + DefaultNetworkCardIndex: 0, + NetworkCards: []NetworkCard{ + { + MaximumNetworkInterfaces: 8, + NetworkCardIndex: 0, + }, + }, + Hypervisor: "nitro", + IsBareMetal: false, + }, + "i8g.large": { + Interface: 3, + IPv4PerInterface: 10, + IsTrunkingCompatible: true, + BranchInterface: 9, + DefaultNetworkCardIndex: 0, + NetworkCards: []NetworkCard{ + { + MaximumNetworkInterfaces: 3, + NetworkCardIndex: 0, + }, + }, + Hypervisor: "nitro", + IsBareMetal: false, + }, + "i8g.metal-24xl": { + Interface: 15, + IPv4PerInterface: 50, + IsTrunkingCompatible: true, + BranchInterface: 107, + DefaultNetworkCardIndex: 0, + NetworkCards: []NetworkCard{ + { + MaximumNetworkInterfaces: 15, + NetworkCardIndex: 0, + }, + }, + Hypervisor: "", + IsBareMetal: true, + }, + "i8g.xlarge": { + Interface: 4, + IPv4PerInterface: 15, + IsTrunkingCompatible: true, + BranchInterface: 18, + DefaultNetworkCardIndex: 0, + NetworkCards: []NetworkCard{ + { + MaximumNetworkInterfaces: 4, + NetworkCardIndex: 0, + }, + }, + Hypervisor: "nitro", + IsBareMetal: false, + }, "im4gn.16xlarge": { Interface: 15, IPv4PerInterface: 50, @@ -8320,6 +8455,96 @@ var Limits = map[string]*VPCLimits{ Hypervisor: "nitro", IsBareMetal: false, }, + "p5en.48xlarge": { + Interface: 64, + IPv4PerInterface: 50, + IsTrunkingCompatible: true, + BranchInterface: 120, + DefaultNetworkCardIndex: 0, + NetworkCards: []NetworkCard{ + { + MaximumNetworkInterfaces: 4, + NetworkCardIndex: 0, + }, + + { + MaximumNetworkInterfaces: 4, + NetworkCardIndex: 1, + }, + + { + MaximumNetworkInterfaces: 4, + NetworkCardIndex: 2, + }, + + { + MaximumNetworkInterfaces: 4, + NetworkCardIndex: 3, + }, + + { + MaximumNetworkInterfaces: 4, + NetworkCardIndex: 4, + }, + + { + MaximumNetworkInterfaces: 4, + NetworkCardIndex: 5, + }, + + { + MaximumNetworkInterfaces: 4, + NetworkCardIndex: 6, + }, + + { + MaximumNetworkInterfaces: 4, 
+ NetworkCardIndex: 7, + }, + + { + MaximumNetworkInterfaces: 4, + NetworkCardIndex: 8, + }, + + { + MaximumNetworkInterfaces: 4, + NetworkCardIndex: 9, + }, + + { + MaximumNetworkInterfaces: 4, + NetworkCardIndex: 10, + }, + + { + MaximumNetworkInterfaces: 4, + NetworkCardIndex: 11, + }, + + { + MaximumNetworkInterfaces: 4, + NetworkCardIndex: 12, + }, + + { + MaximumNetworkInterfaces: 4, + NetworkCardIndex: 13, + }, + + { + MaximumNetworkInterfaces: 4, + NetworkCardIndex: 14, + }, + + { + MaximumNetworkInterfaces: 4, + NetworkCardIndex: 15, + }, + }, + Hypervisor: "nitro", + IsBareMetal: false, + }, "r3.2xlarge": { Interface: 4, IPv4PerInterface: 15, @@ -12110,6 +12335,21 @@ var Limits = map[string]*VPCLimits{ Hypervisor: "nitro", IsBareMetal: false, }, + "u7i-6tb.112xlarge": { + Interface: 15, + IPv4PerInterface: 50, + IsTrunkingCompatible: true, + BranchInterface: 107, + DefaultNetworkCardIndex: 0, + NetworkCards: []NetworkCard{ + { + MaximumNetworkInterfaces: 15, + NetworkCardIndex: 0, + }, + }, + Hypervisor: "nitro", + IsBareMetal: false, + }, "u7in-16tb.224xlarge": { Interface: 16, IPv4PerInterface: 50, diff --git a/pkg/providers/pricing/zz_generated.pricing_aws.go b/pkg/providers/pricing/zz_generated.pricing_aws.go index f3dda6fea37a..250570059909 100644 --- a/pkg/providers/pricing/zz_generated.pricing_aws.go +++ b/pkg/providers/pricing/zz_generated.pricing_aws.go @@ -16,7 +16,7 @@ limitations under the License. package pricing -// generated at 2024-12-09T13:15:15Z for us-east-1 +// generated at 2025-01-06T13:11:40Z for us-east-1 import ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" @@ -118,6 +118,8 @@ var InitialOnDemandPricesAWS = map[string]map[ec2types.InstanceType]float64{ "dl1.24xlarge": 13.109040, // f1 family "f1.16xlarge": 13.200000, "f1.2xlarge": 1.650000, "f1.4xlarge": 3.300000, + // f2 family + "f2.12xlarge": 3.960000, "f2.48xlarge": 15.840000, // g2 family "g2.2xlarge": 0.650000, "g2.8xlarge": 2.600000, // g3 family @@ -394,6 +396,10 @@ var InitialOnDemandPricesAWS = map[string]map[ec2types.InstanceType]float64{ "u-9tb1.112xlarge": 81.900000, // u7i-12tb family "u7i-12tb.224xlarge": 152.880000, + // u7i-6tb family + "u7i-6tb.112xlarge": 62.790000, + // u7i-8tb family + "u7i-8tb.112xlarge": 83.720000, // u7in-16tb family "u7in-16tb.224xlarge": 203.840000, // u7in-24tb family diff --git a/pkg/providers/pricing/zz_generated.pricing_aws_us_gov.go b/pkg/providers/pricing/zz_generated.pricing_aws_us_gov.go index c20c32f9f878..ae5d5369dd31 100644 --- a/pkg/providers/pricing/zz_generated.pricing_aws_us_gov.go +++ b/pkg/providers/pricing/zz_generated.pricing_aws_us_gov.go @@ -16,7 +16,7 @@ limitations under the License. 
 package pricing
 
-// generated at 2024-12-09T13:15:20Z for us-east-1
+// generated at 2025-01-06T13:11:45Z for us-east-1
 
 import ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types"
 
@@ -55,6 +55,10 @@ var InitialOnDemandPricesUSGov = map[string]map[ec2types.InstanceType]float64{
 	"c6in.12xlarge": 3.276000, "c6in.16xlarge": 4.368000, "c6in.24xlarge": 6.552000, "c6in.2xlarge": 0.546000,
 	"c6in.32xlarge": 8.736000, "c6in.4xlarge": 1.092000, "c6in.8xlarge": 2.184000, "c6in.large": 0.136500,
 	"c6in.metal": 8.736000, "c6in.xlarge": 0.273000,
+	// c7g family
+	"c7g.12xlarge": 2.080800, "c7g.16xlarge": 2.774400, "c7g.2xlarge": 0.346800, "c7g.4xlarge": 0.693600,
+	"c7g.8xlarge": 1.387200, "c7g.large": 0.086700, "c7g.medium": 0.043400, "c7g.metal": 2.774400,
+	"c7g.xlarge": 0.173400,
 	// c7i family
 	"c7i.12xlarge": 2.570400, "c7i.16xlarge": 3.427200, "c7i.24xlarge": 5.140800, "c7i.2xlarge": 0.428400,
 	"c7i.48xlarge": 10.281600, "c7i.4xlarge": 0.856800, "c7i.8xlarge": 1.713600, "c7i.large": 0.107100,
@@ -235,6 +239,10 @@ var InitialOnDemandPricesUSGov = map[string]map[ec2types.InstanceType]float64{
 	"c6in.12xlarge": 3.276000, "c6in.16xlarge": 4.368000, "c6in.24xlarge": 6.552000, "c6in.2xlarge": 0.546000,
 	"c6in.32xlarge": 8.736000, "c6in.4xlarge": 1.092000, "c6in.8xlarge": 2.184000, "c6in.large": 0.136500,
 	"c6in.metal": 8.736000, "c6in.xlarge": 0.273000,
+	// c7g family
+	"c7g.12xlarge": 2.080800, "c7g.16xlarge": 2.774400, "c7g.2xlarge": 0.346800, "c7g.4xlarge": 0.693600,
+	"c7g.8xlarge": 1.387200, "c7g.large": 0.086700, "c7g.medium": 0.043400, "c7g.metal": 2.774400,
+	"c7g.xlarge": 0.173400,
 	// cc2 family
 	"cc2.8xlarge": 2.250000,
 	// d2 family

From 4aefd67a8e97518341a287f88c189e215a0101f6 Mon Sep 17 00:00:00 2001
From: Andrii Omelianenko
Date: Tue, 7 Jan 2025 19:51:18 +0200
Subject: [PATCH 03/18] docs: fix typo in kubectl command (#7570)

Co-authored-by: Andrii Omelianenko
---
 website/content/en/v1.0/upgrading/v1-migration.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/content/en/v1.0/upgrading/v1-migration.md b/website/content/en/v1.0/upgrading/v1-migration.md
index eb502eb36dab..ff14a312bba2 100644
--- a/website/content/en/v1.0/upgrading/v1-migration.md
+++ b/website/content/en/v1.0/upgrading/v1-migration.md
@@ -340,7 +340,7 @@ For more information, refer to [kubelet configuration migration]({{
Date: Wed, 8 Jan 2025 03:08:29 +0900
Subject: [PATCH 04/18] Update interruption-handling.md (#7552)

---
 designs/interruption-handling.md | 56 +++++++++++++++++---------------
 1 file changed, 29 insertions(+), 27 deletions(-)

diff --git a/designs/interruption-handling.md b/designs/interruption-handling.md
index a4e9f6ba6b10..33be4c55b392 100644
--- a/designs/interruption-handling.md
+++ b/designs/interruption-handling.md
@@ -29,15 +29,17 @@ There are two ways in-which Spot interruption notifications and Rebalance Recommendations
 
 EC2 IMDS is an HTTP API that can only be locally accessed from an EC2 instance.
 
 ```
-`curl 169.254.169.254/latest/meta-data/spot/instance-action
+# Termination Check
+curl 169.254.169.254/latest/meta-data/spot/instance-action
 {
   "action": "terminate",
   "time": "2022-07-11T17:11:44Z"
 }
 
-curl 169.254.169.254``/``latest``/``meta``-``data``/``events``/``recommendations``/``rebalance`
-`{`
-` ``"noticeTime"``:`` ``"2022-07-16T19:18:24Z"`
+# Rebalance Check
+curl 169.254.169.254/latest/meta-data/events/recommendations/rebalance
+{
+  "noticeTime": "2022-07-16T19:18:24Z"
 }
 ```
 
@@ -47,19 +49,19 @@
 
 EventBridge is an Event Bus service within AWS that allows users to set rules on events to capture and then target destinations for those events. Relevant targets for Spot interruption notifications include SQS, Lambda, and EC2-Terminate-Instance.
 
 ```
-`# Example spot interruption notification EventBridge rule`
-`$ aws events put``-``rule \`
-` ``--``name ``MyK8sSpotTermRule`` \`
-` ``--``event``-``pattern ``"{\"source\": [\"aws.ec2\"],\"detail-type\": [\"EC2 Spot Instance Interruption\"]}"`
-
-`# Example rebalance recommendation EventBridge rule``
-$ aws events put-rule \
-    --name MyK8sRebalanceRule \
-    --event-pattern "{\"source\": [\"aws.ec2\"],\"detail-type\": [\"EC2 Instance Rebalance Recommendation\"]}"
-`` `
-`# Example targeting an SQS queue`
-`$ aws events put``-``targets ``--``rule ``MyK8sSpotTermRule`` \`
-` ``--``targets ``"Id"``=``"1"``,``"Arn"``=``"arn:aws:sqs:us-east-1:123456789012:MyK8sTermQueue"`` `
+# Example spot interruption notification EventBridge rule
+aws events put-rule \
+    --name MyK8sSpotTermRule \
+    --event-pattern "{\"source\": [\"aws.ec2\"],\"detail-type\": [\"EC2 Spot Instance Interruption\"]}"
+
+# Example rebalance recommendation EventBridge rule
+aws events put-rule \
+    --name MyK8sRebalanceRule \
+    --event-pattern "{\"source\": [\"aws.ec2\"],\"detail-type\": [\"EC2 Instance Rebalance Recommendation\"]}"
+
+# Example targeting an SQS queue
+aws events put-targets --rule MyK8sSpotTermRule \
+    --targets "Id=1,Arn=arn:aws:sqs:us-east-1:123456789012:MyK8sTermQueue"
 ```
 
@@ -113,17 +115,17 @@ SQS exposes a VPC Endpoint which will fulfill the isolated VPC use-case.
 
 Dynamically creating the SQS infrastructure and EventBridge rules means that Karpenter’s IAM role would need permissions to SQS and EventBridge:
 
 ```
-`"sqs:GetQueueUrl",`
-`"sqs:ListQueues"``,`
-`"sqs:ReceiveMessage"``,`
-`"sqs:CreateQueue"``,`
-`"sqs:DeleteMessage"``,`
-`"events:ListRules",`
-"`events:DescribeRule`",
-"events:PutRule",
+"sqs:GetQueueUrl",
+"sqs:ListQueues",
+"sqs:ReceiveMessage",
+"sqs:CreateQueue",
+"sqs:DeleteMessage",
+"events:ListRules",
+"events:DescribeRule",
+"events:PutRule",
 "events:PutTargets",
-"`events:DeleteRule`",
-`"events:RemoveTargets"`
+"events:DeleteRule",
+"events:RemoveTargets"
 ```
 
 The policy can be setup with a predefined name based on the cluster name. For example, `karpenter-events-${CLUSTER_NAME}` which would allow for a more constrained resource policy.
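The design above leaves the actual queue consumer unspecified. As a rough illustration of what consuming such a queue involves, here is a minimal Go sketch using `aws-sdk-go-v2`. This is not code from this patch series: the queue name `karpenter-events-my-cluster` and the print-and-delete handler are placeholder assumptions.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sqs"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sqs.NewFromConfig(cfg)

	// Resolve the queue wired up as the EventBridge target,
	// e.g. karpenter-events-${CLUSTER_NAME}.
	queue, err := client.GetQueueUrl(ctx, &sqs.GetQueueUrlInput{
		QueueName: aws.String("karpenter-events-my-cluster"), // placeholder name
	})
	if err != nil {
		log.Fatal(err)
	}

	for {
		// Long-poll so an idle queue does not burn a request per empty receive.
		out, err := client.ReceiveMessage(ctx, &sqs.ReceiveMessageInput{
			QueueUrl:            queue.QueueUrl,
			MaxNumberOfMessages: 10,
			WaitTimeSeconds:     20,
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, msg := range out.Messages {
			// A real handler would unmarshal the EventBridge envelope, switch on
			// detail-type (Spot interruption vs. rebalance recommendation), and
			// cordon/drain the affected node before the termination deadline.
			fmt.Println(aws.ToString(msg.Body))
			// Delete only after successful handling so failures are redelivered.
			if _, err := client.DeleteMessage(ctx, &sqs.DeleteMessageInput{
				QueueUrl:      queue.QueueUrl,
				ReceiptHandle: msg.ReceiptHandle,
			}); err != nil {
				log.Fatal(err)
			}
		}
	}
}
```

Note that the `sqs:ReceiveMessage` and `sqs:DeleteMessage` permissions in the policy block above map directly onto the two calls in the loop.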
From bfdc2d997432756e33f06cfc2c0bf6ac1bd4ce82 Mon Sep 17 00:00:00 2001
From: Trevor DiMartino
Date: Tue, 7 Jan 2025 11:19:11 -0700
Subject: [PATCH 05/18] docs: Convert mentions of `do-not-evict` to `do-not-disrupt` (#7539)

---
 website/content/en/docs/troubleshooting.md    | 6 +++---
 website/content/en/preview/troubleshooting.md | 6 +++---
 website/content/en/v1.0/troubleshooting.md    | 6 +++---
 website/content/en/v1.1/troubleshooting.md    | 6 +++---
 4 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/website/content/en/docs/troubleshooting.md b/website/content/en/docs/troubleshooting.md
index 5c44f8a5f235..464597b20414 100644
--- a/website/content/en/docs/troubleshooting.md
+++ b/website/content/en/docs/troubleshooting.md
@@ -473,11 +473,11 @@ spec:
 
 You can set `minAvailable` or `maxUnavailable` as integers or as a percentage.
 Review what [disruptions are](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/), and [how to configure them](https://kubernetes.io/docs/tasks/run-application/configure-pdb/).
 
-#### `karpenter.sh/do-not-evict` Annotation
+#### `karpenter.sh/do-not-disrupt` Annotation
 
-If a pod exists with the annotation `karpenter.sh/do-not-evict: true` on a node, and a request is made to delete the node, Karpenter will not drain any pods from that node or otherwise try to delete the node. Nodes that have pods with a `do-not-evict` annotation are not considered for consolidation, though their unused capacity is considered for the purposes of running pods from other nodes which can be consolidated.
+If a pod exists with the annotation `karpenter.sh/do-not-disrupt: true` on a node, and a request is made to delete the node, Karpenter will not drain any pods from that node or otherwise try to delete the node. Nodes that have pods with a `do-not-disrupt` annotation are not considered for consolidation, though their unused capacity is considered for the purposes of running pods from other nodes which can be consolidated.
 
-If you want to terminate a node with a `do-not-evict` pod, you can simply remove the annotation and the deprovisioning process will continue.
+If you want to terminate a node with a `do-not-disrupt` pod, you can simply remove the annotation and the deprovisioning process will continue.
 
 #### Scheduling Constraints (Consolidation Only)
diff --git a/website/content/en/preview/troubleshooting.md b/website/content/en/preview/troubleshooting.md
index 5c44f8a5f235..464597b20414 100644
--- a/website/content/en/preview/troubleshooting.md
+++ b/website/content/en/preview/troubleshooting.md
@@ -473,11 +473,11 @@ spec:
 
 You can set `minAvailable` or `maxUnavailable` as integers or as a percentage.
 Review what [disruptions are](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/), and [how to configure them](https://kubernetes.io/docs/tasks/run-application/configure-pdb/).
 
-#### `karpenter.sh/do-not-evict` Annotation
+#### `karpenter.sh/do-not-disrupt` Annotation
 
-If a pod exists with the annotation `karpenter.sh/do-not-evict: true` on a node, and a request is made to delete the node, Karpenter will not drain any pods from that node or otherwise try to delete the node. Nodes that have pods with a `do-not-evict` annotation are not considered for consolidation, though their unused capacity is considered for the purposes of running pods from other nodes which can be consolidated.
+If a pod exists with the annotation `karpenter.sh/do-not-disrupt: true` on a node, and a request is made to delete the node, Karpenter will not drain any pods from that node or otherwise try to delete the node. Nodes that have pods with a `do-not-disrupt` annotation are not considered for consolidation, though their unused capacity is considered for the purposes of running pods from other nodes which can be consolidated.
 
-If you want to terminate a node with a `do-not-evict` pod, you can simply remove the annotation and the deprovisioning process will continue.
+If you want to terminate a node with a `do-not-disrupt` pod, you can simply remove the annotation and the deprovisioning process will continue.
 
 #### Scheduling Constraints (Consolidation Only)
diff --git a/website/content/en/v1.0/troubleshooting.md b/website/content/en/v1.0/troubleshooting.md
index 48a324794f5a..05ec771c53d8 100644
--- a/website/content/en/v1.0/troubleshooting.md
+++ b/website/content/en/v1.0/troubleshooting.md
@@ -464,11 +464,11 @@ spec:
 
 You can set `minAvailable` or `maxUnavailable` as integers or as a percentage.
 Review what [disruptions are](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/), and [how to configure them](https://kubernetes.io/docs/tasks/run-application/configure-pdb/).
 
-#### `karpenter.sh/do-not-evict` Annotation
+#### `karpenter.sh/do-not-disrupt` Annotation
 
-If a pod exists with the annotation `karpenter.sh/do-not-evict: true` on a node, and a request is made to delete the node, Karpenter will not drain any pods from that node or otherwise try to delete the node. Nodes that have pods with a `do-not-evict` annotation are not considered for consolidation, though their unused capacity is considered for the purposes of running pods from other nodes which can be consolidated.
+If a pod exists with the annotation `karpenter.sh/do-not-disrupt: true` on a node, and a request is made to delete the node, Karpenter will not drain any pods from that node or otherwise try to delete the node. Nodes that have pods with a `do-not-disrupt` annotation are not considered for consolidation, though their unused capacity is considered for the purposes of running pods from other nodes which can be consolidated.
 
-If you want to terminate a node with a `do-not-evict` pod, you can simply remove the annotation and the deprovisioning process will continue.
+If you want to terminate a node with a `do-not-disrupt` pod, you can simply remove the annotation and the deprovisioning process will continue.
 
 #### Scheduling Constraints (Consolidation Only)
diff --git a/website/content/en/v1.1/troubleshooting.md b/website/content/en/v1.1/troubleshooting.md
index 5c44f8a5f235..464597b20414 100644
--- a/website/content/en/v1.1/troubleshooting.md
+++ b/website/content/en/v1.1/troubleshooting.md
@@ -473,11 +473,11 @@ spec:
 
 You can set `minAvailable` or `maxUnavailable` as integers or as a percentage.
 Review what [disruptions are](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/), and [how to configure them](https://kubernetes.io/docs/tasks/run-application/configure-pdb/).
 
-#### `karpenter.sh/do-not-evict` Annotation
+#### `karpenter.sh/do-not-disrupt` Annotation
 
-If a pod exists with the annotation `karpenter.sh/do-not-evict: true` on a node, and a request is made to delete the node, Karpenter will not drain any pods from that node or otherwise try to delete the node. Nodes that have pods with a `do-not-evict` annotation are not considered for consolidation, though their unused capacity is considered for the purposes of running pods from other nodes which can be consolidated.
+If a pod exists with the annotation `karpenter.sh/do-not-disrupt: true` on a node, and a request is made to delete the node, Karpenter will not drain any pods from that node or otherwise try to delete the node. Nodes that have pods with a `do-not-disrupt` annotation are not considered for consolidation, though their unused capacity is considered for the purposes of running pods from other nodes which can be consolidated.
 
-If you want to terminate a node with a `do-not-evict` pod, you can simply remove the annotation and the deprovisioning process will continue.
+If you want to terminate a node with a `do-not-disrupt` pod, you can simply remove the annotation and the deprovisioning process will continue.
 
 #### Scheduling Constraints (Consolidation Only)

From 9a138596c11bfcba330bdfcd4522fe2a97e40a61 Mon Sep 17 00:00:00 2001
From: Talbalash-legit <165669855+Talbalash-legit@users.noreply.github.com>
Date: Wed, 8 Jan 2025 19:34:01 +0200
Subject: [PATCH 06/18] docs: update migration docs to 0.37.6 (#7569)

---
 .../content/en/v1.0/upgrading/v1-migration.md | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/website/content/en/v1.0/upgrading/v1-migration.md b/website/content/en/v1.0/upgrading/v1-migration.md
index ff14a312bba2..dd2b6b07b581 100644
--- a/website/content/en/v1.0/upgrading/v1-migration.md
+++ b/website/content/en/v1.0/upgrading/v1-migration.md
@@ -179,7 +179,7 @@ You should still review the upgrade procedure; the sequence of operations remain
 
    ```bash
    # Note: v0.33.x and v0.34.x include the v prefix, omit it for versions v0.35+
-   export KARPENTER_VERSION="0.37.5" # Replace with your minor version
+   export KARPENTER_VERSION="0.37.6" # Replace with your minor version
    ```
 
 4. Upgrade Karpenter to the latest patch release for your current minor version.
@@ -325,7 +325,7 @@ The following releases should be used as rollback targets:
 
 * `v0.37.6`
 * `v0.36.8`
-* `v0.35.12`
+* `v0.35.11`
 * `v0.34.12`
 * `v0.33.11`
@@ -357,15 +357,15 @@ For example: `kubectl get nodepool.v1beta1.karpenter.sh`.
    ```
 
 2. Configure your target Karpenter version. You should select one of the following versions:
-   * `0.37.5`
-   * `0.36.7`
-   * `0.35.10`
-   * `v0.34.11`
-   * `v0.33.10`
+   * `0.37.6`
+   * `0.36.8`
+   * `0.35.11`
+   * `v0.34.12`
+   * `v0.33.11`
 
    ```bash
    # Note: v0.33.x and v0.34.x include the v prefix, omit it for versions v0.35+
-   export KARPENTER_VERSION="0.37.5" # Replace with your minor version
+   export KARPENTER_VERSION="0.37.6" # Replace with your minor version
    ```
 
 3. Attach the `v1beta1` policy from your target version to your existing NodeRole.
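One practical consequence of the `do-not-disrupt` rename in PATCH 05 above: since removing the annotation is all it takes to let deprovisioning continue, that step is easy to script. The client-go sketch below is illustrative only; the node name `my-node` and the default kubeconfig location are assumptions, not anything prescribed by these patches.

```go
package main

import (
	"context"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load credentials from the default kubeconfig (assumed to exist).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Find pods scheduled on the node we want Karpenter to be able to disrupt.
	pods, err := client.CoreV1().Pods("").List(ctx, metav1.ListOptions{
		FieldSelector: "spec.nodeName=my-node", // placeholder node name
	})
	if err != nil {
		log.Fatal(err)
	}

	// A JSON merge patch that sets an annotation to null deletes the key.
	patch := []byte(`{"metadata":{"annotations":{"karpenter.sh/do-not-disrupt":null}}}`)
	for _, p := range pods.Items {
		if _, ok := p.Annotations["karpenter.sh/do-not-disrupt"]; !ok {
			continue // nothing to remove on this pod
		}
		if _, err := client.CoreV1().Pods(p.Namespace).Patch(
			ctx, p.Name, types.MergePatchType, patch, metav1.PatchOptions{},
		); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("cleared karpenter.sh/do-not-disrupt on %s/%s\n", p.Namespace, p.Name)
	}
}
```

Clearing the annotation this way matches the troubleshooting guide's advice to simply remove it and let the deprovisioning process continue.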
From a3201dc1c838c507bb384ba1773e5dd42a8a9824 Mon Sep 17 00:00:00 2001
From: Talbalash-legit <165669855+Talbalash-legit@users.noreply.github.com>
Date: Fri, 10 Jan 2025 19:39:30 +0200
Subject: [PATCH 07/18] docs: Add Legit Security to ADOPTERS (#7577)

---
 ADOPTERS.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ADOPTERS.md b/ADOPTERS.md
index c1664389abe4..19ab7a95fc05 100644
--- a/ADOPTERS.md
+++ b/ADOPTERS.md
@@ -59,5 +59,6 @@ If you are open to others contacting you about your use of Karpenter on Slack, a
 | Whoosh | Using Karpenter to scale the EKS clusters for many purposes | `@vainkop` | [Whoosh](https://whoosh.bike) |
 | Next Insurance | Using Karpenter to manage the nodes in all our EKS clusters, including dev and prod, on demand and spots | `@moshebs` | [Homepage](https://www.nextinsurance.com)|
 | Grover Group GmbH | We use Karpenter for efficient and cost effective scaling of our nodes in all of our EKS clusters | `@suraj2410` | [Homepage](https://www.grover.com/de-en) & [Engineering Techblog](https://engineering.grover.com)|
+| Legit Security | We run Karpenter across all our EKS clusters to ensure efficient and cost-effective scaling across our infrastructure | `@Tal Balash`, `@Matan Ryngler` | [Homepage](https://www.legitsecurity.com)|
 | Logz.io | Using Karpenter in all of our EKS clusters for efficient and cost effective scaling of all our K8s workloads | `@pincher95`, `@Samplify` | [Homepage](https://logz.io/)|
 | X3M ads | We have been using Karpenter for (almost) all our workloads since 2023 | `@mreparaz`, `@fmansilla`, `@mrmartinez95` | [Homepage](https://x3mads.com) |

From b631d9e594f982783ae6bc55d098b63c396c32a2 Mon Sep 17 00:00:00 2001
From: edibble21 <85638465+edibble21@users.noreply.github.com>
Date: Fri, 10 Jan 2025 10:10:07 -0800
Subject: [PATCH 08/18] Chore: Separate validation for update (#7575)

---
 pkg/controllers/controllers.go                  |  2 +-
 pkg/controllers/providers/version/controller.go |  8 ++++++--
 pkg/controllers/providers/version/suite_test.go |  2 +-
 pkg/providers/version/suite_test.go             |  2 +-
 pkg/providers/version/version.go                | 12 ++++++++++--
 5 files changed, 19 insertions(+), 7 deletions(-)

diff --git a/pkg/controllers/controllers.go b/pkg/controllers/controllers.go
index 350a1cc7b484..2514ebee5aed 100644
--- a/pkg/controllers/controllers.go
+++ b/pkg/controllers/controllers.go
@@ -92,7 +92,7 @@ func NewControllers(
 		ssminvalidation.NewController(ssmCache, amiProvider),
 		status.NewController[*v1.EC2NodeClass](kubeClient, mgr.GetEventRecorderFor("karpenter"), status.EmitDeprecatedMetrics),
 		opevents.NewController[*corev1.Node](kubeClient, clk),
-		controllersversion.NewController(versionProvider),
+		controllersversion.NewController(versionProvider, versionProvider.UpdateVersionWithValidation),
 	}
 	if options.FromContext(ctx).InterruptionQueue != "" {
 		sqsapi := servicesqs.NewFromConfig(cfg)
diff --git a/pkg/controllers/providers/version/controller.go b/pkg/controllers/providers/version/controller.go
index 711d4c759c24..57fcaab31245 100644
--- a/pkg/controllers/providers/version/controller.go
+++ b/pkg/controllers/providers/version/controller.go
@@ -28,20 +28,24 @@ import (
 	"github.com/aws/karpenter-provider-aws/pkg/providers/version"
 )
 
+type UpdateVersion func(context.Context) error
+
 type Controller struct {
 	versionProvider *version.DefaultProvider
+	updateVersion   UpdateVersion
 }
 
-func NewController(versionProvider *version.DefaultProvider) *Controller {
+func NewController(versionProvider *version.DefaultProvider, updateVersion UpdateVersion) *Controller {
 	return &Controller{
 		versionProvider: versionProvider,
+		updateVersion:   updateVersion,
 	}
 }
 
 func (c *Controller) Reconcile(ctx context.Context) (reconcile.Result, error) {
 	ctx = injection.WithControllerName(ctx, "providers.version")
-	if err := c.versionProvider.UpdateVersion(ctx); err != nil {
+	if err := c.updateVersion(ctx); err != nil {
 		return reconcile.Result{}, fmt.Errorf("updating version, %w", err)
 	}
 	return reconcile.Result{RequeueAfter: 5 * time.Minute}, nil
diff --git a/pkg/controllers/providers/version/suite_test.go b/pkg/controllers/providers/version/suite_test.go
index 59c0ddc62eae..005c45d6d8e4 100644
--- a/pkg/controllers/providers/version/suite_test.go
+++ b/pkg/controllers/providers/version/suite_test.go
@@ -56,7 +56,7 @@ var _ = BeforeSuite(func() {
 	ctx = options.ToContext(ctx, test.Options())
 	ctx, stop = context.WithCancel(ctx)
 	awsEnv = test.NewEnvironment(ctx, env)
-	controller = controllersversion.NewController(awsEnv.VersionProvider)
+	controller = controllersversion.NewController(awsEnv.VersionProvider, awsEnv.VersionProvider.UpdateVersionWithValidation)
 })
 
 var _ = AfterSuite(func() {
diff --git a/pkg/providers/version/suite_test.go b/pkg/providers/version/suite_test.go
index 0536e706ece5..29910f8e04e0 100644
--- a/pkg/providers/version/suite_test.go
+++ b/pkg/providers/version/suite_test.go
@@ -57,7 +57,7 @@ var _ = BeforeSuite(func() {
 	ctx, stop = context.WithCancel(ctx)
 	awsEnv = test.NewEnvironment(ctx, env)
 	testEnv = &environmentaws.Environment{Environment: &common.Environment{KubeClient: env.KubernetesInterface}}
-	versionController = controllersversion.NewController(awsEnv.VersionProvider)
+	versionController = controllersversion.NewController(awsEnv.VersionProvider, awsEnv.VersionProvider.UpdateVersionWithValidation)
 })
 
 var _ = AfterSuite(func() {
diff --git a/pkg/providers/version/version.go b/pkg/providers/version/version.go
index 9dbeaae0520d..eb0101e43446 100644
--- a/pkg/providers/version/version.go
+++ b/pkg/providers/version/version.go
@@ -69,7 +69,7 @@ func (p *DefaultProvider) Get(ctx context.Context) string {
 }
 
 func (p *DefaultProvider) UpdateVersion(ctx context.Context) error {
-	var version, versionSource string
+	var version string
 	var err error
 
 	if options.FromContext(ctx).EKSControlPlane {
@@ -84,7 +84,15 @@ func (p *DefaultProvider) UpdateVersion(ctx context.Context) error {
 		}
 	}
 	p.version.Store(&version)
-	if p.cm.HasChanged("kubernetes-version", version) || p.cm.HasChanged("version-source", versionSource) {
+	return nil
+}
+func (p *DefaultProvider) UpdateVersionWithValidation(ctx context.Context) error {
+	err := p.UpdateVersion(ctx)
+	if err != nil {
+		return err
+	}
+	version := p.Get(ctx)
+	if p.cm.HasChanged("kubernetes-version", version) {
 		log.FromContext(ctx).WithValues("version", version).V(1).Info("discovered kubernetes version")
 		if err := validateK8sVersion(version); err != nil {
 			return fmt.Errorf("validating kubernetes version, %w", err)

From 10202fec21bdebb2aaa255f26d92a11f87b77262 Mon Sep 17 00:00:00 2001
From: Amanuel Engeda <74629455+engedaam@users.noreply.github.com>
Date: Fri, 10 Jan 2025 10:11:25 -0800
Subject: [PATCH 09/18] feat: Add Support for Node Monitoring Agent (#7545)

---
 .../templates/karpenter.sh_nodeclaims.yaml    |  4 +++
 go.mod                                        |  2 +-
 go.sum                                        |  4 +--
 pkg/apis/crds/karpenter.sh_nodeclaims.yaml    |  4 +++
 pkg/cloudprovider/cloudprovider.go            | 29 ++++++++++++++++++-
 test/suites/integration/repair_policy_test.go | 27 +++++++++++++++++
 6 files changed, 66 insertions(+), 4 deletions(-)

diff --git
a/charts/karpenter-crd/templates/karpenter.sh_nodeclaims.yaml b/charts/karpenter-crd/templates/karpenter.sh_nodeclaims.yaml index 487f12a9af1f..01531fea5da8 100644 --- a/charts/karpenter-crd/templates/karpenter.sh_nodeclaims.yaml +++ b/charts/karpenter-crd/templates/karpenter.sh_nodeclaims.yaml @@ -38,6 +38,10 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date + - jsonPath: .status.imageID + name: ImageID + priority: 1 + type: string - jsonPath: .status.providerID name: ID priority: 1 diff --git a/go.mod b/go.mod index 245ff1c46ced..460f9fefb413 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 sigs.k8s.io/controller-runtime v0.19.3 - sigs.k8s.io/karpenter v1.1.1 + sigs.k8s.io/karpenter v1.1.2-0.20241220005608-b3fa6ebffc19 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index e75e2847827a..74afec1f183d 100644 --- a/go.sum +++ b/go.sum @@ -339,8 +339,8 @@ sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8b sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/karpenter v1.1.1 h1:QPpVC8DsaLgJ/YWcFpZKE4m3jD+Qp88/GtSPvMfffck= -sigs.k8s.io/karpenter v1.1.1/go.mod h1:NQouOJNK6s1d4EIKa5cY7nAV3IG74qZ6gPzHBeCZNPw= +sigs.k8s.io/karpenter v1.1.2-0.20241220005608-b3fa6ebffc19 h1:nCaZE6O7772FEEPGgTef05IanE8AWMKf7DBh1LiU1ik= +sigs.k8s.io/karpenter v1.1.2-0.20241220005608-b3fa6ebffc19/go.mod h1:E1mtCutIoQJA05ClYYQo9y+5ujk6U2FxByauGSUXXZs= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/pkg/apis/crds/karpenter.sh_nodeclaims.yaml b/pkg/apis/crds/karpenter.sh_nodeclaims.yaml index bfe259dea177..759903b3233e 100644 --- a/pkg/apis/crds/karpenter.sh_nodeclaims.yaml +++ b/pkg/apis/crds/karpenter.sh_nodeclaims.yaml @@ -35,6 +35,10 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date + - jsonPath: .status.imageID + name: ImageID + priority: 1 + type: string - jsonPath: .status.providerID name: ID priority: 1 diff --git a/pkg/cloudprovider/cloudprovider.go b/pkg/cloudprovider/cloudprovider.go index 3fe6b6f2706c..1169b1a3a6b0 100644 --- a/pkg/cloudprovider/cloudprovider.go +++ b/pkg/cloudprovider/cloudprovider.go @@ -259,7 +259,7 @@ func getTags(ctx context.Context, nodeClass *v1.EC2NodeClass, nodeClaim *karpv1. 
func (c *CloudProvider) RepairPolicies() []cloudprovider.RepairPolicy { return []cloudprovider.RepairPolicy{ - // Supported Kubelet fields + // Supported Kubelet Node Conditions { ConditionType: corev1.NodeReady, ConditionStatus: corev1.ConditionFalse, @@ -270,6 +270,33 @@ func (c *CloudProvider) RepairPolicies() []cloudprovider.RepairPolicy { ConditionStatus: corev1.ConditionUnknown, TolerationDuration: 30 * time.Minute, }, + // Support Node Monitoring Agent Conditions + // + { + ConditionType: "AcceleratedHardwareReady", + ConditionStatus: corev1.ConditionFalse, + TolerationDuration: 10 * time.Minute, + }, + { + ConditionType: "StorageReady", + ConditionStatus: corev1.ConditionFalse, + TolerationDuration: 30 * time.Minute, + }, + { + ConditionType: "NetworkingReady", + ConditionStatus: corev1.ConditionFalse, + TolerationDuration: 30 * time.Minute, + }, + { + ConditionType: "KernelReady", + ConditionStatus: corev1.ConditionFalse, + TolerationDuration: 30 * time.Minute, + }, + { + ConditionType: "ContainerRuntimeReady", + ConditionStatus: corev1.ConditionFalse, + TolerationDuration: 30 * time.Minute, + }, } } diff --git a/test/suites/integration/repair_policy_test.go b/test/suites/integration/repair_policy_test.go index 56d464d972b8..6a91ed940ed0 100644 --- a/test/suites/integration/repair_policy_test.go +++ b/test/suites/integration/repair_policy_test.go @@ -73,6 +73,7 @@ var _ = Describe("Repair Policy", func() { env.EventuallyExpectNotFound(pod, node) env.EventuallyExpectHealthyPodCount(selector, numPods) }, + // Kubelet Supported Conditions Entry("Node Ready False", corev1.NodeCondition{ Type: corev1.NodeReady, Status: corev1.ConditionFalse, @@ -83,6 +84,32 @@ var _ = Describe("Repair Policy", func() { Status: corev1.ConditionUnknown, LastTransitionTime: metav1.Time{Time: time.Now().Add(-31 * time.Minute)}, }), + // Node Monitoring Agent Supported Conditions + Entry("Node AcceleratedHardwareReady False", corev1.NodeCondition{ + Type: "AcceleratedHardwareReady", + Status: corev1.ConditionFalse, + LastTransitionTime: metav1.Time{Time: time.Now().Add(-11 * time.Minute)}, + }), + Entry("Node StorageReady False", corev1.NodeCondition{ + Type: "StorageReady", + Status: corev1.ConditionFalse, + LastTransitionTime: metav1.Time{Time: time.Now().Add(-31 * time.Minute)}, + }), + Entry("Node NetworkingReady False", corev1.NodeCondition{ + Type: "NetworkingReady", + Status: corev1.ConditionFalse, + LastTransitionTime: metav1.Time{Time: time.Now().Add(-31 * time.Minute)}, + }), + Entry("Node KernelReady False", corev1.NodeCondition{ + Type: "KernelReady", + Status: corev1.ConditionFalse, + LastTransitionTime: metav1.Time{Time: time.Now().Add(-31 * time.Minute)}, + }), + Entry("Node ContainerRuntimeReady False", corev1.NodeCondition{ + Type: "ContainerRuntimeReady", + Status: corev1.ConditionFalse, + LastTransitionTime: metav1.Time{Time: time.Now().Add(-31 * time.Minute)}, + }), ) It("should ignore disruption budgets", func() { nodePool.Spec.Disruption.Budgets = []karpenterv1.Budget{ From 0318d5f0abf45ddc58d42f6aee8f7b9b0b404e5e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 10:21:37 -0800 Subject: [PATCH 10/18] chore(deps): bump golang.org/x/net from 0.24.0 to 0.33.0 in /test/hack/resource (#7579) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- test/hack/resource/go.mod | 4 ++-- test/hack/resource/go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 
deletions(-) diff --git a/test/hack/resource/go.mod b/test/hack/resource/go.mod index fe3daf63140e..7303056fdc6e 100644 --- a/test/hack/resource/go.mod +++ b/test/hack/resource/go.mod @@ -36,8 +36,8 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - golang.org/x/net v0.24.0 // indirect - golang.org/x/text v0.15.0 // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/text v0.21.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/apimachinery v0.30.0 // indirect diff --git a/test/hack/resource/go.sum b/test/hack/resource/go.sum index ab9878db89cb..97daed084386 100644 --- a/test/hack/resource/go.sum +++ b/test/hack/resource/go.sum @@ -95,8 +95,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -105,8 +105,8 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= From f8badcce9cef375ec2779281de7addaac86fc13c Mon Sep 17 00:00:00 2001 From: Amanuel Engeda <74629455+engedaam@users.noreply.github.com> Date: Fri, 10 Jan 2025 14:48:30 -0800 Subject: [PATCH 11/18] chore: Bump kubernetes-sigs/Karpenter (#7581) --- cmd/controller/main.go | 3 +++ go.mod | 11 ++++++----- go.sum | 22 ++++++++++++---------- 3 files changed, 21 insertions(+), 15 deletions(-) diff --git a/cmd/controller/main.go b/cmd/controller/main.go index a1ef39fc2498..8dd479f84b41 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -21,6 +21,7 @@ import ( "sigs.k8s.io/karpenter/pkg/cloudprovider/metrics" corecontrollers 
"sigs.k8s.io/karpenter/pkg/controllers" + "sigs.k8s.io/karpenter/pkg/controllers/state" coreoperator "sigs.k8s.io/karpenter/pkg/operator" ) @@ -36,6 +37,7 @@ func main() { op.SecurityGroupProvider, ) cloudProvider := metrics.Decorate(awsCloudProvider) + clusterState := state.NewCluster(op.Clock, op.GetClient(), cloudProvider) op. WithControllers(ctx, corecontrollers.NewControllers( @@ -45,6 +47,7 @@ func main() { op.GetClient(), op.EventRecorder, cloudProvider, + clusterState, )...). WithControllers(ctx, controllers.NewControllers( ctx, diff --git a/go.mod b/go.mod index 460f9fefb413..d785420e8ee3 100644 --- a/go.mod +++ b/go.mod @@ -42,8 +42,8 @@ require ( k8s.io/client-go v0.32.0 k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 - sigs.k8s.io/controller-runtime v0.19.3 - sigs.k8s.io/karpenter v1.1.2-0.20241220005608-b3fa6ebffc19 + sigs.k8s.io/controller-runtime v0.19.4 + sigs.k8s.io/karpenter v1.1.2-0.20250110222631-380bcc932099 sigs.k8s.io/yaml v1.4.0 ) @@ -65,6 +65,7 @@ require ( github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.7.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -104,16 +105,16 @@ require ( golang.org/x/sys v0.28.0 // indirect golang.org/x/term v0.27.0 // indirect golang.org/x/text v0.21.0 // indirect - golang.org/x/time v0.8.0 // indirect + golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.28.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/protobuf v1.36.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/cloud-provider v0.31.3 // indirect + k8s.io/cloud-provider v0.32.0 // indirect k8s.io/component-base v0.32.0 // indirect - k8s.io/csi-translation-lib v0.31.3 // indirect + k8s.io/csi-translation-lib v0.32.0 // indirect k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect diff --git a/go.sum b/go.sum index 74afec1f183d..708a74400789 100644 --- a/go.sum +++ b/go.sum @@ -74,6 +74,8 @@ github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -282,8 +284,8 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.8.0 
h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= -golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -323,24 +325,24 @@ k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg= k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= -k8s.io/cloud-provider v0.31.3 h1:7C3CHQUUwnv/HWWVIaibZH06iPg663RYQ6C6Zy4FnO8= -k8s.io/cloud-provider v0.31.3/go.mod h1:c7csKppoVb9Ej6upJ28AvHy4B3BtlRMzXfgezsDdPKw= +k8s.io/cloud-provider v0.32.0 h1:QXYJGmwME2q2rprymbmw2GroMChQYc/MWN6l/I4Kgp8= +k8s.io/cloud-provider v0.32.0/go.mod h1:cz3gVodkhgwi2ugj/JUPglIruLSdDaThxawuDyCHfr8= k8s.io/component-base v0.32.0 h1:d6cWHZkCiiep41ObYQS6IcgzOUQUNpywm39KVYaUqzU= k8s.io/component-base v0.32.0/go.mod h1:JLG2W5TUxUu5uDyKiH2R/7NnxJo1HlPoRIIbVLkK5eM= -k8s.io/csi-translation-lib v0.31.3 h1:hxcPRNdtEsk766jCXSKjgH1V8jUNx5tVqdooQ1Ars/M= -k8s.io/csi-translation-lib v0.31.3/go.mod h1:0B1gQwd868XUIDwJYy5gB2jDXWEwlcWvSsfcQEgzbRk= +k8s.io/csi-translation-lib v0.32.0 h1:RAn9RGgYXHJQtDSb6qQ7zvq6QObOejzmsXDARI+f4OQ= +k8s.io/csi-translation-lib v0.32.0/go.mod h1:TjCJzkTNstdOESAXNnEImrYOMIEzP14aqM7H+vkehqw= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw= -sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM= +sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGFwV/Qo= +sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/karpenter v1.1.2-0.20241220005608-b3fa6ebffc19 h1:nCaZE6O7772FEEPGgTef05IanE8AWMKf7DBh1LiU1ik= -sigs.k8s.io/karpenter v1.1.2-0.20241220005608-b3fa6ebffc19/go.mod h1:E1mtCutIoQJA05ClYYQo9y+5ujk6U2FxByauGSUXXZs= +sigs.k8s.io/karpenter v1.1.2-0.20250110222631-380bcc932099 h1:HpqyjVfGgeE3Sj/GAbvg3mriTyY9i8Ds351d/w8glAI= +sigs.k8s.io/karpenter v1.1.2-0.20250110222631-380bcc932099/go.mod h1:qizACS4OKCZ5a+8YVK+I8BwKK4fK6D7EhItejdbXRmI= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod 
h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= From 0ce382944cc00fa9e8b303d4e49257443af77de2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 10:30:31 -0800 Subject: [PATCH 12/18] chore(deps): bump the go-deps group with 12 updates (#7584) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 38 ++++++++++++++--------------- go.sum | 76 +++++++++++++++++++++++++++++----------------------------- 2 files changed, 57 insertions(+), 57 deletions(-) diff --git a/go.mod b/go.mod index d785420e8ee3..4585e620c502 100644 --- a/go.mod +++ b/go.mod @@ -6,18 +6,18 @@ require ( github.com/Pallinder/go-randomdata v1.2.0 github.com/PuerkitoBio/goquery v1.10.1 github.com/avast/retry-go v3.0.0+incompatible - github.com/aws/aws-sdk-go-v2 v1.32.7 - github.com/aws/aws-sdk-go-v2/config v1.28.7 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 - github.com/aws/aws-sdk-go-v2/service/ec2 v1.198.1 - github.com/aws/aws-sdk-go-v2/service/eks v1.56.0 - github.com/aws/aws-sdk-go-v2/service/fis v1.31.3 - github.com/aws/aws-sdk-go-v2/service/iam v1.38.3 - github.com/aws/aws-sdk-go-v2/service/pricing v1.32.8 - github.com/aws/aws-sdk-go-v2/service/sqs v1.37.4 - github.com/aws/aws-sdk-go-v2/service/ssm v1.56.2 - github.com/aws/aws-sdk-go-v2/service/sts v1.33.3 - github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.29.9 + github.com/aws/aws-sdk-go-v2 v1.32.8 + github.com/aws/aws-sdk-go-v2/config v1.28.10 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.198.3 + github.com/aws/aws-sdk-go-v2/service/eks v1.56.2 + github.com/aws/aws-sdk-go-v2/service/fis v1.31.4 + github.com/aws/aws-sdk-go-v2/service/iam v1.38.4 + github.com/aws/aws-sdk-go-v2/service/pricing v1.32.9 + github.com/aws/aws-sdk-go-v2/service/sqs v1.37.6 + github.com/aws/aws-sdk-go-v2/service/ssm v1.56.4 + github.com/aws/aws-sdk-go-v2/service/sts v1.33.6 + github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.29.10 github.com/aws/karpenter-provider-aws/tools/kompat v0.0.0-20240410220356-6b868db24881 github.com/aws/smithy-go v1.22.1 github.com/awslabs/amazon-eks-ami/nodeadm v0.0.0-20240229193347-cfab22a10647 @@ -50,15 +50,15 @@ require ( require ( github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/andybalholm/cascadia v1.3.3 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.48 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.51 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.7 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.24.8 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.8 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8 // indirect + 
github.com/aws/aws-sdk-go-v2/service/sso v1.24.9 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect diff --git a/go.sum b/go.sum index 708a74400789..9b71515058ac 100644 --- a/go.sum +++ b/go.sum @@ -8,48 +8,48 @@ github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kk github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA= github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= -github.com/aws/aws-sdk-go-v2 v1.32.7 h1:ky5o35oENWi0JYWUZkB7WYvVPP+bcRF5/Iq7JWSb5Rw= -github.com/aws/aws-sdk-go-v2 v1.32.7/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= -github.com/aws/aws-sdk-go-v2/config v1.28.7 h1:GduUnoTXlhkgnxTD93g1nv4tVPILbdNQOzav+Wpg7AE= -github.com/aws/aws-sdk-go-v2/config v1.28.7/go.mod h1:vZGX6GVkIE8uECSUHB6MWAUsd4ZcG2Yq/dMa4refR3M= -github.com/aws/aws-sdk-go-v2/credentials v1.17.48 h1:IYdLD1qTJ0zanRavulofmqut4afs45mOWEI+MzZtTfQ= -github.com/aws/aws-sdk-go-v2/credentials v1.17.48/go.mod h1:tOscxHN3CGmuX9idQ3+qbkzrjVIx32lqDSU1/0d/qXs= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 h1:kqOrpojG71DxJm/KDPO+Z/y1phm1JlC8/iT+5XRmAn8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22/go.mod h1:NtSFajXVVL8TA2QNngagVZmUtXciyrHOt7xgz4faS/M= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26 h1:I/5wmGMffY4happ8NOCuIUEWGUvvFp5NSeQcXl9RHcI= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26/go.mod h1:FR8f4turZtNy6baO0KJ5FJUmXH/cSkI9fOngs0yl6mA= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26 h1:zXFLuEuMMUOvEARXFUVJdfqZ4bvvSgdGRq/ATcrQxzM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26/go.mod h1:3o2Wpy0bogG1kyOPrgkXA8pgIfEEv0+m19O9D5+W8y8= +github.com/aws/aws-sdk-go-v2 v1.32.8 h1:cZV+NUS/eGxKXMtmyhtYPJ7Z4YLoI/V8bkTdRZfYhGo= +github.com/aws/aws-sdk-go-v2 v1.32.8/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= +github.com/aws/aws-sdk-go-v2/config v1.28.10 h1:fKODZHfqQu06pCzR69KJ3GuttraRJkhlC8g80RZ0Dfg= +github.com/aws/aws-sdk-go-v2/config v1.28.10/go.mod h1:PvdxRYZ5Um9QMq9PQ0zHHNdtKK+he2NHtFCUFMXWXeg= +github.com/aws/aws-sdk-go-v2/credentials v1.17.51 h1:F/9Sm6Y6k4LqDesZDPJCLxQGXNNHd/ZtJiWd0lCZKRk= +github.com/aws/aws-sdk-go-v2/credentials v1.17.51/go.mod h1:TKbzCHm43AoPyA+iLGGcruXd4AFhF8tOmLex2R9jWNQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 h1:IBAoD/1d8A8/1aA8g4MBVtTRHhXRiNAgwdbo/xRM2DI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23/go.mod h1:vfENuCM7dofkgKpYzuzf1VT1UKkA/YL3qanfBn7HCaA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 h1:jSJjSBzw8VDIbWv+mmvBSP8ezsztMYJGH+eKqi9AmNs= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27/go.mod h1:/DAhLbFRgwhmvJdOfSm+WwikZrCuUJiA4WgJG0fTNSw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27 h1:l+X4K77Dui85pIj5foXDhPlnqcNRG2QUyvca300lXh8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27/go.mod h1:KvZXSFEXm6x84yE8qffKvT3x8J5clWnVFXphpohhzJ8= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.198.1 
h1:YbNopxjd9baM83YEEmkaYHi+NuJt0AszeaSLqo0CVr0= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.198.1/go.mod h1:mwr3iRm8u1+kkEx4ftDM2Q6Yr0XQFBKrP036ng+k5Lk= -github.com/aws/aws-sdk-go-v2/service/eks v1.56.0 h1:x31cGGE/t/QkrHVh5m2uWvYwDiaDXpj88nh6OdnI5r0= -github.com/aws/aws-sdk-go-v2/service/eks v1.56.0/go.mod h1:kNUWaiotRWCnfQlprrxSMg8ALqbZyA9xLCwKXuLumSk= -github.com/aws/aws-sdk-go-v2/service/fis v1.31.3 h1:Pyde+VIhO71j5j+BXiwA2civiljvIRLkKFpCSEpw29E= -github.com/aws/aws-sdk-go-v2/service/fis v1.31.3/go.mod h1:lMzi+Vbnzlq6fPfIvHPWoX2LHKM2S2EOn5z6Vx71nmw= -github.com/aws/aws-sdk-go-v2/service/iam v1.38.3 h1:2sFIoFzU1IEL9epJWubJm9Dhrn45aTNEJuwsesaCGnk= -github.com/aws/aws-sdk-go-v2/service/iam v1.38.3/go.mod h1:KzlNINwfr/47tKkEhgk0r10/OZq3rjtyWy0txL3lM+I= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.198.3 h1:h5UPeMBMm29Vjk45QVnH2Qu2QMbzRrWUORwyGjzWQso= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.198.3/go.mod h1:WAFpTnWeO2BNfwpQ8LTTTx9l9/bTztMPrA8gkh41PvI= +github.com/aws/aws-sdk-go-v2/service/eks v1.56.2 h1:NXxglcZhHubtK2SgqavDGkbArM4NYI7QvLr+FpOL3Oo= +github.com/aws/aws-sdk-go-v2/service/eks v1.56.2/go.mod h1:KkH+D6VJmtIVGD9KTxB9yZu4hQP7s9kxWn8lLb7tmVg= +github.com/aws/aws-sdk-go-v2/service/fis v1.31.4 h1:368PLRSPKPYLcRwcUVOZ7/47cXbHK0L3BCukuuIgiJ4= +github.com/aws/aws-sdk-go-v2/service/fis v1.31.4/go.mod h1:dTr6z1mEz80NiibrjBsHZS0ahFcG/R0ZBzoRBkzcFUo= +github.com/aws/aws-sdk-go-v2/service/iam v1.38.4 h1:440YtmP8Cn6Qp7WHYfvz2/Xzmu1v1Vox/FJnzUDDQGM= +github.com/aws/aws-sdk-go-v2/service/iam v1.38.4/go.mod h1:oXqc4hmGhZpj06Zu8z+ahXhdbjq4Uw8pjN9flty0Ync= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.7 h1:EqGlayejoCRXmnVC6lXl6phCm9R2+k35e0gWsO9G5DI= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.7/go.mod h1:BTw+t+/E5F3ZnDai/wSOYM54WUVjSdewE7Jvwtb7o+w= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.7 h1:8eUsivBQzZHqe/3FE+cqwfH+0p5Jo8PFM/QYQSmeZ+M= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.7/go.mod h1:kLPQvGUmxn/fqiCrDeohwG33bq2pQpGeY62yRO6Nrh0= -github.com/aws/aws-sdk-go-v2/service/pricing v1.32.8 h1:R3X3UwwZKYLCNVVeJ+WLefvrjI5HonYCMlf40BYvJ8E= -github.com/aws/aws-sdk-go-v2/service/pricing v1.32.8/go.mod h1:4kkTK4zhY31emmt9VGgq3S+ElECNsiI5h6bqSBt71b0= -github.com/aws/aws-sdk-go-v2/service/sqs v1.37.4 h1:WpoMCoS4+qOkkuWQommvDRboKYzK91En6eXO/k5dXr0= -github.com/aws/aws-sdk-go-v2/service/sqs v1.37.4/go.mod h1:171mrsbgz6DahPMnLJzQiH3bXXrdsWhpE9USZiM19Lk= -github.com/aws/aws-sdk-go-v2/service/ssm v1.56.2 h1:MOxvXH2kRP5exvqJxAZ0/H9Ar51VmADJh95SgZE8u60= -github.com/aws/aws-sdk-go-v2/service/ssm v1.56.2/go.mod h1:RKWoqC9FlgMCkrfVOtgfqfwdaUIaq8H93UAt4xNaR0A= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.8 h1:CvuUmnXI7ebaUAhbJcDy9YQx8wHR69eZ9I7q5hszt/g= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.8/go.mod h1:XDeGv1opzwm8ubxddF0cgqkZWsyOtw4lr6dxwmb6YQg= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.7 h1:F2rBfNAL5UyswqoeWv9zs74N/NanhK16ydHW1pahX6E= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.7/go.mod h1:JfyQ0g2JG8+Krq0EuZNnRwX0mU0HrwY/tG6JNfcqh4k= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.3 h1:Xgv/hyNgvLda/M9l9qxXc4UFSgppnRczLxlMs5Ae/QY= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.3/go.mod h1:5Gn+d+VaaRgsjewpMvGazt0WfcFO+Md4wLOuBfGR9Bc= 
-github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.29.9 h1:9zoIQ/6NA9b70dDvhYvi4IA3jcLDEu2UEALXLsvmQkI= -github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.29.9/go.mod h1:otxD6AyG1ABYxxhFX6eua+C4vntFe45igc3ake0mkuE= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.8 h1:h56mLNgpqWIL7RZOIQO634Xr569bXGTlIE83t/a0LSE= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.8/go.mod h1:kK04550Xx95KI0sNmwoB7ciS9QkRwt9TojhoTMXyJdo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8 h1:cWno7lefSH6Pp+mSznagKCgfDGeZRin66UvYUqAkyeA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8/go.mod h1:tPD+VjU3ABTBoEJ3nctu5Nyg4P4yjqSH5bJGGkY4+XE= +github.com/aws/aws-sdk-go-v2/service/pricing v1.32.9 h1:DYynbLftAXgRuwumB9TFMi8/lxa6EMzDAWlIr7BIDAQ= +github.com/aws/aws-sdk-go-v2/service/pricing v1.32.9/go.mod h1:WJ2trRtCOyyg9g7xWi9CCYu0TKCzrtsLY60/zZfU9As= +github.com/aws/aws-sdk-go-v2/service/sqs v1.37.6 h1:0Xj5aASTw9X+KqfPNZY0OhvTKAY1jTJ2X0nhcvsxN5M= +github.com/aws/aws-sdk-go-v2/service/sqs v1.37.6/go.mod h1:C17b05qSo++jCYngf3cdhCrsxLyxZliBbmYUFfGxLZo= +github.com/aws/aws-sdk-go-v2/service/ssm v1.56.4 h1:oXh/PjaKtStu7RkaUtuKX6+h/OxXriMa9WyQQhylKG0= +github.com/aws/aws-sdk-go-v2/service/ssm v1.56.4/go.mod h1:IiHGbiFg4wVdEKrvFi/zxVZbjfEpgSe21N9RwyQFXCU= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.9 h1:YqtxripbjWb2QLyzRK9pByfEDvgg95gpC2AyDq4hFE8= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.9/go.mod h1:lV8iQpg6OLOfBnqbGMBKYjilBlf633qwHnBEiMSPoHY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8 h1:6dBT1Lz8fK11m22R+AqfRsFn8320K0T5DTGxxOQBSMw= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8/go.mod h1:/kiBvRQXBc6xeJTYzhSdGvJ5vm1tjaDEjH+MSeRJnlY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.6 h1:VwhTrsTuVn52an4mXx29PqRzs2Dvu921NpGk7y43tAM= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.6/go.mod h1:+8h7PZb3yY5ftmVLD7ocEoE98hdc8PoKS0H3wfx1dlc= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.29.10 h1:R7gIzuE1yvmo5W/BNXXqsZToILLLT1tC8/cYY0x4cRY= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.29.10/go.mod h1:Kq3W70z1J01kaVX32gzR37X00ciCCqzJUChUDMPCKl0= github.com/aws/karpenter-provider-aws/tools/kompat v0.0.0-20240410220356-6b868db24881 h1:m9rhsGhdepdQV96tZgfy68oU75AWAjOH8u65OefTjwA= github.com/aws/karpenter-provider-aws/tools/kompat v0.0.0-20240410220356-6b868db24881/go.mod h1:+Mk5k0b6HpKobxNq+B56DOhZ+I/NiPhd5MIBhQMSTSs= github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= From 0134ad831a42c28eae8a0e3d6c9c1b9b8fb9aa44 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 18:42:29 +0000 Subject: [PATCH 13/18] chore: Update data from AWS APIs (#7587) Co-authored-by: APICodeGen --- pkg/providers/instancetype/zz_generated.bandwidth.go | 8 ++++---- pkg/providers/pricing/zz_generated.pricing_aws_us_gov.go | 6 +++++- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/pkg/providers/instancetype/zz_generated.bandwidth.go b/pkg/providers/instancetype/zz_generated.bandwidth.go index 1e7aa1a07cb0..a065239db46f 100644 --- a/pkg/providers/instancetype/zz_generated.bandwidth.go +++ b/pkg/providers/instancetype/zz_generated.bandwidth.go @@ -210,19 +210,19 @@ var ( "r7i.xlarge": 1562, "r7iz.xlarge": 1562, "c6gn.medium": 1600, + "c8g.xlarge": 1875, "i4g.xlarge": 1875, "i4i.xlarge": 1875, + "m8g.xlarge": 1875, + "r8g.xlarge": 1875, "x2iedn.xlarge": 1875, + "x8g.xlarge": 1875, "c7g.xlarge": 1876, 
"c7gd.xlarge": 1876, - "c8g.xlarge": 1876, "m7g.xlarge": 1876, "m7gd.xlarge": 1876, - "m8g.xlarge": 1876, "r7g.xlarge": 1876, "r7gd.xlarge": 1876, - "r8g.xlarge": 1876, - "x8g.xlarge": 1876, "g4ad.xlarge": 2000, "t3.2xlarge": 2048, "t3a.2xlarge": 2048, diff --git a/pkg/providers/pricing/zz_generated.pricing_aws_us_gov.go b/pkg/providers/pricing/zz_generated.pricing_aws_us_gov.go index ae5d5369dd31..871e7a6eb91b 100644 --- a/pkg/providers/pricing/zz_generated.pricing_aws_us_gov.go +++ b/pkg/providers/pricing/zz_generated.pricing_aws_us_gov.go @@ -16,7 +16,7 @@ limitations under the License. package pricing -// generated at 2025-01-06T13:11:45Z for us-east-1 +// generated at 2025-01-13T13:11:42Z for us-east-1 import ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" @@ -243,6 +243,10 @@ var InitialOnDemandPricesUSGov = map[string]map[ec2types.InstanceType]float64{ "c7g.12xlarge": 2.080800, "c7g.16xlarge": 2.774400, "c7g.2xlarge": 0.346800, "c7g.4xlarge": 0.693600, "c7g.8xlarge": 1.387200, "c7g.large": 0.086700, "c7g.medium": 0.043400, "c7g.metal": 2.774400, "c7g.xlarge": 0.173400, + // c7i family + "c7i.12xlarge": 2.570400, "c7i.16xlarge": 3.427200, "c7i.24xlarge": 5.140800, "c7i.2xlarge": 0.428400, + "c7i.48xlarge": 10.281600, "c7i.4xlarge": 0.856800, "c7i.8xlarge": 1.713600, "c7i.large": 0.107100, + "c7i.metal-24xl": 5.654880, "c7i.metal-48xl": 10.281600, "c7i.xlarge": 0.214200, // cc2 family "cc2.8xlarge": 2.250000, // d2 family From 559cef2dc7ef6fce4032da47efdfcd9618c66641 Mon Sep 17 00:00:00 2001 From: Jonathan Innis Date: Mon, 13 Jan 2025 11:35:25 -0800 Subject: [PATCH 14/18] docs: Update the Managed AMIs task to reflect the v1 API (#7582) --- .../content/en/docs/tasks/managing-amis.md | 177 +++++++++--------- .../content/en/preview/tasks/managing-amis.md | 177 +++++++++--------- .../content/en/v1.0/tasks/managing-amis.md | 177 +++++++++--------- .../content/en/v1.1/tasks/managing-amis.md | 177 +++++++++--------- 4 files changed, 364 insertions(+), 344 deletions(-) diff --git a/website/content/en/docs/tasks/managing-amis.md b/website/content/en/docs/tasks/managing-amis.md index 47d2b3bab9b1..23372cb9ce51 100644 --- a/website/content/en/docs/tasks/managing-amis.md +++ b/website/content/en/docs/tasks/managing-amis.md @@ -6,6 +6,17 @@ description: > Task for managing AMIs in Karpenter --- +{{% alert title="Important" color="warning" %}} +Karpenter __heavily recommends against__ opting-in to use an `amiSelectorTerm` with `@latest` unless you are doing this in a pre-production environment or are willing to accept the risk that a faulty AMI may cause downtime in your production clusters. In general, if using a publicly released version of a well-known AMI type (like AL2, AL2023, or Bottlerocket), we recommend that you pin to a version of that AMI and deploy newer versions of that AMI type in a staged approach when newer patch versions are available. + +```yaml +amiSelectorTerms: + - alias: al2023@v20240807 +``` + +More details are described in [Controlling AMI Replacement]({{< relref "#controlling-ami-replacement" >}}) below. +{{% /alert %}} + Understanding how Karpenter assigns AMIs to nodes can help ensure that your workloads will run successfully on those nodes and continue to run if the nodes are upgraded to newer AMIs. Below we describe how Karpenter assigns AMIs to nodes when they are first deployed and how newer AMIs are assigned later when nodes are spun up to replace old ones. 
Later, it describes the options you have to assert control over how AMIs are used by Karpenter for your clusters. @@ -17,138 +28,132 @@ See [How do I upgrade an EKS Cluster with Karpenter]({{< relref "../faq/#how-do- Here is how Karpenter assigns AMIs nodes: -* When you create an `EC2NodeClass`, you are required to set the family of AMIs to use. For example, for the AL2 family, you would set `amiFamily: AL2`. -* With that `amiFamily` set, any time Karpenter spins up a new node, it uses the latest [Amazon EKS optimized Amazon Linux 2 AMIs](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) release. -* Later, if an existing node needs to be replaced, Karpenter checks to see if a newer AMI in the AL2 family is available and automatically uses the new AMI instead to spin up the new node. In other words, you may automatically get an AMI that you have not tested with your workloads. +* When you create an `EC2NodeClass`, you are required to specify [`amiSelectorTerms`]({{< relref "../concepts/nodeclasses/#specamiselectorterms" >}}). [`amiSelectorTerms`]({{< relref "../concepts/nodeclasses/#specamiselectorterms" >}}) allow you to select on AMIs that can be spun-up by this EC2NodeClass based on tags, id, name, or an alias. Multiple AMIs may be specified, and Karpenter will choose the newest compatible AMI when spinning up new nodes. +* Some `amiSelectorTerm` types are static and always resolve to the same AMI (e.g. `id`). However, some are dynamic and may resolve to different AMIs over time. Examples of dynamic types include `alias`, `tags`, and `name` (when using a wildcard). For example, if you specify an `amiSelectorTerm` with an `alias` set to `@latest` (e.g. `al2023@latest`, `al2@latest`, or `bottlerocket@latest`), Karpenter will use the _latest_ release for that AMI type when spinning up a new node. +* When a node is replaced, Karpenter checks to see if a newer AMI is available based on your `amiSelectorTerms`. If a newer AMI is available, Karpenter will automatically use the new AMI to spin up the new node. __In particular, if you are using a dynamic `amiSelectorTerm` type, you may get a new AMI deployed to your environment without having properly tested it.__ -You can manually delete a node managed by Karpenter, which will cause the default behavior just described to take effect. -However, there are situations that will cause node replacements with newer AMIs to happen automatically. -These include: Expiration (if node expiry is set, the node is marked for deletion at a certain time after the node is created), [**Consolidation**]({{< relref "../concepts/disruption/#consolidation" >}}) (if a node is empty of workloads, or deemed to be inefficiently running workloads, nodes can be deleted and more appropriately featured nodes are brought up to consolidate workloads), [Drift]({{< relref "../concepts/disruption/#drift" >}}) (nodes are set for deletion when they drift from the desired state of the `NodeClaims` and new nodes are brought up to replace them), and [Interruption]({{< relref "../concepts/disruption/#interruption" >}}) (nodes are sometimes involuntarily disrupted by things like Spot interruption, health changes, and instance events, requiring new nodes to be deployed). +Whenever a node is replaced, the replacement node will be launched using the newest AMI based on your `amiSelectorTerms`. 
Nodes may be replaced due to manual deletion, or any of Karpenter's automated methods: +- [**Expiration**]({{< relref "../concepts/disruption/#expiration" >}}): Automatically initiates replacement at a certain time after the node is created. +- [**Consolidation**]({{< relref "../concepts/disruption/#consolidation" >}}): If Karpenter detects that a cheaper node can be used to run the same workloads, Karpenter may replace the current node automatically. +- [**Drift**]({{< relref "../concepts/disruption/#drift" >}}): If a node's state no longer matches the desired state dictated by the `NodePool` or `EC2NodeClass`, it will be replaced, including if the node's AMI no longer matches the latest AMI selected by the `amiSelectorTerms`. +- [**Interruption**]({{< relref "../concepts/disruption/#interruption" >}}): Nodes are sometimes involuntarily disrupted by things like Spot interruption, health changes, and instance events, requiring new nodes to be deployed. See [**Automated Methods**]({{< relref "../concepts/disruption/#automated-methods" >}}) for details on how Karpenter uses these automated actions to replace nodes. -With these types of automated updates in place, there is some risk that the new AMI being used when replacing instances will introduce some regressions or bugs that cause your workloads to be degraded or fail altogether. -The options described below tell you how to take more control over the ways in which Karpenter selects AMIs for your nodes. +The most relevant automated disruption method is [**Drift**]({{< relref "../concepts/disruption/#drift" >}}), since it is initiated when a new AMI is selected-on by your `amiSelectorTerms`. This could be due to a manual update (e.g. a new `id` term was added), or due to a new AMI being resolved by a dynamic term. + +If you're using an `alias` with the `latest` pin (e.g. `al2023@latest`), Karpenter periodically checks for new AMI releases. Since AMI releases are outside your control, this could result in new AMIs being deployed before they have been properly tested in a lower environment. This is why we **strongly recommend** using version pins in production environments when using an alias (e.g. `al2023@v20240807`). {{% alert title="Important" color="warning" %}} If you are new to Karpenter, you should know that the behavior described here is different than you get with Managed Node Groups (MNG). MNG will always use the assigned AMI when it creates a new node and will never automatically upgrade to a new AMI when a new node is required. See [Updating a Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/update-managed-node-group.html) to see how you would manually update MNG to use new AMIs. {{% /alert %}} -## Choosing AMI options -One of Karpenter's greatest assets is its ability to provide the right node at the right time, with little intervention from the person managing the cluster. -Its default behavior of using a later AMI if one becomes available in the selected family means you automatically get the latest security fixes and features. -However, with this comes the risk that the new AMI could break or degrade your workloads. - -As the Karpenter team looks for new ways to manage AMIs, the options below offer some means of reducing these risks, based on your own security and ease-of-use requirements. 
-Here are the advantages and challenges of each of the options described below: - -* [Option 1]({{< relref "#option-1-manage-how-amis-are-tested-and-rolled-out" >}}) (Test AMIs): The safest way, and the one we recommend, for ensuring that a new AMI doesn't break your workloads is to test it before putting it into production. This takes the most effort on your part, but most effectively models how your workloads will run in production, allowing you to catch issues ahead of time. Note that you can sometimes get different results from your test environment when you roll a new AMI into production, since issues like scale and other factors can elevate problems you might not see in test. So combining this with other options, that do things like slow rollouts, can allow you to catch problems before they impact your whole cluster. -* [Option 2]({{< relref "#option-2-lock-down-which-amis-are-selected" >}}) (Lock down AMIs): If workloads require a particluar AMI, this option can make sure that it is the only AMI used by Karpenter. This can be used in combination with Option 1, where you lock down the AMI in production, but allow the newest AMIs in a test cluster while you test your workloads before upgrading production. Keep in mind that this makes upgrades a manual process for you. -* [Option 3]({{< relref "#option-3-control-the-pace-of-node-disruptions" >}}) ([Disruption budgets]({{< relref "../concepts/disruption/" >}})): This option can be used as a way of mitigating the scope of impact if a new AMI causes problems with your workloads. With Disruption budgets you can slow the pace of upgrades to nodes with new AMIs or make sure that upgrades only happen during selected dates and times (using `schedule`). This doesn't prevent a bad AMI from being deployed, but it allows you to control when nodes are upgraded, and gives you more time respond to rollout issues. - -## Options - -The following lays out the options you have to impact Karpenter’s behavior as it relates to how nodes are created and AMIs are consumed. - -### Option 1: Manage how AMIs are tested and rolled out - -Instead of just avoiding AMI upgrades, you can set up test clusters where you can try out new AMI releases before they are put into production. -For example, you could have: - -* **Test clusters**: On lower environment clusters, you can run the latest AMIs for your workloads in a safe environment. The `EC2NodeClass` for these clusters could be set with a chosen `amiFamily`, but no `amiSelectorTerms` set. For example, the `NodePool` and `EC2NodeClass` could begin with the following: - - ```yaml - apiVersion: karpenter.sh/v1 - kind: NodePool - metadata: - name: default - spec: - template: - spec: - nodeClassRef: - apiVersion: karpenter.k8s.aws/v1 - kind: EC2NodeClass - name: default - --- - apiVersion: karpenter.k8s.aws/v1 - kind: EC2NodeClass - metadata: - name: default - spec: - # The latest AMI in this family will be used - amiFamily: AL2 - ``` -* **Production clusters**: After you've confirmed that the AMI works in your lower environments, you can pin the latest AMIs to be deployed in your production clusters to roll out the AMI. One way to do that is to use `amiSelectorTerms` to set the tested AMI to be used in your production cluster. Refer to Option 2 for how to choose a particular AMI by `name` or `id`. Remember that it is still best practice to gradually roll new AMIs into your cluster, even if they have been tested. So consider implementing that for your production clusters as described in Option 3. 
- 
-### Option 2: Lock down which AMIs are selected
-
-Instead of letting Karpenter always run the latest AMI, you can change Karpenter’s default behavior.
-When you configure the [**EC2NodeClass**]({{< relref "../concepts/nodeclasses" >}}), you can set a specific AMI that you want Karpenter to always choose, using the `amiSelectorTerms` field.
-This prevents a new and potentially untested AMI from replacing existing nodes when those nodes are terminated.
-
-With the `amiSelectorTerms` field in an `EC2NodeClass`, you can set a specific AMI for Karpenter to use, based on AMI name or id (only one is required).
-These examples show two different ways to identify the same AMI:
+## Controlling AMI Replacement
+
+Karpenter's automated node replacement functionality, in tandem with the `EC2NodeClass`, gives you a lot of flexibility to control the desired state of nodes on your cluster. For example, you can opt in to AMI auto-upgrades using `alias` set to `@latest`; however, this has to be weighed heavily against the risk of newer versions of an AMI breaking existing applications on your cluster. Alternatively, you can choose to pin your AMIs in your production clusters to avoid the risk of breaking changes; however, this has to be weighed against the management cost of testing new AMIs in pre-production and keeping up with the latest AMI versions.
+
+Karpenter offers you various controls to ensure you don't take on too much risk as you roll out new versions of AMIs to your production clusters. The sections below show how you can use these controls:
+
+* [Pinning AMIs]({{< relref "#pinning-amis" >}}): If workloads require a particular AMI, this control ensures that it is the only AMI used by Karpenter. This can be used in combination with [Testing AMIs]({{< relref "#testing-amis" >}}) where you lock down the AMI in production, but allow the newest AMIs in a test cluster while you test your workloads before upgrading production.
+* [Testing AMIs]({{< relref "#testing-amis" >}}): The safest way to ensure that a new AMI doesn't break your workloads is to test it before putting it into production. This takes the most effort on your part, but most effectively models how your workloads will run in production, allowing you to catch issues ahead of time. Note that you can sometimes get different results from your test environment when you roll a new AMI into production, since issues like scale and other factors can elevate problems you might not see in test. Combining this with other controls like [Using Disruption Budgets]({{< relref "#using-disruption-budgets" >}}) can allow you to catch problems before they impact your whole cluster.
+* [Using Disruption Budgets]({{< relref "#using-disruption-budgets" >}}): This option can be used as a way of mitigating the scope of impact if a new AMI causes problems with your workloads. With Disruption Budgets, you can slow the pace of upgrades to nodes with new AMIs or make sure that upgrades only happen during selected dates and times (using `schedule`). This doesn't prevent a bad AMI from being deployed, but it allows you to control when nodes are upgraded, and gives you more time to respond to rollout issues.
+
+### Pinning AMIs
+
+When you configure the [**EC2NodeClass**]({{< relref "../concepts/nodeclasses" >}}), you are required to configure which AMIs you want Karpenter to select on using the `amiSelectorTerms` field. 
When pinning to a specific `id`, `name`, `tags`, or an `alias` that contains a fixed version, Karpenter will only select on a single AMI and won't automatically upgrade your nodes to a new version of an AMI. This prevents a new and potentially untested AMI from replacing existing nodes when those nodes are terminated.
+
+{{% alert title="Note" color="primary" %}}
+Pinning an AMI to an `alias` type with a fixed version _will_ pin the AMI so long as your K8s control plane version doesn't change. Unlike `id` and `name` types, specifying a version `alias` in your `amiSelectorTerms` will cause Karpenter to consider the K8s control plane version of your cluster when choosing the AMI. If you upgrade your Kubernetes cluster while using this alias type, Karpenter _will_ automatically drift your nodes to a new AMI that still matches the AMI version but also matches your new K8s control plane version.
+{{% /alert %}}
+
+These examples show three different ways to identify the same AMI:
 
 ```yaml
+# Using alias
+# Pinning to this fixed version alias will pull this version of the AMI,
+# matching the K8s control plane version of your cluster
+amiSelectorTerms:
+- alias: al2023@v20240219
+```
+
+```yaml
+# Using name
+# This will only ever select the AMI that contains this exact name
 amiSelectorTerms:
-- tags:
-    karpenter.sh/discovery: "${CLUSTER_NAME}"
-    environment: prod
- name: al2023-ami-2023.3.20240219.0-kernel-6.1-x86_64
 ```
 
-or
+```yaml
+# Using id
+# This will only ever select this specific AMI id
+amiSelectorTerms:
+- id: ami-052c9ea013e6e3567
+```
 
 ```yaml
+# Using tags
+# You can use a CI/CD system to test newer versions of an AMI
+# and automatically tag them as you validate that they are safe to upgrade to
 amiSelectorTerms:
 - tags:
     karpenter.sh/discovery: "${CLUSTER_NAME}"
     environment: prod
-- id: ami-052c9ea013e6e3567
 ```
 
-See the [**spec.amiSelectorTerms**]({{< relref "../concepts/nodeclasses/#specamiselectorterms" >}}) section of the NodeClasses page for details.
+See the [**spec.amiSelectorTerms**]({{< relref "../concepts/nodeclasses/#specamiselectorterms" >}}) section of the NodeClasses page for details. Keep in mind that this could prevent you from getting critical security patches when new AMIs are available, but it does give you control over exactly which AMI is running.
 
+### Testing AMIs
+
+Instead of avoiding AMI upgrades, you can set up test clusters where you can try out new AMI releases before they are put into production. For example, you could have:
 
-### Option 3: Control the pace of node disruptions
+* **Test clusters**: On lower environment clusters, you can run the latest AMIs (e.g. `al2023@latest`, `al2@latest`, or `bottlerocket@latest`) for your workloads in a safe environment. This ensures that you get the latest patches for AMIs where downtime to applications isn't as critical and allows you to validate patches to AMIs before they are deployed to production.
 
-To reduce the risk of entire workloads being immediately degraded when a new AMI is deployed, you can enable Karpenter [**Disruption Budgets**]({{< relref "../concepts/disruption/#disruption-budgets " >}}).
-Disruption Budgets limit when and to what extent nodes can be disrupted.
-You can prevent disruption based on nodes (a percentage or number of nodes that can be disrupted at a time) and schedule (excluding certain times from disrupting nodes).
-You can set Disruption Budgets in a `NodePool` spec. 
-Here is an example:
+* **Production clusters**: After you've confirmed that the AMI works in your lower environments, you can pin the latest AMIs to be deployed in your production clusters to roll out the AMI. Refer to [Pinning AMIs]({{< relref "#pinning-amis" >}}) for how to choose a particular AMI by `alias`, `name`, or `id`. Remember that it is still best practice to gradually roll new AMIs into your cluster, even if they have been tested. So consider implementing that for your production clusters as described in [Using Disruption Budgets]({{< relref "#using-disruption-budgets" >}}).
+
+### Using Disruption Budgets
+
+To reduce the risk of entire workloads being immediately degraded when a new AMI is deployed, you can enable Karpenter's [**Node Disruption Budgets**]({{< relref "#node-disruption-budgets" >}}) as well as ensure that you have [**Pod Disruption Budgets**]({{< relref "#pod-disruption-budgets" >}}) configured for applications on your cluster. The sections below provide more details on how to configure each.
+
+#### Node Disruption Budgets
+
+[Disruption Budgets]({{< relref "../concepts/disruption/#disruption-budgets" >}}) limit when and to what extent nodes can be disrupted. You can prevent disruption based on nodes (a percentage or number of nodes that can be disrupted at a time) and schedule (excluding certain times from disrupting nodes).
+You can set Disruption Budgets in a `NodePool` spec. Here is an example (see the sketch after the settings list for where this block sits in a full `NodePool`):
 
 ```yaml
-template:
-  spec:
-    expireAfter: 1440h
 disruption:
-  consolidationPolicy: WhenEmpty
   budgets:
   - nodes: 15%
   - nodes: "3"
   - nodes: "0"
-    schedule: "0 7 * * sat-sun"
-    duration: 12h
+    schedule: "0 9 * * sat-sun"
+    duration: 24h
+  - nodes: "0"
+    schedule: "0 17 * * mon-fri"
+    duration: 16h
+    reasons:
+    - Drifted
 ```
 
-The `disruption` settings define a few fields that indicate the state of a node that should be disrupted.
-The `consolidationPolicy` field indicates that a node should be disrupted if the node is either empty or underutilized (`WhenEmptyOrUnderutilized`) or not running any pods (`WhenEmpty`).
-With `expireAfter` set to `1440` hours, the node expires after 60 days.
-Extending those values causes longer times without disruption.
-
 Settings for budgets in the above example include the following:
 
 * **Percentage of nodes**: From the first `nodes` setting, only `15%` of the NodePool’s nodes can be disrupted at a time.
 * **Number of nodes**: The second `nodes` setting limits the number of nodes that can be disrupted at a time to `3`.
-* **Schedule**: The third `nodes` setting uses schedule to say that zero disruptions (`0`) are allowed starting at 7am on Saturday and Sunday and continues for 12 hours.
+* **Schedule**: The third `nodes` setting uses schedule to say that zero disruptions (`0`) are allowed starting at 9am on Saturday and Sunday, continuing for 24 hours (fully blocking disruptions all day).
  The format of the schedule follows the `crontab` format for identifying dates and times. See the [crontab](https://man7.org/linux/man-pages/man5/crontab.5.html) page for information on the supported values for these fields.
+* **Reasons**: The fourth `nodes` setting uses `reasons`, which means that this budget only applies to the `Drifted` disruption condition. This setting uses schedule to say that zero disruptions (`0`) are allowed starting at 5pm Monday through Friday, continuing for 16 hours (effectively blocking rolling nodes due to drift outside of working hours).
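+
+For orientation, here is an abbreviated sketch of where the `disruption` block above sits in a full `NodePool`. This is only a sketch: the resource names are illustrative, and required fields such as the template's `requirements` are omitted for brevity.
+
+```yaml
+apiVersion: karpenter.sh/v1
+kind: NodePool
+metadata:
+  name: default            # illustrative name
+spec:
+  template:
+    spec:
+      nodeClassRef:        # points at the EC2NodeClass that selects your AMIs
+        group: karpenter.k8s.aws
+        kind: EC2NodeClass
+        name: default      # illustrative name
+      # ... requirements and other template fields omitted ...
+  disruption:
+    budgets:
+    - nodes: 15%
+    - nodes: "0"
+      schedule: "0 9 * * sat-sun"
+      duration: 24h
+```
+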
As with all disruption settings, keep in mind that avoiding updated AMIs for your nodes can result in not getting fixes for known security risks and bugs.
 You need to balance that with your desire to not risk breaking the workloads on your cluster.
 
+#### Pod Disruption Budgets
+
+[Pod Disruption Budgets](https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget) allow you to describe how much disruption an application can tolerate before it becomes unhealthy. This is critical to configure for Karpenter, since Karpenter uses this information to determine if it can continue to replace nodes. Specifically, if replacing a node would cause a Pod Disruption Budget to be breached (for graceful forms of disruption, e.g. Drift or Consolidation), Karpenter will not replace the node.
+
+In a scenario where a faulty AMI is rolling out and begins causing downtime to your applications, configuring Pod Disruption Budgets is critical since this will tell Karpenter that it must stop replacing nodes until your applications become healthy again. This prevents Karpenter from deploying the faulty AMI throughout your cluster, reduces the impact the AMI has on your production applications, and gives you time to manually intervene in the cluster to remediate the issue.
+
 ## Follow-up
 
 The Karpenter project continues to add features to give you greater control over AMI upgrades on your clusters.
diff --git a/website/content/en/preview/tasks/managing-amis.md b/website/content/en/preview/tasks/managing-amis.md
index 47d2b3bab9b1..23372cb9ce51 100644
--- a/website/content/en/preview/tasks/managing-amis.md
+++ b/website/content/en/preview/tasks/managing-amis.md
@@ -6,6 +6,17 @@ description: >
   Task for managing AMIs in Karpenter
 ---
 
+{{% alert title="Important" color="warning" %}}
+Karpenter __heavily recommends against__ opting-in to use an `amiSelectorTerm` with `@latest` unless you are doing this in a pre-production environment or are willing to accept the risk that a faulty AMI may cause downtime in your production clusters. In general, if using a publicly released version of a well-known AMI type (like AL2, AL2023, or Bottlerocket), we recommend that you pin to a version of that AMI and deploy newer versions of that AMI type in a staged approach when newer patch versions are available.
+
+```yaml
+amiSelectorTerms:
+  - alias: al2023@v20240807
+```
+
+More details are described in [Controlling AMI Replacement]({{< relref "#controlling-ami-replacement" >}}) below.
+{{% /alert %}}
+
 Understanding how Karpenter assigns AMIs to nodes can help ensure that your workloads will run successfully on those nodes and continue to run if the nodes are upgraded to newer AMIs.
 Below we describe how Karpenter assigns AMIs to nodes when they are first deployed and how newer AMIs are assigned later when nodes are spun up to replace old ones.
 Later, it describes the options you have to assert control over how AMIs are used by Karpenter for your clusters.
@@ -17,138 +28,132 @@ See [How do I upgrade an EKS Cluster with Karpenter]({{< relref "../faq/#how-do-
 
 Here is how Karpenter assigns AMIs nodes:
 
-* When you create an `EC2NodeClass`, you are required to set the family of AMIs to use. For example, for the AL2 family, you would set `amiFamily: AL2`.
-* With that `amiFamily` set, any time Karpenter spins up a new node, it uses the latest [Amazon EKS optimized Amazon Linux 2 AMIs](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) release.
-* Later, if an existing node needs to be replaced, Karpenter checks to see if a newer AMI in the AL2 family is available and automatically uses the new AMI instead to spin up the new node. In other words, you may automatically get an AMI that you have not tested with your workloads. +* When you create an `EC2NodeClass`, you are required to specify [`amiSelectorTerms`]({{< relref "../concepts/nodeclasses/#specamiselectorterms" >}}). [`amiSelectorTerms`]({{< relref "../concepts/nodeclasses/#specamiselectorterms" >}}) allow you to select on AMIs that can be spun-up by this EC2NodeClass based on tags, id, name, or an alias. Multiple AMIs may be specified, and Karpenter will choose the newest compatible AMI when spinning up new nodes. +* Some `amiSelectorTerm` types are static and always resolve to the same AMI (e.g. `id`). However, some are dynamic and may resolve to different AMIs over time. Examples of dynamic types include `alias`, `tags`, and `name` (when using a wildcard). For example, if you specify an `amiSelectorTerm` with an `alias` set to `@latest` (e.g. `al2023@latest`, `al2@latest`, or `bottlerocket@latest`), Karpenter will use the _latest_ release for that AMI type when spinning up a new node. +* When a node is replaced, Karpenter checks to see if a newer AMI is available based on your `amiSelectorTerms`. If a newer AMI is available, Karpenter will automatically use the new AMI to spin up the new node. __In particular, if you are using a dynamic `amiSelectorTerm` type, you may get a new AMI deployed to your environment without having properly tested it.__ -You can manually delete a node managed by Karpenter, which will cause the default behavior just described to take effect. -However, there are situations that will cause node replacements with newer AMIs to happen automatically. -These include: Expiration (if node expiry is set, the node is marked for deletion at a certain time after the node is created), [**Consolidation**]({{< relref "../concepts/disruption/#consolidation" >}}) (if a node is empty of workloads, or deemed to be inefficiently running workloads, nodes can be deleted and more appropriately featured nodes are brought up to consolidate workloads), [Drift]({{< relref "../concepts/disruption/#drift" >}}) (nodes are set for deletion when they drift from the desired state of the `NodeClaims` and new nodes are brought up to replace them), and [Interruption]({{< relref "../concepts/disruption/#interruption" >}}) (nodes are sometimes involuntarily disrupted by things like Spot interruption, health changes, and instance events, requiring new nodes to be deployed). +Whenever a node is replaced, the replacement node will be launched using the newest AMI based on your `amiSelectorTerms`. Nodes may be replaced due to manual deletion, or any of Karpenter's automated methods: +- [**Expiration**]({{< relref "../concepts/disruption/#expiration" >}}): Automatically initiates replacement at a certain time after the node is created. +- [**Consolidation**]({{< relref "../concepts/disruption/#consolidation" >}}): If Karpenter detects that a cheaper node can be used to run the same workloads, Karpenter may replace the current node automatically. +- [**Drift**]({{< relref "../concepts/disruption/#drift" >}}): If a node's state no longer matches the desired state dictated by the `NodePool` or `EC2NodeClass`, it will be replaced, including if the node's AMI no longer matches the latest AMI selected by the `amiSelectorTerms`. 
+- [**Interruption**]({{< relref "../concepts/disruption/#interruption" >}}): Nodes are sometimes involuntarily disrupted by things like Spot interruption, health changes, and instance events, requiring new nodes to be deployed. See [**Automated Methods**]({{< relref "../concepts/disruption/#automated-methods" >}}) for details on how Karpenter uses these automated actions to replace nodes. -With these types of automated updates in place, there is some risk that the new AMI being used when replacing instances will introduce some regressions or bugs that cause your workloads to be degraded or fail altogether. -The options described below tell you how to take more control over the ways in which Karpenter selects AMIs for your nodes. +The most relevant automated disruption method is [**Drift**]({{< relref "../concepts/disruption/#drift" >}}), since it is initiated when a new AMI is selected-on by your `amiSelectorTerms`. This could be due to a manual update (e.g. a new `id` term was added), or due to a new AMI being resolved by a dynamic term. + +If you're using an `alias` with the `latest` pin (e.g. `al2023@latest`), Karpenter periodically checks for new AMI releases. Since AMI releases are outside your control, this could result in new AMIs being deployed before they have been properly tested in a lower environment. This is why we **strongly recommend** using version pins in production environments when using an alias (e.g. `al2023@v20240807`). {{% alert title="Important" color="warning" %}} If you are new to Karpenter, you should know that the behavior described here is different than you get with Managed Node Groups (MNG). MNG will always use the assigned AMI when it creates a new node and will never automatically upgrade to a new AMI when a new node is required. See [Updating a Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/update-managed-node-group.html) to see how you would manually update MNG to use new AMIs. {{% /alert %}} -## Choosing AMI options -One of Karpenter's greatest assets is its ability to provide the right node at the right time, with little intervention from the person managing the cluster. -Its default behavior of using a later AMI if one becomes available in the selected family means you automatically get the latest security fixes and features. -However, with this comes the risk that the new AMI could break or degrade your workloads. - -As the Karpenter team looks for new ways to manage AMIs, the options below offer some means of reducing these risks, based on your own security and ease-of-use requirements. -Here are the advantages and challenges of each of the options described below: - -* [Option 1]({{< relref "#option-1-manage-how-amis-are-tested-and-rolled-out" >}}) (Test AMIs): The safest way, and the one we recommend, for ensuring that a new AMI doesn't break your workloads is to test it before putting it into production. This takes the most effort on your part, but most effectively models how your workloads will run in production, allowing you to catch issues ahead of time. Note that you can sometimes get different results from your test environment when you roll a new AMI into production, since issues like scale and other factors can elevate problems you might not see in test. So combining this with other options, that do things like slow rollouts, can allow you to catch problems before they impact your whole cluster. 
-* [Option 2]({{< relref "#option-2-lock-down-which-amis-are-selected" >}}) (Lock down AMIs): If workloads require a particluar AMI, this option can make sure that it is the only AMI used by Karpenter. This can be used in combination with Option 1, where you lock down the AMI in production, but allow the newest AMIs in a test cluster while you test your workloads before upgrading production. Keep in mind that this makes upgrades a manual process for you. -* [Option 3]({{< relref "#option-3-control-the-pace-of-node-disruptions" >}}) ([Disruption budgets]({{< relref "../concepts/disruption/" >}})): This option can be used as a way of mitigating the scope of impact if a new AMI causes problems with your workloads. With Disruption budgets you can slow the pace of upgrades to nodes with new AMIs or make sure that upgrades only happen during selected dates and times (using `schedule`). This doesn't prevent a bad AMI from being deployed, but it allows you to control when nodes are upgraded, and gives you more time respond to rollout issues. - -## Options - -The following lays out the options you have to impact Karpenter’s behavior as it relates to how nodes are created and AMIs are consumed. - -### Option 1: Manage how AMIs are tested and rolled out - -Instead of just avoiding AMI upgrades, you can set up test clusters where you can try out new AMI releases before they are put into production. -For example, you could have: - -* **Test clusters**: On lower environment clusters, you can run the latest AMIs for your workloads in a safe environment. The `EC2NodeClass` for these clusters could be set with a chosen `amiFamily`, but no `amiSelectorTerms` set. For example, the `NodePool` and `EC2NodeClass` could begin with the following: - - ```yaml - apiVersion: karpenter.sh/v1 - kind: NodePool - metadata: - name: default - spec: - template: - spec: - nodeClassRef: - apiVersion: karpenter.k8s.aws/v1 - kind: EC2NodeClass - name: default - --- - apiVersion: karpenter.k8s.aws/v1 - kind: EC2NodeClass - metadata: - name: default - spec: - # The latest AMI in this family will be used - amiFamily: AL2 - ``` -* **Production clusters**: After you've confirmed that the AMI works in your lower environments, you can pin the latest AMIs to be deployed in your production clusters to roll out the AMI. One way to do that is to use `amiSelectorTerms` to set the tested AMI to be used in your production cluster. Refer to Option 2 for how to choose a particular AMI by `name` or `id`. Remember that it is still best practice to gradually roll new AMIs into your cluster, even if they have been tested. So consider implementing that for your production clusters as described in Option 3. - -### Option 2: Lock down which AMIs are selected - -Instead of letting Karpenter always run the latest AMI, you can change Karpenter’s default behavior. -When you configure the [**EC2NodeClass**]({{< relref "../concepts/nodeclasses" >}}), you can set a specific AMI that you want Karpenter to always choose, using the `amiSelectorTerms` field. -This prevents a new and potentially untested AMI from replacing existing nodes when those nodes are terminated. - -With the `amiSelectorTerms` field in an `EC2NodeClass`, you can set a specific AMI for Karpenter to use, based on AMI name or id (only one is required). 
-These examples show two different ways to identify the same AMI:
+## Controlling AMI Replacement
+
+Karpenter's automated node replacement functionality, in tandem with the `EC2NodeClass`, gives you a lot of flexibility to control the desired state of nodes on your cluster. For example, you can opt in to AMI auto-upgrades using `alias` set to `@latest`; however, this has to be weighed heavily against the risk of newer versions of an AMI breaking existing applications on your cluster. Alternatively, you can choose to pin your AMIs in your production clusters to avoid the risk of breaking changes; however, this has to be weighed against the management cost of testing new AMIs in pre-production and keeping up with the latest AMI versions.
+
+Karpenter offers you various controls to ensure you don't take on too much risk as you roll out new versions of AMIs to your production clusters. The sections below show how you can use these controls:
+
+* [Pinning AMIs]({{< relref "#pinning-amis" >}}): If workloads require a particular AMI, this control ensures that it is the only AMI used by Karpenter. This can be used in combination with [Testing AMIs]({{< relref "#testing-amis" >}}) where you lock down the AMI in production, but allow the newest AMIs in a test cluster while you test your workloads before upgrading production.
+* [Testing AMIs]({{< relref "#testing-amis" >}}): The safest way to ensure that a new AMI doesn't break your workloads is to test it before putting it into production. This takes the most effort on your part, but most effectively models how your workloads will run in production, allowing you to catch issues ahead of time. Note that you can sometimes get different results from your test environment when you roll a new AMI into production, since issues like scale and other factors can elevate problems you might not see in test. Combining this with other controls like [Using Disruption Budgets]({{< relref "#using-disruption-budgets" >}}) can allow you to catch problems before they impact your whole cluster.
+* [Using Disruption Budgets]({{< relref "#using-disruption-budgets" >}}): This option can be used as a way of mitigating the scope of impact if a new AMI causes problems with your workloads. With Disruption Budgets, you can slow the pace of upgrades to nodes with new AMIs or make sure that upgrades only happen during selected dates and times (using `schedule`). This doesn't prevent a bad AMI from being deployed, but it allows you to control when nodes are upgraded, and gives you more time to respond to rollout issues.
+
+### Pinning AMIs
+
+When you configure the [**EC2NodeClass**]({{< relref "../concepts/nodeclasses" >}}), you are required to configure which AMIs you want Karpenter to select on using the `amiSelectorTerms` field. When pinning to a specific `id`, `name`, `tags`, or an `alias` that contains a fixed version, Karpenter will only select on a single AMI and won't automatically upgrade your nodes to a new version of an AMI. This prevents a new and potentially untested AMI from replacing existing nodes when those nodes are terminated.
+
+{{% alert title="Note" color="primary" %}}
+Pinning an AMI to an `alias` type with a fixed version _will_ pin the AMI so long as your K8s control plane version doesn't change. Unlike `id` and `name` types, specifying a version `alias` in your `amiSelectorTerms` will cause Karpenter to consider the K8s control plane version of your cluster when choosing the AMI. 
If you upgrade your Kubernetes cluster while using this alias type, Karpenter _will_ automatically drift your nodes to a new AMI that still matches the AMI version but also matches your new K8s control plane version.
+{{% /alert %}}
+
+These examples show three different ways to identify the same AMI:
 
 ```yaml
+# Using alias
+# Pinning to this fixed version alias will pull this version of the AMI,
+# matching the K8s control plane version of your cluster
+amiSelectorTerms:
+- alias: al2023@v20240219
+```
+
+```yaml
+# Using name
+# This will only ever select the AMI that contains this exact name
 amiSelectorTerms:
-- tags:
-    karpenter.sh/discovery: "${CLUSTER_NAME}"
-    environment: prod
- name: al2023-ami-2023.3.20240219.0-kernel-6.1-x86_64
 ```
 
-or
+```yaml
+# Using id
+# This will only ever select this specific AMI id
+amiSelectorTerms:
+- id: ami-052c9ea013e6e3567
+```
 
 ```yaml
+# Using tags
+# You can use a CI/CD system to test newer versions of an AMI
+# and automatically tag them as you validate that they are safe to upgrade to
 amiSelectorTerms:
 - tags:
     karpenter.sh/discovery: "${CLUSTER_NAME}"
     environment: prod
-- id: ami-052c9ea013e6e3567
 ```
 
-See the [**spec.amiSelectorTerms**]({{< relref "../concepts/nodeclasses/#specamiselectorterms" >}}) section of the NodeClasses page for details.
+See the [**spec.amiSelectorTerms**]({{< relref "../concepts/nodeclasses/#specamiselectorterms" >}}) section of the NodeClasses page for details. Keep in mind that this could prevent you from getting critical security patches when new AMIs are available, but it does give you control over exactly which AMI is running.
 
+### Testing AMIs
+
+Instead of avoiding AMI upgrades, you can set up test clusters where you can try out new AMI releases before they are put into production. For example, you could have:
 
-### Option 3: Control the pace of node disruptions
+* **Test clusters**: On lower environment clusters, you can run the latest AMIs (e.g. `al2023@latest`, `al2@latest`, or `bottlerocket@latest`) for your workloads in a safe environment. This ensures that you get the latest patches for AMIs where downtime to applications isn't as critical and allows you to validate patches to AMIs before they are deployed to production (see the sketch after this list).
 
-To reduce the risk of entire workloads being immediately degraded when a new AMI is deployed, you can enable Karpenter [**Disruption Budgets**]({{< relref "../concepts/disruption/#disruption-budgets " >}}).
-Disruption Budgets limit when and to what extent nodes can be disrupted.
-You can prevent disruption based on nodes (a percentage or number of nodes that can be disrupted at a time) and schedule (excluding certain times from disrupting nodes).
-You can set Disruption Budgets in a `NodePool` spec.
-Here is an example:
+* **Production clusters**: After you've confirmed that the AMI works in your lower environments, you can pin the latest AMIs to be deployed in your production clusters to roll out the AMI. Refer to [Pinning AMIs]({{< relref "#pinning-amis" >}}) for how to choose a particular AMI by `alias`, `name`, or `id`. Remember that it is still best practice to gradually roll new AMIs into your cluster, even if they have been tested. So consider implementing that for your production clusters as described in [Using Disruption Budgets]({{< relref "#using-disruption-budgets" >}}).
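+
+Below is a minimal sketch of what the test-cluster half of that pattern could look like. This is illustrative only: the resource name, IAM `role`, and discovery tag values are assumptions, and your `subnetSelectorTerms`/`securityGroupSelectorTerms` will differ per cluster.
+
+```yaml
+apiVersion: karpenter.k8s.aws/v1
+kind: EC2NodeClass
+metadata:
+  name: default                             # illustrative name
+spec:
+  amiSelectorTerms:
+    - alias: al2023@latest                  # test cluster: float on the newest AL2023 release
+    # in production, pin a validated version instead, e.g.:
+    # - alias: al2023@v20240807
+  role: KarpenterNodeRole-my-cluster        # illustrative IAM role
+  subnetSelectorTerms:
+    - tags:
+        karpenter.sh/discovery: my-cluster  # illustrative tag
+  securityGroupSelectorTerms:
+    - tags:
+        karpenter.sh/discovery: my-cluster  # illustrative tag
+```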
+
+### Using Disruption Budgets
+
+To reduce the risk of entire workloads being immediately degraded when a new AMI is deployed, you can enable Karpenter's [**Node Disruption Budgets**]({{< relref "#node-disruption-budgets" >}}) as well as ensure that you have [**Pod Disruption Budgets**]({{< relref "#pod-disruption-budgets" >}}) configured for applications on your cluster. The sections below provide more details on how to configure each.
+
+#### Node Disruption Budgets
+
+[Disruption Budgets]({{< relref "../concepts/disruption/#disruption-budgets" >}}) limit when and to what extent nodes can be disrupted. You can prevent disruption based on nodes (a percentage or number of nodes that can be disrupted at a time) and schedule (excluding certain times from disrupting nodes).
+You can set Disruption Budgets in a `NodePool` spec. Here is an example:
 
 ```yaml
-template:
-  spec:
-    expireAfter: 1440h
 disruption:
-  consolidationPolicy: WhenEmpty
   budgets:
   - nodes: 15%
   - nodes: "3"
   - nodes: "0"
-    schedule: "0 7 * * sat-sun"
-    duration: 12h
+    schedule: "0 9 * * sat-sun"
+    duration: 24h
+  - nodes: "0"
+    schedule: "0 17 * * mon-fri"
+    duration: 16h
+    reasons:
+    - Drifted
 ```
 
-The `disruption` settings define a few fields that indicate the state of a node that should be disrupted.
-The `consolidationPolicy` field indicates that a node should be disrupted if the node is either empty or underutilized (`WhenEmptyOrUnderutilized`) or not running any pods (`WhenEmpty`).
-With `expireAfter` set to `1440` hours, the node expires after 60 days.
-Extending those values causes longer times without disruption.
-
 Settings for budgets in the above example include the following:
 
 * **Percentage of nodes**: From the first `nodes` setting, only `15%` of the NodePool’s nodes can be disrupted at a time.
 * **Number of nodes**: The second `nodes` setting limits the number of nodes that can be disrupted at a time to `3`.
-* **Schedule**: The third `nodes` setting uses schedule to say that zero disruptions (`0`) are allowed starting at 7am on Saturday and Sunday and continues for 12 hours.
+* **Schedule**: The third `nodes` setting uses schedule to say that zero disruptions (`0`) are allowed starting at 9am on Saturday and Sunday, continuing for 24 hours (fully blocking disruptions all day).
  The format of the schedule follows the `crontab` format for identifying dates and times. See the [crontab](https://man7.org/linux/man-pages/man5/crontab.5.html) page for information on the supported values for these fields.
+* **Reasons**: The fourth `nodes` setting uses `reasons`, which means that this budget only applies to the `Drifted` disruption condition. This setting uses schedule to say that zero disruptions (`0`) are allowed starting at 5pm Monday through Friday, continuing for 16 hours (effectively blocking rolling nodes due to drift outside of working hours).
 
 As with all disruption settings, keep in mind that avoiding updated AMIs for your nodes can result in not getting fixes for known security risks and bugs.
 You need to balance that with your desire to not risk breaking the workloads on your cluster.
 
+#### Pod Disruption Budgets
+
+[Pod Disruption Budgets](https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget) allow you to describe how much disruption an application can tolerate before it becomes unhealthy. This is critical to configure for Karpenter, since Karpenter uses this information to determine if it can continue to replace nodes. 
+
+In a scenario where a faulty AMI is rolling out and begins causing downtime to your applications, configuring Pod Disruption Budgets is critical, since this tells Karpenter that it must stop replacing nodes until your applications become healthy again. This prevents Karpenter from deploying the faulty AMI throughout your cluster, reduces the impact the AMI has on your production applications, and gives you time to manually intervene in the cluster to remediate the issue.
+
 ## Follow-up
 
 The Karpenter project continues to add features to give you greater control over AMI upgrades on your clusters.
diff --git a/website/content/en/v1.0/tasks/managing-amis.md b/website/content/en/v1.0/tasks/managing-amis.md
index 47d2b3bab9b1..23372cb9ce51 100644
--- a/website/content/en/v1.0/tasks/managing-amis.md
+++ b/website/content/en/v1.0/tasks/managing-amis.md
@@ -6,6 +6,17 @@ description: >
   Task for managing AMIs in Karpenter
 ---
 
+{{% alert title="Important" color="warning" %}}
+Karpenter __strongly recommends against__ opting in to using an `amiSelectorTerm` with `@latest` unless you are doing this in a pre-production environment or are willing to accept the risk that a faulty AMI may cause downtime in your production clusters. In general, if using a publicly released version of a well-known AMI type (like AL2, AL2023, or Bottlerocket), we recommend that you pin to a version of that AMI and deploy newer versions of that AMI type in a staged approach when newer patch versions are available.
+
+```yaml
+amiSelectorTerms:
+  - alias: al2023@v20240807
+```
+
+More details are described in [Controlling AMI Replacement]({{< relref "#controlling-ami-replacement" >}}) below.
+{{% /alert %}}
+
 Understanding how Karpenter assigns AMIs to nodes can help ensure that your workloads will run successfully on those nodes and continue to run if the nodes are upgraded to newer AMIs.
 Below we describe how Karpenter assigns AMIs to nodes when they are first deployed and how newer AMIs are assigned later when nodes are spun up to replace old ones.
 Later, it describes the options you have to assert control over how AMIs are used by Karpenter for your clusters.
@@ -17,138 +28,132 @@ See [How do I upgrade an EKS Cluster with Karpenter]({{< relref "../faq/#how-do-
 
 Here is how Karpenter assigns AMIs nodes:
 
-* When you create an `EC2NodeClass`, you are required to set the family of AMIs to use. For example, for the AL2 family, you would set `amiFamily: AL2`.
-* With that `amiFamily` set, any time Karpenter spins up a new node, it uses the latest [Amazon EKS optimized Amazon Linux 2 AMIs](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) release.
-* Later, if an existing node needs to be replaced, Karpenter checks to see if a newer AMI in the AL2 family is available and automatically uses the new AMI instead to spin up the new node. In other words, you may automatically get an AMI that you have not tested with your workloads.
+* When you create an `EC2NodeClass`, you are required to specify [`amiSelectorTerms`]({{< relref "../concepts/nodeclasses/#specamiselectorterms" >}}). [`amiSelectorTerms`]({{< relref "../concepts/nodeclasses/#specamiselectorterms" >}}) allow you to select on AMIs that can be spun up by this EC2NodeClass based on tags, id, name, or an alias.
Multiple AMIs may be specified, and Karpenter will choose the newest compatible AMI when spinning up new nodes. +* Some `amiSelectorTerm` types are static and always resolve to the same AMI (e.g. `id`). However, some are dynamic and may resolve to different AMIs over time. Examples of dynamic types include `alias`, `tags`, and `name` (when using a wildcard). For example, if you specify an `amiSelectorTerm` with an `alias` set to `@latest` (e.g. `al2023@latest`, `al2@latest`, or `bottlerocket@latest`), Karpenter will use the _latest_ release for that AMI type when spinning up a new node. +* When a node is replaced, Karpenter checks to see if a newer AMI is available based on your `amiSelectorTerms`. If a newer AMI is available, Karpenter will automatically use the new AMI to spin up the new node. __In particular, if you are using a dynamic `amiSelectorTerm` type, you may get a new AMI deployed to your environment without having properly tested it.__ -You can manually delete a node managed by Karpenter, which will cause the default behavior just described to take effect. -However, there are situations that will cause node replacements with newer AMIs to happen automatically. -These include: Expiration (if node expiry is set, the node is marked for deletion at a certain time after the node is created), [**Consolidation**]({{< relref "../concepts/disruption/#consolidation" >}}) (if a node is empty of workloads, or deemed to be inefficiently running workloads, nodes can be deleted and more appropriately featured nodes are brought up to consolidate workloads), [Drift]({{< relref "../concepts/disruption/#drift" >}}) (nodes are set for deletion when they drift from the desired state of the `NodeClaims` and new nodes are brought up to replace them), and [Interruption]({{< relref "../concepts/disruption/#interruption" >}}) (nodes are sometimes involuntarily disrupted by things like Spot interruption, health changes, and instance events, requiring new nodes to be deployed). +Whenever a node is replaced, the replacement node will be launched using the newest AMI based on your `amiSelectorTerms`. Nodes may be replaced due to manual deletion, or any of Karpenter's automated methods: +- [**Expiration**]({{< relref "../concepts/disruption/#expiration" >}}): Automatically initiates replacement at a certain time after the node is created. +- [**Consolidation**]({{< relref "../concepts/disruption/#consolidation" >}}): If Karpenter detects that a cheaper node can be used to run the same workloads, Karpenter may replace the current node automatically. +- [**Drift**]({{< relref "../concepts/disruption/#drift" >}}): If a node's state no longer matches the desired state dictated by the `NodePool` or `EC2NodeClass`, it will be replaced, including if the node's AMI no longer matches the latest AMI selected by the `amiSelectorTerms`. +- [**Interruption**]({{< relref "../concepts/disruption/#interruption" >}}): Nodes are sometimes involuntarily disrupted by things like Spot interruption, health changes, and instance events, requiring new nodes to be deployed. See [**Automated Methods**]({{< relref "../concepts/disruption/#automated-methods" >}}) for details on how Karpenter uses these automated actions to replace nodes. -With these types of automated updates in place, there is some risk that the new AMI being used when replacing instances will introduce some regressions or bugs that cause your workloads to be degraded or fail altogether. 
-
-The options described below tell you how to take more control over the ways in which Karpenter selects AMIs for your nodes.
+The most relevant automated disruption method is [**Drift**]({{< relref "../concepts/disruption/#drift" >}}), since it is initiated when a new AMI is selected by your `amiSelectorTerms`. This could be due to a manual update (e.g. a new `id` term was added), or due to a new AMI being resolved by a dynamic term.
+
+If you're using an `alias` with the `latest` pin (e.g. `al2023@latest`), Karpenter periodically checks for new AMI releases. Since AMI releases are outside your control, this could result in new AMIs being deployed before they have been properly tested in a lower environment. This is why we **strongly recommend** using version pins in production environments when using an alias (e.g. `al2023@v20240807`).
 
 {{% alert title="Important" color="warning" %}}
 If you are new to Karpenter, you should know that the behavior described here is different than you get with Managed Node Groups (MNG). MNG will always use the assigned AMI when it creates a new node and will never automatically upgrade to a new AMI when a new node is required. See [Updating a Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/update-managed-node-group.html) to see how you would manually update MNG to use new AMIs.
 {{% /alert %}}
 
-## Choosing AMI options
-One of Karpenter's greatest assets is its ability to provide the right node at the right time, with little intervention from the person managing the cluster.
-Its default behavior of using a later AMI if one becomes available in the selected family means you automatically get the latest security fixes and features.
-However, with this comes the risk that the new AMI could break or degrade your workloads.
-
-As the Karpenter team looks for new ways to manage AMIs, the options below offer some means of reducing these risks, based on your own security and ease-of-use requirements.
-Here are the advantages and challenges of each of the options described below:
-
-* [Option 1]({{< relref "#option-1-manage-how-amis-are-tested-and-rolled-out" >}}) (Test AMIs): The safest way, and the one we recommend, for ensuring that a new AMI doesn't break your workloads is to test it before putting it into production. This takes the most effort on your part, but most effectively models how your workloads will run in production, allowing you to catch issues ahead of time. Note that you can sometimes get different results from your test environment when you roll a new AMI into production, since issues like scale and other factors can elevate problems you might not see in test. So combining this with other options, that do things like slow rollouts, can allow you to catch problems before they impact your whole cluster.
-* [Option 2]({{< relref "#option-2-lock-down-which-amis-are-selected" >}}) (Lock down AMIs): If workloads require a particluar AMI, this option can make sure that it is the only AMI used by Karpenter. This can be used in combination with Option 1, where you lock down the AMI in production, but allow the newest AMIs in a test cluster while you test your workloads before upgrading production. Keep in mind that this makes upgrades a manual process for you.
-* [Option 3]({{< relref "#option-3-control-the-pace-of-node-disruptions" >}}) ([Disruption budgets]({{< relref "../concepts/disruption/" >}})): This option can be used as a way of mitigating the scope of impact if a new AMI causes problems with your workloads.
With Disruption budgets you can slow the pace of upgrades to nodes with new AMIs or make sure that upgrades only happen during selected dates and times (using `schedule`). This doesn't prevent a bad AMI from being deployed, but it allows you to control when nodes are upgraded, and gives you more time respond to rollout issues.
-
-## Options
-
-The following lays out the options you have to impact Karpenter’s behavior as it relates to how nodes are created and AMIs are consumed.
-
-### Option 1: Manage how AMIs are tested and rolled out
-
-Instead of just avoiding AMI upgrades, you can set up test clusters where you can try out new AMI releases before they are put into production.
-For example, you could have:
-
-* **Test clusters**: On lower environment clusters, you can run the latest AMIs for your workloads in a safe environment. The `EC2NodeClass` for these clusters could be set with a chosen `amiFamily`, but no `amiSelectorTerms` set. For example, the `NodePool` and `EC2NodeClass` could begin with the following:
-
-  ```yaml
-  apiVersion: karpenter.sh/v1
-  kind: NodePool
-  metadata:
-    name: default
-  spec:
-    template:
-      spec:
-        nodeClassRef:
-          apiVersion: karpenter.k8s.aws/v1
-          kind: EC2NodeClass
-          name: default
-  ---
-  apiVersion: karpenter.k8s.aws/v1
-  kind: EC2NodeClass
-  metadata:
-    name: default
-  spec:
-    # The latest AMI in this family will be used
-    amiFamily: AL2
-  ```
-* **Production clusters**: After you've confirmed that the AMI works in your lower environments, you can pin the latest AMIs to be deployed in your production clusters to roll out the AMI. One way to do that is to use `amiSelectorTerms` to set the tested AMI to be used in your production cluster. Refer to Option 2 for how to choose a particular AMI by `name` or `id`. Remember that it is still best practice to gradually roll new AMIs into your cluster, even if they have been tested. So consider implementing that for your production clusters as described in Option 3.
-
-### Option 2: Lock down which AMIs are selected
-
-Instead of letting Karpenter always run the latest AMI, you can change Karpenter’s default behavior.
-When you configure the [**EC2NodeClass**]({{< relref "../concepts/nodeclasses" >}}), you can set a specific AMI that you want Karpenter to always choose, using the `amiSelectorTerms` field.
-This prevents a new and potentially untested AMI from replacing existing nodes when those nodes are terminated.
-
-With the `amiSelectorTerms` field in an `EC2NodeClass`, you can set a specific AMI for Karpenter to use, based on AMI name or id (only one is required).
-These examples show two different ways to identify the same AMI:
+## Controlling AMI Replacement
+
+Karpenter's automated node replacement functionality, in tandem with the `EC2NodeClass`, gives you a lot of flexibility to control the desired state of nodes on your cluster. For example, you can opt in to AMI auto-upgrades using `alias` set to `@latest`; however, this has to be weighed heavily against the risk of newer versions of an AMI breaking existing applications on your cluster. Alternatively, you can choose to pin your AMIs in your production clusters to avoid the risk of breaking changes; however, this has to be weighed against the management cost of testing new AMIs in pre-production and keeping up with the latest AMI versions.
+
+Karpenter offers you various controls to ensure you don't take on too much risk as you roll out new versions of AMIs to your production clusters.
The following shows how you can use these controls:
+
+* [Pinning AMIs]({{< relref "#pinning-amis" >}}): If workloads require a particular AMI, this control ensures that it is the only AMI used by Karpenter. This can be used in combination with [Testing AMIs]({{< relref "#testing-amis" >}}), where you lock down the AMI in production but allow the newest AMIs in a test cluster while you test your workloads before upgrading production.
+* [Testing AMIs]({{< relref "#testing-amis" >}}): The safest way to ensure that a new AMI doesn't break your workloads is to test it before putting it into production. This takes the most effort on your part, but most effectively models how your workloads will run in production, allowing you to catch issues ahead of time. Note that you can sometimes get different results from your test environment when you roll a new AMI into production, since issues like scale and other factors can elevate problems you might not see in test. Combining this with other controls like [Using Disruption Budgets]({{< relref "#using-disruption-budgets" >}}) can allow you to catch problems before they impact your whole cluster.
+* [Using Disruption Budgets]({{< relref "#using-disruption-budgets" >}}): This option can be used as a way of mitigating the scope of impact if a new AMI causes problems with your workloads. With Disruption Budgets you can slow the pace of upgrades to nodes with new AMIs or make sure that upgrades only happen during selected dates and times (using `schedule`). This doesn't prevent a bad AMI from being deployed, but it allows you to control when nodes are upgraded, and gives you more time to respond to rollout issues.
+
+### Pinning AMIs
+
+When you configure the [**EC2NodeClass**]({{< relref "../concepts/nodeclasses" >}}), you are required to configure which AMIs you want Karpenter to select on using the `amiSelectorTerms` field. When pinning to a specific `id`, `name`, `tags`, or an `alias` that contains a fixed version, Karpenter will only select on a single AMI and won't automatically upgrade your nodes to a new version of an AMI. This prevents a new and potentially untested AMI from replacing existing nodes when those nodes are terminated.
+
+{{% alert title="Note" color="primary" %}}
+Pinning an AMI to an `alias` type with a fixed version _will_ pin the AMI so long as your K8s control plane version doesn't change. Unlike `id` and `name` types, specifying a version `alias` in your `amiSelectorTerms` will cause Karpenter to consider the K8s control plane version of your cluster when choosing the AMI. If you upgrade your Kubernetes cluster while using this alias type, Karpenter _will_ automatically drift your nodes to a new AMI that still matches the AMI version but also matches your new K8s control plane version.
+{{% /alert %}}
+
+These examples show four different ways to identify the same AMI:
 
 ```yaml
+# Using alias
+# Pinning to this fixed version alias will pull this version of the AMI,
+# matching the K8s control plane version of your cluster
+amiSelectorTerms:
+- alias: al2023@v20240219
+```
+
+```yaml
+# Using name
+# This will only ever select the AMI that contains this exact name
 amiSelectorTerms:
-- tags:
-    karpenter.sh/discovery: "${CLUSTER_NAME}"
-    environment: prod
-  name: al2023-ami-2023.3.20240219.0-kernel-6.1-x86_64
 ```
 
-or
+```yaml
+# Using id
+# This will only ever select this specific AMI id
+amiSelectorTerms:
+- id: ami-052c9ea013e6e3567
+```
 
 ```yaml
+# Using tags
+# You can use a CI/CD system to test newer versions of an AMI
+# and automatically tag them as you validate that they are safe to upgrade to
 amiSelectorTerms:
 - tags:
     karpenter.sh/discovery: "${CLUSTER_NAME}"
     environment: prod
-- id: ami-052c9ea013e6e3567
 ```
 
-See the [**spec.amiSelectorTerms**]({{< relref "../concepts/nodeclasses/#specamiselectorterms" >}}) section of the NodeClasses page for details.
+See the [**spec.amiSelectorTerms**]({{< relref "../concepts/nodeclasses/#specamiselectorterms" >}}) section of the NodeClasses page for details. Keep in mind that this could prevent you from getting critical security patches when new AMIs are available, but it does give you control over exactly which AMI is running.
 
+### Testing AMIs
+
+Instead of avoiding AMI upgrades, you can set up test clusters where you can try out new AMI releases before they are put into production. For example, you could have:
 
-### Option 3: Control the pace of node disruptions
+* **Test clusters**: On lower environment clusters, you can run the latest AMIs (e.g. `al2023@latest`, `al2@latest`, or `bottlerocket@latest`) for your workloads in a safe environment. This ensures that you get the latest patches for AMIs where downtime to applications isn't as critical and allows you to validate patches to AMIs before they are deployed to production.
 
-To reduce the risk of entire workloads being immediately degraded when a new AMI is deployed, you can enable Karpenter [**Disruption Budgets**]({{< relref "../concepts/disruption/#disruption-budgets " >}}).
-Disruption Budgets limit when and to what extent nodes can be disrupted.
-You can prevent disruption based on nodes (a percentage or number of nodes that can be disrupted at a time) and schedule (excluding certain times from disrupting nodes).
-You can set Disruption Budgets in a `NodePool` spec.
-Here is an example:
+* **Production clusters**: After you've confirmed that the AMI works in your lower environments, you can pin the latest AMIs to be deployed in your production clusters to roll out the AMI. Refer to [Pinning AMIs]({{< relref "#pinning-amis" >}}) for how to choose a particular AMI by `alias`, `name`, or `id`. Remember that it is still best practice to gradually roll new AMIs into your cluster, even if they have been tested. So consider implementing that for your production clusters as described in [Using Disruption Budgets]({{< relref "#using-disruption-budgets" >}}).
+
+### Using Disruption Budgets
+
+To reduce the risk of entire workloads being immediately degraded when a new AMI is deployed, you can enable Karpenter's [**Node Disruption Budgets**]({{< relref "#node-disruption-budgets " >}}) as well as ensure that you have [**Pod Disruption Budgets**]({{< relref "#pod-disruption-budgets " >}}) configured for applications on your cluster.
The sections below provide more details on how to configure each.
+
+#### Node Disruption Budgets
+
+[Disruption Budgets]({{< relref "../concepts/disruption/#disruption-budgets " >}}) limit when and to what extent nodes can be disrupted. You can prevent disruption based on nodes (a percentage or number of nodes that can be disrupted at a time) and schedule (excluding certain times from disrupting nodes).
+You can set Disruption Budgets in a `NodePool` spec. Here is an example:
 
 ```yaml
-template:
-  spec:
-    expireAfter: 1440h
 disruption:
-  consolidationPolicy: WhenEmpty
   budgets:
   - nodes: 15%
   - nodes: "3"
   - nodes: "0"
-    schedule: "0 7 * * sat-sun"
-    duration: 12h
+    schedule: "0 9 * * sat-sun"
+    duration: 24h
+  - nodes: "0"
+    schedule: "0 17 * * mon-fri"
+    duration: 16h
+    reasons:
+    - Drifted
 ```
 
-The `disruption` settings define a few fields that indicate the state of a node that should be disrupted.
-The `consolidationPolicy` field indicates that a node should be disrupted if the node is either empty or underutilized (`WhenEmptyOrUnderutilized`) or not running any pods (`WhenEmpty`).
-With `expireAfter` set to `1440` hours, the node expires after 60 days.
-Extending those values causes longer times without disruption.
-
 Settings for budgets in the above example include the following:
 
 * **Percentage of nodes**: From the first `nodes` setting, only `15%` of the NodePool’s nodes can be disrupted at a time.
 * **Number of nodes**: The second `nodes` setting limits the number of nodes that can be disrupted at a time to `3`.
-* **Schedule**: The third `nodes` setting uses schedule to say that zero disruptions (`0`) are allowed starting at 7am on Saturday and Sunday and continues for 12 hours.
+* **Schedule**: The third `nodes` setting uses schedule to say that zero disruptions (`0`) are allowed starting at 9am on Saturday and Sunday, continuing for 24 hours (fully blocking disruptions all day).
 The format of the schedule follows the `crontab` format for identifying dates and times. See the [crontab](https://man7.org/linux/man-pages/man5/crontab.5.html) page for information on the supported values for these fields.
+* **Reasons**: The fourth `nodes` setting uses `reasons`, which means that this budget only applies to the `Drifted` disruption condition. Its schedule says that zero disruptions (`0`) are allowed starting at 5pm on Monday through Friday, continuing for 16 hours (effectively blocking nodes from being rolled due to drift outside of working hours).
 
 As with all disruption settings, keep in mind that avoiding updated AMIs for your nodes can result in not getting fixes for known security risks and bugs.
 You need to balance that with your desire to not risk breaking the workloads on your cluster.
 
+#### Pod Disruption Budgets
+
+[Pod Disruption Budgets](https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget) allow you to describe how much disruption an application can tolerate before it becomes unhealthy. This is critical to configure for Karpenter, since Karpenter uses this information to determine if it can continue to replace nodes. Specifically, if replacing a node would cause a Pod Disruption Budget to be breached (for graceful forms of disruption e.g. Drift or Consolidation), Karpenter will not replace the node.
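+
+For reference, here is a minimal sketch of a `PodDisruptionBudget` for a hypothetical application (the `my-app` name, the label selector, and the `minAvailable` value are illustrative and should be adapted to your workloads):
+
+```yaml
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: my-app-pdb
+spec:
+  # Keep at least 80% of this application's pods available at all times;
+  # graceful Karpenter disruptions (e.g. Drift or Consolidation) will not
+  # proceed if they would violate this budget
+  minAvailable: 80%
+  selector:
+    matchLabels:
+      app: my-app
+```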
+
+In a scenario where a faulty AMI is rolling out and begins causing downtime to your applications, configuring Pod Disruption Budgets is critical, since this tells Karpenter that it must stop replacing nodes until your applications become healthy again. This prevents Karpenter from deploying the faulty AMI throughout your cluster, reduces the impact the AMI has on your production applications, and gives you time to manually intervene in the cluster to remediate the issue.
+
 ## Follow-up
 
 The Karpenter project continues to add features to give you greater control over AMI upgrades on your clusters.
diff --git a/website/content/en/v1.1/tasks/managing-amis.md b/website/content/en/v1.1/tasks/managing-amis.md
index 47d2b3bab9b1..23372cb9ce51 100644
--- a/website/content/en/v1.1/tasks/managing-amis.md
+++ b/website/content/en/v1.1/tasks/managing-amis.md
@@ -6,6 +6,17 @@ description: >
   Task for managing AMIs in Karpenter
 ---
 
+{{% alert title="Important" color="warning" %}}
+Karpenter __strongly recommends against__ opting in to using an `amiSelectorTerm` with `@latest` unless you are doing this in a pre-production environment or are willing to accept the risk that a faulty AMI may cause downtime in your production clusters. In general, if using a publicly released version of a well-known AMI type (like AL2, AL2023, or Bottlerocket), we recommend that you pin to a version of that AMI and deploy newer versions of that AMI type in a staged approach when newer patch versions are available.
+
+```yaml
+amiSelectorTerms:
+  - alias: al2023@v20240807
+```
+
+More details are described in [Controlling AMI Replacement]({{< relref "#controlling-ami-replacement" >}}) below.
+{{% /alert %}}
+
 Understanding how Karpenter assigns AMIs to nodes can help ensure that your workloads will run successfully on those nodes and continue to run if the nodes are upgraded to newer AMIs.
 Below we describe how Karpenter assigns AMIs to nodes when they are first deployed and how newer AMIs are assigned later when nodes are spun up to replace old ones.
 Later, it describes the options you have to assert control over how AMIs are used by Karpenter for your clusters.
@@ -17,138 +28,132 @@ See [How do I upgrade an EKS Cluster with Karpenter]({{< relref "../faq/#how-do-
 
 Here is how Karpenter assigns AMIs nodes:
 
-* When you create an `EC2NodeClass`, you are required to set the family of AMIs to use. For example, for the AL2 family, you would set `amiFamily: AL2`.
-* With that `amiFamily` set, any time Karpenter spins up a new node, it uses the latest [Amazon EKS optimized Amazon Linux 2 AMIs](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) release.
-* Later, if an existing node needs to be replaced, Karpenter checks to see if a newer AMI in the AL2 family is available and automatically uses the new AMI instead to spin up the new node. In other words, you may automatically get an AMI that you have not tested with your workloads.
+* When you create an `EC2NodeClass`, you are required to specify [`amiSelectorTerms`]({{< relref "../concepts/nodeclasses/#specamiselectorterms" >}}). [`amiSelectorTerms`]({{< relref "../concepts/nodeclasses/#specamiselectorterms" >}}) allow you to select on AMIs that can be spun up by this EC2NodeClass based on tags, id, name, or an alias. Multiple AMIs may be specified, and Karpenter will choose the newest compatible AMI when spinning up new nodes.
+* Some `amiSelectorTerm` types are static and always resolve to the same AMI (e.g. `id`).
However, some are dynamic and may resolve to different AMIs over time. Examples of dynamic types include `alias`, `tags`, and `name` (when using a wildcard). For example, if you specify an `amiSelectorTerm` with an `alias` set to `@latest` (e.g. `al2023@latest`, `al2@latest`, or `bottlerocket@latest`), Karpenter will use the _latest_ release for that AMI type when spinning up a new node. +* When a node is replaced, Karpenter checks to see if a newer AMI is available based on your `amiSelectorTerms`. If a newer AMI is available, Karpenter will automatically use the new AMI to spin up the new node. __In particular, if you are using a dynamic `amiSelectorTerm` type, you may get a new AMI deployed to your environment without having properly tested it.__ -You can manually delete a node managed by Karpenter, which will cause the default behavior just described to take effect. -However, there are situations that will cause node replacements with newer AMIs to happen automatically. -These include: Expiration (if node expiry is set, the node is marked for deletion at a certain time after the node is created), [**Consolidation**]({{< relref "../concepts/disruption/#consolidation" >}}) (if a node is empty of workloads, or deemed to be inefficiently running workloads, nodes can be deleted and more appropriately featured nodes are brought up to consolidate workloads), [Drift]({{< relref "../concepts/disruption/#drift" >}}) (nodes are set for deletion when they drift from the desired state of the `NodeClaims` and new nodes are brought up to replace them), and [Interruption]({{< relref "../concepts/disruption/#interruption" >}}) (nodes are sometimes involuntarily disrupted by things like Spot interruption, health changes, and instance events, requiring new nodes to be deployed). +Whenever a node is replaced, the replacement node will be launched using the newest AMI based on your `amiSelectorTerms`. Nodes may be replaced due to manual deletion, or any of Karpenter's automated methods: +- [**Expiration**]({{< relref "../concepts/disruption/#expiration" >}}): Automatically initiates replacement at a certain time after the node is created. +- [**Consolidation**]({{< relref "../concepts/disruption/#consolidation" >}}): If Karpenter detects that a cheaper node can be used to run the same workloads, Karpenter may replace the current node automatically. +- [**Drift**]({{< relref "../concepts/disruption/#drift" >}}): If a node's state no longer matches the desired state dictated by the `NodePool` or `EC2NodeClass`, it will be replaced, including if the node's AMI no longer matches the latest AMI selected by the `amiSelectorTerms`. +- [**Interruption**]({{< relref "../concepts/disruption/#interruption" >}}): Nodes are sometimes involuntarily disrupted by things like Spot interruption, health changes, and instance events, requiring new nodes to be deployed. See [**Automated Methods**]({{< relref "../concepts/disruption/#automated-methods" >}}) for details on how Karpenter uses these automated actions to replace nodes. -With these types of automated updates in place, there is some risk that the new AMI being used when replacing instances will introduce some regressions or bugs that cause your workloads to be degraded or fail altogether. -The options described below tell you how to take more control over the ways in which Karpenter selects AMIs for your nodes. 
+The most relevant automated disruption method is [**Drift**]({{< relref "../concepts/disruption/#drift" >}}), since it is initiated when a new AMI is selected by your `amiSelectorTerms`. This could be due to a manual update (e.g. a new `id` term was added), or due to a new AMI being resolved by a dynamic term.
+
+If you're using an `alias` with the `latest` pin (e.g. `al2023@latest`), Karpenter periodically checks for new AMI releases. Since AMI releases are outside your control, this could result in new AMIs being deployed before they have been properly tested in a lower environment. This is why we **strongly recommend** using version pins in production environments when using an alias (e.g. `al2023@v20240807`).
 
 {{% alert title="Important" color="warning" %}}
 If you are new to Karpenter, you should know that the behavior described here is different than you get with Managed Node Groups (MNG). MNG will always use the assigned AMI when it creates a new node and will never automatically upgrade to a new AMI when a new node is required. See [Updating a Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/update-managed-node-group.html) to see how you would manually update MNG to use new AMIs.
 {{% /alert %}}
 
-## Choosing AMI options
-One of Karpenter's greatest assets is its ability to provide the right node at the right time, with little intervention from the person managing the cluster.
-Its default behavior of using a later AMI if one becomes available in the selected family means you automatically get the latest security fixes and features.
-However, with this comes the risk that the new AMI could break or degrade your workloads.
-
-As the Karpenter team looks for new ways to manage AMIs, the options below offer some means of reducing these risks, based on your own security and ease-of-use requirements.
-Here are the advantages and challenges of each of the options described below:
-
-* [Option 1]({{< relref "#option-1-manage-how-amis-are-tested-and-rolled-out" >}}) (Test AMIs): The safest way, and the one we recommend, for ensuring that a new AMI doesn't break your workloads is to test it before putting it into production. This takes the most effort on your part, but most effectively models how your workloads will run in production, allowing you to catch issues ahead of time. Note that you can sometimes get different results from your test environment when you roll a new AMI into production, since issues like scale and other factors can elevate problems you might not see in test. So combining this with other options, that do things like slow rollouts, can allow you to catch problems before they impact your whole cluster.
-* [Option 2]({{< relref "#option-2-lock-down-which-amis-are-selected" >}}) (Lock down AMIs): If workloads require a particluar AMI, this option can make sure that it is the only AMI used by Karpenter. This can be used in combination with Option 1, where you lock down the AMI in production, but allow the newest AMIs in a test cluster while you test your workloads before upgrading production. Keep in mind that this makes upgrades a manual process for you.
-* [Option 3]({{< relref "#option-3-control-the-pace-of-node-disruptions" >}}) ([Disruption budgets]({{< relref "../concepts/disruption/" >}})): This option can be used as a way of mitigating the scope of impact if a new AMI causes problems with your workloads.
With Disruption budgets you can slow the pace of upgrades to nodes with new AMIs or make sure that upgrades only happen during selected dates and times (using `schedule`). This doesn't prevent a bad AMI from being deployed, but it allows you to control when nodes are upgraded, and gives you more time respond to rollout issues.
-
-## Options
-
-The following lays out the options you have to impact Karpenter’s behavior as it relates to how nodes are created and AMIs are consumed.
-
-### Option 1: Manage how AMIs are tested and rolled out
-
-Instead of just avoiding AMI upgrades, you can set up test clusters where you can try out new AMI releases before they are put into production.
-For example, you could have:
-
-* **Test clusters**: On lower environment clusters, you can run the latest AMIs for your workloads in a safe environment. The `EC2NodeClass` for these clusters could be set with a chosen `amiFamily`, but no `amiSelectorTerms` set. For example, the `NodePool` and `EC2NodeClass` could begin with the following:
-
-  ```yaml
-  apiVersion: karpenter.sh/v1
-  kind: NodePool
-  metadata:
-    name: default
-  spec:
-    template:
-      spec:
-        nodeClassRef:
-          apiVersion: karpenter.k8s.aws/v1
-          kind: EC2NodeClass
-          name: default
-  ---
-  apiVersion: karpenter.k8s.aws/v1
-  kind: EC2NodeClass
-  metadata:
-    name: default
-  spec:
-    # The latest AMI in this family will be used
-    amiFamily: AL2
-  ```
-* **Production clusters**: After you've confirmed that the AMI works in your lower environments, you can pin the latest AMIs to be deployed in your production clusters to roll out the AMI. One way to do that is to use `amiSelectorTerms` to set the tested AMI to be used in your production cluster. Refer to Option 2 for how to choose a particular AMI by `name` or `id`. Remember that it is still best practice to gradually roll new AMIs into your cluster, even if they have been tested. So consider implementing that for your production clusters as described in Option 3.
-
-### Option 2: Lock down which AMIs are selected
-
-Instead of letting Karpenter always run the latest AMI, you can change Karpenter’s default behavior.
-When you configure the [**EC2NodeClass**]({{< relref "../concepts/nodeclasses" >}}), you can set a specific AMI that you want Karpenter to always choose, using the `amiSelectorTerms` field.
-This prevents a new and potentially untested AMI from replacing existing nodes when those nodes are terminated.
-
-With the `amiSelectorTerms` field in an `EC2NodeClass`, you can set a specific AMI for Karpenter to use, based on AMI name or id (only one is required).
-These examples show two different ways to identify the same AMI:
+## Controlling AMI Replacement
+
+Karpenter's automated node replacement functionality, in tandem with the `EC2NodeClass`, gives you a lot of flexibility to control the desired state of nodes on your cluster. For example, you can opt in to AMI auto-upgrades using `alias` set to `@latest`; however, this has to be weighed heavily against the risk of newer versions of an AMI breaking existing applications on your cluster. Alternatively, you can choose to pin your AMIs in your production clusters to avoid the risk of breaking changes; however, this has to be weighed against the management cost of testing new AMIs in pre-production and keeping up with the latest AMI versions.
+
+Karpenter offers you various controls to ensure you don't take on too much risk as you roll out new versions of AMIs to your production clusters.
The following shows how you can use these controls:
+
+* [Pinning AMIs]({{< relref "#pinning-amis" >}}): If workloads require a particular AMI, this control ensures that it is the only AMI used by Karpenter. This can be used in combination with [Testing AMIs]({{< relref "#testing-amis" >}}), where you lock down the AMI in production but allow the newest AMIs in a test cluster while you test your workloads before upgrading production.
+* [Testing AMIs]({{< relref "#testing-amis" >}}): The safest way to ensure that a new AMI doesn't break your workloads is to test it before putting it into production. This takes the most effort on your part, but most effectively models how your workloads will run in production, allowing you to catch issues ahead of time. Note that you can sometimes get different results from your test environment when you roll a new AMI into production, since issues like scale and other factors can elevate problems you might not see in test. Combining this with other controls like [Using Disruption Budgets]({{< relref "#using-disruption-budgets" >}}) can allow you to catch problems before they impact your whole cluster.
+* [Using Disruption Budgets]({{< relref "#using-disruption-budgets" >}}): This option can be used as a way of mitigating the scope of impact if a new AMI causes problems with your workloads. With Disruption Budgets you can slow the pace of upgrades to nodes with new AMIs or make sure that upgrades only happen during selected dates and times (using `schedule`). This doesn't prevent a bad AMI from being deployed, but it allows you to control when nodes are upgraded, and gives you more time to respond to rollout issues.
+
+### Pinning AMIs
+
+When you configure the [**EC2NodeClass**]({{< relref "../concepts/nodeclasses" >}}), you are required to configure which AMIs you want Karpenter to select on using the `amiSelectorTerms` field. When pinning to a specific `id`, `name`, `tags`, or an `alias` that contains a fixed version, Karpenter will only select on a single AMI and won't automatically upgrade your nodes to a new version of an AMI. This prevents a new and potentially untested AMI from replacing existing nodes when those nodes are terminated.
+
+{{% alert title="Note" color="primary" %}}
+Pinning an AMI to an `alias` type with a fixed version _will_ pin the AMI so long as your K8s control plane version doesn't change. Unlike `id` and `name` types, specifying a version `alias` in your `amiSelectorTerms` will cause Karpenter to consider the K8s control plane version of your cluster when choosing the AMI. If you upgrade your Kubernetes cluster while using this alias type, Karpenter _will_ automatically drift your nodes to a new AMI that still matches the AMI version but also matches your new K8s control plane version.
+{{% /alert %}}
+
+These examples show four different ways to identify the same AMI:
 
 ```yaml
+# Using alias
+# Pinning to this fixed version alias will pull this version of the AMI,
+# matching the K8s control plane version of your cluster
+amiSelectorTerms:
+- alias: al2023@v20240219
+```
+
+```yaml
+# Using name
+# This will only ever select the AMI that contains this exact name
 amiSelectorTerms:
-- tags:
-    karpenter.sh/discovery: "${CLUSTER_NAME}"
-    environment: prod
-  name: al2023-ami-2023.3.20240219.0-kernel-6.1-x86_64
 ```
 
-or
+```yaml
+# Using id
+# This will only ever select this specific AMI id
+amiSelectorTerms:
+- id: ami-052c9ea013e6e3567
+```
 
 ```yaml
+# Using tags
+# You can use a CI/CD system to test newer versions of an AMI
+# and automatically tag them as you validate that they are safe to upgrade to
 amiSelectorTerms:
 - tags:
     karpenter.sh/discovery: "${CLUSTER_NAME}"
     environment: prod
-- id: ami-052c9ea013e6e3567
 ```
 
-See the [**spec.amiSelectorTerms**]({{< relref "../concepts/nodeclasses/#specamiselectorterms" >}}) section of the NodeClasses page for details.
+See the [**spec.amiSelectorTerms**]({{< relref "../concepts/nodeclasses/#specamiselectorterms" >}}) section of the NodeClasses page for details. Keep in mind that this could prevent you from getting critical security patches when new AMIs are available, but it does give you control over exactly which AMI is running.
 
+### Testing AMIs
+
+Instead of avoiding AMI upgrades, you can set up test clusters where you can try out new AMI releases before they are put into production. For example, you could have:
 
-### Option 3: Control the pace of node disruptions
+* **Test clusters**: On lower environment clusters, you can run the latest AMIs (e.g. `al2023@latest`, `al2@latest`, or `bottlerocket@latest`) for your workloads in a safe environment. This ensures that you get the latest patches for AMIs where downtime to applications isn't as critical and allows you to validate patches to AMIs before they are deployed to production.
 
-To reduce the risk of entire workloads being immediately degraded when a new AMI is deployed, you can enable Karpenter [**Disruption Budgets**]({{< relref "../concepts/disruption/#disruption-budgets " >}}).
-Disruption Budgets limit when and to what extent nodes can be disrupted.
-You can prevent disruption based on nodes (a percentage or number of nodes that can be disrupted at a time) and schedule (excluding certain times from disrupting nodes).
-You can set Disruption Budgets in a `NodePool` spec.
-Here is an example:
+* **Production clusters**: After you've confirmed that the AMI works in your lower environments, you can pin the latest AMIs to be deployed in your production clusters to roll out the AMI. Refer to [Pinning AMIs]({{< relref "#pinning-amis" >}}) for how to choose a particular AMI by `alias`, `name`, or `id`. Remember that it is still best practice to gradually roll new AMIs into your cluster, even if they have been tested. So consider implementing that for your production clusters as described in [Using Disruption Budgets]({{< relref "#using-disruption-budgets" >}}).
+
+### Using Disruption Budgets
+
+To reduce the risk of entire workloads being immediately degraded when a new AMI is deployed, you can enable Karpenter's [**Node Disruption Budgets**]({{< relref "#node-disruption-budgets " >}}) as well as ensure that you have [**Pod Disruption Budgets**]({{< relref "#pod-disruption-budgets " >}}) configured for applications on your cluster.
The sections below provide more details on how to configure each.
+
+#### Node Disruption Budgets
+
+[Disruption Budgets]({{< relref "../concepts/disruption/#disruption-budgets " >}}) limit when and to what extent nodes can be disrupted. You can prevent disruption based on nodes (a percentage or number of nodes that can be disrupted at a time) and schedule (excluding certain times from disrupting nodes).
+You can set Disruption Budgets in a `NodePool` spec. Here is an example:
 
 ```yaml
-template:
-  spec:
-    expireAfter: 1440h
 disruption:
-  consolidationPolicy: WhenEmpty
   budgets:
   - nodes: 15%
   - nodes: "3"
   - nodes: "0"
-    schedule: "0 7 * * sat-sun"
-    duration: 12h
+    schedule: "0 9 * * sat-sun"
+    duration: 24h
+  - nodes: "0"
+    schedule: "0 17 * * mon-fri"
+    duration: 16h
+    reasons:
+    - Drifted
 ```
 
-The `disruption` settings define a few fields that indicate the state of a node that should be disrupted.
-The `consolidationPolicy` field indicates that a node should be disrupted if the node is either empty or underutilized (`WhenEmptyOrUnderutilized`) or not running any pods (`WhenEmpty`).
-With `expireAfter` set to `1440` hours, the node expires after 60 days.
-Extending those values causes longer times without disruption.
-
 Settings for budgets in the above example include the following:
 
 * **Percentage of nodes**: From the first `nodes` setting, only `15%` of the NodePool’s nodes can be disrupted at a time.
 * **Number of nodes**: The second `nodes` setting limits the number of nodes that can be disrupted at a time to `3`.
-* **Schedule**: The third `nodes` setting uses schedule to say that zero disruptions (`0`) are allowed starting at 7am on Saturday and Sunday and continues for 12 hours.
+* **Schedule**: The third `nodes` setting uses schedule to say that zero disruptions (`0`) are allowed starting at 9am on Saturday and Sunday, continuing for 24 hours (fully blocking disruptions all day).
 The format of the schedule follows the `crontab` format for identifying dates and times. See the [crontab](https://man7.org/linux/man-pages/man5/crontab.5.html) page for information on the supported values for these fields.
+* **Reasons**: The fourth `nodes` setting uses `reasons`, which means that this budget only applies to the `Drifted` disruption condition. Its schedule says that zero disruptions (`0`) are allowed starting at 5pm on Monday through Friday, continuing for 16 hours (effectively blocking nodes from being rolled due to drift outside of working hours).
 
 As with all disruption settings, keep in mind that avoiding updated AMIs for your nodes can result in not getting fixes for known security risks and bugs.
 You need to balance that with your desire to not risk breaking the workloads on your cluster.
 
+#### Pod Disruption Budgets
+
+[Pod Disruption Budgets](https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget) allow you to describe how much disruption an application can tolerate before it becomes unhealthy. This is critical to configure for Karpenter, since Karpenter uses this information to determine if it can continue to replace nodes. Specifically, if replacing a node would cause a Pod Disruption Budget to be breached (for graceful forms of disruption e.g. Drift or Consolidation), Karpenter will not replace the node.
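+
+For reference, here is a minimal sketch of a `PodDisruptionBudget` for a hypothetical application (the `my-app` name, the label selector, and the `minAvailable` value are illustrative and should be adapted to your workloads):
+
+```yaml
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: my-app-pdb
+spec:
+  # Keep at least 80% of this application's pods available at all times;
+  # graceful Karpenter disruptions (e.g. Drift or Consolidation) will not
+  # proceed if they would violate this budget
+  minAvailable: 80%
+  selector:
+    matchLabels:
+      app: my-app
+```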
+
+In a scenario where a faulty AMI is rolling out and begins causing downtime to your applications, configuring Pod Disruption Budgets is critical, since this tells Karpenter that it must stop replacing nodes until your applications become healthy again. This prevents Karpenter from deploying the faulty AMI throughout your cluster, reduces the impact the AMI has on your production applications, and gives you time to manually intervene in the cluster to remediate the issue.
+
 ## Follow-up
 
 The Karpenter project continues to add features to give you greater control over AMI upgrades on your clusters.

From b320ff14d169e3382a281daf2d2273c777a75b63 Mon Sep 17 00:00:00 2001
From: Saurav Agarwalla
Date: Tue, 14 Jan 2025 17:04:21 -0500
Subject: [PATCH 15/18] =?UTF-8?q?fix:=20unify=20nodeclass=20status=20and?=
 =?UTF-8?q?=20termination=20controllers=20to=20prevent=20ra=E2=80=A6=20(#7?=
 =?UTF-8?q?597)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 pkg/cloudprovider/suite_test.go               |   8 +-
 pkg/controllers/controllers.go                |   6 +-
 pkg/controllers/nodeclass/{status => }/ami.go |   2 +-
 .../nodeclass/{status => }/ami_test.go        |  26 ++--
 .../nodeclass/{termination => }/controller.go | 105 +++++++++++---
 .../nodeclass/{termination => }/events.go     |   2 +-
 .../nodeclass/{status => }/instanceprofile.go |   9 +-
 .../{status => }/instanceprofile_test.go      |  14 +-
 .../{status => }/launchtemplate_test.go       |   8 +-
 .../nodeclass/{status => }/readiness.go       |   2 +-
 .../nodeclass/{status => }/readiness_test.go  |   6 +-
 .../nodeclass/{status => }/securitygroup.go   |   2 +-
 .../{status => }/securitygroup_test.go        |  22 +--
 .../nodeclass/status/controller.go            | 132 ------------------
 .../nodeclass/status/suite_test.go            |  78 -----------
 .../nodeclass/{status => }/subnet.go          |   2 +-
 .../nodeclass/{status => }/subnet_test.go     |  24 ++--
 .../nodeclass/{termination => }/suite_test.go |  54 ++++---
 .../nodeclass/{status => }/validation.go      |   2 +-
 .../nodeclass/{status => }/validation_test.go |   6 +-
 pkg/providers/launchtemplate/suite_test.go    |  10 +-
 .../en/preview/upgrading/upgrade-guide.md     |   9 +-
 22 files changed, 202 insertions(+), 327 deletions(-)
 rename pkg/controllers/nodeclass/{status => }/ami.go (99%)
 rename pkg/controllers/nodeclass/{status => }/ami_test.go (96%)
 rename pkg/controllers/nodeclass/{termination => }/controller.go (57%)
 rename pkg/controllers/nodeclass/{termination => }/events.go (98%)
 rename pkg/controllers/nodeclass/{status => }/instanceprofile.go (81%)
 rename pkg/controllers/nodeclass/{status => }/instanceprofile_test.go (93%)
 rename pkg/controllers/nodeclass/{status => }/launchtemplate_test.go (94%)
 rename pkg/controllers/nodeclass/{status => }/readiness.go (98%)
 rename pkg/controllers/nodeclass/{status => }/readiness_test.go (93%)
 rename pkg/controllers/nodeclass/{status => }/securitygroup.go (99%)
 rename pkg/controllers/nodeclass/{status => }/securitygroup_test.go (90%)
 delete mode 100644 pkg/controllers/nodeclass/status/controller.go
 delete mode 100644 pkg/controllers/nodeclass/status/suite_test.go
 rename pkg/controllers/nodeclass/{status => }/subnet.go (99%)
 rename pkg/controllers/nodeclass/{status => }/subnet_test.go (91%)
 rename pkg/controllers/nodeclass/{termination => }/suite_test.go (85%)
 rename pkg/controllers/nodeclass/{status => }/validation.go (98%)
 rename pkg/controllers/nodeclass/{status => }/validation_test.go (94%)

diff --git a/pkg/cloudprovider/suite_test.go b/pkg/cloudprovider/suite_test.go
index 7fdcf9d37c87..d08c26dcb0d5 100644
--- a/pkg/cloudprovider/suite_test.go
+++
b/pkg/cloudprovider/suite_test.go @@ -43,7 +43,7 @@ import ( "github.com/aws/karpenter-provider-aws/pkg/apis" v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/cloudprovider" - "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass/status" + "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass" "github.com/aws/karpenter-provider-aws/pkg/fake" "github.com/aws/karpenter-provider-aws/pkg/operator/options" "github.com/aws/karpenter-provider-aws/pkg/test" @@ -1156,7 +1156,7 @@ var _ = Describe("CloudProvider", func() { {SubnetId: aws.String("test-subnet-2"), AvailabilityZone: aws.String("test-zone-1a"), AvailabilityZoneId: aws.String("tstz1-1a"), AvailableIpAddressCount: aws.Int32(100), Tags: []ec2types.Tag{{Key: aws.String("Name"), Value: aws.String("test-subnet-2")}}}, }}) - controller := status.NewController(env.Client, awsEnv.SubnetProvider, awsEnv.SecurityGroupProvider, awsEnv.AMIProvider, awsEnv.InstanceProfileProvider, awsEnv.LaunchTemplateProvider) + controller := nodeclass.NewController(env.Client, recorder, awsEnv.SubnetProvider, awsEnv.SecurityGroupProvider, awsEnv.AMIProvider, awsEnv.InstanceProfileProvider, awsEnv.LaunchTemplateProvider) ExpectApplied(ctx, env.Client, nodePool, nodeClass) ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) pod := coretest.UnschedulablePod(coretest.PodOptions{NodeSelector: map[string]string{corev1.LabelTopologyZone: "test-zone-1a"}}) @@ -1173,7 +1173,7 @@ var _ = Describe("CloudProvider", func() { {SubnetId: aws.String("test-subnet-2"), AvailabilityZone: aws.String("test-zone-1a"), AvailabilityZoneId: aws.String("tstz1-1a"), AvailableIpAddressCount: aws.Int32(11), Tags: []ec2types.Tag{{Key: aws.String("Name"), Value: aws.String("test-subnet-2")}}}, }}) - controller := status.NewController(env.Client, awsEnv.SubnetProvider, awsEnv.SecurityGroupProvider, awsEnv.AMIProvider, awsEnv.InstanceProfileProvider, awsEnv.LaunchTemplateProvider) + controller := nodeclass.NewController(env.Client, recorder, awsEnv.SubnetProvider, awsEnv.SecurityGroupProvider, awsEnv.AMIProvider, awsEnv.InstanceProfileProvider, awsEnv.LaunchTemplateProvider) nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ MaxPods: aws.Int32(1), } @@ -1214,7 +1214,7 @@ var _ = Describe("CloudProvider", func() { }}) nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{{Tags: map[string]string{"Name": "test-subnet-1"}}} ExpectApplied(ctx, env.Client, nodePool, nodeClass) - controller := status.NewController(env.Client, awsEnv.SubnetProvider, awsEnv.SecurityGroupProvider, awsEnv.AMIProvider, awsEnv.InstanceProfileProvider, awsEnv.LaunchTemplateProvider) + controller := nodeclass.NewController(env.Client, recorder, awsEnv.SubnetProvider, awsEnv.SecurityGroupProvider, awsEnv.AMIProvider, awsEnv.InstanceProfileProvider, awsEnv.LaunchTemplateProvider) ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) podSubnet1 := coretest.UnschedulablePod() ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, podSubnet1) diff --git a/pkg/controllers/controllers.go b/pkg/controllers/controllers.go index 2514ebee5aed..c5cbec68a5f2 100644 --- a/pkg/controllers/controllers.go +++ b/pkg/controllers/controllers.go @@ -27,9 +27,8 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" + nodeclass "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass" nodeclasshash "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass/hash" - nodeclassstatus 
"github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass/status" - nodeclasstermination "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass/termination" controllersinstancetype "github.com/aws/karpenter-provider-aws/pkg/controllers/providers/instancetype" controllersinstancetypecapacity "github.com/aws/karpenter-provider-aws/pkg/controllers/providers/instancetype/capacity" controllerspricing "github.com/aws/karpenter-provider-aws/pkg/controllers/providers/pricing" @@ -82,8 +81,7 @@ func NewControllers( instanceTypeProvider *instancetype.DefaultProvider) []controller.Controller { controllers := []controller.Controller{ nodeclasshash.NewController(kubeClient), - nodeclassstatus.NewController(kubeClient, subnetProvider, securityGroupProvider, amiProvider, instanceProfileProvider, launchTemplateProvider), - nodeclasstermination.NewController(kubeClient, recorder, instanceProfileProvider, launchTemplateProvider), + nodeclass.NewController(kubeClient, recorder, subnetProvider, securityGroupProvider, amiProvider, instanceProfileProvider, launchTemplateProvider), nodeclaimgarbagecollection.NewController(kubeClient, cloudProvider), nodeclaimtagging.NewController(kubeClient, cloudProvider, instanceProvider), controllerspricing.NewController(pricingProvider), diff --git a/pkg/controllers/nodeclass/status/ami.go b/pkg/controllers/nodeclass/ami.go similarity index 99% rename from pkg/controllers/nodeclass/status/ami.go rename to pkg/controllers/nodeclass/ami.go index 0d2dd0a2d904..26a5a7462909 100644 --- a/pkg/controllers/nodeclass/status/ami.go +++ b/pkg/controllers/nodeclass/ami.go @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package status +package nodeclass import ( "context" diff --git a/pkg/controllers/nodeclass/status/ami_test.go b/pkg/controllers/nodeclass/ami_test.go similarity index 96% rename from pkg/controllers/nodeclass/status/ami_test.go rename to pkg/controllers/nodeclass/ami_test.go index 31342d1f7b03..966f13dabf41 100644 --- a/pkg/controllers/nodeclass/status/ami_test.go +++ b/pkg/controllers/nodeclass/ami_test.go @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package status_test +package nodeclass_test import ( "fmt" @@ -132,7 +132,7 @@ var _ = Describe("NodeClass AMI Status Controller", func() { } nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Alias: "al2023@latest"}} ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(len(nodeClass.Status.AMIs)).To(Equal(4)) @@ -216,7 +216,7 @@ var _ = Describe("NodeClass AMI Status Controller", func() { } nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Alias: "al2@latest"}} ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(len(nodeClass.Status.AMIs)).To(Equal(4)) @@ -302,7 +302,7 @@ var _ = Describe("NodeClass AMI Status Controller", func() { } nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Alias: "bottlerocket@latest"}} ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(len(nodeClass.Status.AMIs)).To(Equal(4)) @@ -384,7 +384,7 @@ var _ = Describe("NodeClass AMI Status Controller", func() { } nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Alias: "windows2019@latest"}} ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(len(nodeClass.Status.AMIs)).To(Equal(1)) @@ -419,7 +419,7 @@ var _ = Describe("NodeClass AMI Status Controller", func() { } nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Alias: "windows2022@latest"}} ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(len(nodeClass.Status.AMIs)).To(Equal(1)) @@ -459,7 +459,7 @@ var _ = Describe("NodeClass AMI Status Controller", func() { Alias: "bottlerocket@latest", }} ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(len(nodeClass.Status.AMIs)).To(Equal(2)) @@ -510,7 +510,7 @@ var _ = Describe("NodeClass AMI Status Controller", func() { Tags: map[string]string{"Name": "amd64-standard"}, }} ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.AMIs).To(Equal( []v1.AMI{ @@ -530,7 +530,7 @@ var _ = Describe("NodeClass AMI Status Controller", func() { It("should get error when resolving AMIs and have status condition set to false", func() { awsEnv.EC2API.NextError.Set(fmt.Errorf("unable to resolve AMI")) ExpectApplied(ctx, env.Client, nodeClass) - _ = ExpectObjectReconcileFailed(ctx, env.Client, statusController, nodeClass) + _ = ExpectObjectReconcileFailed(ctx, env.Client, controller, nodeClass) nodeClass = 
ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.StatusConditions().IsTrue(v1.ConditionTypeAMIsReady)).To(BeFalse()) }) @@ -571,7 +571,7 @@ var _ = Describe("NodeClass AMI Status Controller", func() { }) It("should update nodeclass AMI status with correct deprecation value and conditions", func() { ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(len(nodeClass.Status.AMIs)).To(Equal(2)) @@ -609,7 +609,7 @@ var _ = Describe("NodeClass AMI Status Controller", func() { // Flush Cache awsEnv.EC2Cache.Flush() - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(len(nodeClass.Status.AMIs)).To(Equal(2)) Expect(nodeClass.Status.AMIs).To(Equal( @@ -646,7 +646,7 @@ var _ = Describe("NodeClass AMI Status Controller", func() { // Initial reconcile discovers AMIs which are deprecated ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(len(nodeClass.Status.AMIs)).To(Equal(2)) @@ -709,7 +709,7 @@ var _ = Describe("NodeClass AMI Status Controller", func() { awsEnv.EC2Cache.Flush() ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(len(nodeClass.Status.AMIs)).To(Equal(2)) diff --git a/pkg/controllers/nodeclass/termination/controller.go b/pkg/controllers/nodeclass/controller.go similarity index 57% rename from pkg/controllers/nodeclass/termination/controller.go rename to pkg/controllers/nodeclass/controller.go index b53eadf62b0d..9a05ea8cbdbb 100644 --- a/pkg/controllers/nodeclass/termination/controller.go +++ b/pkg/controllers/nodeclass/controller.go @@ -12,18 +12,18 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package termination +package nodeclass import ( "context" "fmt" "time" + "go.uber.org/multierr" "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/karpenter/pkg/operator/injection" nodeclaimutils "sigs.k8s.io/karpenter/pkg/utils/nodeclaim" - - "github.com/aws/karpenter-provider-aws/pkg/providers/launchtemplate" + "sigs.k8s.io/karpenter/pkg/utils/result" "github.com/samber/lo" "k8s.io/apimachinery/pkg/api/equality" @@ -44,34 +44,103 @@ import ( "sigs.k8s.io/karpenter/pkg/events" v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" + "github.com/aws/karpenter-provider-aws/pkg/providers/amifamily" "github.com/aws/karpenter-provider-aws/pkg/providers/instanceprofile" + "github.com/aws/karpenter-provider-aws/pkg/providers/launchtemplate" + "github.com/aws/karpenter-provider-aws/pkg/providers/securitygroup" + "github.com/aws/karpenter-provider-aws/pkg/providers/subnet" ) +type nodeClassReconciler interface { + Reconcile(context.Context, *v1.EC2NodeClass) (reconcile.Result, error) +} + type Controller struct { - kubeClient client.Client - recorder events.Recorder - instanceProfileProvider instanceprofile.Provider - launchTemplateProvider launchtemplate.Provider + kubeClient client.Client + recorder events.Recorder + launchTemplateProvider launchtemplate.Provider + + ami *AMI + instanceProfile *InstanceProfile + subnet *Subnet + securityGroup *SecurityGroup + validation *Validation + readiness *Readiness //TODO : Remove this when we have sub status conditions } -func NewController(kubeClient client.Client, recorder events.Recorder, - instanceProfileProvider instanceprofile.Provider, launchTemplateProvider launchtemplate.Provider) *Controller { +func NewController(kubeClient client.Client, recorder events.Recorder, subnetProvider subnet.Provider, securityGroupProvider securitygroup.Provider, + amiProvider amifamily.Provider, instanceProfileProvider instanceprofile.Provider, launchTemplateProvider launchtemplate.Provider) *Controller { return &Controller{ - kubeClient: kubeClient, - recorder: recorder, - instanceProfileProvider: instanceProfileProvider, - launchTemplateProvider: launchTemplateProvider, + kubeClient: kubeClient, + recorder: recorder, + launchTemplateProvider: launchTemplateProvider, + ami: &AMI{amiProvider: amiProvider}, + subnet: &Subnet{subnetProvider: subnetProvider}, + securityGroup: &SecurityGroup{securityGroupProvider: securityGroupProvider}, + instanceProfile: &InstanceProfile{instanceProfileProvider: instanceProfileProvider}, + validation: &Validation{}, + readiness: &Readiness{launchTemplateProvider: launchTemplateProvider}, } } +func (c *Controller) Name() string { + return "nodeclass" +} + func (c *Controller) Reconcile(ctx context.Context, nodeClass *v1.EC2NodeClass) (reconcile.Result, error) { - ctx = injection.WithControllerName(ctx, "nodeclass.termination") + ctx = injection.WithControllerName(ctx, c.Name()) if !nodeClass.GetDeletionTimestamp().IsZero() { return c.finalize(ctx, nodeClass) } - return reconcile.Result{}, nil + + if !controllerutil.ContainsFinalizer(nodeClass, v1.TerminationFinalizer) { + stored := nodeClass.DeepCopy() + controllerutil.AddFinalizer(nodeClass, v1.TerminationFinalizer) + + // We use client.MergeFromWithOptimisticLock because patching a list with a JSON merge patch + // can cause races due to the fact that it fully replaces the list on a change + // Here, we are updating the finalizer list + if err := c.kubeClient.Patch(ctx, nodeClass, client.MergeFromWithOptions(stored, client.MergeFromWithOptimisticLock{})); err != nil { + if 
errors.IsConflict(err) { + return reconcile.Result{Requeue: true}, nil + } + return reconcile.Result{}, err + } + } + stored := nodeClass.DeepCopy() + + var results []reconcile.Result + var errs error + for _, reconciler := range []nodeClassReconciler{ + c.ami, + c.subnet, + c.securityGroup, + c.instanceProfile, + c.validation, + c.readiness, + } { + res, err := reconciler.Reconcile(ctx, nodeClass) + errs = multierr.Append(errs, err) + results = append(results, res) + } + + if !equality.Semantic.DeepEqual(stored, nodeClass) { + // We use client.MergeFromWithOptimisticLock because patching a list with a JSON merge patch + // can cause races due to the fact that it fully replaces the list on a change + // Here, we are updating the status condition list + if err := c.kubeClient.Status().Patch(ctx, nodeClass, client.MergeFromWithOptions(stored, client.MergeFromWithOptimisticLock{})); err != nil { + if errors.IsConflict(err) { + return reconcile.Result{Requeue: true}, nil + } + errs = multierr.Append(errs, client.IgnoreNotFound(err)) + } + } + if errs != nil { + return reconcile.Result{}, errs + } + return result.Min(results...), nil } func (c *Controller) finalize(ctx context.Context, nodeClass *v1.EC2NodeClass) (reconcile.Result, error) { @@ -88,8 +157,8 @@ func (c *Controller) finalize(ctx context.Context, nodeClass *v1.EC2NodeClass) ( return reconcile.Result{RequeueAfter: time.Minute * 10}, nil // periodically fire the event } if nodeClass.Spec.Role != "" { - if err := c.instanceProfileProvider.Delete(ctx, nodeClass); err != nil { - return reconcile.Result{}, fmt.Errorf("deleting instance profile, %w", err) + if _, err := c.instanceProfile.Finalize(ctx, nodeClass); err != nil { + return reconcile.Result{}, err } } if err := c.launchTemplateProvider.DeleteAll(ctx, nodeClass); err != nil { @@ -113,7 +182,7 @@ func (c *Controller) finalize(ctx context.Context, nodeClass *v1.EC2NodeClass) ( func (c *Controller) Register(_ context.Context, m manager.Manager) error { return controllerruntime.NewControllerManagedBy(m). - Named("nodeclass.termination"). + Named(c.Name()). For(&v1.EC2NodeClass{}). Watches( &karpv1.NodeClaim{}, diff --git a/pkg/controllers/nodeclass/termination/events.go b/pkg/controllers/nodeclass/events.go similarity index 98% rename from pkg/controllers/nodeclass/termination/events.go rename to pkg/controllers/nodeclass/events.go index 3823e871c806..45f1c9c18536 100644 --- a/pkg/controllers/nodeclass/termination/events.go +++ b/pkg/controllers/nodeclass/events.go @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package termination +package nodeclass import ( "fmt" diff --git a/pkg/controllers/nodeclass/status/instanceprofile.go b/pkg/controllers/nodeclass/instanceprofile.go similarity index 81% rename from pkg/controllers/nodeclass/status/instanceprofile.go rename to pkg/controllers/nodeclass/instanceprofile.go index 9ff92d299bae..15402ea618ea 100644 --- a/pkg/controllers/nodeclass/status/instanceprofile.go +++ b/pkg/controllers/nodeclass/instanceprofile.go @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package status +package nodeclass import ( "context" @@ -42,3 +42,10 @@ func (ip *InstanceProfile) Reconcile(ctx context.Context, nodeClass *v1.EC2NodeC nodeClass.StatusConditions().SetTrue(v1.ConditionTypeInstanceProfileReady) return reconcile.Result{}, nil } + +func (ip *InstanceProfile) Finalize(ctx context.Context, nodeClass *v1.EC2NodeClass) (reconcile.Result, error) { + if err := ip.instanceProfileProvider.Delete(ctx, nodeClass); err != nil { + return reconcile.Result{}, fmt.Errorf("deleting instance profile, %w", err) + } + return reconcile.Result{}, nil +} diff --git a/pkg/controllers/nodeclass/status/instanceprofile_test.go b/pkg/controllers/nodeclass/instanceprofile_test.go similarity index 93% rename from pkg/controllers/nodeclass/status/instanceprofile_test.go rename to pkg/controllers/nodeclass/instanceprofile_test.go index 62e9420a9680..29d6ca7d6293 100644 --- a/pkg/controllers/nodeclass/status/instanceprofile_test.go +++ b/pkg/controllers/nodeclass/instanceprofile_test.go @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package status_test +package nodeclass_test import ( "fmt" @@ -39,7 +39,7 @@ var _ = Describe("NodeClass InstanceProfile Status Controller", func() { It("should create the instance profile when it doesn't exist", func() { nodeClass.Spec.Role = "test-role" ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(1)) Expect(awsEnv.IAMAPI.InstanceProfiles[profileName].Roles).To(HaveLen(1)) @@ -64,7 +64,7 @@ var _ = Describe("NodeClass InstanceProfile Status Controller", func() { nodeClass.Spec.Role = "test-role" ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(1)) Expect(awsEnv.IAMAPI.InstanceProfiles[profileName].Roles).To(HaveLen(1)) @@ -89,7 +89,7 @@ var _ = Describe("NodeClass InstanceProfile Status Controller", func() { nodeClass.Spec.Role = "test-role" ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(1)) Expect(awsEnv.IAMAPI.InstanceProfiles[profileName].Roles).To(HaveLen(1)) @@ -114,7 +114,7 @@ var _ = Describe("NodeClass InstanceProfile Status Controller", func() { nodeClass.Spec.Role = "test-role" ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(1)) Expect(awsEnv.IAMAPI.InstanceProfiles[profileName].Roles).To(HaveLen(1)) @@ -129,7 +129,7 @@ var _ = Describe("NodeClass InstanceProfile Status Controller", func() { nodeClass.Spec.Role = "" nodeClass.Spec.InstanceProfile = lo.ToPtr("test-instance-profile") ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.InstanceProfile).To(Equal(lo.FromPtr(nodeClass.Spec.InstanceProfile))) @@ -139,7 +139,7 @@ var _ = Describe("NodeClass InstanceProfile 
Status Controller", func() { nodeClass.Spec.Role = "" nodeClass.Spec.InstanceProfile = lo.ToPtr("test-instance-profile") ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(awsEnv.IAMAPI.CreateInstanceProfileBehavior.Calls()).To(BeZero()) Expect(awsEnv.IAMAPI.AddRoleToInstanceProfileBehavior.Calls()).To(BeZero()) diff --git a/pkg/controllers/nodeclass/status/launchtemplate_test.go b/pkg/controllers/nodeclass/launchtemplate_test.go similarity index 94% rename from pkg/controllers/nodeclass/status/launchtemplate_test.go rename to pkg/controllers/nodeclass/launchtemplate_test.go index b9273453053d..3b85ab92ceb7 100644 --- a/pkg/controllers/nodeclass/status/launchtemplate_test.go +++ b/pkg/controllers/nodeclass/launchtemplate_test.go @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package status_test +package nodeclass_test import ( "github.com/aws/aws-sdk-go-v2/service/eks" @@ -59,7 +59,7 @@ var _ = Describe("NodeClass Launch Template CIDR Resolution Controller", func() nodeClass.Spec.AMIFamily = lo.ToPtr(family) nodeClass.Spec.AMISelectorTerms = terms ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(awsEnv.LaunchTemplateProvider.ClusterCIDR.Load()).To(BeNil()) }, Entry(v1.AMIFamilyAL2, v1.AMIFamilyAL2, []v1.AMISelectorTerm{{Alias: "al2@latest"}}), @@ -72,7 +72,7 @@ var _ = Describe("NodeClass Launch Template CIDR Resolution Controller", func() nodeClass.Spec.AMIFamily = lo.ToPtr(v1.AMIFamilyAL2023) nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Alias: "al2023@latest"}} ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(lo.FromPtr(awsEnv.LaunchTemplateProvider.ClusterCIDR.Load())).To(Equal("10.100.0.0/16")) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.StatusConditions().IsTrue(status.ConditionReady)).To(BeTrue()) @@ -89,7 +89,7 @@ var _ = Describe("NodeClass Launch Template CIDR Resolution Controller", func() nodeClass.Spec.AMIFamily = lo.ToPtr(v1.AMIFamilyAL2023) nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Alias: "al2023@latest"}} ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(lo.FromPtr(awsEnv.LaunchTemplateProvider.ClusterCIDR.Load())).To(Equal("2001:db8::/64")) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.StatusConditions().IsTrue(status.ConditionReady)).To(BeTrue()) diff --git a/pkg/controllers/nodeclass/status/readiness.go b/pkg/controllers/nodeclass/readiness.go similarity index 98% rename from pkg/controllers/nodeclass/status/readiness.go rename to pkg/controllers/nodeclass/readiness.go index 828ae098010b..45b32ea91468 100644 --- a/pkg/controllers/nodeclass/status/readiness.go +++ b/pkg/controllers/nodeclass/readiness.go @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package status +package nodeclass import ( "context" diff --git a/pkg/controllers/nodeclass/status/readiness_test.go b/pkg/controllers/nodeclass/readiness_test.go similarity index 93% rename from pkg/controllers/nodeclass/status/readiness_test.go rename to pkg/controllers/nodeclass/readiness_test.go index ed6bd9d5ea91..563f50a355ab 100644 --- a/pkg/controllers/nodeclass/status/readiness_test.go +++ b/pkg/controllers/nodeclass/readiness_test.go @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package status_test +package nodeclass_test import ( "github.com/awslabs/operatorpkg/status" @@ -51,7 +51,7 @@ var _ = Describe("NodeClass Status Condition Controller", func() { }) It("should update status condition on nodeClass as Ready", func() { ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.Conditions).To(HaveLen(6)) Expect(nodeClass.StatusConditions().Get(status.ConditionReady).IsTrue()).To(BeTrue()) @@ -63,7 +63,7 @@ var _ = Describe("NodeClass Status Condition Controller", func() { }, } ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.StatusConditions().Get(status.ConditionReady).IsFalse()).To(BeTrue()) diff --git a/pkg/controllers/nodeclass/status/securitygroup.go b/pkg/controllers/nodeclass/securitygroup.go similarity index 99% rename from pkg/controllers/nodeclass/status/securitygroup.go rename to pkg/controllers/nodeclass/securitygroup.go index 5e0a21af0b29..dea2757f4aab 100644 --- a/pkg/controllers/nodeclass/status/securitygroup.go +++ b/pkg/controllers/nodeclass/securitygroup.go @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package status +package nodeclass import ( "context" diff --git a/pkg/controllers/nodeclass/status/securitygroup_test.go b/pkg/controllers/nodeclass/securitygroup_test.go similarity index 90% rename from pkg/controllers/nodeclass/status/securitygroup_test.go rename to pkg/controllers/nodeclass/securitygroup_test.go index 3d78bf69ab65..b2e421d54ea1 100644 --- a/pkg/controllers/nodeclass/status/securitygroup_test.go +++ b/pkg/controllers/nodeclass/securitygroup_test.go @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package status_test +package nodeclass_test import ( "github.com/samber/lo" @@ -50,7 +50,7 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { }) It("Should update EC2NodeClass status for Security Groups", func() { ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1.SecurityGroup{ { @@ -78,7 +78,7 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { }, } ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1.SecurityGroup{ { @@ -99,7 +99,7 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { }, } ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1.SecurityGroup{ { @@ -111,7 +111,7 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { }) It("Should update Security Groups status when the Security Groups selector gets updated by tags", func() { ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1.SecurityGroup{ { @@ -137,7 +137,7 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { }, } ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1.SecurityGroup{ { @@ -153,7 +153,7 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { }) It("Should update Security Groups status when the Security Groups selector gets updated by ids", func() { ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1.SecurityGroup{ { @@ -176,7 +176,7 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { }, } ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1.SecurityGroup{ { @@ -193,14 +193,14 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { }, } ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.SecurityGroups).To(BeNil()) 
Expect(nodeClass.StatusConditions().Get(v1.ConditionTypeSecurityGroupsReady).IsFalse()).To(BeTrue()) }) It("Should not resolve a invalid selectors for an updated Security Groups selector", func() { ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1.SecurityGroup{ { @@ -223,7 +223,7 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { }, } ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.SecurityGroups).To(BeNil()) Expect(nodeClass.StatusConditions().Get(v1.ConditionTypeSecurityGroupsReady).IsFalse()).To(BeTrue()) diff --git a/pkg/controllers/nodeclass/status/controller.go b/pkg/controllers/nodeclass/status/controller.go deleted file mode 100644 index 9845bf5a064b..000000000000 --- a/pkg/controllers/nodeclass/status/controller.go +++ /dev/null @@ -1,132 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package status - -import ( - "context" - - "go.uber.org/multierr" - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" - controllerruntime "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/karpenter/pkg/operator/injection" - - "sigs.k8s.io/karpenter/pkg/utils/result" - - "github.com/awslabs/operatorpkg/reasonable" - - v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" - "github.com/aws/karpenter-provider-aws/pkg/providers/amifamily" - "github.com/aws/karpenter-provider-aws/pkg/providers/instanceprofile" - "github.com/aws/karpenter-provider-aws/pkg/providers/launchtemplate" - "github.com/aws/karpenter-provider-aws/pkg/providers/securitygroup" - "github.com/aws/karpenter-provider-aws/pkg/providers/subnet" -) - -type nodeClassStatusReconciler interface { - Reconcile(context.Context, *v1.EC2NodeClass) (reconcile.Result, error) -} - -type Controller struct { - kubeClient client.Client - - ami *AMI - instanceprofile *InstanceProfile - subnet *Subnet - securitygroup *SecurityGroup - validation *Validation - readiness *Readiness //TODO : Remove this when we have sub status conditions -} - -func NewController(kubeClient client.Client, subnetProvider subnet.Provider, securityGroupProvider securitygroup.Provider, - amiProvider amifamily.Provider, instanceProfileProvider instanceprofile.Provider, launchTemplateProvider launchtemplate.Provider) *Controller { - return &Controller{ - kubeClient: kubeClient, - - ami: &AMI{amiProvider: amiProvider}, - subnet: &Subnet{subnetProvider: 
subnetProvider}, - securitygroup: &SecurityGroup{securityGroupProvider: securityGroupProvider}, - instanceprofile: &InstanceProfile{instanceProfileProvider: instanceProfileProvider}, - validation: &Validation{}, - readiness: &Readiness{launchTemplateProvider: launchTemplateProvider}, - } -} - -func (c *Controller) Reconcile(ctx context.Context, nodeClass *v1.EC2NodeClass) (reconcile.Result, error) { - ctx = injection.WithControllerName(ctx, "nodeclass.status") - - if !controllerutil.ContainsFinalizer(nodeClass, v1.TerminationFinalizer) { - stored := nodeClass.DeepCopy() - controllerutil.AddFinalizer(nodeClass, v1.TerminationFinalizer) - - // We use client.MergeFromWithOptimisticLock because patching a list with a JSON merge patch - // can cause races due to the fact that it fully replaces the list on a change - // Here, we are updating the finalizer list - if err := c.kubeClient.Patch(ctx, nodeClass, client.MergeFromWithOptions(stored, client.MergeFromWithOptimisticLock{})); err != nil { - if errors.IsConflict(err) { - return reconcile.Result{Requeue: true}, nil - } - return reconcile.Result{}, err - } - } - stored := nodeClass.DeepCopy() - - var results []reconcile.Result - var errs error - for _, reconciler := range []nodeClassStatusReconciler{ - c.ami, - c.subnet, - c.securitygroup, - c.instanceprofile, - c.validation, - c.readiness, - } { - res, err := reconciler.Reconcile(ctx, nodeClass) - errs = multierr.Append(errs, err) - results = append(results, res) - } - - if !equality.Semantic.DeepEqual(stored, nodeClass) { - // We use client.MergeFromWithOptimisticLock because patching a list with a JSON merge patch - // can cause races due to the fact that it fully replaces the list on a change - // Here, we are updating the status condition list - if err := c.kubeClient.Status().Patch(ctx, nodeClass, client.MergeFromWithOptions(stored, client.MergeFromWithOptimisticLock{})); err != nil { - if errors.IsConflict(err) { - return reconcile.Result{Requeue: true}, nil - } - errs = multierr.Append(errs, client.IgnoreNotFound(err)) - } - } - if errs != nil { - return reconcile.Result{}, errs - } - return result.Min(results...), nil -} - -func (c *Controller) Register(_ context.Context, m manager.Manager) error { - return controllerruntime.NewControllerManagedBy(m). - Named("nodeclass.status"). - For(&v1.EC2NodeClass{}). - WithOptions(controller.Options{ - RateLimiter: reasonable.RateLimiter(), - MaxConcurrentReconciles: 10, - }). - Complete(reconcile.AsReconciler(m.GetClient(), c)) -} diff --git a/pkg/controllers/nodeclass/status/suite_test.go b/pkg/controllers/nodeclass/status/suite_test.go deleted file mode 100644 index 97ee37a51e07..000000000000 --- a/pkg/controllers/nodeclass/status/suite_test.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package status_test - -import ( - "context" - "testing" - - "sigs.k8s.io/karpenter/pkg/test/v1alpha1" - - coreoptions "sigs.k8s.io/karpenter/pkg/operator/options" - coretest "sigs.k8s.io/karpenter/pkg/test" - - "github.com/aws/karpenter-provider-aws/pkg/apis" - v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" - "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass/status" - "github.com/aws/karpenter-provider-aws/pkg/operator/options" - "github.com/aws/karpenter-provider-aws/pkg/test" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - . "sigs.k8s.io/karpenter/pkg/test/expectations" - . "sigs.k8s.io/karpenter/pkg/utils/testing" -) - -var ctx context.Context -var env *coretest.Environment -var awsEnv *test.Environment -var nodeClass *v1.EC2NodeClass -var statusController *status.Controller - -func TestAPIs(t *testing.T) { - ctx = TestContextWithLogger(t) - RegisterFailHandler(Fail) - RunSpecs(t, "EC2NodeClass") -} - -var _ = BeforeSuite(func() { - env = coretest.NewEnvironment(coretest.WithCRDs(test.RemoveNodeClassTagValidation(apis.CRDs)...), coretest.WithCRDs(v1alpha1.CRDs...), coretest.WithFieldIndexers(coretest.NodeClaimNodeClassRefFieldIndexer(ctx))) - ctx = coreoptions.ToContext(ctx, coretest.Options()) - ctx = options.ToContext(ctx, test.Options()) - awsEnv = test.NewEnvironment(ctx, env) - - statusController = status.NewController( - env.Client, - awsEnv.SubnetProvider, - awsEnv.SecurityGroupProvider, - awsEnv.AMIProvider, - awsEnv.InstanceProfileProvider, - awsEnv.LaunchTemplateProvider, - ) -}) - -var _ = AfterSuite(func() { - Expect(env.Stop()).To(Succeed(), "Failed to stop environment") -}) - -var _ = BeforeEach(func() { - ctx = coreoptions.ToContext(ctx, coretest.Options()) - nodeClass = test.EC2NodeClass() - awsEnv.Reset() -}) - -var _ = AfterEach(func() { - ExpectCleanedUp(ctx, env.Client) -}) diff --git a/pkg/controllers/nodeclass/status/subnet.go b/pkg/controllers/nodeclass/subnet.go similarity index 99% rename from pkg/controllers/nodeclass/status/subnet.go rename to pkg/controllers/nodeclass/subnet.go index 7c64ba1be264..5ba01e99b106 100644 --- a/pkg/controllers/nodeclass/status/subnet.go +++ b/pkg/controllers/nodeclass/subnet.go @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package status +package nodeclass import ( "context" diff --git a/pkg/controllers/nodeclass/status/subnet_test.go b/pkg/controllers/nodeclass/subnet_test.go similarity index 91% rename from pkg/controllers/nodeclass/status/subnet_test.go rename to pkg/controllers/nodeclass/subnet_test.go index 0b88e90b66f7..5770c1a351fc 100644 --- a/pkg/controllers/nodeclass/status/subnet_test.go +++ b/pkg/controllers/nodeclass/subnet_test.go @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package status_test +package nodeclass_test import ( "github.com/aws/aws-sdk-go-v2/aws" @@ -53,7 +53,7 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { }) It("Should update EC2NodeClass status for Subnets", func() { ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.Subnets).To(Equal([]v1.Subnet{ { @@ -86,7 +86,7 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { {SubnetId: aws.String("subnet-test3"), AvailabilityZone: aws.String("test-zone-1c"), AvailabilityZoneId: aws.String("tstz1-1c"), AvailableIpAddressCount: aws.Int32(50)}, }}) ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.Subnets).To(Equal([]v1.Subnet{ { @@ -117,7 +117,7 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { }, } ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.Subnets).To(Equal([]v1.Subnet{ { @@ -140,7 +140,7 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { }, } ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.Subnets).To(Equal([]v1.Subnet{ { @@ -153,7 +153,7 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { }) It("Should update Subnet status when the Subnet selector gets updated by tags", func() { ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.Subnets).To(Equal([]v1.Subnet{ { @@ -191,7 +191,7 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { }, } ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.Subnets).To(Equal([]v1.Subnet{ { @@ -209,7 +209,7 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { }) It("Should update Subnet status when the Subnet selector gets updated by ids", func() { ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.Subnets).To(Equal([]v1.Subnet{ { @@ -240,7 +240,7 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { }, } ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.Subnets).To(Equal([]v1.Subnet{ { @@ -258,14 +258,14 @@ var _ = Describe("NodeClass 
Subnet Status Controller", func() { }, } ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.Subnets).To(BeNil()) Expect(nodeClass.StatusConditions().Get(v1.ConditionTypeSubnetsReady).IsFalse()).To(BeTrue()) }) It("Should not resolve a invalid selectors for an updated subnet selector", func() { ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.Subnets).To(Equal([]v1.Subnet{ { @@ -296,7 +296,7 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { }, } ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.Subnets).To(BeNil()) Expect(nodeClass.StatusConditions().Get(v1.ConditionTypeSubnetsReady).IsFalse()).To(BeTrue()) diff --git a/pkg/controllers/nodeclass/termination/suite_test.go b/pkg/controllers/nodeclass/suite_test.go similarity index 85% rename from pkg/controllers/nodeclass/termination/suite_test.go rename to pkg/controllers/nodeclass/suite_test.go index 2ea995d2f1e2..13e96713ced4 100644 --- a/pkg/controllers/nodeclass/termination/suite_test.go +++ b/pkg/controllers/nodeclass/suite_test.go @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package termination_test +package nodeclass_test import ( "context" @@ -37,7 +37,7 @@ import ( "github.com/aws/karpenter-provider-aws/pkg/apis" v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" - "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass/termination" + "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass" "github.com/aws/karpenter-provider-aws/pkg/fake" "github.com/aws/karpenter-provider-aws/pkg/operator/options" "github.com/aws/karpenter-provider-aws/pkg/test" @@ -51,7 +51,8 @@ import ( var ctx context.Context var env *coretest.Environment var awsEnv *test.Environment -var terminationController *termination.Controller +var nodeClass *v1.EC2NodeClass +var controller *nodeclass.Controller func TestAPIs(t *testing.T) { ctx = TestContextWithLogger(t) @@ -60,12 +61,19 @@ func TestAPIs(t *testing.T) { } var _ = BeforeSuite(func() { - env = coretest.NewEnvironment(coretest.WithCRDs(apis.CRDs...), coretest.WithCRDs(v1alpha1.CRDs...), coretest.WithFieldIndexers(coretest.NodeClaimNodeClassRefFieldIndexer(ctx))) + env = coretest.NewEnvironment(coretest.WithCRDs(test.RemoveNodeClassTagValidation(apis.CRDs)...), coretest.WithCRDs(v1alpha1.CRDs...), coretest.WithFieldIndexers(coretest.NodeClaimNodeClassRefFieldIndexer(ctx))) ctx = coreoptions.ToContext(ctx, coretest.Options()) ctx = options.ToContext(ctx, test.Options()) awsEnv = test.NewEnvironment(ctx, env) - terminationController = termination.NewController(env.Client, events.NewRecorder(&record.FakeRecorder{}), awsEnv.InstanceProfileProvider, awsEnv.LaunchTemplateProvider) + controller = nodeclass.NewController( + env.Client, events.NewRecorder(&record.FakeRecorder{}), + awsEnv.SubnetProvider, + awsEnv.SecurityGroupProvider, + awsEnv.AMIProvider, + awsEnv.InstanceProfileProvider, + 
awsEnv.LaunchTemplateProvider, + ) }) var _ = AfterSuite(func() { @@ -74,6 +82,7 @@ var _ = AfterSuite(func() { var _ = BeforeEach(func() { ctx = coreoptions.ToContext(ctx, coretest.Options()) + nodeClass = test.EC2NodeClass() awsEnv.Reset() }) @@ -82,7 +91,6 @@ var _ = AfterEach(func() { }) var _ = Describe("NodeClass Termination", func() { - var nodeClass *v1.EC2NodeClass var profileName string BeforeEach(func() { nodeClass = test.EC2NodeClass(v1.EC2NodeClass{ @@ -115,11 +123,11 @@ var _ = Describe("NodeClass Termination", func() { Expect(ok).To(BeTrue()) controllerutil.AddFinalizer(nodeClass, v1.TerminationFinalizer) ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(env.Client.Delete(ctx, nodeClass)).To(Succeed()) awsEnv.EC2API.NextError.Set(fmt.Errorf("delete Launch Template Error")) - _ = ExpectObjectReconcileFailed(ctx, env.Client, terminationController, nodeClass) + _ = ExpectObjectReconcileFailed(ctx, env.Client, controller, nodeClass) ExpectExists(ctx, env.Client, nodeClass) }) It("should not delete the launch template not associated with the nodeClass", func() { @@ -129,10 +137,10 @@ var _ = Describe("NodeClass Termination", func() { Expect(ok).To(BeTrue()) controllerutil.AddFinalizer(nodeClass, v1.TerminationFinalizer) ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(env.Client.Delete(ctx, nodeClass)).To(Succeed()) - ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) _, ok = awsEnv.EC2API.LaunchTemplates.Load(launchTemplateName) Expect(ok).To(BeTrue()) ExpectNotFound(ctx, env.Client, nodeClass) @@ -148,9 +156,9 @@ var _ = Describe("NodeClass Termination", func() { Expect(ok).To(BeTrue()) controllerutil.AddFinalizer(nodeClass, v1.TerminationFinalizer) ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(env.Client.Delete(ctx, nodeClass)).To(Succeed()) - ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) _, ok = awsEnv.EC2API.LaunchTemplates.Load(ltName1) Expect(ok).To(BeFalse()) _, ok = awsEnv.EC2API.LaunchTemplates.Load(ltName2) @@ -171,11 +179,11 @@ var _ = Describe("NodeClass Termination", func() { } controllerutil.AddFinalizer(nodeClass, v1.TerminationFinalizer) ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(1)) Expect(env.Client.Delete(ctx, nodeClass)).To(Succeed()) - ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(0)) ExpectNotFound(ctx, env.Client, nodeClass) }) @@ -187,10 +195,10 @@ var _ = Describe("NodeClass Termination", func() { } controllerutil.AddFinalizer(nodeClass, v1.TerminationFinalizer) ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, 
nodeClass) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(1)) Expect(env.Client.Delete(ctx, nodeClass)).To(Succeed()) - ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(0)) ExpectNotFound(ctx, env.Client, nodeClass) }) @@ -200,7 +208,7 @@ var _ = Describe("NodeClass Termination", func() { ExpectApplied(ctx, env.Client, nodeClass) Expect(env.Client.Delete(ctx, nodeClass)).To(Succeed()) - ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(0)) ExpectNotFound(ctx, env.Client, nodeClass) }) @@ -232,11 +240,11 @@ var _ = Describe("NodeClass Termination", func() { } controllerutil.AddFinalizer(nodeClass, v1.TerminationFinalizer) ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(1)) Expect(env.Client.Delete(ctx, nodeClass)).To(Succeed()) - res := ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) + res := ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(res.RequeueAfter).To(Equal(time.Minute * 10)) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(1)) ExpectExists(ctx, env.Client, nodeClass) @@ -244,7 +252,7 @@ var _ = Describe("NodeClass Termination", func() { // Delete one of the NodeClaims // The NodeClass should still not delete ExpectDeleted(ctx, env.Client, nodeClaims[0]) - res = ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) + res = ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(res.RequeueAfter).To(Equal(time.Minute * 10)) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(1)) ExpectExists(ctx, env.Client, nodeClass) @@ -252,7 +260,7 @@ var _ = Describe("NodeClass Termination", func() { // Delete the last NodeClaim // The NodeClass should now delete ExpectDeleted(ctx, env.Client, nodeClaims[1]) - ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(0)) ExpectNotFound(ctx, env.Client, nodeClass) }) @@ -272,11 +280,11 @@ var _ = Describe("NodeClass Termination", func() { nodeClass.Spec.InstanceProfile = lo.ToPtr("test-instance-profile") controllerutil.AddFinalizer(nodeClass, v1.TerminationFinalizer) ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(1)) Expect(env.Client.Delete(ctx, nodeClass)).To(Succeed()) - ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(1)) ExpectNotFound(ctx, env.Client, nodeClass) diff --git a/pkg/controllers/nodeclass/status/validation.go b/pkg/controllers/nodeclass/validation.go similarity index 98% rename from pkg/controllers/nodeclass/status/validation.go rename to pkg/controllers/nodeclass/validation.go index 2e0b1eccedc4..a4a0cce16466 100644 --- a/pkg/controllers/nodeclass/status/validation.go +++ b/pkg/controllers/nodeclass/validation.go @@ -12,7 +12,7 @@ See the 
License for the specific language governing permissions and limitations under the License. */ -package status +package nodeclass import ( "context" diff --git a/pkg/controllers/nodeclass/status/validation_test.go b/pkg/controllers/nodeclass/validation_test.go similarity index 94% rename from pkg/controllers/nodeclass/status/validation_test.go rename to pkg/controllers/nodeclass/validation_test.go index 9f590db52c6b..24cd4e5ee644 100644 --- a/pkg/controllers/nodeclass/status/validation_test.go +++ b/pkg/controllers/nodeclass/validation_test.go @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package status_test +package nodeclass_test import ( status "github.com/awslabs/operatorpkg/status" @@ -55,7 +55,7 @@ var _ = Describe("NodeClass Validation Status Controller", func() { DescribeTable("should update status condition on nodeClass as NotReady when tag validation fails", func(illegalTag map[string]string) { nodeClass.Spec.Tags = illegalTag ExpectApplied(ctx, env.Client, nodeClass) - err := ExpectObjectReconcileFailed(ctx, env.Client, statusController, nodeClass) + err := ExpectObjectReconcileFailed(ctx, env.Client, controller, nodeClass) Expect(err).To(HaveOccurred()) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.Conditions).To(HaveLen(6)) @@ -72,7 +72,7 @@ var _ = Describe("NodeClass Validation Status Controller", func() { It("should update status condition as Ready when tags are valid", func() { nodeClass.Spec.Tags = map[string]string{} ExpectApplied(ctx, env.Client, nodeClass) - ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) + ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.StatusConditions().Get(v1.ConditionTypeValidationSucceeded).IsTrue()).To(BeTrue()) diff --git a/pkg/providers/launchtemplate/suite_test.go b/pkg/providers/launchtemplate/suite_test.go index dce50be2e14f..1290371a3ac6 100644 --- a/pkg/providers/launchtemplate/suite_test.go +++ b/pkg/providers/launchtemplate/suite_test.go @@ -62,7 +62,7 @@ import ( "github.com/aws/karpenter-provider-aws/pkg/apis" v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/cloudprovider" - "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass/status" + "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass" "github.com/aws/karpenter-provider-aws/pkg/fake" "github.com/aws/karpenter-provider-aws/pkg/operator/options" "github.com/aws/karpenter-provider-aws/pkg/providers/amifamily" @@ -81,6 +81,7 @@ var fakeClock *clock.FakeClock var prov *provisioning.Provisioner var cluster *state.Cluster var cloudProvider *cloudprovider.CloudProvider +var recorder events.Recorder func TestAWS(t *testing.T) { ctx = TestContextWithLogger(t) @@ -96,10 +97,11 @@ var _ = BeforeSuite(func() { awsEnv = test.NewEnvironment(ctx, env) fakeClock = &clock.FakeClock{} - cloudProvider = cloudprovider.New(awsEnv.InstanceTypesProvider, awsEnv.InstanceProvider, events.NewRecorder(&record.FakeRecorder{}), + recorder = events.NewRecorder(&record.FakeRecorder{}) + cloudProvider = cloudprovider.New(awsEnv.InstanceTypesProvider, awsEnv.InstanceProvider, recorder, env.Client, awsEnv.AMIProvider, awsEnv.SecurityGroupProvider) cluster = state.NewCluster(fakeClock, env.Client, cloudProvider) - prov = provisioning.NewProvisioner(env.Client, events.NewRecorder(&record.FakeRecorder{}), cloudProvider, cluster, fakeClock) 
+ prov = provisioning.NewProvisioner(env.Client, recorder, cloudProvider, cluster, fakeClock) }) var _ = AfterSuite(func() { @@ -2025,7 +2027,7 @@ essential = true nodeClass.Spec.AMIFamily = lo.ToPtr(v1.AMIFamilyCustom) nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}} ExpectApplied(ctx, env.Client, nodeClass) - controller := status.NewController(env.Client, awsEnv.SubnetProvider, awsEnv.SecurityGroupProvider, awsEnv.AMIProvider, awsEnv.InstanceProfileProvider, awsEnv.LaunchTemplateProvider) + controller := nodeclass.NewController(env.Client, recorder, awsEnv.SubnetProvider, awsEnv.SecurityGroupProvider, awsEnv.AMIProvider, awsEnv.InstanceProfileProvider, awsEnv.LaunchTemplateProvider) ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) nodePool.Spec.Template.Spec.Requirements = []karpv1.NodeSelectorRequirementWithMinValues{ { diff --git a/website/content/en/preview/upgrading/upgrade-guide.md b/website/content/en/preview/upgrading/upgrade-guide.md index b3eb5ebed69f..26e8cda46f83 100644 --- a/website/content/en/preview/upgrading/upgrade-guide.md +++ b/website/content/en/preview/upgrading/upgrade-guide.md @@ -46,10 +46,11 @@ Karpenter `1.1.0` drops the support for `v1beta1` APIs. {{% /alert %}} * We have recently updated the reason label on `karpenter_voluntary_disruption_queue_failures_total` and `karpenter_nodeclaims_disrupted_total` from camel case to snake case. Therefore, the reason label values on those metrics have now been updated as follows: -- Drifted -> drifted -- Empty -> empty -- Expired -> expired -- Underutilized -> underutilized + - Drifted -> drifted + - Empty -> empty + - Expired -> expired + - Underutilized -> underutilized +* The NodeClass status and termination controllers have been merged into a single `nodeclass` controller. If you rely on logs or metrics for the `nodeclass.termination` or `nodeclass.status` controllers, make sure you update them to reference the new `nodeclass` controller. ### Upgrading to `1.1.0`+ From c24b63444c4cbe4661808a58c773f007ef46b96f Mon Sep 17 00:00:00 2001 From: Andrii Omelianenko Date: Thu, 16 Jan 2025 20:31:59 +0200 Subject: [PATCH 16/18] docs: fix graceful-node-shutdown url reference (#7605) Co-authored-by: Andrii Omelianenko --- designs/interruption-handling.md | 2 +- website/content/en/docs/concepts/disruption.md | 2 +- website/content/en/preview/concepts/disruption.md | 2 +- website/content/en/v0.32/concepts/disruption.md | 2 +- website/content/en/v0.37/concepts/disruption.md | 2 +- website/content/en/v1.0/concepts/disruption.md | 2 +- website/content/en/v1.1/concepts/disruption.md | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/designs/interruption-handling.md b/designs/interruption-handling.md index 33be4c55b392..35bdacb1c006 100644 --- a/designs/interruption-handling.md +++ b/designs/interruption-handling.md @@ -146,7 +146,7 @@ The simplest option is to include [NTH IMDS mode](https://quip-amazon.com/EUgPAQ **3B: Build a System Daemon (nthd)** -An option to transparently handle spot interruption notifications is to build a system daemon in a separate repo that performs the IMDS monitoring and triggers an instance shutdown when an interruption is observed. This would rely on K8s’ new [graceful shutdown](https://kubernetes.io/docs/concepts/architecture/nodes/#graceful-node-shutdown) feature which went beta in K8s 1.21.
+An option to transparently handle spot interruption notifications is to build a system daemon in a separate repo that performs the IMDS monitoring and triggers an instance shutdown when an interruption is observed. This would rely on K8s’ new [graceful shutdown](https://kubernetes.io/docs/concepts/cluster-administration/node-shutdown/#graceful-node-shutdown) feature which went beta in K8s 1.21. With graceful shutdown, the kubelet registers [systemd-inhibitor-locks](https://www.freedesktop.org/wiki/Software/systemd/inhibit/) to stop the shutdown flow until locks are relinquished, which in this case would be when the kubelet has drained pods off of the node. Two parameters were added to the kubelet to tune the drain timeouts: `shutdownGracePeriod` & `shutdownGracePeriodCriticalPods` diff --git a/website/content/en/docs/concepts/disruption.md b/website/content/en/docs/concepts/disruption.md index 40e9e1f83825..6077be7ba9e8 100644 --- a/website/content/en/docs/concepts/disruption.md +++ b/website/content/en/docs/concepts/disruption.md @@ -28,7 +28,7 @@ Karpenter automatically discovers disruptable nodes and spins up replacements wh ### Termination Controller -When a Karpenter node is deleted, the Karpenter finalizer will block deletion and the APIServer will set the `DeletionTimestamp` on the node, allowing Karpenter to gracefully shutdown the node, modeled after [Kubernetes Graceful Node Shutdown](https://kubernetes.io/docs/concepts/architecture/nodes/#graceful-node-shutdown). Karpenter's graceful shutdown process will: +When a Karpenter node is deleted, the Karpenter finalizer will block deletion and the APIServer will set the `DeletionTimestamp` on the node, allowing Karpenter to gracefully shutdown the node, modeled after [Kubernetes Graceful Node Shutdown](https://kubernetes.io/docs/concepts/cluster-administration/node-shutdown/#graceful-node-shutdown). Karpenter's graceful shutdown process will: 1. Add the `karpenter.sh/disrupted:NoSchedule` taint to the node to prevent pods from scheduling to it. 2. Begin evicting the pods on the node with the [Kubernetes Eviction API](https://kubernetes.io/docs/concepts/scheduling-eviction/api-eviction/) to respect PDBs, while ignoring all [static pods](https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/), pods tolerating the `karpenter.sh/disrupted:NoSchedule` taint, and succeeded/failed pods. Wait for the node to be fully drained before proceeding to Step (3). * While waiting, if the underlying NodeClaim for the node no longer exists, remove the finalizer to allow the APIServer to delete the node, completing termination. diff --git a/website/content/en/preview/concepts/disruption.md b/website/content/en/preview/concepts/disruption.md index 40e9e1f83825..6077be7ba9e8 100644 --- a/website/content/en/preview/concepts/disruption.md +++ b/website/content/en/preview/concepts/disruption.md @@ -28,7 +28,7 @@ Karpenter automatically discovers disruptable nodes and spins up replacements wh ### Termination Controller -When a Karpenter node is deleted, the Karpenter finalizer will block deletion and the APIServer will set the `DeletionTimestamp` on the node, allowing Karpenter to gracefully shutdown the node, modeled after [Kubernetes Graceful Node Shutdown](https://kubernetes.io/docs/concepts/architecture/nodes/#graceful-node-shutdown). 
Karpenter's graceful shutdown process will: +When a Karpenter node is deleted, the Karpenter finalizer will block deletion and the APIServer will set the `DeletionTimestamp` on the node, allowing Karpenter to gracefully shutdown the node, modeled after [Kubernetes Graceful Node Shutdown](https://kubernetes.io/docs/concepts/cluster-administration/node-shutdown/#graceful-node-shutdown). Karpenter's graceful shutdown process will: 1. Add the `karpenter.sh/disrupted:NoSchedule` taint to the node to prevent pods from scheduling to it. 2. Begin evicting the pods on the node with the [Kubernetes Eviction API](https://kubernetes.io/docs/concepts/scheduling-eviction/api-eviction/) to respect PDBs, while ignoring all [static pods](https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/), pods tolerating the `karpenter.sh/disrupted:NoSchedule` taint, and succeeded/failed pods. Wait for the node to be fully drained before proceeding to Step (3). * While waiting, if the underlying NodeClaim for the node no longer exists, remove the finalizer to allow the APIServer to delete the node, completing termination. diff --git a/website/content/en/v0.32/concepts/disruption.md b/website/content/en/v0.32/concepts/disruption.md index b7ebec2b83df..1a7ef328cde5 100644 --- a/website/content/en/v0.32/concepts/disruption.md +++ b/website/content/en/v0.32/concepts/disruption.md @@ -26,7 +26,7 @@ Karpenter automatically discovers disruptable nodes and spins up replacements wh ### Termination Controller -When a Karpenter node is deleted, the Karpenter finalizer will block deletion and the APIServer will set the `DeletionTimestamp` on the node, allowing Karpenter to gracefully shutdown the node, modeled after [Kubernetes Graceful Node Shutdown](https://kubernetes.io/docs/concepts/architecture/nodes/#graceful-node-shutdown). Karpenter's graceful shutdown process will: +When a Karpenter node is deleted, the Karpenter finalizer will block deletion and the APIServer will set the `DeletionTimestamp` on the node, allowing Karpenter to gracefully shutdown the node, modeled after [Kubernetes Graceful Node Shutdown](https://kubernetes.io/docs/concepts/cluster-administration/node-shutdown/#graceful-node-shutdown). Karpenter's graceful shutdown process will: 1. Add the `karpenter.sh/disruption:NoSchedule` taint to the node to prevent pods from scheduling to it. 2. Begin evicting the pods on the node with the [Kubernetes Eviction API](https://kubernetes.io/docs/concepts/scheduling-eviction/api-eviction/) to respect PDBs, while ignoring all daemonset pods and [static pods](https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/). Wait for the node to be fully drained before proceeding to Step (3). * While waiting, if the underlying NodeClaim for the node no longer exists, remove the finalizer to allow the APIServer to delete the node, completing termination. 
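The drain-then-delete flow these docs describe is the same pattern implemented by the merged `nodeclass` controller's finalize path earlier in this series. Below is a minimal, illustrative sketch of that pattern — not the actual Karpenter source; the `hasDependents` and `cleanup` helpers and the finalizer string are hypothetical stand-ins — assuming only standard controller-runtime client APIs:

```go
package sketch

import (
	"context"
	"time"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// terminationFinalizer is a placeholder; the real name lives in the provider's
// v1 API package (v1.TerminationFinalizer in the diff above).
const terminationFinalizer = "example.com/termination"

// finalize sketches the drain-then-delete pattern: while dependent NodeClaims
// still reference the object, keep the finalizer and requeue; once they are
// gone, clean up cloud resources (instance profiles, launch templates) and
// release the finalizer so the APIServer can complete the deletion.
func finalize(
	ctx context.Context,
	kubeClient client.Client,
	obj client.Object,
	hasDependents func(context.Context) (bool, error), // hypothetical helper
	cleanup func(context.Context) error, // hypothetical helper
) (reconcile.Result, error) {
	busy, err := hasDependents(ctx)
	if err != nil {
		return reconcile.Result{}, err
	}
	if busy {
		// Requeue periodically until all dependents are removed, mirroring
		// the ten-minute requeue in the controller above.
		return reconcile.Result{RequeueAfter: 10 * time.Minute}, nil
	}
	if err := cleanup(ctx); err != nil {
		return reconcile.Result{}, err
	}
	stored := obj.DeepCopyObject().(client.Object)
	controllerutil.RemoveFinalizer(obj, terminationFinalizer)
	// Finalizers are a list, and a plain JSON merge patch replaces lists
	// wholesale, so patch with an optimistic lock to avoid racing writers.
	return reconcile.Result{}, kubeClient.Patch(ctx, obj,
		client.MergeFromWithOptions(stored, client.MergeFromWithOptimisticLock{}))
}
```

The optimistic-lock merge patch is the notable design choice here: finalizers live in a list, and a plain JSON merge patch replaces that list wholesale, so concurrent writers could otherwise clobber each other's updates.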
diff --git a/website/content/en/v0.37/concepts/disruption.md b/website/content/en/v0.37/concepts/disruption.md index 001de3a242ff..bad247672bcc 100644 --- a/website/content/en/v0.37/concepts/disruption.md +++ b/website/content/en/v0.37/concepts/disruption.md @@ -28,7 +28,7 @@ Karpenter automatically discovers disruptable nodes and spins up replacements wh ### Termination Controller -When a Karpenter node is deleted, the Karpenter finalizer will block deletion and the APIServer will set the `DeletionTimestamp` on the node, allowing Karpenter to gracefully shutdown the node, modeled after [Kubernetes Graceful Node Shutdown](https://kubernetes.io/docs/concepts/architecture/nodes/#graceful-node-shutdown). Karpenter's graceful shutdown process will: +When a Karpenter node is deleted, the Karpenter finalizer will block deletion and the APIServer will set the `DeletionTimestamp` on the node, allowing Karpenter to gracefully shutdown the node, modeled after [Kubernetes Graceful Node Shutdown](https://kubernetes.io/docs/concepts/cluster-administration/node-shutdown/#graceful-node-shutdown). Karpenter's graceful shutdown process will: 1. Add the `karpenter.sh/disruption=disrupting:NoSchedule` taint to the node to prevent pods from scheduling to it. 2. Begin evicting the pods on the node with the [Kubernetes Eviction API](https://kubernetes.io/docs/concepts/scheduling-eviction/api-eviction/) to respect PDBs, while ignoring all [static pods](https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/), pods tolerating the `karpenter.sh/disruption=disrupting:NoSchedule` taint, and succeeded/failed pods. Wait for the node to be fully drained before proceeding to Step (3). * While waiting, if the underlying NodeClaim for the node no longer exists, remove the finalizer to allow the APIServer to delete the node, completing termination. diff --git a/website/content/en/v1.0/concepts/disruption.md b/website/content/en/v1.0/concepts/disruption.md index 25e4db1ff0e6..c7396db6f8a1 100644 --- a/website/content/en/v1.0/concepts/disruption.md +++ b/website/content/en/v1.0/concepts/disruption.md @@ -28,7 +28,7 @@ Karpenter automatically discovers disruptable nodes and spins up replacements wh ### Termination Controller -When a Karpenter node is deleted, the Karpenter finalizer will block deletion and the APIServer will set the `DeletionTimestamp` on the node, allowing Karpenter to gracefully shutdown the node, modeled after [Kubernetes Graceful Node Shutdown](https://kubernetes.io/docs/concepts/architecture/nodes/#graceful-node-shutdown). Karpenter's graceful shutdown process will: +When a Karpenter node is deleted, the Karpenter finalizer will block deletion and the APIServer will set the `DeletionTimestamp` on the node, allowing Karpenter to gracefully shutdown the node, modeled after [Kubernetes Graceful Node Shutdown](https://kubernetes.io/docs/concepts/cluster-administration/node-shutdown/#graceful-node-shutdown). Karpenter's graceful shutdown process will: 1. Add the `karpenter.sh/disrupted:NoSchedule` taint to the node to prevent pods from scheduling to it. 2. Begin evicting the pods on the node with the [Kubernetes Eviction API](https://kubernetes.io/docs/concepts/scheduling-eviction/api-eviction/) to respect PDBs, while ignoring all [static pods](https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/), pods tolerating the `karpenter.sh/disrupted:NoSchedule` taint, and succeeded/failed pods. Wait for the node to be fully drained before proceeding to Step (3). 
* While waiting, if the underlying NodeClaim for the node no longer exists, remove the finalizer to allow the APIServer to delete the node, completing termination. diff --git a/website/content/en/v1.1/concepts/disruption.md b/website/content/en/v1.1/concepts/disruption.md index 40e9e1f83825..6077be7ba9e8 100644 --- a/website/content/en/v1.1/concepts/disruption.md +++ b/website/content/en/v1.1/concepts/disruption.md @@ -28,7 +28,7 @@ Karpenter automatically discovers disruptable nodes and spins up replacements wh ### Termination Controller -When a Karpenter node is deleted, the Karpenter finalizer will block deletion and the APIServer will set the `DeletionTimestamp` on the node, allowing Karpenter to gracefully shutdown the node, modeled after [Kubernetes Graceful Node Shutdown](https://kubernetes.io/docs/concepts/architecture/nodes/#graceful-node-shutdown). Karpenter's graceful shutdown process will: +When a Karpenter node is deleted, the Karpenter finalizer will block deletion and the APIServer will set the `DeletionTimestamp` on the node, allowing Karpenter to gracefully shutdown the node, modeled after [Kubernetes Graceful Node Shutdown](https://kubernetes.io/docs/concepts/cluster-administration/node-shutdown/#graceful-node-shutdown). Karpenter's graceful shutdown process will: 1. Add the `karpenter.sh/disrupted:NoSchedule` taint to the node to prevent pods from scheduling to it. 2. Begin evicting the pods on the node with the [Kubernetes Eviction API](https://kubernetes.io/docs/concepts/scheduling-eviction/api-eviction/) to respect PDBs, while ignoring all [static pods](https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/), pods tolerating the `karpenter.sh/disrupted:NoSchedule` taint, and succeeded/failed pods. Wait for the node to be fully drained before proceeding to Step (3). * While waiting, if the underlying NodeClaim for the node no longer exists, remove the finalizer to allow the APIServer to delete the node, completing termination. From e34bf5a08e947bd791dd843646640d98becd3784 Mon Sep 17 00:00:00 2001 From: Brandon Clodius Date: Thu, 16 Jan 2025 13:43:00 -0500 Subject: [PATCH 17/18] docs: Add notes about instanceStorePolicy that can help prevent deadlock (#7566) --- website/content/en/docs/concepts/nodeclasses.md | 6 ++++-- website/content/en/preview/concepts/nodeclasses.md | 6 ++++-- website/content/en/v0.37/concepts/nodeclasses.md | 6 ++++-- website/content/en/v1.0/concepts/nodeclasses.md | 6 ++++-- website/content/en/v1.1/concepts/nodeclasses.md | 6 ++++-- 5 files changed, 20 insertions(+), 10 deletions(-) diff --git a/website/content/en/docs/concepts/nodeclasses.md b/website/content/en/docs/concepts/nodeclasses.md index 696b3dc7c4c7..5eed3ec97158 100644 --- a/website/content/en/docs/concepts/nodeclasses.md +++ b/website/content/en/docs/concepts/nodeclasses.md @@ -974,9 +974,11 @@ spec: instanceStorePolicy: RAID0 ``` -This will set the allocatable ephemeral-storage of each node to the total size of the instance-store volume(s). +This will set the allocatable ephemeral-storage of each node to the total size of the instance-store volume(s). This configuration is likely to be useful for workloads that leverage dense storage instance types or require the low latency of instance-stores that are NVMe SSD-based. -The disks must be formatted & mounted in a RAID0 and be the underlying filesystem for the Kubelet & Containerd.
Instructions for each AMI family are listed below: +The disks must be formatted & mounted in a RAID0 and be the underlying filesystem for the Kubelet & Containerd. Even if you already configure your volumes with RAID0, Karpenter won't recognize this by default unless you set the `instanceStorePolicy` to `RAID0`. Without this, scheduling workloads that depend on ephemeral-storage from the instance-stores may result in a deadlock due to insufficient storage. + +Instructions for each AMI family are listed below: #### AL2 diff --git a/website/content/en/preview/concepts/nodeclasses.md b/website/content/en/preview/concepts/nodeclasses.md index 95fb29306f86..9388d487e265 100644 --- a/website/content/en/preview/concepts/nodeclasses.md +++ b/website/content/en/preview/concepts/nodeclasses.md @@ -974,9 +974,11 @@ spec: instanceStorePolicy: RAID0 ``` -This will set the allocatable ephemeral-storage of each node to the total size of the instance-store volume(s). +This will set the allocatable ephemeral-storage of each node to the total size of the instance-store volume(s). This configuration is likely to be useful for workloads that leverage dense storage instance types or require the low latency of instance-stores that are NVMe SSD-based. -The disks must be formatted & mounted in a RAID0 and be the underlying filesystem for the Kubelet & Containerd. Instructions for each AMI family are listed below: +The disks must be formatted & mounted in a RAID0 and be the underlying filesystem for the Kubelet & Containerd. Even if you already configure your volumes with RAID0, Karpenter won't recognize this by default unless you set the `instanceStorePolicy` to `RAID0`. Without this, scheduling workloads that depend on ephemeral-storage from the instance-stores may result in a deadlock due to insufficient storage. + +Instructions for each AMI family are listed below: #### AL2 diff --git a/website/content/en/v0.37/concepts/nodeclasses.md b/website/content/en/v0.37/concepts/nodeclasses.md index 5a4b9f80504e..cc63343471f2 100644 --- a/website/content/en/v0.37/concepts/nodeclasses.md +++ b/website/content/en/v0.37/concepts/nodeclasses.md @@ -682,9 +682,11 @@ spec: instanceStorePolicy: RAID0 ``` -This will set the allocatable ephemeral-storage of each node to the total size of the instance-store volume(s). +This will set the allocatable ephemeral-storage of each node to the total size of the instance-store volume(s). This configuration is likely to be useful for workloads that leverage dense storage instance types or require the low latency of instance-stores that are NVMe SSD-based. -The disks must be formatted & mounted in a RAID0 and be the underlying filesystem for the Kubelet & Containerd. Instructions for each AMI family are listed below: +The disks must be formatted & mounted in a RAID0 and be the underlying filesystem for the Kubelet & Containerd. Even if you already configure your volumes with RAID0, Karpenter won't recognize this by default unless you set the `instanceStorePolicy` to `RAID0`. Without this, scheduling workloads that depend on ephemeral-storage from the instance-stores may result in a deadlock due to insufficient storage.
+ +Instructions for each AMI family are listed below: #### AL2 diff --git a/website/content/en/v1.0/concepts/nodeclasses.md b/website/content/en/v1.0/concepts/nodeclasses.md index b6ecd70c9e68..ba95d287c8d7 100644 --- a/website/content/en/v1.0/concepts/nodeclasses.md +++ b/website/content/en/v1.0/concepts/nodeclasses.md @@ -975,9 +975,11 @@ spec: instanceStorePolicy: RAID0 ``` -This will set the allocatable ephemeral-storage of each node to the total size of the instance-store volume(s). +This will set the allocatable ephemeral-storage of each node to the total size of the instance-store volume(s). This configuration is likely to be useful for workloads that leverage dense storage instance types or require the low latency of instance-stores that are NVMe SSD-based. -The disks must be formatted & mounted in a RAID0 and be the underlying filesystem for the Kubelet & Containerd. Instructions for each AMI family are listed below: +The disks must be formatted & mounted in a RAID0 and be the underlying filesystem for the Kubelet & Containerd. Even if you already configure your volumes with RAID0, Karpenter won't recognize this by default unless you set the `instanceStorePolicy` to `RAID0`. Without this, scheduling workloads that depend on ephemeral-storage from the instance-stores may result in a deadlock due to insufficient storage. + +Instructions for each AMI family are listed below: #### AL2 diff --git a/website/content/en/v1.1/concepts/nodeclasses.md b/website/content/en/v1.1/concepts/nodeclasses.md index e8f8bcb5a4aa..2c7b5d9b048a 100644 --- a/website/content/en/v1.1/concepts/nodeclasses.md +++ b/website/content/en/v1.1/concepts/nodeclasses.md @@ -974,9 +974,11 @@ spec: instanceStorePolicy: RAID0 ``` -This will set the allocatable ephemeral-storage of each node to the total size of the instance-store volume(s). +This will set the allocatable ephemeral-storage of each node to the total size of the instance-store volume(s). This configuration is likely to be useful for workloads that leverage dense storage instance types or require the low latency of instance-stores that are NVMe SSD-based. -The disks must be formatted & mounted in a RAID0 and be the underlying filesystem for the Kubelet & Containerd. Instructions for each AMI family are listed below: +The disks must be formatted & mounted in a RAID0 and be the underlying filesystem for the Kubelet & Containerd. Even if you already configure your volumes with RAID0, Karpenter won't recognize this by default unless you set the `instanceStorePolicy` to `RAID0`. Without this, scheduling workloads that depend on ephemeral-storage from the instance-stores may result in a deadlock due to insufficient storage.
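To make the deadlock note above concrete: a workload that depends on instance-store-backed ephemeral storage requests it through the standard `ephemeral-storage` resource, as in the sketch below. The pod name, image, and 500Gi figure are hypothetical; the point is that such a request only becomes satisfiable once `instanceStorePolicy: RAID0` lets Karpenter count the instance-store capacity toward the node's allocatable ephemeral-storage.

```yaml
# Hypothetical pod needing instance-store-backed scratch space.
apiVersion: v1
kind: Pod
metadata:
  name: dense-storage-app
spec:
  containers:
    - name: app
      image: public.ecr.aws/docker/library/busybox:1.36
      command: ["sleep", "infinity"]
      resources:
        requests:
          # Large scratch request; only fits when allocatable
          # ephemeral-storage reflects the RAID0 instance-store volumes.
          ephemeral-storage: 500Gi
        limits:
          ephemeral-storage: 500Gi
```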
+ +Instructions for each AMI family are listed below: #### AL2 From 69ed8b9991fa9a9021d17017adfadb05ab4dcd49 Mon Sep 17 00:00:00 2001 From: Amanuel Engeda <74629455+engedaam@users.noreply.github.com> Date: Fri, 17 Jan 2025 21:19:12 -0800 Subject: [PATCH 18/18] chore: Use security group policy to manage `pod-eni` resource (#7607) --- go.mod | 43 +++++----- go.sum | 86 ++++++++++--------- test/pkg/environment/aws/environment.go | 3 + test/pkg/environment/common/setup.go | 3 + .../integration/extended_resources_test.go | 33 +++++-- 5 files changed, 96 insertions(+), 72 deletions(-) diff --git a/go.mod b/go.mod index 4585e620c502..1b77116fbc03 100644 --- a/go.mod +++ b/go.mod @@ -6,18 +6,19 @@ require ( github.com/Pallinder/go-randomdata v1.2.0 github.com/PuerkitoBio/goquery v1.10.1 github.com/avast/retry-go v3.0.0+incompatible - github.com/aws/aws-sdk-go-v2 v1.32.8 - github.com/aws/aws-sdk-go-v2/config v1.28.10 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 - github.com/aws/aws-sdk-go-v2/service/ec2 v1.198.3 - github.com/aws/aws-sdk-go-v2/service/eks v1.56.2 - github.com/aws/aws-sdk-go-v2/service/fis v1.31.4 - github.com/aws/aws-sdk-go-v2/service/iam v1.38.4 - github.com/aws/aws-sdk-go-v2/service/pricing v1.32.9 - github.com/aws/aws-sdk-go-v2/service/sqs v1.37.6 - github.com/aws/aws-sdk-go-v2/service/ssm v1.56.4 - github.com/aws/aws-sdk-go-v2/service/sts v1.33.6 - github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.29.10 + github.com/aws/amazon-vpc-resource-controller-k8s v1.6.3 + github.com/aws/aws-sdk-go-v2 v1.32.7 + github.com/aws/aws-sdk-go-v2/config v1.28.7 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.198.1 + github.com/aws/aws-sdk-go-v2/service/eks v1.56.0 + github.com/aws/aws-sdk-go-v2/service/fis v1.31.3 + github.com/aws/aws-sdk-go-v2/service/iam v1.38.3 + github.com/aws/aws-sdk-go-v2/service/pricing v1.32.8 + github.com/aws/aws-sdk-go-v2/service/sqs v1.37.4 + github.com/aws/aws-sdk-go-v2/service/ssm v1.56.2 + github.com/aws/aws-sdk-go-v2/service/sts v1.33.3 + github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.29.9 github.com/aws/karpenter-provider-aws/tools/kompat v0.0.0-20240410220356-6b868db24881 github.com/aws/smithy-go v1.22.1 github.com/awslabs/amazon-eks-ami/nodeadm v0.0.0-20240229193347-cfab22a10647 @@ -43,22 +44,22 @@ require ( k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 sigs.k8s.io/controller-runtime v0.19.4 - sigs.k8s.io/karpenter v1.1.2-0.20250110222631-380bcc932099 + sigs.k8s.io/karpenter v1.1.2-0.20250117235835-ff44f7325bf0 sigs.k8s.io/yaml v1.4.0 ) require ( github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/andybalholm/cascadia v1.3.3 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.51 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.48 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.8 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.24.9 // indirect - 
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.7 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.8 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.7 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -74,7 +75,7 @@ require ( github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect diff --git a/go.sum b/go.sum index 9b71515058ac..7b41d6f19bb2 100644 --- a/go.sum +++ b/go.sum @@ -8,48 +8,50 @@ github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kk github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA= github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= -github.com/aws/aws-sdk-go-v2 v1.32.8 h1:cZV+NUS/eGxKXMtmyhtYPJ7Z4YLoI/V8bkTdRZfYhGo= -github.com/aws/aws-sdk-go-v2 v1.32.8/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= -github.com/aws/aws-sdk-go-v2/config v1.28.10 h1:fKODZHfqQu06pCzR69KJ3GuttraRJkhlC8g80RZ0Dfg= -github.com/aws/aws-sdk-go-v2/config v1.28.10/go.mod h1:PvdxRYZ5Um9QMq9PQ0zHHNdtKK+he2NHtFCUFMXWXeg= -github.com/aws/aws-sdk-go-v2/credentials v1.17.51 h1:F/9Sm6Y6k4LqDesZDPJCLxQGXNNHd/ZtJiWd0lCZKRk= -github.com/aws/aws-sdk-go-v2/credentials v1.17.51/go.mod h1:TKbzCHm43AoPyA+iLGGcruXd4AFhF8tOmLex2R9jWNQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 h1:IBAoD/1d8A8/1aA8g4MBVtTRHhXRiNAgwdbo/xRM2DI= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23/go.mod h1:vfENuCM7dofkgKpYzuzf1VT1UKkA/YL3qanfBn7HCaA= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 h1:jSJjSBzw8VDIbWv+mmvBSP8ezsztMYJGH+eKqi9AmNs= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27/go.mod h1:/DAhLbFRgwhmvJdOfSm+WwikZrCuUJiA4WgJG0fTNSw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27 h1:l+X4K77Dui85pIj5foXDhPlnqcNRG2QUyvca300lXh8= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27/go.mod h1:KvZXSFEXm6x84yE8qffKvT3x8J5clWnVFXphpohhzJ8= +github.com/aws/amazon-vpc-resource-controller-k8s v1.6.3 h1:B4o15iZP8CQoyDjoNAoQiyEPabLsgxXLY5tv3uvvCic= +github.com/aws/amazon-vpc-resource-controller-k8s v1.6.3/go.mod h1:k4zcf2Dz/Mvrgo8NVzAEWP5HK4USqbJTD93pVVDxvc0= +github.com/aws/aws-sdk-go-v2 v1.32.7 h1:ky5o35oENWi0JYWUZkB7WYvVPP+bcRF5/Iq7JWSb5Rw= +github.com/aws/aws-sdk-go-v2 v1.32.7/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= +github.com/aws/aws-sdk-go-v2/config v1.28.7 h1:GduUnoTXlhkgnxTD93g1nv4tVPILbdNQOzav+Wpg7AE= +github.com/aws/aws-sdk-go-v2/config v1.28.7/go.mod h1:vZGX6GVkIE8uECSUHB6MWAUsd4ZcG2Yq/dMa4refR3M= +github.com/aws/aws-sdk-go-v2/credentials v1.17.48 h1:IYdLD1qTJ0zanRavulofmqut4afs45mOWEI+MzZtTfQ= +github.com/aws/aws-sdk-go-v2/credentials v1.17.48/go.mod h1:tOscxHN3CGmuX9idQ3+qbkzrjVIx32lqDSU1/0d/qXs= 
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 h1:kqOrpojG71DxJm/KDPO+Z/y1phm1JlC8/iT+5XRmAn8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22/go.mod h1:NtSFajXVVL8TA2QNngagVZmUtXciyrHOt7xgz4faS/M= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26 h1:I/5wmGMffY4happ8NOCuIUEWGUvvFp5NSeQcXl9RHcI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26/go.mod h1:FR8f4turZtNy6baO0KJ5FJUmXH/cSkI9fOngs0yl6mA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26 h1:zXFLuEuMMUOvEARXFUVJdfqZ4bvvSgdGRq/ATcrQxzM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26/go.mod h1:3o2Wpy0bogG1kyOPrgkXA8pgIfEEv0+m19O9D5+W8y8= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.198.3 h1:h5UPeMBMm29Vjk45QVnH2Qu2QMbzRrWUORwyGjzWQso= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.198.3/go.mod h1:WAFpTnWeO2BNfwpQ8LTTTx9l9/bTztMPrA8gkh41PvI= -github.com/aws/aws-sdk-go-v2/service/eks v1.56.2 h1:NXxglcZhHubtK2SgqavDGkbArM4NYI7QvLr+FpOL3Oo= -github.com/aws/aws-sdk-go-v2/service/eks v1.56.2/go.mod h1:KkH+D6VJmtIVGD9KTxB9yZu4hQP7s9kxWn8lLb7tmVg= -github.com/aws/aws-sdk-go-v2/service/fis v1.31.4 h1:368PLRSPKPYLcRwcUVOZ7/47cXbHK0L3BCukuuIgiJ4= -github.com/aws/aws-sdk-go-v2/service/fis v1.31.4/go.mod h1:dTr6z1mEz80NiibrjBsHZS0ahFcG/R0ZBzoRBkzcFUo= -github.com/aws/aws-sdk-go-v2/service/iam v1.38.4 h1:440YtmP8Cn6Qp7WHYfvz2/Xzmu1v1Vox/FJnzUDDQGM= -github.com/aws/aws-sdk-go-v2/service/iam v1.38.4/go.mod h1:oXqc4hmGhZpj06Zu8z+ahXhdbjq4Uw8pjN9flty0Ync= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.198.1 h1:YbNopxjd9baM83YEEmkaYHi+NuJt0AszeaSLqo0CVr0= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.198.1/go.mod h1:mwr3iRm8u1+kkEx4ftDM2Q6Yr0XQFBKrP036ng+k5Lk= +github.com/aws/aws-sdk-go-v2/service/eks v1.56.0 h1:x31cGGE/t/QkrHVh5m2uWvYwDiaDXpj88nh6OdnI5r0= +github.com/aws/aws-sdk-go-v2/service/eks v1.56.0/go.mod h1:kNUWaiotRWCnfQlprrxSMg8ALqbZyA9xLCwKXuLumSk= +github.com/aws/aws-sdk-go-v2/service/fis v1.31.3 h1:Pyde+VIhO71j5j+BXiwA2civiljvIRLkKFpCSEpw29E= +github.com/aws/aws-sdk-go-v2/service/fis v1.31.3/go.mod h1:lMzi+Vbnzlq6fPfIvHPWoX2LHKM2S2EOn5z6Vx71nmw= +github.com/aws/aws-sdk-go-v2/service/iam v1.38.3 h1:2sFIoFzU1IEL9epJWubJm9Dhrn45aTNEJuwsesaCGnk= +github.com/aws/aws-sdk-go-v2/service/iam v1.38.3/go.mod h1:KzlNINwfr/47tKkEhgk0r10/OZq3rjtyWy0txL3lM+I= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.8 h1:h56mLNgpqWIL7RZOIQO634Xr569bXGTlIE83t/a0LSE= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.8/go.mod h1:kK04550Xx95KI0sNmwoB7ciS9QkRwt9TojhoTMXyJdo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8 h1:cWno7lefSH6Pp+mSznagKCgfDGeZRin66UvYUqAkyeA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8/go.mod h1:tPD+VjU3ABTBoEJ3nctu5Nyg4P4yjqSH5bJGGkY4+XE= -github.com/aws/aws-sdk-go-v2/service/pricing v1.32.9 h1:DYynbLftAXgRuwumB9TFMi8/lxa6EMzDAWlIr7BIDAQ= -github.com/aws/aws-sdk-go-v2/service/pricing v1.32.9/go.mod h1:WJ2trRtCOyyg9g7xWi9CCYu0TKCzrtsLY60/zZfU9As= -github.com/aws/aws-sdk-go-v2/service/sqs v1.37.6 h1:0Xj5aASTw9X+KqfPNZY0OhvTKAY1jTJ2X0nhcvsxN5M= 
-github.com/aws/aws-sdk-go-v2/service/sqs v1.37.6/go.mod h1:C17b05qSo++jCYngf3cdhCrsxLyxZliBbmYUFfGxLZo= -github.com/aws/aws-sdk-go-v2/service/ssm v1.56.4 h1:oXh/PjaKtStu7RkaUtuKX6+h/OxXriMa9WyQQhylKG0= -github.com/aws/aws-sdk-go-v2/service/ssm v1.56.4/go.mod h1:IiHGbiFg4wVdEKrvFi/zxVZbjfEpgSe21N9RwyQFXCU= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.9 h1:YqtxripbjWb2QLyzRK9pByfEDvgg95gpC2AyDq4hFE8= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.9/go.mod h1:lV8iQpg6OLOfBnqbGMBKYjilBlf633qwHnBEiMSPoHY= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8 h1:6dBT1Lz8fK11m22R+AqfRsFn8320K0T5DTGxxOQBSMw= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8/go.mod h1:/kiBvRQXBc6xeJTYzhSdGvJ5vm1tjaDEjH+MSeRJnlY= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.6 h1:VwhTrsTuVn52an4mXx29PqRzs2Dvu921NpGk7y43tAM= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.6/go.mod h1:+8h7PZb3yY5ftmVLD7ocEoE98hdc8PoKS0H3wfx1dlc= -github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.29.10 h1:R7gIzuE1yvmo5W/BNXXqsZToILLLT1tC8/cYY0x4cRY= -github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.29.10/go.mod h1:Kq3W70z1J01kaVX32gzR37X00ciCCqzJUChUDMPCKl0= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.7 h1:EqGlayejoCRXmnVC6lXl6phCm9R2+k35e0gWsO9G5DI= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.7/go.mod h1:BTw+t+/E5F3ZnDai/wSOYM54WUVjSdewE7Jvwtb7o+w= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.7 h1:8eUsivBQzZHqe/3FE+cqwfH+0p5Jo8PFM/QYQSmeZ+M= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.7/go.mod h1:kLPQvGUmxn/fqiCrDeohwG33bq2pQpGeY62yRO6Nrh0= +github.com/aws/aws-sdk-go-v2/service/pricing v1.32.8 h1:R3X3UwwZKYLCNVVeJ+WLefvrjI5HonYCMlf40BYvJ8E= +github.com/aws/aws-sdk-go-v2/service/pricing v1.32.8/go.mod h1:4kkTK4zhY31emmt9VGgq3S+ElECNsiI5h6bqSBt71b0= +github.com/aws/aws-sdk-go-v2/service/sqs v1.37.4 h1:WpoMCoS4+qOkkuWQommvDRboKYzK91En6eXO/k5dXr0= +github.com/aws/aws-sdk-go-v2/service/sqs v1.37.4/go.mod h1:171mrsbgz6DahPMnLJzQiH3bXXrdsWhpE9USZiM19Lk= +github.com/aws/aws-sdk-go-v2/service/ssm v1.56.2 h1:MOxvXH2kRP5exvqJxAZ0/H9Ar51VmADJh95SgZE8u60= +github.com/aws/aws-sdk-go-v2/service/ssm v1.56.2/go.mod h1:RKWoqC9FlgMCkrfVOtgfqfwdaUIaq8H93UAt4xNaR0A= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.8 h1:CvuUmnXI7ebaUAhbJcDy9YQx8wHR69eZ9I7q5hszt/g= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.8/go.mod h1:XDeGv1opzwm8ubxddF0cgqkZWsyOtw4lr6dxwmb6YQg= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.7 h1:F2rBfNAL5UyswqoeWv9zs74N/NanhK16ydHW1pahX6E= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.7/go.mod h1:JfyQ0g2JG8+Krq0EuZNnRwX0mU0HrwY/tG6JNfcqh4k= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.3 h1:Xgv/hyNgvLda/M9l9qxXc4UFSgppnRczLxlMs5Ae/QY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.3/go.mod h1:5Gn+d+VaaRgsjewpMvGazt0WfcFO+Md4wLOuBfGR9Bc= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.29.9 h1:9zoIQ/6NA9b70dDvhYvi4IA3jcLDEu2UEALXLsvmQkI= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.29.9/go.mod h1:otxD6AyG1ABYxxhFX6eua+C4vntFe45igc3ake0mkuE= github.com/aws/karpenter-provider-aws/tools/kompat v0.0.0-20240410220356-6b868db24881 h1:m9rhsGhdepdQV96tZgfy68oU75AWAjOH8u65OefTjwA= github.com/aws/karpenter-provider-aws/tools/kompat v0.0.0-20240410220356-6b868db24881/go.mod h1:+Mk5k0b6HpKobxNq+B56DOhZ+I/NiPhd5MIBhQMSTSs= github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= @@ -96,8 +98,8 @@ github.com/gogo/protobuf v1.3.2 
h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= +github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -341,8 +343,8 @@ sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGF sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/karpenter v1.1.2-0.20250110222631-380bcc932099 h1:HpqyjVfGgeE3Sj/GAbvg3mriTyY9i8Ds351d/w8glAI= -sigs.k8s.io/karpenter v1.1.2-0.20250110222631-380bcc932099/go.mod h1:qizACS4OKCZ5a+8YVK+I8BwKK4fK6D7EhItejdbXRmI= +sigs.k8s.io/karpenter v1.1.2-0.20250117235835-ff44f7325bf0 h1:AAOsDTOzQIScWKWqwVEWsYCOkvtfqU9W+neUDnqYqCg= +sigs.k8s.io/karpenter v1.1.2-0.20250117235835-ff44f7325bf0/go.mod h1:OIjZ34eS462NJtQ2AW8nVBQX4/YKu1B41QJ17BaWBf4= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/test/pkg/environment/aws/environment.go b/test/pkg/environment/aws/environment.go index bf2957850ad1..caeb74ca2311 100644 --- a/test/pkg/environment/aws/environment.go +++ b/test/pkg/environment/aws/environment.go @@ -22,6 +22,7 @@ import ( coretest "sigs.k8s.io/karpenter/pkg/test" + "github.com/aws/amazon-vpc-resource-controller-k8s/apis/vpcresources/v1beta1" "github.com/aws/aws-sdk-go-v2/aws" config "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/ec2" @@ -37,6 +38,7 @@ import ( . 
"github.com/onsi/ginkgo/v2" "github.com/samber/lo" corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/utils/env" karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" @@ -49,6 +51,7 @@ import ( ) func init() { + lo.Must0(v1beta1.AddToScheme(scheme.Scheme)) // add scheme for the security group policy CRD karpv1.NormalizedLabels = lo.Assign(karpv1.NormalizedLabels, map[string]string{"topology.ebs.csi.aws.com/zone": corev1.LabelTopologyZone}) } diff --git a/test/pkg/environment/common/setup.go b/test/pkg/environment/common/setup.go index 91f65cdcbaa7..62e4de07dc45 100644 --- a/test/pkg/environment/common/setup.go +++ b/test/pkg/environment/common/setup.go @@ -35,6 +35,8 @@ import ( "sigs.k8s.io/karpenter/pkg/test" "sigs.k8s.io/karpenter/pkg/utils/pod" + "github.com/aws/amazon-vpc-resource-controller-k8s/apis/vpcresources/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/test/pkg/debug" @@ -60,6 +62,7 @@ var ( &corev1.Node{}, &karpv1.NodeClaim{}, &v1.EC2NodeClass{}, + &v1beta1.SecurityGroupPolicy{}, } ) diff --git a/test/suites/integration/extended_resources_test.go b/test/suites/integration/extended_resources_test.go index 49c0803fdc64..98a1b01a5952 100644 --- a/test/suites/integration/extended_resources_test.go +++ b/test/suites/integration/extended_resources_test.go @@ -35,6 +35,8 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "github.com/aws/amazon-vpc-resource-controller-k8s/apis/vpcresources/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" ) @@ -195,6 +197,17 @@ var _ = Describe("Extended Resources", func() { DeferCleanup(func() { env.ExpectPodENIDisabled() }) + env.ExpectCreated(nodeClass) // Creating the nodeclass first to discover the security groups + + // evenutally expect the status on the nodeclass to be hydrated + Eventually(func(g Gomega) { + nodeClass = env.ExpectExists(nodeClass).(*v1.EC2NodeClass) + g.Expect(len(nodeClass.Status.SecurityGroups)).To(BeNumerically(">", 0)) + }).Should(Succeed()) + securityGroupIDs := lo.Map(nodeClass.Status.SecurityGroups, func(sg v1.SecurityGroup, _ int) string { + return sg.ID + }) + numPods := 1 dep := test.Deployment(test.DeploymentOptions{ Replicas: int32(numPods), @@ -202,18 +215,20 @@ var _ = Describe("Extended Resources", func() { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"app": "large-app"}, }, - ResourceRequirements: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - "vpc.amazonaws.com/pod-eni": resource.MustParse("1"), - }, - Limits: corev1.ResourceList{ - "vpc.amazonaws.com/pod-eni": resource.MustParse("1"), - }, - }, }, }) selector := labels.SelectorFromSet(dep.Spec.Selector.MatchLabels) - env.ExpectCreated(nodeClass, nodePool, dep) + sgp := &v1beta1.SecurityGroupPolicy{ + ObjectMeta: test.NamespacedObjectMeta(), + Spec: v1beta1.SecurityGroupPolicySpec{ + PodSelector: metav1.SetAsLabelSelector(dep.Spec.Selector.MatchLabels), + SecurityGroups: v1beta1.GroupIds{ + Groups: securityGroupIDs, + }, + }, + } + + env.ExpectCreated(nodePool, dep, sgp) env.EventuallyExpectHealthyPodCount(selector, numPods) env.ExpectCreatedNodeCount("==", 1) env.EventuallyExpectInitializedNodeCount("==", 1)