17 changes: 9 additions & 8 deletions cmd/cloud-controller-manager/app/controllermanager.go
@@ -214,7 +214,7 @@ func RunWrapper(s *options.CloudControllerManagerOptions, c *cloudcontrollerconf
logger := log.Background().WithName("RunWrapper")
Contributor:
We shouldn't use the Background context when there is an existing context available. Maybe we can address this in a follow-up PR?
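For illustration, a minimal sketch of the suggested direction, assuming klog/v2's FromContext is acceptable here in place of the repo's pkg/log wrapper (only the WithName value is taken from the diff):

```go
return func(ctx context.Context) {
	// Derive the logger from the incoming context instead of Background(),
	// so names and key/value pairs attached upstream are preserved.
	logger := klog.FromContext(ctx).WithName("RunWrapper")
	logger.V(1).Info("cloud controller manager starting")
	// ... rest of RunWrapper unchanged ...
}
```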

return func(ctx context.Context) {
if !c.DynamicReloadingConfig.EnableDynamicReloading {
klog.V(1).Infof("using static initialization from config file %s", c.ComponentConfig.KubeCloudShared.CloudProvider.CloudConfigFile)
logger.V(1).Info("using static initialization from config file", "cloudConfigFile", c.ComponentConfig.KubeCloudShared.CloudProvider.CloudConfigFile)
if err := Run(ctx, c.Complete(), h); err != nil {
klog.Errorf("RunWrapper: failed to start cloud controller manager: %v", err)
os.Exit(1)
@@ -224,10 +224,10 @@ func RunWrapper(s *options.CloudControllerManagerOptions, c *cloudcontrollerconf

cloudConfigFile := c.ComponentConfig.KubeCloudShared.CloudProvider.CloudConfigFile
if cloudConfigFile != "" {
klog.V(1).Infof("RunWrapper: using dynamic initialization from config file %s, starting the file watcher", cloudConfigFile)
logger.V(1).Info("using dynamic initialization from config file, starting the file watcher", "cloudConfigFile", cloudConfigFile)
updateCh = dynamic.RunFileWatcherOrDie(cloudConfigFile)
} else {
klog.V(1).Infof("RunWrapper: using dynamic initialization from secret %s/%s, starting the secret watcher", c.DynamicReloadingConfig.CloudConfigSecretNamespace, c.DynamicReloadingConfig.CloudConfigSecretName)
logger.V(1).Info("using dynamic initialization from secret, starting the secret watcher", "namespace", c.DynamicReloadingConfig.CloudConfigSecretNamespace, "name", c.DynamicReloadingConfig.CloudConfigSecretName)
updateCh = dynamic.RunSecretWatcherOrDie(c)
}

@@ -236,7 +236,7 @@ func RunWrapper(s *options.CloudControllerManagerOptions, c *cloudcontrollerconf
for {
select {
case <-updateCh:
klog.V(2).Info("RunWrapper: detected the cloud config has been updated, re-constructing the cloud controller manager")
logger.V(2).Info("detected the cloud config has been updated, re-constructing the cloud controller manager")

// stop the previous goroutines
cancelFunc()
@@ -289,6 +289,7 @@ func shouldDisableCloudProvider(configFilePath string) (bool, error) {
}

func runAsync(s *options.CloudControllerManagerOptions, errCh chan error, h *controllerhealthz.MutableHealthzHandler) context.CancelFunc {
logger := log.Background().WithName("runAsync")
ctx, cancelFunc := context.WithCancel(context.Background())

go func() {
@@ -303,7 +304,7 @@ func runAsync(s *options.CloudControllerManagerOptions, errCh chan error, h *con
errCh <- err
}

klog.V(1).Infof("RunAsync: stopping")
logger.V(1).Info("stopping")
}()

return cancelFunc
@@ -415,7 +416,7 @@ func startControllers(ctx context.Context, controllerContext genericcontrollerma
continue
}

klog.V(1).Infof("Starting %q", controllerName)
logger.V(1).Info("Starting controller", "controller", controllerName)
ctrl, started, err := initFn(ctx, controllerContext, completedConfig, cloud)
if err != nil {
klog.Errorf("Error starting %q: %s", controllerName, err.Error())
@@ -448,11 +449,11 @@ func startControllers(ctx context.Context, controllerContext genericcontrollerma
klog.Fatalf("Failed to wait for apiserver being healthy: %v", err)
}

klog.V(2).Infof("startControllers: starting shared informers")
logger.V(2).Info("startControllers: starting shared informers")
completedConfig.SharedInformers.Start(ctx.Done())
controllerContext.InformerFactory.Start(ctx.Done())
<-ctx.Done()
klog.V(1).Infof("startControllers: received stopping signal, exiting")
logger.V(1).Info("startControllers: received stopping signal, exiting")

return nil
}
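The conversion in this file follows one pattern throughout: a named logger replaces printf-style klog calls, and variable data moves out of the message into key/value pairs. A minimal sketch of the before/after shape, assuming pkg/log's Background() returns a klog/v2-style logr.Logger (which the usage above implies):

```go
// Before: printf-style, data interpolated into the message string.
klog.V(1).Infof("RunWrapper: using dynamic initialization from config file %s, starting the file watcher", cloudConfigFile)

// After: constant message on a named logger, data as key/value pairs.
logger := log.Background().WithName("RunWrapper")
logger.V(1).Info("using dynamic initialization from config file, starting the file watcher",
	"cloudConfigFile", cloudConfigFile)
```

The WithName call makes the per-message function-name prefixes ("RunWrapper: ...") redundant, which is why the new messages drop them.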
4 changes: 3 additions & 1 deletion cmd/cloud-controller-manager/app/dynamic/secret_watcher.go
@@ -30,6 +30,7 @@ import (

cloudcontrollerconfig "sigs.k8s.io/cloud-provider-azure/cmd/cloud-controller-manager/app/config"
"sigs.k8s.io/cloud-provider-azure/cmd/cloud-controller-manager/app/options"
"sigs.k8s.io/cloud-provider-azure/pkg/log"
)

type SecretWatcher struct {
@@ -64,6 +65,7 @@ func RunSecretWatcherOrDie(c *cloudcontrollerconfig.Config) chan struct{} {
// NewSecretWatcher creates a SecretWatcher and a signal channel to indicate
// the specific secret has been updated
func NewSecretWatcher(informerFactory informers.SharedInformerFactory, secretName, secretNamespace string) (*SecretWatcher, chan struct{}) {
logger := log.Background().WithName("NewSecretWatcher")
secretInformer := informerFactory.Core().V1().Secrets()
updateSignal := make(chan struct{})

@@ -79,7 +81,7 @@

if strings.EqualFold(newSecret.Name, secretName) &&
strings.EqualFold(newSecret.Namespace, secretNamespace) {
klog.V(1).Infof("secret %s updated, sending the signal", newSecret.Name)
logger.V(1).Info("secret updated, sending the signal", "secretName", newSecret.Name)
updateSignal <- struct{}{}
}
},
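For orientation, a sketch of the consumer side of NewSecretWatcher; the client, resync interval, and secret coordinates below are illustrative, and in the real code RunSecretWatcherOrDie derives them from the controller config:

```go
import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

func waitForCloudConfigUpdate(kubeClient kubernetes.Interface) {
	factory := informers.NewSharedInformerFactory(kubeClient, 10*time.Minute)
	_, updateSignal := NewSecretWatcher(factory, "cloud-config-secret", "kube-system")

	stopCh := make(chan struct{})
	factory.Start(stopCh)            // starts the secret informer goroutine
	factory.WaitForCacheSync(stopCh) // waits for the initial cache sync

	<-updateSignal // unblocks once the watched secret is updated
}
```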
2 changes: 1 addition & 1 deletion cmd/cloud-node-manager/app/nodemanager.go
@@ -125,7 +125,7 @@ func Run(ctx context.Context, c *cloudnodeconfig.Config) error {
// startControllers starts the cloud specific controller loops.
func startControllers(ctx context.Context, c *cloudnodeconfig.Config, healthzHandler *controllerhealthz.MutableHealthzHandler) error {
logger := log.Background().WithName("startControllers")
klog.V(1).Infof("Starting cloud-node-manager...")
logger.V(1).Info("Starting cloud-node-manager...")

// Start the CloudNodeController
nodeController := nodemanager.NewCloudNodeController(
9 changes: 5 additions & 4 deletions health-probe-proxy/main.go
@@ -32,6 +32,7 @@ import (
)

func main() {
logger := klog.Background().WithName("main")
logs.InitLogs()
defer logs.FlushLogs()

@@ -43,17 +44,17 @@
targetUrl, _ := url.Parse(fmt.Sprintf("http://localhost:%s", strconv.Itoa(targetPort)))

proxy := httputil.NewSingleHostReverseProxy(targetUrl)
klog.Infof("target url: %s", targetUrl)
logger.Info("Target URL", "targetURL", targetUrl)

http.Handle("/", proxy)
klog.Infof("proxying from port %d to port %d", healthCheckPort, targetPort)
logger.Info("proxying between ports", "from", healthCheckPort, "to", targetPort)

listener, err := net.Listen("tcp", fmt.Sprintf("0.0.0.0:%s", strconv.Itoa(healthCheckPort)))
if err != nil {
klog.Errorf("failed to listen on port %d: %s", targetPort, err)
panic(err)
}
klog.Infof("listening on port %d", healthCheckPort)
logger.Info("listening on port", "port", healthCheckPort)

proxyListener := &proxyproto.Listener{Listener: listener}
defer func(proxyListener *proxyproto.Listener) {
@@ -64,7 +65,7 @@ }
}
}(proxyListener)

klog.Infof("listening on port with proxy listener %d", healthCheckPort)
logger.Info("listening on port with proxy listener", "port", healthCheckPort)
err = http.Serve(proxyListener, nil)
if err != nil {
klog.Errorf("failed to serve: %s", err)
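A note on the listener wrapping above: the proxyproto.Listener (the API matches github.com/pires/go-proxyproto) parses and strips a PROXY protocol header, if present, before handing each connection to http.Serve, so the HTTP layer sees the original client address rather than the load balancer's. A hedged sketch of what that buys a handler:

```go
// Handlers served through the proxyproto.Listener observe the source
// address carried in the PROXY header via r.RemoteAddr.
http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "probe from %s\n", r.RemoteAddr)
})
```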
11 changes: 7 additions & 4 deletions kubetest2-aks/deployer/build.go
@@ -22,7 +22,7 @@ import (

git "github.com/go-git/go-git/v5"
plumbing "github.com/go-git/go-git/v5/plumbing"
"k8s.io/klog"
"k8s.io/klog/v2"

"sigs.k8s.io/kubetest2/pkg/exec"
)
@@ -87,15 +87,17 @@ func (d *deployer) makeCloudProviderImages(path string) (string, error) {

// makeCloudProviderImagesByPath makes CCM or CNM images with repo path.
func (d *deployer) makeCloudProviderImagesByPath() (string, error) {
klog.Infof("Making Cloud provider images with repo path")
logger := klog.Background().WithName("makeCloudProviderImagesByPath")
logger.Info("Making Cloud provider images with repo path")

path := d.TargetPath
return d.makeCloudProviderImages(path)
}

// makeCloudProviderImagesByTag makes CCM or CNM images with repo refs.
func (d *deployer) makeCloudProviderImagesByTag(url string) (string, error) {
klog.Infof("Making Cloud provider images with refs")
logger := klog.Background().WithName("makeCloudProviderImagesByTag")
logger.Info("Making Cloud provider images with refs")
ccmPath := fmt.Sprintf("%s/cloud-provider-azure", gitClonePath)

repo, err := git.PlainClone(ccmPath, false, &git.CloneOptions{
@@ -118,6 +120,7 @@ }
}

func (d *deployer) Build() error {
logger := klog.Background().WithName("Build")
err := d.verifyBuildFlags()
if err != nil {
return fmt.Errorf("failed to verify build flags: %v", err)
@@ -134,7 +137,7 @@
return fmt.Errorf("failed to make Cloud provider image with tag %q: %v", d.TargetTag, err)
}
}
klog.Infof("cloud-provider-azure image with tag %q are ready", imageTag)
logger.Info("cloud-provider-azure image are ready", "imageTag", imageTag)
}

return nil
8 changes: 5 additions & 3 deletions kubetest2-aks/deployer/down.go
@@ -22,11 +22,12 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
"k8s.io/klog"
"k8s.io/klog/v2"
)

func (d *deployer) deleteResourceGroup(subscriptionID string, credential azcore.TokenCredential) error {
klog.Infof("Deleting resource group %q", d.ResourceGroupName)
logger := klog.Background().WithName("deleteResourceGroup")
logger.Info("Deleting resource group", "resourceGroup", d.ResourceGroupName)
rgClient, _ := armresources.NewResourceGroupsClient(subscriptionID, credential, nil)

poller, err := rgClient.BeginDelete(ctx, d.ResourceGroupName, nil)
@@ -40,6 +41,7 @@ }
}

func (d *deployer) Down() error {
logger := klog.Background().WithName("Down")
// Create a credentials object.
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
@@ -51,6 +53,6 @@
klog.Fatalf("failed to delete resource group %q: %v", d.ResourceGroupName, err)
}

klog.Infof("Resource group %q deleted", d.ResourceGroupName)
logger.Info("Resource group deleted", "resourceGroup", d.ResourceGroupName)
return nil
}
29 changes: 17 additions & 12 deletions kubetest2-aks/deployer/up.go
@@ -36,7 +36,7 @@ import (
armcontainerservicev2 "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"

"sigs.k8s.io/kubetest2/pkg/exec"
@@ -210,6 +210,7 @@ func (d *deployer) prepareCustomConfig() ([]byte, error) {

// prepareClusterConfig generates cluster config.
func (d *deployer) prepareClusterConfig(clusterID string) (*armcontainerservicev2.ManagedCluster, string, error) {
logger := klog.Background().WithName("prepareClusterConfig")
configFile, err := openPath(d.ConfigPath)
if err != nil {
return nil, "", fmt.Errorf("failed to read cluster config file at %q: %v", d.ConfigPath, err)
@@ -230,12 +231,12 @@
return nil, "", fmt.Errorf("failed to prepare custom config: %v", err)
}

klog.Infof("Customized configurations are: %s", string(customConfig))
logger.Info("Customized configurations", "config", string(customConfig))

encodedCustomConfig := base64.StdEncoding.EncodeToString(customConfig)
clusterConfig = strings.ReplaceAll(clusterConfig, "{CUSTOM_CONFIG}", encodedCustomConfig)

klog.Infof("AKS cluster config without credential: %s", clusterConfig)
logger.Info("AKS cluster config without credential", "config", clusterConfig)

mcConfig := &armcontainerservicev2.ManagedCluster{}
err = json.Unmarshal([]byte(clusterConfig), mcConfig)
@@ -248,10 +249,11 @@ }
}

func updateAzureCredential(mcConfig *armcontainerservicev2.ManagedCluster) {
klog.Infof("Updating Azure credentials to manage cluster resource group")
logger := klog.Background().WithName("updateAzureCredential")
logger.Info("Updating Azure credentials to manage cluster resource group")

if len(clientID) != 0 && len(clientSecret) != 0 {
klog.Infof("Service principal is used to manage cluster resource group")
logger.Info("Service principal is used to manage cluster resource group")
// Reset `Identity` in case managed identity is defined in templates while service principal is used.
mcConfig.Identity = nil
mcConfig.Properties.ServicePrincipalProfile = &armcontainerservicev2.ManagedClusterServicePrincipalProfile{
Expand All @@ -262,7 +264,7 @@ func updateAzureCredential(mcConfig *armcontainerservicev2.ManagedCluster) {
}
// Managed identity is preferable over service principal and picked by default when creating an AKS cluster.
// TODO(mainred): we can consider supporting user-assigned managed identity.
klog.Infof("System assigned managed identity is used to manage cluster resource group")
logger.Info("System assigned managed identity is used to manage cluster resource group")
// Reset `ServicePrincipalProfile` in case service principal is defined in templates while managed identity is used.
mcConfig.Properties.ServicePrincipalProfile = nil
systemAssignedIdentity := armcontainerservicev2.ResourceIdentityTypeSystemAssigned
@@ -273,7 +275,7 @@

// createAKSWithCustomConfig creates an AKS cluster with custom configuration.
func (d *deployer) createAKSWithCustomConfig() error {
klog.Infof("Creating the AKS cluster with custom config")
logger := klog.Background().WithName("createAKSWithCustomConfig")
logger.Info("Creating the AKS cluster with custom config")
clusterID := fmt.Sprintf("/subscriptions/%s/resourcegroups/%s/providers/Microsoft.ContainerService/managedClusters/%s", subscriptionID, d.ResourceGroupName, d.ClusterName)

mcConfig, encodedCustomConfig, err := d.prepareClusterConfig(clusterID)
@@ -307,13 +310,14 @@ func (d *deployer) createAKSWithCustomConfig() error {
return fmt.Errorf("failed to put resource: %v", err.Error())
}

klog.Infof("An AKS cluster %q in resource group %q is created", d.ClusterName, d.ResourceGroupName)
logger.Info("An AKS cluster is created", "clusterName", d.ClusterName, "resourceGroup", d.ResourceGroupName)
return nil
}

// getAKSKubeconfig gets kubeconfig of the AKS cluster and writes it to specific path.
func (d *deployer) getAKSKubeconfig() error {
klog.Infof("Retrieving AKS cluster's kubeconfig")
logger := klog.Background().WithName("getAKSKubeconfig")
logger.Info("Retrieving AKS cluster's kubeconfig")
client, err := armcontainerservicev2.NewManagedClustersClient(subscriptionID, cred, nil)
if err != nil {
return fmt.Errorf("failed to new managed cluster client with sub ID %q: %v", subscriptionID, err)
@@ -324,7 +328,7 @@ func (d *deployer) getAKSKubeconfig() error {
resp, err = client.ListClusterUserCredentials(ctx, d.ResourceGroupName, d.ClusterName, nil)
if err != nil {
if strings.Contains(err.Error(), "404 Not Found") {
klog.Infof("failed to list cluster user credentials for 1 minute, retrying")
logger.Info("failed to list cluster user credentials for 1 minute, retrying")
return false, nil
}
return false, fmt.Errorf("failed to list cluster user credentials with resource group name %q, cluster ID %q: %v", d.ResourceGroupName, d.ClusterName, err)
@@ -349,7 +353,7 @@
return fmt.Errorf("failed to write kubeconfig to %s", destPath)
}

klog.Infof("Succeeded in getting kubeconfig of cluster %q in resource group %q", d.ClusterName, d.ResourceGroupName)
logger.Info("Succeeded in getting kubeconfig of cluster", "clusterName", d.ClusterName, "resourceGroup", d.ResourceGroupName)
return nil
}

@@ -380,6 +384,7 @@ func (d *deployer) verifyUpFlags() error {
}

func (d *deployer) Up() error {
logger := klog.Background().WithName("Up")
if err := d.verifyUpFlags(); err != nil {
return fmt.Errorf("up flags are invalid: %v", err)
}
@@ -389,7 +394,7 @@
if err != nil {
return fmt.Errorf("failed to create the resource group: %v", err)
}
klog.Infof("Resource group %s created", *resourceGroup.ResourceGroup.ID)
logger.Info("Resource group created", "resourceGroupID", *resourceGroup.ResourceGroup.ID)

// Create the AKS cluster
if err := d.createAKSWithCustomConfig(); err != nil {
2 changes: 1 addition & 1 deletion kubetest2-aks/go.mod
@@ -13,6 +13,7 @@ require (
github.com/spf13/pflag v1.0.10
k8s.io/apimachinery v0.34.2
k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.130.1
k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d
sigs.k8s.io/kubetest2 v0.0.0-20250820195306-f71fd4c1cc1a
)
@@ -48,5 +49,4 @@ require (
golang.org/x/sys v0.38.0 // indirect
golang.org/x/text v0.31.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
)
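This hunk promotes k8s.io/klog/v2 from an indirect to a direct dependency, matching the new imports in the deployer files, while klog v1 remains for code not yet migrated. Because the two major versions have distinct module paths, they can coexist in one binary; a sketch (the alias is needed since both packages are named klog):

```go
import (
	klogv1 "k8s.io/klog" // v1: printf-style API, kept for unmigrated call sites
	"k8s.io/klog/v2"     // v2: adds Background() and structured logging
)

func example() {
	klogv1.Infof("printf-style message: %d", 42)
	klog.Background().WithName("example").Info("structured message", "answer", 42)
}
```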
6 changes: 4 additions & 2 deletions pkg/azclient/policy/retryaftermin/retryaftermin.go
@@ -23,6 +23,7 @@ import (
"time"

"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"

"k8s.io/klog/v2"
)

@@ -45,6 +46,7 @@ func (p *Policy) GetMinRetryAfter() time.Duration {

// Do implements the policy.Policy interface
func (p *Policy) Do(req *policy.Request) (*http.Response, error) {
logger := klog.Background().WithName("Do")
resp, err := req.Next()
// If the request failed or the status code is >= 300, return
if err != nil || resp == nil || resp.StatusCode >= 300 {
@@ -54,7 +56,7 @@
// Check if the response retry-after header is less than the minimum
overrideRetryAfter := func(header http.Header, headerName string, retryAfter time.Duration) {
if retryAfter < p.minRetryAfter {
klog.V(5).Infof("RetryAfterMinPolicy: retry-after value %s is less than minimum %s, removing retry-after header..", retryAfter, p.minRetryAfter)
logger.V(5).Info("RetryAfterMinPolicy: retry-after value is less than minimum, removing retry-after header", "retryAfter", retryAfter, "minimum", p.minRetryAfter)
header.Del(headerName)
}
}
@@ -76,7 +78,7 @@
// If the retry-after value is less than the minimum, remove it
overrideRetryAfter(resp.Header, headerName, retryDuration)
} else {
klog.V(5).Infof("RetryAfterMinPolicy: not modifying %s header with unrecognized format: %s", headerName, retryAfter)
logger.V(5).Info("RetryAfterMinPolicy: not modifying header with unrecognized format", "headerName", headerName, "unrecognized format", retryAfter)
}
}
}
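For context on how a policy like this gets attached: azcore-based clients accept custom per-retry policies through their client options. A sketch, assuming a NewPolicy constructor for this package (no constructor appears in this diff, so the name is hypothetical):

```go
import (
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"

	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/retryaftermin"
)

func clientOptionsWithMinRetryAfter() *arm.ClientOptions {
	return &arm.ClientOptions{
		ClientOptions: policy.ClientOptions{
			// Do() runs after the transport on every attempt and strips
			// Retry-After headers that fall below the configured minimum.
			PerRetryPolicies: []policy.Policy{
				retryaftermin.NewPolicy(10 * time.Second), // hypothetical constructor
			},
		},
	}
}
```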