diff --git a/cluster-autoscaler/cloudprovider/oci/common/oci_ref.go b/cluster-autoscaler/cloudprovider/oci/common/oci_ref.go index df5f3ec488e8..549aaef3b226 100644 --- a/cluster-autoscaler/cloudprovider/oci/common/oci_ref.go +++ b/cluster-autoscaler/cloudprovider/oci/common/oci_ref.go @@ -7,6 +7,8 @@ package common import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/instancepools/consts" + "k8s.io/klog/v2" + "strings" ) @@ -45,8 +47,10 @@ func NodeToOciRef(n *apiv1.Node) (OciRef, error) { func getNodeShape(node *apiv1.Node) string { // First check for the deprecated label if shape, ok := node.Labels[apiv1.LabelInstanceType]; ok { + klog.V(5).Infof("Extracting node shape %s from label %s", shape, apiv1.LabelInstanceType) return shape } else if shape, ok := node.Labels[apiv1.LabelInstanceTypeStable]; ok { + klog.V(5).Infof("Extracting node shape %s from label %s", shape, apiv1.LabelInstanceTypeStable) return shape } return "" @@ -56,8 +60,10 @@ func getNodeShape(node *apiv1.Node) string { func getNodeAZ(node *apiv1.Node) string { // First check for the deprecated label if az, ok := node.Labels[apiv1.LabelZoneFailureDomain]; ok { + klog.V(5).Infof("Extracting availability domain %s from label %s", az, apiv1.LabelZoneFailureDomain) return az } else if az, ok := node.Labels[apiv1.LabelTopologyZone]; ok { + klog.V(5).Infof("Extracting availability domain %s from label %s", az, apiv1.LabelTopologyZone) return az } return "" @@ -67,6 +73,7 @@ func getNodeAZ(node *apiv1.Node) string { func getNodeInternalAddress(node *apiv1.Node) string { for _, address := range node.Status.Addresses { if address.Type == apiv1.NodeInternalIP { + klog.V(5).Infof("Extracting node internal IP %s from node %s", address.Address, node.Name) return address.Address } } @@ -77,6 +84,7 @@ func getNodeInternalAddress(node *apiv1.Node) string { func getNodeExternalAddress(node *apiv1.Node) string { for _, address := range node.Status.Addresses { if address.Type 
== apiv1.NodeExternalIP { + klog.V(5).Infof("Extracting node external IP %s from node %s", address.Address, node.Name) return address.Address } } @@ -96,10 +104,12 @@ func getNodeInstancePoolID(node *apiv1.Node) string { poolIDSuffixLabel, _ := node.Labels[consts.InstancePoolIDLabelSuffix] if poolIDPrefixLabel != "" && poolIDSuffixLabel != "" { + klog.V(5).Infof("Extracting instance-pool %s from labels %s + %s", poolIDPrefixLabel+"."+poolIDSuffixLabel, consts.InstancePoolIDLabelPrefix, consts.InstancePoolIDLabelSuffix) return poolIDPrefixLabel + "." + poolIDSuffixLabel } poolIDAnnotation, _ := node.Annotations[consts.OciInstancePoolIDAnnotation] + klog.V(5).Infof("Extracting instance-pool %s from annotation %s", poolIDAnnotation, consts.OciInstancePoolIDAnnotation) return poolIDAnnotation } @@ -107,6 +117,7 @@ func getNodeInstancePoolID(node *apiv1.Node) string { func getNodeInstanceID(node *apiv1.Node) string { providerID := strings.TrimPrefix(node.Spec.ProviderID, "oci://") if len(providerID) != 0 { + klog.V(5).Infof("Extracting instance-id %s from .spec.providerID", providerID) return providerID } @@ -119,9 +130,11 @@ func getNodeInstanceID(node *apiv1.Node) string { instanceSuffixLabel, _ := node.Labels[consts.InstanceIDLabelSuffix] if instancePrefixLabel != "" && instanceSuffixLabel != "" { + klog.V(5).Infof("Extracting instance-id %s from labels %s + %s", instancePrefixLabel+"."+instanceSuffixLabel, consts.InstanceIDLabelPrefix, consts.InstanceIDLabelSuffix) return instancePrefixLabel + "." 
+ instanceSuffixLabel } instanceIDAnnotation, _ := node.Annotations[consts.OciInstanceIDAnnotation] + klog.V(5).Infof("Extracting instance-id %s from annotation %s", instanceIDAnnotation, consts.OciInstanceIDAnnotation) return instanceIDAnnotation } diff --git a/cluster-autoscaler/cloudprovider/oci/instancepools/oci_cloud_provider.go b/cluster-autoscaler/cloudprovider/oci/instancepools/oci_cloud_provider.go index 727148aa9442..76c60769f548 100644 --- a/cluster-autoscaler/cloudprovider/oci/instancepools/oci_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/oci/instancepools/oci_cloud_provider.go @@ -53,6 +53,7 @@ func (ocp *OciCloudProvider) NodeGroupForNode(n *apiv1.Node) (cloudprovider.Node ociRef, err := ocicommon.NodeToOciRef(n) if err != nil { + klog.V(4).Infof("NodeGroupForNode: ref conversion for node %s failed: %v", n.Name, err) return nil, err } @@ -61,9 +62,11 @@ func (ocp *OciCloudProvider) NodeGroupForNode(n *apiv1.Node) (cloudprovider.Node // this instance may not be a part of an instance pool, or it may be part of a instance pool that the autoscaler does not manage if errors.Cause(err) == errInstanceInstancePoolNotFound { // should not be processed by cluster autoscaler + klog.V(4).Infof("NodeGroupForNode: node %s is not a member of any of the specified instance-pool(s)", n.Name) return nil, nil } + klog.V(4).Infof("NodeGroupForNode: %s belongs to instance-pool %s", n.Name, ng.Id()) return ng, err } @@ -71,22 +74,21 @@ func (ocp *OciCloudProvider) NodeGroupForNode(n *apiv1.Node) (cloudprovider.Node func (ocp *OciCloudProvider) HasInstance(node *apiv1.Node) (bool, error) { instance, err := ocicommon.NodeToOciRef(node) if err != nil { + klog.V(4).Infof("HasInstance: ref conversion for node %s failed: %v", node.Name, err) return false, err } instancePool, err := ocp.poolManager.GetInstancePoolForInstance(instance) if err != nil { + klog.V(4).Infof("HasInstance: instance-pool check for node %s failed: %v", node.Name, err) return false, err } - 
instances, err := ocp.poolManager.GetInstancePoolNodes(*instancePool) - if err != nil { - return false, err + if instancePool == nil || instancePool.Id() == "" { + klog.V(4).Infof("HasInstance: node %s is not a member of any of the specified instance-pool(s)", node.Name) + return false, nil } - for _, i := range instances { - if i.Id == instance.InstanceID { - return true, nil - } - } - return false, nil + + klog.V(4).Infof("HasInstance: node %s belongs to instance-pool %s", node.Name, instancePool.Id()) + return true, nil } // Pricing returns pricing model for this cloud provider or error if not available. diff --git a/cluster-autoscaler/cloudprovider/oci/instancepools/oci_instance_pool_cache.go b/cluster-autoscaler/cloudprovider/oci/instancepools/oci_instance_pool_cache.go index 72bf533879dc..3837a2d4fd66 100644 --- a/cluster-autoscaler/cloudprovider/oci/instancepools/oci_instance_pool_cache.go +++ b/cluster-autoscaler/cloudprovider/oci/instancepools/oci_instance_pool_cache.go @@ -88,7 +88,7 @@ func (c *instancePoolCache) rebuild(staticInstancePools map[string]*InstancePool InstancePoolId: common.String(id), }) if err != nil { - klog.Errorf("get instance pool %s failed: %v", id, err) + klog.Errorf("get instance-pool %s failed: %v", id, err) return err } klog.V(6).Infof("GetInstancePool() response %v", getInstancePoolResp.InstancePool) @@ -105,6 +105,7 @@ func (c *instancePoolCache) rebuild(staticInstancePools map[string]*InstancePool Page: page, }) if err != nil { + klog.V(4).Infof("ListInstancePoolInstances for %s failed: %v", id, err) return err } @@ -118,7 +119,7 @@ func (c *instancePoolCache) rebuild(staticInstancePools map[string]*InstancePool // Compare instance pool's size with the latest number of InstanceSummaries. If found, look for unrecoverable // errors such as quota or capacity issues in scaling pool. 
if len(*c.instanceSummaryCache[id]) < *c.poolCache[id].Size { - klog.V(4).Infof("Instance pool %s has only %d instances created while requested count is %d. ", + klog.V(4).Infof("instance-pool %s has only %d instances created while requested count is %d. ", *getInstancePoolResp.InstancePool.DisplayName, len(*c.instanceSummaryCache[id]), *c.poolCache[id].Size) if getInstancePoolResp.LifecycleState != core.InstancePoolLifecycleStateRunning { @@ -166,6 +167,7 @@ func (c *instancePoolCache) removeInstance(instancePool InstancePoolNodeGroup, i klog.Warning("instanceID is not set - skipping removal.") return false } + klog.V(4).Infof("detaching instance %s from instance-pool: %v", instanceID, instancePool.Id()) var err error if strings.Contains(instanceID, consts.InstanceIDUnfulfilled) { @@ -213,7 +215,7 @@ func (c *instancePoolCache) findInstanceByDetails(ociInstance ocicommon.OciRef) if c.unownedInstances[ociInstance] { // We already know this instance is not part of a configured pool. Return early and avoid additional API calls. 
- klog.V(4).Infof("Node " + ociInstance.Name + " is known to not be a member of any of the specified instance pool(s)") + klog.V(4).Info("Node " + ociInstance.Name + " is known to not be a member of any of the specified instance-pool(s)") return nil, errInstanceInstancePoolNotFound } @@ -221,7 +223,7 @@ func (c *instancePoolCache) findInstanceByDetails(ociInstance ocicommon.OciRef) for _, nextInstancePool := range c.poolCache { // Skip searching instance pool if we happen tp know (prior labels) the pool ID and this is not it if (ociInstance.InstancePoolID != "") && (ociInstance.InstancePoolID != *nextInstancePool.Id) { - klog.V(5).Infof("skipping over instance pool %s since it is not the one we are looking for", *nextInstancePool.Id) + klog.V(5).Infof("skipping over instance-pool %s since it is not the one we are looking for (%s)", *nextInstancePool.Id, ociInstance.InstancePoolID) continue } @@ -236,6 +238,7 @@ func (c *instancePoolCache) findInstanceByDetails(ociInstance ocicommon.OciRef) listInstancePoolInstances, err := c.computeManagementClient.ListInstancePoolInstances(context.Background(), listInstancePoolInstancesReq) if err != nil { + klog.V(4).Infof("ListInstancePoolInstances for %s failed: %v", *nextInstancePool.Id, err) return nil, err } @@ -254,7 +257,7 @@ func (c *instancePoolCache) findInstanceByDetails(ociInstance ocicommon.OciRef) } // Skip this instance if we happen to know (prior labels) the instance ID and this is not it if (ociInstance.InstanceID != "") && (ociInstance.InstanceID != *poolMember.Id) { - klog.V(5).Infof("skipping over instance %s since it is not the one we are looking for", *poolMember.Id) + klog.V(5).Infof("skipping over instance %s since it is not the one we are looking for (%s)", *poolMember.Id, ociInstance.InstanceID) continue } @@ -285,7 +288,7 @@ func (c *instancePoolCache) findInstanceByDetails(ociInstance ocicommon.OciRef) if *poolMember.Id == ociInstance.InstanceID || (getVnicResp.Vnic.PrivateIp != nil && 
*getVnicResp.Vnic.PrivateIp == ociInstance.PrivateIPAddress) || (getVnicResp.Vnic.PublicIp != nil && *getVnicResp.Vnic.PublicIp == ociInstance.PublicIPAddress) { - klog.V(4).Info(*poolMember.DisplayName, " is a member of "+*nextInstancePool.Id) + klog.V(4).Infof("findInstanceByDetails: %s belongs to instance-pool %s", *poolMember.DisplayName, *nextInstancePool.Id) // Return a complete instance details. if ociInstance.Name == "" { ociInstance.Name = *poolMember.DisplayName @@ -307,7 +310,7 @@ func (c *instancePoolCache) findInstanceByDetails(ociInstance ocicommon.OciRef) } c.unownedInstances[ociInstance] = true - klog.V(4).Infof(ociInstance.Name + " is not a member of any of the specified instance pool(s)") + klog.V(4).Info("findInstanceByDetails node " + ociInstance.Name + " is not a member of any of the specified instance-pool(s)") return nil, errInstanceInstancePoolNotFound } @@ -321,7 +324,7 @@ func (c *instancePoolCache) getInstancePool(id string) (*core.InstancePool, erro func (c *instancePoolCache) getInstancePoolWithoutLock(id string) (*core.InstancePool, error) { instancePool := c.poolCache[id] if instancePool == nil { - return nil, errors.New("instance pool was not found in the cache") + return nil, errors.New("instance-pool was not found in the cache") } return instancePool, nil @@ -345,7 +348,7 @@ func (c *instancePoolCache) getInstanceSummaries(poolID string) (*[]core.Instanc func (c *instancePoolCache) getInstanceSummariesWithoutLock(poolID string) (*[]core.InstanceSummary, error) { instanceSummaries := c.instanceSummaryCache[poolID] if instanceSummaries == nil { - return nil, errors.New("instance summaries for instance pool id " + poolID + " were not found in cache") + return nil, errors.New("instance summaries for instance-pool id " + poolID + " were not found in cache") } return instanceSummaries, nil @@ -363,11 +366,13 @@ func (c *instancePoolCache) setSize(instancePoolID string, size int) error { if instancePoolID == "" { return 
errors.New("instance-pool is required") } + klog.V(4).Infof("adjusting size of instance-pool %s to: %d", instancePoolID, size) getInstancePoolResp, err := c.computeManagementClient.GetInstancePool(context.Background(), core.GetInstancePoolRequest{ InstancePoolId: common.String(instancePoolID), }) if err != nil { + klog.V(4).Infof("GetInstancePool for %s failed: %v", instancePoolID, err) return err } @@ -384,6 +389,7 @@ func (c *instancePoolCache) setSize(instancePoolID string, size int) error { UpdateInstancePoolDetails: updateDetails, }) if err != nil { + klog.V(4).Infof("UpdateInstancePool for %s failed: %v", instancePoolID, err) return err } @@ -425,7 +431,7 @@ func (c *instancePoolCache) waitForState(ctx context.Context, instancePoolID str InstancePoolId: common.String(instancePoolID), }) if err != nil { - klog.Errorf("getInstancePool failed. Retrying: %+v", err) + klog.Errorf("getInstancePool failed for %s. Retrying: %+v", instancePoolID, err) return false, err } else if getInstancePoolResp.LifecycleState != desiredState { deadline, _ := ctx.Deadline() @@ -433,7 +439,7 @@ func (c *instancePoolCache) waitForState(ctx context.Context, instancePoolID str instancePoolID, desiredState, getInstancePoolResp.LifecycleState, deadline.Sub(time.Now()).Round(time.Second)) return false, nil } - klog.V(3).Infof("instance pool %s is in desired state: %s", instancePoolID, desiredState) + klog.V(3).Infof("instance-pool %s is in desired state: %s", instancePoolID, desiredState) return true, nil }, ctx.Done()) // context timeout @@ -522,7 +528,7 @@ func (c *instancePoolCache) monitorScalingProgress(ctx context.Context, target i Page: page, }) if err != nil { - klog.Errorf("list instance pool instances for pool %s failed: %v", instancePoolID, err) + klog.Errorf("list instance-pool instances for %s failed: %v", instancePoolID, err) errCh <- err return } @@ -572,6 +578,7 @@ func (c *instancePoolCache) getSize(id string) (int, error) { return -1, 
errors.New("target size not found") } + klog.V(4).Infof("instance-pool %s size is %d", id, *pool.Size) return *pool.Size, nil } diff --git a/cluster-autoscaler/cloudprovider/oci/instancepools/oci_instance_pool_manager.go b/cluster-autoscaler/cloudprovider/oci/instancepools/oci_instance_pool_manager.go index 6aa36f0509e0..a6e5d7dc4c16 100644 --- a/cluster-autoscaler/cloudprovider/oci/instancepools/oci_instance_pool_manager.go +++ b/cluster-autoscaler/cloudprovider/oci/instancepools/oci_instance_pool_manager.go @@ -6,7 +6,6 @@ package instancepools import ( "fmt" - npconsts "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/nodepools/consts" "os" "strconv" "strings" @@ -14,6 +13,7 @@ import ( ocicommon "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/common" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/instancepools/consts" + npconsts "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/nodepools/consts" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -199,7 +199,7 @@ func instancePoolFromArg(value string) (*InstancePoolNodeGroup, error) { spec.id = tokens[2] - klog.Infof("static instance pool wrapper spec constructed: %+v", spec) + klog.Infof("static instance-pool wrapper spec constructed: %+v", spec) return spec, nil } @@ -257,7 +257,7 @@ func (m *InstancePoolManagerImpl) GetInstancePools() []*InstancePoolNodeGroup { // GetInstancePoolNodes returns InstancePool nodes that are not in a terminal state. 
func (m *InstancePoolManagerImpl) GetInstancePoolNodes(ip InstancePoolNodeGroup) ([]cloudprovider.Instance, error) { - klog.V(4).Infof("getting (cached) instances for node pool: %q", ip.Id()) + klog.V(4).Infof("getting (cached) instances for instance-pool: %q", ip.Id()) instanceSummaries, err := m.instancePoolCache.getInstanceSummaries(ip.Id()) if err != nil { @@ -267,32 +267,35 @@ func (m *InstancePoolManagerImpl) GetInstancePoolNodes(ip InstancePoolNodeGroup) var providerInstances []cloudprovider.Instance for _, instance := range *instanceSummaries { status := &cloudprovider.InstanceStatus{} - switch *instance.State { - case string(core.InstanceLifecycleStateStopped), string(core.InstanceLifecycleStateTerminated): + switch strings.ToLower(*instance.State) { + case strings.ToLower(string(core.InstanceLifecycleStateStopped)), strings.ToLower(string(core.InstanceLifecycleStateTerminated)): klog.V(4).Infof("skipping instance is in stopped/terminated state: %q", *instance.Id) - case string(core.InstanceLifecycleStateRunning): + case strings.ToLower(string(core.InstanceLifecycleStateRunning)): status.State = cloudprovider.InstanceRunning - case string(core.InstanceLifecycleStateCreatingImage): + case strings.ToLower(string(core.InstanceLifecycleStateCreatingImage)): status.State = cloudprovider.InstanceCreating - case string(core.InstanceLifecycleStateStarting): + case strings.ToLower(string(core.InstanceLifecycleStateStarting)): status.State = cloudprovider.InstanceCreating - case string(core.InstanceLifecycleStateMoving): + case strings.ToLower(string(core.InstanceLifecycleStateMoving)): status.State = cloudprovider.InstanceCreating - case string(core.InstanceLifecycleStateProvisioning): + case strings.ToLower(string(core.InstanceLifecycleStateProvisioning)): status.State = cloudprovider.InstanceCreating - case string(core.InstanceLifecycleStateTerminating): + case strings.ToLower(string(core.InstanceLifecycleStateTerminating)): status.State = 
cloudprovider.InstanceDeleting - case string(core.InstanceLifecycleStateStopping): + case strings.ToLower(string(core.InstanceLifecycleStateStopping)): status.State = cloudprovider.InstanceDeleting - case consts.InstanceStateUnfulfilled: + case strings.ToLower(consts.InstanceStateUnfulfilled): status.State = cloudprovider.InstanceCreating status.ErrorInfo = &cloudprovider.InstanceErrorInfo{ ErrorClass: cloudprovider.OutOfResourcesErrorClass, ErrorCode: consts.InstanceStateUnfulfilled, ErrorMessage: "OCI cannot provision additional instances for this instance pool. Review quota and/or capacity.", } + default: + klog.Warningf("instance %s has unknown state: %s", *instance.Id, *instance.State) } + klog.V(5).Infof("instance %s is in state: %s", *instance.Id, *instance.State) // Instance not in a terminal or unknown state, ok to add. if status.State != 0 { providerInstances = append(providerInstances, cloudprovider.Instance{ @@ -310,7 +313,7 @@ func (m *InstancePoolManagerImpl) GetInstancePoolNodes(ip InstancePoolNodeGroup) func (m *InstancePoolManagerImpl) GetInstancePoolForInstance(instanceDetails ocicommon.OciRef) (*InstancePoolNodeGroup, error) { if m.cfg.Global.UseNonMemberAnnotation && instanceDetails.InstancePoolID == consts.OciInstancePoolIDNonPoolMember { // Instance is not part of a configured pool. Return early and avoid additional API calls. 
- klog.V(4).Infof(instanceDetails.Name + " is not a member of any of the specified instance pool(s) and already annotated as " + + klog.V(4).Info("GetInstancePoolForInstance node " + instanceDetails.Name + " is not a member of any of the specified instance-pool(s) and already annotated as " + consts.OciInstancePoolIDNonPoolMember) return nil, errInstanceInstancePoolNotFound } @@ -323,6 +326,12 @@ func (m *InstancePoolManagerImpl) GetInstancePoolForInstance(instanceDetails oci if ip, ok := m.staticInstancePools[instanceDetails.InstancePoolID]; ok { return ip, nil } + // Skip search if the instance-pool is not set but the node-pool is (it is an OKE node) + if instanceDetails.NodePoolID != "" { + klog.V(4).Infof("GetInstancePoolForInstance skipping further search for %s since instance-pool is empty and node-pool is set to %s", instanceDetails.InstancePoolID, instanceDetails.NodePoolID) + return nil, errInstanceInstancePoolNotFound + } + // This instance is not in the cache. // Try to resolve the pool ID and other details, though it may not be a member of an instance-pool we manage. foundInstanceDetails, err := m.instancePoolCache.findInstanceByDetails(instanceDetails) @@ -369,13 +378,14 @@ func (m *InstancePoolManagerImpl) GetInstancePoolSize(ip InstancePoolNodeGroup) // SetInstancePoolSize sets instance-pool size. 
func (m *InstancePoolManagerImpl) SetInstancePoolSize(np InstancePoolNodeGroup, size int) error { - klog.Infof("SetInstancePoolSize (%d) called on instance pool %s", size, np.Id()) + klog.Infof("SetInstancePoolSize (%d) called on instance-pool %s", size, np.Id()) setSizeErr := m.instancePoolCache.setSize(np.Id(), size) - klog.V(5).Infof("SetInstancePoolSize was called: refreshing instance pool cache") + klog.V(5).Infof("SetInstancePoolSize completed: refreshing instance-pool cache") // refresh instance pool cache after update (regardless if there was an error or not) _ = m.forceRefreshInstancePool(np.Id()) if setSizeErr != nil { + klog.V(4).Infof("SetInstancePoolSize to %d failed: %v", size, setSizeErr) return setSizeErr } @@ -390,7 +400,7 @@ func (m *InstancePoolManagerImpl) SetInstancePoolSize(np InstancePoolNodeGroup, // DeleteInstances deletes the given instances. All instances must be controlled by the same instance-pool. func (m *InstancePoolManagerImpl) DeleteInstances(instancePool InstancePoolNodeGroup, instances []ocicommon.OciRef) error { - klog.Infof("DeleteInstances called on instance pool %s", instancePool.Id()) + klog.Infof("DeleteInstances called on instance-pool %s", instancePool.Id()) for _, instance := range instances { // removeInstance auto decrements instance pool size. 
diff --git a/cluster-autoscaler/cloudprovider/oci/instancepools/oci_instance_pool_manager_test.go b/cluster-autoscaler/cloudprovider/oci/instancepools/oci_instance_pool_manager_test.go index 0fe27b40b510..53ee94ba5eae 100644 --- a/cluster-autoscaler/cloudprovider/oci/instancepools/oci_instance_pool_manager_test.go +++ b/cluster-autoscaler/cloudprovider/oci/instancepools/oci_instance_pool_manager_test.go @@ -6,13 +6,14 @@ package instancepools import ( "context" + "reflect" + "testing" + apiv1 "k8s.io/api/core/v1" ocicommon "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/common" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/vendor-internal/github.com/oracle/oci-go-sdk/v65/core" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/vendor-internal/github.com/oracle/oci-go-sdk/v65/workrequests" kubeletapis "k8s.io/kubelet/pkg/apis" - "reflect" - "testing" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/oci/vendor-internal/github.com/oracle/oci-go-sdk/v65/common" @@ -372,6 +373,12 @@ func TestGetInstancePoolNodes(t *testing.T) { AvailabilityDomain: common.String("PHX-AD-1"), State: common.String(string(core.InstanceLifecycleStateTerminating)), }, + { + // Instance state is running with varied capitalization + Id: common.String("ocid1.instance.oc1.phx.aaa3"), + AvailabilityDomain: common.String("PHX-AD-1"), + State: common.String("Running"), + }, } expected := []cloudprovider.Instance{ @@ -387,6 +394,12 @@ func TestGetInstancePoolNodes(t *testing.T) { State: cloudprovider.InstanceDeleting, }, }, + { + Id: "ocid1.instance.oc1.phx.aaa3", + Status: &cloudprovider.InstanceStatus{ + State: cloudprovider.InstanceRunning, + }, + }, } manager := &InstancePoolManagerImpl{instancePoolCache: nodePoolCache, cfg: &ocicommon.CloudConfig{}}