Skip to content

Remove openshift and k8s pins #4110

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 1 addition & 3 deletions cmd/aro/operator.go
Original file line number Diff line number Diff line change
Expand Up @@ -74,9 +74,7 @@ func operator(ctx context.Context, log *logrus.Entry) error {
}

mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
	HealthProbeBindAddress: ":8081",
	// NOTE(review): the previous MetricsBindAddress (":8888") and Port (8443)
	// were removed without replacement. In controller-runtime >= v0.15 metrics
	// are configured via Metrics: metricsserver.Options{BindAddress: ":8888"} —
	// confirm whether dropping operator metrics serving is intentional, and
	// that deployment health-probe config was updated for :8080 -> :8081.
})
if err != nil {
return err
Expand Down
372 changes: 90 additions & 282 deletions go.mod

Large diffs are not rendered by default.

217 changes: 99 additions & 118 deletions go.sum

Large diffs are not rendered by default.

8 changes: 4 additions & 4 deletions pkg/cluster/delete.go
Original file line number Diff line number Diff line change
Expand Up @@ -331,13 +331,13 @@ func (m *manager) deleteGatewayAndWait(ctx context.Context) error {
}

m.log.Info("waiting for gateway record deletion")
// Poll immediately, then every 15s, until the gateway record is gone or
// timeoutCtx expires.
return wait.PollUntilContextCancel(timeoutCtx, 15*time.Second, true, func(ctx context.Context) (bool, error) {
	_, err := m.dbGateway.Get(ctx, m.doc.OpenShiftCluster.Properties.NetworkProfile.GatewayPrivateLinkID)
	if err != nil && cosmosdb.IsErrorStatusCode(err, http.StatusNotFound) /* already gone */ {
		// The record is already deleted: signal done with a nil error.
		// Returning err here (as this PR does) would make
		// PollUntilContextCancel propagate the NotFound error to the
		// caller, turning a successful deletion into a failure.
		return true, nil
	}
	return false, nil
})
}

func (m *manager) deleteGateway(ctx context.Context) error {
Expand Down
5 changes: 2 additions & 3 deletions pkg/cluster/deploybaseresources.go
Original file line number Diff line number Diff line change
Expand Up @@ -355,7 +355,7 @@ func (m *manager) _attachNSGs(ctx context.Context, timeout time.Duration, pollIn
// NSG since the inner loop is tolerant of that, and since we are attaching
// the same NSG the only allowed failure case is when the NSG cannot be
// attached to begin with, so it shouldn't happen in practice.
_ = wait.PollImmediateUntil(pollInterval, func() (bool, error) {
_ = wait.PollUntilContextCancel(timeoutCtx, pollInterval, true, func(ctx context.Context) (bool, error) {
var c bool
c, innerErr = func() (bool, error) {
for _, subnetID := range []string{
Expand Down Expand Up @@ -412,9 +412,8 @@ func (m *manager) _attachNSGs(ctx context.Context, timeout time.Duration, pollIn
}
return true, nil
}()

return c, innerErr
}, timeoutCtx.Done())
})

return innerErr
}
Expand Down
60 changes: 30 additions & 30 deletions pkg/cluster/deploybaseresources_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -284,16 +284,16 @@ func TestAttachNSGs(t *testing.T) {
},
},
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, "masterSubnetID").Return(&mgmtnetwork.Subnet{}, nil)
subnet.EXPECT().CreateOrUpdate(ctx, "masterSubnetID", &mgmtnetwork.Subnet{
subnet.EXPECT().Get(gomock.Any(), "masterSubnetID").Return(&mgmtnetwork.Subnet{}, nil)
subnet.EXPECT().CreateOrUpdate(gomock.Any(), "masterSubnetID", &mgmtnetwork.Subnet{
SubnetPropertiesFormat: &mgmtnetwork.SubnetPropertiesFormat{
NetworkSecurityGroup: &mgmtnetwork.SecurityGroup{
ID: to.StringPtr("/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/aro-12345678/providers/Microsoft.Network/networkSecurityGroups/infra-nsg"),
},
},
}).Return(nil)
subnet.EXPECT().Get(ctx, "workerSubnetID").Return(&mgmtnetwork.Subnet{}, nil)
subnet.EXPECT().CreateOrUpdate(ctx, "workerSubnetID", &mgmtnetwork.Subnet{
subnet.EXPECT().Get(gomock.Any(), "workerSubnetID").Return(&mgmtnetwork.Subnet{}, nil)
subnet.EXPECT().CreateOrUpdate(gomock.Any(), "workerSubnetID", &mgmtnetwork.Subnet{
SubnetPropertiesFormat: &mgmtnetwork.SubnetPropertiesFormat{
NetworkSecurityGroup: &mgmtnetwork.SecurityGroup{
ID: to.StringPtr("/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/aro-12345678/providers/Microsoft.Network/networkSecurityGroups/infra-nsg"),
Expand Down Expand Up @@ -350,7 +350,7 @@ func TestAttachNSGs(t *testing.T) {
},
},
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, "masterSubnetID").Return(&mgmtnetwork.Subnet{}, fmt.Errorf("subnet not found"))
subnet.EXPECT().Get(gomock.Any(), "masterSubnetID").Return(&mgmtnetwork.Subnet{}, fmt.Errorf("subnet not found"))
},
wantErr: "subnet not found",
},
Expand All @@ -376,7 +376,7 @@ func TestAttachNSGs(t *testing.T) {
},
},
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, "masterSubnetID").Return(&mgmtnetwork.Subnet{
subnet.EXPECT().Get(gomock.Any(), "masterSubnetID").Return(&mgmtnetwork.Subnet{
SubnetPropertiesFormat: &mgmtnetwork.SubnetPropertiesFormat{
NetworkSecurityGroup: &mgmtnetwork.SecurityGroup{
ID: to.StringPtr("I shouldn't be here!"),
Expand Down Expand Up @@ -408,8 +408,8 @@ func TestAttachNSGs(t *testing.T) {
},
},
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, "masterSubnetID").Return(&mgmtnetwork.Subnet{}, nil)
subnet.EXPECT().CreateOrUpdate(ctx, "masterSubnetID", &mgmtnetwork.Subnet{
subnet.EXPECT().Get(gomock.Any(), "masterSubnetID").Return(&mgmtnetwork.Subnet{}, nil)
subnet.EXPECT().CreateOrUpdate(gomock.Any(), "masterSubnetID", &mgmtnetwork.Subnet{
SubnetPropertiesFormat: &mgmtnetwork.SubnetPropertiesFormat{
NetworkSecurityGroup: &mgmtnetwork.SecurityGroup{
ID: to.StringPtr("/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/aro-12345678/providers/Microsoft.Network/networkSecurityGroups/infra-nsg"),
Expand Down Expand Up @@ -440,8 +440,8 @@ func TestAttachNSGs(t *testing.T) {
},
},
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, "masterSubnetID").Return(&mgmtnetwork.Subnet{}, nil)
subnet.EXPECT().CreateOrUpdate(ctx, "masterSubnetID", &mgmtnetwork.Subnet{
subnet.EXPECT().Get(gomock.Any(), "masterSubnetID").Return(&mgmtnetwork.Subnet{}, nil)
subnet.EXPECT().CreateOrUpdate(gomock.Any(), "masterSubnetID", &mgmtnetwork.Subnet{
SubnetPropertiesFormat: &mgmtnetwork.SubnetPropertiesFormat{
NetworkSecurityGroup: &mgmtnetwork.SecurityGroup{
ID: to.StringPtr("/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/aro-12345678/providers/Microsoft.Network/networkSecurityGroups/infra-nsg"),
Expand Down Expand Up @@ -481,8 +481,8 @@ func TestSetMasterSubnetPolicies(t *testing.T) {
{
name: "ok, !gatewayEnabled",
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, "subnetID").Return(&mgmtnetwork.Subnet{}, nil)
subnet.EXPECT().CreateOrUpdate(ctx, "subnetID", &mgmtnetwork.Subnet{
subnet.EXPECT().Get(gomock.Any(), "subnetID").Return(&mgmtnetwork.Subnet{}, nil)
subnet.EXPECT().CreateOrUpdate(gomock.Any(), "subnetID", &mgmtnetwork.Subnet{
SubnetPropertiesFormat: &mgmtnetwork.SubnetPropertiesFormat{
PrivateLinkServiceNetworkPolicies: to.StringPtr("Disabled"),
},
Expand All @@ -492,8 +492,8 @@ func TestSetMasterSubnetPolicies(t *testing.T) {
{
name: "ok, gatewayEnabled",
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, "subnetID").Return(&mgmtnetwork.Subnet{}, nil)
subnet.EXPECT().CreateOrUpdate(ctx, "subnetID", &mgmtnetwork.Subnet{
subnet.EXPECT().Get(gomock.Any(), "subnetID").Return(&mgmtnetwork.Subnet{}, nil)
subnet.EXPECT().CreateOrUpdate(gomock.Any(), "subnetID", &mgmtnetwork.Subnet{
SubnetPropertiesFormat: &mgmtnetwork.SubnetPropertiesFormat{
PrivateEndpointNetworkPolicies: to.StringPtr("Disabled"),
PrivateLinkServiceNetworkPolicies: to.StringPtr("Disabled"),
Expand All @@ -505,7 +505,7 @@ func TestSetMasterSubnetPolicies(t *testing.T) {
{
name: "ok, skipCreateOrUpdate, !gatewayEnabled",
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, "subnetID").Return(&mgmtnetwork.Subnet{
subnet.EXPECT().Get(gomock.Any(), "subnetID").Return(&mgmtnetwork.Subnet{
SubnetPropertiesFormat: &mgmtnetwork.SubnetPropertiesFormat{
PrivateLinkServiceNetworkPolicies: to.StringPtr("Disabled"),
},
Expand All @@ -516,7 +516,7 @@ func TestSetMasterSubnetPolicies(t *testing.T) {
{
name: "ok, skipCreateOrUpdate, gatewayEnabled",
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, "subnetID").Return(&mgmtnetwork.Subnet{
subnet.EXPECT().Get(gomock.Any(), "subnetID").Return(&mgmtnetwork.Subnet{
SubnetPropertiesFormat: &mgmtnetwork.SubnetPropertiesFormat{
PrivateEndpointNetworkPolicies: to.StringPtr("Disabled"),
PrivateLinkServiceNetworkPolicies: to.StringPtr("Disabled"),
Expand All @@ -529,7 +529,7 @@ func TestSetMasterSubnetPolicies(t *testing.T) {
{
name: "error",
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, "subnetID").Return(nil, fmt.Errorf("sad"))
subnet.EXPECT().Get(gomock.Any(), "subnetID").Return(nil, fmt.Errorf("sad"))
},
wantErr: "sad",
},
Expand Down Expand Up @@ -681,14 +681,14 @@ func TestSubnetsWithServiceEndpoints(t *testing.T) {
{
name: "no service endpoints set returns empty string slice",
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, masterSubnet).Return(&mgmtnetwork.Subnet{}, nil)
subnet.EXPECT().Get(gomock.Any(), masterSubnet).Return(&mgmtnetwork.Subnet{}, nil)
},
wantSubnets: []string{},
},
{
name: "master subnet has service endpoint, but incorrect location",
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, masterSubnet).Return(&mgmtnetwork.Subnet{
subnet.EXPECT().Get(gomock.Any(), masterSubnet).Return(&mgmtnetwork.Subnet{
SubnetPropertiesFormat: &mgmtnetwork.SubnetPropertiesFormat{
ServiceEndpoints: &[]mgmtnetwork.ServiceEndpointPropertiesFormat{
{
Expand All @@ -700,7 +700,7 @@ func TestSubnetsWithServiceEndpoints(t *testing.T) {
},
},
}, nil)
subnet.EXPECT().Get(ctx, fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001")).Return(&mgmtnetwork.Subnet{}, nil)
subnet.EXPECT().Get(gomock.Any(), fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001")).Return(&mgmtnetwork.Subnet{}, nil)
},
workerSubnets: []string{
fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001"),
Expand All @@ -710,7 +710,7 @@ func TestSubnetsWithServiceEndpoints(t *testing.T) {
{
name: "master subnet has service endpoint with correct location",
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, masterSubnet).Return(&mgmtnetwork.Subnet{
subnet.EXPECT().Get(gomock.Any(), masterSubnet).Return(&mgmtnetwork.Subnet{
SubnetPropertiesFormat: &mgmtnetwork.SubnetPropertiesFormat{
ServiceEndpoints: &[]mgmtnetwork.ServiceEndpointPropertiesFormat{
{
Expand All @@ -722,7 +722,7 @@ func TestSubnetsWithServiceEndpoints(t *testing.T) {
},
},
}, nil)
subnet.EXPECT().Get(ctx, fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001")).Return(&mgmtnetwork.Subnet{}, nil)
subnet.EXPECT().Get(gomock.Any(), fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001")).Return(&mgmtnetwork.Subnet{}, nil)
},
workerSubnets: []string{
fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001"),
Expand All @@ -732,7 +732,7 @@ func TestSubnetsWithServiceEndpoints(t *testing.T) {
{
name: "master subnet has service endpoint with all location",
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, masterSubnet).Return(&mgmtnetwork.Subnet{
subnet.EXPECT().Get(gomock.Any(), masterSubnet).Return(&mgmtnetwork.Subnet{
SubnetPropertiesFormat: &mgmtnetwork.SubnetPropertiesFormat{
ServiceEndpoints: &[]mgmtnetwork.ServiceEndpointPropertiesFormat{
{
Expand All @@ -744,7 +744,7 @@ func TestSubnetsWithServiceEndpoints(t *testing.T) {
},
},
}, nil)
subnet.EXPECT().Get(ctx, fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001")).Return(&mgmtnetwork.Subnet{}, nil)
subnet.EXPECT().Get(gomock.Any(), fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001")).Return(&mgmtnetwork.Subnet{}, nil)
},
workerSubnets: []string{
fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001"),
Expand All @@ -767,8 +767,8 @@ func TestSubnetsWithServiceEndpoints(t *testing.T) {
},
}

subnet.EXPECT().Get(ctx, masterSubnet).Return(subnetWithServiceEndpoint, nil)
subnet.EXPECT().Get(ctx, fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001")).Return(subnetWithServiceEndpoint, nil)
subnet.EXPECT().Get(gomock.Any(), masterSubnet).Return(subnetWithServiceEndpoint, nil)
subnet.EXPECT().Get(gomock.Any(), fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001")).Return(subnetWithServiceEndpoint, nil)
},
workerSubnets: []string{
fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001"),
Expand All @@ -794,9 +794,9 @@ func TestSubnetsWithServiceEndpoints(t *testing.T) {
},
}

subnet.EXPECT().Get(ctx, masterSubnet).Return(subnetWithServiceEndpoint, nil)
subnet.EXPECT().Get(ctx, fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001")).Return(subnetWithServiceEndpoint, nil)
subnet.EXPECT().Get(ctx, fmt.Sprintf(workerSubnetFormatString, "worker-subnet-002")).Return(&mgmtnetwork.Subnet{}, nil)
subnet.EXPECT().Get(gomock.Any(), masterSubnet).Return(subnetWithServiceEndpoint, nil)
subnet.EXPECT().Get(gomock.Any(), fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001")).Return(subnetWithServiceEndpoint, nil)
subnet.EXPECT().Get(gomock.Any(), fmt.Sprintf(workerSubnetFormatString, "worker-subnet-002")).Return(&mgmtnetwork.Subnet{}, nil)
},
workerSubnets: []string{
fmt.Sprintf(workerSubnetFormatString, "worker-subnet-001"),
Expand All @@ -811,7 +811,7 @@ func TestSubnetsWithServiceEndpoints(t *testing.T) {
{
name: "Get subnet returns error",
mocks: func(subnet *mock_subnet.MockManager) {
subnet.EXPECT().Get(ctx, masterSubnet).Return(nil, errors.New("generic error"))
subnet.EXPECT().Get(gomock.Any(), masterSubnet).Return(nil, errors.New("generic error"))
},
workerSubnets: []string{},
wantErr: "generic error",
Expand Down
8 changes: 7 additions & 1 deletion pkg/cluster/install.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ import (
kruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"

"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
Expand Down Expand Up @@ -632,7 +633,12 @@ func (m *manager) initializeKubernetesClients(ctx context.Context) error {
return err
}

mapper, err := apiutil.NewDynamicRESTMapper(restConfig, apiutil.WithLazyDiscovery)
httpClient, err := rest.HTTPClientFor(restConfig)
if err != nil {
return err
}

mapper, err := apiutil.NewDynamicRESTMapper(restConfig, httpClient)
if err != nil {
return err
}
Expand Down
4 changes: 2 additions & 2 deletions pkg/deploy/predeploy.go
Original file line number Diff line number Diff line change
Expand Up @@ -570,9 +570,9 @@ func (d *deployer) restartOldScaleset(ctx context.Context, vmssName string, lbHe
}

// waitForReadiness polls (immediately, then every 10s) until the given VMSS
// instance reports healthy, or until ctx is cancelled / its deadline passes,
// in which case the context's error is returned.
func (d *deployer) waitForReadiness(ctx context.Context, vmssName string, vmInstanceID string) error {
	return wait.PollUntilContextCancel(ctx, 10*time.Second, true, func(ctx context.Context) (bool, error) {
		return d.isVMInstanceHealthy(ctx, d.config.RPResourceGroupName, vmssName, vmInstanceID), nil
	})
}

func (d *deployer) isVMInstanceHealthy(ctx context.Context, resourceGroupName string, vmssName string, vmInstanceID string) bool {
Expand Down
2 changes: 1 addition & 1 deletion pkg/deploy/predeploy_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1662,7 +1662,7 @@ func TestWaitForReadiness(t *testing.T) {
cancel: cancelFastTimeout,
},
mocks: []mock{getInstanceViewMock(unhealthyVMSS)},
wantErr: "timed out waiting for the condition",
wantErr: "context deadline exceeded",
},
{
name: "run successfully after confirming healthy status",
Expand Down
4 changes: 2 additions & 2 deletions pkg/deploy/upgrade_gateway.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,15 +39,15 @@ func (d *deployer) gatewayWaitForReadiness(ctx context.Context, vmssName string)
}

d.log.Printf("waiting for %s instances to be healthy", vmssName)
// Poll immediately, then every 10s; done only once every instance in the
// scale set reports healthy. The condition never returns an error, so this
// ends only on success or ctx cancellation.
return wait.PollUntilContextCancel(ctx, 10*time.Second, true, func(ctx context.Context) (bool, error) {
	for _, vm := range scalesetVMs {
		if !d.isVMInstanceHealthy(ctx, d.config.GatewayResourceGroupName, vmssName, *vm.InstanceID) {
			return false, nil
		}
	}

	return true, nil
})
}

func (d *deployer) gatewayRemoveOldScalesets(ctx context.Context) error {
Expand Down
4 changes: 2 additions & 2 deletions pkg/deploy/upgrade_rp.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,15 +39,15 @@ func (d *deployer) rpWaitForReadiness(ctx context.Context, vmssName string) erro
}

d.log.Printf("waiting for %s instances to be healthy", vmssName)
// Poll immediately, then every 10s; done only once every instance in the
// scale set reports healthy. The condition never returns an error, so this
// ends only on success or ctx cancellation.
return wait.PollUntilContextCancel(ctx, 10*time.Second, true, func(ctx context.Context) (bool, error) {
	for _, vm := range scalesetVMs {
		if !d.isVMInstanceHealthy(ctx, d.config.RPResourceGroupName, vmssName, *vm.InstanceID) {
			return false, nil
		}
	}

	return true, nil
})
}

func (d *deployer) rpRemoveOldScalesets(ctx context.Context) error {
Expand Down
7 changes: 6 additions & 1 deletion pkg/frontend/adminactions/kubeactions.go
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,12 @@ func NewKubeActions(log *logrus.Entry, env env.Interface, oc *api.OpenShiftClust
return nil, err
}

mapper, err := apiutil.NewDynamicRESTMapper(restConfig, apiutil.WithLazyDiscovery)
httpClient, err := restclient.HTTPClientFor(restConfig)
if err != nil {
return nil, err
}

mapper, err := apiutil.NewDynamicRESTMapper(restConfig, httpClient)
if err != nil {
return nil, err
}
Expand Down
4 changes: 2 additions & 2 deletions pkg/hive/manager_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -662,7 +662,7 @@ func TestListSyncSet(t *testing.T) {
t.Fatal(err)
}

// Fixed in this PR: the assertion was previously missing the negation, so a
// MISMATCHING result never failed the test. Fail when the returned list does
// not deep-equal the expected syncset.
if result != nil && !reflect.DeepEqual(result, syncsetTest) {
	t.Fatal("Unexpected syncset list returned", result)
}
})
Expand Down Expand Up @@ -703,7 +703,7 @@ func TestGetSyncSet(t *testing.T) {
t.Fatal(err)
}

// Fixed in this PR: the assertion was previously missing the negation, so a
// MISMATCHING result never failed the test. Fail when the returned syncset
// does not deep-equal the expected one.
if result != nil && !reflect.DeepEqual(result, syncsetTest) {
	t.Fatal("Unexpected syncset is returned", result)
}
})
Expand Down
9 changes: 8 additions & 1 deletion pkg/mimo/actuator/task.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@ import (

"github.com/sirupsen/logrus"

"k8s.io/client-go/rest"

"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"

Expand Down Expand Up @@ -90,7 +92,12 @@ func (t *th) ClientHelper() (clienthelper.Interface, error) {
return nil, err
}

mapper, err := apiutil.NewDynamicRESTMapper(restConfig, apiutil.WithLazyDiscovery)
httpClient, err := rest.HTTPClientFor(restConfig)
if err != nil {
return nil, err
}

mapper, err := apiutil.NewDynamicRESTMapper(restConfig, httpClient)
if err != nil {
return nil, err
}
Expand Down
4 changes: 4 additions & 0 deletions pkg/mimo/steps/cluster/operatorflags_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,10 @@ func TestOperatorFlags(t *testing.T) {
Name: arov1alpha1.SingletonClusterName,
ResourceVersion: "1000",
},
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: arov1alpha1.SchemeGroupVersion.String(),
},
Spec: arov1alpha1.ClusterSpec{
OperatorFlags: arov1alpha1.OperatorFlags{
"foo": "bar",
Expand Down
Loading
Loading