Add --migrate-vmas-to-vms option to support VMAS migration to VMS agent pool #8711

Open · wants to merge 5 commits into base: main

3 changes: 3 additions & 0 deletions src/aks-preview/azext_aks_preview/_help.py
@@ -1252,6 +1252,9 @@
- name: --disable-imds-restriction
type: bool
short-summary: Disable IMDS restriction in the cluster. All Pods in the cluster will be able to access IMDS.
- name: --migrate-vmas-to-vms
type: bool
short-summary: Migrate cluster with VMAS node pool to VMS node pool.
examples:
- name: Reconcile the cluster back to its current state.
text: az aks update -g MyResourceGroup -n MyManagedCluster
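Note: mirroring the recorded live test further down, the intended usage is a one-shot update against an existing VMAS cluster, for example az aks update -g MyResourceGroup -n MyManagedCluster --migrate-vmas-to-vms (the resource names are the placeholders already used in the help examples). The flag takes no value; it only gates whether the migration path runs.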
2 changes: 2 additions & 0 deletions src/aks-preview/azext_aks_preview/_params.py
@@ -1370,6 +1370,8 @@ def load_arguments(self, _):
arg_type=get_enum_type(health_probe_modes),
)

c.argument('migrate_vmas_to_vms', is_preview=True, action='store_true')

with self.argument_context("aks upgrade") as c:
c.argument("kubernetes_version", completer=get_k8s_upgrades_completion_list)
c.argument(
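For readers unfamiliar with how a store_true flag surfaces in the command layer, the sketch below uses plain argparse as a stand-in for the argument registration above; it is illustrative only, not the actual knack/azure-cli wiring, and only shows why custom.py can default migrate_vmas_to_vms to False.

import argparse

# Stand-in for c.argument('migrate_vmas_to_vms', action='store_true'):
# the flag takes no value, defaults to False, and flips to True only
# when passed on the command line.
parser = argparse.ArgumentParser()
parser.add_argument("--migrate-vmas-to-vms", dest="migrate_vmas_to_vms",
                    action="store_true", default=False)

print(parser.parse_args([]).migrate_vmas_to_vms)                         # False
print(parser.parse_args(["--migrate-vmas-to-vms"]).migrate_vmas_to_vms)  # True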
1 change: 1 addition & 0 deletions src/aks-preview/azext_aks_preview/custom.py
@@ -750,6 +750,7 @@ def aks_update(
# IMDS restriction
enable_imds_restriction=False,
disable_imds_restriction=False,
migrate_vmas_to_vms=False,
):
# DO NOT MOVE: get all the original parameters and save them as a dictionary
raw_parameters = locals()
31 changes: 31 additions & 0 deletions src/aks-preview/azext_aks_preview/managed_cluster_decorator.py
@@ -20,6 +20,7 @@
CONST_AZURE_SERVICE_MESH_UPGRADE_COMMAND_START,
CONST_AZURE_SERVICE_MESH_DEFAULT_EGRESS_NAMESPACE,
CONST_LOAD_BALANCER_SKU_BASIC,
CONST_LOAD_BALANCER_SKU_STANDARD,
CONST_MANAGED_CLUSTER_SKU_NAME_BASE,
CONST_MANAGED_CLUSTER_SKU_NAME_AUTOMATIC,
CONST_MANAGED_CLUSTER_SKU_TIER_FREE,
@@ -40,6 +41,8 @@
CONST_OUTBOUND_TYPE_BLOCK,
CONST_IMDS_RESTRICTION_ENABLED,
CONST_IMDS_RESTRICTION_DISABLED,
CONST_AVAILABILITY_SET,
CONST_VIRTUAL_MACHINES,
)
from azext_aks_preview._helpers import (
check_is_apiserver_vnet_integration_cluster,
@@ -2813,6 +2816,13 @@ def get_disable_imds_restriction(self) -> bool:
"""
return self.raw_param.get("disable_imds_restriction")

def get_migrate_vmas_to_vms(self) -> bool:
"""Obtain the value of migrate_vmas_to_vms.

:return: bool
"""
return self.raw_param.get("migrate_vmas_to_vms")


# pylint: disable=too-many-public-methods
class AKSPreviewManagedClusterCreateDecorator(AKSManagedClusterCreateDecorator):
@@ -5242,6 +5252,25 @@ def update_imds_restriction(self, mc: ManagedCluster) -> ManagedCluster:
raise DecoratorEarlyExitException()
return mc

def update_vmas_to_vms(self, mc: ManagedCluster) -> ManagedCluster:
"""Update the agent pool profile type to be VMS and LB sku to standard

:return: the ManagedCluster object
"""
self._ensure_mc(mc)

if self.context.get_migrate_vmas_to_vms():
# Ensure the cluster has a single, valid VMAS agent pool
if len(mc.agent_pool_profiles) == 1 and mc.agent_pool_profiles[0].type == CONST_AVAILABILITY_SET:
mc.agent_pool_profiles[0].type = CONST_VIRTUAL_MACHINES
else:
raise CLIError('This is not a valid VMAS cluster, we cannot proceed with the migration.')

if mc.network_profile.load_balancer_sku == CONST_LOAD_BALANCER_SKU_BASIC:
mc.network_profile.load_balancer_sku = CONST_LOAD_BALANCER_SKU_STANDARD

return mc

def update_mc_profile_preview(self) -> ManagedCluster:
"""The overall controller used to update the preview ManagedCluster profile.

@@ -5315,6 +5344,8 @@ def update_mc_profile_preview(self) -> ManagedCluster:
mc = self.update_static_egress_gateway(mc)
# update imds restriction
mc = self.update_imds_restriction(mc)
# update VMAS to VMS
mc = self.update_vmas_to_vms(mc)

return mc

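To summarize the gate implemented in update_vmas_to_vms above, here is a minimal standalone sketch with plain strings in place of the CONST_* values (the literals match the checks in the recorded live test; the helper itself is illustrative and not part of the extension):

def sketch_migrate_vmas_to_vms(agent_pool_types, load_balancer_sku):
    # Eligible only when the cluster has exactly one AvailabilitySet agent pool.
    if len(agent_pool_types) != 1 or agent_pool_types[0] != "AvailabilitySet":
        raise ValueError("not a valid VMAS cluster, cannot proceed with the migration")
    agent_pool_types = ["VirtualMachines"]
    # A Basic load balancer is upgraded to Standard as part of the same update.
    if load_balancer_sku == "basic":
        load_balancer_sku = "standard"
    return agent_pool_types, load_balancer_sku

For example, sketch_migrate_vmas_to_vms(["AvailabilitySet"], "basic") returns (["VirtualMachines"], "standard"), which is what the unit and live tests below assert against the updated ManagedCluster.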
@@ -15852,4 +15852,59 @@ def test_aks_loadbalancer_commands(
self.cmd(
"aks delete -g {resource_group} -n {name} --yes --no-wait",
checks=[self.is_empty()],
)

# Comment out the tests below as we only allow this in certain regions.
# @AllowLargeResponse()
# @AKSCustomResourceGroupPreparer(
# random_name_length=17,
# name_prefix="clitest",
# location="centraluseuap",
# )
# def test_aks_migrate_vmas_to_vms(
# self, resource_group, resource_group_location
# ):
# _, create_version = self._get_versions(resource_group_location)
# aks_name = self.create_random_name("cliakstest", 16)
# self.kwargs.update(
# {
# "resource_group": resource_group,
# "name": aks_name,
# "location": resource_group_location,
# "k8s_version": create_version,
# "ssh_key_value": self.generate_ssh_keys(),
# }
# )

# # create
# create_cmd = (
# "aks create --resource-group={resource_group} --name={name} --location={location} "
# "--ssh-key-value={ssh_key_value} "
# "--vm-set-type AvailabilitySet "
# "--load-balancer-sku Basic "
# )
# self.cmd(
# create_cmd,
# checks=[
# self.check('provisioningState', 'Succeeded'),
# self.check("agentPoolProfiles[0].type", "AvailabilitySet"),
# self.check("networkProfile.loadBalancerSku", "basic"),
# ],
# )

# # update -- migrate vmas to vms
# update_cmd = (
# "aks update --resource-group {resource_group} --name {name} "
# "--migrate-vmas-to-vms "
# )
# self.cmd(update_cmd, checks=[
# self.check('provisioningState', 'Succeeded'),
# self.check("agentPoolProfiles[0].type", "VirtualMachines"),
# self.check("networkProfile.loadBalancerSku", "standard"),
# ])

# # delete
# self.cmd(
# "aks delete -g {resource_group} -n {name} --yes --no-wait",
# checks=[self.is_empty()],
# )
@@ -37,6 +37,7 @@
CONST_KUBE_DASHBOARD_ADDON_NAME,
CONST_LOAD_BALANCER_BACKEND_POOL_TYPE_NODE_IP,
CONST_LOAD_BALANCER_SKU_STANDARD,
CONST_LOAD_BALANCER_SKU_BASIC,
CONST_MONITORING_ADDON_NAME,
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
CONST_MONITORING_USING_AAD_MSI_AUTH,
@@ -47,6 +48,8 @@
CONST_ROTATION_POLL_INTERVAL,
CONST_SECRET_ROTATION_ENABLED,
CONST_VIRTUAL_MACHINE_SCALE_SETS,
CONST_AVAILABILITY_SET,
CONST_VIRTUAL_MACHINES,
CONST_VIRTUAL_NODE_ADDON_NAME,
CONST_VIRTUAL_NODE_SUBNET_NAME,
CONST_WORKLOAD_RUNTIME_OCI_CONTAINER,
@@ -85,6 +88,7 @@
MutuallyExclusiveArgumentError,
RequiredArgumentMissingError,
UnknownError,
CLIError,
)
from dateutil.parser import parse

@@ -9000,6 +9004,110 @@ def test_update_acns_in_network_profile(self):
),
)
self.assertEqual(dec_mc_1, ground_truth_mc_1)

def test_update_vmas_to_vms(self):
# Should not update mc if unset
dec_0 = AKSPreviewManagedClusterUpdateDecorator(
self.cmd,
self.client,
{},
CUSTOM_MGMT_AKS_PREVIEW,
)
mc_0 = self.models.ManagedCluster(
location="test_location",
)
dec_0.context.attach_mc(mc_0)
dec_mc_0 = dec_0.update_vmas_to_vms(mc_0)
ground_truth_mc_0 = self.models.ManagedCluster(
location="test_location",
)
self.assertEqual(dec_mc_0, ground_truth_mc_0)

# Should raise error if trying to migrate non-vmas cluster to vms
dec_1 = AKSPreviewManagedClusterUpdateDecorator(
self.cmd,
self.client,
{
"migrate_vmas_to_vms": True,
},
CUSTOM_MGMT_AKS_PREVIEW,
)
mc_1 = self.models.ManagedCluster(
location="test_location",
)
ap_1 = self.models.ManagedClusterAgentPoolProfile(
name="test_np_name",
type=CONST_VIRTUAL_MACHINE_SCALE_SETS,
)
mc_1.agent_pool_profiles = [ap_1]
dec_1.context.attach_mc(mc_1)
with self.assertRaises(CLIError):
dec_1.update_vmas_to_vms(mc_1)

# Should raise error if cluster has more than 1 AP
dec_2 = AKSPreviewManagedClusterUpdateDecorator(
self.cmd,
self.client,
{
"migrate_vmas_to_vms": True,
},
CUSTOM_MGMT_AKS_PREVIEW,
)
mc_2 = self.models.ManagedCluster(
location="test_location",
)
ap_2_1 = self.models.ManagedClusterAgentPoolProfile(
name="test_np_name_1",
type=CONST_AVAILABILITY_SET,
)
ap_2_2 = self.models.ManagedClusterAgentPoolProfile(
name="test_np_name_2",
type=CONST_AVAILABILITY_SET,
)
mc_2.agent_pool_profiles = [ap_2_1, ap_2_2]
dec_2.context.attach_mc(mc_2)
with self.assertRaises(CLIError):
dec_2.update_vmas_to_vms(mc_2)

# Should migrate vmas-blb to vms-slb
dec_3 = AKSPreviewManagedClusterUpdateDecorator(
self.cmd,
self.client,
{
"migrate_vmas_to_vms": True,
},
CUSTOM_MGMT_AKS_PREVIEW,
)
mc_3 = self.models.ManagedCluster(
location="test_location",
)
ap_3 = self.models.ManagedClusterAgentPoolProfile(
name="test_np_name",
type=CONST_AVAILABILITY_SET,
)
network_profile_3 = self.models.ContainerServiceNetworkProfile(
load_balancer_sku=CONST_LOAD_BALANCER_SKU_BASIC,
)
mc_3.agent_pool_profiles = [ap_3]
mc_3.network_profile = network_profile_3
dec_3.context.attach_mc(mc_3)
dec_mc_3 = dec_3.update_vmas_to_vms(mc_3)

ground_truth_mc_3 = self.models.ManagedCluster(
location="test_location",
)
ground_truth_ap_3 = self.models.ManagedClusterAgentPoolProfile(
name="test_np_name",
type=CONST_VIRTUAL_MACHINES,
)
ground_truth_network_profile_3 = self.models.ContainerServiceNetworkProfile(
load_balancer_sku=CONST_LOAD_BALANCER_SKU_STANDARD,
)
ground_truth_mc_3.agent_pool_profiles = [ground_truth_ap_3]
ground_truth_mc_3.network_profile = ground_truth_network_profile_3
self.assertEqual(dec_mc_3, ground_truth_mc_3)



if __name__ == "__main__":
unittest.main()