Commit f5ca6a1

MaximMonin and mnaser authored
implementing control plane resize (#581)
* passing validation control plane resize
* updating control plane topology while resizing master node group
* fix linter issues
* fix completing control plane resize operation
* fix linter issue
* do not create default-master server group
* Update driver.py

---------

Co-authored-by: Mohammed Naser <[email protected]>
1 parent a1c0888 commit f5ca6a1

File tree

1 file changed (+37, -14 lines)

magnum_cluster_api/driver.py

Lines changed: 37 additions & 14 deletions
@@ -19,6 +19,7 @@
 from eventlet import tpool  # type: ignore
 from heatclient import exc  # type: ignore
 from magnum import objects as magnum_objects  # type: ignore
+from magnum.common import exception as magnum_exception  # type: ignore
 from magnum.conductor import scale_manager  # type: ignore
 from magnum.drivers.common import driver  # type: ignore
 from magnum.objects import fields  # type: ignore
@@ -171,7 +172,9 @@ def update_cluster_control_plane_status(
 
         if updated_replicas != replicas:
             nodegroup.status = f"{action}_IN_PROGRESS"
-        elif updated_replicas == replicas and ready:
+        elif (
+            updated_replicas == replicas and nodegroup.node_count == replicas and ready
+        ):
            nodegroup.status = f"{action}_COMPLETE"
         nodegroup.status_reason = failure_message
 
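
The new nodegroup.node_count == replicas guard ties completion to the count that was actually requested through Magnum: while a resize is pending, the control plane can still look fully rolled out and ready at its old size, and without the guard the node group would report *_COMPLETE too early. A minimal standalone sketch of the transition above (hypothetical helper, not the driver's real method; None means "leave the status unchanged"):

# Hypothetical sketch of the status transition in the hunk above; the real
# driver mutates nodegroup.status in place instead of returning a value.
def control_plane_status(action, replicas, updated_replicas, requested_count, ready):
    if updated_replicas != replicas:
        return f"{action}_IN_PROGRESS"
    if updated_replicas == replicas and requested_count == replicas and ready:
        # Only COMPLETE once the count requested through Magnum
        # (nodegroup.node_count) matches the actual replica count.
        return f"{action}_COMPLETE"
    return None  # leave the current status unchanged

assert control_plane_status("UPDATE", 3, 3, 5, True) is None
assert control_plane_status("UPDATE", 5, 5, 5, True) == "UPDATE_COMPLETE"
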
@@ -428,6 +431,11 @@ def delete_cluster(self, context, cluster: magnum_objects.Cluster):
         resources.Cluster(context, self.kube_client, self.k8s_api, cluster).delete()
         resources.ClusterAutoscalerHelmRelease(self.k8s_api, cluster).delete()
 
+    # magnum-cluster-api driver supports control plane resize
+    def validate_master_resize(self, node_count):
+        if node_count % 2 == 0 or node_count < 1:
+            raise magnum_exception.MasterNGSizeInvalid(requested_size=node_count)
+
     @cluster_lock_wrapper
     def create_nodegroup(
         self,
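
validate_master_resize is the piece the commit message calls "passing validation control plane resize": any even or non-positive count raises MasterNGSizeInvalid, so only odd sizes such as 1, 3, or 5 are accepted, which keeps etcd quorum well defined. A quick illustration of the same rule as a plain predicate (hypothetical helper, not part of the driver):

# Same parity/minimum rule as validate_master_resize above, expressed as a
# standalone predicate for illustration only.
def is_valid_master_count(node_count: int) -> bool:
    return node_count >= 1 and node_count % 2 != 0

# 0 and every even count are rejected; odd counts from 1 upward pass.
assert [n for n in range(7) if is_valid_master_count(n)] == [1, 3, 5]
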
@@ -577,24 +585,39 @@ def _update_nodegroup(
         nodegroup: magnum_objects.NodeGroup,
     ):
         utils.validate_nodegroup(nodegroup)
-        utils.ensure_worker_server_group(
-            ctx=context, cluster=cluster, node_group=nodegroup
-        )
 
         cluster_resource = objects.Cluster.for_magnum_cluster(self.k8s_api, cluster)
 
-        current_md_spec = cluster_resource.get_machine_deployment_spec(nodegroup.name)
-        target_md_spec = resources.mutate_machine_deployment(
-            context,
-            cluster,
-            nodegroup,
-            cluster_resource.get_machine_deployment_spec(nodegroup.name),
-        )
+        if nodegroup.role == "master":
+            current_count = cluster_resource.obj["spec"]["topology"]["controlPlane"][
+                "replicas"
+            ]
+            if current_count == nodegroup.node_count:
+                return
 
-        if current_md_spec == target_md_spec:
-            return
+            cluster_resource.obj["spec"]["topology"]["controlPlane"][
+                "replicas"
+            ] = nodegroup.node_count
+        else:
+            utils.ensure_worker_server_group(
+                ctx=context, cluster=cluster, node_group=nodegroup
+            )
+
+            current_md_spec = cluster_resource.get_machine_deployment_spec(
+                nodegroup.name
+            )
+            target_md_spec = resources.mutate_machine_deployment(
+                context,
+                cluster,
+                nodegroup,
+                cluster_resource.get_machine_deployment_spec(nodegroup.name),
+            )
+
+            if current_md_spec == target_md_spec:
+                return
+
+            cluster_resource.set_machine_deployment_spec(nodegroup.name, target_md_spec)
 
-        cluster_resource.set_machine_deployment_spec(nodegroup.name, target_md_spec)
         utils.kube_apply_patch(cluster_resource)
 
         nodegroup.status = fields.ClusterStatus.UPDATE_IN_PROGRESS
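
For a master node group the driver now skips the MachineDeployment path entirely: it rewrites spec.topology.controlPlane.replicas on the Cluster API Cluster object and applies the patch, leaving the actual control plane scaling to the topology controller. Worker node groups keep the previous flow (server group, mutate_machine_deployment, set_machine_deployment_spec). A rough sketch of the field being mutated, using a plain dict in place of the real objects.Cluster wrapper:

# Illustrative only: the nested field the master branch above rewrites.
# The real code goes through objects.Cluster and utils.kube_apply_patch.
cluster_obj = {
    "spec": {
        "topology": {
            "controlPlane": {"replicas": 3},
        }
    }
}

requested = 5  # nodegroup.node_count coming from the Magnum resize request
control_plane = cluster_obj["spec"]["topology"]["controlPlane"]
if control_plane["replicas"] != requested:
    control_plane["replicas"] = requested
    # ...the driver would then apply the patched manifest server-side.
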
