19 | 19 | from eventlet import tpool # type: ignore |
20 | 20 | from heatclient import exc # type: ignore |
21 | 21 | from magnum import objects as magnum_objects # type: ignore |
| 22 | +from magnum.common import exception as magnum_exception # type: ignore |
22 | 23 | from magnum.conductor import scale_manager # type: ignore |
23 | 24 | from magnum.drivers.common import driver # type: ignore |
24 | 25 | from magnum.objects import fields # type: ignore |
@@ -171,7 +172,9 @@ def update_cluster_control_plane_status( |
171 | 172 |
172 | 173 | if updated_replicas != replicas: |
173 | 174 | nodegroup.status = f"{action}_IN_PROGRESS" |
174 | | - elif updated_replicas == replicas and ready: |
| 175 | + elif ( |
| 176 | + updated_replicas == replicas and nodegroup.node_count == replicas and ready |
| 177 | + ): |
175 | 178 | nodegroup.status = f"{action}_COMPLETE" |
176 | 179 | nodegroup.status_reason = failure_message |
177 | 180 |
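The hunk above tightens the completion check: besides Cluster API reporting that all control plane replicas are updated and ready, the Magnum nodegroup's `node_count` must now also equal the replica count, so a pending resize is not prematurely marked `*_COMPLETE`. A minimal, hypothetical restatement of that decision (the helper below does not exist in the driver; the names mirror the surrounding code):

```python
from typing import Optional


def control_plane_status(action: str, updated_replicas: int, replicas: int,
                         node_count: int, ready: bool) -> Optional[str]:
    if updated_replicas != replicas:
        # Cluster API is still rolling out the control plane change.
        return f"{action}_IN_PROGRESS"
    if node_count == replicas and ready:
        # New guard from this change: only complete once Magnum's requested
        # node_count matches what Cluster API reports as updated and ready.
        return f"{action}_COMPLETE"
    return None  # leave the current nodegroup status untouched


assert control_plane_status("UPDATE", 3, 3, 3, True) == "UPDATE_COMPLETE"
assert control_plane_status("UPDATE", 3, 3, 5, True) is None  # resize to 5 still pending
```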
@@ -428,6 +431,11 @@ def delete_cluster(self, context, cluster: magnum_objects.Cluster): |
428 | 431 | resources.Cluster(context, self.kube_client, self.k8s_api, cluster).delete() |
429 | 432 | resources.ClusterAutoscalerHelmRelease(self.k8s_api, cluster).delete() |
430 | 433 |
| 434 | + # magnum-cluster-api driver supports control plane resize |
| 435 | + def validate_master_resize(self, node_count): |
| 436 | + if node_count % 2 == 0 or node_count < 1: |
| 437 | + raise magnum_exception.MasterNGSizeInvalid(requested_size=node_count) |
| 438 | + |
431 | 439 | @cluster_lock_wrapper |
432 | 440 | def create_nodegroup( |
433 | 441 | self, |
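The new `validate_master_resize` override signals that this driver accepts control plane resizes while rejecting sizes that could not keep a safe etcd quorum: only odd counts of at least one pass, anything else raises `MasterNGSizeInvalid`. A standalone sketch of the same rule, purely illustrative (the driver raises instead of returning a boolean):

```python
def is_valid_master_count(node_count: int) -> bool:
    # Mirrors the check above: reject even sizes and anything below one.
    return node_count >= 1 and node_count % 2 == 1


assert is_valid_master_count(1)
assert is_valid_master_count(3)
assert not is_valid_master_count(2)  # even counts are rejected
assert not is_valid_master_count(0)
```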
@@ -577,24 +585,39 @@ def _update_nodegroup( |
577 | 585 | nodegroup: magnum_objects.NodeGroup, |
578 | 586 | ): |
579 | 587 | utils.validate_nodegroup(nodegroup) |
580 | | - utils.ensure_worker_server_group( |
581 | | - ctx=context, cluster=cluster, node_group=nodegroup |
582 | | - ) |
583 | 588 |
584 | 589 | cluster_resource = objects.Cluster.for_magnum_cluster(self.k8s_api, cluster) |
585 | 590 |
586 | | - current_md_spec = cluster_resource.get_machine_deployment_spec(nodegroup.name) |
587 | | - target_md_spec = resources.mutate_machine_deployment( |
588 | | - context, |
589 | | - cluster, |
590 | | - nodegroup, |
591 | | - cluster_resource.get_machine_deployment_spec(nodegroup.name), |
592 | | - ) |
| 591 | + if nodegroup.role == "master": |
| 592 | + current_count = cluster_resource.obj["spec"]["topology"]["controlPlane"][ |
| 593 | + "replicas" |
| 594 | + ] |
| 595 | + if current_count == nodegroup.node_count: |
| 596 | + return |
593 | 597 |
594 | | - if current_md_spec == target_md_spec: |
595 | | - return |
| 598 | + cluster_resource.obj["spec"]["topology"]["controlPlane"][ |
| 599 | + "replicas" |
| 600 | + ] = nodegroup.node_count |
| 601 | + else: |
| 602 | + utils.ensure_worker_server_group( |
| 603 | + ctx=context, cluster=cluster, node_group=nodegroup |
| 604 | + ) |
| 605 | + |
| 606 | + current_md_spec = cluster_resource.get_machine_deployment_spec( |
| 607 | + nodegroup.name |
| 608 | + ) |
| 609 | + target_md_spec = resources.mutate_machine_deployment( |
| 610 | + context, |
| 611 | + cluster, |
| 612 | + nodegroup, |
| 613 | + cluster_resource.get_machine_deployment_spec(nodegroup.name), |
| 614 | + ) |
| 615 | + |
| 616 | + if current_md_spec == target_md_spec: |
| 617 | + return |
| 618 | + |
| 619 | + cluster_resource.set_machine_deployment_spec(nodegroup.name, target_md_spec) |
596 | 620 |
597 | | - cluster_resource.set_machine_deployment_spec(nodegroup.name, target_md_spec) |
598 | 621 | utils.kube_apply_patch(cluster_resource) |
599 | 622 |
600 | 623 | nodegroup.status = fields.ClusterStatus.UPDATE_IN_PROGRESS |
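For the `"master"` nodegroup, `_update_nodegroup` now skips the MachineDeployment path entirely and instead edits `spec.topology.controlPlane.replicas` on the Cluster API `Cluster` object, leaving the Cluster API topology controller to scale the control plane. A rough sketch of the resulting change, using a plain dict in place of the real `objects.Cluster` resource (field names follow the Cluster API v1beta1 topology schema):

```python
cluster_obj = {
    "apiVersion": "cluster.x-k8s.io/v1beta1",
    "kind": "Cluster",
    "spec": {
        "topology": {
            "controlPlane": {"replicas": 1},
            "workers": {"machineDeployments": []},
        },
    },
}

requested = 3  # nodegroup.node_count for the "master" nodegroup

# Same short-circuit as the branch above: do nothing if the count is unchanged.
if cluster_obj["spec"]["topology"]["controlPlane"]["replicas"] != requested:
    cluster_obj["spec"]["topology"]["controlPlane"]["replicas"] = requested
    # The driver then calls utils.kube_apply_patch(cluster_resource) to push
    # the change and marks the nodegroup UPDATE_IN_PROGRESS, as shown above.
```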