Skip to content
This repository was archived by the owner on Oct 16, 2024. It is now read-only.

Commit 55d0831

Browse files
authored
Revert previously undeployed changes (#348)
* Revert "add and emit pool owner metadata for alerting (#327)". This reverts commit 2595b5e.
* Revert "CLUSTERMAN-812: upgrade k8s client library (#334)". This reverts commit 6c4b8bb.
1 parent 50a569c commit 55d0831

File tree

13 files changed

+17
-42
lines changed

13 files changed

+17
-42
lines changed

acceptance/srv-configs/clusterman-clusters/local-dev/default.kubernetes

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,4 +21,3 @@ autoscaling:
2121
instance_loss_threshold: 3
2222

2323
alert_on_max_capacity: false
24-
pool_owner: compute_infra

acceptance/srv-configs/clusterman-clusters/local-dev/default.mesos

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,4 +29,3 @@ autoscale_signal:
2929
minute_range: 10
3030

3131
alert_on_max_capacity: false
32-
pool_owner: compute_infra

clusterman/autoscaler/autoscaler.py

Lines changed: 7 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -178,9 +178,12 @@ def run(self, dry_run: bool = False, timestamp: Optional[arrow.Arrow] = None) ->
178178
else:
179179
capacity_offset = get_capacity_offset(self.cluster, self.pool, self.scheduler, timestamp)
180180
new_target_capacity = self._compute_target_capacity(resource_request) + capacity_offset
181-
self.target_capacity_gauge.set(new_target_capacity, self.add_metric_labels(dry_run))
182-
self.max_capacity_gauge.set(self.pool_manager.max_capacity, self.add_metric_labels(dry_run))
183-
self.setpoint_gauge.set(self.autoscaling_config.setpoint, self.add_metric_labels(dry_run))
181+
self.target_capacity_gauge.set(new_target_capacity, {"dry_run": dry_run})
182+
self.max_capacity_gauge.set(
183+
self.pool_manager.max_capacity,
184+
{"dry_run": dry_run, "alert_on_max_capacity": self.pool_manager.alert_on_max_capacity},
185+
)
186+
self.setpoint_gauge.set(self.autoscaling_config.setpoint, {"dry_run": dry_run})
184187
self._emit_requested_resource_metrics(resource_request, dry_run=dry_run)
185188

186189
try:
@@ -199,14 +202,7 @@ def run(self, dry_run: bool = False, timestamp: Optional[arrow.Arrow] = None) ->
199202
def _emit_requested_resource_metrics(self, resource_request: SignalResourceRequest, dry_run: bool) -> None:
200203
for resource_type, resource_gauge in self.resource_request_gauges.items():
201204
if getattr(resource_request, resource_type) is not None:
202-
resource_gauge.set(getattr(resource_request, resource_type), self.add_metric_labels(dry_run))
203-
204-
def add_metric_labels(self, dry_run):
205-
return {
206-
"dry_run": dry_run,
207-
"alert_on_max_capacity": self.pool_manager.alert_on_max_capacity,
208-
"team": self.pool_manager.pool_owner,
209-
}
205+
resource_gauge.set(getattr(resource_request, resource_type), {"dry_run": dry_run})
210206

211207
def _get_signal_for_app(self, app: str) -> Signal:
212208
"""Load the signal object to use for autoscaling for a particular app

clusterman/autoscaler/pool_manager.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,6 @@ def __init__(
8686
"autoscaling.killable_nodes_prioritizing_v2", default=False
8787
)
8888
self.alert_on_max_capacity = self.pool_config.read_bool("alert_on_max_capacity", default=True)
89-
self.pool_owner = self.pool_config.read_string("pool_owner", default="compute_infra")
9089
monitoring_info = {"cluster": cluster, "pool": pool}
9190
self.killable_nodes_counter = get_monitoring_client().create_counter(SFX_KILLABLE_NODES_COUNT, monitoring_info)
9291

clusterman/kubernetes/kubernetes_cluster_connector.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,8 @@
2424
import colorlog
2525
import kubernetes
2626
import staticconf
27+
from kubernetes.client import V1beta1Eviction
2728
from kubernetes.client import V1DeleteOptions
28-
from kubernetes.client import V1Eviction
2929
from kubernetes.client import V1ObjectMeta
3030
from kubernetes.client.models.v1_node import V1Node as KubernetesNode
3131
from kubernetes.client.models.v1_pod import V1Pod as KubernetesPod
@@ -356,7 +356,7 @@ def _evict_pod(self, pod: KubernetesPod):
356356
self._core_api.create_namespaced_pod_eviction(
357357
name=pod.metadata.name,
358358
namespace=pod.metadata.namespace,
359-
body=V1Eviction(
359+
body=V1beta1Eviction(
360360
metadata=V1ObjectMeta(
361361
name=pod.metadata.name,
362362
namespace=pod.metadata.namespace,

clusterman/kubernetes/util.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,6 @@
3030
from kubernetes.client.models.v1_node_selector_requirement import V1NodeSelectorRequirement
3131
from kubernetes.client.models.v1_node_selector_term import V1NodeSelectorTerm
3232
from kubernetes.client.models.v1_pod import V1Pod as KubernetesPod
33-
from kubernetes.config.config_exception import ConfigException
3433

3534
from clusterman.util import ClustermanResources
3635

@@ -73,7 +72,7 @@ def __init__(self, kubeconfig_path: str, client_class: Type) -> None:
7372
kubernetes.config.load_incluster_config()
7473
else:
7574
kubernetes.config.load_kube_config(kubeconfig_path, context=os.getenv("KUBECONTEXT"))
76-
except (TypeError, ConfigException):
75+
except TypeError:
7776
error_msg = "Could not load KUBECONFIG; is this running on Kubernetes master?"
7877
if "yelpcorp" in socket.getfqdn():
7978
error_msg += "\nHint: try using the clusterman-k8s-<clustername> wrapper script!"

clusterman/simulator/simulated_pool_manager.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,6 @@ def __init__(
5959
MAX_MIN_NODE_SCALEIN_UPTIME_SECONDS,
6060
)
6161
self.alert_on_max_capacity = self.pool_config.read_bool("alert_on_max_capacity", default=True)
62-
self.pool_owner = self.pool_config.read_string("pool_owner", default="compute_infra")
6362
self.killable_nodes_prioritizing_v2 = self.pool_config.read_bool(
6463
"autoscaling.killable_nodes_prioritizing_v2", default=False
6564
)

examples/schemas/pool.json

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -64,8 +64,7 @@
6464
"additionalProperties": false
6565
},
6666
"sensu_config": {"$ref": "definitions.json#sensu_config"},
67-
"alert_on_max_capacity": {"type": "boolean"},
68-
"pool_owner": {"type": "string"}
67+
"alert_on_max_capacity": {"type": "boolean"}
6968
},
7069
"additionalProperties": false
7170
}

itests/environment.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -121,7 +121,6 @@ def setup_configurations(context):
121121
],
122122
},
123123
"alert_on_max_capacity": True,
124-
"pool_owner": "compute_infra",
125124
}
126125
kube_pool_config = {
127126
"resource_groups": [
@@ -145,7 +144,6 @@ def setup_configurations(context):
145144
"period_minutes": 7,
146145
},
147146
"alert_on_max_capacity": True,
148-
"pool_owner": "compute_infra",
149147
}
150148
with staticconf.testing.MockConfiguration(
151149
boto_config, namespace=CREDENTIALS_NAMESPACE

requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ idna==2.8
1616
jmespath==0.9.4
1717
jsonpickle==1.4.2
1818
kiwisolver==1.1.0
19-
kubernetes==24.2.0
19+
kubernetes==10.0.1
2020
matplotlib==3.4.2
2121
mypy-extensions==0.4.3
2222
numpy==1.21.6

0 commit comments

Comments (0)