@@ -1,7 +1,9 @@
 import logging
 import logging.handlers
 import queue
+import re
 import uuid
+from datetime import timedelta
 from typing import List, cast
 from urllib.request import urlopen

@@ -16,7 +18,7 @@
     TelemetryFilter,
 )
 from temporalio.worker import Worker
-from tests.helpers import assert_eq_eventually, find_free_port
+from tests.helpers import assert_eq_eventually, assert_eventually, find_free_port


 @workflow.defn
@@ -181,3 +183,74 @@ async def has_log() -> bool:
     assert record.levelno == logging.WARNING
     assert record.name == f"{logger.name}-sdk_core::temporal_sdk_core::worker::workflow"
     assert record.temporal_log.fields["run_id"] == handle.result_run_id  # type: ignore
+
+
+async def test_prometheus_histogram_bucket_overrides(client: Client):
+    # Set up a Prometheus configuration with custom histogram bucket overrides
+    prom_addr = f"127.0.0.1:{find_free_port()}"
+    special_value = 1234.5678
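+    # temporal_long_request_latency is emitted by the SDK itself; custom_histogram
+    # is the user-defined metric created below, so both kinds get overrides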
+    histogram_overrides = {
+        "temporal_long_request_latency": [special_value / 2, special_value],
+        "custom_histogram": [special_value / 2, special_value],
+    }
+
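+    # Creating the runtime starts the Prometheus exporter on bind_address, so
+    # everything recorded through it is scrapeable at http://{prom_addr}/metrics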
+    runtime = Runtime(
+        telemetry=TelemetryConfig(
+            metrics=PrometheusConfig(
+                bind_address=prom_addr,
+                counters_total_suffix=False,
+                unit_suffix=False,
+                durations_as_seconds=False,
+                histogram_bucket_overrides=histogram_overrides,
+            ),
+        ),
+    )
+
+    # Create a custom histogram metric
+    custom_histogram = runtime.metric_meter.create_histogram(
+        "custom_histogram", "Custom histogram", "ms"
+    )
+
+    # Record a value to the custom histogram
+    custom_histogram.record(600)
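+    # (600 falls in the first override bucket, since 1234.5678 / 2 = 617.2839)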
+
+    # Create client with overrides
+    client_with_overrides = await Client.connect(
+        client.service_client.config.target_host,
+        namespace=client.namespace,
+        runtime=runtime,
+    )
+
+    async def run_workflow(client: Client):
+        task_queue = f"task-queue-{uuid.uuid4()}"
+        async with Worker(
+            client,
+            task_queue=task_queue,
+            workflows=[HelloWorkflow],
+        ):
+            assert "Hello, World!" == await client.execute_workflow(
+                HelloWorkflow.run,
+                "World",
+                id=f"workflow-{uuid.uuid4()}",
+                task_queue=task_queue,
+            )
+
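+    # Running a workflow makes the worker issue long-poll requests, which record
+    # samples into the SDK's temporal_long_request_latency histogram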
+    await run_workflow(client_with_overrides)
+
+    async def check_metrics() -> None:
+        with urlopen(url=f"http://{prom_addr}/metrics") as f:
+            metrics_output = f.read().decode("utf-8")
+
+        for key, buckets in histogram_overrides.items():
+            assert (
+                key in metrics_output
+            ), f"Missing {key} in full output: {metrics_output}"
+            for bucket in buckets:
+                # Expect {key}_bucket and le="{bucket}" on the same line, with
+                # arbitrary text between them
+                regex = re.compile(f'{key}_bucket.*le="{bucket}"')
+                assert regex.search(
+                    metrics_output
+                ), f"Missing bucket for {key} in full output: {metrics_output}"
+
+    # Wait for metrics to appear and match the expected buckets
+    await assert_eventually(check_metrics)
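Note: for reference, a sketch of the exposition lines the bucket regex above is
meant to match, assuming only the single record(600) call and with exporter
labels elided (the "..." placeholders are illustrative, not literal output):

    custom_histogram_bucket{..., le="617.2839"} 1
    custom_histogram_bucket{..., le="1234.5678"} 1
    custom_histogram_bucket{..., le="+Inf"} 1
    custom_histogram_sum 600
    custom_histogram_count 1

The "+Inf" bucket and the _sum/_count series are standard Prometheus histogram
output; only the finite le boundaries come from histogram_bucket_overrides.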