Skip to content

Commit 1e8e78e

Browse files
committed
fix: resolve mypy typecheck errors in UI modules
Add type annotations and casts to fix 22 mypy errors across ui/state.py, ui/api_client.py, ui/components/slo.py, and ui/components/recommendations.py. Signed-off-by: Andre Fredette <afredette@redhat.com>
1 parent a4658fc commit 1e8e78e

File tree

4 files changed

+27
-22
lines changed

4 files changed

+27
-22
lines changed

ui/api_client.py

Lines changed: 19 additions & 18 deletions
Original file line number | Diff line number | Diff line change
@@ -6,6 +6,7 @@
66
import contextlib
77
import logging
88
import os
9+
from typing import Any, cast
910

1011
import pandas as pd
1112
import requests
@@ -62,7 +63,7 @@ def fetch_slo_defaults(use_case: str) -> dict | None:
6263
response.raise_for_status()
6364
data = response.json()
6465
if data.get("success"):
65-
return data.get("slo_defaults")
66+
return cast(dict[Any, Any] | None, data.get("slo_defaults"))
6667
return None
6768
except Exception as e:
6869
logger.warning(f"Failed to fetch SLO defaults for {use_case}: {e}")
@@ -89,7 +90,7 @@ def fetch_expected_rps(use_case: str, user_count: int) -> dict | None:
8990
response.raise_for_status()
9091
data = response.json()
9192
if data.get("success"):
92-
return data
93+
return cast(dict[Any, Any], data)
9394
return None
9495
except Exception as e:
9596
logger.warning(f"Failed to fetch expected RPS for {use_case}: {e}")
@@ -116,7 +117,7 @@ def fetch_workload_profile(use_case: str) -> dict | None:
116117
response.raise_for_status()
117118
data = response.json()
118119
if data.get("success"):
119-
return data.get("workload_profile")
120+
return cast(dict[Any, Any] | None, data.get("workload_profile"))
120121
return None
121122
except Exception as e:
122123
logger.warning(f"Failed to fetch workload profile for {use_case}: {e}")
@@ -138,7 +139,7 @@ def fetch_priority_weights() -> dict | None:
138139
response.raise_for_status()
139140
data = response.json()
140141
if data.get("success"):
141-
return data.get("priority_weights")
142+
return cast(dict[Any, Any] | None, data.get("priority_weights"))
142143
return None
143144
except Exception as e:
144145
logger.warning(f"Failed to fetch priority weights: {e}")
@@ -154,10 +155,10 @@ def fetch_ranked_recommendations(
154155
ttft_target_ms: int,
155156
itl_target_ms: int,
156157
e2e_target_ms: int,
157-
weights: dict = None,
158+
weights: dict[Any, Any] | None = None,
158159
include_near_miss: bool = False,
159160
percentile: str = "p95",
160-
preferred_gpu_types: list[str] = None,
161+
preferred_gpu_types: list[str] | None = None,
161162
) -> dict | None:
162163
"""Fetch ranked recommendations from the backend API.
163164
@@ -205,7 +206,7 @@ def fetch_ranked_recommendations(
205206
timeout=30,
206207
)
207208
response.raise_for_status()
208-
return response.json()
209+
return cast(dict[Any, Any], response.json())
209210
except requests.exceptions.RequestException as e:
210211
st.error(f"Failed to fetch ranked recommendations: {e}")
211212
return None
@@ -230,7 +231,7 @@ def extract_business_context(user_input: str) -> dict | None:
230231
gpu_list = result["preferred_gpu_types"]
231232
result["hardware"] = ", ".join(gpu_list) if gpu_list else None
232233
logger.info(f"LLM extraction successful: {result.get('use_case')}")
233-
return result
234+
return cast(dict[Any, Any], result)
234235
else:
235236
logger.warning(
236237
f"LLM extraction API returned status {response.status_code}: {response.text[:200]}"
@@ -293,7 +294,7 @@ def check_cluster_status() -> dict:
293294
timeout=5,
294295
)
295296
if response.status_code == 200:
296-
return response.json()
297+
return cast(dict[Any, Any], response.json())
297298
return {"accessible": False}
298299
except Exception:
299300
return {"accessible": False}
@@ -311,7 +312,7 @@ def load_all_deployments() -> list | None:
311312
)
312313
if response.status_code == 200:
313314
data = response.json()
314-
return data.get("deployments", [])
315+
return cast(list[Any], data.get("deployments", []))
315316
elif response.status_code == 503:
316317
return None
317318
else:
@@ -336,7 +337,7 @@ def deploy_to_cluster(recommendation: dict, namespace: str = "default") -> dict:
336337
timeout=60,
337338
)
338339
if response.status_code == 200:
339-
return response.json()
340+
return cast(dict[Any, Any], response.json())
340341
elif response.status_code == 503:
341342
return {"success": False, "message": "Kubernetes cluster not accessible"}
342343
else:
@@ -358,7 +359,7 @@ def delete_deployment(deployment_id: str) -> dict:
358359
timeout=30,
359360
)
360361
if response.status_code == 200:
361-
return response.json()
362+
return cast(dict[Any, Any], response.json())
362363
else:
363364
return {"success": False, "message": response.text}
364365
except Exception as e:
@@ -376,7 +377,7 @@ def get_k8s_status(deployment_id: str) -> dict | None:
376377
timeout=DEFAULT_TIMEOUT,
377378
)
378379
if response.status_code == 200:
379-
return response.json()
380+
return cast(dict[Any, Any], response.json())
380381
return None
381382
except Exception as e:
382383
logger.error(f"Failed to get K8s status for {deployment_id}: {e}")
@@ -396,7 +397,7 @@ def fetch_deployment_mode() -> str | None:
396397
try:
397398
response = requests.get(f"{API_BASE_URL}/api/v1/deployment-mode", timeout=DEFAULT_TIMEOUT)
398399
response.raise_for_status()
399-
return response.json().get("mode")
400+
return cast(str | None, response.json().get("mode"))
400401
except Exception as e:
401402
logger.error(f"Failed to fetch deployment mode: {e}")
402403
return None
@@ -414,7 +415,7 @@ def update_deployment_mode(mode: str) -> dict | None:
414415
timeout=DEFAULT_TIMEOUT,
415416
)
416417
response.raise_for_status()
417-
return response.json()
418+
return cast(dict[Any, Any], response.json())
418419
except Exception as e:
419420
logger.error(f"Failed to set deployment mode: {e}")
420421
return None
@@ -436,7 +437,7 @@ def fetch_db_status() -> dict | None:
436437
timeout=DEFAULT_TIMEOUT,
437438
)
438439
response.raise_for_status()
439-
return response.json()
440+
return cast(dict[Any, Any], response.json())
440441
except Exception as e:
441442
logger.error(f"Failed to fetch DB status: {e}")
442443
return None
@@ -459,7 +460,7 @@ def upload_benchmarks(file_bytes: bytes, filename: str) -> dict | None:
459460
timeout=60,
460461
)
461462
response.raise_for_status()
462-
return response.json()
463+
return cast(dict[Any, Any], response.json())
463464
except requests.exceptions.HTTPError as e:
464465
detail = ""
465466
with contextlib.suppress(Exception):
@@ -483,7 +484,7 @@ def reset_database() -> dict | None:
483484
timeout=30,
484485
)
485486
response.raise_for_status()
486-
return response.json()
487+
return cast(dict[Any, Any], response.json())
487488
except Exception as e:
488489
logger.error(f"Database reset failed: {e}")
489490
return {"success": False, "message": str(e)}

ui/components/recommendations.py

Lines changed: 3 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -3,6 +3,8 @@
33
Category cards, top-5 table, options list, and recommendation results.
44
"""
55

6+
from typing import Any
7+
68
import streamlit as st
79
import streamlit.components.v1 as components
810
from api_client import deploy_and_generate_yaml
@@ -436,7 +438,7 @@ def render_recommendation_result(result: dict, priority: str, extraction: dict):
436438
st.markdown("---")
437439

438440
# Get all recommendations for the cards
439-
all_recs = []
441+
all_recs: list[dict[str, Any]] = []
440442
for cat in ["balanced", "best_accuracy", "lowest_cost", "lowest_latency", "simplest"]:
441443
cat_recs = (
442444
st.session_state.ranked_response.get(cat, [])

ui/components/slo.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -70,7 +70,7 @@ def get_workload_insights(use_case: str, qps: int, user_count: int) -> list:
7070
7171
Returns list of tuples: (icon, color, message, severity)
7272
"""
73-
messages = []
73+
messages: list[tuple[str, str, str, str]] = []
7474

7575
workload_profile = fetch_workload_profile(use_case)
7676
if not workload_profile:
@@ -266,7 +266,7 @@ def get_weight_for_priority(dimension: str, priority_level: str) -> int:
266266
logger.info(
267267
f"get_weight_for_priority({dimension}, {priority_level}): pw={pw}, weight={weight}"
268268
)
269-
return weight
269+
return int(weight)
270270

271271
extraction = st.session_state.get("extraction_result", {})
272272

ui/state.py

Lines changed: 3 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1,8 +1,10 @@
11
"""Session state defaults and initialization for the NeuralNav UI."""
22

3+
from typing import Any
4+
35
import streamlit as st
46

5-
SESSION_DEFAULTS = {
7+
SESSION_DEFAULTS: dict[str, Any] = {
68
# Core workflow state
79
"extraction_result": None,
810
"recommendation_result": None,

0 commit comments

Comments (0)