Skip to content

Commit 30a6621

Browse files
committed
fix pre-commit errors
1 parent 2e18ed6 commit 30a6621

File tree

4 files changed

+14
-15
lines changed

4 files changed

+14
-15
lines changed

policy_analysis/dspy_policies_and_taxonomy_extraction/policy_extraction/policy_dspy_model_creation.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -134,6 +134,6 @@ def __call__(self,example, pred, trace=None):
134134
print(f"Final Score on Validation Set (optimized): {optimized_score}%")
135135

136136
# --- Saving the optimized model ---
137-
model_path = f"saved_dspy_model/policy_model/"
137+
model_path = "saved_dspy_model/policy_model/"
138138
compiled_program.save(model_path,save_program=True)
139139
print(f"Optimized model saved to {model_path}")

policy_analysis/dspy_policies_and_taxonomy_extraction/taxonomy_extraction/classify_data_with_trained_model.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@
55
from datasets import load_dataset
66
import pyarrow as pa
77
import pyarrow.parquet as pq
8-
from itertools import islice
98

109
# ---------------------------------------------------------------------
1110
# 0. Config

policy_analysis/dspy_policies_and_taxonomy_extraction/taxonomy_extraction/geography_only_dspy_model_creation.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -270,7 +270,7 @@ def geography_f1_metric(example, pred, trace=None):
270270
print("Final Geography F1:", score)
271271

272272
# Save results
273-
optimized_score = evaluator(compiled_program, save_as_json=f"results.json")
273+
optimized_score = evaluator(compiled_program, save_as_json="results.json")
274274

275275
# ---------------------------------------------------------------------
276276
# 8. SAVE MODEL

policy_analysis/dspy_policies_and_taxonomy_extraction/taxonomy_extraction/impact_classification_model_training.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -16,12 +16,6 @@
1616

1717
from sentence_transformers import SentenceTransformer
1818

19-
# ---------------------------------------------------------------------
20-
# PATH SETUP
21-
# ---------------------------------------------------------------------
22-
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))
23-
load_dotenv()
24-
2519
# ---------------------------------------------------------------------
2620
# 0. IMPORT TAXONOMY ENUMS
2721
# ---------------------------------------------------------------------
@@ -33,6 +27,12 @@
3327
Planetary_boundaries,
3428
)
3529

30+
# ---------------------------------------------------------------------
31+
# PATH SETUP
32+
# ---------------------------------------------------------------------
33+
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))
34+
load_dotenv()
35+
3636
# ---------------------------------------------------------------------
3737
# ENUM NORMALIZATION
3838
# ---------------------------------------------------------------------
@@ -73,13 +73,13 @@ def normalize_labels(labels, allowed_enum, allow_unknown=False):
7373
labels = [labels]
7474

7575
cleaned = []
76-
for l in labels:
77-
if not l:
76+
for label in labels:
77+
if not label:
7878
continue
7979

8080
# --- FIX: FORCE SNAKE_CASE ---
8181
# 1. Lowercase and strip
82-
l_norm = l.lower().strip()
82+
l_norm = label.lower().strip()
8383
# 2. Replace spaces and hyphens with underscores
8484
# (e.g., "Climate Change" -> "climate_change", "Land-System Change" -> "land_system_change")
8585
l_norm = re.sub(r"[\s\-]+", "_", l_norm)
@@ -91,7 +91,7 @@ def normalize_labels(labels, allowed_enum, allow_unknown=False):
9191
cleaned.append(l_norm)
9292
# Optional: Print warning if label is rejected (good for debugging)
9393
# else:
94-
# print(f"Warning: Label '{l}' normalized to '{l_norm}' not found in Enum.")
94+
# print(f"Warning: Label '{label}' normalized to '{l_norm}' not found in Enum.")
9595

9696
if not cleaned and allow_unknown:
9797
return ["unknown"]
@@ -255,8 +255,8 @@ def load_taxonomy_jsonl(path, source_name):
255255
y_true_labels = mlb_dict[field].inverse_transform(y_true)
256256
y_pred_labels = mlb_dict[field].inverse_transform(y_pred)
257257

258-
results[f"{field}_true"] = [list(l) for l in y_true_labels]
259-
results[f"{field}_pred"] = [list(l) for l in y_pred_labels]
258+
results[f"{field}_true"] = [list(label) for label in y_true_labels]
259+
results[f"{field}_pred"] = [list(label) for label in y_pred_labels]
260260

261261
f1 = f1_score(y_true, y_pred, average="micro")
262262
f1_scores[field] = round(f1, 4)

0 commit comments

Comments (0)