Skip to content

Commit b48d6d0

Browse files
committed
fix(lint): update ruff config and pin to latest version
- Add UP045 to ignored rules (Optional[X] -> X | None modernization)
- Update pre-commit hooks to latest versions:
  - ruff v0.8.4 -> v0.14.9
  - pre-commit-hooks v5.0.0 -> v6.0.0
  - codespell v2.3.0 -> v2.4.1
- Pin ruff==0.14.9 in CI workflow for consistency
- Apply minor formatting changes from newer ruff version
1 parent 6cc5566 commit b48d6d0

File tree

9 files changed

+42
-41
lines changed

9 files changed

+42
-41
lines changed

.github/workflows/test.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ jobs:
8080
- name: Install Ruff
8181
run: |
8282
python -m pip install --upgrade pip
83-
pip install ruff
83+
pip install ruff==0.14.9
8484
8585
- name: Check linting with Ruff
8686
run: |

.pre-commit-config.yaml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
repos:
22
# Basic file fixes
33
- repo: https://github.com/pre-commit/pre-commit-hooks
4-
rev: v5.0.0
4+
rev: v6.0.0
55
hooks:
66
- id: trailing-whitespace
77
- id: end-of-file-fixer
@@ -16,15 +16,15 @@ repos:
1616

1717
# Ruff - replaces black, isort, flake8, and all their plugins
1818
- repo: https://github.com/astral-sh/ruff-pre-commit
19-
rev: v0.8.4
19+
rev: v0.14.9
2020
hooks:
2121
- id: ruff
2222
args: [--fix, --exit-non-zero-on-fix]
2323
- id: ruff-format
2424

2525
# Spell checking (catches typos in code/comments)
2626
- repo: https://github.com/codespell-project/codespell
27-
rev: v2.3.0
27+
rev: v2.4.1
2828
hooks:
2929
- id: codespell
3030
args: [--skip, "*.ipynb,*.html,*.css,*.js"]

example/parallel_ensemble_demo.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ def train_sequential_ensemble(locator_seq, genotypes, samples, k_folds, output_d
5656

5757
seq_time = time.time() - start_time
5858
print(f"\nSequential training completed in {seq_time:.1f} seconds")
59-
print(f"Average time per fold: {seq_time/k_folds:.1f} seconds")
59+
print(f"Average time per fold: {seq_time / k_folds:.1f} seconds")
6060

6161
# Get averaged normalization parameters
6262
seq_norm = seq_result["normalization_params"]
@@ -91,7 +91,7 @@ def train_parallel_ensemble(
9191
par_time = time.time() - start_time
9292

9393
print(f"\nParallel training completed in {par_time:.1f} seconds")
94-
print(f"Average time per fold: {par_time/k_folds:.1f} seconds")
94+
print(f"Average time per fold: {par_time / k_folds:.1f} seconds")
9595

9696
# Get averaged normalization parameters
9797
par_norm = par_result["normalization_params"]

locator/cli.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -290,7 +290,7 @@ def main(): # noqa: C901
290290

291291
# Report runtime
292292
end = time.time()
293-
print(f"Run time: {(end-start)/60:.2f} minutes")
293+
print(f"Run time: {(end - start) / 60:.2f} minutes")
294294

295295
return 0
296296

locator/parallel/parallel_analysis.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -413,7 +413,7 @@ def parallel_k_fold_holdouts( # noqa: C901
413413

414414
if verbose:
415415
print(
416-
f"\nCompleted {k}-fold CV in {total_time:.1f}s ({total_time/k:.1f}s per fold)"
416+
f"\nCompleted {k}-fold CV in {total_time:.1f}s ({total_time / k:.1f}s per fold)"
417417
)
418418

419419
# Restore original bandwidth setting if we changed it
@@ -902,7 +902,7 @@ def parallel_holdouts( # noqa: C901
902902

903903
if verbose:
904904
print(
905-
f"\nCompleted {n_reps} replicates in {total_time:.1f}s ({total_time/n_reps:.1f}s per replicate)"
905+
f"\nCompleted {n_reps} replicates in {total_time:.1f}s ({total_time / n_reps:.1f}s per replicate)"
906906
)
907907

908908
# Restore original bandwidth setting if we changed it
@@ -1450,7 +1450,7 @@ def parallel_windows_holdouts( # noqa: C901
14501450
)
14511451

14521452
if verbose and len(windows) > 10:
1453-
print(f"... and {len(windows)-10} more windows")
1453+
print(f"... and {len(windows) - 10} more windows")
14541454

14551455
# Wait for all windows to complete with progress bar
14561456
if verbose:
@@ -1485,7 +1485,7 @@ def parallel_windows_holdouts( # noqa: C901
14851485

14861486
if verbose:
14871487
print(
1488-
f"\nCompleted {len(windows)} windows in {total_time:.1f}s ({total_time/len(windows):.1f}s per window)"
1488+
f"\nCompleted {len(windows)} windows in {total_time:.1f}s ({total_time / len(windows):.1f}s per window)"
14891489
)
14901490

14911491
# Show GPU utilization summary
@@ -1497,7 +1497,7 @@ def parallel_windows_holdouts( # noqa: C901
14971497
print("\nGPU utilization:")
14981498
for gpu_id in sorted(gpu_counts.keys()):
14991499
print(
1500-
f" GPU {gpu_id}: {gpu_counts[gpu_id]} windows ({gpu_counts[gpu_id]/len(windows)*100:.1f}%)"
1500+
f" GPU {gpu_id}: {gpu_counts[gpu_id]} windows ({gpu_counts[gpu_id] / len(windows) * 100:.1f}%)"
15011501
)
15021502

15031503
# Restore original bandwidth setting if we changed it
@@ -1884,7 +1884,7 @@ def parallel_train_ensemble( # noqa: C901
18841884

18851885
if verbose:
18861886
print(
1887-
f"\nCompleted ensemble training in {total_time:.1f}s ({total_time/k:.1f}s per fold)"
1887+
f"\nCompleted ensemble training in {total_time:.1f}s ({total_time / k:.1f}s per fold)"
18881888
)
18891889

18901890
# Show speedup vs sequential

locator/plotting.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -552,8 +552,8 @@ def plot_error_summary( # noqa: C901
552552
f"Mean error: {merged['error'].mean():.2f} {error_units}\n"
553553
f"Median error: {merged['error'].median():.2f} {error_units}\n"
554554
f"Max error: {merged['error'].max():.2f} {error_units}\n"
555-
f"R² (x): {np.corrcoef(merged['x_pred'], merged['x_true'])[0,1]**2:.3f}\n"
556-
f"R² (y): {np.corrcoef(merged['y_pred'], merged['y_true'])[0,1]**2:.3f}"
555+
f"R² (x): {np.corrcoef(merged['x_pred'], merged['x_true'])[0, 1] ** 2:.3f}\n"
556+
f"R² (y): {np.corrcoef(merged['y_pred'], merged['y_true'])[0, 1] ** 2:.3f}"
557557
)
558558
hist_ax.text(
559559
0.95,
@@ -1313,7 +1313,7 @@ def _repr_html_(self): # noqa: C901
13131313
if k in self.config["weight_samples"].keys():
13141314
if self.config["weight_samples"][k] is not None:
13151315
html.append(
1316-
f"<tr><td style='padding:5px'>{'weight_samples '+k}</td>"
1316+
f"<tr><td style='padding:5px'>{'weight_samples ' + k}</td>"
13171317
f"<td style='padding:5px'>{self.config['weight_samples'][k]}</td></tr>"
13181318
)
13191319

locator/training.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -318,14 +318,14 @@ def train( # noqa: C901
318318
if self.config.get("verbose_splits", False):
319319
print("\nData split summary:")
320320
print(
321-
f" Training samples: {len(train)} ({len(train)/len(samples)*100:.1f}%)"
321+
f" Training samples: {len(train)} ({len(train) / len(samples) * 100:.1f}%)"
322322
)
323323
print(
324-
f" Validation samples: {len(test)} ({len(test)/len(samples)*100:.1f}%)"
324+
f" Validation samples: {len(test)} ({len(test) / len(samples) * 100:.1f}%)"
325325
)
326326
if len(pred) > 0:
327327
print(
328-
f" Prediction samples (no coords): {len(pred)} ({len(pred)/len(samples)*100:.1f}%)"
328+
f" Prediction samples (no coords): {len(pred)} ({len(pred) / len(samples) * 100:.1f}%)"
329329
)
330330
print(f" Total samples: {len(samples)}")
331331
print(f" Total SNPs: {self.filtered_genotypes.shape[0]}")
@@ -548,13 +548,13 @@ def train_holdout( # noqa: C901
548548
if self.config.get("verbose_splits", False):
549549
print("\nHoldout split summary:")
550550
print(
551-
f" Training samples: {len(train_indices)} ({len(train_indices)/len(samples)*100:.1f}%)"
551+
f" Training samples: {len(train_indices)} ({len(train_indices) / len(samples) * 100:.1f}%)"
552552
)
553553
print(
554-
f" Validation samples: {len(test_indices)} ({len(test_indices)/len(samples)*100:.1f}%)"
554+
f" Validation samples: {len(test_indices)} ({len(test_indices) / len(samples) * 100:.1f}%)"
555555
)
556556
print(
557-
f" Holdout samples: {len(holdout_idx)} ({len(holdout_idx)/len(samples)*100:.1f}%)"
557+
f" Holdout samples: {len(holdout_idx)} ({len(holdout_idx) / len(samples) * 100:.1f}%)"
558558
)
559559
print(f" Total samples: {len(samples)}")
560560
print(f" Total SNPs: {self.filtered_genotypes.shape[0]}")
@@ -739,12 +739,12 @@ def _create_model(self, input_shape):
739739
"""Create neural network model. Extracted to avoid duplication."""
740740
loss_fn = None
741741
if self.config.get("use_range_penalty"):
742-
assert (
743-
self.config.get("species_range_shapefile") is not None
744-
), "species_range_shapefile must be provided if use_range_penalty is True"
745-
assert (
746-
self.config.get("resolution") is not None
747-
), "resolution must be provided if use_range_penalty is True"
742+
assert self.config.get("species_range_shapefile") is not None, (
743+
"species_range_shapefile must be provided if use_range_penalty is True"
744+
)
745+
assert self.config.get("resolution") is not None, (
746+
"resolution must be provided if use_range_penalty is True"
747+
)
748748

749749
mask_tensor, mask_transform = rasterize_species_range(
750750
self.config["species_range_shapefile"],

pyproject.toml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,8 @@ ignore = [
126126
"B904", # raise from err/None in except clause
127127
# pyupgrade (type annotation modernization - requires Python 3.10+)
128128
"UP006", # use tuple instead of Tuple
129-
"UP007", # use X | Y instead of Optional[X]
129+
"UP007", # use X | Y instead of Union[X, Y]
130+
"UP045", # use X | None instead of Optional[X]
130131
]
131132

132133
[tool.ruff.lint.per-file-ignores]

tests/test_separate_mode_predict_all.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -70,16 +70,16 @@ def test_separate_mode_predicts_all_samples(self, tmp_path):
7070
predictions = locator.predict(return_df=True, save_preds_to_disk=False)
7171

7272
# Check that we got predictions for ALL samples
73-
assert (
74-
len(predictions) == 20
75-
), f"Expected 20 predictions but got {len(predictions)}"
73+
assert len(predictions) == 20, (
74+
f"Expected 20 predictions but got {len(predictions)}"
75+
)
7676

7777
# Check that predictions include all sample IDs
7878
pred_sample_ids = set(predictions["sampleID"])
7979
all_sample_ids = set(samples)
80-
assert (
81-
pred_sample_ids == all_sample_ids
82-
), "Predictions should include all samples"
80+
assert pred_sample_ids == all_sample_ids, (
81+
"Predictions should include all samples"
82+
)
8383

8484
def test_separate_mode_with_no_na_samples(self, tmp_path):
8585
"""Test that 'separate' mode works correctly when all samples have coordinates."""
@@ -105,9 +105,9 @@ def test_separate_mode_with_no_na_samples(self, tmp_path):
105105
predictions = locator.predict(return_df=True, save_preds_to_disk=False)
106106

107107
# Check that we still get predictions for all samples
108-
assert (
109-
len(predictions) == 10
110-
), f"Expected 10 predictions but got {len(predictions)}"
108+
assert len(predictions) == 10, (
109+
f"Expected 10 predictions but got {len(predictions)}"
110+
)
111111

112112
def test_exclude_mode_only_predicts_na(self, tmp_path):
113113
"""Test that 'exclude' mode excludes NA samples from both training and prediction."""
@@ -137,6 +137,6 @@ def test_exclude_mode_only_predicts_na(self, tmp_path):
137137
predictions = locator.predict(return_df=True, save_preds_to_disk=False)
138138

139139
# In exclude mode, there are no samples to predict
140-
assert (
141-
len(predictions) == 0
142-
), f"Expected 0 predictions in exclude mode but got {len(predictions)}"
140+
assert len(predictions) == 0, (
141+
f"Expected 0 predictions in exclude mode but got {len(predictions)}"
142+
)

0 commit comments

Comments (0)