|
| 1 | +import json |
| 2 | +from pathlib import Path |
| 3 | +from typing import Any, TypedDict |
| 4 | + |
| 5 | +import numpy as np |
| 6 | +import numpy.typing as npt |
| 7 | +from sklearn.metrics import f1_score |
| 8 | +from typing_extensions import Self |
| 9 | + |
| 10 | +from autointent import Context |
| 11 | +from autointent.context.data_handler import Tag |
| 12 | +from autointent.custom_types import LabelType |
| 13 | +from autointent.metrics.converter import transform |
| 14 | + |
| 15 | +from .base import PredictionModule |
| 16 | +from .utils import InvalidNumClassesError, WrongClassificationError, apply_tags |
| 17 | + |
# Default candidate values for the threshold-mixing coefficient r:
# ten evenly spaced points covering the closed interval [0, 1].
default_search_space = np.linspace(0.0, 1.0, num=10)
| 19 | + |
| 20 | + |
class AdaptivePredictorDumpMetadata(TypedDict):
    """Schema of the JSON metadata written by AdaptivePredictor.dump()."""

    r: float  # fitted threshold-mixing coefficient chosen during fit()
    tags: list[Tag] | None  # tag constraints saved at fit time, if any
    n_classes: int  # number of classes the predictor was trained on
| 25 | + |
| 26 | + |
class AdaptivePredictor(PredictionModule):
    """Multilabel predictor with a per-sample adaptive decision threshold.

    For each sample, the threshold is a convex combination of the row-wise
    maximum and minimum score, ``r * max + (1 - r) * min``. The mixing
    coefficient ``r`` is selected during :meth:`fit` to maximize weighted F1
    over the candidate values in ``search_space``.
    """

    metadata_dict_name = "metadata.json"  # file name used by dump()/load()
    n_classes: int  # number of classes seen at fit time
    _r: float  # best threshold-mixing coefficient found by fit()
    tags: list[Tag] | None  # optional tag constraints applied after thresholding
    name = "adaptive"

    def __init__(self, search_space: list[float] | None = None) -> None:
        """Store the candidate values for ``r`` (defaults to 10 points in [0, 1])."""
        self.search_space = search_space if search_space is not None else default_search_space

    @classmethod
    def from_context(cls, context: Context, search_space: list[float] | None = None) -> Self:
        """Construct from an AutoIntent context.

        NOTE(review): ``context`` is currently unused; kept for interface parity
        with other predictor modules.
        """
        return cls(
            search_space=search_space,
        )

    def fit(
        self,
        scores: npt.NDArray[Any],
        labels: list[LabelType],
        tags: list[Tag] | None = None,
    ) -> None:
        """Select the ``r`` from the search space that maximizes weighted F1.

        Args:
            scores: (n_samples, n_classes) score matrix.
            labels: multilabel ground truth; each element is a 0/1 label vector.
            tags: optional tag constraints forwarded to prediction.

        Raises:
            WrongClassificationError: if ``labels`` are not multilabel.
        """
        self.tags = tags
        multilabel = isinstance(labels[0], list)
        if not multilabel:
            msg = """AdaptivePredictor is not designed to perform multiclass classification,
            consider using other predictor algorithms"""
            raise WrongClassificationError(msg)
        # Past the guard above labels are guaranteed multilabel, so the class
        # count is simply the length of a label vector (the original code kept
        # an unreachable multiclass branch here).
        self.n_classes = len(labels[0])

        metrics_list = []
        for r in self.search_space:
            y_pred = multilabel_predict(scores, r, self.tags)
            metrics_list.append(multilabel_score(labels, y_pred))

        self._r = float(self.search_space[np.argmax(metrics_list)])

    def predict(self, scores: npt.NDArray[Any]) -> npt.NDArray[Any]:
        """Binarize ``scores`` using the fitted adaptive threshold.

        Raises:
            InvalidNumClassesError: if the score matrix width differs from
                the class count seen at fit time.
        """
        if scores.shape[1] != self.n_classes:
            msg = "Provided scores number don't match with number of classes which predictor was trained on."
            raise InvalidNumClassesError(msg)
        return multilabel_predict(scores, self._r, self.tags)

    def dump(self, path: str) -> None:
        """Persist fitted state (``r``, tags, ``n_classes``) as JSON under ``path``."""
        dump_dir = Path(path)

        # Tag instances are not JSON-serializable directly (json.dump would
        # raise TypeError); store them as plain attribute dicts so that load()
        # can reconstruct them via Tag(**tag).
        tags_serialized = [vars(tag) for tag in self.tags] if self.tags else None
        metadata = AdaptivePredictorDumpMetadata(
            r=self._r,
            tags=tags_serialized,  # type: ignore[typeddict-item]
            n_classes=self.n_classes,
        )

        with (dump_dir / self.metadata_dict_name).open("w") as file:
            json.dump(metadata, file, indent=4)

    def load(self, path: str) -> None:
        """Restore fitted state previously written by :meth:`dump`."""
        dump_dir = Path(path)

        with (dump_dir / self.metadata_dict_name).open() as file:
            metadata: AdaptivePredictorDumpMetadata = json.load(file)

        self._r = metadata["r"]
        self.n_classes = metadata["n_classes"]
        # Check the container BEFORE iterating: the original comprehension put
        # the guard inside the loop and crashed with TypeError when tags was None.
        saved_tags = metadata["tags"]
        self.tags = [Tag(**tag) for tag in saved_tags] if isinstance(saved_tags, list) else None  # type: ignore[arg-type]
        self.metadata = metadata
| 91 | + |
| 92 | + |
def get_adapted_threshes(r: float, scores: npt.NDArray[Any]) -> npt.NDArray[Any]:
    """Return one adaptive threshold per row: ``r * row_max + (1 - r) * row_min``."""
    row_max = np.max(scores, axis=1)
    row_min = np.min(scores, axis=1)
    return r * row_max + (1 - r) * row_min  # type: ignore[no-any-return]
| 95 | + |
| 96 | + |
def multilabel_predict(scores: npt.NDArray[Any], r: float, tags: list[Tag] | None) -> npt.NDArray[Any]:
    """Binarize ``scores`` against per-row adaptive thresholds, then apply tag constraints."""
    thresholds = get_adapted_threshes(r, scores)
    predictions = (scores >= thresholds[:, None]).astype(int)
    if tags:
        predictions = apply_tags(predictions, scores, tags)
    return predictions
| 103 | + |
| 104 | + |
def multilabel_score(y_true: list[LabelType], y_pred: npt.NDArray[Any]) -> float:
    """Return the weighted F1 score of ``y_pred`` against ``y_true``.

    Args:
        y_true: multilabel ground-truth label vectors.
        y_pred: binary prediction matrix of matching shape.
    """
    y_true_, y_pred_ = transform(y_true, y_pred)

    # Bug fix: sklearn's signature is f1_score(y_true, y_pred, ...); the
    # original call passed them swapped, which skews the weighted average
    # (it weights by predicted-class support instead of true-class support).
    return f1_score(y_true_, y_pred_, average="weighted")  # type: ignore[no-any-return]
0 commit comments