Skip to content
15 changes: 15 additions & 0 deletions aeon/classification/dictionary_based/_redcomets.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,17 @@ class REDCOMETS(BaseClassifier):
If ``RandomState`` instance, ``random_state`` is the random number generator;
If ``None``, the random number generator is the ``RandomState`` instance used
by ``np.random``.
class_weight : dict, list of dicts, "balanced" or "balanced_subsample", default=None
    From sklearn documentation:
    If not given, all classes are supposed to have weight one.
    The "balanced" mode uses the values of y to automatically adjust weights
    inversely proportional to class frequencies in the input data as
    ``n_samples / (n_classes * np.bincount(y))``.
    The "balanced_subsample" mode is the same as "balanced" except that weights
    are computed based on the bootstrap sample for every tree grown.
    For multi-output, the weights of each column of y will be multiplied.
    Note that these weights will be multiplied with sample_weight (passed through
    the fit method) if sample_weight is specified.
n_jobs : int, default=1
The number of jobs to run in parallel for both `fit` and `predict`.
``-1`` means using all processors.
Expand Down Expand Up @@ -102,6 +113,7 @@ def __init__(
perc_length=5,
n_trees=100,
random_state=None,
class_weight=None,
n_jobs=1,
parallel_backend=None,
):
Expand All @@ -114,6 +126,7 @@ def __init__(
self.n_trees = n_trees

self.random_state = random_state
self.class_weight = class_weight
self.n_jobs = n_jobs
self.parallel_backend = parallel_backend

Expand Down Expand Up @@ -260,6 +273,7 @@ def _build_univariate_ensemble(self, X, y):
rf = RandomForestClassifier(
n_estimators=self.n_trees,
random_state=self.random_state,
class_weight=self.class_weight,
n_jobs=self.n_jobs,
)
rf.fit(X_sfa, y_smote)
Expand All @@ -285,6 +299,7 @@ def _build_univariate_ensemble(self, X, y):
rf = RandomForestClassifier(
n_estimators=self.n_trees,
random_state=self.random_state,
class_weight=self.class_weight,
n_jobs=self.n_jobs,
)
rf.fit(X_sax, y_smote)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,17 @@ class ProbabilityThresholdEarlyClassifier(BaseEarlyClassifier):
in the _classification_points List. Duplicate values will be removed, and the
full series length will be appended if not present.
If None, will use 20 thresholds linearly spaced from 0 to the series length.
class_weight : dict, list of dicts, "balanced" or "balanced_subsample", default=None
    From sklearn documentation:
    If not given, all classes are supposed to have weight one.
    The "balanced" mode uses the values of y to automatically adjust weights
    inversely proportional to class frequencies in the input data as
    ``n_samples / (n_classes * np.bincount(y))``.
    The "balanced_subsample" mode is the same as "balanced" except that weights
    are computed based on the bootstrap sample for every tree grown.
    For multi-output, the weights of each column of y will be multiplied.
    Note that these weights will be multiplied with sample_weight (passed through
    the fit method) if sample_weight is specified.
n_jobs : int, default=1
The number of jobs to run in parallel for both `fit` and `predict`.
``-1`` means using all processors.
Expand Down Expand Up @@ -106,6 +117,7 @@ def __init__(
probability_threshold=0.85,
consecutive_predictions=1,
classification_points=None,
class_weight=None,
n_jobs=1,
random_state=None,
):
Expand All @@ -114,6 +126,7 @@ def __init__(
self.consecutive_predictions = consecutive_predictions
self.classification_points = classification_points

self.class_weight = class_weight
self.n_jobs = n_jobs
self.random_state = random_state

Expand Down
15 changes: 14 additions & 1 deletion aeon/classification/early_classification/_teaser.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,17 @@ class TEASER(BaseEarlyClassifier):
in the _classification_points List. Duplicate values will be removed, and the
full series length will be appended if not present.
If None, will use 20 thresholds linearly spaced from 0 to the series length.
class_weight : dict, list of dicts, "balanced" or "balanced_subsample", default=None
    From sklearn documentation:
    If not given, all classes are supposed to have weight one.
    The "balanced" mode uses the values of y to automatically adjust weights
    inversely proportional to class frequencies in the input data as
    ``n_samples / (n_classes * np.bincount(y))``.
    The "balanced_subsample" mode is the same as "balanced" except that weights
    are computed based on the bootstrap sample for every tree grown.
    For multi-output, the weights of each column of y will be multiplied.
    Note that these weights will be multiplied with sample_weight (passed through
    the fit method) if sample_weight is specified.
n_jobs : int, default=1
The number of jobs to run in parallel for both `fit` and `predict`.
``-1`` means using all processors.
Expand Down Expand Up @@ -118,14 +129,16 @@ def __init__(
one_class_classifier=None,
one_class_param_grid=None,
classification_points=None,
class_weight=None,
n_jobs=1,
random_state=None,
):
self.estimator = estimator
self.one_class_classifier = one_class_classifier
self.one_class_param_grid = one_class_param_grid
self.classification_points = classification_points


self.class_weight = class_weight
self.n_jobs = n_jobs
self.random_state = random_state

Expand Down
14 changes: 14 additions & 0 deletions aeon/classification/feature_based/_catch22.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,17 @@ class Catch22Classifier(BaseClassifier):
If `RandomState` instance, random_state is the random number generator;
If `None`, the random number generator is the `RandomState` instance used
by `np.random`.
class_weight : dict, list of dicts, "balanced" or "balanced_subsample", default=None
    From sklearn documentation:
    If not given, all classes are supposed to have weight one.
    The "balanced" mode uses the values of y to automatically adjust weights
    inversely proportional to class frequencies in the input data as
    ``n_samples / (n_classes * np.bincount(y))``.
    The "balanced_subsample" mode is the same as "balanced" except that weights
    are computed based on the bootstrap sample for every tree grown.
    For multi-output, the weights of each column of y will be multiplied.
    Note that these weights will be multiplied with sample_weight (passed through
    the fit method) if sample_weight is specified.
n_jobs : int, default=1
The number of jobs to run in parallel for both `fit` and `predict`.
``-1`` means using all processors.
Expand Down Expand Up @@ -128,6 +139,7 @@ def __init__(
use_pycatch22=False,
estimator=None,
random_state=None,
class_weight=None,
n_jobs=1,
parallel_backend=None,
):
Expand All @@ -138,6 +150,7 @@ def __init__(
self.use_pycatch22 = use_pycatch22
self.estimator = estimator
self.random_state = random_state
self.class_weight = class_weight
self.n_jobs = n_jobs
self.parallel_backend = parallel_backend

Expand Down Expand Up @@ -167,6 +180,7 @@ def _fit(self, X, y):
outlier_norm=self.outlier_norm,
replace_nans=self.replace_nans,
use_pycatch22=self.use_pycatch22,
class_weight=self.class_weight,
n_jobs=self._n_jobs,
parallel_backend=self.parallel_backend,
)
Expand Down
13 changes: 13 additions & 0 deletions aeon/classification/feature_based/_signature_classifier.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,17 @@ class SignatureClassifier(BaseClassifier):
Signature truncation depth.
random_state : int, default=None
If `int`, random_state is the seed used by the random number generator;
class_weight : dict, list of dicts, "balanced" or "balanced_subsample", default=None
    From sklearn documentation:
    If not given, all classes are supposed to have weight one.
    The "balanced" mode uses the values of y to automatically adjust weights
    inversely proportional to class frequencies in the input data as
    ``n_samples / (n_classes * np.bincount(y))``.
    The "balanced_subsample" mode is the same as "balanced" except that weights
    are computed based on the bootstrap sample for every tree grown.
    For multi-output, the weights of each column of y will be multiplied.
    Note that these weights will be multiplied with sample_weight (passed through
    the fit method) if sample_weight is specified.

Attributes
----------
Expand Down Expand Up @@ -105,6 +116,7 @@ def __init__(
sig_tfm="signature",
depth=4,
random_state=None,
class_weight=None,
):
self.estimator = estimator
self.augmentation_list = augmentation_list
Expand All @@ -116,6 +128,7 @@ def __init__(
self.sig_tfm = sig_tfm
self.depth = depth
self.random_state = random_state
self.class_weight = class_weight

super().__init__()

Expand Down
13 changes: 13 additions & 0 deletions aeon/classification/feature_based/_summary.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,17 @@ class SummaryClassifier(BaseClassifier):
estimator : sklearn classifier, default=None
An sklearn estimator to be built using the transformed data. Defaults to a
Random Forest with 200 trees.
class_weight : dict, list of dicts, "balanced" or "balanced_subsample", default=None
    From sklearn documentation:
    If not given, all classes are supposed to have weight one.
    The "balanced" mode uses the values of y to automatically adjust weights
    inversely proportional to class frequencies in the input data as
    ``n_samples / (n_classes * np.bincount(y))``.
    The "balanced_subsample" mode is the same as "balanced" except that weights
    are computed based on the bootstrap sample for every tree grown.
    For multi-output, the weights of each column of y will be multiplied.
    Note that these weights will be multiplied with sample_weight (passed through
    the fit method) if sample_weight is specified.
n_jobs : int, default=1
The number of jobs to run in parallel for both `fit` and `predict`.
``-1`` means using all processors.
Expand Down Expand Up @@ -79,12 +90,14 @@ def __init__(
self,
summary_stats="default",
estimator=None,
class_weight=None,
n_jobs=1,
random_state=None,
):
self.summary_stats = summary_stats
self.estimator = estimator

self.class_weight = class_weight
self.n_jobs = n_jobs
self.random_state = random_state

Expand Down
15 changes: 15 additions & 0 deletions aeon/classification/feature_based/_tsfresh.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,17 @@ class TSFreshClassifier(BaseClassifier):
Random Forest with 200 trees.
verbose : int, default=0
Level of output printed to the console (for information only).
class_weight : dict, list of dicts, "balanced" or "balanced_subsample", default=None
    From sklearn documentation:
    If not given, all classes are supposed to have weight one.
    The "balanced" mode uses the values of y to automatically adjust weights
    inversely proportional to class frequencies in the input data as
    ``n_samples / (n_classes * np.bincount(y))``.
    The "balanced_subsample" mode is the same as "balanced" except that weights
    are computed based on the bootstrap sample for every tree grown.
    For multi-output, the weights of each column of y will be multiplied.
    Note that these weights will be multiplied with sample_weight (passed through
    the fit method) if sample_weight is specified.
n_jobs : int, default=1
The number of jobs to run in parallel for both `fit` and `predict`.
``-1`` means using all processors.
Expand Down Expand Up @@ -84,6 +95,7 @@ def __init__(
relevant_feature_extractor=True,
estimator=None,
verbose=0,
class_weight=None,
n_jobs=1,
chunksize=None,
random_state=None,
Expand All @@ -93,6 +105,7 @@ def __init__(
self.estimator = estimator

self.verbose = verbose
self.class_weight = class_weight
self.n_jobs = n_jobs
self.chunksize = chunksize
self.random_state = random_state
Expand Down Expand Up @@ -127,12 +140,14 @@ def _fit(self, X, y):
self._transformer = (
TSFreshRelevantFeatureExtractor(
default_fc_parameters=self.default_fc_parameters,
class_weight = self.class_weight,
n_jobs=self._n_jobs,
chunksize=self.chunksize,
)
if self.relevant_feature_extractor
else TSFreshFeatureExtractor(
default_fc_parameters=self.default_fc_parameters,
class_weight = self.class_weight,
n_jobs=self._n_jobs,
chunksize=self.chunksize,
)
Expand Down
13 changes: 13 additions & 0 deletions aeon/classification/hybrid/_rist.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,17 @@ class RISTClassifier(BaseRIST, BaseClassifier):
If `RandomState` instance, random_state is the random number generator;
If `None`, the random number generator is the `RandomState` instance used
by `np.random`.
class_weight : dict, list of dicts, "balanced" or "balanced_subsample", default=None
    From sklearn documentation:
    If not given, all classes are supposed to have weight one.
    The "balanced" mode uses the values of y to automatically adjust weights
    inversely proportional to class frequencies in the input data as
    ``n_samples / (n_classes * np.bincount(y))``.
    The "balanced_subsample" mode is the same as "balanced" except that weights
    are computed based on the bootstrap sample for every tree grown.
    For multi-output, the weights of each column of y will be multiplied.
    Note that these weights will be multiplied with sample_weight (passed through
    the fit method) if sample_weight is specified.
n_jobs : int, default=1
The number of jobs to run in parallel for both `fit` and `predict`.
``-1`` means using all processors.
Expand Down Expand Up @@ -110,6 +121,7 @@ def __init__(
use_pycatch22=False,
use_pyfftw=False,
estimator=None,
class_weight=None,
n_jobs=1,
random_state=None,
):
Expand All @@ -129,6 +141,7 @@ def __init__(
use_pyfftw=use_pyfftw,
estimator=estimator,
random_state=random_state,
class_weight=class_weight,
n_jobs=n_jobs,
)

Expand Down
28 changes: 28 additions & 0 deletions aeon/classification/interval_based/_interval_pipelines.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,17 @@ class RandomIntervalClassifier(BaseClassifier):
Seed or RandomState object used for random number generation.
If random_state is None, use the RandomState singleton used by np.random.
If random_state is an int, use a new RandomState instance seeded with seed.
class_weight : dict, list of dicts, "balanced" or "balanced_subsample", default=None
    From sklearn documentation:
    If not given, all classes are supposed to have weight one.
    The "balanced" mode uses the values of y to automatically adjust weights
    inversely proportional to class frequencies in the input data as
    ``n_samples / (n_classes * np.bincount(y))``.
    The "balanced_subsample" mode is the same as "balanced" except that weights
    are computed based on the bootstrap sample for every tree grown.
    For multi-output, the weights of each column of y will be multiplied.
    Note that these weights will be multiplied with sample_weight (passed through
    the fit method) if sample_weight is specified.
n_jobs : int, default=1
The number of jobs to run in parallel for both `fit` and `transform` functions.
`-1` means using all processors.
Expand Down Expand Up @@ -111,6 +122,7 @@ def __init__(
features=None,
dilation=None,
estimator=None,
class_weight=None,
n_jobs=1,
random_state=None,
parallel_backend=None,
Expand All @@ -122,6 +134,7 @@ def __init__(
self.dilation = dilation
self.estimator = estimator
self.random_state = random_state
self.class_weight = class_weight
self.n_jobs = n_jobs
self.parallel_backend = parallel_backend

Expand Down Expand Up @@ -151,6 +164,7 @@ def _fit(self, X, y):
features=self.features,
dilation=self.dilation,
random_state=self.random_state,
class_weight=self.class_weight,
n_jobs=self._n_jobs,
parallel_backend=self.parallel_backend,
)
Expand Down Expand Up @@ -290,6 +304,17 @@ class SupervisedIntervalClassifier(BaseClassifier):
Seed or RandomState object used for random number generation.
If random_state is None, use the RandomState singleton used by np.random.
If random_state is an int, use a new RandomState instance seeded with seed.
class_weight : dict, list of dicts, "balanced" or "balanced_subsample", default=None
    From sklearn documentation:
    If not given, all classes are supposed to have weight one.
    The "balanced" mode uses the values of y to automatically adjust weights
    inversely proportional to class frequencies in the input data as
    ``n_samples / (n_classes * np.bincount(y))``.
    The "balanced_subsample" mode is the same as "balanced" except that weights
    are computed based on the bootstrap sample for every tree grown.
    For multi-output, the weights of each column of y will be multiplied.
    Note that these weights will be multiplied with sample_weight (passed through
    the fit method) if sample_weight is specified.
n_jobs : int, default=1
The number of jobs to run in parallel for both `fit` and `transform` functions.
`-1` means using all processors.
Expand Down Expand Up @@ -350,6 +375,7 @@ def __init__(
normalise_for_search=True,
estimator=None,
random_state=None,
class_weight=None,
n_jobs=1,
parallel_backend=None,
):
Expand All @@ -361,6 +387,7 @@ def __init__(
self.normalise_for_search = normalise_for_search
self.estimator = estimator
self.random_state = random_state
self.class_weight = class_weight
self.n_jobs = n_jobs
self.parallel_backend = parallel_backend

Expand Down Expand Up @@ -391,6 +418,7 @@ def _fit(self, X, y):
randomised_split_point=self.randomised_split_point,
normalise_for_search=self.normalise_for_search,
random_state=self.random_state,
class_weight=self.class_weight,
n_jobs=self.n_jobs,
parallel_backend=self.parallel_backend,
)
Expand Down
Loading