diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8aa7820a0..120cc075d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -78,8 +78,6 @@ _Note 2 (deep learning):_\ In case you want to install the optional deep learning dependencies (i.e. `pip install .[deeplearning]`), -### Tools used - ### Tools used MOABB uses [`pre-commit`](https://pre-commit.com/). It automatically runs variety of Code Quality diff --git a/docs/source/pipelines.rst b/docs/source/pipelines.rst new file mode 100644 index 000000000..11b521587 --- /dev/null +++ b/docs/source/pipelines.rst @@ -0,0 +1,37 @@ +========= +Pipelines +========= + +.. automodule:: moabb.pipelines + +.. currentmodule:: moabb.pipelines + +--------- +Pipelines +--------- + +.. autosummary:: + :toctree: generated/ + :template: class.rst + + features.LogVariance + features.FM + features.ExtendedSSVEPSignal + features.AugmentedDataset + features.StandardScaler_Epoch + csp.TRCSP + classification.SSVEP_CCA + classification.SSVEP_TRCA + classification.SSVEP_MsetCCA + + +------------ +Base & Utils +------------ + +.. autosummary:: + :toctree: generated/ + :template: function.rst + + utils.create_pipeline_from_config + utils.FilterBank diff --git a/docs/source/whats_new.rst b/docs/source/whats_new.rst index 431dff6f9..6f51173e8 100644 --- a/docs/source/whats_new.rst +++ b/docs/source/whats_new.rst @@ -28,6 +28,7 @@ Bugs API changes ~~~~~~~~~~~ +- Removing the deep learning module from inside moabb in favour of braindecode integration (:gh:`692` by `Bruno Aristimunha`_ ) Version - 1.2.0 (Stable - PyPi) @@ -66,6 +67,7 @@ API changes - Removing the braindecode module from inside moabb (:gh:`666` by `Bruno Aristimunha`_ ) + Version - 1.1.1 ---------------- diff --git a/examples/advanced_examples/pipelines_save/keras_deepconvnet.yml b/examples/advanced_examples/pipelines_save/keras_deepconvnet.yml deleted file mode 100644 index 4fb5ce5ee..000000000 --- a/examples/advanced_examples/pipelines_save/keras_deepconvnet.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: Keras_DeepConvNet - -paradigms: - - LeftRightImagery - - MotorImagery - -citations: - - https://doi.org/10.1002/hbm.23730 - -pipeline: - - name: StandardScaler_Epoch - from: moabb.pipelines.features - - - name: KerasDeepConvNet - from: moabb.pipelines.deep_learning - parameters: - loss: "sparse_categorical_crossentropy" - optimizer: - - name: Adam - from: tensorflow.keras.optimizers.legacy - parameters: - learning_rate: 0.001 - epochs: 2 - batch_size: 1 - verbose: 0 - random_state: 42 - validation_split: 0.2 - callbacks: - - name: EarlyStopping - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 300 - - - name: ReduceLROnPlateau - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 300 - factor: 0.5 diff --git a/examples/data_management_and_configuration/noplot_load_model.py b/examples/data_management_and_configuration/noplot_load_model.py index e33f45905..3016e5c8f 100644 --- a/examples/data_management_and_configuration/noplot_load_model.py +++ b/examples/data_management_and_configuration/noplot_load_model.py @@ -1,6 +1,6 @@ """ ============================================== -Load Model (Scikit, Keras) with MOABB +Load Model (Scikit) with MOABB ============================================== This example shows how to use load the pretrained pipeline in MOABB. 
@@ -12,12 +12,7 @@ from pickle import load -import keras -from scikeras.wrappers import KerasClassifier -from sklearn.pipeline import Pipeline - from moabb import set_log_level -from moabb.pipelines.features import StandardScaler_Epoch from moabb.utils import setup_seed @@ -26,9 +21,7 @@ ############################################################################### # In this example, we will use the results computed by the following examples # -# - plot_benchmark_ -# - plot_benchmark_braindecode_ -# - plot_benchmark_DL_ +# - plot_benchmark # --------------------- # Set up reproducibility of Tensorflow and PyTorch @@ -38,26 +31,7 @@ # Loading the Scikit-learn pipelines with open( - "./results/Models_WithinSession/Zhou2016/1/0/CSP + SVM/fitted_model_best.pkl", + "../how_to_benchmark/results/Models_WithinSession/Zhou2016/1/0/csp+svm/fitted_model_best.pkl", "rb", ) as pickle_file: CSP_SVM_Trained = load(pickle_file) - -############################################################################### -# Loading the Keras model -# We load the single Keras model, if we want we can set in the exact same pipeline. - -model_Keras = keras.models.load_model( - "../how_to_benchmark/results/Models_WithinSession/BNCI2014-001/1/1E/Keras_DeepConvNet/kerasdeepconvnet_fitted_model_best.h5" -) -# Now we need to instantiate a new SciKeras object since we only saved the Keras model -Keras_DeepConvNet_Trained = KerasClassifier(model_Keras) -# Create the pipelines - - -pipes_keras = Pipeline( - [ - ("StandardScaler_Epoch", StandardScaler_Epoch), - ("Keras_DeepConvNet_Trained", Keras_DeepConvNet_Trained), - ] -) diff --git a/examples/how_to_benchmark/noplot_benchmark_dl.py b/examples/how_to_benchmark/noplot_benchmark_dl.py deleted file mode 100644 index d4079fa35..000000000 --- a/examples/how_to_benchmark/noplot_benchmark_dl.py +++ /dev/null @@ -1,124 +0,0 @@ -""" -==================================================================== -Benchmarking on MOABB with Tensorflow deep net architectures -==================================================================== -This example shows how to use MOABB to benchmark a set of Deep Learning pipeline (Tensorflow) -on all available datasets. -For this example, we will use only one dataset to keep the computation time low, but this benchmark is designed -to easily scale to many datasets. -""" - -# Authors: Igor Carrara -# -# License: BSD (3-clause) - -import os - -import matplotlib.pyplot as plt -import tensorflow as tf -from absl.logging import ERROR, set_verbosity - -from moabb import benchmark, set_log_level -from moabb.analysis.plotting import score_plot -from moabb.datasets import BNCI2014_001 -from moabb.utils import setup_seed - - -set_log_level("info") -# Avoid output Warning -set_verbosity(ERROR) -os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" - - -CPU = len(tf.config.list_physical_devices("CPU")) > 0 -print("CPU is", "AVAILABLE" if CPU else "NOT AVAILABLE") - -GPU = len(tf.config.list_physical_devices("GPU")) > 0 -print("GPU is", "AVAILABLE" if GPU else "NOT AVAILABLE") - -############################################################################### -# In this example, we will use only the dataset ``BNCI2014_001``. -# -# Running the benchmark -# --------------------- -# -# The benchmark is run using the ``benchmark`` function. You need to specify the -# folder containing the pipelines to use, the kind of evaluation and the paradigm -# to use. By default, the benchmark will use all available datasets for all -# paradigms listed in the pipelines. 
You could restrict to specific evaluation and -# paradigm using the ``evaluations`` and ``paradigms`` arguments. -# -# To save computation time, the results are cached. If you want to re-run the -# benchmark, you can set the ``overwrite`` argument to ``True``. -# -# It is possible to indicate the folder to cache the results and the one to save -# the analysis & figures. By default, the results are saved in the ``results`` -# folder, and the analysis & figures are saved in the ``benchmark`` folder. -# -# This code is implemented to run on CPU. If you're using a GPU, do not use multithreading -# (i.e. set n_jobs=1) - -# Set up reproducibility of Tensorflow -setup_seed(42) - -# Restrict this example only on the first two subject of BNCI2014_001 -dataset = BNCI2014_001() -dataset.subject_list = dataset.subject_list[:2] -datasets = [dataset] - -results = benchmark( - pipelines="./pipelines_dl", - evaluations=["WithinSession"], - paradigms=["LeftRightImagery"], - include_datasets=datasets, - results="./results/", - overwrite=False, - plot=False, - output="./benchmark/", - n_jobs=-1, -) - -############################################################################### -# The deep learning architectures implemented in MOABB are: -# - Shallow Convolutional Network [1]_ -# - Deep Convolutional Network [1]_ -# - EEGNet [2]_ -# - EEGTCNet [3]_ -# - EEGNex [4]_ -# - EEGITNet [5]_ -# -# Benchmark prints a summary of the results. Detailed results are saved in a -# pandas dataframe, and can be used to generate figures. The analysis & figures -# are saved in the ``benchmark`` folder. - -score_plot(results) -plt.show() - -############################################################################## -# References -# ---------- -# .. [1] Schirrmeister, R. T., Springenberg, J. T., Fiederer, L. D. J., -# Glasstetter, M., Eggensperger, K., Tangermann, M., ... & Ball, T. (2017). -# `Deep learning with convolutional neural networks for EEG decoding and -# visualization `_. -# Human brain mapping, 38(11), 5391-5420. -# .. [2] Lawhern, V. J., Solon, A. J., Waytowich, N. R., Gordon, S. M., -# Hung, C. P., & Lance, B. J. (2018). `EEGNet: a compact convolutional neural -# network for EEG-based brain-computer interfaces. -# `_ -# Journal of neural engineering, 15(5), 056013. -# .. [3] Ingolfsson, T. M., Hersche, M., Wang, X., Kobayashi, N., Cavigelli, L., & -# Benini, L. (2020, October). `EEG-TCNet: An accurate temporal convolutional -# network for embedded motor-imagery brain-machine interfaces. -# `_ -# In 2020 IEEE International Conference on Systems, Man, and Cybernetics (SMC) -# (pp. 2958-2965). IEEE. -# .. [4] Chen, X., Teng, X., Chen, H., Pan, Y., & Geyer, P. (2022). `Toward reliable -# signals decoding for electroencephalogram: A benchmark study to EEGNeX. -# `_ -# arXiv preprint arXiv:2207.12369. -# .. [5] Salami, A., Andreu-Perez, J., & Gillmeister, H. (2022). `EEG-ITNet: An -# explainable inception temporal convolutional network for motor imagery -# classification -# `_. -# IEEE Access, 10, 36672-36685. 
diff --git a/examples/how_to_benchmark/pipelines_codecarbon/keras_eegnet_8_2.yml b/examples/how_to_benchmark/pipelines_codecarbon/keras_eegnet_8_2.yml deleted file mode 100644 index c8cbc6541..000000000 --- a/examples/how_to_benchmark/pipelines_codecarbon/keras_eegnet_8_2.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: Keras_EEGNet_8_2 - -paradigms: - - LeftRightImagery - - MotorImagery - -citations: - - https://doi.org/10.1088/1741-2552/aace8c - -pipeline: - - name: StandardScaler_Epoch - from: moabb.pipelines.features - - - name: KerasEEGNet_8_2 - from: moabb.pipelines.deep_learning - parameters: - loss: "sparse_categorical_crossentropy" - optimizer: - - name: Adam - from: tensorflow.keras.optimizers.legacy - parameters: - learning_rate: 0.0009 - epochs: 10 - batch_size: 64 - verbose: 0 - random_state: 42 - validation_split: 0.2 - callbacks: - - name: EarlyStopping - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 300 - - - name: ReduceLROnPlateau - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 300 - factor: 0.5 diff --git a/examples/how_to_benchmark/pipelines_dl/keras_deepconvnet.yml b/examples/how_to_benchmark/pipelines_dl/keras_deepconvnet.yml deleted file mode 100644 index c5f63f113..000000000 --- a/examples/how_to_benchmark/pipelines_dl/keras_deepconvnet.yml +++ /dev/null @@ -1,48 +0,0 @@ -name: Keras_DeepConvNet - -paradigms: - - LeftRightImagery - - MotorImagery - -citations: - - https://doi.org/10.1002/hbm.23730 - -pipeline: - - name: Resampler_Epoch - from: moabb.pipelines.features - parameters: - sfreq: 250 - - - name: Convert_Epoch_Array - from: moabb.pipelines.features - - - name: StandardScaler_Epoch - from: moabb.pipelines.features - - - name: KerasDeepConvNet - from: moabb.pipelines.deep_learning - parameters: - loss: "sparse_categorical_crossentropy" - optimizer: - - name: Adam - from: tensorflow.keras.optimizers - parameters: - learning_rate: 0.001 - epochs: 2 - batch_size: 1 - verbose: 0 - random_state: 42 - validation_split: 0.2 - callbacks: - - name: EarlyStopping - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 300 - - - name: ReduceLROnPlateau - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 300 - factor: 0.5 diff --git a/examples/how_to_benchmark/pipelines_dl/keras_eegitnet.yml b/examples/how_to_benchmark/pipelines_dl/keras_eegitnet.yml deleted file mode 100644 index 705060482..000000000 --- a/examples/how_to_benchmark/pipelines_dl/keras_eegitnet.yml +++ /dev/null @@ -1,48 +0,0 @@ -name: Keras_EEGITNet - -paradigms: - - LeftRightImagery - - MotorImagery - -citations: - - https://doi.org/10.1109/ACCESS.2022.3161489 - -pipeline: - - name: Resampler_Epoch - from: moabb.pipelines.features - parameters: - sfreq: 128 - - - name: Convert_Epoch_Array - from: moabb.pipelines.features - - - name: StandardScaler_Epoch - from: moabb.pipelines.features - - - name: KerasEEGITNet - from: moabb.pipelines.deep_learning - parameters: - loss: "sparse_categorical_crossentropy" - optimizer: - - name: Adam - from: tensorflow.keras.optimizers - parameters: - learning_rate: 0.001 - epochs: 2 - batch_size: 1 - verbose: 0 - random_state: 42 - validation_split: 0.2 - callbacks: - - name: EarlyStopping - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 300 - - - name: ReduceLROnPlateau - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 300 - factor: 0.5 diff --git 
a/examples/how_to_benchmark/pipelines_dl/keras_eegnet_8_2.yml b/examples/how_to_benchmark/pipelines_dl/keras_eegnet_8_2.yml deleted file mode 100644 index c2d26ee68..000000000 --- a/examples/how_to_benchmark/pipelines_dl/keras_eegnet_8_2.yml +++ /dev/null @@ -1,48 +0,0 @@ -name: Keras_EEGNet_8_2 - -paradigms: - - LeftRightImagery - - MotorImagery - -citations: - - https://doi.org/10.1088/1741-2552/aace8c - -pipeline: - - name: Resampler_Epoch - from: moabb.pipelines.features - parameters: - sfreq: 128 - - - name: Convert_Epoch_Array - from: moabb.pipelines.features - - - name: StandardScaler_Epoch - from: moabb.pipelines.features - - - name: KerasEEGNet_8_2 - from: moabb.pipelines.deep_learning - parameters: - loss: "sparse_categorical_crossentropy" - optimizer: - - name: Adam - from: tensorflow.keras.optimizers - parameters: - learning_rate: 0.001 - epochs: 2 - batch_size: 1 - verbose: 0 - random_state: 42 - validation_split: 0.2 - callbacks: - - name: EarlyStopping - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 300 - - - name: ReduceLROnPlateau - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 300 - factor: 0.5 diff --git a/examples/how_to_benchmark/pipelines_dl/keras_eegnex.yml b/examples/how_to_benchmark/pipelines_dl/keras_eegnex.yml deleted file mode 100644 index 6d775b5ae..000000000 --- a/examples/how_to_benchmark/pipelines_dl/keras_eegnex.yml +++ /dev/null @@ -1,48 +0,0 @@ -name: Keras_EEGNeX - -paradigms: - - LeftRightImagery - - MotorImagery - -citations: - - https://doi.org/10.48550/arXiv.2207.12369 - -pipeline: - - name: Resampler_Epoch - from: moabb.pipelines.features - parameters: - sfreq: 128 - - - name: Convert_Epoch_Array - from: moabb.pipelines.features - - - name: StandardScaler_Epoch - from: moabb.pipelines.features - - - name: KerasEEGNeX - from: moabb.pipelines.deep_learning - parameters: - loss: "sparse_categorical_crossentropy" - optimizer: - - name: Adam - from: tensorflow.keras.optimizers - parameters: - learning_rate: 0.001 - epochs: 2 - batch_size: 1 - verbose: 0 - random_state: 42 - validation_split: 0.2 - callbacks: - - name: EarlyStopping - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 300 - - - name: ReduceLROnPlateau - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 300 - factor: 0.5 diff --git a/examples/how_to_benchmark/pipelines_dl/keras_eegtcnet.yml b/examples/how_to_benchmark/pipelines_dl/keras_eegtcnet.yml deleted file mode 100644 index 4db2f17b6..000000000 --- a/examples/how_to_benchmark/pipelines_dl/keras_eegtcnet.yml +++ /dev/null @@ -1,48 +0,0 @@ -name: Keras_EEGTCNet - -paradigms: - - LeftRightImagery - - MotorImagery - -citations: - - https://doi.org/10.1109/SMC42975.2020.9283028 - -pipeline: - - name: Resampler_Epoch - from: moabb.pipelines.features - parameters: - sfreq: 250 - - - name: Convert_Epoch_Array - from: moabb.pipelines.features - - - name: StandardScaler_Epoch - from: moabb.pipelines.features - - - name: KerasEEGTCNet - from: moabb.pipelines.deep_learning - parameters: - loss: "sparse_categorical_crossentropy" - optimizer: - - name: Adam - from: tensorflow.keras.optimizers - parameters: - learning_rate: 0.001 - epochs: 2 - batch_size: 1 - verbose: 0 - random_state: 42 - validation_split: 0.2 - callbacks: - - name: EarlyStopping - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 300 - - - name: ReduceLROnPlateau - from: tensorflow.keras.callbacks - parameters: - 
monitor: "val_loss" - patience: 300 - factor: 0.5 diff --git a/examples/how_to_benchmark/pipelines_dl/keras_shallowconvnet.yml b/examples/how_to_benchmark/pipelines_dl/keras_shallowconvnet.yml deleted file mode 100644 index 702169ef8..000000000 --- a/examples/how_to_benchmark/pipelines_dl/keras_shallowconvnet.yml +++ /dev/null @@ -1,48 +0,0 @@ -name: Keras_ShallowConvNet - -paradigms: - - LeftRightImagery - - MotorImagery - -citations: - - https://doi.org/10.1002/hbm.23730 - -pipeline: - - name: Resampler_Epoch - from: moabb.pipelines.features - parameters: - sfreq: 250 - - - name: Convert_Epoch_Array - from: moabb.pipelines.features - - - name: StandardScaler_Epoch - from: moabb.pipelines.features - - - name: KerasShallowConvNet - from: moabb.pipelines.deep_learning - parameters: - loss: "sparse_categorical_crossentropy" - optimizer: - - name: Adam - from: tensorflow.keras.optimizers - parameters: - learning_rate: 0.001 - epochs: 2 - batch_size: 1 - verbose: 0 - random_state: 42 - validation_split: 0.2 - callbacks: - - name: EarlyStopping - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 300 - - - name: ReduceLROnPlateau - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 300 - factor: 0.5 diff --git a/moabb/benchmark.py b/moabb/benchmark.py index 55880e3f3..b01458193 100644 --- a/moabb/benchmark.py +++ b/moabb/benchmark.py @@ -168,10 +168,7 @@ def benchmark( # noqa: C901 ppl_with_epochs, ppl_with_array = {}, {} for pn, pv in prdgms[paradigm].items(): - if "Keras" in pn: - ppl_with_epochs[pn] = pv - else: - ppl_with_array[pn] = pv + ppl_with_array[pn] = pv if len(ppl_with_epochs) > 0: # Keras pipelines require return_epochs=True diff --git a/moabb/evaluations/utils.py b/moabb/evaluations/utils.py index 4a28b8d48..5b5b6d24c 100644 --- a/moabb/evaluations/utils.py +++ b/moabb/evaluations/utils.py @@ -16,29 +16,8 @@ optuna_available = False -def _check_if_is_keras_model(model): - """Check if the model is a Keras model. - - Parameters - ---------- - model: object - Model to check - Returns - ------- - is_keras_model: bool - True if the model is a Keras model - """ - try: - from scikeras.wrappers import KerasClassifier - - is_keras_model = isinstance(model, KerasClassifier) - return is_keras_model - except ImportError: - return False - - def _check_if_is_pytorch_model(model): - """Check if the model is a Keras model. + """Check if the model is a skorch model. Parameters ---------- @@ -46,8 +25,8 @@ def _check_if_is_pytorch_model(model): Model to check Returns ------- - is_keras_model: bool - True if the model is a Keras model + is_pytorch_model: bool + True if the model is a Skorch model """ try: from skorch import NeuralNetClassifier @@ -69,15 +48,6 @@ def _check_if_is_pytorch_steps(model): return skorch_valid -def _check_if_is_keras_steps(model): - keras_valid = False - try: - keras_valid = any(_check_if_is_keras_model(j) for j in model.named_steps.values()) - return keras_valid - except Exception: - return keras_valid - - def save_model_cv(model: object, save_path: str | Path, cv_index: str | int): """Save a model fitted to a given fold from cross-validation. 
@@ -115,16 +85,6 @@ def save_model_cv(model: object, save_path: str | Path, cv_index: str | int): else: with open((Path(save_path) / f"{file_step}.pkl"), "wb") as file: dump(step, file, protocol=HIGHEST_PROTOCOL) - - elif _check_if_is_keras_steps(model): - for step_name in model.named_steps: - file_step = f"{step_name}_fitted_model_{cv_index}" - step = model.named_steps[step_name] - if _check_if_is_keras_model(step): - step.model_.save(Path(save_path) / f"{file_step}.h5") - else: - with open((Path(save_path) / f"{file_step}.pkl"), "wb") as file: - dump(step, file, protocol=HIGHEST_PROTOCOL) else: with open((Path(save_path) / f"fitted_model_{cv_index}.pkl"), "wb") as file: dump(model, file, protocol=HIGHEST_PROTOCOL) diff --git a/moabb/pipelines/__init__.py b/moabb/pipelines/__init__.py index f550332c3..421d82fe6 100644 --- a/moabb/pipelines/__init__.py +++ b/moabb/pipelines/__init__.py @@ -27,46 +27,10 @@ def __getattr__(name): "TCN_block", } - if name in deep_learning_classes and _check_if_tensorflow_installed(): - return _import_class(name, ".deep_learning") - elif name in utils_deep_model_classes and _check_if_tensorflow_installed(): - return _import_class(name, ".utils_deep_model") - - raise AttributeError(f"Module '{__name__}' has no attribute '{name}'") - - -def _import_class(name, module_name): - import importlib - - warning_msg = _warning_msg(name, module_name) - warn(warning_msg) - - module = importlib.import_module(module_name, __package__) - return getattr(module, name) - - -def _warning_msg(name, submodule): - return ( - f"{name} is incorrectly imported. \nPlease use:\033[1m " - f"from moabb.pipeline{submodule} import {name}\033[0m.\n" - f"Instead of: \033[1mfrom moabb.pipeline import {name}\033[0m." - ) - - -def _check_if_tensorflow_installed(): - try: - import scikeras - - return True - except ModuleNotFoundError: - warn( - "\nThere was a problem importing tensorflow or keras, " - "which are required for the deep learning pipelines. \n" - "The Keras MOABB deep learning pipelines cannot be used.\n " - "To resolve this issue, please install the necessary dependencies " - "by running the following command in your terminal: \n" - "\033[94m" # This is the ANSI escape code for blue - "pip install moabb[deeplearning]" - "\033[0m", # This resets the color back to normal + if name in deep_learning_classes or name in utils_deep_model_classes: + raise AttributeError( + f"The deep learning module using tensorflow is no " + f"longer part of the moabb package. Please use " + f"the braindecode package instead. " + f"See https://braindecode.org/ for more information."
) - return False diff --git a/moabb/pipelines/deep_learning.py b/moabb/pipelines/deep_learning.py deleted file mode 100644 index cb1f558c0..000000000 --- a/moabb/pipelines/deep_learning.py +++ /dev/null @@ -1,821 +0,0 @@ -"""Deep learning integrated in MOABB Implementation using the tensorflow, keras -and scikeras framework.""" - -# Authors: Igor Carrara -# Bruno Aristimunha -# Sylvain Chevallier - -# License: BSD (3-clause) - -from typing import Any, Dict - -import tensorflow as tf -from keras.constraints import max_norm -from keras.layers import ( - Activation, - Add, - AveragePooling2D, - AvgPool2D, - BatchNormalization, - Concatenate, - Conv2D, - Dense, - DepthwiseConv2D, - Dropout, - Flatten, - Input, - Lambda, - LayerNormalization, - MaxPooling2D, - Permute, -) -from keras.models import Model, Sequential -from keras.ops import pad -from scikeras.wrappers import KerasClassifier -from tensorflow.keras import backend as K - -from moabb.pipelines.utils_deep_model import EEGNet, EEGNet_TC, TCN_block - - -# ===================================================================================== -# ShallowConvNet -# ===================================================================================== -def square(x): - """Function to square the input tensor element-wise. - - Element-wise square. - """ - return K.square(x) - - -def log(x): - """Function to take the log of the input tensor element-wise. We use a clip - to avoid taking the log of 0. min_value=1e-7, max_value=10000. - - Parameters - ---------- - x: tensor - - Returns - ------- - tensor - """ - return K.log(K.clip(x, min_value=1e-7, max_value=10000)) - - -class KerasShallowConvNet(KerasClassifier): - """Keras implementation of the Shallow Convolutional Network as described - in [1]_. - - This implementation is taken from code by the Army Research Laboratory (ARL) - at https://github.com/vlawhern/arl-eegmodels - - We use the original parameter implemented on the paper. - - Note that this implementation has not been verified by the original - authors. - - References - ---------- - .. [1] Schirrmeister, R. T., Springenberg, J. T., Fiederer, L. D. J., Glasstetter, M., Eggensperger, - K., Tangermann, M., ... & Ball, T. (2017). Deep learning with convolutional neural networks - for EEG decoding and visualization. Human brain mapping, 38(11), 5391-5420. - https://doi.org/10.1002/hbm.23730 - - Notes - ----- - .. 
versionadded:: 0.5.0 - """ - - def __init__( - self, - loss, - optimizer="Adam", - epochs=1000, - batch_size=64, - verbose=0, - random_state=None, - validation_split=0.2, - history_plot=False, - path=None, - learning_rate=0.001, - drop_rate=0.5, - **kwargs, - ): - super().__init__(**kwargs) - - self.loss = loss - if optimizer == "Adam": - optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate) - - self.optimizer = optimizer - self.epochs = epochs - self.batch_size = batch_size - self.verbose = verbose - self.random_state = random_state - self.validation_split = validation_split - self.history_plot = history_plot - self.path = path - self.drop_rate = drop_rate - - def _keras_build_fn(self, compile_kwargs: Dict[str, Any]): - input_main = Input(shape=(self.X_shape_[1], self.X_shape_[2], 1)) - block1 = Conv2D( - 40, - (1, 25), - input_shape=(self.X_shape_[1], self.X_shape_[2], 1), - kernel_constraint=max_norm(2.0, axis=(0, 1, 2)), - )(input_main) - block1 = Conv2D( - 40, - (self.X_shape_[1], 1), - use_bias=False, - kernel_constraint=max_norm(2.0, axis=(0, 1, 2)), - )(block1) - block1 = BatchNormalization(epsilon=1e-05, momentum=0.9)(block1) - block1 = Activation(square)(block1) - block1 = AveragePooling2D(pool_size=(1, 75), strides=(1, 15))(block1) - block1 = Activation(log)(block1) - block1 = Dropout(self.drop_rate)(block1) - flatten = Flatten()(block1) - dense = Dense(self.n_classes_, kernel_constraint=max_norm(0.5))(flatten) - softmax = Activation("softmax")(dense) - - model = Model(inputs=input_main, outputs=softmax) - - model.compile(loss=compile_kwargs["loss"], optimizer=compile_kwargs["optimizer"]) - - return model - - -# ================================================================================= -# DeepConvNet -# ================================================================================= -class KerasDeepConvNet(KerasClassifier): - """Keras implementation of the Deep Convolutional Network as described in - [1]_. - - This implementation is taken from code by the Army Research Laboratory (ARL) - at https://github.com/vlawhern/arl-eegmodels - - We use the original parameter implemented on the paper. - - Note that this implementation has not been verified by the original - authors. - - References - ---------- - .. [1] Schirrmeister, R. T., Springenberg, J. T., Fiederer, L. D. J., Glasstetter, M., Eggensperger, - K., Tangermann, M., ... & Ball, T. (2017). Deep learning with convolutional neural networks - for EEG decoding and visualization. Human brain mapping, 38(11), 5391-5420. - https://doi.org/10.1002/hbm.23730 - - Notes - ----- - .. 
versionadded:: 0.5.0 - """ - - def __init__( - self, - loss, - optimizer="Adam", - epochs=1000, - batch_size=64, - verbose=0, - random_state=None, - validation_split=0.2, - history_plot=False, - path=None, - learning_rate=0.0009, - drop_rate=0.5, - **kwargs, - ): - super().__init__(**kwargs) - - self.loss = loss - if optimizer == "Adam": - optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate) - - self.optimizer = optimizer - self.epochs = epochs - self.batch_size = batch_size - self.verbose = verbose - self.random_state = random_state - self.validation_split = validation_split - self.history_plot = history_plot - self.path = path - self.drop_rate = drop_rate - - def _keras_build_fn(self, compile_kwargs: Dict[str, Any]): - input_main = Input(shape=(self.X_shape_[1], self.X_shape_[2], 1)) - block1 = Conv2D( - 25, - (1, 10), - input_shape=(self.X_shape_[1], self.X_shape_[2], 1), - kernel_constraint=max_norm(2.0, axis=(0, 1, 2)), - )(input_main) - block1 = Conv2D( - 25, - (self.X_shape_[1], 1), - kernel_constraint=max_norm(2.0, axis=(0, 1, 2)), - )(block1) - block1 = BatchNormalization(epsilon=1e-05, momentum=0.9)(block1) - block1 = Activation("elu")(block1) - block1 = MaxPooling2D(pool_size=(1, 3), strides=(1, 3))(block1) - block1 = Dropout(self.drop_rate)(block1) - - block2 = Conv2D(50, (1, 10), kernel_constraint=max_norm(2.0, axis=(0, 1, 2)))( - block1 - ) - block2 = BatchNormalization(epsilon=1e-05, momentum=0.9)(block2) - block2 = Activation("elu")(block2) - block2 = MaxPooling2D(pool_size=(1, 3), strides=(1, 3))(block2) - block2 = Dropout(0.5)(block2) - - block3 = Conv2D(100, (1, 10), kernel_constraint=max_norm(2.0, axis=(0, 1, 2)))( - block2 - ) - block3 = BatchNormalization(epsilon=1e-05, momentum=0.9)(block3) - block3 = Activation("elu")(block3) - block3 = MaxPooling2D(pool_size=(1, 3), strides=(1, 3))(block3) - block3 = Dropout(self.drop_rate)(block3) - - block4 = Conv2D(200, (1, 10), kernel_constraint=max_norm(2.0, axis=(0, 1, 2)))( - block3 - ) - block4 = BatchNormalization(epsilon=1e-05, momentum=0.9)(block4) - block4 = Activation("elu")(block4) - block4 = MaxPooling2D(pool_size=(1, 3), strides=(1, 3))(block4) - block4 = Dropout(self.drop_rate)(block4) - - flatten = Flatten()(block4) - - dense = Dense(self.n_classes_, kernel_constraint=max_norm(0.5))(flatten) - softmax = Activation("softmax")(dense) - - model = Model(inputs=input_main, outputs=softmax) - - model.compile(loss=compile_kwargs["loss"], optimizer=compile_kwargs["optimizer"]) - - return model - - -# =========================================================================== -# EEGNet_8_2 -# =========================================================================== -class KerasEEGNet_8_2(KerasClassifier): - """Keras implementation of the EEGNet as described in [1]_. - - This implementation is taken from code by the Army Research Laboratory (ARL) - at https://github.com/vlawhern/arl-eegmodels - - We use the original parameter implemented on the paper. - - Note that this implementation has not been verified by the original - authors. - - References - ---------- - .. [1] Lawhern, V. J., Solon, A. J., Waytowich, N. R., Gordon, S. M., Hung, C. P., & Lance, B. J. (2018). EEGNet: - a compact convolutional neural network for EEG-based brain–computer interfaces. Journal of neural - engineering, 15(5), 056013. - https://doi.org/10.1088/1741-2552/aace8c - - Notes - ----- - .. 
versionadded:: 0.5.0 - """ - - def __init__( - self, - loss, - optimizer="Adam", - epochs=1000, - batch_size=64, - verbose=0, - random_state=None, - validation_split=0.2, - history_plot=False, - path=None, - learning_rate=0.0009, - drop_rate=0.5, - **kwargs, - ): - super().__init__(**kwargs) - - self.loss = loss - if optimizer == "Adam": - optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate) - - self.optimizer = optimizer - self.epochs = epochs - self.batch_size = batch_size - self.verbose = verbose - self.random_state = random_state - self.validation_split = validation_split - self.history_plot = history_plot - self.path = path - self.drop_rate = drop_rate - - def _keras_build_fn(self, compile_kwargs: Dict[str, Any]): - # Parameter of the Article - F1 = 8 - kernLength = 64 - D = 2 - dropout = self.drop_rate - - # Architecture - # Input - input_main = Input(shape=(self.X_shape_[1], self.X_shape_[2], 1)) - # EEGNet Block - eegnet = EEGNet( - self, - input_layer=input_main, - filters_1=F1, - kernel_size=kernLength, - depth=D, - dropout=dropout, - ) - flatten = Flatten()(eegnet) - # Classification Block - dense = Dense(self.n_classes_, kernel_constraint=max_norm(0.5))(flatten) - softmax = Activation("softmax")(dense) - # Creation of the Model - model = Model(inputs=input_main, outputs=softmax) - - # Compile Model - model.compile(loss=compile_kwargs["loss"], optimizer=compile_kwargs["optimizer"]) - - return model - - -# ======================================================================= -# EEGTCNet -# ======================================================================= -class KerasEEGTCNet(KerasClassifier): - """Keras implementation of the EEGTCNet as described in [1]_. - - This implementation is taken from code by - at https://github.com/iis-eth-zurich/eeg-tcnet - - We use the original parameter implemented on the paper. - - Note that this implementation has not been verified by the original - authors. - - References - ---------- - .. [1] Ingolfsson, T. M., Hersche, M., Wang, X., Kobayashi, N., Cavigelli, L., & Benini, L. (2020, October). - EEG-TCNet: An accurate temporal convolutional network for embedded motor-imagery brain–machine interfaces. - In 2020 IEEE International Conference on Systems, Man, and Cybernetics (SMC) (pp. 2958-2965). IEEE. - https://doi.org/10.1109/SMC42975.2020.9283028 - - Notes - ----- - .. 
versionadded:: 0.5.0 - """ - - def __init__( - self, - loss, - optimizer="Adam", - epochs=1000, - batch_size=64, - verbose=0, - random_state=None, - validation_split=0.2, - history_plot=False, - path=None, - learning_rate=0.0009, - drop_rate=0.5, - **kwargs, - ): - super().__init__(**kwargs) - - self.loss = loss - if optimizer == "Adam": - optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate) - - self.optimizer = optimizer - self.epochs = epochs - self.batch_size = batch_size - self.verbose = verbose - self.random_state = random_state - self.validation_split = validation_split - self.history_plot = history_plot - self.path = path - self.drop_rate = drop_rate - - def _keras_build_fn(self, compile_kwargs: Dict[str, Any]): - # Parameter of the Article - F1 = 8 - kernLength = 64 - D = 2 - dropout = self.drop_rate - F2 = F1 * D - - # Architecture - # Input - input_1 = Input(shape=(self.X_shape_[1], self.X_shape_[2], 1)) - input_2 = Permute((2, 1, 3))(input_1) - # EEGNet Block - eegnet = EEGNet_TC( - self, input_layer=input_2, F1=F1, kernLength=kernLength, D=D, dropout=dropout - ) - block2 = Lambda(lambda x: x[:, :, -1, :])(eegnet) - # TCN Block - outs = TCN_block( - input_layer=block2, - input_dimension=F2, - depth=2, - kernel_size=4, - filters=12, - dropout=dropout, - activation="elu", - ) - out = Lambda(lambda x: x[:, -1, :])(outs) - # Classification Block - dense = Dense(self.n_classes_, kernel_constraint=max_norm(0.5))(out) - softmax = Activation("softmax")(dense) - # Creation of the Model - model = Model(inputs=input_1, outputs=softmax) - - # Compile Model - model.compile(loss=compile_kwargs["loss"], optimizer=compile_kwargs["optimizer"]) - - return model - - -# ===================================================================== -# EEGNeX -# ===================================================================== -class KerasEEGNeX(KerasClassifier): - """Keras implementation of the EEGNex as described in [1]_. - - This implementation is taken from code by - at https://github.com/chenxiachan/EEGNeX - - We use the original parameter implemented on the paper. - - Note that this implementation has not been verified by the original - authors. - - References - ---------- - .. [1] Chen, X., Teng, X., Chen, H., Pan, Y., & Geyer, P. (2022). Toward reliable signals decoding for - electroencephalogram: A benchmark study to EEGNeX. arXiv preprint arXiv:2207.12369. - https://doi.org/10.48550/arXiv.2207.12369 - - Notes - ----- - .. 
versionadded:: 0.5.0 - """ - - def __init__( - self, - loss, - optimizer="Adam", - epochs=1000, - batch_size=64, - verbose=0, - random_state=None, - validation_split=0.2, - history_plot=False, - path=None, - learning_rate=0.0009, - drop_rate=0.5, - **kwargs, - ): - super().__init__(**kwargs) - - self.loss = loss - - if optimizer == "Adam": - optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate) - - self.optimizer = optimizer - self.epochs = epochs - self.batch_size = batch_size - self.verbose = verbose - self.random_state = random_state - self.validation_split = validation_split - self.history_plot = history_plot - self.path = path - self.drop_rate = drop_rate - - def _keras_build_fn(self, compile_kwargs: Dict[str, Any]): - # Architecture - # Input - model = Sequential() - model.add(Input(shape=(self.X_shape_[1], self.X_shape_[2], 1))) - # EEGNeX - model.add( - Conv2D( - filters=8, - kernel_size=(1, 32), - use_bias=False, - padding="same", - data_format="channels_last", - ) - ) - model.add(LayerNormalization()) - model.add(Activation(activation="elu")) - model.add( - Conv2D( - filters=32, - kernel_size=(1, 32), - use_bias=False, - padding="same", - data_format="channels_last", - ) - ) - model.add(LayerNormalization()) - model.add(Activation(activation="elu")) - - model.add( - DepthwiseConv2D( - kernel_size=(self.X_shape_[1], 1), - depth_multiplier=2, - use_bias=False, - depthwise_constraint=max_norm(1.0), - data_format="channels_last", - ) - ) - model.add(LayerNormalization()) - model.add(Activation(activation="elu")) - model.add( - AvgPool2D(pool_size=(1, 4), padding="same", data_format="channels_last") - ) - model.add(Dropout(self.drop_rate)) - - model.add( - Conv2D( - filters=32, - kernel_size=(1, 16), - use_bias=False, - padding="same", - dilation_rate=(1, 2), - data_format="channels_last", - ) - ) - model.add(LayerNormalization()) - model.add(Activation(activation="elu")) - - model.add( - Conv2D( - filters=8, - kernel_size=(1, 16), - use_bias=False, - padding="same", - dilation_rate=(1, 4), - data_format="channels_last", - ) - ) - - model.add(LayerNormalization()) - model.add(Activation(activation="elu")) - model.add(Dropout(self.drop_rate)) - - model.add(Flatten()) - # Classification Block - model.add(Dense(self.n_classes_, kernel_constraint=max_norm(0.5))) - model.add(Activation(activation="softmax")) - - # Compile Model - model.compile(loss=compile_kwargs["loss"], optimizer=compile_kwargs["optimizer"]) - - return model - - -# ================================================================= -# EEGITNet -# ================================================================= - -n_ff = [2, 4, 8] -n_sf = [1, 1, 1] - - -class KerasEEGITNet(KerasClassifier): - """Keras implementation of the EEGITNet as described in [1]_. - - This implementation is taken from code by - at https://github.com/AbbasSalami/EEG-ITNet - - We use the original parameter implemented on the paper. - - Note that this implementation has not been verified by the original - authors. - - References - ---------- - .. [1] Salami, A., Andreu-Perez, J., & Gillmeister, H. (2022). EEG-ITNet: An explainable inception temporal - convolutional network for motor imagery classification. IEEE Access, 10, 36672-36685. - https://doi.org/10.1109/ACCESS.2022.3161489 - - Notes - ----- - .. 
versionadded:: 0.5.0 - """ - - def __init__( - self, - loss, - optimizer="Adam", - epochs=1000, - batch_size=64, - verbose=0, - random_state=None, - validation_split=0.2, - history_plot=False, - path=None, - learning_rate=0.0009, - drop_rate=0.4, - **kwargs, - ): - super().__init__(**kwargs) - - self.loss = loss - if optimizer == "Adam": - optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate) - - self.optimizer = optimizer - self.epochs = epochs - self.batch_size = batch_size - self.verbose = verbose - self.random_state = random_state - self.validation_split = validation_split - self.history_plot = history_plot - self.path = path - self.drop_rate = drop_rate - - def _keras_build_fn(self, compile_kwargs: Dict[str, Any]): - input_main = Input(shape=(self.X_shape_[1], self.X_shape_[2], 1)) - - block1 = Conv2D( - n_ff[0], - (1, 16), - use_bias=False, - activation="linear", - padding="same", - name="Spectral_filter_1", - )(input_main) - block1 = BatchNormalization()(block1) - block1 = DepthwiseConv2D( - (self.X_shape_[1], 1), - use_bias=False, - padding="valid", - depth_multiplier=n_sf[0], - activation="linear", - depthwise_constraint=tf.keras.constraints.MaxNorm(max_value=1), - name="Spatial_filter_1", - )(block1) - block1 = BatchNormalization()(block1) - block1 = Activation("elu")(block1) - - # ================================ - - block2 = Conv2D( - n_ff[1], - (1, 32), - use_bias=False, - activation="linear", - padding="same", - name="Spectral_filter_2", - )(input_main) - block2 = BatchNormalization()(block2) - block2 = DepthwiseConv2D( - (self.X_shape_[1], 1), - use_bias=False, - padding="valid", - depth_multiplier=n_sf[1], - activation="linear", - depthwise_constraint=tf.keras.constraints.MaxNorm(max_value=1), - name="Spatial_filter_2", - )(block2) - block2 = BatchNormalization()(block2) - block2 = Activation("elu")(block2) - - # ================================ - - block3 = Conv2D( - n_ff[2], - (1, 64), - use_bias=False, - activation="linear", - padding="same", - name="Spectral_filter_3", - )(input_main) - block3 = BatchNormalization()(block3) - block3 = DepthwiseConv2D( - (self.X_shape_[1], 1), - use_bias=False, - padding="valid", - depth_multiplier=n_sf[2], - activation="linear", - depthwise_constraint=tf.keras.constraints.MaxNorm(max_value=1), - name="Spatial_filter_3", - )(block3) - block3 = BatchNormalization()(block3) - block3 = Activation("elu")(block3) - - # ================================ - - block = Concatenate(axis=-1)([block1, block2, block3]) - - # ================================ - - block = AveragePooling2D((1, 4), padding="same")(block) - block_in = Dropout(0.4)(block) - - # ================================ - - paddings = [[0, 0], [0, 0], [3, 0], [0, 0]] - block = pad(block_in, paddings, "constant") - block = DepthwiseConv2D( - (1, 4), padding="valid", depth_multiplier=1, dilation_rate=(1, 1) - )(block) - block = BatchNormalization()(block) - block = Activation("elu")(block) - block = Dropout(0.4)(block) - block = pad(block, paddings, "constant") - block = DepthwiseConv2D( - (1, 4), padding="valid", depth_multiplier=1, dilation_rate=(1, 1) - )(block) - block = BatchNormalization()(block) - block = Activation("elu")(block) - block = Dropout(self.drop_rate)(block) - block_out = Add()([block_in, block]) - - paddings = [[0, 0], [0, 0], [6, 0], [0, 0]] - block = pad(block_out, paddings, "constant") - block = DepthwiseConv2D( - (1, 4), padding="valid", depth_multiplier=1, dilation_rate=(1, 2) - )(block) - block = BatchNormalization()(block) - block = 
Activation("elu")(block) - block = Dropout(self.drop_rate)(block) - print(block.dtype) - block = pad(block, paddings, "constant") - block = DepthwiseConv2D( - (1, 4), padding="valid", depth_multiplier=1, dilation_rate=(1, 2) - )(block) - block = BatchNormalization()(block) - block = Activation("elu")(block) - block = Dropout(self.drop_rate)(block) - block_out = Add()([block_out, block]) - - paddings = [[0, 0], [0, 0], [12, 0], [0, 0]] - block = pad(block_out, paddings, "constant") - block = DepthwiseConv2D( - (1, 4), padding="valid", depth_multiplier=1, dilation_rate=(1, 4) - )(block) - block = BatchNormalization()(block) - block = Activation("elu")(block) - block = Dropout(self.drop_rate)(block) - block = pad(block, paddings, "constant") - block = DepthwiseConv2D( - (1, 4), padding="valid", depth_multiplier=1, dilation_rate=(1, 4) - )(block) - block = BatchNormalization()(block) - block = Activation("elu")(block) - block = Dropout(self.drop_rate)(block) - block_out = Add()([block_out, block]) - - paddings = [[0, 0], [0, 0], [24, 0], [0, 0]] - block = pad(block_out, paddings, "constant") - block = DepthwiseConv2D( - (1, 4), padding="valid", depth_multiplier=1, dilation_rate=(1, 8) - )(block) - block = BatchNormalization()(block) - block = Activation("elu")(block) - block = Dropout(self.drop_rate)(block) - block = pad(block, paddings, "constant") - block = DepthwiseConv2D( - (1, 4), padding="valid", depth_multiplier=1, dilation_rate=(1, 8) - )(block) - block = BatchNormalization()(block) - block = Activation("elu")(block) - block = Dropout(self.drop_rate)(block) - block_out = Add()([block_out, block]) - - # ================================ - - block = block_out - - # ================================ - - block = Conv2D(28, (1, 1))(block) - block = BatchNormalization()(block) - block = Activation("elu")(block) - block = AveragePooling2D((4, 1), padding="same")(block) - block = Dropout(self.drop_rate)(block) - embedded = Flatten()(block) - - dense = Dense(self.n_classes_, kernel_constraint=max_norm(0.5))(embedded) - softmax = Activation("softmax")(dense) - - model = Model(inputs=input_main, outputs=softmax) - - model.compile(loss=compile_kwargs["loss"], optimizer=compile_kwargs["optimizer"]) - - return model diff --git a/moabb/pipelines/features.py b/moabb/pipelines/features.py index f036a2b8d..00574142f 100644 --- a/moabb/pipelines/features.py +++ b/moabb/pipelines/features.py @@ -113,7 +113,7 @@ def transform(self, X: ndarray): class StandardScaler_Epoch(BaseEstimator, TransformerMixin): - """Function to standardize the X raw data for the DeepLearning Method.""" + """Function to standardize the X raw data.""" def __init__(self): """Init.""" diff --git a/moabb/pipelines/utils_deep_model.py b/moabb/pipelines/utils_deep_model.py deleted file mode 100644 index 76cfbc47f..000000000 --- a/moabb/pipelines/utils_deep_model.py +++ /dev/null @@ -1,188 +0,0 @@ -"""Utils for Deep learning integrated on MOABB. - -Implementation using the tensorflow, keras and scikeras framework. -""" - -# Authors: Igor Carrara -# Bruno Aristimunha -# Sylvain Chevallier - -# License: BSD (3-clause) - -from keras.constraints import max_norm -from keras.layers import ( - Activation, - Add, - AveragePooling2D, - BatchNormalization, - Conv1D, - Conv2D, - DepthwiseConv2D, - Dropout, - SeparableConv2D, -) - - -def EEGNet( - data, input_layer, filters_1=8, kernel_size=64, depth=2, dropout=0.5, activation="elu" -): - """EEGNet block implementation as described in [1]_. 
- - This implementation is taken from code by The Integrated Systems Laboratory of ETH Zurich - at https://github.com/iis-eth-zurich/eeg-tcnet - - We use the original parameter implemented on the paper. - - Note that this implementation has not been verified by the original - authors. - - References - ---------- - .. [1] Lawhern, V. J., Solon, A. J., Waytowich, N. R., Gordon, S. M., Hung, C. P., & Lance, B. J. (2018). EEGNet: - a compact convolutional neural network for EEG-based brain–computer interfaces. Journal of neural - engineering, 15(5), 056013. - https://doi.org/10.1088/1741-2552/aace8c - """ - - filters_2 = filters_1 * depth - - block1 = Conv2D( - filters=filters_1, - kernel_size=(1, kernel_size), - padding="same", - input_shape=(data.X_shape_[1], data.X_shape_[2], 1), - use_bias=False, - )(input_layer) - block1 = BatchNormalization()(block1) - block1 = DepthwiseConv2D( - kernel_size=(data.X_shape_[1], 1), - use_bias=False, - depth_multiplier=2, - depthwise_constraint=max_norm(1.0), - )(block1) - block1 = BatchNormalization()(block1) - block1 = Activation(activation)(block1) - block1 = AveragePooling2D((1, 4))(block1) - block1 = Dropout(dropout)(block1) - - block2 = SeparableConv2D( - filters_2, kernel_size=(1, 16), use_bias=False, padding="same" - )(block1) - block2 = BatchNormalization()(block2) - block2 = Activation(activation)(block2) - block2 = AveragePooling2D((1, 8))(block2) - block2 = Dropout(dropout)(block2) - - return block2 - - -def TCN_block( - input_layer, input_dimension, depth, kernel_size, filters, dropout, activation -): - """Temporal Convolutional Network (TCN), TCN_block from [1]_. - - This implementation is taken from code by The Integrated Systems Laboratory of ETH Zurich - at https://github.com/iis-eth-zurich/eeg-tcnet - - References - ---------- - .. [1] Ingolfsson, T. M., Hersche, M., Wang, X., Kobayashi, N., Cavigelli, L., & Benini, L. (2020, October). - EEG-TCNet: An accurate temporal convolutional network for embedded motor-imagery brain–machine interfaces. - In 2020 IEEE International Conference on Systems, Man, and Cybernetics (SMC) (pp. 2958-2965). IEEE. 
- https://doi.org/10.48550/arXiv.2006.00622 - """ - - block = Conv1D( - filters, - kernel_size=kernel_size, - dilation_rate=1, - activation="linear", - padding="causal", - kernel_initializer="he_uniform", - )(input_layer) - block = BatchNormalization()(block) - block = Activation(activation)(block) - block = Dropout(dropout)(block) - block = Conv1D( - filters, - kernel_size=kernel_size, - dilation_rate=1, - activation="linear", - padding="causal", - kernel_initializer="he_uniform", - )(block) - block = BatchNormalization()(block) - block = Activation(activation)(block) - block = Dropout(dropout)(block) - if input_dimension != filters: - conv = Conv1D(filters, kernel_size=1, padding="same")(input_layer) - added = Add()([block, conv]) - else: - added = Add()([block, input_layer]) - out = Activation(activation)(added) - - for i in range(depth - 1): - block = Conv1D( - filters, - kernel_size=kernel_size, - dilation_rate=2 ** (i + 1), - activation="linear", - padding="causal", - kernel_initializer="he_uniform", - )(out) - block = BatchNormalization()(block) - block = Activation(activation)(block) - block = Dropout(dropout)(block) - block = Conv1D( - filters, - kernel_size=kernel_size, - dilation_rate=2 ** (i + 1), - activation="linear", - padding="causal", - kernel_initializer="he_uniform", - )(block) - block = BatchNormalization()(block) - block = Activation(activation)(block) - block = Dropout(dropout)(block) - added = Add()([block, out]) - out = Activation(activation)(added) - - return out - - -def EEGNet_TC(self, input_layer, F1=8, kernLength=64, D=2, dropout=0.1, activation="elu"): - F2 = F1 * D - - block1 = Conv2D( - F1, - kernel_size=(kernLength, 1), - padding="same", - use_bias=False, - data_format="channels_last", - )(input_layer) - block1 = BatchNormalization(axis=-1)(block1) - block1 = DepthwiseConv2D( - kernel_size=(1, self.X_shape_[1]), - use_bias=False, - depth_multiplier=D, - depthwise_constraint=max_norm(1.0), - data_format="channels_last", - )(block1) - block1 = BatchNormalization(axis=-1)(block1) - block1 = Activation(activation)(block1) - block1 = AveragePooling2D((8, 1), data_format="channels_last")(block1) - block1 = Dropout(dropout)(block1) - - block2 = SeparableConv2D( - F2, - kernel_size=(16, 1), - use_bias=False, - padding="same", - data_format="channels_last", - )(block1) - block2 = BatchNormalization(axis=-1)(block2) - block2 = Activation(activation)(block2) - block2 = AveragePooling2D((8, 1), data_format="channels_last")(block2) - block2 = Dropout(dropout)(block2) - - return block2 diff --git a/moabb/tests/util_tests.py b/moabb/tests/util_tests.py index 3ab7f5a35..533419f42 100644 --- a/moabb/tests/util_tests.py +++ b/moabb/tests/util_tests.py @@ -1,14 +1,13 @@ import os.path as osp import tempfile import unittest -from unittest.mock import MagicMock, patch import pytest from joblib import Parallel, delayed from mne import get_config, set_config from moabb.datasets import utils -from moabb.utils import aliases_list, depreciated_alias, set_download_dir, setup_seed +from moabb.utils import aliases_list, depreciated_alias, set_download_dir class TestDownload(unittest.TestCase): @@ -76,31 +75,6 @@ def test_dataset_channel_search(self): self.assertFalse(set(chans) <= set(raw.info["ch_names"])) -class TestSetupSeed(unittest.TestCase): - @patch("builtins.print") - def test_without_tensorflow(self, mock_print): - # Test when tensorflow is not installed - with patch.dict("sys.modules", {"tensorflow": None}): - self.assertFalse(setup_seed(42)) - mock_print.assert_any_call( - 
"We try to set the tensorflow seeds, but it seems that tensorflow is not installed. Please refer to `https://www.tensorflow.org/` to install if you need to use this deep learning module." - ) - - @patch("builtins.print") - def test_without_torch(self, mock_print): - # Test when torch is not installed - with patch.dict("sys.modules", {"torch": None}): - self.assertFalse(setup_seed(42)) - mock_print.assert_any_call( - "We try to set the torch seeds, but it seems that torch is not installed. Please refer to `https://pytorch.org/` to install if you need to use this deep learning module." - ) - - @patch.dict("sys.modules", {"tensorflow": MagicMock(), "torch": MagicMock()}) - def test_with_tensorflow_and_torch(self): - # Test when tensorflow and torch are installed - self.assertTrue(setup_seed(42) is None) # noqa: E71 - - class TestDepreciatedAlias(unittest.TestCase): def test_class_alias(self): @depreciated_alias("DummyB", expire_version="0.1") diff --git a/moabb/utils.py b/moabb/utils.py index 0223d201e..90a8ee75f 100644 --- a/moabb/utils.py +++ b/moabb/utils.py @@ -37,64 +37,8 @@ def _set_random_seed(seed: int) -> None: np.random.seed(seed) -def _set_tensorflow_seed(seed: int) -> None: - """Set the seed for TensorFlow. - - Parameters - ---------- - seed: int - The random seed to use. - Returns - ------- - None - """ - try: - import tensorflow as tf - - tf.random.set_seed(seed) # tf cpu fix seed - os.environ["TF_DETERMINISTIC_OPS"] = ( - "1" # tf gpu fix seed, please `pip install tensorflow-determinism` first - ) - tf.keras.utils.set_random_seed(seed) - - except ImportError: - print( - "We try to set the tensorflow seeds, but it seems that tensorflow is not installed. " - "Please refer to `https://www.tensorflow.org/` to install if you need to use " - "this deep learning module." - ) - return False - - -def _set_torch_seed(seed: int) -> None: - """Set the seed for PyTorch. - - Parameters - ---------- - seed: int - The random seed to use. - Returns - ------- - None - """ - try: - import torch - - torch.manual_seed(seed) - if torch.cuda.is_available(): - torch.cuda.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - except ImportError: - print( - "We try to set the torch seeds, but it seems that torch is not installed. " - "Please refer to `https://pytorch.org/` to install if you need to use " - "this deep learning module." - ) - return False - - def setup_seed(seed: int) -> None: - """Set the seed for random, numpy, TensorFlow and PyTorch. + """Set the seed for random, numpy. 
Parameters ---------- @@ -105,14 +49,8 @@ def setup_seed(seed: int) -> None: None """ _set_random_seed(seed) - # check if the return is bool - tensorflow_return = _set_tensorflow_seed(seed) - torch_return = _set_torch_seed(seed) - - if tensorflow_return is False or torch_return is False: - return False - else: - return None + + return None def set_log_level(level="INFO"): diff --git a/pipelines/Keras_DeepConvNet.yml b/pipelines/Keras_DeepConvNet.yml deleted file mode 100644 index ae0101f6b..000000000 --- a/pipelines/Keras_DeepConvNet.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Keras_DeepConvNet - -paradigms: - - LeftRightImagery - - MotorImagery - - P300 - - SSVEP - -citations: - - https://doi.org/10.1002/hbm.23730 - -pipeline: - - name: Resampler_Epoch - from: moabb.pipelines.features - parameters: - sfreq: 250 - - - name: Convert_Epoch_Array - from: moabb.pipelines.features - - - name: StandardScaler_Epoch - from: moabb.pipelines.features - - - name: KerasDeepConvNet - from: moabb.pipelines.deep_learning - parameters: - loss: "sparse_categorical_crossentropy" - optimizer: - - name: Adam - from: tensorflow.keras.optimizers - parameters: - learning_rate: 0.001 - epochs: 300 - batch_size: 64 - verbose: 0 - random_state: 42 - validation_split: 0.2 - callbacks: - - name: EarlyStopping - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 75 - - - name: ReduceLROnPlateau - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 75 - factor: 0.5 diff --git a/pipelines/Keras_EEGITNet.yml b/pipelines/Keras_EEGITNet.yml deleted file mode 100644 index b78557243..000000000 --- a/pipelines/Keras_EEGITNet.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Keras_EEGITNet - -paradigms: - - LeftRightImagery - - MotorImagery - - P300 - - SSVEP - -citations: - - https://doi.org/10.1109/ACCESS.2022.3161489 - -pipeline: - - name: Resampler_Epoch - from: moabb.pipelines.features - parameters: - sfreq: 128 - - - name: Convert_Epoch_Array - from: moabb.pipelines.features - - - name: StandardScaler_Epoch - from: moabb.pipelines.features - - - name: KerasEEGITNet - from: moabb.pipelines.deep_learning - parameters: - loss: "sparse_categorical_crossentropy" - optimizer: - - name: Adam - from: tensorflow.keras.optimizers - parameters: - learning_rate: 0.001 - epochs: 300 - batch_size: 64 - verbose: 0 - random_state: 42 - validation_split: 0.2 - callbacks: - - name: EarlyStopping - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 75 - - - name: ReduceLROnPlateau - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 75 - factor: 0.5 diff --git a/pipelines/Keras_EEGNeX.yml b/pipelines/Keras_EEGNeX.yml deleted file mode 100644 index 8c08a6edb..000000000 --- a/pipelines/Keras_EEGNeX.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Keras_EEGNeX - -paradigms: - - LeftRightImagery - - MotorImagery - - P300 - - SSVEP - -citations: - - https://doi.org/10.48550/arXiv.2207.12369 - -pipeline: - - name: Resampler_Epoch - from: moabb.pipelines.features - parameters: - sfreq: 128 - - - name: Convert_Epoch_Array - from: moabb.pipelines.features - - - name: StandardScaler_Epoch - from: moabb.pipelines.features - - - name: KerasEEGNeX - from: moabb.pipelines.deep_learning - parameters: - loss: "sparse_categorical_crossentropy" - optimizer: - - name: Adam - from: tensorflow.keras.optimizers - parameters: - learning_rate: 0.001 - epochs: 300 - batch_size: 64 - verbose: 0 - random_state: 42 - validation_split: 0.2 - callbacks: - - name: 
EarlyStopping - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 75 - - - name: ReduceLROnPlateau - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 75 - factor: 0.5 diff --git a/pipelines/Keras_EEGNet_8_2.yml b/pipelines/Keras_EEGNet_8_2.yml deleted file mode 100644 index 5ed67194d..000000000 --- a/pipelines/Keras_EEGNet_8_2.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Keras_EEGNet_8_2 - -paradigms: - - LeftRightImagery - - MotorImagery - - P300 - - SSVEP - -citations: - - https://doi.org/10.1088/1741-2552/aace8c - -pipeline: - - name: Resampler_Epoch - from: moabb.pipelines.features - parameters: - sfreq: 128 - - - name: Convert_Epoch_Array - from: moabb.pipelines.features - - - name: StandardScaler_Epoch - from: moabb.pipelines.features - - - name: KerasEEGNet_8_2 - from: moabb.pipelines.deep_learning - parameters: - loss: "sparse_categorical_crossentropy" - optimizer: - - name: Adam - from: tensorflow.keras.optimizers - parameters: - learning_rate: 0.001 - epochs: 300 - batch_size: 64 - verbose: 0 - random_state: 42 - validation_split: 0.2 - callbacks: - - name: EarlyStopping - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 75 - - - name: ReduceLROnPlateau - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 75 - factor: 0.5 diff --git a/pipelines/Keras_EEGTCNet.yml b/pipelines/Keras_EEGTCNet.yml deleted file mode 100644 index f68ed0834..000000000 --- a/pipelines/Keras_EEGTCNet.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Keras_EEGTCNet - -paradigms: - - LeftRightImagery - - MotorImagery - - P300 - - SSVEP - -citations: - - https://doi.org/10.1109/SMC42975.2020.9283028 - -pipeline: - - name: Resampler_Epoch - from: moabb.pipelines.features - parameters: - sfreq: 250 - - - name: Convert_Epoch_Array - from: moabb.pipelines.features - - - name: StandardScaler_Epoch - from: moabb.pipelines.features - - - name: KerasEEGTCNet - from: moabb.pipelines.deep_learning - parameters: - loss: "sparse_categorical_crossentropy" - optimizer: - - name: Adam - from: tensorflow.keras.optimizers - parameters: - learning_rate: 0.001 - epochs: 300 - batch_size: 64 - verbose: 0 - random_state: 42 - validation_split: 0.2 - callbacks: - - name: EarlyStopping - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 75 - - - name: ReduceLROnPlateau - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 75 - factor: 0.5 diff --git a/pipelines/Keras_ShallowConvNet.yml b/pipelines/Keras_ShallowConvNet.yml deleted file mode 100644 index 626d428fa..000000000 --- a/pipelines/Keras_ShallowConvNet.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Keras_ShallowConvNet - -paradigms: - - LeftRightImagery - - MotorImagery - - P300 - - SSVEP - -citations: - - https://doi.org/10.1002/hbm.23730 - -pipeline: - - name: Resampler_Epoch - from: moabb.pipelines.features - parameters: - sfreq: 250 - - - name: Convert_Epoch_Array - from: moabb.pipelines.features - - - name: StandardScaler_Epoch - from: moabb.pipelines.features - - - name: KerasShallowConvNet - from: moabb.pipelines.deep_learning - parameters: - loss: "sparse_categorical_crossentropy" - optimizer: - - name: Adam - from: tensorflow.keras.optimizers - parameters: - learning_rate: 0.001 - epochs: 300 - batch_size: 64 - verbose: 0 - random_state: 42 - validation_split: 0.2 - callbacks: - - name: EarlyStopping - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 75 - - - name: 
ReduceLROnPlateau - from: tensorflow.keras.callbacks - parameters: - monitor: "val_loss" - patience: 75 - factor: 0.5 diff --git a/pyproject.toml b/pyproject.toml index 18df836dc..7405d6f43 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,10 +37,6 @@ scikit-learn = "<1.6" codecarbon = { version = "^2.1.4", optional = true } # Optional dependencies for deep learning -tensorflow = { version = ">=2.16", optional = true } -keras = { version = ">=3.2.0", optional = true } -scikeras = { version = "^0.13.0", optional = true } -libclang = { version = "^15.0", optional = true } optuna = { version = "^3.6.1", optional = true } optuna-integration = { version = "^3.6.0", optional = true } braindecode = { version = ">=0.8.1", optional = true } @@ -66,7 +62,7 @@ sphinx-favicon = { version = "^1.0.1", optional = true } [tool.poetry.extras] carbonemission = ["codecarbon"] -deeplearning = ["tensorflow", "keras", "scikeras", "braindecode", "libclang"] +deeplearning = ["braindecode"] optuna = ["optuna", "optuna-integration"] tests = ["pytest", "pytest-cov", "codecov", "pytest_cases"] docs = ["Sphinx", "sphinx-gallery", "sphinx-bootstrap-theme", "pydata-sphinx-theme", "numpydoc", "myst-parser", "tdlda", "sphinx-design", "sphinx-rtd-theme", "sphinx_copybutton", "sphinxcontrib-bibtex", "sphinx-favicon"]
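
For users migrating away from the removed moabb.pipelines.deep_learning module, the sketch below outlines the replacement path through braindecode, now the sole target of the slimmed-down pip install moabb[deeplearning] extra. It is a minimal, hedged sketch rather than an official recipe: the ShallowFBCSPNet keyword names assume a braindecode 0.8-style signature, the channel and sample counts are placeholders for whatever the chosen paradigm actually returns, and the CastToFloat32 transformer is a hypothetical helper (not part of MOABB) added because skorch passes numpy arrays straight to torch.

# Hypothetical migration sketch: evaluating a braindecode/skorch model with
# MOABB after the removal of the Keras pipelines. Keyword names and shapes
# are assumptions (braindecode >= 0.8); verify against your installed versions.
import numpy as np
import torch
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline

from braindecode import EEGClassifier
from braindecode.models import ShallowFBCSPNet

from moabb.datasets import BNCI2014_001
from moabb.evaluations import WithinSessionEvaluation
from moabb.paradigms import LeftRightImagery


class CastToFloat32(BaseEstimator, TransformerMixin):
    """Hypothetical helper: MOABB paradigms return float64 arrays, while the
    torch model weights are float32, so the data is cast before fitting."""

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        return X.astype(np.float32)


paradigm = LeftRightImagery()
dataset = BNCI2014_001()
dataset.subject_list = dataset.subject_list[:1]  # keep the run short

# Placeholder dimensions for BNCI2014_001 (22 channels, ~4 s at 250 Hz);
# in practice derive them from paradigm.get_data(dataset).
model = ShallowFBCSPNet(n_chans=22, n_outputs=2, n_times=1001, final_conv_length="auto")

clf = EEGClassifier(
    module=model,
    # Pick a criterion matching the model output (logits vs. log-probabilities).
    criterion=torch.nn.CrossEntropyLoss,
    optimizer=torch.optim.Adam,
    train_split=None,  # MOABB handles the train/test split
    max_epochs=5,
    batch_size=64,
    verbose=0,
)

# Depending on the MOABB version, string labels may need a LabelEncoder step.
pipelines = {"ShallowFBCSPNet": Pipeline([("to_float32", CastToFloat32()), ("net", clf)])}

evaluation = WithinSessionEvaluation(paradigm=paradigm, datasets=[dataset], overwrite=False)
results = evaluation.process(pipelines)
print(results.head())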