diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 000000000..570c103f8
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,6 @@
+version: 2
+updates:
+  - package-ecosystem: "pip" # See documentation for possible values
+    directory: "/" # Location of package manifests
+    schedule:
+      interval: "weekly"
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
new file mode 100644
index 000000000..d5f91e3c8
--- /dev/null
+++ b/.github/workflows/coverage.yml
@@ -0,0 +1,77 @@
+name: coverage
+
+on:
+  push:
+    branches:
+      - master
+      - dev
+      - 144-monitor-test-coverage
+  workflow_dispatch:
+    inputs:
+      slow:
+        type: boolean
+        description: Run with slow tests
+        default: false
+
+jobs:
+  coverage-job:
+    runs-on: ${{ matrix.os }}
+
+    defaults:
+      run:
+        shell: bash -l {0}
+
+    strategy:
+      matrix:
+        os: [ubuntu-latest]
+        python-version: ["3.12"]
+
+    env:
+      CONDA_FILE: environment.yml
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Get Date
+        id: get-date
+        run: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT
+        shell: bash
+
+      - name: Setup Conda Environment
+        uses: conda-incubator/setup-miniconda@v3
+        with:
+          miniforge-version: latest
+          use-mamba: true
+          activate-environment: cadet-process
+          channels: conda-forge
+
+      - name: Cache conda
+        uses: actions/cache@v4
+        id: cache
+        env:
+          # Increase this value to reset cache if environment.yml has not changed
+          CACHE_NUMBER: 0
+        with:
+          path: ${{ env.CONDA }}/envs
+          key: ${{ matrix.os }}-python_${{ matrix.python-version }}-${{ steps.get-date.outputs.today }}-${{ hashFiles(env.CONDA_FILE) }}-${{ env.CACHE_NUMBER }}
+
+      - name: Update environment
+        run: |
+          mamba install "setuptools>=69" "pip>=24"
+          mamba install python=${{ matrix.python-version }}
+          echo "python=${{ matrix.python-version }}.*" > $CONDA_PREFIX/conda-meta/pinned
+          mamba env update -n cadet-process -f ${{ env.CONDA_FILE }}
+        if: steps.cache.outputs.cache-hit != 'true'
+
+      - name: Install
+        run: |
+          pip install -e ./[testing,coverage]
+
+      # A push event has no workflow_dispatch input context, so inputs.slow expands to an empty string and the comparison below is false.
+      - name: Coverage Run
+        run: |
+          if [ "${{ github.event.inputs.slow }}" == "true" ]; then
+            pytest --cov=./CADETProcess --cov=./tests --cov-report term-missing tests
+          else
+            pytest -m "not slow" --cov=./CADETProcess --cov=./tests --cov-report term-missing tests
+          fi
diff --git a/.github/workflows/pipeline.yml b/.github/workflows/pipeline.yml
index 4cc51ecde..c1faf33a7 100644
--- a/.github/workflows/pipeline.yml
+++ b/.github/workflows/pipeline.yml
@@ -21,8 +21,8 @@ jobs:
         os: [ubuntu-latest]
         python-version: ["3.10", "3.11", "3.12"]
         include:
-          # - os: windows-latest
-          #   python-version: "3.12"
+          - os: windows-latest
+            python-version: "3.12"
           - os: macos-13
             python-version: "3.12"

@@ -41,9 +41,8 @@
         uses: conda-incubator/setup-miniconda@v3
         with:
           miniforge-version: latest
-          use-mamba: true
           activate-environment: cadet-process
-          channels: conda-forge,
+          channels: conda-forge

       - name: Cache conda
         uses: actions/cache@v4
@@ -53,31 +52,32 @@
         with:
           path: ${{ env.CONDA }}/envs
           key: ${{ matrix.os }}-python_${{ matrix.python-version }}-${{ steps.get-date.outputs.today }}-${{ hashFiles(env.CONDA_FILE) }}-${{ env.CACHE_NUMBER }}
+        id: cache

       - name: Update environment
         run: |
-          mamba install "setuptools>=69" "pip>=24"
-          mamba install python=${{ matrix.python-version }}
+          conda install "setuptools>=69" "pip>=24"
+          conda install python=${{ matrix.python-version }}
           echo "python=${{ matrix.python-version }}.*" > $CONDA_PREFIX/conda-meta/pinned
-          mamba env update -n cadet-process -f ${{ env.CONDA_FILE }}
+          conda env update -n cadet-process -f ${{ env.CONDA_FILE }}
        if: steps.cache.outputs.cache-hit != 'true'

      - name: Install
        run: |
-          pip install -e ./[testing]
+          conda run pip install -e ./[testing]

      - name: Test
        run: |
-          python -m unittest discover -s tests
+          pytest tests -m "not slow" --durations=0

      - name: Install pypa/build
        run: |
-          python -m pip install build --user
+          conda run python -m pip install build --user

      - name: Build binary wheel and source tarball
        run: |
-          python -m build --sdist --wheel --outdir dist/ .
+          conda run python -m build --sdist --wheel --outdir dist/ .

      - name: Test Wheel install and import
        run: |
-          python -c "import CADETProcess; print(CADETProcess.__version__)"
+          conda run python -c "import CADETProcess; print(CADETProcess.__version__)"
diff --git a/CADETProcess/__init__.py b/CADETProcess/__init__.py
index b1d559644..2b1056b62 100644
--- a/CADETProcess/__init__.py
+++ b/CADETProcess/__init__.py
@@ -22,6 +22,7 @@ settings = Settings()

 from . import sysinfo
+from . import numerics
 from . import dataStructure
 from . import transform
 from . import plotting
diff --git a/CADETProcess/comparison/comparator.py b/CADETProcess/comparison/comparator.py
index 79cfcbe0b..8f4f7161f 100644
--- a/CADETProcess/comparison/comparator.py
+++ b/CADETProcess/comparison/comparator.py
@@ -13,6 +13,7 @@ from CADETProcess.dataStructure import get_nested_value
 from CADETProcess.solution import SolutionBase
 from CADETProcess.comparison import DifferenceBase
+from CADETProcess.numerics import round_to_significant_digits


 class Comparator(Structure):
@@ -319,10 +320,14 @@ def plot_comparison(
             solution.smooth_data()
             solution_sliced = metric.slice_and_transform(solution)

+            y_max = 1.1 * max(
+                np.max(solution_sliced.solution), np.max(metric.reference.solution)
+            )
+
             fig, ax = solution_sliced.plot(
                 ax=ax,
                 show=False,
-                y_max=1.1*np.max(metric.reference.solution)
+                y_max=y_max,
             )

             plot_args = {
@@ -338,12 +343,7 @@
             ax.legend(loc=1)

             m = metric.evaluate(solution_sliced, slice=False)
-            m = [
-                np.format_float_scientific(
-                    n, precision=2,
-                )
-                for n in m
-            ]
+            m = round_to_significant_digits(m, digits=2)

             text = f"{metric}: "
             if metric.n_metrics > 1:
@@ -356,7 +356,7 @@
                 except AttributeError:
                     text += f"{m}"
             else:
-                text += m[0]
+                text += str(m[0])

             plotting.add_text(ax, text, fontsize=14)
diff --git a/CADETProcess/comparison/difference.py b/CADETProcess/comparison/difference.py
index f19feea08..96f6c1ffc 100644
--- a/CADETProcess/comparison/difference.py
+++ b/CADETProcess/comparison/difference.py
@@ -1,7 +1,6 @@
 from abc import abstractmethod
 import copy
 from functools import wraps
-from warnings import warn

 import numpy as np
 from scipy.integrate import simpson
@@ -29,14 +28,6 @@
 ]


-def squishify(*args, **kwargs):
-    warn(
-        'This function is deprecated, use sigmoid_distance.',
-        DeprecationWarning, stacklevel=2
-    )
-    return sigmoid_distance(*args, **kwargs)
-
-
 def sigmoid_distance(measurement, target, normalization=1):
     """Calculate the distance between two values using a sigmoid function.

@@ -495,8 +486,9 @@ class Shape(DifferenceBase):
     @wraps(DifferenceBase.__init__)
     def __init__(
             self, *args,
-            use_derivative=True, normalize_metrics=True, normalization_factor=None,
-            include_height=True,
+            use_derivative=True,
+            normalize_metrics=True,
+            normalization_factor=None,
             **kwargs):
         """Initialize Shape metric.
@@ -514,10 +506,6 @@ def __init__(
         normalization_factor : float, optional
             Normalization factor used by the sigmoid function.
             Default is None, which sets it to 1/10 of the simulation time.
-        include_height : bool, optional
-            If True, also return a metric for the height difference.
-            If `use_derivative` is also true, also return height differences
-            for the derivatives.
         **kwargs : dict
             Keyword arguments passed to the base class constructor.

@@ -532,13 +520,6 @@
                 "Shape currently only supports single component."
             )

-        self.include_height = include_height
-        if include_height:
-            warn("Peak height will be removed from the Shape difference metric.", DeprecationWarning)
-            self.peak_height = PeakHeight(
-                *args, normalize=False, normalize_metrics=normalize_metrics, **kwargs
-            )
-
         self.use_derivative = use_derivative
         if use_derivative:
             self.reference_der = self.reference.derivative
@@ -555,17 +536,6 @@
                 coordinates={'time': (self.start, self.end)}
             )

-            self.peak_der_min = PeakHeight(
-                self.reference_der, *args[1:], normalize=False,
-                find_minima=True, normalize_metrics=normalize_metrics,
-                **kwargs
-            )
-            self.peak_der_max = PeakHeight(
-                self.reference_der, *args[1:], normalize=False,
-                find_minima=False, normalize_metrics=normalize_metrics,
-                **kwargs
-            )
-
         self.normalize_metrics = normalize_metrics
         if normalization_factor is None:
             normalization_factor = self.reference.time[-1]/10
@@ -575,29 +545,16 @@ def n_metrics(self):
         n_metrics = 2

-        if self.include_height:
-            n_metrics += 1
-
         if self.use_derivative:
             n_metrics += 1

-            if self.include_height:
-                n_metrics += 2
-
         return n_metrics

     @property
     def labels(self):
         labels = ['Pearson Correleation', 'Time offset']

-        if self.include_height:
-            labels += ["Peak Height"]
-
         if self.use_derivative:
             labels += ['Pearson Correlation Derivative']

-            if self.include_height:
-                labels += [
-                    'Peak Minimum Derivative',
-                    'Peak Maximum Derivative'
-                ]
         return labels

     def _evaluate(self, solution):
@@ -615,9 +572,6 @@
             solution.solution_interpolated.solutions[0],
         )

-        if self.include_height:
-            peak_height = self.peak_height(solution, slice=False)
-
         if self.normalize_metrics:
             offset = sigmoid_distance(
                 offset_original, target=0, normalization=self.normalization_factor
@@ -626,10 +580,7 @@
             offset = np.abs(offset_original)

         if not self.use_derivative:
-            if self.include_height:
-                return np.array([corr, offset, peak_height[0]])
-            elif not self.include_height:
-                return np.array([corr, offset])
+            return np.array([corr, offset])

         solution_der = solution.derivative
         solution_der_sliced = self.slice_and_transform(solution_der)
@@ -641,23 +592,12 @@
             offset_original,
         )

-        der_min = self.peak_der_min(solution_der_sliced, slice=False)
-        der_max = self.peak_der_max(solution_der_sliced, slice=False)
-
-        if self.include_height:
-            return np.array(
-                [
-                    corr, offset, peak_height[0],
-                    corr_der, der_min[0], der_max[0]
-                ]
-            )
-        elif not self.include_height:
-            return np.array(
-                [
-                    corr, offset,
-                    corr_der,
-                ]
-            )
+        return np.array(
+            [
+                corr, offset,
+                corr_der,
+            ]
+        )


 class PeakHeight(DifferenceBase):
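Reviewer note: with `include_height` removed, `Shape._evaluate` now returns `[corr, offset]` or `[corr, offset, corr_der]`, where the raw time offset is mapped into a bounded range by `sigmoid_distance`. A minimal sketch of that normalization follows; the exact functional form used in `difference.py` may differ, this only illustrates the saturating behavior:

```python
import numpy as np

def sigmoid_distance(measurement: float, target: float, normalization: float = 1.0) -> float:
    """Map the absolute offset between measurement and target into [0, 1)."""
    # Small offsets stay near 0; large offsets saturate towards 1.
    delta = np.abs(measurement - target) / normalization
    return 2 / (1 + np.exp(-delta)) - 1

# Example: a 60 s peak-time offset, normalized by 1/10 of a 6000 s simulation.
print(sigmoid_distance(60.0, target=0.0, normalization=600.0))  # ~0.05
```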
diff --git a/CADETProcess/dataStructure/__init__.py b/CADETProcess/dataStructure/__init__.py
index efa04e240..750a3588d 100644
--- a/CADETProcess/dataStructure/__init__.py
+++ b/CADETProcess/dataStructure/__init__.py
@@ -41,3 +41,4 @@
 from .cache import *
 from .diskcache import *
 from .nested_dict import *
+from .deprecation import *
diff --git a/CADETProcess/dataStructure/aggregator.py b/CADETProcess/dataStructure/aggregator.py
index 03a86b1ea..65b4807f9 100644
--- a/CADETProcess/dataStructure/aggregator.py
+++ b/CADETProcess/dataStructure/aggregator.py
@@ -2,13 +2,96 @@

 from .dataStructure import Aggregator

+import numpy as np
+
+
+class NumpyProxyArray(np.ndarray):
+    """A numpy array that dynamically updates attributes of container elements."""
+
+    def __new__(cls, aggregator, instance):
+        values = aggregator._get_values_from_container(instance, transpose=True)
+
+        if values is None:
+            return
+
+        obj = values.view(cls)
+
+        obj.aggregator = aggregator
+        obj.instance = instance
+
+        return obj
+
+    def _get_values_from_aggregator(self):
+        """Refresh data from the underlying container."""
+        return self.aggregator._get_values_from_container(
+            self.instance, transpose=True, check=True
+        )
+
+    def __getitem__(self, index):
+        """Retrieve an item from the aggregated parameter array."""
+        return self._get_values_from_aggregator()[index]
+
+    def __setitem__(self, index, value):
+        """
+        Modify an individual element in the aggregated parameter list.
+
+        This ensures changes are propagated back to the objects.
+        """
+        current_value = self._get_values_from_aggregator()
+        current_value[index] = value
+        self.aggregator.__set__(self.instance, current_value)
+
+    def __array_finalize__(self, obj):
+        """Ensure attributes are copied when creating a new view or slice."""
+        if obj is None:
+            self.aggregator = None
+            self.instance = None
+            return
+
+        if not isinstance(obj, NumpyProxyArray):
+            return np.asarray(obj)
+
+        self.aggregator = getattr(obj, 'aggregator', None)
+        self.instance = getattr(obj, 'instance', None)
+
+    def __array_function__(self, func, types, *args, **kwargs):
+        """
+        Ensure that high-level NumPy functions (like np.dot, np.linalg.norm) return a
+        normal np.ndarray.
+        """
+        result = super().__array_function__(func, types, *args, **kwargs)
+        return np.asarray(result)
+
+    def __repr__(self):
+        """Return a fresh representation that reflects live data."""
+        return f"NumpyProxyA{self._get_values_from_aggregator().__repr__()[1:]}"
+

 class SizedAggregator(Aggregator):
     """Aggregator for sized parameters."""

+    def __init__(self, *args, transpose=False, **kwargs):
+        """
+        Initialize a SizedAggregator instance.
+
+        Parameters
+        ----------
+        *args : Any
+            Variable length argument list.
+        transpose : bool, optional
+            If False, the parameter shape will be ((n_instances, ) + parameter_shape).
+            Else, it will be (parameter_shape + (n_instances, )).
+            The default is False.
+        **kwargs : Any
+            Arbitrary keyword arguments.
+ """ + self.transpose = transpose + + super().__init__(*args, **kwargs) + def _parameter_shape(self, instance): - values = self._get_parameter_values_from_container(instance) - shapes = [np.array(el, ndmin=1).shape for el in values] + values = self._get_values_from_container(instance, transpose=False) + + shapes = [el.shape for el in values] if len(set(shapes)) > 1: raise ValueError("Inconsistent parameter shapes.") @@ -19,35 +102,90 @@ def _parameter_shape(self, instance): return shapes[0] def _expected_shape(self, instance): - return (self._n_instances(instance), ) + self._parameter_shape(instance) + if self.transpose: + return self._parameter_shape(instance) + (self._n_instances(instance), ) + else: + return (self._n_instances(instance), ) + self._parameter_shape(instance) - def _get_parameter_values_from_container(self, instance): - value = super()._get_parameter_values_from_container(instance) + def _get_values_from_container(self, instance, transpose=False, check=False): + value = super()._get_values_from_container(instance, check=False) if value is None or len(value) == 0: return - value = np.array(value, ndmin=2).T + value = np.array(value, ndmin=2) + + if check: + value = self._prepare(instance, value, transpose=False, recursive=True) + self._check(instance, value, transpose=True, recursive=True) + + if transpose and self.transpose: + value = value.T + return value - def _check(self, instance, value, recursive=False): + def _check(self, instance, value, transpose=True, recursive=False): + value_array = np.array(value, ndmin=2) + if transpose and self.transpose: + value_array = value_array.T + + value_shape = value_array.shape expected_shape = self._expected_shape(instance) - if value.shape != expected_shape: + + if value_shape != expected_shape: raise ValueError( - f"Expected a array with shape {expected_shape}, got {value.shape}" + f"Expected a array with shape {expected_shape}, got {value_shape}" ) if recursive: super()._check(instance, value, recursive) - def _prepare(self, instance, value, recursive=False): - value = np.array(value) + def _prepare(self, instance, value, transpose=False, recursive=False): + value = np.array(value, ndmin=2) + + if transpose and self.transpose: + value = value.T if recursive: value = super()._prepare(instance, value, recursive) return value + def __get__(self, instance, cls): + """ + Retrieve the descriptor value for the given instance. + + Parameters + ---------- + instance : Any + Instance to retrieve the descriptor value for. + cls : Type[Any], optional + Class to which the descriptor belongs. + + Returns + ------- + NumpyProxyArray + A numpy-like array that modifies the underlying objects when changed. + """ + if instance is None: + return self + + return NumpyProxyArray(self, instance) + + def __set__(self, instance, value): + """ + Set the descriptor value for the given instance. + + Parameters + ---------- + instance : Any + Instance to set the descriptor value for. + value : Any + Value to set. 
+ """ + value = self._prepare(instance, value, transpose=True) + super().__set__(instance, value) + class ClassDependentAggregator(Aggregator): """Aggregator where parameter name changes depending on instance type.""" @@ -70,7 +208,7 @@ def __init__(self, *args, mapping, **kwargs): super().__init__(*args, **kwargs) - def _get_parameter_values_from_container(self, instance): + def _get_values_from_container(self, instance, check=False): container = self._container_obj(instance) values = [] @@ -88,7 +226,12 @@ def _get_parameter_values_from_container(self, instance): if len(values) == 0: return + if check: + values = self._prepare(instance, values, transpose=False, recursive=True) + self._check(instance, values, transpose=True, recursive=True) + return values + class SizedClassDependentAggregator(SizedAggregator, ClassDependentAggregator): pass diff --git a/CADETProcess/dataStructure/dataStructure.py b/CADETProcess/dataStructure/dataStructure.py index f18604a14..7525e1a77 100644 --- a/CADETProcess/dataStructure/dataStructure.py +++ b/CADETProcess/dataStructure/dataStructure.py @@ -48,20 +48,63 @@ def __delete__(self, instance): del instance.__dict__[self.name] +class ProxyList(): + """A proxy list that dynamically updates attributes of container elements.""" + + def __init__(self, aggregator, instance): + self.aggregator = aggregator + self.instance = instance + + def _get_values_from_aggregator(self): + """Fetch the latest values from the aggregator.""" + return self.aggregator._get_values_from_container( + self.instance, check=True + ) + + def __getitem__(self, index): + """Retrieve an item from the aggregated parameter list (live view).""" + return self._get_values_from_aggregator()[index] + + def __setitem__(self, index, value): + """ + Modify an individual element in the aggregated parameter list. + Ensures changes propagate to the underlying objects. + """ + current_value = self._get_values_from_aggregator() + current_value[index] = value + self.aggregator.__set__(self.instance, current_value) + + def __iter__(self): + """Iterate over aggregated values.""" + return iter(self._get_values_from_aggregator()) + + def __len__(self): + """Return the length of the container.""" + return len(self._get_values_from_aggregator()) + + def __repr__(self): + """String representation for debugging.""" + return f"ProxyList({self._get_values_from_aggregator().__repr__()})" + + def __eq__(self, other): + """Equality comparison.""" + return list(self._get_values_from_aggregator()) == other + + class Aggregator(): - """Descriptor aggregating parameters from instance container with other instances.""" + """Descriptor aggregating parameters from iterable container of other objects.""" def __init__(self, parameter_name, container, *args, **kwargs): """ - Initialize the Aggregator descriptor. + Initialize the Aggregator. Parameters ---------- parameter_name : str Name of the parameter to be aggregated. container : str - Name of the attribute in the instance that contains the other instances - from which parameters will be aggregated. + Name of the iterable attribute in the instance that contains the other + objects from which parameters will be aggregated. *args : tuple, optional Additional positional arguments. 
diff --git a/CADETProcess/dataStructure/dataStructure.py b/CADETProcess/dataStructure/dataStructure.py
index f18604a14..7525e1a77 100644
--- a/CADETProcess/dataStructure/dataStructure.py
+++ b/CADETProcess/dataStructure/dataStructure.py
@@ -48,20 +48,63 @@ def __delete__(self, instance):
         del instance.__dict__[self.name]


+class ProxyList():
+    """A proxy list that dynamically updates attributes of container elements."""
+
+    def __init__(self, aggregator, instance):
+        self.aggregator = aggregator
+        self.instance = instance
+
+    def _get_values_from_aggregator(self):
+        """Fetch the latest values from the aggregator."""
+        return self.aggregator._get_values_from_container(
+            self.instance, check=True
+        )
+
+    def __getitem__(self, index):
+        """Retrieve an item from the aggregated parameter list (live view)."""
+        return self._get_values_from_aggregator()[index]
+
+    def __setitem__(self, index, value):
+        """
+        Modify an individual element in the aggregated parameter list.
+
+        Ensures changes propagate to the underlying objects.
+        """
+        current_value = self._get_values_from_aggregator()
+        current_value[index] = value
+        self.aggregator.__set__(self.instance, current_value)
+
+    def __iter__(self):
+        """Iterate over aggregated values."""
+        return iter(self._get_values_from_aggregator())
+
+    def __len__(self):
+        """Return the length of the container."""
+        return len(self._get_values_from_aggregator())
+
+    def __repr__(self):
+        """String representation for debugging."""
+        return f"ProxyList({self._get_values_from_aggregator().__repr__()})"
+
+    def __eq__(self, other):
+        """Equality comparison."""
+        return list(self._get_values_from_aggregator()) == other
+
+
 class Aggregator():
-    """Descriptor aggregating parameters from instance container with other instances."""
+    """Descriptor aggregating parameters from iterable container of other objects."""

     def __init__(self, parameter_name, container, *args, **kwargs):
         """
-        Initialize the Aggregator descriptor.
+        Initialize the Aggregator.

         Parameters
         ----------
         parameter_name : str
             Name of the parameter to be aggregated.
         container : str
-            Name of the attribute in the instance that contains the other instances
-            from which parameters will be aggregated.
+            Name of the iterable attribute in the instance that contains the other
+            objects from which parameters will be aggregated.
         *args : tuple, optional
             Additional positional arguments.
         **kwargs : dict, optional
@@ -98,14 +141,16 @@ def _container_obj(self, instance):
         return container

     def _n_instances(self, instance):
-        return len(self._get_parameter_values_from_container(instance))
+        return len(self._container_obj(instance))

-    def _get_parameter_values_from_container(self, instance):
+    def _get_values_from_container(self, instance, check=False):
         container = self._container_obj(instance)
+
         value = [getattr(el, self.parameter_name) for el in container]

-        if len(value) == 0:
-            return
+        if check:
+            value = self._prepare(instance, value, recursive=True)
+            self._check(instance, value, recursive=True)

         return value

@@ -128,12 +173,7 @@ def __get__(self, instance, cls):
         if instance is None:
             return self

-        value = self._get_parameter_values_from_container(instance)
-
-        if value is not None:
-            self._check(instance, value, recursive=True)
-
-        return value
+        return ProxyList(self, instance)

     def __set__(self, instance, value):
         """
@@ -143,8 +183,9 @@
         ----------
         instance : Any
             Instance to set the descriptor value for.
-        value : Any
-            Value to set.
+        value : Iterable
+            Value to set. Note, this assumes that each element of the value maps to
+            each element of the container.
         """
         if value is not None:
             value = self._prepare(instance, value, recursive=True)

         container = self._container_obj(instance)

-        for i, el in enumerate(container):
-            setattr(el, self.parameter_name, value[i])
+        for el, el_value in zip(container, value):
+            setattr(el, self.parameter_name, el_value)

     def _prepare(self, instance, value, recursive=False):
         """
@@ -185,12 +226,21 @@
         ----------
         instance : Any
             Instance to retrieve the descriptor value for.
-        value : Any
-            Value to check.
+        value : Iterable
+            Value to check. Note, this assumes that each element of the value maps to
+            each element of the container.
         recursive : bool, optional
             If True, perform the check recursively. Defaults to False.
         """
+        container = self._container_obj(instance)
+
+        if len(value) != len(container):
+            raise ValueError(
+                "Unexpected length. "
+                f"Expected {len(container)} entries, got {len(value)}."
+            )
+
         return


@@ -232,7 +282,6 @@ class StructMeta(type):
     Descriptor : Class that represents the descriptors this metaclass operates on.
     Parameters : Base class for model parameters with e.g. type or bound constraints.

-
     Methods
     -------
     __prepare__(name, bases) -> OrderedDict
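Reviewer note: the fan-out semantics of `Aggregator.__set__` and the live view returned by `__get__` can be imitated in a few lines. A runnable toy sketch, explicitly not the real class (`MiniAggregator`, `Column`, `Carousel` are all illustrative; the real descriptor additionally returns `ProxyList`/`NumpyProxyArray` views and performs shape checks):

```python
import numpy as np

class MiniAggregator:
    """Toy imitation of the Aggregator descriptor: reads/writes fan out to elements."""

    def __init__(self, parameter_name, container):
        self.parameter_name = parameter_name
        self.container = container

    def __get__(self, instance, cls=None):
        if instance is None:
            return self
        elements = getattr(instance, self.container)
        return np.array([getattr(el, self.parameter_name) for el in elements])

    def __set__(self, instance, value):
        elements = getattr(instance, self.container)
        for el, v in zip(elements, value):
            setattr(el, self.parameter_name, v)

class Column:
    def __init__(self, length):
        self.length = length

class Carousel:
    length = MiniAggregator('length', 'columns')

    def __init__(self, columns):
        self.columns = columns

carousel = Carousel([Column(0.1), Column(0.2)])
print(carousel.length)          # [0.1 0.2]
carousel.length = [0.1, 0.25]   # fans the new values out to each Column
print(carousel.columns[1].length)  # 0.25
```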
diff --git a/CADETProcess/dataStructure/deprecation.py b/CADETProcess/dataStructure/deprecation.py
new file mode 100644
index 000000000..6ae5bf958
--- /dev/null
+++ b/CADETProcess/dataStructure/deprecation.py
@@ -0,0 +1,80 @@
+from typing import Any, Callable, Dict
+import functools
+import warnings
+
+
+__all__ = ["deprecated_alias", "rename_kwargs"]
+
+
+def deprecated_alias(**aliases: str) -> Callable:
+    """Add alias for deprecated function arguments.
+
+    Parameters
+    ----------
+    **aliases : str
+        Keyword arguments where keys are old parameter names and values are new
+        parameter names.
+
+    Returns
+    -------
+    Callable
+        A decorator function that wraps the original function.
+
+    Examples
+    --------
+    @deprecated_alias(old_name='new_name')
+    def example_function(new_name):
+        return new_name
+    """
+    # Decorator: takes the function f and returns a wrapper that accepts the old
+    # argument names, translating them to the new ones.
+    def decorator(f: Callable):
+        @functools.wraps(f)
+        def wrap_decorated_argument(*args, **kwargs):
+            rename_kwargs(f.__name__, kwargs, aliases)
+            return f(*args, **kwargs)
+
+        return wrap_decorated_argument
+
+    return decorator
+
+
+def rename_kwargs(func_name: str, kwargs: Dict[str, Any], aliases: Dict[str, str]):
+    """Helper function for deprecating function arguments.
+
+    Parameters
+    ----------
+    func_name : str
+        Name of the function being decorated.
+    kwargs : Dict[str, Any]
+        Dictionary of keyword arguments passed to the function.
+    aliases : Dict[str, str]
+        Dictionary mapping old parameter names to new ones.
+
+    Returns
+    -------
+    None
+
+    Raises
+    ------
+    TypeError
+        If both old and new parameter names are used simultaneously.
+
+    Examples
+    --------
+    rename_kwargs('example_function', {'old_name': 'value'}, {'old_name': 'new_name'})
+    """
+    for alias, new in aliases.items():
+        if alias in kwargs:
+            if new in kwargs:
+                raise TypeError(
+                    f"{func_name} received both {alias} and {new} as arguments!"
+                    f" {alias} is deprecated, use {new} instead."
+                )
+            warnings.warn(
+                message=(
+                    f"`{alias}` is deprecated as an argument to `{func_name}`; use"
+                    f" `{new}` instead."
+                ),
+                category=DeprecationWarning,
+                stacklevel=2,
+            )
+            kwargs[new] = kwargs.pop(alias)
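Reviewer note: quick usage check for the new decorator. The function below (`resample`) is an illustrative stand-in; the import path follows the new `from .deprecation import *` re-export:

```python
import warnings
from CADETProcess.dataStructure import deprecated_alias

@deprecated_alias(n_points='n_samples')
def resample(signal, n_samples=100):
    return signal[:n_samples]

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    result = resample(list(range(10)), n_points=5)  # old name still works ...

print(result)              # [0, 1, 2, 3, 4]
print(caught[0].category)  # ... but emits a DeprecationWarning
```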
""" if isinstance(path, str): path = path.split('.') try: value = get_nested_value(nested_dict, path) - if isinstance(value, dict): - return False - return True + return not isinstance(value, dict) except (KeyError, TypeError): return False -def generate_nested_dict(path, value=None): - """Generate a nested dict from path in dot notation.""" +def generate_nested_dict(path: str | list, value: Any = None) -> dict[str, Any]: + """Generate a nested dictionary from a dot-separated path. + + Parameters + ---------- + path : str or list + Path to create in the dictionary, given in dot notation or as a list. + value : Any, optional + The value to assign to the innermost key. + + Returns + ------- + dict + A nested dictionary created from the path. + """ if isinstance(path, str): path = path.split('.') nested_dict = {path[-1]: value} - for key in reversed(path[0:-1]): + for key in reversed(path[:-1]): nested_dict = {key: nested_dict} return nested_dict -def insert_path(nested_dict, path, value): - """Add path to existing dict without overwriting keys.""" +def insert_path(nested_dict: dict[str, Any], path: str | list, value: Any) -> None: + """Add a key path to an existing dictionary without overwriting existing keys. + + Parameters + ---------- + nested_dict : dict + dictionary to update. + path : str or list + Path to the key in dot notation (string) or as a list of keys. + value : Any + Value to insert. + + Raises + ------ + KeyError + If an intermediate key in the path does not exist. + """ if isinstance(path, str): path = path.split('.') if len(path) == 1: - nested_dict[path[0]] = value + nested_dict.setdefault(path[0], value) else: + nested_dict.setdefault(path[0], {}) insert_path(nested_dict[path[0]], path[1:], value) -def get_leaves(nested_dict): - """Return leaves of nested dictionary in dot notation.""" +def get_leaves(nested_dict: dict[str, Any]) -> Generator[str, None, None]: + """Yield leaf keys of a nested dictionary in dot notation. + + Parameters + ---------- + nested_dict : dict + The nested dictionary to traverse. + + Yields + ------ + str + The full path to each leaf node in dot notation. + """ for key, value in nested_dict.items(): - if not isinstance(value, dict): - yield key - else: + if isinstance(value, dict) and value: for subpath in get_leaves(value): - yield ".".join((key, subpath)) + yield f"{key}.{subpath}" + else: + yield key -def set_nested_value(nested_dict, path, value): - """Set a value in a nested dict using dot notation.""" +def set_nested_value(nested_dict: dict[str, Any], path: str | list, value: Any) -> None: + """Set a value in a nested dictionary using a dot-separated path. + + Parameters + ---------- + nested_dict : dict + dictionary to update. + path : str or list + Path to the key in dot notation (string) or as a list of keys. + value : Any + Value to set. + + Raises + ------ + KeyError + If an intermediate key in the path does not exist. + """ if isinstance(path, str): path = path.split('.') + get_nested_value(nested_dict, path[:-1])[path[-1]] = value -def get_nested_value(nested_dict, path): - """Access a value in a nested dict using path in dot notation.""" +def get_nested_value(nested_dict: dict[str, Any], path: str | list) -> Any: + """Retrieve a value from a nested dictionary using a dot-separated path. + + Parameters + ---------- + nested_dict : dict + dictionary to retrieve the value from. + path : str or list + Path to the key in dot notation (string) or as a list of keys. + + Returns + ------- + Any + The value stored at the specified key path. 
+
+    Raises
+    ------
+    KeyError
+        If the key path does not exist.
+    """
     if isinstance(path, str):
         path = path.split('.')
+
     return reduce(getitem, path, nested_dict)


-def update(d, u):
-    """Recursively update dictionary d with u."""
-    for k, v in u.items():
-        if isinstance(v, collections.abc.Mapping):
-            d[k] = update(d.get(k, {}), v)
+def update_dict_recursively(
+        target_dict: dict[str, Any],
+        updates: dict[str, Any],
+        only_existing_keys: bool = False
+        ) -> dict[str, Any]:
+    """Recursively update `target_dict` with values from `updates`.
+
+    Parameters
+    ----------
+    target_dict : dict
+        The original dictionary to be updated.
+    updates : dict
+        The dictionary containing new values to merge into `target_dict`.
+    only_existing_keys : bool, optional
+        If True, only update keys that already exist in `target_dict`.
+        If False, add new keys from `updates`. Default is False.
+
+    Returns
+    -------
+    dict
+        The updated dictionary with `updates` applied.
+    """
+    for key, value in updates.items():
+        if only_existing_keys and key not in target_dict:
+            continue  # Skip keys that don't exist in target_dict
+
+        if isinstance(value, Mapping) and isinstance(target_dict.get(key), Mapping):
+            # Recursively update nested dictionaries
+            target_dict[key] = update_dict_recursively(
+                target_dict[key], value, only_existing_keys
+            )
         else:
-            d[k] = v
-    return d
+            # Directly update the value
+            target_dict[key] = value
+
+    return target_dict


-def get_nested_attribute(obj, path):
-    """Access a nested attribute using path in dot notation."""
+def get_nested_attribute(obj: Any, path: str) -> Any:
+    """Access a nested attribute using a dot-separated path.
+
+    Parameters
+    ----------
+    obj : Any
+        The object to retrieve the attribute from.
+    path : str
+        The dot-separated path to the nested attribute.
+
+    Returns
+    -------
+    Any
+        The value of the nested attribute.
+
+    Raises
+    ------
+    AttributeError
+        If any attribute in the path does not exist.
+    """
     attributes = path.split('.')
     for attr in attributes:
         obj = getattr(obj, attr)
     return obj


-def set_nested_attribute(obj, attr_string, value):
-    """Set a nested attribute using path in dot notation."""
+def set_nested_attribute(obj: Any, attr_string: str, value: Any) -> None:
+    """Set a nested attribute using a dot-separated path.
+
+    Parameters
+    ----------
+    obj : Any
+        The object to modify.
+    attr_string : str
+        The dot-separated path to the nested attribute.
+    value : Any
+        The value to set.
+    """
     attributes = attr_string.split('.')
     for attr in attributes[:-1]:
         obj = getattr(obj, attr)
     setattr(obj, attributes[-1], value)


-def get_nested_list_value(ls, idx_tuple):
+def get_nested_list_value(ls: Sequence[Any], idx_tuple: tuple[int, ...]) -> Any:
+    """Retrieve a value from a nested list structure using an index tuple.
+
+    Parameters
+    ----------
+    ls : Sequence[Any]
+        The nested list structure.
+    idx_tuple : tuple[int, ...]
+        A tuple of indices specifying the path to the desired value.
+
+    Returns
+    -------
+    Any
+        The value at the specified nested position.
+
+    Raises
+    ------
+    IndexError
+        If any index in the path is out of range.
+    """
     return reduce(lambda l, i: l[i], idx_tuple, ls)


-def set_nested_list_value(ls, idx_tuple, value):
-    # Navigate through the nested list to the second last index
-    for idx in idx_tuple[:-1]:
-        ls = ls[idx]
+def set_nested_list_value(
+        ls: Sequence[Any],
+        idx_tuple: tuple[int, ...],
+        value: Any
+        ) -> None:
+    """Set a value in a nested list structure using an index tuple.
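Reviewer note: a small self-check for the new `update_dict_recursively`, including the `only_existing_keys` guard (keys and values are illustrative):

```python
import copy
from CADETProcess.dataStructure import update_dict_recursively

defaults = {'solver': {'rtol': 1e-6, 'atol': 1e-8}, 'log': 'WARNING'}
user = {'solver': {'rtol': 1e-4}, 'typo_key': 42}

# Deep-copy so the nested defaults are not mutated in place.
merged = update_dict_recursively(copy.deepcopy(defaults), user, only_existing_keys=True)
print(merged['solver'])       # {'rtol': 0.0001, 'atol': 1e-08} – nested merge
print('typo_key' in merged)   # False – unknown keys are skipped
```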
 
-    # Set the value using the last index
-    ls[idx_tuple[-1]] = value
+
+    Parameters
+    ----------
+    ls : Sequence[Any]
+        The nested list structure.
+    idx_tuple : tuple[int, ...]
+        A tuple of indices specifying the path to the value to be set.
+    value : Any
+        The new value to assign.
+
+    Raises
+    ------
+    IndexError
+        If any index in the path is out of range.
+    """
+    for idx in idx_tuple[:-1]:
+        ls = ls[idx]  # Navigate through the nested lists
+    ls[idx_tuple[-1]] = value  # Set the final value
diff --git a/CADETProcess/dataStructure/parameter.py b/CADETProcess/dataStructure/parameter.py
index a15d0f3a3..f07c47581 100644
--- a/CADETProcess/dataStructure/parameter.py
+++ b/CADETProcess/dataStructure/parameter.py
@@ -1,4 +1,3 @@
-from abc import abstractmethod
 import copy
 import math
 import operator
@@ -119,7 +118,8 @@ def get_default_value(self, instance):

         if default is not None:
             default = self._prepare(instance, default, recursive=True)
-            self._check(instance, default, recursive=True)
+            if default is not None:
+                self._check(instance, default, recursive=True)

         return default

@@ -545,8 +545,11 @@ def cast_value(self, value):
             Float equivalent if value is an integer or numpy number; otherwise, the
             original value.
         """
-        if isinstance(value, (int, np.number)):
-            value = float(value)
+        try:
+            value = float(np.array(value).squeeze())
+        except ValueError:
+            raise TypeError("Cannot cast value to float.")
+
         return value

@@ -1025,11 +1028,13 @@ def check_size(self, instance, value):
         ValueError
             If the value's size does not match the expected size.
         """
-        if np.array(value).size == 1:
-            return
-
         size = self.get_size(value)
-        expected_size = self.get_expected_size(instance)
+
+        try:
+            expected_size = self.get_expected_size(instance)
+        except ValueError:
+            if np.array(value).squeeze().ndim == 0:
+                return
+            raise

         if size != expected_size:
             raise ValueError(f"Expected size {expected_size}")
@@ -1074,7 +1079,9 @@ def _prepare(self, instance, value, recursive=False):
         else:
             raise ValueError("Cannot cast value from given value.")

-        if expected_size is not None:
+        if expected_size == 0:
+            value = None
+        elif expected_size is not None:
             value = np.prod(expected_size) * value

         if recursive:
@@ -1180,8 +1187,7 @@ def _prepare(self, instance, value, recursive=False):
         ValueError
             If the value cannot be cast to a NumPy array with the expected shape.
         """
-        # if not isinstance(value, self.ty):
-        if isinstance(value, (int, float)):
+        if np.array(value).squeeze().ndim == 0:
             try:
                 expected_size = self.get_expected_size(instance)
             except ValueError:
@@ -1191,9 +1197,10 @@
             value = value * np.ones(expected_size)
         else:
             try:
-                self.check_size(instance, np.array(value))
+                value_array = np.array(value)
             except ValueError:
-                raise ValueError("Cannot cast value from given value.")
+                raise TypeError("Cannot cast value from given value.")
+            self.check_size(instance, value_array)

         if recursive:
             value = super()._prepare(instance, value, recursive)
@@ -1615,6 +1622,7 @@ def __init__(self, *args, **kwargs):

 # %% Modulated Parameters

+
 class DependentlyModulated(Sized):
     """
     Mixin for checking parameter shapes based on other instance attributes.
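Reviewer note: the new scalar test `np.array(value).squeeze().ndim == 0` accepts plain scalars, 0-d arrays, and single-element sequences alike, which is what lets the `Sized` descriptors broadcast them to the expected size. A quick demonstration of the idiom (standalone sketch, not the descriptor itself):

```python
import numpy as np

def is_scalar_like(value) -> bool:
    # Mirrors the check used in parameter.py: anything that squeezes to 0-d.
    return np.array(value).squeeze().ndim == 0

print(is_scalar_like(1.0))           # True
print(is_scalar_like([2.5]))         # True  (single-element list)
print(is_scalar_like(np.array(3)))   # True  (0-d array)
print(is_scalar_like([1.0, 2.0]))    # False

# Broadcasting a scalar-like value to an expected size, as _prepare does:
print(1.5 * np.ones(3))  # [1.5 1.5 1.5]
```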
diff --git a/CADETProcess/dataStructure/parameter_group.py b/CADETProcess/dataStructure/parameter_group.py
index b3fecbeed..f4e98cc24 100644
--- a/CADETProcess/dataStructure/parameter_group.py
+++ b/CADETProcess/dataStructure/parameter_group.py
@@ -1,6 +1,4 @@
 from CADETProcess import CADETProcessError
-from CADETProcess.dataStructure import Structure
-from CADETProcess.dataStructure import frozen_attributes


 class ParameterWrapper():
diff --git a/CADETProcess/dynamicEvents/event.py b/CADETProcess/dynamicEvents/event.py
index af92df448..d5289928c 100644
--- a/CADETProcess/dynamicEvents/event.py
+++ b/CADETProcess/dynamicEvents/event.py
@@ -945,23 +945,25 @@ def parameter_type(self):
     @property
     def parameter_shape(self):
         """tuple: Shape of the parameter array."""
-        if isinstance(self.parameter_descriptor, (Float, Integer, Bool)):
+        param_descriptor = self.parameter_descriptor
+        if isinstance(param_descriptor, (Float, Integer, Bool)):
             return ()

-        if isinstance(self.parameter_descriptor, Sized):
-            shape = self.parameter_descriptor.get_expected_size(self.performer_obj)
+        if isinstance(param_descriptor, Sized):
+            shape = param_descriptor.get_expected_size(self.performer_obj)
             if not isinstance(shape, tuple):
                 shape = (shape, )
             return shape

-        if self.current_value is None:
+        cur_value = self.current_value
+        if cur_value is None:
             raise CADETProcessError(
                 "Parameter is not initialized. "
                 "Cannot determine parameter shape."
             )

-        return np.array(self.current_value).shape
+        return np.array(cur_value).shape

     @property
     def is_sized(self):
@@ -999,13 +1001,15 @@ def indices(self):
             List of tuples for each entry.
             If parameter is scalar, None
         """
-        if len(self.parameter_shape) == 0:
+        param_shape = self.parameter_shape
+
+        if len(param_shape) == 0:
             return

-        indices = generate_indices(self.parameter_shape, self._indices)
+        indices = generate_indices(param_shape, self._indices)

         # Check if all indices unique:
-        full_indices = unravel(self.parameter_shape, indices)
+        full_indices = unravel(param_shape, indices)
         duplicates = [
             index for index in set(full_indices) if full_indices.count(index) > 1
         ]
@@ -1260,6 +1264,7 @@ def full_state(self):
             If the length of the state does not match the length of the indices.
         """
         state = self._state
+        indices = self.indices

         # Get new (full) parameter value
         if self._indices is None:
@@ -1277,12 +1282,12 @@
         # Ensure state is 2D for indices that contain slices
         state = self._ensure_2D_for_slices(state)

-        if len(state) != len(self.indices):
+        if len(state) != len(indices):
             raise ValueError(
                f"Expected {len(self.indices)} entries for state. Got {len(state)}"
             )

-        for i, ind in enumerate(self.indices):
+        for i, ind in enumerate(indices):
             expected_shape = new_value[ind].shape
             if self.is_polynomial and len(self.parameter_shape) > 1 and len(ind) == 1:
                 new_slice = self.parameter_descriptor.fill_values(
@@ -1308,10 +1313,10 @@
         self.set_value(new_value)
         new_value = self.current_value

-        if self.indices is not None:
+        if indices is not None:
             new_value = np.array(new_value, ndmin=1)
             full_state = []
-            for ind in self.indices:
+            for ind in indices:
                 full_state += new_value[ind].flatten().tolist()
         else:
             full_state = new_value
diff --git a/CADETProcess/fractionation/fractionationOptimizer.py b/CADETProcess/fractionation/fractionationOptimizer.py
index c4510b755..3b31aef1f 100644
--- a/CADETProcess/fractionation/fractionationOptimizer.py
+++ b/CADETProcess/fractionation/fractionationOptimizer.py
@@ -55,9 +55,9 @@ def __init__(self, optimizer=None, log_level='WARNING'):
         """
         if optimizer is None:
             optimizer = COBYLA()
-            optimizer.tol = 1e-3
+            optimizer.tol = 1e-4
             optimizer.catol = 5e-3
-            optimizer.rhobeg = 1e-4
+            optimizer.rhobeg = 1e-3
         self.optimizer = optimizer
         self.log_level = log_level
diff --git a/CADETProcess/fractionation/fractionator.py b/CADETProcess/fractionation/fractionator.py
index c3e32701b..5b6c8080a 100644
--- a/CADETProcess/fractionation/fractionator.py
+++ b/CADETProcess/fractionation/fractionator.py
@@ -1,19 +1,23 @@
 from collections import defaultdict
 from functools import wraps
 import os
+from typing import Optional, Any

 from addict import Dict
 import numpy as np
 from matplotlib.axes import Axes
+from matplotlib.figure import Figure

 from CADETProcess import CADETProcessError
 from CADETProcess import settings
 from CADETProcess.dataStructure import String
-from CADETProcess.dynamicEvents import EventHandler
+from CADETProcess.dynamicEvents import EventHandler, Event
 from CADETProcess import plotting
 from CADETProcess.performance import Performance
 from CADETProcess.solution import slice_solution, SolutionIO
 from CADETProcess import SimulationResults
+from CADETProcess.processModel import Process
+from CADETProcess.processModel import ComponentSystem

 from CADETProcess.fractionation.fractions import Fraction, FractionPool

@@ -38,17 +42,17 @@ class Fractionator(EventHandler):
     """

     name = String(default='Fractionator')
-    performance_keys = [
+    performance_keys: list[str] = [
         'mass', 'concentration', 'purity', 'recovery',
         'productivity', 'eluent_consumption'
     ]

     def __init__(
             self,
-            simulation_results,
-            components=None,
-            use_total_concentration_components=True,
-            *args, **kwargs):
+            simulation_results: SimulationResults,
+            components: Optional[list[str]] = None,
+            use_total_concentration_components: bool = True,
+            *args: Any, **kwargs: Any) -> None:
        """Initialize the Fractionator.

        Parameters
        ----------
        simulation_results : SimulationResults
            The simulation results containing the chromatograms.
        components : list, optional
            List of components to be fractionized.
        use_total_concentration_components : bool, optional
            If True, use the total concentration of the component species.
        *args : tuple, optional
            Arbitrary positional arguments.
        **kwargs : dict, optional
            Arbitrary keyword arguments.
""" - self.components = components - self.use_total_concentration_components = use_total_concentration_components + self.components: Optional[list[str]] = components + self.use_total_concentration_components: bool = use_total_concentration_components self.simulation_results = simulation_results super().__init__(*args, **kwargs) @property - def simulation_results(self): + def simulation_results(self) -> SimulationResults: """SimulationResults: The simulation results containing the chromatograms.""" return self._simulation_results @simulation_results.setter - def simulation_results(self, simulation_results): + def simulation_results(self, simulation_results: SimulationResults) -> None: """Set the simulation results. Parameters @@ -124,7 +128,7 @@ def simulation_results(self, simulation_results): n_species = len(indices) m_feed[counter:counter+n_species] = m_feed_comp counter += n_species - self.m_feed = m_feed + self.m_feed: np.ndarray = m_feed self._fractionation_states = Dict({ chrom: [] @@ -140,14 +144,14 @@ def simulation_results(self, simulation_results): self.reset() @property - def component_system(self): + def component_system(self) -> ComponentSystem: """ComponentSystem: The component system of the chromatograms.""" return self.chromatograms[0].component_system def _call_by_chrom_name(func): """Decorator to enable calling functions with chromatogram object or name.""" @wraps(func) - def wrapper(self, chrom, *args, **kwargs): + def wrapper_call_by_chrom_name(self, chrom, *args, **kwargs): """Enable calling functions with chromatogram object or name.""" if isinstance(chrom, str): try: @@ -156,10 +160,10 @@ def wrapper(self, chrom, *args, **kwargs): raise CADETProcessError('Not a valid unit') return func(self, chrom, *args, **kwargs) - return wrapper + return wrapper_call_by_chrom_name @property - def chromatograms(self): + def chromatograms(self) -> list[SolutionIO]: """list: Chromatograms to be fractionized. See Also @@ -171,22 +175,22 @@ def chromatograms(self): return self._chromatograms @property - def chromatograms_dict(self): + def chromatograms_dict(self) -> dict[str, SolutionIO]: """dict: Chromatogram names and objects.""" return {chrom.name: chrom for chrom in self.chromatograms} @property - def chromatogram_names(self): + def chromatogram_names(self) -> list[str]: """list: Chromatogram names""" return [chrom.name for chrom in self.chromatograms] @property - def n_chromatograms(self): + def n_chromatograms(self) -> int: """int: Number of Chromatograms Fractionator.""" return len(self.chromatograms) @property - def chromatogram_events(self): + def chromatogram_events(self) -> dict[SolutionIO, list[Event]]: chrom_events = { chrom: sorted(events, key=lambda evt: evt.time) for chrom, events in self._chromatogram_events.items() @@ -195,17 +199,17 @@ def chromatogram_events(self): return chrom_events @property - def process(self): + def process(self) -> Process: """Process: The process from the simulation results.""" return self.simulation_results.process @property - def n_comp(self): + def n_comp(self) -> int: """int: Number of components to be fractionized""" return self.chromatograms[0].n_comp @property - def cycle_time(self): + def cycle_time(self) -> float: """float: The cycle time of the Fractionator. 
        Note that in some situations, it might be desired to set a custom cycle time
@@ -222,24 +226,24 @@
         return self._cycle_time

     @property
-    def time(self):
+    def time(self) -> np.ndarray:
         """np.ndarray: solution times of Chromatogram."""
         return self.chromatograms[0].time

     @plotting.create_and_save_figure
     def plot_fraction_signal(
             self,
-            chromatogram: SolutionIO | None = None,
+            chromatogram: Optional[SolutionIO | str] = None,
             x_axis_in_minutes: bool = True,
-            ax: Axes | None = None,
-            *args,
-            **kwargs,
+            ax: Optional[Axes] = None,
+            *args: Any,
+            **kwargs: Any,
     ) -> Axes:
         """Plot the signal without the waste fractions.

         Parameters
         ----------
-        chromatogram : SolutionIO, optional
+        chromatogram : SolutionIO or str, optional
             Chromatogram to be plotted. If None, the first one is plotted.
         ax : Axes, optional
             Axes to plot on. If None, a new figure is created.
@@ -267,13 +271,13 @@
             self.performer_timelines['fractionation_states'][chromatogram.name]

         try:
-            start = kwargs['start']
+            start: float = kwargs['start']
             if x_axis_in_minutes:
                 start = start / 60
         except KeyError:
             start = 0
         try:
-            end = kwargs['end']
+            end: float = kwargs['end']
             if x_axis_in_minutes:
                 end = end / 60
         except KeyError:
@@ -293,7 +297,7 @@
                     text = 'W'
                 else:
                     color_index = comp_index
-                    text = str(comp_index + 1)
+                    text = self.component_system.names[comp_index]

                 sec_start = sec.start
                 sec_end = sec.end
@@ -329,7 +333,7 @@
         return ax

     @property
-    def fractionation_states(self):
+    def fractionation_states(self) -> Dict:
         """dict: Fractionation state of Chromatograms.

         Notes
@@ -340,7 +344,7 @@
         return self._fractionation_states

     @_call_by_chrom_name
-    def set_fractionation_state(self, chrom, state):
+    def set_fractionation_state(self, chrom: SolutionIO, state: int | list[float]) -> None:
         """Set fractionation states of Chromatogram.

         Parameters
         ----------
@@ -385,12 +389,12 @@
         self._fractionation_states[chrom] = fractionation_state

     @property
-    def n_fractions_per_pool(self):
+    def n_fractions_per_pool(self) -> list[int]:
         """list: number of fractions per pool."""
         return [pool.n_fractions for pool in self.fraction_pools]

     @property
-    def fraction_pools(self):
+    def fraction_pools(self) -> list[FractionPool]:
         """List of the component and waste fraction pools.

         For every event, the end time is determined and a Fraction object is
@@ -438,7 +442,7 @@
         return self._fraction_pools

-    def _create_fraction(self, chrom_index, start, end):
+    def _create_fraction(self, chrom_index: int, start: float, end: float) -> Fraction:
         """Helper function to create Fraction object calculate mass.

         Parameters
         ----------

         return fraction

-    def add_fraction(self, fraction, target):
+    def add_fraction(self, fraction: Fraction, target: int) -> None:
         """Add Fraction to the FractionPool of target component.
        Notes
@@ -474,7 +478,7 @@
         self._fraction_pools[target].add_fraction(fraction)

     @property
-    def mass(self):
+    def mass(self) -> np.ndarray:
         """ndarray: Component mass in corresponding fraction pools."""
         if self._mass is None:
             self._mass = np.array([
@@ -484,12 +488,12 @@
         return self._mass

     @property
-    def total_mass(self):
+    def total_mass(self) -> np.ndarray:
         """ndarray: Total mass of each component in all fraction pools."""
         return np.sum([pool.mass for pool in self.fraction_pools], axis=0)

     @property
-    def concentration(self):
+    def concentration(self) -> np.ndarray:
         """ndarray: Component concentration in corresponding fraction pool."""
         return np.array([
             pool.concentration[comp]
@@ -497,7 +501,7 @@
         ])

     @property
-    def purity(self):
+    def purity(self) -> np.ndarray:
         """ndarray: Component purity in corresponding fraction pool."""
         return np.array([
             pool.purity[comp]
@@ -505,7 +509,7 @@
         ])

     @property
-    def recovery(self):
+    def recovery(self) -> np.ndarray:
         """ndarray: Component recovery yield in corresponding fraction pool."""
         with np.errstate(divide='ignore', invalid='ignore'):
             recovery = self.mass / self.m_feed

         return np.nan_to_num(recovery)

     @property
-    def mass_balance_difference(self):
+    def mass_balance_difference(self) -> np.ndarray:
         """ndarray: Difference in mass balance between m_feed and fraction pools.

         The mass balance is calculated as the difference between the feed mass (m_feed)
@@ -536,14 +540,14 @@
         return self.total_mass - self.m_feed

     @property
-    def productivity(self):
+    def productivity(self) -> np.ndarray:
         """ndarray: Specific productivity in corresponding fraction pool."""
         return self.mass / (
             self.process.cycle_time * self.process.V_solid
         )

     @property
-    def eluent_consumption(self):
+    def eluent_consumption(self) -> np.ndarray:
         """ndarray: Specific eluent consumption in corresponding fraction pool.

         Notes
@@ -555,7 +559,7 @@
         return self.mass / self.process.V_eluent

     @property
-    def performance(self):
+    def performance(self) -> Performance:
         """Performance: The performance metrics of the fractionation."""
         self.reset()
         return Performance(
@@ -565,25 +569,30 @@
             self.component_system
         )

-    def reset(self):
+    def reset(self) -> None:
         """Reset the results when fractionation times are changed."""
         self._fractionation_state = None
         self._fraction_pools = None
         self._mass = None

     def add_fractionation_event(
-            self, event_name, target, time, chromatogram=None):
+            self,
+            event_name: str,
+            target: str | int,
+            time: float,
+            chromatogram: Optional[SolutionIO | str] = None
+    ) -> None:
         """Add a fractionation event.

         Parameters
         ----------
         event_name : str
             The name of the event.
-        target : int
-            The target component.
+        target : str | int
+            The index or name of the target component in the component system.
         time : float
             The time of the event.
-        chromatogram : SolutionIO, optional
+        chromatogram : SolutionIO or str, optional
             The chromatogram associated with the event.
             If None and there is only one chromatogram, it will be used.
@@ -610,6 +619,10 @@
             raise CADETProcessError("Could not find chromatogram.")

         param_path = f'fractionation_states.{chromatogram.name}'
+
+        if isinstance(target, str):
+            target = self.component_system.names.index(target)
+
         evt = self.add_event(
             event_name, param_path, target, time
         )
@@ -617,7 +630,7 @@

         self.reset()

-    def initial_values(self, purity_required=0.95):
+    def initial_values(self, purity_required: float | list[float] = 0.95) -> None:
         """Create events from chromatogram with minimum purity.

         Function creates fractions for areas in the chromatogram, where the
@@ -674,7 +687,7 @@
                 off_indices = np.where(diff[:, comp] == -1)
                 off_indices = off_indices[0]
                 for index, off_evt in enumerate(off_indices):
-                    time = chrom.time[int(off_evt)]
+                    time = chrom.time[int(off_evt) - 1]
                     event_name = \
                         'chrom_' + str(chrom_index) + \
                         '_comp_' + str(comp) + \
@@ -701,7 +714,7 @@
                     self._chromatogram_events[chrom].remove(evt)

     @property
-    def parameters(self):
+    def parameters(self) -> Dict:
         parameters = super().parameters
         parameters['fractionation_states'] = {
             chrom.name: self.fractionation_states[chrom]
@@ -711,7 +724,7 @@
         return Dict(parameters)

     @parameters.setter
-    def parameters(self, parameters):
+    def parameters(self, parameters: Dict) -> None:
         try:
             fractionation_states = parameters.pop('fractionation_states')
             for chrom, state in fractionation_states.items():
@@ -722,10 +735,27 @@
         super(Fractionator, self.__class__).parameters.fset(self, parameters)

     @property
-    def section_dependent_parameters(self):
+    def section_dependent_parameters(self) -> Dict:
         return self.parameters

-    def save(self, case_dir, start=0, end=None):
+    def save(
+            self,
+            case_dir: str,
+            start: float = 0,
+            end: Optional[float] = None
+    ) -> None:
+        """
+        Save chromatogram and purity plots to a specified directory.
+
+        Parameters
+        ----------
+        case_dir : str
+            Directory name within the working directory to save plots.
+        start : float, optional
+            Start time for plotting purity, default is 0.
+        end : float, optional
+            End time for plotting purity. If None, includes all data.
+        """
         path = os.path.join(settings.working_directory, case_dir)

         for index, chrom in enumerate(self.chromatograms):
@@ -742,5 +772,5 @@
                 index=index
             )

-    def __str__(self):
+    def __str__(self) -> str:
         return self.__class__.__name__
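Reviewer note: with the new `str` target support, fractionation events can reference components by name. A usage sketch; event names and times are illustrative, and `simulation_results` is assumed to come from a prior process simulation:

```python
from CADETProcess.fractionation import Fractionator

# `simulation_results` is assumed to exist from a previous simulation run.
frac = Fractionator(simulation_results)

# Previously only integer indices were accepted ...
frac.add_fractionation_event('start_collect_A', 0, 5 * 60)
# ... now a component name resolves to its index in the component system:
frac.add_fractionation_event('stop_collect_A', 'A', 8 * 60)

print(frac.performance.purity)
```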
        Parameters
        ----------
        t : float
@@ -1403,7 +1408,6 @@
         --------
         CADETProcess.plotting
         """
-        n_cols = self.builder.n_columns

         if ax is None:
             fig, axs = plt.subplots(
@@ -1441,8 +1445,8 @@
                 for comp in range(self.n_comp):
                     lines[position][comp].set_ydata(y[..., comp])
             else:
-                l = ax.plot(x, y)
-                _lines.append(l)
+                line = ax.plot(x, y)
+                _lines.append(line)

             zone = self.builder.zones[zone_counter]
diff --git a/CADETProcess/numerics.py b/CADETProcess/numerics.py
new file mode 100644
index 000000000..1ee20751d
--- /dev/null
+++ b/CADETProcess/numerics.py
@@ -0,0 +1,72 @@
+import numpy as np
+
+
+def round_to_significant_digits(values: np.ndarray | list[float], digits: int) -> np.ndarray:
+    """
+    Round an array of numbers to the specified number of significant digits.
+
+    Parameters
+    ----------
+    values : np.ndarray | list[float]
+        Input array of floats to be rounded. Can be a NumPy array or a list of floats.
+    digits : int
+        Number of significant digits to retain. Must be greater than 0.
+        If None, the values are returned unchanged.
+
+    Returns
+    -------
+    np.ndarray
+        Array of values rounded to the specified significant digits.
+        The shape matches the input.
+
+    Notes
+    -----
+    - The function handles zero values by returning 0 directly, avoiding issues
+      with logarithms.
+    - Input arrays are automatically converted to `ndarray` if not already.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> values = np.array([123.456, 0.001234, 98765, 0, -45.6789])
+    >>> round_to_significant_digits(values, 3)
+    array([ 123.     ,    0.00123, 98800.     ,    0.     ,  -45.7    ])
+
+    >>> values = [1.2345e-5, 6.789e3, 0.0]
+    >>> round_to_significant_digits(values, 2)
+    array([ 1.2e-05, 6.8e+03, 0.0e+00])
+    """
+    if digits is None:
+        return values
+
+    input_type = type(values)
+
+    values = np.asarray(values)  # Ensure input is a NumPy array
+
+    if digits <= 0:
+        raise ValueError("Number of significant digits must be greater than 0.")
+
+    # Mask for non-zero, non-NaN values
+    nonzero_mask = values != 0
+    nan_mask = ~np.isnan(values)
+    combined_mask = np.logical_and(nonzero_mask, nan_mask)
+
+    result = np.zeros_like(values)  # Initialize result array
+
+    # For non-zero elements, calculate the scaling and apply rounding
+    if np.any(combined_mask):  # Check if there are any non-zero values
+        nonzero_values = values[combined_mask]
+        scales = digits - np.floor(np.log10(np.abs(nonzero_values))).astype(int) - 1
+
+        # Round each non-zero value individually
+        rounded_nonzero = [
+            round(v, int(scale)) for v, scale in zip(nonzero_values, scales)
+        ]
+
+        result[combined_mask] = rounded_nonzero  # Assign the rounded values back
+
+    result[~nan_mask] = np.nan
+
+    if input_type is not np.ndarray:
+        result = input_type(result)
+
+    return result
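Reviewer note: quick check of the new helper, including the zero/NaN handling (values are illustrative):

```python
import numpy as np
from CADETProcess.numerics import round_to_significant_digits

values = np.array([123.456, 0.001234, 98765.0, 0.0, np.nan])
print(round_to_significant_digits(values, 3))
# -> [1.230e+02 1.230e-03 9.880e+04 0.000e+00 nan]

# List inputs round-trip back to a list; elements may print as
# np.float64(...) depending on the NumPy version:
print(round_to_significant_digits([1.2345e-5, 6789.0], 2))  # ~[1.2e-05, 6800.0]
```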
functools import wraps import inspect import math from pathlib import Path @@ -24,6 +24,7 @@ RangedInteger, Callable, Tuple, SizedNdArray ) from CADETProcess.dataStructure import frozen_attributes +from CADETProcess.dataStructure import NumpyProxyArray from CADETProcess.dataStructure import ( check_nested, generate_nested_dict, get_nested_value, get_nested_attribute, set_nested_list_value @@ -31,19 +32,22 @@ from CADETProcess.dynamicEvents.section import ( generate_indices, unravel, get_inhomogeneous_shape, get_full_shape ) - from CADETProcess.optimization.parallelizationBackend import ( ParallelizationBackendBase, SequentialBackend ) from CADETProcess.transform import ( - NoTransform, AutoTransform, NormLinearTransform, NormLogTransform + NullTransformer, AutoTransformer, NormLinearTransformer, NormLogTransformer ) from CADETProcess.metric import MetricBase +from CADETProcess.numerics import round_to_significant_digits from CADETProcess.optimization import Individual, Population from CADETProcess.optimization import ResultsCache +__all__ = ["OptimizationProblem", "OptimizationVariable"] + + @frozen_attributes class OptimizationProblem(Structure): """Class for configuring optimization problems. @@ -331,7 +335,7 @@ def variable_values(self): def add_variable( self, name, evaluation_objects=-1, parameter_path=None, lb=-math.inf, ub=math.inf, transform=None, indices=None, - pre_processing=None): + significant_digits=None, pre_processing=None): """Add optimization variable to the OptimizationProblem. The function encapsulates the creation of OptimizationVariable objects @@ -358,6 +362,9 @@ def add_variable( indices : int or tuple, optional Indices for variables that modify entries of a parameter array. If None, variable is assumed to be index independent. + significant_digits : int, optional + Number of significant figures to which variable can be rounded. + If None, variable is not rounded. The default is None. pre_processing : callable, optional Additional step to process the value before setting it. This function must accept a single argument (the value) and return the processed value. 
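The new `significant_digits` option builds on the rounding helper introduced in CADETProcess/numerics.py above. A short standalone usage sketch (values mirroring the helper's docstring):

    import numpy as np

    from CADETProcess.numerics import round_to_significant_digits

    values = np.array([123.456, 0.001234, -45.6789, 0.0])
    rounded = round_to_significant_digits(values, digits=3)
    # 123.456 -> 123.0, 0.001234 -> 0.00123, -45.6789 -> -45.7; zeros pass through.

    # With digits=None the input is returned unchanged, which is how variables
    # without significant_digits skip rounding.
    assert round_to_significant_digits(values, digits=None) is values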
@@ -401,6 +408,7 @@ def add_variable( name, evaluation_objects, parameter_path, lb=lb, ub=ub, transform=transform, indices=indices, + significant_digits=significant_digits, pre_processing=pre_processing, ) @@ -604,9 +612,6 @@ def get_dependent_values(self, X_independent: npt.ArrayLike) -> np.ndarray: for i, x in enumerate(X_independent): for indep_variable, indep_value in zip(independent_variables, x): - indep_value = np.format_float_positional( - indep_value, precision=indep_variable.precision, fractional=False - ) indep_variable.value = float(indep_value) variable_values[i, :] = self.variable_values @@ -1942,7 +1947,7 @@ def lower_bounds_transformed(self): upper_bounds """ - return [var.transform.lb for var in self.variables] + return [var.transformer.lb for var in self.variables] @property def lower_bounds_independent(self): @@ -1964,7 +1969,7 @@ def lower_bounds_independent_transformed(self): upper_bounds """ - return [var.transform.lb for var in self.independent_variables] + return [var.transformer.lb for var in self.independent_variables] @property def upper_bounds(self): @@ -1986,7 +1991,7 @@ def upper_bounds_transformed(self): upper_bounds """ - return [var._transform.ub for var in self.variables] + return [var.transformer.ub for var in self.variables] @property def upper_bounds_independent(self): @@ -2008,7 +2013,7 @@ def upper_bounds_independent_transformed(self): upper_bounds """ - return [var._transform.ub for var in self.independent_variables] + return [var.transformer.ub for var in self.independent_variables] @untransforms @gets_dependent_values @@ -2209,8 +2214,8 @@ def A_transformed(self): A_t = self.A.copy() for a in A_t: for j, v in enumerate(self.variables): - t = v.transform - if isinstance(t, NoTransform): + t = v.transformer + if isinstance(t, NullTransformer): continue if a[j] != 0 and not t.is_linear: @@ -2289,8 +2294,8 @@ def b_transformed(self): A_t = self.A.copy() for i, a in enumerate(A_t): for j, v in enumerate(self.variables): - t = v.transform - if isinstance(t, NoTransform): + t = v.transformer + if isinstance(t, NullTransformer): continue if a[j] != 0 and not t.is_linear: @@ -2508,8 +2513,8 @@ def Aeq_transformed(self): Aeq_t = self.Aeq.copy() for aeq in Aeq_t: for j, v in enumerate(self.variables): - t = v.transform - if isinstance(t, NoTransform): + t = v.transformer + if isinstance(t, NullTransformer): continue if aeq[j] != 0 and not t.is_linear: @@ -2587,8 +2592,8 @@ def beq_transformed(self): Aeq_t = self.Aeq.copy() for i, aeq in enumerate(Aeq_t): for j, v in enumerate(self.variables): - t = v.transform - if isinstance(t, NoTransform): + t = v.transformer + if isinstance(t, NullTransformer): continue if aeq[j] != 0 and not t.is_linear: @@ -2712,7 +2717,7 @@ def transform(self, X_independent: npt.ArrayLike) -> np.ndarray: for i, ind in enumerate(X_independent): transform[i, :] = [ - var.transform_fun(value) + var.transform(value) for value, var in zip(ind, self.independent_variables) ] @@ -2736,7 +2741,7 @@ def untransform(self, X_transformed: npt.ArrayLike) -> np.ndarray: for i, ind in enumerate(X_transformed): untransform[i, :] = [ - var.untransform_fun(value) + var.untransform(value, significant_digits=var.significant_digits) for value, var in zip(ind, self.independent_variables) ] @@ -2796,11 +2801,12 @@ def prune_cache(self, tag=None, close=True): def create_hopsy_problem( self, - include_dependent_variables=True, - simplify=False, - use_custom_model=False - ): - """Creates a hopsy problem from the optimization problem. 
+ include_dependent_variables: Optional[bool] = True, + simplify: Optional[bool] = False, + use_custom_model: Optional[bool] = False, + ) -> hopsy.Problem: + """ + Create a hopsy problem from the optimization problem. Parameters ---------- @@ -2834,11 +2840,11 @@ def compute_negative_log_likelihood(self, x): for i, var in enumerate(variables): if ( - isinstance(var._transform, NormLogTransform) + isinstance(var.transformer, NormLogTransformer) or ( - isinstance(var._transform, AutoTransform) and - var._transform.use_log + isinstance(var.transformer, AutoTransformer) and + var.transformer.use_log ) ): log_space_indices.append(i) @@ -2889,7 +2895,10 @@ def compute_negative_log_likelihood(self, x): return problem - def get_chebyshev_center(self, include_dependent_variables=True): + def get_chebyshev_center( + self, + include_dependent_variables: Optional[bool] = True + ) -> list[float]: """Compute chebychev center. The Chebyshev center is the center of the largest Euclidean ball that is fully @@ -2902,45 +2911,31 @@ def get_chebyshev_center(self, include_dependent_variables=True): Returns ------- - chebyshev : list + chebyshev : list[float] Chebyshev center. """ problem = self.create_hopsy_problem( include_dependent_variables=False, simplify=False, use_custom_model=True ) - # !!! Additional checks in place to handle PolyRound.round() - # removing "small" dimensions. - # Bug reported, Check for future release! - chebyshev_orig = hopsy.compute_chebyshev_center( + + problem = hopsy.round(problem, simplify=False) + + chebyshev = hopsy.compute_chebyshev_center( problem, original_space=True )[:, 0] - try: - problem_rounded = hopsy.round(problem) - except ValueError: - problem_rounded = problem - - if problem_rounded.A.shape[1] == problem.A.shape[1]: - chebyshev_rounded = hopsy.compute_chebyshev_center( - problem_rounded, original_space=True - )[:, 0] - - if np.all(np.greater(chebyshev_rounded, self.lower_bounds_independent)): - problem = problem_rounded - chebyshev = chebyshev_rounded - else: - chebyshev = chebyshev_orig - else: - chebyshev = chebyshev_orig - if include_dependent_variables: chebyshev = self.get_dependent_values(chebyshev) return chebyshev def create_initial_values( - self, n_samples=1, seed=None, burn_in=100000, - include_dependent_variables=True): + self, + n_samples: Optional[int] = 1, + seed: Optional[int] = None, + burn_in: Optional[int] = 100000, + include_dependent_variables: Optional[bool] = True + ) -> np.ndarray: """Create initial value within parameter space. Uses hopsy (Highly Optimized toolbox for Polytope Sampling) to retrieve @@ -2948,10 +2943,10 @@ def create_initial_values( Parameters ---------- - n_samples : int - Number of initial values to be drawn + n_samples : int, optional + Number of initial values to be drawn. The default is 1. seed : int, optional - Seed to initialize random numbers. Only used if method == 'random' + Seed to initialize random numbers. burn_in : int, optional Number of samples that are created to ensure uniform sampling. The actual initial values are then drawn from this set. @@ -2963,16 +2958,19 @@ def create_initial_values( Raises ------ CADETProcessError - If method is not known. + If not enough individuals fulfilling linear constraints are found. Returns ------- - values : list + values : np.ndarray Initial values for starting the optimization. 
""" burn_in = int(burn_in) + if seed is None: + seed = random.randint(0, 255) + with warnings.catch_warnings(): warnings.simplefilter("ignore") @@ -2982,19 +2980,11 @@ def create_initial_values( use_custom_model=True, ) - chebychev_center = self.get_chebyshev_center( - include_dependent_variables=False - ) - - if seed is None: - seed = random.randint(0, 255) - - rng = np.random.default_rng(seed) + problem = hopsy.round(problem, simplify=False) mc = hopsy.MarkovChain( problem, proposal=hopsy.UniformCoordinateHitAndRunProposal, - starting_point=chebychev_center ) rng_hopsy = hopsy.RandomNumberGenerator(seed=seed) @@ -3003,6 +2993,7 @@ def create_initial_values( ) independent_values = states[0, ...] + rng = np.random.default_rng(seed) values = [] counter = 0 while len(values) < n_samples: @@ -3016,9 +3007,9 @@ def create_initial_values( ind = [] for i_var, var in enumerate(self.independent_variables): ind.append( - float(np.format_float_positional( + float(round_to_significant_digits( independent_values[i, i_var], - precision=var.precision, fractional=False + digits=var.significant_digits, )) ) @@ -3219,7 +3210,7 @@ def check_linear_constraints_transforms(self): for constr in self.linear_constraints + self.linear_equality_constraints: opt_vars = [self.variables_dict[key] for key in constr["opt_vars"]] for var in opt_vars: - if not var.transform.is_linear: + if not var.transformer.is_linear: flag = False warnings.warn( f"'{var.name}' uses non-linear transform and is used in " @@ -3383,12 +3374,12 @@ class OptimizationVariable: Lower bound of the variable. ub : float Upper bound of the variable. - transform : TransformBase + transformer : TransformerBase Transformation function for parameter normalization. indices : int, or slice Indices for variables that modify an entry of a parameter array. If None, variable is assumed to be index independent. - precision : int, optional + significant_digits : int, optional Number of significant figures to which variable can be rounded. If None, variable is not rounded. The default is None. 
pre_processing : callable, optional @@ -3405,7 +3396,7 @@ class OptimizationVariable: def __init__( self, name, evaluation_objects=None, parameter_path=None, - lb=-math.inf, ub=math.inf, transform=None, indices=None, precision=None, + lb=-math.inf, ub=math.inf, transform=None, indices=None, significant_digits=None, pre_processing=None ): self.name = name @@ -3428,21 +3419,21 @@ def __init__( self.ub = ub if transform is None: - transform = NoTransform(lb, ub) + transformer = NullTransformer(lb, ub) else: if np.isinf(lb) or np.isinf(ub): raise CADETProcessError("Transform requires bound constraints.") if transform == 'auto': - transform = AutoTransform(lb, ub) + transformer = AutoTransformer(lb, ub) elif transform == 'linear': - transform = NormLinearTransform(lb, ub) + transformer = NormLinearTransformer(lb, ub) elif transform == 'log': - transform = NormLogTransform(lb, ub) + transformer = NormLogTransformer(lb, ub) else: raise ValueError("Unknown transform") - self._transform = transform - self.precision = precision + self._transformer = transformer + self.significant_digits = significant_digits self._dependencies = [] self._dependency_transform = None @@ -3665,14 +3656,49 @@ def n_indices(self): return 0 @property - def transform(self): - return self._transform + def transformer(self) -> "TransformerBase": + """TransformerBase: The variable transformer instance.""" + return self._transformer + + def transform(self, x: float, *args: Any, **kwargs: Any) -> float: + """ + Apply the transformation to the input. - def transform_fun(self, x): - return self._transform.transform(x) + Parameters + ---------- + x : float + The input data to be transformed. + *args : Any + Additional positional arguments passed to the transformer's `transform` method. + **kwargs : Any + Additional keyword arguments passed to the transformer's `transform` method. - def untransform_fun(self, x): - return self._transform.untransform(x) + Returns + ------- + float + The transformed data. + """ + return self.transformer.transform(x, *args, **kwargs) + + def untransform(self, x: float, *args: Any, **kwargs: Any) -> float: + """ + Apply the inverse transformation to the input. + + Parameters + ---------- + x : float + The input data to be untransformed. + *args : Any + Additional positional arguments passed to the transformer's `untransform` method. + **kwargs : Any + Additional keyword arguments passed to the transformer's `untransform` method. + + Returns + ------- + float + The untransformed data. + """ + return self.transformer.untransform(x, *args, **kwargs) def add_dependency(self, dependencies, transform): """Add dependency of Variable on other Variables. 
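The renamed transformer API keeps a simple contract: `transform` maps a value from the bounded parameter space into normalized space, and `untransform` inverts it. A toy stand-in for `NormLogTransformer` illustrating the round trip (a simplified sketch, not the library implementation):

    import numpy as np

    # Simplified stand-in for NormLogTransformer: maps [lb, ub] to [0, 1] on a log scale.
    class ToyNormLogTransformer:
        def __init__(self, lb: float, ub: float):
            self.lb, self.ub = lb, ub

        def transform(self, x: float) -> float:
            return (np.log(x) - np.log(self.lb)) / (np.log(self.ub) - np.log(self.lb))

        def untransform(self, x: float) -> float:
            return float(np.exp(x * (np.log(self.ub) - np.log(self.lb)) + np.log(self.lb)))

    t = ToyNormLogTransformer(1e-3, 1e3)
    assert np.isclose(t.untransform(t.transform(2.5)), 2.5)  # round trip recovers the value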
@@ -3725,7 +3751,9 @@ def value(self):
             return self._value
         else:
             dependencies = [dep.value for dep in self.dependencies]
-            return self.dependency_transform(*dependencies)
+            value = self.dependency_transform(*dependencies)
+            value = round_to_significant_digits(value, self.significant_digits)
+            return value

     @value.setter
     def value(self, value):
@@ -3734,31 +3762,37 @@ def value(self, value):

         self.set_value(value)

-        self._value = value
-
     def set_value(self, value):
         """Set value to evaluation_objects."""
         if not np.isscalar(value):
             raise TypeError("Expected scalar value")

+        value = round_to_significant_digits(
+            value, digits=self.significant_digits,
+        )
+
         if value < self.lb:
             raise ValueError("Exceeds lower bound")
         if value > self.ub:
             raise ValueError("Exceeds upper bound")

+        self._value = value
+
         if self.evaluation_objects is None:
             return

+        indices = self.indices
+
         for i_eval, eval_obj in enumerate(self.evaluation_objects):
             is_polynomial = self._is_polynomial(eval_obj)

             if (
-                    self.indices[i_eval] is None
+                    indices[i_eval] is None
                     or
                     self._indices[i_eval] is None and is_polynomial):
                 new_value = value
             else:
-                eval_ind = self.indices[i_eval]
+                eval_ind = indices[i_eval]
                 parameter_shape = self._get_parameter_shape(eval_obj)
                 current_value = self._current_value(eval_obj)
@@ -3800,13 +3834,17 @@ def set_value(self, value):
                     set_nested_list_value(new_value, i, val)

                 parameter_type = self._parameter_type(eval_obj)
-                if not isinstance(new_value, parameter_type):
+                if (
+                    parameter_type is not NumpyProxyArray
+                    and
+                    not isinstance(new_value, parameter_type)
+                ):
                     new_value = parameter_type(new_value.tolist())

             # Set the value:
-            self._set_value(eval_obj, new_value)
+            self._set_value_in_evaluation_object(eval_obj, new_value)

-    def _set_value(self, evaluation_object, value):
+    def _set_value_in_evaluation_object(self, evaluation_object, value):
         """Set the value to the evaluation object."""
         if self.pre_processing is not None:
             value = self.pre_processing(value)
@@ -3821,7 +3859,7 @@ def _set_value(self, evaluation_object, value):
     @property
     def transformed_bounds(self):
         """list: Transformed bounds of the parameter."""
-        return [self.transform_fun(self.lb), self.transform_fun(self.ub)]
+        return [self.transform(self.lb), self.transform(self.ub)]

     def __repr__(self):
         if self.evaluation_objects is not None:
diff --git a/CADETProcess/optimization/optimizer.py b/CADETProcess/optimization/optimizer.py
index 269e5880c..1bfda7776 100644
--- a/CADETProcess/optimization/optimizer.py
+++ b/CADETProcess/optimization/optimizer.py
@@ -3,12 +3,11 @@
 from pathlib import Path
 import shutil
 import time
+from typing import Optional
 import warnings

-from cadet import H5
 import numpy as np
 import matplotlib.pyplot as plt
-import numpy as np

 from CADETProcess import settings
 from CADETProcess import log
@@ -53,7 +52,6 @@ class OptimizerBase(Structure):
         constraints.
     progress_frequency : int
         Number of generations after which the optimizer reports progress.
-        The default is 1.
     cv_bounds_tol : float
         Tolerance for bounds constraint violation. The default is 0.0.
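With the changes above, `set_value` rounds before it checks bounds, so a value that only exceeds a bound after rounding is rejected. A condensed sketch of that ordering (hypothetical helper mirroring the diff, not library code):

    import numpy as np

    def checked_value(value, lb, ub, significant_digits=None):
        # Mirror of OptimizationVariable.set_value: round first, then check bounds.
        if not np.isscalar(value):
            raise TypeError("Expected scalar value")
        if significant_digits is not None and value != 0:
            scale = significant_digits - int(np.floor(np.log10(abs(value)))) - 1
            value = round(value, scale)
        if value < lb:
            raise ValueError("Exceeds lower bound")
        if value > ub:
            raise ValueError("Exceeds upper bound")
        return value

    print(checked_value(0.123456, lb=0.0, ub=1.0, significant_digits=3))  # 0.123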
@@ -232,11 +230,7 @@ def optimize(
         checkpoint_path = os.path.join(results_directory, 'checkpoint.h5')
         if use_checkpoint and os.path.isfile(checkpoint_path):
             self.logger.info("Continue optimization from checkpoint.")
-            data = H5()
-            data.filename = checkpoint_path
-            data.load()
-
-            self.results.update_from_dict(data)
+            self.results.load_results(checkpoint_path)
         else:
             self.results.setup_csv()
@@ -262,15 +256,15 @@ def optimize(
         if not flag:
             raise ValueError("x0 contains invalid entries.")

-        log.log_time('Optimization', self.logger.level)(self.run)
-        log.log_results('Optimization', self.logger.level)(self.run)
-        log.log_exceptions('Optimization', self.logger.level)(self.run)
+        log.log_time('Optimization', self.logger.level)(self._run)
+        log.log_results('Optimization', self.logger.level)(self._run)
+        log.log_exceptions('Optimization', self.logger.level)(self._run)

         backend = plt.get_backend()
         plt.switch_backend('agg')

         start = time.time()
-        self.run(self.optimization_problem, x0, *args, **kwargs)
+        self._run(self.optimization_problem, x0, *args, **kwargs)
         time_elapsed = time.time() - start

         self.results.time_elapsed = time_elapsed
@@ -291,8 +285,39 @@ def optimize(

         return self.results

+    def load_results(
+        self,
+        checkpoint_path: str | Path,
+        optimization_problem: Optional[OptimizationProblem] = None
+    ) -> OptimizationResults:
+        """
+        Load optimization results from a checkpoint file.
+
+        Parameters
+        ----------
+        checkpoint_path : str | Path
+            Path to the checkpoint file.
+        optimization_problem : OptimizationProblem, optional
+            The optimization problem associated with the results.
+            If None, results are loaded without a problem reference.
+
+        Returns
+        -------
+        OptimizationResults
+            The loaded optimization results.
+        """
+        results = OptimizationResults(
+            optimization_problem=optimization_problem,
+            optimizer=self,
+            similarity_tol=self.similarity_tol,
+        )
+
+        results.load_results(checkpoint_path)
+
+        return results
+
     @abstractmethod
-    def run(optimization_problem, x0=None, *args, **kwargs):
+    def _run(optimization_problem, x0=None, *args, **kwargs):
         """Abstract Method for solving an optimization problem.

         Parameters
diff --git a/CADETProcess/optimization/population.py b/CADETProcess/optimization/population.py
index 9dc02c961..2b43d86a2 100644
--- a/CADETProcess/optimization/population.py
+++ b/CADETProcess/optimization/population.py
@@ -1,9 +1,12 @@
 from pathlib import Path
+from typing import Optional
 import uuid
+import warnings

 from addict import Dict
 import corner
 import numpy as np
+import numpy.typing as npt
 import matplotlib.pyplot as plt

 from pymoo.visualization.scatter import Scatter
@@ -684,6 +687,57 @@ def plot_pareto(

         return plot

+    def plot_pairwise(
+        self,
+        n_bins: int = 20,
+        autoscale: bool = True,
+        use_transformed=False,
+        show=True,
+        plot_directory=None
+    ) -> tuple[plt.Figure, np.ndarray]:
+        """
+        Create a pairplot using Matplotlib.
+
+        Parameters
+        ----------
+        n_bins : int, default=20
+            Number of bins for histogram plots.
+        autoscale : bool, optional
+            If True, automatically adjust the scaling of the axes. The default is True.
+        use_transformed : bool, optional
+            If True, use the transformed independent variables. The default is False.
+        show : bool, optional
+            If True, display the plot. The default is True.
+        plot_directory : str, optional
+            The directory where the plot should be saved. The default is None.
+
+        Returns
+        -------
+        tuple
+            A tuple containing:
+            - plt.Figure: The Matplotlib Figure object.
+            - np.ndarray: An array of Axes objects representing the subplot grid.
+        """
+        if use_transformed:
+            x = self.x_transformed
+            labels = self.independent_variable_names
+        else:
+            x = self.x
+            labels = self.variable_names
+
+        fig, axs = plot_pairwise(
+            x, labels, n_bins=n_bins,
+        )
+
+        if plot_directory is not None:
+            plot_directory = Path(plot_directory)
+            fig.savefig(f'{plot_directory / "pairwise.png"}')
+
+        if not show:
+            plt.close(fig)
+
+        return fig, axs
+
     def plot_corner(self, use_transformed=False, show=True, plot_directory=None):
         """Create a corner plot of the independent variables.

@@ -696,6 +750,12 @@ def plot_corner(self, use_transformed=False, show=True, plot_directory=None):
         plot_directory : str, optional
             The directory where the plot should be saved. The default is None.
         """
+        warnings.warn(
+            "This method will be deprecated in the future. "
+            "Use `plot_pairwise` instead.",
+            FutureWarning,
+        )
+
         if use_transformed:
             x = self.x_transformed
             labels = self.independent_variable_names
@@ -1003,3 +1063,135 @@ def from_dict(cls, data):
             front.add_individual(individual)

         return front
+
+
+def plot_pairwise(
+        population: npt.ArrayLike,
+        variable_names: Optional[list[str]] = None,
+        n_bins: int = 20,
+        autoscale: bool = True,
+        fig: Optional[plt.Figure] = None,
+        axs: Optional[npt.NDArray[plt.Axes]] = None,
+        ) -> tuple[plt.Figure, npt.NDArray[plt.Axes]]:
+    """
+    Create a pairwise scatter plot for all variables of a population.
+
+    Parameters
+    ----------
+    population : npt.ArrayLike
+        2D array-like structure containing numerical variables with shape
+        (n_samples, n_variables).
+    variable_names : list of str, optional
+        list of variable names corresponding to columns in the data.
+        If None, default names will be assigned.
+    n_bins : int, default=20
+        Number of bins for histogram plots.
+    autoscale : bool, default=True
+        If True, automatically adjust the scaling of the axes.
+    fig : Optional[plt.Figure], default=None
+        An optional Matplotlib Figure object. If none is provided, a new figure will be
+        created.
+    axs : Optional[npt.NDArray[plt.Axes]], default=None
+        An optional array of Matplotlib Axes. If none is provided, new axes will be
+        created.
+
+    Returns
+    -------
+    tuple
+        A tuple containing:
+        - plt.Figure: The Matplotlib Figure object.
+        - npt.NDArray[plt.Axes]: An array of Axes objects representing the subplot grid.
+    """
+    population = np.array(population)
+
+    if population.ndim != 2:
+        raise ValueError(f"Expected 2D array, got array with ndim={population.ndim}")
+
+    n_samples, n_variables = population.shape
+
+    if variable_names is None:
+        variable_names = [f"$x_{i}$" for i in range(n_variables)]
+
+    if fig is None and axs is None:
+        fig, axes = plt.subplots(
+            n_variables, n_variables,
+            figsize=(6 * n_variables, 5 * n_variables),
+            sharex="col", sharey="row",
+            squeeze=False
+        )
+    else:
+        axes = axs
+
+    if axes.shape != (n_variables, n_variables):
+        raise ValueError(
+            "Inconsistent shape for provided axes. "
+            f"Expected {(n_variables, n_variables)}, got {axes.shape}."
+ ) + + for i in range(n_variables): + for j in range(n_variables): + scale_variable = False + + ax = axes[i, j] + + if i == j: + # Create a twin axis for histograms to avoid sharing y-axis + ax_hist = ax.twinx() + + # Determine binning strategy + if autoscale and np.all(population[:, i] > 0): + value_range = population[:, i].max() / population[:, i].min() + if value_range > 100.0: + scale_variable = True + + if scale_variable: + bins = np.geomspace( + population[:, i].min(), population[:, i].max(), n_bins+1 + ) + else: + bins = np.linspace( + population[:, i].min(), + population[:, i].max(), + n_bins+1 + ) + + ax_hist.hist( + population[:, i], + bins=bins, + alpha=0.7, + color="blue", + edgecolor="black", + align="mid" + ) + ax_hist.set_yticks([]) # Hide y-ticks for the histogram + else: + # Scatter plot for non-diagonal elements + ax.scatter(population[:, j], population[:, i], alpha=0.5, s=10) + + # Apply log scale based on autoscale logic + if autoscale and np.all(population[:, j] > 0): + ax.set_xscale("log") + if autoscale and np.all(population[:, i] > 0): + ax.set_yscale("log") + + # Ensure axis labels and ticks are visible only on the first column and + # last row + if j == 0: + ax.yaxis.set_tick_params(labelleft=True) + else: + ax.yaxis.set_tick_params(labelleft=False) + + if i == n_variables - 1: + ax.xaxis.set_tick_params(labelbottom=True) + else: + ax.xaxis.set_tick_params(labelbottom=False) + + # Set axis labels on the edges + if i == n_variables - 1: + ax.set_xlabel(variable_names[j]) + if j == 0: + ax.set_ylabel(variable_names[i]) + + fig.tight_layout() + + return fig, axes diff --git a/CADETProcess/optimization/pymooAdapter.py b/CADETProcess/optimization/pymooAdapter.py index 5ef399dfa..973c13cde 100644 --- a/CADETProcess/optimization/pymooAdapter.py +++ b/CADETProcess/optimization/pymooAdapter.py @@ -49,7 +49,7 @@ class PymooInterface(OptimizerBase): 'seed', 'pop_size', 'xtol', 'ftol', 'cvtol', 'n_max_gen', 'n_skip' ] - def run(self, optimization_problem: OptimizationProblem, x0=None): + def _run(self, optimization_problem: OptimizationProblem, x0=None): """Solve optimization problem using functional pymoo implementation. Parameters diff --git a/CADETProcess/optimization/results.py b/CADETProcess/optimization/results.py index 53a998c4b..c1ac62b7d 100644 --- a/CADETProcess/optimization/results.py +++ b/CADETProcess/optimization/results.py @@ -1,7 +1,8 @@ import csv +from functools import wraps import os from pathlib import Path -from typing import Literal +from typing import Literal, NoReturn import warnings from addict import Dict @@ -381,6 +382,7 @@ def plot_figures(self, show=True): plot_convergence plot_objectives plot_corner + plot_pairwise plot_pareto """ @@ -415,6 +417,10 @@ def plot_figures(self, show=True): show=show, plot_directory=self.plot_directory ) + self.plot_pairwise( + show=show, plot_directory=self.plot_directory + ) + if self.optimization_problem.n_objectives > 1: self.plot_pareto( show=show, plot_directory=self.plot_directory, @@ -561,31 +567,13 @@ def plot_pareto( plot_directory=_plot_directory ) + @wraps(Population.plot_corner) def plot_corner(self, *args, **kwargs): - """Create a corner plot of the independent variables. + return self.population_all.plot_corner(*args, **kwargs) - Parameters - ---------- - untransformed : bool, optional - If True, use the untransformed independent variables. - The default is True. - show : bool, optional - If True, display the plot. - The default is True. 
- plot_directory : str, optional - The directory where the plot should be saved. - The default is None. - - See Also - -------- - CADETProcess.results.plot_corner - corner.corner - - """ - try: - self.population_all.plot_corner(*args, **kwargs) - except AssertionError: - pass + @wraps(Population.plot_pairwise) + def plot_pairwise(self, *args, **kwargs): + return self.population_all.plot_pairwise(*args, **kwargs) def setup_convergence_figure(self, target, plot_individual=False): if target == 'objectives': @@ -806,6 +794,29 @@ def save_results(self, file_name: str): results.filename = self.results_directory / f'{file_name}.h5' results.save() + def load_results(self, file_name: str) -> NoReturn: + """ + Update optimization results from an HDF5 checkpoint file. + + Parameters + ---------- + file_name : str + Path to the checkpoint file. + + """ + data = H5() + data.filename = file_name + + # Check for CADET-Python >= v1.1, which introduced the .load_from_file interface. + # If it's not present, assume CADET-Python <= 1.0.4 and use the old .load() interface + # This check can be removed at some point in the future. + if hasattr(data, "load_from_file"): + data.load_from_file() + else: + data.load() + + self.update_from_dict(data) + def to_dict(self) -> dict: """Convert Results to a dictionary. diff --git a/CADETProcess/optimization/scipyAdapter.py b/CADETProcess/optimization/scipyAdapter.py index 271c73e38..6846f2719 100644 --- a/CADETProcess/optimization/scipyAdapter.py +++ b/CADETProcess/optimization/scipyAdapter.py @@ -50,7 +50,7 @@ class SciPyInterface(OptimizerBase): tol = UnsignedFloat() jac = Switch(valid=['2-point', '3-point', 'cs'], default='2-point') - def run(self, optimization_problem: OptimizationProblem, x0=None): + def _run(self, optimization_problem: OptimizationProblem, x0=None): """Solve the optimization problem using any of the scipy methods. Returns diff --git a/CADETProcess/processModel/__init__.py b/CADETProcess/processModel/__init__.py index fd98e7aee..07e60d6dc 100644 --- a/CADETProcess/processModel/__init__.py +++ b/CADETProcess/processModel/__init__.py @@ -41,18 +41,24 @@ Linear Langmuir LangmuirLDF + LangmuirLDFLiquidPhase BiLangmuir BiLangmuirLDF FreundlichLDF StericMassAction AntiLangmuir Spreading + MobilePhaseModulator + ExtendedMobilePhaseModulator SelfAssociation BiStericMassAction MultistateStericMassAction SimplifiedMultistateStericMassAction Saska GeneralizedIonExchange + HICConstantWaterActivity + HICWaterOnHydrophobicSurfaces + MultiComponentColloidal Unit Operation Models ===================== @@ -68,6 +74,7 @@ LumpedRateModelWithoutPores LumpedRateModelWithPores GeneralRateModel + MCT Discretization -------------- @@ -102,6 +109,7 @@ Process """ + from . import componentSystem from .componentSystem import * diff --git a/CADETProcess/processModel/binding.py b/CADETProcess/processModel/binding.py index 21b39fc3c..3280bc7ee 100644 --- a/CADETProcess/processModel/binding.py +++ b/CADETProcess/processModel/binding.py @@ -342,7 +342,7 @@ class FreundlichLDF(BindingBaseClass): class StericMassAction(BindingBaseClass): - """Parameters for Steric Mass Action Law binding model. + r"""Parameters for Steric Mass Action Law binding model. Attributes ---------- @@ -515,7 +515,7 @@ class MobilePhaseModulator(BindingBaseClass): ion_exchange_characteristic : list of unsigned floats. Parameters describing the ion-exchange characteristics (IEX). Length depends on `n_comp`. - hydrophobicity : list of unsigned floats. + hydrophobicity : list of floats. 
Parameters describing the hydrophobicity (HIC). Length depends on `n_comp`. linear_threshold : UnsignedFloat @@ -527,7 +527,7 @@ class MobilePhaseModulator(BindingBaseClass): capacity = SizedUnsignedList(size='n_comp') ion_exchange_characteristic = SizedUnsignedList(size='n_comp') beta = ion_exchange_characteristic - hydrophobicity = SizedUnsignedList(size='n_comp') + hydrophobicity = SizedList(size='n_comp') gamma = hydrophobicity linear_threshold = UnsignedFloat(default=1e-8) @@ -555,7 +555,7 @@ class ExtendedMobilePhaseModulator(BindingBaseClass): ion_exchange_characteristic : list of unsigned floats. Parameters describing the ion-exchange characteristics (IEX). Length depends on `n_comp`. - hydrophobicity : list of unsigned floats. + hydrophobicity : list of floats. Parameters describing the hydrophobicity (HIC). Length depends on `n_comp`. component_mode : list of unsigned integers. @@ -572,7 +572,7 @@ class ExtendedMobilePhaseModulator(BindingBaseClass): capacity = SizedUnsignedList(size='n_comp') ion_exchange_characteristic = SizedUnsignedList(size='n_comp') beta = ion_exchange_characteristic - hydrophobicity = SizedUnsignedList(size='n_comp') + hydrophobicity = SizedList(size='n_comp') gamma = hydrophobicity component_mode = SizedUnsignedIntegerList(size='n_comp', ub=2) @@ -587,7 +587,7 @@ class ExtendedMobilePhaseModulator(BindingBaseClass): class SelfAssociation(BindingBaseClass): - """Self Association adsorption isotherm. + r"""Self Association adsorption isotherm. Attributes ---------- @@ -695,7 +695,7 @@ def __init__(self, *args, n_states=2, **kwargs): class MultistateStericMassAction(BindingBaseClass): - """Multistate Steric Mass Action adsorption isotherm. + r"""Multistate Steric Mass Action adsorption isotherm. Attributes ---------- @@ -894,7 +894,7 @@ class Saska(BindingBaseClass): class GeneralizedIonExchange(BindingBaseClass): - """Generalized Ion Exchange isotherm model. + r"""Generalized Ion Exchange isotherm model. 
Attributes ---------- diff --git a/CADETProcess/processModel/componentSystem.py b/CADETProcess/processModel/componentSystem.py index e0ab9f99e..bf8c4a734 100644 --- a/CADETProcess/processModel/componentSystem.py +++ b/CADETProcess/processModel/componentSystem.py @@ -33,6 +33,9 @@ class Species(Structure): molecular_weight: UnsignedFloat = UnsignedFloat() density: UnsignedFloat = UnsignedFloat() + def __str__(self) -> str: + """str: String representation of the component.""" + return self.name class Component(Structure): """ @@ -440,8 +443,12 @@ def __repr__(self) -> str: """str: Return the string representation of the object.""" return f'{self.__class__.__name__}({self.names})' + def __len__(self) -> int: + """int: Return the number of components in the system.""" + return self.n_comp + def __iter__(self): - """Iterator over components in the system.""" + """Iterate over components in the system.""" yield from self.components def __getitem__(self, item: int) -> Component: diff --git a/CADETProcess/processModel/discretization.py b/CADETProcess/processModel/discretization.py index 60b92c56d..a5402a26c 100644 --- a/CADETProcess/processModel/discretization.py +++ b/CADETProcess/processModel/discretization.py @@ -125,7 +125,7 @@ class LRMDiscretizationFV(DiscretizationParametersBase): reconstruction = Switch(default='WENO', valid=['WENO']) _parameters = DiscretizationParametersBase._parameters + [ - 'spatial_method', 'ncol', 'use_analytic_jacobian', 'reconstruction', + 'ncol', 'use_analytic_jacobian', 'reconstruction', ] _dimensionality = ['ncol'] @@ -161,8 +161,7 @@ class LRMDiscretizationDG(DGMixin): exact_integration = Bool(default=False) _parameters = DiscretizationParametersBase._parameters + [ - 'spatial_method', 'nelem', 'use_analytic_jacobian', - 'polydeg', 'exact_integration' + 'nelem', 'use_analytic_jacobian', 'polydeg', 'exact_integration' ] _dimensionality = ['axial_dof'] @@ -228,8 +227,7 @@ class LRMPDiscretizationFV(DiscretizationParametersBase): schur_safety = UnsignedFloat(default=1.0e-8) _parameters = DiscretizationParametersBase._parameters + [ - 'spatial_method', 'ncol', 'par_geom', - 'use_analytic_jacobian', 'reconstruction', + 'ncol', 'par_geom', 'use_analytic_jacobian', 'reconstruction', 'gs_type', 'max_krylov', 'max_restarts', 'schur_safety' ] _dimensionality = ['ncol'] @@ -276,8 +274,7 @@ class LRMPDiscretizationDG(DGMixin): exact_integration = Bool(default=False) _parameters = DiscretizationParametersBase._parameters + [ - 'spatial_method', 'nelem', 'par_geom', - 'use_analytic_jacobian', + 'nelem', 'par_geom', 'use_analytic_jacobian', 'polydeg', 'exact_integration', ] _dimensionality = ['axial_dof'] @@ -378,7 +375,7 @@ class GRMDiscretizationFV(DiscretizationParametersBase): fix_zero_surface_diffusion = Bool(default=False) _parameters = DiscretizationParametersBase._parameters + [ - 'spatial_method', 'ncol', 'npar', + 'ncol', 'npar', 'par_geom', 'par_disc_type', 'par_disc_vector', 'par_boundary_order', 'use_analytic_jacobian', 'reconstruction', 'gs_type', 'max_krylov', 'max_restarts', 'schur_safety', @@ -467,7 +464,7 @@ class GRMDiscretizationDG(DGMixin): fix_zero_surface_diffusion = Bool(default=False) _parameters = DiscretizationParametersBase._parameters + [ - 'spatial_method', 'nelem', 'par_nelem', + 'nelem', 'par_nelem', 'par_geom', 'par_disc_type', 'par_disc_vector', 'par_boundary_order', 'use_analytic_jacobian', 'polydeg', 'par_polydeg', @@ -569,8 +566,7 @@ class ConsistencySolverParameters(Structure): ) _parameters = [ - 'solver_name', 'init_damping', 
'min_damping', - 'max_iterations', 'subsolvers' + 'solver_name', 'init_damping', 'min_damping', 'max_iterations', 'subsolvers' ] @@ -591,11 +587,10 @@ class MCTDiscretizationFV(DiscretizationParametersBase): """ + spatial_method = Constant(value='FV') ncol = UnsignedInteger(default=100) use_analytic_jacobian = Bool(default=True) reconstruction = Switch(default='WENO', valid=['WENO']) - _parameters = [ - 'ncol', 'use_analytic_jacobian', 'reconstruction', - ] + _parameters = ['ncol', 'use_analytic_jacobian', 'reconstruction'] _dimensionality = ['ncol'] diff --git a/CADETProcess/processModel/flowSheet.py b/CADETProcess/processModel/flowSheet.py index 2badae1d2..f7c52b0bd 100644 --- a/CADETProcess/processModel/flowSheet.py +++ b/CADETProcess/processModel/flowSheet.py @@ -1,13 +1,14 @@ +from collections import defaultdict from functools import wraps +from typing import Iterator, NoReturn from warnings import warn -from collections import defaultdict import numpy as np from addict import Dict from CADETProcess import CADETProcessError from CADETProcess.dataStructure import frozen_attributes -from CADETProcess.dataStructure import Structure, UnsignedInteger, String +from CADETProcess.dataStructure import Structure, String from .componentSystem import ComponentSystem from .unitOperation import UnitBaseClass from .unitOperation import Inlet, Outlet, Cstr @@ -38,9 +39,11 @@ class FlowSheet(Structure): name = String() - def __init__(self, component_system, name=None): + def __init__(self, component_system, *args, **kwargs): + super().__init__(*args, **kwargs) + self.component_system = component_system - self.name = name + self._units = [] self._feed_inlets = [] self._eluent_inlets = [] @@ -54,7 +57,8 @@ def __init__(self, component_system, name=None): self._section_dependent_parameters = Dict() @property - def component_system(self): + def component_system(self) -> ComponentSystem: + """ComponentSystem: The component system of the flow sheet.""" return self._component_system @component_system.setter @@ -65,11 +69,12 @@ def component_system(self, component_system): @property def n_comp(self): + """int: The number of components.""" return self.component_system.n_comp def unit_name_decorator(func): @wraps(func) - def wrapper(self, unit, *args, **kwargs): + def unit_name_wrapper(self, unit, *args, **kwargs): """Enable calling functions with unit object or unit name.""" if isinstance(unit, str): try: @@ -78,11 +83,11 @@ def wrapper(self, unit, *args, **kwargs): raise CADETProcessError('Not a valid unit') return func(self, unit, *args, **kwargs) - return wrapper + return unit_name_wrapper def origin_destination_name_decorator(func): @wraps(func) - def wrapper(self, origin, destination, *args, **kwargs): + def origin_destination_name_wrapper(self, origin, destination, *args, **kwargs): """Enable calling origin and destination using unit names.""" if isinstance(origin, str): try: @@ -98,7 +103,7 @@ def wrapper(self, origin, destination, *args, **kwargs): return func(self, origin, destination, *args, **kwargs) - return wrapper + return origin_destination_name_wrapper def update_parameters(self): for unit in self.units: @@ -178,7 +183,7 @@ def get_unit_index(self, unit): return self.units.index(unit) def get_port_index(self, unit, port): - """Return the port index of a unit + """Return the port index of a unit. 
Parameters ---------- @@ -198,7 +203,6 @@ def get_port_index(self, unit, port): if unit not in self.units: raise CADETProcessError('Unit not in flow sheet') - port_index = unit.ports.index(port) return port_index @@ -336,10 +340,16 @@ def remove_unit(self, unit): destinations = [] if self._connections[unit]['origins'] is not None: - origins = [origin for ports in self._connections[unit]['origins'] for origin in self._connections[unit]['origins'][ports]] + origins = [ + origin for ports in self._connections[unit]['origins'] + for origin in self._connections[unit]['origins'][ports] + ] if self._connections[unit]['destinations'] is not None: - destinations = [destination for ports in self._connections[unit]['destinations'] for destination in self._connections[unit]['destinations'][ports]].copy() + destinations = [ + destination for ports in self._connections[unit]['destinations'] + for destination in self._connections[unit]['destinations'][ports] + ].copy() for origin in origins: for origin_port in self._connections[unit]['origins']: @@ -408,29 +418,29 @@ def add_connection( if isinstance(destination, Inlet): raise CADETProcessError("Inlet unit cannot have ingoing stream.") - if origin.has_ports and origin_port is None: raise CADETProcessError('Missing `origin_port`') if not origin.has_ports and origin_port is not None: - raise CADETProcessError(f'Origin unit does not support ports.') + raise CADETProcessError('Origin unit does not support ports.') if origin.has_ports and origin_port not in origin.ports: - raise CADETProcessError(f'Origin port "{origin_port}" not found in ports: {origin.ports}.') + raise CADETProcessError( + f'Origin port "{origin_port}" not found in ports: {origin.ports}.' + ) if origin_port in self._connections[destination]['origins'][destination_port][origin]: raise CADETProcessError("Connection already exists") - - if destination.has_ports and destination_port is None: raise CADETProcessError('Missing `destination_port`') if not destination.has_ports and destination_port is not None: raise CADETProcessError('Destination unit does not support ports.') if destination.has_ports and destination_port not in destination.ports: - raise CADETProcessError(f'destination port "{destination_port}" not found in ports: {destination.ports}.') + raise CADETProcessError( + f'destination port "{destination_port}" not found in ports: {destination.ports}.' + ) if destination_port in self._connections[origin]['destinations'][origin_port][destination]: raise CADETProcessError("Connection already exists") - if not destination.has_ports: destination_port = destination.ports[0] if not origin.has_ports: @@ -441,7 +451,6 @@ def add_connection( self.set_output_state(origin, 0, origin_port) - @origin_destination_name_decorator @update_parameters_decorator def remove_connection( self, origin, destination, origin_port=None, destination_port=None): @@ -476,7 +485,9 @@ def remove_connection( self, origin, destination, origin_port=None, destination_ raise CADETProcessError('Missing `origin_port`') if origin_port not in origin.ports: - raise CADETProcessError(f'Origin port {origin_port} not in Unit {origin.name}.') + raise CADETProcessError( + f'Origin port {origin_port} not in Unit {origin.name}.' 
+ ) if destination not in self._units: raise CADETProcessError('Destination not in flow sheet') @@ -484,19 +495,24 @@ def remove_connection( self, origin, destination, origin_port=None, destination_ raise CADETProcessError('Missing `destination_port`') if destination_port not in destination.ports: - raise CADETProcessError(f'Origin port {destination_port} not in Unit {destination.name}.') + raise CADETProcessError( + f'Origin port {destination_port} not in Unit {destination.name}.' + ) try: - self._connections[destination]['origins'][destination_port].pop(origin) self._connections[origin]['destinations'][origin_port].pop(destination) except KeyError: raise CADETProcessError('Connection does not exist.') - - @origin_destination_name_decorator - def connection_exists(self, origin, destination, origin_port=None, destination_port=None): + def connection_exists( + self, + origin, + destination, + origin_port=None, + destination_port=None, + ): """bool: check if connection exists in flow sheet. Parameters @@ -508,16 +524,24 @@ def connection_exists(self, origin, destination, origin_port=None, destination_p """ if origin.has_ports and not origin_port: - raise CADETProcessError(f'Origin port needs to be specified for unit operation with ports {origin.name}.') + raise CADETProcessError( + "Origin port needs to be specified for unit operation with ports " + f"{origin.name}." + ) if destination.has_ports and not destination_port: - raise CADETProcessError(f'Destination port needs to be specified for unit operation with ports {destination.name}.') + raise CADETProcessError( + "Destination port needs to be specified for unit operation with ports " + f"{destination.name}." + ) if origin_port not in origin.ports: raise CADETProcessError(f'{origin.name} does not have port {origin_port}') if destination_port not in destination.ports: - raise CADETProcessError(f'{destination.name} does not have port {destination_port}') + raise CADETProcessError( + f'{destination.name} does not have port {destination_port}' + ) if destination in self._connections[origin].destinations[origin_port]\ and destination_port in self._connections[origin].destinations[origin_port][destination]\ @@ -636,7 +660,7 @@ def set_output_state(self, unit, state, port=None): If port exceeds the number of ports """ def get_port_index(unit_connection_dict, destination, destination_port): - """helper function to classify the index of a connection for your outputstate + """helper function to compute the index of a connection for the output state Parameters ---------- @@ -647,26 +671,27 @@ def get_port_index(unit_connection_dict, destination, destination_port): destination object destination_port : int destination Port - """ - ret_index = 0 for unit_destination in unit_connection_dict: if unit_destination is destination: - ret_index+=unit_connection_dict[unit_destination].index(destination_port) + ret_index += unit_connection_dict[unit_destination].index( + destination_port + ) break - ret_index+=len(unit_connection_dict[unit_destination]) + ret_index += len(unit_connection_dict[unit_destination]) return ret_index - if unit not in self._units: raise CADETProcessError('Unit not in flow sheet') if port not in unit.ports: raise CADETProcessError(f'Port {port} is not a port of Unit {unit.name}') - - state_length = sum([len(self._connections[unit].destinations[port][unit_name]) for unit_name in self._connections[unit].destinations[port]]) + state_length = sum([ + len(self._connections[unit].destinations[port][unit_name]) + for unit_name in 
self._connections[unit].destinations[port] + ]) if state_length == 0: output_state = [] @@ -683,21 +708,32 @@ def get_port_index(unit_connection_dict, destination, destination_port): for dest, value in state.items(): try: for destination_port, output_value in value.items(): - try: - assert self.connection_exists(unit, dest, port, destination_port) - except AssertionError: - raise CADETProcessError(f'{unit} on port {port} does not connect to {dest} on port {destination_port}.') + if not self.connection_exists(unit, dest, port, destination_port): + raise CADETProcessError( + f'{unit} on port {port} does not connect to {dest} on ' + f'port {destination_port}.' + ) inner_dest = self[dest] - index = get_port_index(self._connections[unit].destinations[port], inner_dest, destination_port) + index = get_port_index( + self._connections[unit].destinations[port], + inner_dest, + destination_port + ) output_state[index] = output_value except AttributeError: destination_port = None - try: - assert self.connection_exists(unit, dest, port, destination_port) - except AssertionError: - raise CADETProcessError(f'{unit} on port {port} does not connect to {dest} on port {destination_port}.') + + if not self.connection_exists(unit, dest, port, destination_port): + raise CADETProcessError( + f'{unit} on port {port} does not connect to {dest} on port ' + f'{destination_port}.' + ) dest = self[dest] - index = get_port_index(self.connections[unit].destinations[port], dest, destination_port) + index = get_port_index( + self.connections[unit].destinations[port], + dest, + destination_port + ) output_state[index] = value elif isinstance(state, (list)): @@ -720,7 +756,7 @@ def get_port_index(unit_connection_dict, destination, destination_port): self._output_states[unit][port] = output_state def get_flow_rates(self, state=None, eps=5.9e16): - """Calculate flow rate for all connections. + r"""Calculate flow rate for all connections. Optionally, an additional output state can be passed to update the current output states. 
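The `get_port_index` helper above flattens the nested `{destination: [ports]}` mapping into the flat index used by the output state vector. A toy illustration with sample data (not library code):

    # Destinations of one origin port, in insertion order.
    destinations = {
        "column": [None],                # unit without ports
        "outlet": ["port_a", "port_b"],  # unit with two ports
    }

    def get_port_index(unit_connection_dict, destination, destination_port):
        # Skip over all connections of earlier destinations, then add the port offset.
        ret_index = 0
        for unit_destination, ports in unit_connection_dict.items():
            if unit_destination == destination:
                ret_index += ports.index(destination_port)
                break
            ret_index += len(ports)
        return ret_index

    assert get_port_index(destinations, "outlet", "port_b") == 2
    # The matching output state has one fraction per flattened index, e.g. [0.5, 0.25, 0.25].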
@@ -811,9 +847,7 @@ def get_flow_rates(self, state=None, eps=5.9e16):

         for origin_unit in self._connections[unit]['origins'][port]:
             for origin_port in self._connections[unit]['origins'][port][origin_unit]:
-
                 o_index = port_index_list.index((origin_unit, origin_port))
-
                 local_d_index = 0

                 for inner_unit in self._connections[origin_unit]['destinations'][origin_port]:
@@ -872,7 +906,6 @@ def get_flow_rates(self, state=None, eps=5.9e16):
             for origin_port in self._connections[unit]['origins'][port][origin_unit]:

                 o_index = port_index_list.index((origin_unit, origin_port))
-
                 local_d_index = 0

                 for inner_unit in self._connections[origin_unit]['destinations'][origin_port]:
@@ -913,7 +946,6 @@ def get_flow_rates(self, state=None, eps=5.9e16):
                 index = port_index_list.index((unit, unit_port))
                 unit_solution_dict['total_out'][unit_port] = list(total_flow_rate_coefficents[:, index])
-
         if not isinstance(unit, Inlet):
             unit_solution_dict['origins'] = Dict()

@@ -932,7 +964,8 @@ def get_flow_rates(self, state=None, eps=5.9e16):
                     unit_port_index = port_index_list.index((unit, unit_port))
                     flow_list = list(
                         total_flow_rate_coefficents[:, origin_port_index]
-                        * w_out_help[unit_port_index, origin_port_index])
+                        * w_out_help[unit_port_index, origin_port_index]
+                    )
                     unit_solution_dict['origins'][unit_port][origin_unit.name][origin_port] = flow_list

         if not isinstance(unit, Outlet):
@@ -961,7 +994,8 @@ def get_flow_rates(self, state=None, eps=5.9e16):

         return return_flow_rates

-    def check_flow_rates(self, state=None):
+    def check_flow_rates(self, state=None) -> NoReturn:
+        """Check if in and outgoing flow rates of unit operations are balanced."""
         flow_rates = self.get_flow_rates(state)
         for unit, q in flow_rates.items():
             if isinstance(unit, (Inlet, Outlet)):
@@ -1118,7 +1152,8 @@ def remove_product_outlet(self, product_outlet):
         self._product_outlets.remove(product_outlet)

     @property
-    def parameters(self):
+    def parameters(self) -> dict:
+        """dict: Parameters of the flow sheet and associated unit operations."""
         return self._parameters

     @parameters.setter
@@ -1144,19 +1179,23 @@ def parameters(self, parameters):
         self.update_parameters()

     @property
-    def sized_parameters(self):
+    def sized_parameters(self) -> list:
+        """list: List of sized parameters."""
         return self._sized_parameters

     @property
-    def polynomial_parameters(self):
+    def polynomial_parameters(self) -> list:
+        """list: List of polynomial parameters."""
         return self._polynomial_parameters

     @property
-    def section_dependent_parameters(self):
+    def section_dependent_parameters(self) -> list:
+        """list: List of section dependent parameters."""
         return self._section_dependent_parameters

     @property
-    def initial_state(self):
+    def initial_state(self) -> dict:
+        """dict: Initial state of the unit operations."""
         initial_state = {unit.name: unit.initial_state for unit in self.units}

         return initial_state
@@ -1168,7 +1207,15 @@ def initial_state(self, initial_state):
             raise CADETProcessError('Not a valid unit')
         self.units_dict[unit].initial_state = st

-    def __getitem__(self, unit_name):
+    def __len__(self) -> int:
+        """int: The number of unit operations in the system."""
+        return self.number_of_units
+
+    def __iter__(self) -> Iterator[UnitBaseClass]:
+        """Iterate over the unit operations in the system."""
+        return iter(self.units)
+
+    def __getitem__(self, unit_name) -> UnitBaseClass:
        """Make FlowSheet substriptable s.t. units can be used as keys.
        Parameters
@@ -1192,7 +1239,7 @@ def __getitem__(self, unit_name):
         except KeyError:
             raise KeyError('Not a valid unit')

-    def __contains__(self, item):
+    def __contains__(self, item: UnitBaseClass | str) -> bool:
         """Check if UnitOperation is part of the FlowSheet.

         Parameters
diff --git a/CADETProcess/processModel/process.py b/CADETProcess/processModel/process.py
index 04ddbf91e..892d86601 100644
--- a/CADETProcess/processModel/process.py
+++ b/CADETProcess/processModel/process.py
@@ -1,6 +1,7 @@
 from collections import defaultdict
-from warnings import warn
 from dataclasses import dataclass
+from warnings import warn
+from typing import Literal, Optional

 from addict import Dict
 import numpy as np
@@ -549,7 +550,15 @@ def config(self, config):
         self.parameters = config['parameters']
         self.initial_state = config['initial_state']

-    def add_concentration_profile(self, unit, time, c, components=None, s=1e-6):
+    def add_concentration_profile(
+        self,
+        unit: str,
+        time: np.ndarray,
+        c: np.ndarray,
+        components: Optional[list[str]] = None,
+        s: float = 1e-6,
+        interpolation_method: Literal['cubic', 'pchip', None] = 'pchip'
+    ) -> None:
         """Add concentration profile to Process.

         Parameters
@@ -557,19 +566,24 @@ def add_concentration_profile(self, unit, time, c, components=None, s=1e-6):
         unit : str
             The name of the inlet unit operation.
         time : np.ndarray
-            An array containing the time values of the concentration profile.
+            1D array containing the time values of the concentration profile.
         c : np.ndarray
-            An array containing the concentration profile with shape
-            (len(time), n_comp), where n_comp is the number of components specified with
-            the `components` argument.
-        components : list, optional.
+            2D array containing the concentration profile with shape
+            (len(time), n_comp), where n_comp is the number of components
+            specified in the `components` argument.
+        components : list[str] | None, optional
             Component species for which the concentration profile shall be added.
             If `None`, the profile is expected to have shape (len(time), n_comp).
             If `-1`, the same (1D) profile is added to all components.
-            The default is `None`.
+            Default is `None`.
         s : float, optional
-            A smoothing factor used to generate the spline representation of the
-            concentration profile. The default is 1e-6.
+            Smoothing factor used to generate the spline representation of the
+            concentration profile. Default is `1e-6`.
+        interpolation_method : Literal["cubic", "pchip", None], optional
+            The interpolation method to use. Options:
+            - `"cubic"` : Cubic spline interpolation.
+            - `"pchip"` : Piecewise cubic Hermite interpolation (default).
+            - `None` : No interpolation, use raw time data.

         Raises
         ------
@@ -578,7 +592,7 @@ def add_concentration_profile(self, unit, time, c, components=None, s=1e-6):
         ValueError
             If the time values in `time` exceed the cycle time of the Process.
             If `c` has an invalid shape.
-
+            If `interpolation_method` is unknown.
         """
         if isinstance(unit, str):
             unit = self.flow_sheet[unit]
@@ -589,15 +603,19 @@ def add_concentration_profile(self, unit, time, c, components=None, s=1e-6):
         if max(time) > self.cycle_time:
             raise ValueError('Inlet profile exceeds cycle time.')

+        # Handle components and concentration shape
         if components is None:
             if c.shape[1] != self.n_comp:
-                raise ValueError(f'Expected shape ({len(time), self.n_comp}) for concentration array. Got {c.shape}.')
+                raise ValueError(
+                    f'Expected shape {(len(time), self.n_comp)} for concentration '
+                    f'array. Got {c.shape}.'
+                )
             components = self.component_system.species
         elif components == -1:
-            # Assume same profile for all components.
+            # Assume same profile for all components
             if c.ndim > 1:
                 raise ValueError('Expected single concentration profile.')
-            c = np.column_stack([c]*self.n_comp)
+            c = np.column_stack([c] * self.n_comp)
             components = self.component_system.species

         if not isinstance(components, list):
@@ -607,21 +625,66 @@ def add_concentration_profile(self, unit, time, c, components=None, s=1e-6):
         if len(indices) == 1 and c.ndim == 1:
             c = np.array(c, ndmin=2).T

+        # Determine the interpolation method; with `None`, the raw time data is used.
+        if interpolation_method is None:
+            time_interp = time
+        else:
+            time_interp = np.linspace(0, max(time), max(1001, len(time)))
+
+            match interpolation_method:
+                case 'cubic':
+                    interpolator = interpolate.CubicSpline(time, c)
+                case 'pchip':
+                    interpolator = interpolate.PchipInterpolator(time, c)
+                case _:
+                    raise ValueError(
+                        f"Unknown `interpolation_method`: {interpolation_method}."
+                    )
+
+            c = interpolator(time_interp)
+
+        # Process each component's profile
         for i, comp in enumerate(indices):
-            tck = interpolate.splrep(time, c[:, i], s=s)
+            profile = c[:, i]
+
+            # Normalize the profile
+            min_val = np.min(profile)
+            max_val = np.max(profile)
+            range_val = max_val - min_val
+
+            if range_val == 0:
+                warn(
+                    f'Component {comp} has no variation in concentration; '
+                    'scaling will be skipped.'
+                )
+                range_val = 1  # Avoid division by zero
+
+            normalized_profile = (profile - min_val) / range_val
+
+            # Fit the spline on the normalized profile
+            tck = interpolate.splrep(time_interp, normalized_profile, s=s)
             ppoly = interpolate.PPoly.from_spline(tck)

-            for i, (t, sec) in enumerate(zip(ppoly.x, ppoly.c.T)):
-                if i < 3:
-                    continue
-                elif i > len(ppoly.x) - 5:
+            # Add events with properly unscaled coefficients
+            for j, (t, sec) in enumerate(zip(ppoly.x, ppoly.c.T)):
+                if j < 3 or j > len(ppoly.x) - 5:
                     continue
-                evt = self.add_event(
-                    f'{unit}_inlet_{comp}_{i-3}', f'flow_sheet.{unit}.c',
-                    np.flip(sec), t, comp
+                # Scale coefficients and adjust the constant term
+                scaled_sec = sec * range_val
+                scaled_sec[-1] += min_val  # Adjust the constant term for shifting
+                self.add_event(
+                    f'{unit}_inlet_{comp}_{j-3}', f'flow_sheet.{unit}.c',
+                    np.flip(scaled_sec), t, comp
                 )

-    def add_flow_rate_profile(self, unit, time, flow_rate, s=1e-6):
+    def add_flow_rate_profile(
+        self,
+        unit: str,
+        time: np.ndarray,
+        flow_rate: np.ndarray,
+        s: float = 1e-6,
+        interpolation_method: Literal["cubic", "pchip", None] = "pchip"
+    ) -> None:
         """Add flow rate profile to a SourceMixin unit operation.

         Parameters
@@ -629,12 +694,17 @@ def add_flow_rate_profile(self, unit, time, flow_rate, s=1e-6):
         unit : str
             The name of the SourceMixin unit operation.
         time : np.ndarray
-            An array containing the time values of the flow rate profile.
+            1D array containing the time values of the flow rate profile.
         flow_rate : np.ndarray
-            An array containing the flow rate profile.
+            1D array containing the flow rate values over time.
         s : float, optional
-            A smoothing factor used to generate the spline representation of the
-            flow rate profile. The default is 1e-6.
+            Smoothing factor used to generate the spline representation of the
+            flow rate profile. Default is `1e-6`.
+        interpolation_method : Literal["cubic", "pchip", None], optional
+            The interpolation method to use. Options:
+            - `"cubic"` : Cubic spline interpolation.
+            - `"pchip"` : Piecewise cubic Hermite interpolation (default).
+ - `None` : No interpolation, use raw time data. Raises ------ @@ -647,22 +717,64 @@ def add_flow_rate_profile(self, unit, time, flow_rate, s=1e-6): unit = self.flow_sheet[unit] if unit not in self.flow_sheet.inlets + self.flow_sheet.cstrs: - raise TypeError('Expected SourceMixin.') + raise TypeError("Expected SourceMixin.") if max(time) > self.cycle_time: - raise ValueError('Inlet profile exceeds cycle time') + raise ValueError("Inlet profile exceeds cycle time.") + + # Compute min and max for scaling + min_val = np.min(flow_rate) + max_val = np.max(flow_rate) + range_val = max_val - min_val + + if range_val == 0: + warn( + "Flow rate has no variation; scaling will be skipped." + ) + range_val = 1 # Avoid division by zero, effectively skipping scaling + + # Normalize flow_rate to [0, 1] + normalized_flow_rate = (flow_rate - min_val) / range_val - tck = interpolate.splrep(time, flow_rate, s=s) + # Determine interpolation method + if interpolation_method is None: + time_interp = time + interpolated_flow_rate = normalized_flow_rate + else: + time_interp = np.linspace(0, max(time), max(1001, len(time))) + match interpolation_method: + + case "cubic": + interpolator = interpolate.CubicSpline( + time, normalized_flow_rate + ) + case "pchip": + interpolator = interpolate.PchipInterpolator( + time, normalized_flow_rate + ) + case _: + raise ValueError( + f"Unknown `interpolation_method`: {interpolation_method}." + ) + + interpolated_flow_rate = interpolator(time_interp) + + # Fit the spline with the interpolated normalized flow rate + tck = interpolate.splrep(time_interp, interpolated_flow_rate, s=s) ppoly = interpolate.PPoly.from_spline(tck) + # Add events with unscaled coefficients for i, (t, sec) in enumerate(zip(ppoly.x, ppoly.c.T)): - if i < 3: - continue - elif i > len(ppoly.x) - 5: + if i < 3 or i > len(ppoly.x) - 5: continue - evt = self.add_event( - f'{unit}_flow_rate_{i-3}', f'flow_sheet.{unit}.flow_rate', - np.flip(sec), t + + # Unscale all coefficients + unscaled_sec = sec * range_val + unscaled_sec[-1] += min_val # Adjust the constant term for shifting + + self.add_event( + f"{unit}_flow_rate_{i-3}", f"flow_sheet.{unit}.flow_rate", + np.flip(unscaled_sec), t ) def check_config(self): diff --git a/CADETProcess/processModel/reaction.py b/CADETProcess/processModel/reaction.py index 8e7d13356..8265a317e 100755 --- a/CADETProcess/processModel/reaction.py +++ b/CADETProcess/processModel/reaction.py @@ -1,16 +1,20 @@ -from addict import Dict from functools import wraps +import warnings + +from addict import Dict import numpy as np from CADETProcess import CADETProcessError from CADETProcess.dataStructure import Structure from CADETProcess.dataStructure import ( - Aggregator, SizedAggregator, SizedClassDependentAggregator + Aggregator, SizedAggregator, SizedClassDependentAggregator, ) from CADETProcess.dataStructure import ( Bool, String, SizedList, SizedNdArray, UnsignedInteger, UnsignedFloat ) +from CADETProcess.dataStructure import deprecated_alias + from .componentSystem import ComponentSystem @@ -50,7 +54,7 @@ class Reaction(Structure): The equilibrium constant for the reaction. 
""" is_kinetic = Bool(default=True) - stoich = SizedNdArray(size='n_comp', default=0) + stoich = SizedNdArray(size='n_comp') k_fwd = UnsignedFloat() k_bwd = UnsignedFloat() k_fwd_min = UnsignedFloat(default=100) @@ -67,8 +71,9 @@ class Reaction(Structure): 'exponents_bwd', ] + @deprecated_alias(indices='components') def __init__( - self, component_system, indices, coefficients, + self, component_system, components, coefficients, k_fwd, k_bwd=1, is_kinetic=True, k_fwd_min=100, exponents_fwd=None, exponents_bwd=None): """Initialize individual Mass Action Law Reaction. @@ -77,10 +82,10 @@ def __init__( ---------- component_system : ComponentSystem Component system of the reaction. - indices : list of int - Component indices. + components : list of int or strings + Component names of the components involved in the reaction. coefficients : np.ndarray - Stoichiometric coefficients in the same order of component indices. + Stoichiometric coefficients in the same order of components . k_fwd : float Forward reaction rate. k_bwd : float, optional @@ -92,11 +97,11 @@ def __init__( Minimum value of foward reaction rate in case of rapid equilbrium. The default is 100. exponents_fwd : list of float, optional - Concentration exponents of the components in order of indices for + Concentration exponents of the components in order of components for forward reaction. If None is given, values are inferred from the stoichiometric coefficients. The default is None. exponents_bwd : list of float, optional - Concentration exponents of the components in order of indices for + Concentration exponents of the components in order of components for backward reaction. If None is given, values are inferred from the stoichiometric coefficients. The default is None. @@ -104,10 +109,6 @@ def __init__( self.component_system = component_system super().__init__() - self.stoich = np.zeros((self.n_comp,)) - for i, c in zip(indices, coefficients): - self.stoich[i] = c - self.is_kinetic = is_kinetic if not is_kinetic: k_fwd, k_bwd = scale_to_rapid_equilibrium(k_fwd, k_fwd_min) @@ -115,6 +116,16 @@ def __init__( self.k_fwd = k_fwd self.k_bwd = k_bwd + if isinstance(components[0], str): + indices = [component_system.species.index(i) for i in components] + else: + warnings.warn( + "Component are expected to be specified by name. " + "This will be deprecated in future versions.", + DeprecationWarning, stacklevel=2 + ) + indices = components + self.stoich = np.zeros((self.n_comp,)) for i, c in zip(indices, coefficients): self.stoich[i] = c @@ -249,9 +260,9 @@ class CrossPhaseReaction(Structure): 'exponents_bwd_liquid_modsolid', 'exponents_bwd_solid_modliquid', ] - + @deprecated_alias(indices='components') def __init__( - self, component_system, indices, coefficients, phases, + self, component_system, components, coefficients, phases, k_fwd, k_bwd=1, is_kinetic=True, k_fwd_min=100, exponents_fwd_liquid=None, exponents_bwd_liquid=None, exponents_fwd_solid=None, exponents_bwd_solid=None): @@ -261,8 +272,8 @@ def __init__( ---------- component_system : ComponentSystem Component system of the reaction. - indices : list - Component indices. + components : list of int or strings + Component names of the components involved in the reaction. coefficients : list Stoichiometric coefficients in the same order of component indices. 
        phases : list
@@ -307,6 +318,16 @@
        self.exponents_fwd_liquid_modsolid = np.zeros((self.n_comp,))
        self.exponents_bwd_liquid_modsolid = np.zeros((self.n_comp,))
 
+        if isinstance(components[0], str):
+            indices = [component_system.species.index(i) for i in components]
+        else:
+            warnings.warn(
+                "Components are expected to be specified by name. "
+                "Specifying them by index is deprecated and will be removed "
+                "in a future version.",
+                DeprecationWarning, stacklevel=2
+            )
+            indices = components
+
        if phases is None:
            phases = [0 for n in indices]
@@ -496,9 +517,9 @@ class MassActionLaw(BulkReactionBase):
    k_fwd = Aggregator('k_fwd', 'reactions')
    k_bwd = Aggregator('k_bwd', 'reactions')
 
-    stoich = SizedAggregator('stoich', 'reactions')
-    exponents_fwd = SizedAggregator('exponents_fwd', 'reactions')
-    exponents_bwd = SizedAggregator('exponents_bwd', 'reactions')
+    stoich = SizedAggregator('stoich', 'reactions', transpose=True)
+    exponents_fwd = SizedAggregator('exponents_fwd', 'reactions', transpose=True)
+    exponents_bwd = SizedAggregator('exponents_bwd', 'reactions', transpose=True)
 
    _parameters = ['stoich', 'exponents_fwd', 'exponents_bwd', 'k_fwd', 'k_bwd']
@@ -507,11 +528,13 @@ def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
 
    @wraps(Reaction.__init__)
-    def add_reaction(self, *args, **kwargs):
+    def add_reaction(self, *args, **kwargs) -> Reaction:
        """Add reaction to ReactionSystem."""
        r = Reaction(self.component_system, *args, **kwargs)
        self._reactions.append(r)
 
+        return r
+
    @property
    def reactions(self):
        """list: Reactions in ReactionSystem."""
@@ -547,38 +570,50 @@ class MassActionLawParticle(ParticleReactionBase):
        mapping={
            CrossPhaseReaction: 'stoich_liquid',
            None: 'stoich'
-        }
+        },
+        transpose=True,
    )
    k_fwd_liquid = Aggregator('k_fwd', 'liquid_reactions')
    k_bwd_liquid = Aggregator('k_bwd', 'liquid_reactions')
-    exponents_fwd_liquid = SizedAggregator('exponents_fwd', 'liquid_reactions')
-    exponents_bwd_liquid = SizedAggregator('exponents_bwd', 'liquid_reactions')
+    exponents_fwd_liquid = SizedAggregator(
+        'exponents_fwd', 'liquid_reactions', transpose=True
+    )
+    exponents_bwd_liquid = SizedAggregator(
+        'exponents_bwd', 'liquid_reactions', transpose=True
+    )
 
    stoich_solid = SizedClassDependentAggregator(
        'stoich_solid', 'solid_reactions',
        mapping={
            CrossPhaseReaction: 'stoich_solid',
            None: 'stoich'
-        }
+        },
+        transpose=True,
    )
    k_fwd_solid = Aggregator('k_fwd', 'solid_reactions')
    k_bwd_solid = Aggregator('k_bwd', 'solid_reactions')
-    exponents_fwd_solid = SizedAggregator('exponents_fwd', 'solid_reactions')
-    exponents_bwd_solid = SizedAggregator('exponents_bwd', 'solid_reactions')
+    exponents_fwd_solid = SizedAggregator(
+        'exponents_fwd', 'solid_reactions', transpose=True
+    )
+    exponents_bwd_solid = SizedAggregator(
+        'exponents_bwd', 'solid_reactions', transpose=True
+    )
 
    exponents_fwd_liquid_modsolid = SizedClassDependentAggregator(
        'exponents_fwd_liquid_modsolid', 'liquid_reactions',
        mapping={
            CrossPhaseReaction: 'exponents_fwd_liquid_modsolid',
            None: None
-        }
+        },
+        transpose=True,
    )
    exponents_bwd_liquid_modsolid = SizedClassDependentAggregator(
        'exponents_bwd_liquid_modsolid', 'liquid_reactions',
        mapping={
            CrossPhaseReaction: 'exponents_bwd_liquid_modsolid',
            None: None
-        }
+        },
+        transpose=True,
    )
 
    exponents_fwd_solid_modliquid = SizedClassDependentAggregator(
@@ -586,14 +621,16 @@ class MassActionLawParticle(ParticleReactionBase):
        mapping={
            CrossPhaseReaction: 'exponents_fwd_solid_modliquid',
            None: None
-        }
+        },
+        transpose=True,
    )
    exponents_bwd_solid_modliquid = SizedClassDependentAggregator(
'exponents_bwd_solid_modliquid', 'solid_reactions', mapping={ CrossPhaseReaction: 'exponents_bwd_solid_modliquid', None: None - } + }, + transpose=True, ) _parameters = [ diff --git a/CADETProcess/processModel/unitOperation.py b/CADETProcess/processModel/unitOperation.py index 04d8bfed7..e33da9cc2 100644 --- a/CADETProcess/processModel/unitOperation.py +++ b/CADETProcess/processModel/unitOperation.py @@ -2,6 +2,8 @@ import math import warnings +import numpy as np + from CADETProcess import CADETProcessError from CADETProcess.dataStructure import frozen_attributes @@ -40,6 +42,7 @@ 'Cstr', 'TubularReactorBase', 'TubularReactor', + 'ChromatographicColumnBase', 'LumpedRateModelWithoutPores', 'LumpedRateModelWithPores', 'GeneralRateModel', @@ -76,8 +79,8 @@ class UnitBaseClass(Structure): CADETProcess.processModel.binding CADETProcess.processModel.reaction CADETProcess.processModel.FlowSheet - """ + name = String() _parameters = [] @@ -125,8 +128,6 @@ def __init__( solution_recorder = IORecorder() self.solution_recorder = solution_recorder - - super().__init__(*args, **kwargs) @property @@ -288,6 +289,8 @@ def binding_model(self, binding_model): @property def n_bound_states(self): + if isinstance(self.binding_model, NoBinding): + return 0 return self.binding_model.n_bound_states @property @@ -374,14 +377,13 @@ def __str__(self): class SourceMixin(Structure): - """Mixin class for Units that have Source-like behavior + """Mixin class for Units that have Source-like behavior. See Also -------- SinkMixin Inlet Cstr - """ _n_poly_coeffs = 4 flow_rate = Polynomial(size=('_n_poly_coeffs')) @@ -390,14 +392,14 @@ class SourceMixin(Structure): class SinkMixin(): - """Mixin class for Units that have Sink-like behavior + """Mixin class for Units that have Sink-like behavior. See Also -------- SourceMixin Cstr - """ + pass @@ -412,7 +414,6 @@ class Inlet(UnitBaseClass, SourceMixin): Polynomial coefficients for volumetric flow rate. solution_recorder : IORecorder Solution recorder for the unit operation. - """ c = NdPolynomial(size=('n_comp', '_n_poly_coeffs'), default=0) @@ -439,6 +440,7 @@ class Outlet(UnitBaseClass, SinkMixin): class MixerSplitter(UnitBaseClass): """Pseudo unit operation for mixing/splitting streams in the system.""" + pass @@ -460,22 +462,24 @@ class TubularReactorBase(UnitBaseClass): Length of column. diameter : UnsignedFloat Diameter of column. - axial_dispersion : UnsignedFloat - Dispersion rate of compnents in axial direction. + axial_dispersion : List of unsigned floats. Length depends on n_comp. + Axial dispersion coefficient for each component. flow_direction : Switch If 1: Forward flow. If -1: Backwards flow. discretization : DiscretizationParametersBase Discretization scheme of the unit. 
-
    """
 
    length = UnsignedFloat()
    diameter = UnsignedFloat()
-    axial_dispersion = UnsignedFloat()
+    axial_dispersion = SizedUnsignedList(size='n_comp')
    flow_direction = Switch(valid=[-1, 1], default=1)
 
+    c = SizedList(size='n_comp', default=0)
+    _initial_state = UnitBaseClass._initial_state + ['c']
 
-    _parameters = ['length', 'diameter', 'axial_dispersion', 'flow_direction']
+    _parameters = ['length', 'diameter', 'axial_dispersion', 'flow_direction'] \
+        + _initial_state
    _section_dependent_parameters = \
        UnitBaseClass._section_dependent_parameters + \
        ['axial_dispersion', 'flow_direction']
@@ -673,12 +677,11 @@ def calculate_flow_rate_from_velocity(self, u0):
        --------
        calculate_interstitial_velocity
        calculate_interstitial_rt
-
        """
        return u0 * self.cross_section_area_interstitial
 
    def NTP(self, flow_rate):
-        r"""Number of theoretical plates.
+        r"""Calculate number of theoretical plates.
 
        Parameters
        ----------
@@ -694,10 +697,9 @@
        Returns
        -------
        NTP : float
            Number of theretical plates
-
        """
        u0 = self.calculate_interstitial_velocity(flow_rate)
-        return u0 * self.length / (2 * self.axial_dispersion)
+        return u0 * self.length / (2 * np.array(self.axial_dispersion))
 
    def set_axial_dispersion_from_NTP(self, NTP, flow_rate):
        r"""Set axial dispersion from number of theoretical plates (NTP).
@@ -712,7 +714,7 @@
        Calculated using the axial dispersion coefficient:
 
        .. math::
-            NTP = \frac{u \cdot L_{Column}}{2 \cdot D_a}
+            NTP = \frac{u_{int} \cdot L_{Column}}{2 \cdot D_{ax}}
 
        Returns
        -------
@@ -721,13 +723,43 @@
        See Also
        --------
-        u0
+        calculate_interstitial_velocity
        NTP
 
        """
        u0 = self.calculate_interstitial_velocity(flow_rate)
        self.axial_dispersion = u0 * self.length / (2 * NTP)
 
+    def calculate_bodenstein_number(self, flow_rate: float) -> np.ndarray:
+        r"""Calculate the Bodenstein number for a given flow rate.
+
+        The Bodenstein number is calculated as follows:
+
+        .. math::
+            Bo = \frac{u_{int} \cdot L_{Column}}{D_{ax}},
+
+        where :math:`u_{int}` is the interstitial velocity.
+
+        Parameters
+        ----------
+        flow_rate : float
+            Volumetric flow rate.
+
+        Returns
+        -------
+        Bo : np.ndarray
+            Bodenstein number for each component.
+
+        See Also
+        --------
+        calculate_interstitial_velocity
+        NTP
+
+        """
+        u0 = self.calculate_interstitial_velocity(flow_rate)
+        return u0 * self.length / np.array(self.axial_dispersion)
+
 
class TubularReactor(TubularReactorBase):
    """Class for tubular reactors and tubing.
@@ -747,10 +779,6 @@
 
    total_porosity = Constant(1)
 
-    c = SizedList(size='n_comp', default=0)
-    _initial_state = ['c']
-    _parameters = ['c']
-
    def __init__(self, *args, discretization_scheme='FV', **kwargs):
        if discretization_scheme == 'FV':
            discretization = LRMDiscretizationFV()
@@ -765,7 +793,11 @@
        )
 
 
-class LumpedRateModelWithoutPores(TubularReactorBase):
+class ChromatographicColumnBase(TubularReactorBase):
+    """Base class for chromatographic column models."""
+
+    pass
+
+
+class LumpedRateModelWithoutPores(ChromatographicColumnBase):
    """Parameters for a lumped rate model without pores.
Attributes @@ -794,10 +826,9 @@ class LumpedRateModelWithoutPores(TubularReactorBase): total_porosity = UnsignedFloat(ub=1) _parameters = ['total_porosity'] - c = SizedList(size='n_comp', default=0) _q = SizedUnsignedList(size='n_bound_states', default=0) - _initial_state = TubularReactorBase._initial_state + ['q'] + _initial_state = ChromatographicColumnBase._initial_state + ['q'] _parameters = _parameters + _initial_state def __init__(self, *args, discretization_scheme='FV', **kwargs): @@ -816,14 +847,14 @@ def q(self): @q.setter def q(self, q): - if isinstance(self.binding_model, NoBinding): + if isinstance(self.binding_model, NoBinding) and q not in (None, []): raise CADETProcessError("Cannot set q without binding model.") self._q = q - self.parameters['q'] = q + self.parameters['q'] = self._q -class LumpedRateModelWithPores(TubularReactorBase): +class LumpedRateModelWithPores(ChromatographicColumnBase): """Parameters for the lumped rate model with pores. Attributes @@ -835,7 +866,7 @@ class LumpedRateModelWithPores(TubularReactorBase): particle_radius : UnsignedFloat Radius of the particles. film_diffusion : List of unsigned floats. Length depends on n_comp. - Diffusion rate for components in pore volume. + Film diffusion coefficients for each component. pore_accessibility : List of unsigned floats. Length depends on n_comp. Accessibility of pores for components. c : List of unsigned floats. Length depends on n_comp @@ -846,8 +877,8 @@ class LumpedRateModelWithPores(TubularReactorBase): Initial concntration of the bound phase. solution_recorder : LRMPRecorder Solution recorder for the unit operation. - """ + supports_binding = True supports_bulk_reaction = True supports_particle_reaction = True @@ -870,11 +901,10 @@ class LumpedRateModelWithPores(TubularReactorBase): TubularReactorBase._section_dependent_parameters + \ ['film_diffusion', 'pore_accessibility'] - c = SizedList(size='n_comp', default=0) _cp = SizedUnsignedList(size='n_comp') _q = SizedUnsignedList(size='n_bound_states', default=0) - _initial_state = ['cp', 'q'] + _initial_state = ChromatographicColumnBase._initial_state + ['cp', 'q'] _parameters = _parameters + _initial_state def __init__(self, *args, discretization_scheme='FV', **kwargs): @@ -944,14 +974,14 @@ def q(self): @q.setter def q(self, q): - if isinstance(self.binding_model, NoBinding): + if isinstance(self.binding_model, NoBinding) and q not in (None, []): raise CADETProcessError("Cannot set q without binding model.") self._q = q self.parameters['q'] = q -class GeneralRateModel(TubularReactorBase): +class GeneralRateModel(ChromatographicColumnBase): """Parameters for the general rate model. Attributes @@ -963,19 +993,19 @@ class GeneralRateModel(TubularReactorBase): particle_radius : UnsignedFloat Radius of the particles. film_diffusion : List of unsigned floats. Length depends on n_comp. - Diffusion rate for components in pore volume. + Film diffusion coefficients for each component. pore_accessibility : List of unsigned floats. Length depends on n_comp. Accessibility of pores for components. pore_diffusion : List of unsigned floats. Length depends on n_comp. Diffusion rate for components in pore volume. surface_diffusion : List of unsigned floats. Length depends on n_comp. - Diffusion rate for components in adsrobed state. + Diffusion rate for components in adsorbed state. c : List of unsigned floats. Length depends on n_comp - Initial concentration of the reactor. + Initial concentration in the mobile phase. cp : List of unsigned floats. 
            Length depends on n_comp
-        Initial concentration of the pores
+        Initial concentration in the stagnant mobile phase within the pores.
    q : List of unsigned floats. Length depends on n_comp
-        Initial concntration of the bound phase.
+        Initial concentration of the bound phase.
    solution_recorder : GRMRecorder
        Solution recorder for the unit operation.
 
@@ -1001,11 +1031,10 @@
        TubularReactorBase._section_dependent_parameters + \
        ['film_diffusion', 'pore_accessibility', 'pore_diffusion', 'surface_diffusion']
 
-    c = SizedList(size='n_comp', default=0)
    _cp = SizedUnsignedList(size='n_comp')
    _q = SizedUnsignedList(size='n_bound_states', default=0)
 
-    _initial_state = ['cp', 'q']
+    _initial_state = ChromatographicColumnBase._initial_state + ['cp', 'q']
    _parameters = _parameters + _initial_state
 
    def __init__(self, *args, discretization_scheme='FV', **kwargs):
@@ -1077,7 +1106,7 @@
    def q(self):
 
    @q.setter
    def q(self, q):
-        if isinstance(self.binding_model, NoBinding):
+        if isinstance(self.binding_model, NoBinding) and q not in (None, []):
            raise CADETProcessError("Cannot set q without binding model.")
 
        self._q = q
@@ -1143,7 +1172,7 @@ class Cstr(UnitBaseClass, SourceMixin, SinkMixin):
    init_liquid_volume = UnsignedFloat()
    const_solid_volume = UnsignedFloat(default=0)
    _V = UnsignedFloat()
-    _initial_state = ['c', 'q', 'init_liquid_volume']
+    _initial_state = UnitBaseClass._initial_state + ['c', 'q', 'init_liquid_volume']
    _parameters = _parameters + _initial_state
 
    def __init__(self, *args, **kwargs):
@@ -1222,7 +1251,7 @@ def calculate_interstitial_rt(self, flow_rate):
 
        See Also
        --------
-        u0
+        calculate_interstitial_velocity
 
        """
        return self.volume_liquid / flow_rate
@@ -1233,7 +1262,7 @@
    def q(self):
 
    @q.setter
    def q(self, q):
-        if isinstance(self.binding_model, NoBinding):
+        if isinstance(self.binding_model, NoBinding) and q not in (None, []):
            raise CADETProcessError("Cannot set q without binding model.")
 
        self._q = q
@@ -1249,8 +1278,8 @@ class MCT(UnitBaseClass):
        Length of column.
    channel_cross_section_areas : List of unsinged floats. Lenght depends on nchannel.
        Diameter of column.
-    axial_dispersion : UnsignedFloat
-        Dispersion rate of components in axial direction.
+    axial_dispersion : List of unsigned floats. Length depends on n_comp and nchannel.
+        Axial dispersion coefficient for each component in each channel.
    flow_direction : Switch
        If 1: Forward flow. If -1: Backwards flow.
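
Since `axial_dispersion` is now sized per component, `NTP` and the new `calculate_bodenstein_number` yield one value per component, and the two relations differ only by the factor of two in the denominator. A minimal numeric sketch of these relations (hypothetical values, plain NumPy rather than a configured unit operation):

```python
import numpy as np

# Hypothetical values; not taken from a configured CADET-Process column.
u0 = 1e-3                                   # interstitial velocity / (m/s)
length = 0.1                                # column length / m
axial_dispersion = np.array([1e-7, 2e-7])   # per-component D_ax / (m^2/s)

NTP = u0 * length / (2 * axial_dispersion)  # number of theoretical plates
Bo = u0 * length / axial_dispersion         # Bodenstein number

# By the two formulas above, Bo = 2 * NTP for every component.
assert np.allclose(Bo, 2 * NTP)
print(NTP)  # [500. 250.]
print(Bo)   # [1000.  500.]
```
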
@@ -1268,11 +1297,11 @@ class MCT(UnitBaseClass): length = UnsignedFloat() channel_cross_section_areas = SizedList(size='nchannel') - axial_dispersion = UnsignedFloat() + axial_dispersion = SizedUnsignedList(size=('n_comp', 'nchannel')) flow_direction = Switch(valid=[-1, 1], default=1) nchannel = UnsignedInteger() - exchange_matrix = SizedNdArray(size=('nchannel', 'nchannel','n_comp')) + exchange_matrix = SizedNdArray(size=('nchannel', 'nchannel', 'n_comp')) _parameters = [ 'length', @@ -1291,7 +1320,7 @@ class MCT(UnitBaseClass): [] c = SizedNdArray(size=('n_comp', 'nchannel'), default=0) - _initial_state = ['c'] + _initial_state = UnitBaseClass._initial_state + ['c'] _parameters = _parameters + _initial_state def __init__(self, *args, nchannel, **kwargs): @@ -1306,6 +1335,7 @@ def __init__(self, *args, nchannel, **kwargs): @property def nchannel(self): + """int: The number of channels in the MCT.""" return self._nchannel @nchannel.setter diff --git a/CADETProcess/simulator/cadetAdapter.py b/CADETProcess/simulator/cadetAdapter.py index 044f90b04..03fda8809 100644 --- a/CADETProcess/simulator/cadetAdapter.py +++ b/CADETProcess/simulator/cadetAdapter.py @@ -1,16 +1,12 @@ -from CADETProcess.dataStructure import Structure, ParameterWrapper from collections import defaultdict from functools import wraps import os -import platform from pathlib import Path -import shutil +import re import subprocess -from subprocess import TimeoutExpired import time import tempfile import warnings -import re from addict import Dict import numpy as np @@ -18,6 +14,7 @@ from CADETProcess import CADETProcessError from CADETProcess import settings +from CADETProcess.dataStructure import Structure, ParameterWrapper from CADETProcess.dataStructure import ( Bool, Switch, UnsignedFloat, UnsignedInteger, ) @@ -29,7 +26,7 @@ ) from CADETProcess.processModel import NoBinding, BindingBaseClass from CADETProcess.processModel import NoReaction, ReactionBaseClass -from CADETProcess.processModel import NoDiscretization, DGMixin +from CADETProcess.processModel import NoDiscretization from CADETProcess.processModel import ( UnitBaseClass, Inlet, Cstr, TubularReactor, LumpedRateModelWithoutPores ) @@ -87,11 +84,15 @@ class Cadet(SimulatorBase): """ - timeout = UnsignedFloat() use_dll = Bool(default=False) _force_constant_flow_rate = False - def __init__(self, install_path=None, temp_dir=None, *args, **kwargs): + def __init__( + self, + install_path=None, + temp_dir=None, + *args, **kwargs + ): super().__init__(*args, **kwargs) if install_path is None: @@ -120,6 +121,22 @@ def temp_dir(self): def temp_dir(self, temp_dir): self._temp_dir = temp_dir + @property + def timeout(self): + warnings.warn( + "This parameter is deprecated. Use solver_parameters.timeout instead.", + FutureWarning, + ) + return self.solver_parameters.timeout + + @timeout.setter + def timeout(self, timeout): + warnings.warn( + "This parameter is deprecated. Use solver_parameters.timeout instead.", + FutureWarning, + ) + self.solver_parameters.timeout = timeout + def locks_process(func): """Lock process to enable caching.""" @wraps(func) @@ -162,12 +179,22 @@ def check_cadet(self): """ lwe_hdf5_path = Path(self.temp_dir) / 'LWE.h5' - cadet_model = self.get_new_cadet_instance() + cadet = self.get_new_cadet_instance() + + cadet.create_lwe(lwe_hdf5_path) - cadet_model.create_lwe(lwe_hdf5_path) + # Check for CADET-Python >= v1.1, which introduced the .run_simulation interface. 
+        # If it's not present, assume CADET-Python <= 1.0.4 and use the old
+        # .run_load() interface. This check can be removed at some point in
+        # the future.
+        if hasattr(cadet, "run_simulation"):
+            return_information = cadet.run_simulation()
+        else:
+            return_information = cadet.run_load()
+
+        cadet.delete_file()
 
-        cadet_model.run()
-        os.remove(lwe_hdf5_path)
+        if return_information.return_code != 0:
+            raise CADETProcessError(return_information)
 
        print("Test simulation completed successfully")
 
@@ -177,9 +204,8 @@ def get_tempfile_name(self):
        f = next(tempfile._get_candidate_names())
        return self.temp_dir / f'{f}.h5'
 
-    @locks_process
-    def run(self, process, cadet=None, file_path=None):
+    def _run(self, process, cadet=None, file_path=None):
        """Interface to the solver run function.
 
        The configuration is extracted from the process object and then saved
@@ -230,9 +256,16 @@
        try:
            start = time.time()
-            return_information = cadet.run_load(timeout=self.timeout)
+
+            # Check for CADET-Python >= v1.1, which introduced the
+            # .run_simulation interface. If it's not present, assume
+            # CADET-Python <= 1.0.4 and use the old .run_load() interface.
+            # This check can be removed at some point in the future.
+            if hasattr(cadet, "run_simulation"):
+                return_information = cadet.run_simulation()
+            else:
+                return_information = cadet.run_load()
            elapsed = time.time() - start
-        except TimeoutExpired:
+        except subprocess.TimeoutExpired:
            raise CADETProcessError('Simulator timed out') from None
        finally:
            if not self.use_dll and file_path is None:
@@ -274,6 +307,11 @@ def get_cadet_version(self) -> tuple[str, str]:
        RuntimeError
            If any unhandled event during running the subprocess occurs.
        """
+        warnings.warn(
+            "This method will be deprecated in a future release. "
+            "Use the `version` attribute instead.",
+            FutureWarning,
+        )
        try:
            result = subprocess.run(
                [self.get_new_cadet_instance().cadet_cli_path, '--version'],
@@ -297,6 +335,19 @@
        except subprocess.CalledProcessError as e:
            raise RuntimeError(f"Command execution failed: {e}")
 
+    @property
+    def version(self) -> str:
+        """str: The version of the Cadet installation."""
+        cadet = self.get_new_cadet_instance()
+        # Check for CADET-Python >= v1.1, which introduced the version property.
+        # If it's not present, assume CADET-Python <= 1.0.4 and directly access
+        # the CadetRunner attribute.
+        # This check can be removed at some point in the future.
+        if hasattr(cadet, "version"):
+            return cadet.version
+        else:
+            return cadet.cadet_runner.version
+
    def get_new_cadet_instance(self):
        cadet = CadetAPI(install_path=self.install_path, use_dll=self.use_dll)
        return cadet
@@ -311,7 +362,14 @@ def run_h5(self, file_path):
        cadet = self.get_new_cadet_instance()
        cadet.filename = file_path
        cadet.load()
-        cadet.run_load(timeout=self.timeout)
+
+        # Check for CADET-Python >= v1.1, which introduced the
+        # .run_simulation interface. If it's not present, assume
+        # CADET-Python <= 1.0.4 and use the old .run_load() interface.
+        # This check can be removed at some point in the future.
+ if hasattr(cadet, "run_simulation"): + return_information = cadet.run_simulation() + else: + return_information = cadet.run_load() return cadet @@ -871,6 +929,7 @@ def get_unit_config(self, unit): if not isinstance(unit.discretization, NoDiscretization): unit_config['discretization'] = unit.discretization.parameters + unit_config['discretization']['spatial_method'] = unit.discretization.spatial_method if isinstance(unit, Cstr) \ and not isinstance(unit.binding_model, NoBinding): @@ -1214,7 +1273,11 @@ class ModelSolverParameters(Structure): 'VELOCITY': 'flow_direction', }, 'fixed': { - 'PAR_SURFDIFFUSION_MULTIPLEX': 0, + 'COL_DISPERSION_MULTIPLEX': 3, + 'FILM_DIFFUSION_MULTIPLEX': 3, + 'PAR_DIFFUSION_MULTIPLEX': 3, + 'PAR_SURFDIFFUSION_MULTIPLEX': 3, + 'PORE_ACCESSIBILITY_MULTIPLEX': 3, }, }, 'LumpedRateModelWithPores': { @@ -1234,6 +1297,11 @@ class ModelSolverParameters(Structure): 'CROSS_SECTION_AREA': 'cross_section_area', 'VELOCITY': 'flow_direction', }, + 'fixed': { + 'COL_DISPERSION_MULTIPLEX': 3, + 'FILM_DIFFUSION_MULTIPLEX': 3, + 'PORE_ACCESSIBILITY_MULTIPLEX': 3, + }, }, 'LumpedRateModelWithoutPores': { 'name': 'LUMPED_RATE_MODEL_WITHOUT_PORES', @@ -1247,6 +1315,9 @@ class ModelSolverParameters(Structure): 'CROSS_SECTION_AREA': 'cross_section_area', 'VELOCITY': 'flow_direction', }, + 'fixed': { + 'COL_DISPERSION_MULTIPLEX': 3, + }, }, 'TubularReactor': { 'name': 'LUMPED_RATE_MODEL_WITHOUT_PORES', @@ -1260,6 +1331,7 @@ class ModelSolverParameters(Structure): }, 'fixed': { 'TOTAL_POROSITY': 1, + 'COL_DISPERSION_MULTIPLEX': 3, }, }, 'Cstr': { @@ -1285,6 +1357,9 @@ class ModelSolverParameters(Structure): 'EXCHANGE_MATRIX': 'exchange_matrix', 'VELOCITY': 'flow_direction', }, + 'fixed': { + 'COL_DISPERSION_MULTIPLEX': 7, + }, }, 'Inlet': { 'name': 'INLET', @@ -1725,6 +1800,7 @@ class SolverParameters(Structure): ---------- nthreads : int Number of used threads. + The default is 1. consistent_init_mode : int, optional Consistent initialization mode. Valid values are: @@ -1749,6 +1825,9 @@ class SolverParameters(Structure): - 6: None once, then full - 7: None once, then lean The default is 1. + timeout : float, optional + Timeout in seconds. Simulation is aborted if wall clock time exceeds the + provided value. If value is 0, no timeout is applied. The default is 0. 
        See Also
        --------
 
@@ -1759,9 +1838,10 @@
    nthreads = UnsignedInteger(default=1)
    consistent_init_mode = UnsignedInteger(default=1, ub=7)
    consistent_init_mode_sens = UnsignedInteger(default=1, ub=7)
+    timeout = UnsignedFloat(default=0)
 
    _parameters = [
-        'nthreads', 'consistent_init_mode', 'consistent_init_mode_sens'
+        'nthreads', 'consistent_init_mode', 'consistent_init_mode_sens', 'timeout',
    ]
 
diff --git a/CADETProcess/simulator/simulator.py b/CADETProcess/simulator/simulator.py
index f31f55004..2213273fc 100644
--- a/CADETProcess/simulator/simulator.py
+++ b/CADETProcess/simulator/simulator.py
@@ -51,11 +51,14 @@ class SimulatorBase(Structure):
 
    n_cycles = UnsignedInteger(default=1)
    evaluate_stationarity = Bool(default=False)
-    n_cycles_min = UnsignedInteger(default=5)
+    n_cycles_min = UnsignedInteger(default=1)
+    n_cycles_batch = UnsignedInteger(default=5)
    n_cycles_max = UnsignedInteger(default=100)
    raise_exception_on_max_cycles = Bool(default=False)
 
-    def __init__(self, stationarity_evaluator=None):
+    def __init__(self, stationarity_evaluator=None, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
        self.logger = get_logger('Simulation')
 
        if stationarity_evaluator is None:
@@ -248,6 +251,14 @@ def simulate(self, process, previous_results=None, **kwargs):
        if not process.check_config():
            raise CADETProcessError("Process is not configured correctly.")
 
+        if self.n_cycles_max < self.n_cycles_min:
+            raise ValueError(
+                "n_cycles_max is set lower than n_cycles_min "
+                f"({self.n_cycles_max} vs {self.n_cycles_min})."
+            )
+
+        # If "max" is below "batch", reduce "batch" to "max" to only run "max" cycles.
+        if self.n_cycles_max < self.n_cycles_batch:
+            self.n_cycles_batch = self.n_cycles_max
+
        if not self.evaluate_stationarity:
            results = self.simulate_n_cycles(
                process, self.n_cycles, previous_results, **kwargs
@@ -305,7 +316,7 @@ def simulate_n_cycles(
        if previous_results is not None:
            self.set_state_from_results(process, previous_results)
 
-        return self.run(process, **kwargs)
+        return self._run(process, **kwargs)
 
        self.n_cycles = n_cyc_orig
@@ -349,7 +360,7 @@ def simulate_to_stationarity(
            raise TypeError('Expected Process')
 
        n_cyc_orig = self.n_cycles
-        self.n_cycles = self.n_cycles_min
+        self.n_cycles = max(self.n_cycles_min, self.n_cycles_batch)
 
        if results is not None:
            n_cyc = results.n_cycles
@@ -357,12 +368,12 @@
            n_cyc = 0
 
        while True:
-            n_cyc += self.n_cycles_min
+            n_cyc += self.n_cycles_batch
 
            if results is not None:
                self.set_state_from_results(process, results)
 
-            new_results = self.run(process, **kwargs)
+            new_results = self._run(process, **kwargs)
 
            if results is None:
                results = new_results
@@ -414,7 +425,7 @@ def set_state_from_results(self, process, results):
        return process
 
    @abstractmethod
-    def run(process, **kwargs):
+    def _run(self, process, **kwargs):
        """Abstract method for running a simulation.
Parameters diff --git a/CADETProcess/smoothing2.py b/CADETProcess/smoothing2.py deleted file mode 100644 index 4becec822..000000000 --- a/CADETProcess/smoothing2.py +++ /dev/null @@ -1,102 +0,0 @@ -import numpy as np -import scipy.signal - -from CADETMatch import smoothing - -from CADETProcess.comparison import calculate_sse - - -def smooth_butter(reference, cutoff=None, resample=True): - reference.reset() - reference.normalize() - - if resample: - reference.resample() - - signal = reference.solution - - fs = 1.0 / (reference.time[1] - reference.time[0]) - nyquist = fs / 2 # 0.5 times the sampling frequency - - if cutoff is None: - cutoff = nyquist/10 - - sos = scipy.signal.butter(3, cutoff, output='sos') - filtered_signal = scipy.signal.sosfiltfilt(sos, signal, axis=0) - - sse = calculate_sse(reference.solution, filtered_signal) - - reference.solution = filtered_signal - reference.is_smoothed = True - - return sse - - -def smooth_savgol(reference, window_length=None, resample=True): - reference.reset() - reference.normalize() - - if resample: - reference.resample() - - signal = reference.solution - - if window_length is None: - window_length = len(signal)/10 - - filtered_signal = scipy.signal.savgol_filter( - signal, window_length, 3, mode='nearest', axis=0 - ) - - sse = calculate_sse(reference.solution, filtered_signal) - - reference.solution = filtered_signal - reference.is_smoothed = True - - return sse - - -def smooth_median(reference, kernel_size=None, resample=True): - reference.reset() - reference.normalize() - - if resample: - reference.resample() - - signal = reference.solution - - if kernel_size is None: - kernel_size = len(signal)/100 - - kernel_size = round(kernel_size) - - if kernel_size % 2 == 0: - kernel_size -= 1 - - filtered_signal = np.zeros(reference.solution.shape) - - for i in range(reference.n_comp): - filtered_signal[:, i] = scipy.signal.medfilt(signal[:, i], kernel_size) - - sse = calculate_sse(reference.solution, filtered_signal) - - reference.solution = filtered_signal - reference.is_smoothed = True - - return sse - - -def smooth_cadet_match(reference): - reference.reset() - - filtered_signal = np.zeros(reference.solution.shape) - - for i in range(reference.n_comp): - - signal = reference.solution[:, i] - - params = smoothing.find_smoothing_factors(reference.time, signal, None, None) - filtered_signal[:, i], _ = smoothing.full_smooth(reference.time, signal, *params) - - reference.solution = filtered_signal - reference.is_smoothed = True diff --git a/CADETProcess/solution.py b/CADETProcess/solution.py index 48261b40d..55c8e39c1 100755 --- a/CADETProcess/solution.py +++ b/CADETProcess/solution.py @@ -282,7 +282,7 @@ def update(self): self._dm_dt_interpolated = None def update_transform(self): - self.transform = transform.NormLinearTransform( + self.transform = transform.NormLinearTransformer( np.min(self.solution, axis=0), np.max(self.solution, axis=0), allow_extended_input=True, @@ -430,7 +430,8 @@ def smooth_data(self, s=None, crit_fs=None, crit_fs_der=None): self.is_smoothed = True def integral(self, start=None, end=None): - """Peak area in a fraction interval. + """ + Peak area in a fraction interval. Parameters ---------- @@ -443,7 +444,7 @@ def integral(self, start=None, end=None): Returns ------- Area : np.ndarray - Mass of all components in the fraction + Area of each component in the fraction. 
""" if start is None: @@ -489,7 +490,6 @@ def fraction_mass(self, start=None, end=None): ---------- start : float Start time of the fraction - end: float End time of the fraction @@ -716,9 +716,9 @@ def plot_purity( if layout is None: layout = plotting.Layout() - layout.x_label = '$time~/~s$' + layout.x_label = r'$time~/~s$' if x_axis_in_minutes: - layout.x_label = '$time~/~min$' + layout.x_label = r'$time~/~min$' layout.y_label = r'$Purity ~/~\%$' if start is not None: start /= 60 @@ -1737,7 +1737,8 @@ def antiderivative(self, time): return anti def integral(self, start=None, end=None): - """Definite integral between start and end. + """ + Definite integral between start and end. Parameters ---------- @@ -1753,13 +1754,13 @@ def integral(self, start=None, end=None): """ return np.array([ - self._solutions[comp].integral(start, end) - for comp in range(len(self._solutions)) + comp.integrate(start, end) + for comp in self._solutions ]) def __call__(self, t): return np.array([ - self._solutions[comp](t) for comp in range(len(self._solutions)) + comp(t) for comp in self._solutions ]).transpose() diff --git a/CADETProcess/tools/yamamoto.py b/CADETProcess/tools/yamamoto.py index 26b1952cc..794e72914 100644 --- a/CADETProcess/tools/yamamoto.py +++ b/CADETProcess/tools/yamamoto.py @@ -1,6 +1,7 @@ -from dataclasses import dataclass +from typing import NoReturn, Optional import numpy as np +import numpy.typing as npt import matplotlib.pyplot as plt from scipy.optimize import curve_fit @@ -8,13 +9,42 @@ from CADETProcess.processModel import TubularReactorBase, StericMassAction -__all__ = ['GradientExperiment', 'plot_experiments', 'YamamotoResults', 'fit_parameters'] +__all__ = [ + 'GradientExperiment', + 'plot_experiments', + 'YamamotoResults', + 'fit_parameters' +] class GradientExperiment(): def __init__( - self, time, c_salt, c_protein, gradient_volume, - c_salt_start=None, c_salt_end=None): + self, + time: npt.ArrayLike, + c_salt: npt.ArrayLike, + c_protein: npt.ArrayLike, + gradient_volume: float, + c_salt_start: Optional[float] = None, + c_salt_end: Optional[float] = None + ) -> NoReturn: + """ + Initialize a GradientExperiment instance. + + Parameters + ---------- + time : ArrayLike + Time points in seconds. + c_salt : ArrayLike + Salt concentration in mM. + c_protein : ArrayLike + Protein concentration(s) in mM. + gradient_volume : float + Gradient volume in m³. + c_salt_start : Optional[float], default=None + Starting salt concentration in mM. + c_salt_end : Optional[float], default=None + Ending salt concentration in mM. + """ self.time = np.array(time) self.c_salt = np.array(c_salt) @@ -33,24 +63,31 @@ def __init__( self.c_salt_end = c_salt_end @property - def n_proteins(self): + def n_proteins(self) -> int: + """int: Number of proteins.""" return self.c_protein.shape[1] @property - def c_salt_at_max(self): - """np.array: Salt concentration at protein peak maximum in mM.""" + def c_salt_at_max(self) -> float: + """float: Salt concentration at protein peak maximum in mM.""" max_c_protein = np.argmax(self.c_protein, axis=0) return self.c_salt[max_c_protein] @property - def t_at_max(self): + def t_at_max(self) -> float: + """int: Number of proteins.""" max_c_protein = np.argmax(self.c_protein, axis=0) return self.time[max_c_protein] - def calculate_normalized_gradient_slope(self, column_volume, total_porosity): - """Calculate normalized concentration gradient slope. 
+    def calculate_normalized_gradient_slope(
+        self,
+        column_volume,
+        total_porosity
+    ) -> float:
+        """
+        Calculate normalized concentration gradient slope.
 
        Parameters
        ----------
@@ -63,13 +100,38 @@
        Returns
        -------
        normalized_slope : float
            Gradient slope in mM.
-
        """
        slope = (self.c_salt_end - self.c_salt_start) / self.gradient_volume
        vol_factor = column_volume - total_porosity*column_volume
        return slope * vol_factor
 
-    def plot(self, fig=None, ax=None, sec_ax=None):
+    def plot(
+        self,
+        fig: Optional[plt.Figure] = None,
+        ax: Optional[plt.Axes] = None,
+        sec_ax: Optional[plt.Axes] = None,
+    ) -> tuple[plt.Figure, plt.Axes, plt.Axes]:
+        """
+        Plot the gradient experiment data.
+
+        Parameters
+        ----------
+        fig : Optional[plt.Figure], default=None
+            Existing matplotlib Figure object.
+        ax : Optional[plt.Axes], default=None
+            Existing matplotlib Axes object for primary plot.
+        sec_ax : Optional[plt.Axes], default=None
+            Existing secondary Axes for salt concentration.
+
+        Returns
+        -------
+        fig : plt.Figure
+            The figure object containing the plot.
+        ax : plt.Axes
+            The axes object for the protein concentration.
+        sec_ax : plt.Axes
+            The secondary axes object for the salt concentration.
+        """
        if ax is None:
            fig, ax = plt.subplots()
@@ -91,32 +153,64 @@
        return fig, ax, sec_ax
 
 
-def plot_experiments(experiments):
+def plot_experiments(experiments: list[GradientExperiment]) -> None:
+    """
+    Plot multiple gradient experiments in a single figure.
+
+    Parameters
+    ----------
+    experiments : list of GradientExperiment
+        List of gradient experiment instances to plot.
+    """
    fig = ax = sec_ax = None
 
    for exp in experiments:
        fig, ax, sec_ax = exp.plot(fig, ax, sec_ax)
 
 
-def yamamoto_equation(log_c_salt_at_max_M, lambda_, nu, k_eq):
-    """
+def yamamoto_equation(
+    log_c_salt_at_max_M: npt.ArrayLike,
+    lambda_: float,
+    nu: float,
+    k_eq: float,
+) -> np.ndarray:
+    r"""
+    Calculate the theoretical normalized gradient slope using Yamamoto's method.
+
+    Yamamoto's method is used in ion-exchange chromatography to model the relationship
+    between the normalized gradient slope (GH) and the peak salt concentration (I_R)
+    during a linear salt gradient elution. This method is based on the steric mass
+    action (SMA) model and allows for the determination of key chromatographic
+    parameters such as the characteristic charge (ν) and the equilibrium constant
+    (k_eq).
+
+    The underlying equation is:
+
+    .. math::
+        \log(GH) = (\nu + 1) \log(I_R)
+            - \log(k_{eq} \cdot \lambda^{\nu} \cdot (\nu + 1))
+
+    where:
+    - GH: Normalized gradient slope (logarithmic scale)
+    - I_R: Salt concentration at the protein peak maximum (in M)
+    - ν (nu): Characteristic charge of the protein
+    - k_eq: Equilibrium constant (binding affinity)
+    - λ (lambda_): Resin capacity in mM
 
    Parameters
    ----------
-    log_c_salt_at_max_M : float
-        log10 of salt concentrations in M at protein peak maximum.
+    log_c_salt_at_max_M : np.ndarray
+        Log10 of salt concentrations at protein peak maximum in M.
    lambda_ : float
        Resin capacity in mM.
    nu : float
-        Characteristic charge.
-    k_eq : float, optional
-        Equilibrium constant.
+        Characteristic charge of the molecule.
+    k_eq : float
+        Equilibrium constant of the binding model.
 
    Returns
    -------
-    TYPE
-        DESCRIPTION.
-
+    np.ndarray
+        Calculated normalized gradient slope (GH) values in logarithmic scale.
""" lambda_M = lambda_/1000 @@ -126,24 +220,66 @@ def yamamoto_equation(log_c_salt_at_max_M, lambda_, nu, k_eq): class YamamotoResults: """Parameter values determined using Yamamoto's method.""" - def __init__(self, column, experiments, log_gradient_slope, log_c_salt_at_max_M): + def __init__( + self, + column, + experiments: list[GradientExperiment], + log_gradient_slope: npt.ArrayLike, + log_c_salt_at_max_M: npt.ArrayLike, + ) -> NoReturn: + """ + Initialize YamamotoResults with column, experiments, and log-transformed data. + + Parameters + ---------- + column : Column + Column object containing the binding model. + experiments : list of GradientExperiment + List of gradient experiment instances. + log_gradient_slope : np.ndarray + Normalized gradient slopes in logarithmic scale. + log_c_salt_at_max_M : np.ndarray + Log10 of salt concentrations at protein peak maximum in M. + """ self.column = column self.experiments = experiments self.log_gradient_slope = log_gradient_slope self.log_c_salt_at_max_M = log_c_salt_at_max_M @property - def characteristic_charge(self): - return self.column.binding_model.characteristic_charge[1:] + def characteristic_charge(self) -> np.ndarray: + """np.ndarray: Characteristic charges of the binding model.""" + return np.array(self.column.binding_model.characteristic_charge[1:]) @property - def k_eq(self): - return self.column.binding_model.adsorption_rate[1:] + def k_eq(self) -> np.ndarray: + """np.ndarray: Equilibrium constants of the binding model.""" + return np.array(self.column.binding_model.adsorption_rate[1:]) + + def plot( + self, + fig: Optional[plt.Figure] = None, + ax: Optional[plt.Axes] = None + ) -> tuple[plt.Figure, plt.Axes]: + """ + Plot the normalized gradient slope against the peak salt concentration. - def plot(self, fig=None, ax=None): + Parameters + ---------- + fig : Optional[plt.Figure], default=None + Existing matplotlib Figure object. + ax : Optional[plt.Axes], default=None + Existing matplotlib Axes object. + + Returns + ------- + fig : plt.Figure + The figure object containing the plot. + ax : plt.Axes + The axes object for the plot. 
+        """
        if ax is None:
            fig, ax = plt.subplots()
-
        ax.set_ylabel('Normalized Gradient Slope $GH$ / $M$')
        ax.set_xlabel('Peak Salt Concentration $I_R$ / $M$')
@@ -154,8 +290,8 @@
            nu = self.characteristic_charge[i_p]
 
            x = [
-                min(self.log_c_salt_at_max_M[:, i_p])*1.05,
-                max(self.log_c_salt_at_max_M[:, i_p])*0.95
+                min(self.log_c_salt_at_max_M[:, i_p]) * 1.05,
+                max(self.log_c_salt_at_max_M[:, i_p]) * 0.95
            ]
 
            y = yamamoto_equation(
@@ -165,11 +301,9 @@
                k_eq=k_eq,
            )
            ax.plot(x, y, 'k')
-
-            ax.plot(self.log_c_salt_at_max_M, self.log_gradient_slope, 'ro')
+            ax.plot(self.log_c_salt_at_max_M[:, i_p], self.log_gradient_slope, 'ro')
 
        fig.tight_layout()
-
        return fig, ax
@@ -215,7 +349,9 @@
        log_c_salt_at_max_M[:, i_p] = np.log10(np.array(c_salt_at_max)/1000)
 
        nu[i_p], k_eq[i_p] = _fit_yamamoto(
-            log_c_salt_at_max_M[:, i_p], log_gradient_slope, column.binding_model.capacity
+            log_c_salt_at_max_M[:, i_p],
+            log_gradient_slope,
+            column.binding_model.capacity
        )
 
    column.binding_model.characteristic_charge = [0, *nu.tolist()]
@@ -229,25 +365,29 @@
    return yamamoto_results
 
 
-def _fit_yamamoto(log_c_salt_at_max_M, log_gradient_slope, lambda_):
+def _fit_yamamoto(
+    log_c_salt_at_max_M: np.ndarray,
+    log_gradient_slope: np.ndarray,
+    lambda_: float
+) -> tuple[float, float]:
    """
+    Fit the Yamamoto model to experimental data using non-linear curve fitting.
 
    Parameters
    ----------
-    log_c_salt_at_max_M : list
-        log10 of salt concentrations at protein peak maximum in mM.
+    log_c_salt_at_max_M : np.ndarray
+        Log10 of salt concentrations at protein peak maximum in M.
-    log_gradient_slope : TYPE
-        DESCRIPTION.
+    log_gradient_slope : np.ndarray
+        Normalized gradient slopes in logarithmic scale.
-    lambda_ : TYPE
+    lambda_ : float
        Resin capacity in mM.
 
    Returns
    -------
    nu : float
-        Characteristic charge.
-    k_eq : TYPE, optional
-        Equilibrium constant.
-
+        Fitted characteristic charge.
+    k_eq : float
+        Fitted equilibrium constant.
    """
    bounds = ((0, 1e-10), (1000, 1000))
 
    def yamamoto_wrapper(c_s, nu, k_eq):
        return yamamoto_equation(c_s, lambda_, nu, k_eq)
 
    results, pcov = curve_fit(
-        yamamoto_wrapper, log_c_salt_at_max_M, log_gradient_slope, bounds=bounds, p0=(1, 1)
+        yamamoto_wrapper,
+        log_c_salt_at_max_M,
+        log_gradient_slope,
+        bounds=bounds,
+        p0=(1, 1)
    )
 
    return results
diff --git a/CADETProcess/transform.py b/CADETProcess/transform.py
index afcb67c5c..86bdc28b5 100644
--- a/CADETProcess/transform.py
+++ b/CADETProcess/transform.py
@@ -11,24 +11,25 @@
.. autosummary::
    :toctree: generated/
 
-    TransformBase
-    NoTransform
-    NormLinearTransform
-    NormLogTransform
-    AutoTransform
+    TransformerBase
+    NullTransformer
+    NormLinearTransformer
+    NormLogTransformer
+    AutoTransformer
 
"""
-from abc import ABC, abstractmethod, abstractproperty
+from abc import ABC, abstractmethod
+from typing import Optional
 
-import numpy as np
-from matplotlib.axes import Axes
 import matplotlib.pyplot as plt
+import numpy as np
 
 from CADETProcess import plotting
+from CADETProcess.numerics import round_to_significant_digits
 
 
-class TransformBase(ABC):
+class TransformerBase(ABC):
    """
    Base class for parameter transformation.
 
    Attributes
    ----------
-    lb_input : {float, array-like}
+    lb_input : float or np.ndarray
        Lower bounds of the input parameter space.
-    ub_input : {float, array-like}
+    ub_input : float or np.ndarray
        Upper bounds of the input parameter space.
-    lb : {float, array-like}
+    lb : float or np.ndarray
        Lower bounds of the output parameter space.
-    ub : {float, array-like}
+    ub : float or np.ndarray
        Upper bounds of the output parameter space.
    allow_extended_input : bool
        If True, the input value may exceed the lower/upper bounds.
@@ -64,183 +65,198 @@
 
    Examples
    --------
-    >>> class MyTransform(TransformBase):
-    ...     def transform(self, x):
+    >>> class MyTransformer(TransformerBase):
+    ...     def transform(self, x: float) -> float:
    ...         return x ** 2
    ...
-    >>> t = MyTransform(lb_input=0, ub_input=10, lb=-100, ub=100)
+    >>> t = MyTransformer(lb_input=0, ub_input=10, lb=-100, ub=100)
    >>> t.transform(3)
    9
-
    """
 
    def __init__(
        self,
-            lb_input=-np.inf, ub_input=np.inf,
-            allow_extended_input=False, allow_extended_output=False):
-        """Initialize TransformBase
+        lb_input: float | np.ndarray = -np.inf,
+        ub_input: float | np.ndarray = np.inf,
+        allow_extended_input: bool = False,
+        allow_extended_output: bool = False
+    ) -> None:
+        """Initialize TransformerBase.
 
        Parameters
        ----------
-        lb_input : {float, array-like}, optional
-            Lower bounds of the input parameter space. The default is -inf.
-        ub_input : {float, array-like}, optional
-            Upper bounds of the input parameter space. The default is inf.
+        lb_input : float or np.ndarray, optional
+            Lower bounds of the input parameter space. Default is -inf.
+        ub_input : float or np.ndarray, optional
+            Upper bounds of the input parameter space. Default is inf.
        allow_extended_input : bool, optional
            If True, the input value may exceed the lower/upper bounds.
-            Else, an exception is thrown.
-            The default is False.
+            Else, an exception is thrown. Default is False.
        allow_extended_output : bool, optional
            If True, the output value may exceed the lower/upper bounds.
-            Else, an exception is thrown.
-            The default is False.
+            Else, an exception is thrown. Default is False.
        """
        self.lb_input = lb_input
        self.ub_input = ub_input
        self.allow_extended_input = allow_extended_input
        self.allow_extended_output = allow_extended_output
 
-    @abstractproperty
-    def is_linear(self):
-        """bool: True if transformation is linear."""
+    @property
+    @abstractmethod
+    def is_linear(self) -> bool:
+        """Return whether the transformation is linear."""
        pass
 
    @property
-    def lb_input(self):
-        """{float, array-like}: The lower bounds of the input parameter space."""
+    def lb_input(self) -> float | np.ndarray:
+        """Return the lower bounds of the input parameter space."""
        return self._lb_input
 
    @lb_input.setter
-    def lb_input(self, lb_input):
+    def lb_input(self, lb_input: float | np.ndarray) -> None:
        self._lb_input = lb_input
 
    @property
-    def ub_input(self):
-        """{float, array-like}: The upper bounds of the input parameter space."""
+    def ub_input(self) -> float | np.ndarray:
+        """Return the upper bounds of the input parameter space."""
        return self._ub_input
 
    @ub_input.setter
-    def ub_input(self, ub_input):
+    def ub_input(self, ub_input: float | np.ndarray) -> None:
        self._ub_input = ub_input
 
-    @abstractproperty
-    def lb(self):
-        """{float, array-like}: The lower bounds of the output parameter space.
-
-        Must be implemented in the child class.
-        """
+    @property
+    @abstractmethod
+    def lb(self) -> float | np.ndarray:
+        """Return the lower bounds of the output parameter space."""
        pass
 
-    @abstractproperty
-    def ub(self):
-        """{float, array-like}: The upper bounds of the output parameter space.
-
-        Must be implemented in the child class.
-        """
+    @property
+    @abstractmethod
+    def ub(self) -> float | np.ndarray:
+        """Return the upper bounds of the output parameter space."""
        pass
 
-    def transform(self, x):
+    def transform(self, x: float | np.ndarray) -> float | np.ndarray:
        """Transform the input parameter space to the output parameter space.
 
-        Applies the transformation function _transform to x after performing input
+        Applies the transformation function `_transform` to `x` after performing input
        bounds checking. If the transformed value exceeds the output bounds, an error
        is raised.
 
        Parameters
        ----------
-        x : {float, array}
+        x : float or np.ndarray
            Input parameter values.
 
        Returns
        -------
-        {float, array}
+        float or np.ndarray
            Transformed parameter values.
+
+        Raises
+        ------
+        ValueError
+            If `x` exceeds input or output bounds and `allow_extended_*` is False.
        """
        if (
-                not self.allow_extended_input and
-                not np.all((self.lb_input <= x) * (x <= self.ub_input))):
+            not self.allow_extended_input and
+            not np.all((self.lb_input <= x) & (x <= self.ub_input))
+        ):
            raise ValueError("Value exceeds input bounds.")
+
        x = self._transform(x)
+
        if (
-                not self.allow_extended_output and
-                not np.all((self.lb <= x) * (x <= self.ub))):
+            not self.allow_extended_output and
+            not np.all((self.lb <= x) & (x <= self.ub))
+        ):
            raise ValueError("Value exceeds output bounds.")
 
        return x
 
    @abstractmethod
-    def _transform(self, x):
-        """Transform the input parameter space to the output parameter space.
+    def _transform(self, x: float | np.ndarray) -> float | np.ndarray:
+        """Apply the transformation from input to output parameter space.
 
        Must be implemented in the child class.
 
        Parameters
        ----------
-        x : {float, array}
+        x : float or np.ndarray
            Input parameter values.
 
        Returns
        -------
-        {float, array}
+        float or np.ndarray
            Transformed parameter values.
        """
        pass
 
-    def untransform(self, x):
-        """Transform the output parameter space to the input parameter space.
-
-        Applies the transformation function _untransform to x after performing output
-        bounds checking. If the transformed value exceeds the input bounds, an error
-        is raised.
+    def untransform(
+        self,
+        x: float | np.ndarray,
+        significant_digits: Optional[int] = None
+    ) -> float | np.ndarray:
+        """Transform the output parameter space back to the input parameter space.
 
        Parameters
        ----------
-        x : {float, array}
+        x : float or np.ndarray
            Output parameter values.
+        significant_digits : int, optional
+            Number of significant figures to which the value is rounded.
+            If None, the value is not rounded.
 
        Returns
        -------
-        {float, array}
+        float or np.ndarray
            Transformed parameter values.
        """
+        x_ = round_to_significant_digits(x, digits=significant_digits)
+
        if (
-                not self.allow_extended_output and
-                not np.all((self.lb <= x) * (x <= self.ub))):
+            not self.allow_extended_output and
+            not np.all((self.lb <= x_) & (x_ <= self.ub))
+        ):
            raise ValueError("Value exceeds output bounds.")
-        x = self._untransform(x)
+
+        x_ = self._untransform(x_)
+        x_ = round_to_significant_digits(x_, digits=significant_digits)
+
        if (
-                not self.allow_extended_input and
-                not np.all((self.lb_input <= x) * (x <= self.ub_input))):
+            not self.allow_extended_input and
+            not np.all((self.lb_input <= x_) & (x_ <= self.ub_input))
+        ):
            raise ValueError("Value exceeds input bounds.")
 
-        return x
+        return x_
 
    @abstractmethod
-    def _untransform(self, x):
-        """Transform the output parameter space to the input parameter space.
+    def _untransform(self, x: float | np.ndarray) -> float | np.ndarray:
+        """Apply the inverse transformation from output to input parameter space.
 
        Must be implemented in the child class.
 
        Parameters
        ----------
-        x : {float, array}
+        x : float or np.ndarray
            Output parameter values.
 
        Returns
        -------
-        {float, array}
+        float or np.ndarray
            Transformed parameter values.
        """
        pass
 
    @plotting.create_and_save_figure
-    def plot(self, ax, use_log_scale=False):
-        """
-        Plot the transformed space against the input space.
+    def plot(self, ax: plt.Axes, use_log_scale: bool = False) -> None:
+        """Plot the transformed space against the input space.
 
        Parameters
        ----------
-        ax : Axes
+        ax : plt.Axes
            The axes object to plot on.
        use_log_scale : bool, optional
            If True, use a logarithmic scale for the x-axis.
@@ -252,330 +268,328 @@
 
        x = self.untransform(y)
 
        ax.plot(x, y)
-
-        ax.set_xlabel('Input Space')
-        ax.set_ylabel('Transformed Space')
+        ax.set_xlabel("Input Space")
+        ax.set_ylabel("Transformed Space")
 
        if use_log_scale:
-            ax.set_xscale('log')
+            ax.set_xscale("log")
 
        self.allow_extended_input = allow_extended_input
 
-    def __str__(self):
+    def __str__(self) -> str:
        """Return the class name as a string."""
        return self.__class__.__name__
 
 
-class NoTransform(TransformBase):
-    """A class that implements no transformation.
+class NullTransformer(TransformerBase):
+    """A transformer that performs no transformation.
 
-    Returns the input values without any transformation.
+    This class simply returns the input values as output without modification.
 
    See Also
    --------
-    TransformBase : The base class for parameter transformation.
+    TransformerBase : The base class for parameter transformation.
    """
 
    @property
-    def is_linear(self):
+    def is_linear(self) -> bool:
+        """Return True, as this is a linear transformation."""
        return True
 
    @property
-    def lb(self):
-        """{float, array-like}: The lower bounds of the output parameter space."""
+    def lb(self) -> float | np.ndarray:
+        """Return the lower bound of the output space (same as input lower bound)."""
        return self.lb_input
 
    @property
-    def ub(self):
-        """{float, array-like}: The upper bounds of the output parameter space."""
+    def ub(self) -> float | np.ndarray:
+        """Return the upper bound of the output space (same as input upper bound)."""
        return self.ub_input
 
-    def _transform(self, x):
-        """Transform the input value to output value.
+    def _transform(self, x: float | np.ndarray) -> float | np.ndarray:
+        """Return the input value(s) as output without modification.
 
        Parameters
        ----------
-        x : {float, array-like}
+        x : float or np.ndarray
            The input value(s) to be transformed.
 
        Returns
        -------
-        {float, array-like}
-            The transformed output value(s).
+        float or np.ndarray
+            The transformed output value(s) (same as input).
        """
        return x
 
-    def _untransform(self, x):
-        """Untransform the output value to input value.
+    def _untransform(self, x: float | np.ndarray) -> float | np.ndarray:
+        """Return the output value(s) as input without modification.
 
        Parameters
        ----------
-        x : {float, array-like}
+        x : float or np.ndarray
            The output value(s) to be untransformed.
 
        Returns
        -------
-        {float, array-like}
-            The untransformed input value(s).
+        float or np.ndarray
+            The untransformed input value(s) (same as output).
        """
        return x
 
 
-class NormLinearTransform(TransformBase):
-    """A class that implements a normalized linear transformation.
+class NormLinearTransformer(TransformerBase):
+    """A transformer that normalizes values linearly to the range [0, 1].
- Transforms the input value to the range [0, 1] by normalizing it using - the lower and upper bounds of the input parameter space. + This transformation scales the input value between the given lower + and upper bounds into a normalized range of [0,1]. See Also -------- - TransformBase : The base class for parameter transformation. - + TransformerBase : The base class for parameter transformation. """ @property - def is_linear(self): + def is_linear(self) -> bool: + """Return True, as this is a linear transformation.""" return True @property - def lb(self): - """{float, array-like}: The lower bounds of the output parameter space.""" - return 0 + def lb(self) -> float: + """Return the lower bound of the output space (0).""" + return 0.0 @property - def ub(self): - """{float, array-like}: The upper bounds of the output parameter space.""" - return 1 + def ub(self) -> float: + """Return the upper bound of the output space (1).""" + return 1.0 - def _transform(self, x): - """Transform the input value to output value. + def _transform(self, x: float | np.ndarray) -> float | np.ndarray: + """Normalize input values to the range [0,1]. Parameters ---------- - x : {float, array-like} + x : float or np.ndarray The input value(s) to be transformed. Returns ------- - {float, array-like} - The transformed output value(s). + float or np.ndarray + The transformed output value(s) in the range [0,1]. """ return (x - self.lb_input) / (self.ub_input - self.lb_input) - def _untransform(self, x): - """Untransform the output value to input value. + def _untransform(self, x: float | np.ndarray) -> float | np.ndarray: + """Denormalize output values back to the original range. Parameters ---------- - x : {float, array-like} - The output value(s) to be untransformed. + x : float or np.ndarray + The output value(s) in the normalized range [0,1]. Returns ------- - {float, array-like} - The untransformed input value(s). + float or np.ndarray + The untransformed input value(s) in the original range. """ return (self.ub_input - self.lb_input) * x + self.lb_input -class NormLogTransform(TransformBase): - """A class that implements a normalized logarithmic transformation. +class NormLogTransformer(TransformerBase): + """A transformer that normalizes values logarithmically to the range [0, 1]. - Transforms the input value to the range [0, 1] using a logarithmic - transformation with the lower and upper bounds of the input parameter space. + This transformation scales input values logarithmically between the given lower + and upper bounds into a normalized range of [0,1]. See Also -------- - TransformBase : The base class for parameter transformation. - + TransformerBase : The base class for parameter transformation. """ @property - def is_linear(self): + def is_linear(self) -> bool: + """Return False, as this is a non-linear transformation.""" return False @property - def lb(self): - """{float, array-like}: The lower bounds of the output parameter space.""" - return 0 + def lb(self) -> float: + """Return the lower bound of the output space (0).""" + return 0.0 @property - def ub(self): - """{float, array-like}: The upper bounds of the output parameter space.""" - return 1 + def ub(self) -> float: + """Return the upper bound of the output space (1).""" + return 1.0 - def _transform(self, x): - """Transform the input value to output value. + def _transform(self, x: float | np.ndarray) -> float | np.ndarray: + """Normalize input values to the range [0,1] using a logarithmic transformation. 
         Parameters
         ----------
-        x : {float, array-like}
+        x : float or np.ndarray
             The input value(s) to be transformed.
 
         Returns
         -------
-        {float, array-like}
-            The transformed output value(s).
+        float or np.ndarray
+            The transformed output value(s) in the range [0,1].
+
+        Notes
+        -----
+        If `lb_input` is non-positive, all values are shifted into the positive
+        range before the logarithm is applied.
         """
         if self.lb_input <= 0:
             x_ = x + (abs(self.lb_input) + 1)
             ub = 1 + (self.ub_input - self.lb_input)
-            return np.log(x_) / (np.log(ub))
-
+            return np.log(x_) / np.log(ub)
         else:
-            return \
-                np.log(x/self.lb_input) / np.log(self.ub_input/self.lb_input)
+            return np.log(x / self.lb_input) / np.log(self.ub_input / self.lb_input)
 
-    def _untransform(self, x):
-        """Transform the input value to output value.
+    def _untransform(self, x: float | np.ndarray) -> float | np.ndarray:
+        """Denormalize output values back to the original range using the logarithmic inverse.
 
         Parameters
         ----------
-        x : {float, array-like}
-            The input value(s) to be transformed.
+        x : float or np.ndarray
+            The output value(s) in the normalized range [0,1].
 
         Returns
         -------
-        {float, array-like}
-            The transformed output value(s).
+        float or np.ndarray
+            The untransformed input value(s) in the original range.
         """
         if self.lb_input <= 0:
             return \
-                np.exp(x * np.log(self.ub_input - self.lb_input + 1)) \
-                + self.lb_input - 1
+                np.exp(x * np.log(self.ub_input - self.lb_input + 1)) + self.lb_input - 1
         else:
-            return \
-                self.lb_input * np.exp(x * np.log(self.ub_input/self.lb_input))
+            return self.lb_input * np.exp(x * np.log(self.ub_input / self.lb_input))
 
 
-class AutoTransform(TransformBase):
-    """A class that implements an automatic parameter transformation.
+class AutoTransformer(TransformerBase):
+    """
+    A transformer that automatically selects between linear and logarithmic transformations.
 
     Transforms the input value to the range [0, 1] using either
-    the :class:`NormLinearTransform` or the :class:`NormLogTransform`
+    the :class:`NormLinearTransformer` or the :class:`NormLogTransformer`
     based on the input parameter space.
 
     Attributes
     ----------
-    linear : :class:`NormLinearTransform`
+    linear : NormLinearTransformer
         Instance of the linear normalization transform.
-    log : :class:`NormLogTransform`
+    log : NormLogTransformer
         Instance of the logarithmic normalization transform.
+    threshold : int
+        The maximum threshold to switch from linear to logarithmic transformation.
 
     See Also
     --------
-    TransformBase
-    NormLinearTransform
-    NormLogTransform
-
+    TransformerBase
+    NormLinearTransformer
+    NormLogTransformer
     """
 
-    def __init__(self, *args, threshold=1000, **kwargs):
-        """Initialize an AutoTransform object.
+    def __init__(self, *args, threshold: int = 1000, **kwargs) -> NoReturn:
+        """Initialize an AutoTransformer object.
 
         Parameters
         ----------
         *args : tuple
-            Arguments for the :class:`TransformBase` class.
+            Arguments for the :class:`TransformerBase` class.
         threshold : int, optional
             The maximum threshold to switch from linear to logarithmic
             transformation.
             The default is 1000.
         **kwargs : dict
-            Keyword arguments for the :class:`TransformBase` class.
+            Keyword arguments for the :class:`TransformerBase` class.
""" - self.linear = NormLinearTransform() - self.log = NormLogTransform() + self.linear = NormLinearTransformer() + self.log = NormLogTransformer() super().__init__(*args, **kwargs) self.threshold = threshold + self.linear.allow_extended_input = self.allow_extended_input - self.linear.allow_extended_output = self.allow_extended_input + self.linear.allow_extended_output = self.allow_extended_output self.log.allow_extended_input = self.allow_extended_input - self.log.allow_extended_output = self.allow_extended_input + self.log.allow_extended_output = self.allow_extended_output @property - def is_linear(self): + def is_linear(self) -> bool: + """Return True if linear transformation is used, otherwise False.""" return self.use_linear @property - def use_linear(self): - """bool: Indicates whether linear transformation is used.""" + def use_linear(self) -> bool: + """Determine whether linear transformation should be used.""" if self.lb_input <= 0: - return \ - np.log10(self.ub_input - self.lb_input) \ - <= np.log10(self.threshold) - else: - return self.ub_input/self.lb_input <= self.threshold + return np.log10(self.ub_input - self.lb_input) < np.log10(self.threshold) + return (self.ub_input / self.lb_input) < self.threshold @property - def use_log(self): - """bool: Indicates whether logarithmic transformation is used.""" + def use_log(self) -> bool: + """Return True if logarithmic transformation is used, otherwise False.""" return not self.use_linear @property - def lb(self): - """{float, array-like}: The lower bounds of the output parameter space.""" - return 0 + def lb(self) -> float: + """Return the lower bound of the output parameter space (0).""" + return 0.0 @property - def ub(self): - """{float, array-like}: The upper bounds of the output parameter space.""" - return 1 + def ub(self) -> float: + """Return the upper bound of the output parameter space (1).""" + return 1.0 @property - def lb_input(self): - """{float, array-like}: The lower bounds of the input parameter space.""" + def lb_input(self) -> float | np.ndarray: + """Return the lower bounds of the input parameter space.""" return self._lb_input @lb_input.setter - def lb_input(self, lb_input): + def lb_input(self, lb_input: float | np.ndarray) -> NoReturn: + """Set the lower bounds of the input parameter space.""" self.linear.lb_input = lb_input self.log.lb_input = lb_input self._lb_input = lb_input @property - def ub_input(self): - """{float, array-like}: The upper bounds of the input parameter space.""" + def ub_input(self) -> float | np.ndarray: + """Return the upper bounds of the input parameter space.""" return self._ub_input @ub_input.setter - def ub_input(self, ub_input): + def ub_input(self, ub_input: float | np.ndarray) -> NoReturn: + """Set the upper bounds of the input parameter space.""" self.linear.ub_input = ub_input self.log.ub_input = ub_input self._ub_input = ub_input - def _transform(self, x): - """Transform the input value to output value. + def _transform(self, x: float | np.ndarray) -> float | np.ndarray: + """Transform the input value to an output value in the range [0, 1]. Parameters ---------- - x : {float, array-like} + x : float or np.ndarray The input value(s) to be transformed. Returns ------- - {float, array-like} + float or np.ndarray The transformed output value(s). 
""" - if self.use_log: - return self.log.transform(x) - else: - return self.linear.transform(x) + return self.log._transform(x) if self.use_log else self.linear._transform(x) - def _untransform(self, x): - """Untransforms the output value to input value. + def _untransform(self, x: float | np.ndarray) -> float | np.ndarray: + """Untransform the output value back to the input parameter space. Parameters ---------- - x : {float, array-like} - The output value(s) to be transformed. + x : float or np.ndarray + The output value(s) in the transformed range. Returns ------- - {float, array-like} - The untransformed output value(s). + float or np.ndarray + The untransformed input value(s). """ - if self.use_log: - return self.log.untransform(x) - else: - return self.linear.untransform(x) + return self.log._untransform(x) if self.use_log else self.linear._untransform(x) diff --git a/docs/source/user_guide/process_evaluation/fractionation.md b/docs/source/user_guide/process_evaluation/fractionation.md index d8a659140..1adc4b75a 100644 --- a/docs/source/user_guide/process_evaluation/fractionation.md +++ b/docs/source/user_guide/process_evaluation/fractionation.md @@ -138,9 +138,9 @@ To add a fractionation event, the following arguments need to be provided: Here, component $A$ seems to have sufficient purity between $5 \colon 00~min$ and $5 \colon 45~min$ and component $B$ between $6 \colon 30~min$ and $9 \colon 00~min$. ```{code-cell} ipython3 -fractionator.add_fractionation_event('start_A', 0, 5*60, 'outlet') +fractionator.add_fractionation_event('start_A', 'A', 5*60, 'outlet') fractionator.add_fractionation_event('end_A', -1, 5.75*60) -fractionator.add_fractionation_event('start_B', 1, 6.5*60) +fractionator.add_fractionation_event('start_B', 'B', 6.5*60) fractionator.add_fractionation_event('end_B', -1, 9*60) ``` diff --git a/docs/source/user_guide/process_model/reaction.md b/docs/source/user_guide/process_model/reaction.md index b1b3fcc70..857d3a63f 100644 --- a/docs/source/user_guide/process_model/reaction.md +++ b/docs/source/user_guide/process_model/reaction.md @@ -48,8 +48,8 @@ component_system = ComponentSystem(['A', 'B']) To instantiate it, pass the {class}`~CADETProcess.processModel.ComponentSystem`. Then, add the reaction using the {meth}`~CADETProcess.processModel.MassActionLaw.add_reaction` method. 
 The following arguments are expected:
-- indices: The indices of the components that take part in the reaction (useful for bigger systems)
-- stoichiometric coefficients in the order of the indices
+- components: The names of the components that take part in the reaction (useful for bigger systems)
+- stoichiometric coefficients, in the order of the components
 - forward reaction rate
 - backward reaction rate
 
@@ -57,7 +57,7 @@ The following arguments are expected:
 from CADETProcess.processModel import MassActionLaw
 reaction_system = MassActionLaw(component_system)
 reaction_system.add_reaction(
-    indices=[0,1],
+    components=['A', 'B'],
     coefficients=[-1, 1],
     k_fwd=0.1,
     k_bwd=0
@@ -88,7 +88,7 @@ $$
 ```{code-cell} ipython3
 reaction_system = MassActionLaw(component_system)
 reaction_system.add_reaction(
-    indices=[0,1],
+    components=['A', 'B'],
     coefficients=[-2, 1],
     k_fwd=0.2,
     k_bwd=0.1
diff --git a/docs/source/user_guide/simulator.md b/docs/source/user_guide/simulator.md
index 1dc0ad03c..ba0a29a9a 100644
--- a/docs/source/user_guide/simulator.md
+++ b/docs/source/user_guide/simulator.md
@@ -17,7 +17,6 @@ sys.path.append('../../../')
 ```
 
 (simulation_guide)=
-
 # Process Simulation
 
 To simulate a {class}`~CADETProcess.processModel.Process`, a simulator needs to be configured.
@@ -99,6 +98,13 @@ The {class}`~CADETProcess.simulator.ModelSolverParameters` stores general parame
 print(process_simulator.solver_parameters)
 ```
 
+For instance, simulations can sometimes take a long time to finish.
+To limit their runtime, set the `timeout` attribute.
+
+```
+process_simulator.solver_parameters.timeout = 300
+```
+
 For more information, see also {ref}`CADET Documentation`.
 
 ## Simulate Processes
@@ -117,16 +123,7 @@ process.add_parameter_sensitivity('column.total_porosity')
 simulation_results = process_simulator.simulate(process)
 ```
 
-Sometimes simulations can take a long time to finish.
-To limit their runtime, set the `timeout` attribute of the simulator.
-
-```
-process_simulator.timeout = 300
-simulation_results = process_simulator.simulate(process)
-```
-
 (simulation_results_guide)=
-
 ## Simulation Results
 
 The {class}`~CADETProcess.simulationResults.SimulationResults` object contains the results of the simulation.
@@ -189,7 +186,6 @@ _ = simulation_results.sensitivity['column.total_porosity'].column.outlet.plot()
 ```
 
 (stationarity_guide)=
-
 ## Cyclic Stationarity
 
 Preparative chromatographic separations are operated in a repetitive fashion.
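Since the `timeout` option moved from the simulator itself to `solver_parameters`, a minimal sketch of the updated usage may help; it assumes a configured `Process` named `process`, and that `SimulationResults` exposes the `exit_flag`/`exit_message` attributes used by the test helpers elsewhere in this change set:

```python
from CADETProcess.simulator import Cadet

process_simulator = Cadet()
# Abort CADET runs that exceed 5 minutes; a value of 0 is assumed to disable the limit.
process_simulator.solver_parameters.timeout = 300

simulation_results = process_simulator.simulate(process)  # `process` configured elsewhere
if simulation_results.exit_flag != 0:
    # e.g. the solver hit the timeout before completing the run
    print(f"Simulation failed: {simulation_results.exit_message}")
```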
diff --git a/docs/source/user_guide/tools/compartment_builder.md b/docs/source/user_guide/tools/compartment_builder.md index 6eaa0eeec..ce3218332 100644 --- a/docs/source/user_guide/tools/compartment_builder.md +++ b/docs/source/user_guide/tools/compartment_builder.md @@ -211,7 +211,7 @@ process_simulator = Cadet() process = builder_simple.process process.cycle_time = 100 -simulation_results = process_simulator.run(process) +simulation_results = process_simulator.simulate(process) ``` ## Visualization diff --git a/environment.yml b/environment.yml index 1829f5e0e..d22704977 100644 --- a/environment.yml +++ b/environment.yml @@ -4,4 +4,5 @@ channels: dependencies: - python>=3.10.* - cadet>=5.0.3 + - libsqlite<3.49 - pip diff --git a/examples/characterize_chromatographic_system/Yamamoto_method.md b/examples/characterize_chromatographic_system/Yamamoto_method.md index 607130f07..8995c91a3 100644 --- a/examples/characterize_chromatographic_system/Yamamoto_method.md +++ b/examples/characterize_chromatographic_system/Yamamoto_method.md @@ -22,7 +22,7 @@ root_dir = Path('../../../../').resolve() sys.path.append(root_dir.as_posix()) ``` -## The Yamamoto method +# The Yamamoto method This example demonstrates how to estimate SMA binding parameters based on multiple gradient elution chromatograms using the Yamamoto method. diff --git a/examples/characterize_chromatographic_system/Yamamoto_method.py b/examples/characterize_chromatographic_system/Yamamoto_method.py index 506d15f4d..f493eed78 100644 --- a/examples/characterize_chromatographic_system/Yamamoto_method.py +++ b/examples/characterize_chromatographic_system/Yamamoto_method.py @@ -21,7 +21,7 @@ sys.path.append(root_dir.as_posix()) # %% [markdown] -# ## The Yamamoto method +# # The Yamamoto method # # This example demonstrates how to estimate SMA binding parameters based on multiple gradient elution chromatograms # using the Yamamoto method. 
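For orientation, a minimal sketch of the workflow this example walks through, assuming measured gradient data arrays (`time_*`, `c_salt_*`, `c_protein_*`) and a `column` unit operation configured as in the example; the names follow `CADETProcess.tools.yamamoto`:

```python
from CADETProcess.tools.yamamoto import GradientExperiment, fit_parameters

# One GradientExperiment per gradient length (data arrays assumed to be given).
experiment_1 = GradientExperiment(time_1, c_salt_1, c_protein_1, gradient_volume_1)
experiment_2 = GradientExperiment(time_2, c_salt_2, c_protein_2, gradient_volume_2)

# Estimate the SMA characteristic charge and equilibrium constant from the
# peak salt concentrations of the individual gradient experiments.
yamamoto_results = fit_parameters([experiment_1, experiment_2], column)

print(yamamoto_results.characteristic_charge)
print(yamamoto_results.k_eq)
```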
diff --git a/pyproject.toml b/pyproject.toml index 6013bdf7d..d48487c32 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -27,7 +27,7 @@ dependencies = [ "cadet-python>=1.0.4", "corner>=2.2.1", "diskcache>=5.4.0", - "hopsy>=1.4.0", + "hopsy>=1.5.3", "joblib>=1.3.0", "numpy>=1.21", "matplotlib>=3.4", @@ -54,10 +54,13 @@ docs = [ "sphinx_copybutton>=0.5.1", "sphinx-sitemap>=2.5.0", ] - ax = [ "ax-platform >=0.3.5" ] +coverage = [ + "coverage", + "pytest-cov", +] [project.urls] homepage = "https://github.com/fau-advanced-separations/CADET-Process" @@ -78,3 +81,9 @@ line-length = 88 testpaths = [ "tests", ] +pythonpath = [ + "tests", +] +markers = [ + "slow: marks tests as slow (deselect with '-m \"not slow\"')", +] diff --git a/tests/dataStructure/test_nested_dict.py b/tests/dataStructure/test_nested_dict.py new file mode 100644 index 000000000..35b30b066 --- /dev/null +++ b/tests/dataStructure/test_nested_dict.py @@ -0,0 +1,156 @@ +import copy +import pytest + +from CADETProcess.dataStructure.nested_dict import ( + check_nested, + generate_nested_dict, + insert_path, + get_leaves, + set_nested_value, + get_nested_value, + update_dict_recursively, + get_nested_attribute, + set_nested_attribute, + get_nested_list_value, + set_nested_list_value, +) + + +@pytest.fixture +def sample_dict(): + """Fixture providing a sample nested dictionary.""" + return { + "a": { + "b": { + "c": 42 + } + }, + "x": { + "y": { + "z": 99 + } + } + } + + +@pytest.fixture +def sample_list(): + """Fixture providing a sample nested list.""" + return [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] + + +# --- TESTS FOR NESTED DICTIONARY FUNCTIONS --- # + +def test_check_nested(sample_dict): + assert check_nested(sample_dict, "a.b.c") is True + assert check_nested(sample_dict, "a.b") is False + assert check_nested(sample_dict, "x.y.z") is True + assert check_nested(sample_dict, "missing.path") is False + + +def test_generate_nested_dict(): + expected = {"a": {"b": {"c": 10}}} + assert generate_nested_dict("a.b.c", 10) == expected + + +def test_insert_path(sample_dict): + insert_path(sample_dict, "a.b.d", 100) + assert sample_dict["a"]["b"]["d"] == 100 + + insert_path(sample_dict, "new.path.here", 50) + assert sample_dict["new"]["path"]["here"] == 50 + + +def test_get_leaves(sample_dict): + leaves = set(get_leaves(sample_dict)) + assert leaves == {"a.b.c", "x.y.z"} + + +def test_set_nested_value(sample_dict): + set_nested_value(sample_dict, "a.b.c", 77) + assert sample_dict["a"]["b"]["c"] == 77 + + set_nested_value(sample_dict, "x.y.z", "test") + assert sample_dict["x"]["y"]["z"] == "test" + + +def test_get_nested_value(sample_dict): + assert get_nested_value(sample_dict, "a.b.c") == 42 + assert get_nested_value(sample_dict, "x.y.z") == 99 + + with pytest.raises(KeyError): + get_nested_value(sample_dict, "missing.path") + + +# --- TESTS FOR DICTIONARY UPDATING --- # + +def test_update_dict_recursively(sample_dict): + target = {"a": {"b": 1}, "c": 3} + updates = {"a": {"b": 2, "d": 4}, "c": 30, "e": 5} + + updated = update_dict_recursively(copy.deepcopy(target), updates) + assert updated == {"a": {"b": 2, "d": 4}, "c": 30, "e": 5} + + updated_existing = update_dict_recursively( + copy.deepcopy(target), updates, only_existing_keys=True + ) + assert updated_existing == {"a": {"b": 2}, "c": 30} + + +# --- TESTS FOR OBJECT ATTRIBUTE FUNCTIONS --- # + +class SampleObject: + def __init__(self): + self.a = SampleSubObject() + + +class SampleSubObject: + def __init__(self): + self.b = SampleInnerObject() + + +class SampleInnerObject: + def 
__init__(self): + self.c = 42 + + +@pytest.fixture +def sample_obj(): + return SampleObject() + + +def test_get_nested_attribute(sample_obj): + assert get_nested_attribute(sample_obj, "a.b.c") == 42 + + with pytest.raises(AttributeError): + get_nested_attribute(sample_obj, "a.b.d") + + +def test_set_nested_attribute(sample_obj): + set_nested_attribute(sample_obj, "a.b.c", 99) + assert sample_obj.a.b.c == 99 + + with pytest.raises(AttributeError): + set_nested_attribute(sample_obj, "a.c.b", 50) + + +# --- TESTS FOR NESTED LIST FUNCTIONS --- # + +def test_get_nested_list_value(sample_list): + assert get_nested_list_value(sample_list, (0, 1, 1)) == 4 + assert get_nested_list_value(sample_list, (1, 0, 1)) == 6 + + with pytest.raises(IndexError): + get_nested_list_value(sample_list, (3, 2, 1)) + + +def test_set_nested_list_value(sample_list): + set_nested_list_value(sample_list, (0, 1, 1), 99) + assert sample_list[0][1][1] == 99 + + with pytest.raises(IndexError): + set_nested_list_value(sample_list, (3, 2, 1), 100) + + +if __name__ == "__main__": + pytest.main([__file__]) diff --git a/tests/optimization_problem_fixtures.py b/tests/optimization_problem_fixtures.py index e8f056835..b97e526a9 100644 --- a/tests/optimization_problem_fixtures.py +++ b/tests/optimization_problem_fixtures.py @@ -11,7 +11,6 @@ import numpy as np from CADETProcess.optimization import OptimizationProblem, OptimizationResults -from CADETProcess.transform import NormLinearTransform, NormLogTransform __all__ = [ 'Rosenbrock', @@ -76,6 +75,9 @@ def allow_test_failure_percentage( class TestProblem(OptimizationProblem): + # To prevent Pytest interpreting this class as test: + __test__ = False + @property def optimal_solution(self): """Must return X, F, and if it has, G.""" @@ -153,10 +155,20 @@ def test_if_solved( class LinearConstraintsSooTestProblem(TestProblem): - def __init__(self, transform=None, has_evaluator=False, *args, **kwargs): + def __init__( + self, + transform=None, + has_evaluator=False, + significant_digits=None, + *args, + **kwargs + ): self.test_abs_tol = 0.1 super().__init__('linear_constraints_single_objective', *args, **kwargs) - self.setup_variables(transform=transform) + self.setup_variables( + transform=transform, + significant_digits=significant_digits + ) self.setup_linear_constraints() if has_evaluator: eval_fun = lambda x: x @@ -168,10 +180,28 @@ def __init__(self, transform=None, has_evaluator=False, *args, **kwargs): else: self.add_objective(self._objective_function) - def setup_variables(self, transform): - self.add_variable('var_0', lb=-2, ub=2, transform=transform) - self.add_variable('var_1', lb=-2, ub=2, transform=transform) - self.add_variable('var_2', lb=0, ub=2, transform="log") + def setup_variables(self, transform, significant_digits=None): + self.add_variable( + 'var_0', + lb=-2, + ub=2, + transform=transform, + significant_digits=significant_digits, + ) + self.add_variable( + 'var_1', + lb=-2, + ub=2, + transform=transform, + significant_digits=significant_digits, + ) + self.add_variable( + 'var_2', + lb=0, + ub=2, + transform="log", + significant_digits=significant_digits, + ) def setup_linear_constraints(self): self.add_linear_constraint(['var_0', 'var_1'], [-1, -0.5], 0) @@ -350,10 +380,23 @@ def __init__( self.setup_linear_constraints() self.add_objective(self._objective_function) - def setup_variables(self: OptimizationProblem, transform=None): - self.add_variable('var_0', lb=-5, ub=5, transform=transform) - self.add_variable('var_1', lb=-5, ub=5, transform=transform) - 
self.add_variable('var_2', lb=-5, ub=5, transform=transform) + def setup_variables( + self: OptimizationProblem, + transform=None, + significant_digits=None + ): + self.add_variable( + 'var_0', lb=-5, ub=5, + transform=transform, significant_digits=significant_digits + ) + self.add_variable( + 'var_1', lb=-5, ub=5, + transform=transform, significant_digits=significant_digits + ) + self.add_variable( + 'var_2', lb=-5, ub=5, + transform=transform, significant_digits=significant_digits + ) def setup_linear_constraints(self): self.add_linear_equality_constraint( diff --git a/tests/test_binding.py b/tests/test_binding.py index ae0f9a4fa..84b6b4712 100644 --- a/tests/test_binding.py +++ b/tests/test_binding.py @@ -11,9 +11,6 @@ class Test_Binding(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): component_system = ComponentSystem(2) diff --git a/tests/test_buffer_capacity.py b/tests/test_buffer_capacity.py index 3dced6e66..31920447b 100644 --- a/tests/test_buffer_capacity.py +++ b/tests/test_buffer_capacity.py @@ -11,9 +11,6 @@ class TestBufferCapacity(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): self.components_simple = ComponentSystem(2, charges=[1, 2]) diff --git a/tests/test_cache.py b/tests/test_cache.py index d44da455a..16f8fb49e 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -4,8 +4,6 @@ class TestCache(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def setUp(self): self.cache_dict = ResultsCache(use_diskcache=False) diff --git a/tests/test_cadet_adapter.py b/tests/test_cadet_adapter.py index 78df4c9e1..d5dae964b 100644 --- a/tests/test_cadet_adapter.py +++ b/tests/test_cadet_adapter.py @@ -1,13 +1,10 @@ from pathlib import Path -import platform import shutil import unittest -import warnings from typing import Optional import pytest import numpy as np import numpy.testing as npt -from itertools import product from tests.create_LWE import create_lwe @@ -35,9 +32,6 @@ def detect_cadet(install_path: Optional[Path] = None): class Test_Adapter(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - @unittest.skipIf(found_cadet is False, "Skip if CADET is not installed.") def test_check_cadet(self): simulator = Cadet(install_path) @@ -48,7 +42,14 @@ def test_check_cadet(self): cwd = simulator.temp_dir sim = simulator.get_new_cadet_instance() sim.create_lwe(cwd / file_name) - sim.run() + + # Check for CADET-Python >= v1.1, which introduced the .run_simulation interface. + # If it's not present, assume CADET-Python <= 1.0.4 and use the old .run_load() interface + # This check can be removed at some point in the future. + if hasattr(sim, "run_simulation"): + return_information = sim.run_simulation() + else: + return_information = sim.run_load() @unittest.skipIf(found_cadet is False, "Skip if CADET is not installed.") def test_create_lwe(self): @@ -58,7 +59,14 @@ def test_create_lwe(self): cwd = simulator.temp_dir sim = simulator.get_new_cadet_instance() sim.create_lwe(cwd / file_name) - sim.run() + + # Check for CADET-Python >= v1.1, which introduced the .run_simulation interface. + # If it's not present, assume CADET-Python <= 1.0.4 and use the old .run_load() interface + # This check can be removed at some point in the future. 
+ if hasattr(sim, "run_simulation"): + return_information = sim.run_simulation() + else: + return_information = sim.run_load() def tearDown(self): shutil.rmtree('./tmp', ignore_errors=True) @@ -85,20 +93,51 @@ def tearDown(self): } -test_cases = [ + +# Helper function to format parameters +def format_params(params): + if not params: + return "default" + return '-'.join(f"{k}={v}" for k, v in params.items()) + + +process_test_cases = [ pytest.param( (unit_type, params), - id=f"{unit_type}-{'default' if not params else '-'.join(f'{k}={v}' for k, v in params.items())}" + id=f"{unit_type}-{format_params(params)}", + ) + for unit_type in unit_types + for params in parameter_combinations + if not (unit_type in exclude_rules and params in exclude_rules[unit_type]) +] + +use_dll_options = [True, False] + +simulation_test_cases = [ + pytest.param( + (unit_type, params, use_dll), + id=f"{unit_type}-{format_params(params)}-dll={use_dll}", ) for unit_type in unit_types for params in parameter_combinations + for use_dll in use_dll_options if not (unit_type in exclude_rules and params in exclude_rules[unit_type]) ] + +simple_test_case = [ + pytest.param( + (use_dll, ), id=f"dll={use_dll}", + ) + for use_dll in use_dll_options +] + + def run_simulation( process: Process, - install_path: Optional[str] = None - ) -> SimulationResults: + install_path: Optional[str] = None, + use_dll: bool = False +) -> SimulationResults: """ Run the CADET simulation for the given process and handle potential issues. @@ -121,6 +160,7 @@ def run_simulation( """ try: process_simulator = Cadet(install_path) + process_simulator.use_dll = use_dll simulation_results = process_simulator.simulate(process) if not simulation_results.exit_flag == 0: @@ -147,14 +187,18 @@ def process(request: pytest.FixtureRequest): @pytest.fixture def simulation_results(request: pytest.FixtureRequest): """ - Fixture to set up the simulation for each unit type. + Fixture to set up the simulation for each unit type with different `use_dll` options. 
""" - unit_type, kwargs = request.param - process = create_lwe(unit_type, **kwargs) - simulation_results = run_simulation(process, install_path) + unit_type, kwargs, use_dll = request.param # Extract `use_dll` + process = create_lwe(unit_type, **kwargs) # Process remains unchanged + simulation_results = run_simulation( + process, install_path, use_dll=use_dll + ) return simulation_results -@pytest.mark.parametrize("process", test_cases, indirect=True) + +@pytest.mark.parametrize("process", process_test_cases, indirect=True) +@pytest.mark.slow class TestProcessWithLWE: def return_process_config(self, process: Process) -> dict: @@ -296,7 +340,7 @@ def check_general_rate_model(self, unit, unit_config): assert unit_config.INIT_C == n_comp * [0] assert unit_config.INIT_CP == n_comp * [0] assert unit_config.VELOCITY == unit.flow_direction - assert unit_config.COL_DISPERSION == 5.75e-08 + assert unit_config.COL_DISPERSION == n_comp * [5.75e-08] assert unit_config.CROSS_SECTION_AREA == np.pi * 0.01 ** 2 assert unit_config.COL_LENGTH == 0.014 assert unit_config.COL_POROSITY == 0.37 @@ -322,7 +366,7 @@ def check_tubular_reactor(self, unit, unit_config): assert unit_config.UNIT_TYPE == 'LUMPED_RATE_MODEL_WITHOUT_PORES' assert unit_config.INIT_C == n_comp * [0] assert unit_config.VELOCITY == unit.flow_direction - assert unit_config.COL_DISPERSION == 5.75e-08 + assert unit_config.COL_DISPERSION == n_comp * [5.75e-08] assert unit_config.CROSS_SECTION_AREA == np.pi * 0.01 ** 2 assert unit_config.COL_LENGTH == 0.014 assert unit_config.TOTAL_POROSITY == 1 @@ -345,7 +389,7 @@ def check_lumped_rate_model_without_pores(self, unit, unit_config): assert unit_config.UNIT_TYPE == 'LUMPED_RATE_MODEL_WITHOUT_PORES' assert unit_config.INIT_C == n_comp * [0] assert unit_config.VELOCITY == unit.flow_direction - assert unit_config.COL_DISPERSION == 5.75e-08 + assert unit_config.COL_DISPERSION == n_comp * [5.75e-08] assert unit_config.CROSS_SECTION_AREA == np.pi * 0.01 ** 2 assert unit_config.COL_LENGTH == 0.014 assert unit_config.TOTAL_POROSITY == 0.8425 @@ -371,7 +415,7 @@ def check_lumped_rate_model_with_pores(self, unit, unit_config): assert unit_config.INIT_Q == n_comp * [0] assert unit_config.INIT_CP == n_comp * [0] assert unit_config.VELOCITY == unit.flow_direction - assert unit_config.COL_DISPERSION == 5.75e-08 + assert unit_config.COL_DISPERSION == n_comp * [5.75e-08] assert unit_config.CROSS_SECTION_AREA == np.pi * 0.01 ** 2 assert unit_config.COL_LENGTH == 0.014 assert unit_config.COL_POROSITY == 0.37 @@ -406,9 +450,10 @@ def check_mct(self, unit, unit_config): [n_comp * [0.0], n_comp * [0.0], n_comp * [0.0]] ]) ) - assert unit_config.COL_DISPERSION == 5.75e-08 + assert unit_config.COL_DISPERSION == n_comp * n_channel * [5.75e-08] assert unit_config.NCHANNEL == 3 - assert unit_config.CHANNEL_CROSS_SECTION_AREAS == 3 * [2 * np.pi * (0.01 ** 2)] + assert unit_config.CHANNEL_CROSS_SECTION_AREAS == 3 * \ + [2 * np.pi * (0.01 ** 2)] self.check_discretization(unit, unit_config) @@ -509,6 +554,7 @@ def test_solver_config(self, process: Process): 'nthreads': 1, 'consistent_init_mode': 1, 'consistent_init_mode_sens': 1, + 'timeout': 0, 'user_solution_times': np.arange(0.0, 120 * 60 + 1), 'sections': { 'nsec': 3, @@ -565,7 +611,8 @@ def test_sensitivity_config(self, process: Process): npt.assert_equal(sensitivity_config, expected_sensitivity_config) -@pytest.mark.parametrize("simulation_results", test_cases, indirect=True) +@pytest.mark.parametrize("simulation_results", simulation_test_cases, indirect=True) 
+@pytest.mark.slow
 class TestResultsWithLWE:
     def test_trigger_simulation(self, simulation_results):
         """
@@ -603,7 +650,8 @@ def test_compare_solution_shape(self, simulation_results):
         # for units with ports
         else:
             # assert solution inlet is given for each port
-            assert len(simulation_results.solution[unit.name].inlet) == unit.n_ports
+            assert len(
+                simulation_results.solution[unit.name].inlet) == unit.n_ports
             # assert solution for channel 0 has shape (t, n_comp)
             assert simulation_results.solution[unit.name].inlet.channel_0.solution_shape == (
                 int(process.cycle_time+1), process.component_system.n_comp
@@ -656,5 +704,26 @@ def test_compare_solution_shape(self, simulation_results):
             )
 
 
+@pytest.mark.parametrize(
+    "process, use_dll",
+    [
+        (("LumpedRateModelWithPores", {}), True),
+        (("LumpedRateModelWithPores", {}), False),
+    ],
+    indirect=["process"]
+)
+def test_timeout(process, use_dll):
+    simulator = Cadet(use_dll=use_dll)
+    # The timeout option is assumed to require CADET >= 5.1; skip older versions.
+    # Parse the dotted version string rather than indexing into it.
+    major, minor = (int(v) for v in str(simulator.version).split(".")[:2])
+    if (major, minor) < (5, 1):
+        return
+
+    simulator.solver_parameters.timeout = 0.1
+
+    simulation_results = simulator.simulate(process)
+
+
 if __name__ == "__main__":
     pytest.main([__file__])
diff --git a/tests/test_carousel.py b/tests/test_carousel.py
index 6e643ad90..824507dbb 100644
--- a/tests/test_carousel.py
+++ b/tests/test_carousel.py
@@ -12,9 +12,6 @@
 
 class Test_Carousel(unittest.TestCase):
 
-    def __init__(self, methodName='runTest'):
-        super().__init__(methodName)
-
     def setUp(self):
         self.component_system = ComponentSystem(2)
 
diff --git a/tests/test_compartment.py b/tests/test_compartment.py
index 62a4074e2..ba68bea86 100644
--- a/tests/test_compartment.py
+++ b/tests/test_compartment.py
@@ -9,9 +9,6 @@
 
 class Test_CompartmentBuilder(unittest.TestCase):
 
-    def __init__(self, methodName='runTest'):
-        super().__init__(methodName)
-
     def setUp(self):
         self.component_system = ComponentSystem(2)
 
diff --git a/tests/test_deprecation.py b/tests/test_deprecation.py
new file mode 100644
index 000000000..a4bb52e6c
--- /dev/null
+++ b/tests/test_deprecation.py
@@ -0,0 +1,74 @@
+import unittest
+
+# Import the functions to test
+from CADETProcess.dataStructure import deprecated_alias, rename_kwargs
+
+class TestDeprecatedAlias(unittest.TestCase):
+
+    def test_basic_functionality(self):
+        @deprecated_alias(old_arg='new_arg')
+        def example_func(new_arg):
+            return new_arg
+
+        # Test with new argument name
+        self.assertEqual(example_func(new_arg=42), 42)
+
+        # Test with old argument name
+        with self.assertWarns(DeprecationWarning) as warning_context:
+            result = example_func(old_arg=42)
+            self.assertEqual(result, 42)
+
+        # Verify warning message
+        warning_message = str(warning_context.warning)
+        self.assertIn("`old_arg` is deprecated", warning_message)
+        self.assertIn("use `new_arg` instead", warning_message)
+
+
+    def test_argument_conflict(self):
+        """Test error when both old and new argument names are used."""
+        @deprecated_alias(old_arg='new_arg')
+        def example_func(new_arg):
+            return new_arg
+
+        # Should raise TypeError when both old and new names are used
+        with self.assertRaises(TypeError) as context:
+            example_func(old_arg=1, new_arg=2)
+
+        self.assertIn("received both old_arg and new_arg as arguments",
+                      str(context.exception))
+
+    def test_rename_kwargs_direct(self):
+        """Test rename_kwargs function directly."""
+        kwargs = {'old_arg': 42}
+        aliases = {'old_arg': 'new_arg'}
+
+        with self.assertWarns(DeprecationWarning):
+            rename_kwargs('test_func', kwargs, aliases)
+
+        self.assertIn('new_arg', kwargs)
+        self.assertNotIn('old_arg',
kwargs) + self.assertEqual(kwargs['new_arg'], 42) + + def test_positional_args(self): + """Test that positional arguments work normally with the decorator.""" + @deprecated_alias(old_kwarg='new_kwarg') + def example_func(pos_arg, new_kwarg=None): + return f"{pos_arg}-{new_kwarg}" + + # Test with positional argument + self.assertEqual(example_func("test", new_kwarg=42), "test-42") + + # Test with old keyword argument + with self.assertWarns(DeprecationWarning): + self.assertEqual(example_func("test", old_kwarg=42), "test-42") + + def test_no_arguments(self): + """Test function with no arguments still works with decorator.""" + @deprecated_alias(old_arg='new_arg') + def example_func(): + return "success" + + self.assertEqual(example_func(), "success") + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_difference.py b/tests/test_difference.py index 6a61b6d63..8a9c1766d 100644 --- a/tests/test_difference.py +++ b/tests/test_difference.py @@ -40,8 +40,6 @@ from CADETProcess.comparison import SSE class TestSSE(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def test_metric(self): # Compare with itself @@ -76,8 +74,6 @@ def test_metric(self): from CADETProcess.comparison import NRMSE class TestNRMSE(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def test_metric(self): # Compare with itself @@ -112,8 +108,6 @@ def test_metric(self): from CADETProcess.comparison import PeakHeight class TestPeakHeight(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def setUp(self): # 2 Components, gaussian peaks, constant flow @@ -202,8 +196,6 @@ def test_metric(self): from CADETProcess.comparison import PeakPosition class TestPeakPosition(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def setUp(self): # 2 Components, gaussian peaks, constant flow @@ -275,8 +267,6 @@ def test_metric(self): from CADETProcess.comparison import Shape class TestShape(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def setUp(self): # 2 Components, gaussian peaks, constant flow @@ -303,7 +293,6 @@ def test_metric(self): use_derivative=False, components=['A'], normalize_metrics=False, - include_height=False, ) metrics_expected = [0, 0] metrics = difference.evaluate(self.reference) @@ -315,31 +304,17 @@ def test_metric(self): use_derivative=False, components=['A'], normalize_metrics=False, - include_height=False, ) metrics_expected = [5.5511151e-16, 10] metrics = difference.evaluate(self.reference) np.testing.assert_almost_equal(metrics, metrics_expected) - # Compare with other Gauss Peak, include height - difference = Shape( - self.reference_switched, - use_derivative=False, - components=['A'], - normalize_metrics=False, - include_height=True, - ) - metrics_expected = [5.5511151e-16, 10, 0] - metrics = difference.evaluate(self.reference) - np.testing.assert_almost_equal(metrics, metrics_expected) - # Compare with other Gauss Peak, normalize_metrics difference = Shape( self.reference_switched, use_derivative=False, components=['A'], normalize_metrics=True, - include_height=False, ) metrics_expected = [0, 4.6211716e-01] metrics = difference.evaluate(self.reference) @@ -351,31 +326,17 @@ def test_metric(self): use_derivative=True, components=['A'], normalize_metrics=False, - include_height=False, ) metrics_expected = [0, 10, 0, ] metrics = difference.evaluate(self.reference) 
np.testing.assert_almost_equal(metrics, metrics_expected) - # Compare with other Gauss Peak, include derivative and height - difference = Shape( - self.reference_switched, - use_derivative=True, - components=['A'], - normalize_metrics=False, - include_height=True, - ) - metrics_expected = [0, 10, 0, 0, 0, 0] - metrics = difference.evaluate(self.reference) - np.testing.assert_almost_equal(metrics, metrics_expected) - # Compare with other Gauss Peak, include derivative, normalize metrics difference = Shape( self.reference_switched, use_derivative=True, components=['A'], normalize_metrics=True, - include_height=False, ) metrics_expected = [0, 4.6211716e-01, 0] metrics = difference.evaluate(self.reference) @@ -396,8 +357,6 @@ def test_metric(self): class TestFractionation(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def setUp(self): fraction_1 = Fraction( diff --git a/tests/test_equilibrium.py b/tests/test_equilibrium.py index c1a9b6e8b..20ec842f5 100644 --- a/tests/test_equilibrium.py +++ b/tests/test_equilibrium.py @@ -10,8 +10,6 @@ class TestReactionEquilibrium(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def setUp(self): component_system = ComponentSystem(2) @@ -171,8 +169,6 @@ def test_reaction_equilibrium(self): class TestAdsorptionEquilibrium(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def setUp(self): component_system_mono = ComponentSystem() diff --git a/tests/test_events.py b/tests/test_events.py index 96c1fdcff..ded6eb982 100644 --- a/tests/test_events.py +++ b/tests/test_events.py @@ -11,7 +11,7 @@ Notes ----- Since the EventHandler defines an interface, that requires the -implementation of some methods, a TestHandler class is defined. +implementation of some methods, a HandlerFixture class is defined. Maybe this is too complicated, just use Process instead? """ @@ -31,7 +31,8 @@ plot = True -class TestPerformer(Structure): +class PerformerFixture(Structure): + n_entries = 4 n_coeff = 4 @@ -79,10 +80,11 @@ def section_dependent_parameters(self): return parameters -class TestHandler(CADETProcess.dynamicEvents.EventHandler): +class HandlerFixture(CADETProcess.dynamicEvents.EventHandler): + def __init__(self): self.name = None - self.performer = TestPerformer() + self.performer = PerformerFixture() super().__init__() @property @@ -100,7 +102,7 @@ def parameters(self, parameters): except KeyError: pass - super(TestHandler, self.__class__).parameters.fset(self, parameters) + super(HandlerFixture, self.__class__).parameters.fset(self, parameters) @property def section_dependent_parameters(self): @@ -117,11 +119,8 @@ def polynomial_parameters(self): class Test_Events(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setup_event_handler(self, add_events=False): - event_handler = TestHandler() + event_handler = HandlerFixture() event_handler.cycle_time = 20 if add_events: diff --git a/tests/test_flow_sheet.py b/tests/test_flow_sheet.py index 1e19563f7..100963e73 100644 --- a/tests/test_flow_sheet.py +++ b/tests/test_flow_sheet.py @@ -10,39 +10,38 @@ from CADETProcess.processModel import FlowSheet -def assert_almost_equal_dict( - dict_actual, dict_expected, decimal=7, verbose=True): - """Helper function to assert nested dicts are (almost) equal. - - Because of floating point calculations, it is necessary to use - `np.assert_almost_equal` to check the flow rates. 
However, this does not - work well with nested dicts which is why this helper function was written. - - Parameters - ---------- - dict_actual : dict - The object to check. - dict_expected : dict - The expected object. - decimal : int, optional - Desired precision, default is 7. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - """ - for key in dict_actual: - if isinstance(dict_actual[key], dict): - assert_almost_equal_dict(dict_actual[key], dict_expected[key]) - - else: - np.testing.assert_almost_equal( - dict_actual[key], dict_expected[key], - decimal=decimal, - err_msg=f'Dicts are not equal in key {key}.', - verbose=verbose - ) +def assert_almost_equal_dict(dict_actual, dict_expected, decimal=7, verbose=True): + """Helper function to assert nested dicts are (almost) equal. + + Because of floating point calculations, it is necessary to use + `np.assert_almost_equal` to check the flow rates. However, this does not + work well with nested dicts which is why this helper function was written. + + Parameters + ---------- + dict_actual : dict + The object to check. + dict_expected : dict + The expected object. + decimal : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + """ + for key in dict_actual: + if isinstance(dict_actual[key], dict): + assert_almost_equal_dict(dict_actual[key], dict_expected[key]) + + else: + np.testing.assert_almost_equal( + dict_actual[key], dict_expected[key], + decimal=decimal, + err_msg=f'Dicts are not equal in key {key}.', + verbose=verbose + ) def setup_single_cstr_flow_sheet(component_system=None): @@ -161,9 +160,6 @@ def setup_ssr_flow_sheet(component_system=None): class TestFlowSheet(unittest.TestCase): """Test general functionatlity of `FlowSheet` class.""" - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): self.component_system = ComponentSystem(2) @@ -210,7 +206,7 @@ def test_connections(self): 'origins': None, 'destinations': { None: { - cstr:[None], + cstr: [None], }, }, }, @@ -218,7 +214,7 @@ def test_connections(self): 'origins': None, 'destinations': { None: { - column:[None], + column: [None], }, }, }, @@ -226,13 +222,13 @@ def test_connections(self): cstr: { 'origins': { None: { - feed:[None], - column:[None], + feed: [None], + column: [None], }, }, 'destinations': { None: { - column:[None], + column: [None], }, }, }, @@ -240,112 +236,22 @@ def test_connections(self): column: { 'origins': { None: { - cstr:[None], - eluent:[None], + cstr: [None], + eluent: [None], }, }, 'destinations': { None: { - cstr:[None], - outlet:[None], + cstr: [None], + outlet: [None], }, }, }, outlet: { - 'origins':{ - None: { - column:[None], - }, - }, - - 'destinations': None, - }, - } - - self.assertDictEqual( - self.ssr_flow_sheet.connections, expected_connections - ) - - self.assertTrue(self.ssr_flow_sheet.connection_exists(feed, cstr)) - self.assertTrue(self.ssr_flow_sheet.connection_exists(eluent, column)) - self.assertTrue(self.ssr_flow_sheet.connection_exists(column, outlet)) - - self.assertFalse(self.ssr_flow_sheet.connection_exists(feed, eluent)) - - def test_name_decorator(self): - feed = Inlet(self.component_system, name='feed') - eluent = Inlet(self.component_system, name='eluent') - cstr = Cstr(self.component_system, 
name='cstr') - column = LumpedRateModelWithoutPores( - self.component_system, name='column' - ) - outlet = Outlet(self.component_system, name='outlet') - - flow_sheet = FlowSheet(self.component_system) - - flow_sheet.add_unit(feed) - flow_sheet.add_unit(eluent) - flow_sheet.add_unit(cstr) - flow_sheet.add_unit(column) - flow_sheet.add_unit(outlet) - - flow_sheet.add_connection('feed', 'cstr') - flow_sheet.add_connection(cstr, column) - flow_sheet.add_connection(eluent, 'column') - flow_sheet.add_connection(column, cstr) - flow_sheet.add_connection('column', outlet) - expected_connections = { - feed: { - 'origins': None, - 'destinations': { - 0: { - cstr:[0], - }, - }, - }, - eluent: { - 'origins': None, - 'destinations': { - 0: { - column:[0], - }, - }, - }, - - cstr: { 'origins': { - 0: { - feed:[0], - column:[0], - }, - }, - 'destinations': { - 0: { - column:[0], - }, - }, - }, - - column: { - 'origins': { - 0: { - cstr:[0], - eluent:[0], - }, - }, - 'destinations': { - 0: { - cstr:[0], - outlet:[0], - }, - }, - }, - - outlet: { - 'origins':{ - 0: { - column:[0], + None: { + column: [None], }, }, @@ -391,7 +297,7 @@ def test_name_decorator(self): 'origins': None, 'destinations': { None: { - cstr:[None], + cstr: [None], }, }, }, @@ -399,7 +305,7 @@ def test_name_decorator(self): 'origins': None, 'destinations': { None: { - column:[None], + column: [None], }, }, }, @@ -407,13 +313,13 @@ def test_name_decorator(self): cstr: { 'origins': { None: { - feed:[None], - column:[None], + feed: [None], + column: [None], }, }, 'destinations': { None: { - column:[None], + column: [None], }, }, }, @@ -421,22 +327,22 @@ def test_name_decorator(self): column: { 'origins': { None: { - cstr:[None], - eluent:[None], + cstr: [None], + eluent: [None], }, }, 'destinations': { None: { - outlet:[None], - cstr:[None], + outlet: [None], + cstr: [None], }, }, }, outlet: { - 'origins':{ + 'origins': { None: { - column:[None], + column: [None], }, }, @@ -466,7 +372,7 @@ def test_flow_rates(self): expected_flow_rates = { 'feed': { - 'total_out':{ + 'total_out': { None: (0, 0, 0, 0), }, @@ -648,7 +554,7 @@ def test_flow_rates(self): 'outlet': { 'origins': { None: { - 'column':{ + 'column': { None: (1.0, 0, 0, 0), }, }, @@ -870,10 +776,10 @@ def test_flow_rates(self): expected_flow_rates = { 'cstr': { 'total_in': { - None:[0.0, 0.0, 0.0, 0.0], + None: [0.0, 0.0, 0.0, 0.0], }, 'total_out': { - None:[0.0, 0.0, 0.0, 0.0] + None: [0.0, 0.0, 0.0, 0.0] }, 'origins': {}, 'destinations': {} @@ -894,7 +800,6 @@ def test_check_connectivity(self): with self.assertWarns(Warning): self.batch_flow_sheet.check_connections() - def test_output_state(self): column = self.ssr_flow_sheet.column @@ -981,7 +886,7 @@ def test_add_connection_error(self): self.ssr_flow_sheet.add_connection(inlet, column) with self.assertRaisesRegex(CADETProcessError, "not a port of"): - self.ssr_flow_sheet.set_output_state(column, [0.5,0.5], 'channel_5') + self.ssr_flow_sheet.set_output_state(column, [0.5, 0.5], 'channel_5') class TestCstrFlowRate(unittest.TestCase): @@ -995,9 +900,6 @@ class TestCstrFlowRate(unittest.TestCase): is set, it has properties similar to an `Inlet`. """ - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): self.component_system = ComponentSystem(2) @@ -1099,9 +1001,8 @@ def test_state_update(self): cstr_out_expected = [2., 2., 0., 0.] 
np.testing.assert_almost_equal(cstr_out, cstr_out_expected) + class TestPorts(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def setUp(self): self.setup_mct_flow_sheet() @@ -1114,9 +1015,9 @@ def setup_mct_flow_sheet(self): mct_flow_sheet = FlowSheet(self.component_system) inlet = Inlet(self.component_system, name='inlet') - mct_3c = MCT(self.component_system,nchannel=3, name='mct_3c') - mct_2c1 = MCT(self.component_system,nchannel=2, name='mct_2c1') - mct_2c2 = MCT(self.component_system,nchannel=2, name='mct_2c2') + mct_3c = MCT(self.component_system, nchannel=3, name='mct_3c') + mct_2c1 = MCT(self.component_system, nchannel=2, name='mct_2c1') + mct_2c2 = MCT(self.component_system, nchannel=2, name='mct_2c2') outlet1 = Outlet(self.component_system, name='outlet1') outlet2 = Outlet(self.component_system, name='outlet2') @@ -1128,14 +1029,19 @@ def setup_mct_flow_sheet(self): mct_flow_sheet.add_unit(outlet2) mct_flow_sheet.add_connection(inlet, mct_3c, destination_port='channel_0') - mct_flow_sheet.add_connection(mct_3c, mct_2c1, origin_port='channel_0', destination_port='channel_0') - mct_flow_sheet.add_connection(mct_3c, mct_2c1, origin_port='channel_0', destination_port='channel_1') - mct_flow_sheet.add_connection(mct_3c, mct_2c2, origin_port='channel_1', destination_port='channel_0') + mct_flow_sheet.add_connection( + mct_3c, mct_2c1, origin_port='channel_0', destination_port='channel_0' + ) + mct_flow_sheet.add_connection( + mct_3c, mct_2c1, origin_port='channel_0', destination_port='channel_1' + ) + mct_flow_sheet.add_connection( + mct_3c, mct_2c2, origin_port='channel_1', destination_port='channel_0' + ) mct_flow_sheet.add_connection(mct_2c1, outlet1, origin_port='channel_0') mct_flow_sheet.add_connection(mct_2c1, outlet1, origin_port='channel_1') mct_flow_sheet.add_connection(mct_2c2, outlet2, origin_port='channel_0') - self.mct_flow_sheet = mct_flow_sheet def setup_ccc_flow_sheet(self): @@ -1144,9 +1050,9 @@ def setup_ccc_flow_sheet(self): ccc_flow_sheet = FlowSheet(self.component_system) inlet = Inlet(self.component_system, name='inlet') - ccc1 = MCT(self.component_system,nchannel=2, name='ccc1') - ccc2 = MCT(self.component_system,nchannel=2, name='ccc2') - ccc3 = MCT(self.component_system,nchannel=2, name='ccc3') + ccc1 = MCT(self.component_system, nchannel=2, name='ccc1') + ccc2 = MCT(self.component_system, nchannel=2, name='ccc2') + ccc3 = MCT(self.component_system, nchannel=2, name='ccc3') outlet = Outlet(self.component_system, name='outlet') ccc_flow_sheet.add_unit(inlet) @@ -1156,11 +1062,14 @@ def setup_ccc_flow_sheet(self): ccc_flow_sheet.add_unit(outlet) ccc_flow_sheet.add_connection(inlet, ccc1, destination_port='channel_0') - ccc_flow_sheet.add_connection(ccc1, ccc2, origin_port='channel_0', destination_port='channel_0') - ccc_flow_sheet.add_connection(ccc2, ccc3, origin_port='channel_0', destination_port='channel_0') + ccc_flow_sheet.add_connection( + ccc1, ccc2, origin_port='channel_0', destination_port='channel_0' + ) + ccc_flow_sheet.add_connection( + ccc2, ccc3, origin_port='channel_0', destination_port='channel_0' + ) ccc_flow_sheet.add_connection(ccc3, outlet, origin_port='channel_0') - self.ccc_flow_sheet = ccc_flow_sheet def test_mct_connections(self): @@ -1270,7 +1179,11 @@ def test_mct_connections(self): self.mct_flow_sheet.connections, expected_connections ) - self.assertTrue(self.mct_flow_sheet.connection_exists(inlet, mct_3c, destination_port='channel_0')) + self.assertTrue( + 
self.mct_flow_sheet.connection_exists( + inlet, mct_3c, destination_port='channel_0' + ) + ) self.assertTrue(self.mct_flow_sheet.connection_exists(mct_3c, mct_2c1, 'channel_0', 'channel_0')) self.assertTrue(self.mct_flow_sheet.connection_exists(mct_3c, mct_2c1, 'channel_0', 'channel_1')) self.assertTrue(self.mct_flow_sheet.connection_exists(mct_3c, mct_2c2, 'channel_1', 'channel_0')) @@ -1355,9 +1268,9 @@ def test_ccc_connections(self): }, outlet: { - 'origins':{ + 'origins': { None: { - ccc3:['channel_0'], + ccc3: ['channel_0'], }, }, @@ -1392,78 +1305,78 @@ def test_mct_flow_rate_calculation(self): expected_flow = { 'inlet': { 'total_out': { - None: (1,0,0,0) + None: (1, 0, 0, 0) }, 'destinations': { None: { 'mct_3c': { - 'channel_0': (1,0,0,0) + 'channel_0': (1, 0, 0, 0) } } } }, 'mct_3c': { 'total_in': { - 'channel_0': (1,0,0,0), - 'channel_1': (0,0,0,0), - 'channel_2': (0,0,0,0) + 'channel_0': (1, 0, 0, 0), + 'channel_1': (0, 0, 0, 0), + 'channel_2': (0, 0, 0, 0) }, 'origins': { 'channel_0': { - 'inlet':{ - None: (1,0,0,0) + 'inlet': { + None: (1, 0, 0, 0) } } }, 'total_out': { - 'channel_0': (1,0,0,0), - 'channel_1': (0,0,0,0), - 'channel_2': (0,0,0,0) + 'channel_0': (1, 0, 0, 0), + 'channel_1': (0, 0, 0, 0), + 'channel_2': (0, 0, 0, 0) }, 'destinations': { 'channel_0': { 'mct_2c1': { - 'channel_0': (1,0,0,0), - 'channel_1': (0,0,0,0) + 'channel_0': (1, 0, 0, 0), + 'channel_1': (0, 0, 0, 0) } }, 'channel_1': { 'mct_2c2': { - 'channel_0': (0,0,0,0) + 'channel_0': (0, 0, 0, 0) } } } }, 'mct_2c1': { 'total_in': { - 'channel_0': (1,0,0,0), - 'channel_1': (0,0,0,0) + 'channel_0': (1, 0, 0, 0), + 'channel_1': (0, 0, 0, 0) }, 'origins': { 'channel_0': { 'mct_3c': { - 'channel_0': (1,0,0,0) + 'channel_0': (1, 0, 0, 0) } }, 'channel_1': { 'mct_3c': { - 'channel_0': (0,0,0,0) + 'channel_0': (0, 0, 0, 0) } } }, 'total_out': { - 'channel_0': (1,0,0,0), - 'channel_1': (0,0,0,0) + 'channel_0': (1, 0, 0, 0), + 'channel_1': (0, 0, 0, 0) }, 'destinations': { 'channel_0': { 'outlet1': { - None: (1,0,0,0) + None: (1, 0, 0, 0) } }, 'channel_1': { 'outlet1': { - None: (0,0,0,0) + None: (0, 0, 0, 0) } } } @@ -1471,63 +1384,59 @@ def test_mct_flow_rate_calculation(self): 'mct_2c2': { 'total_in': { - 'channel_0': (0,0,0,0), - 'channel_1': (0,0,0,0) + 'channel_0': (0, 0, 0, 0), + 'channel_1': (0, 0, 0, 0) }, 'origins': { 'channel_0': { 'mct_3c': { - 'channel_1': (0,0,0,0) + 'channel_1': (0, 0, 0, 0) } }, }, 'total_out': { - 'channel_0': (0,0,0,0), - 'channel_1': (0,0,0,0) + 'channel_0': (0, 0, 0, 0), + 'channel_1': (0, 0, 0, 0) }, 'destinations': { 'channel_0': { 'outlet2': { - None: (0,0,0,0) + None: (0, 0, 0, 0) } } } }, 'outlet1': { 'total_in': { - None: (1,0,0,0) + None: (1, 0, 0, 0) }, 'origins': { None: { 'mct_2c1': { - 'channel_0': (1,0,0,0), - 'channel_1': (0,0,0,0) + 'channel_0': (1, 0, 0, 0), + 'channel_1': (0, 0, 0, 0) } } } }, 'outlet2': { 'total_in': { - None: (0,0,0,0) + None: (0, 0, 0, 0) }, 'origins': { None: { 'mct_2c2': { - 'channel_0': (0,0,0,0) + 'channel_0': (0, 0, 0, 0) } } } } } - flow_rates = mct_flow_sheet.get_flow_rates() - assert_almost_equal_dict(flow_rates, expected_flow) - - def test_port_add_connection(self): inlet = self.mct_flow_sheet['inlet'] @@ -1536,34 +1445,39 @@ def test_port_add_connection(self): outlet1 = self.mct_flow_sheet['outlet1'] with self.assertRaises(CADETProcessError): - self.mct_flow_sheet.add_connection(inlet, mct_3c, origin_port=0, destination_port=5) + self.mct_flow_sheet.add_connection( + inlet, mct_3c, origin_port=0, destination_port=5 + ) with 
self.assertRaises(CADETProcessError): - self.mct_flow_sheet.add_connection(inlet, mct_3c, origin_port=5, destination_port=0) + self.mct_flow_sheet.add_connection( + inlet, mct_3c, origin_port=5, destination_port=0 + ) with self.assertRaises(CADETProcessError): - self.mct_flow_sheet.add_connection(mct_2c2, outlet1, origin_port=0, destination_port=5) + self.mct_flow_sheet.add_connection( + mct_2c2, outlet1, origin_port=0, destination_port=5 + ) def test_set_output_state(self): mct_3c = self.mct_flow_sheet['mct_3c'] with self.assertRaises(CADETProcessError): - self.mct_flow_sheet.set_output_state(mct_3c, [0.5,0.5]) + self.mct_flow_sheet.set_output_state(mct_3c, [0.5, 0.5]) with self.assertRaises(CADETProcessError): - self.mct_flow_sheet.set_output_state(mct_3c, [0.5,0.5], 'channel_5') + self.mct_flow_sheet.set_output_state(mct_3c, [0.5, 0.5], 'channel_5') with self.assertRaises(CADETProcessError): - self.mct_flow_sheet.set_output_state(mct_3c, {'mct_2c1': {'channel_0': 0.5 , 'channel_5': 0.5}}, 'channel_0') + self.mct_flow_sheet.set_output_state( + mct_3c, {'mct_2c1': {'channel_0': 0.5, 'channel_5': 0.5}}, 'channel_0' + ) class TestFlowRateMatrix(unittest.TestCase): """Test calculation of flow rates with another simple testcase by @daklauss""" - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): self.component_system = ComponentSystem(1) @@ -1698,8 +1612,6 @@ def test_matrix_example(self): class TestFlowRateSelfMatrix(unittest.TestCase): """Test special case where one unit is connected to itself.""" - def __init__(self, methodName='runTest'): - super().__init__(methodName) def setUp(self): self.component_system = ComponentSystem(1) @@ -1794,9 +1706,6 @@ class TestSingularFlowMatrix(unittest.TestCase): """ - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): self.component_system = ComponentSystem(1) diff --git a/tests/test_fractions.py b/tests/test_fractions.py index 46a9c15e3..4ed9b21bb 100644 --- a/tests/test_fractions.py +++ b/tests/test_fractions.py @@ -6,9 +6,6 @@ class Test_Fractions(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def create_fractions(self): m_0 = np.array([0, 0]) frac0 = CADETProcess.fractionation.Fraction(m_0, 1) diff --git a/tests/test_individual.py b/tests/test_individual.py index c7696c0b0..887f6808b 100644 --- a/tests/test_individual.py +++ b/tests/test_individual.py @@ -32,9 +32,6 @@ def test_hash_array(self): class TestIndividual(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): x = [1, 2] f = [-1] diff --git a/tests/test_numerics.py b/tests/test_numerics.py new file mode 100644 index 000000000..0f4050f0b --- /dev/null +++ b/tests/test_numerics.py @@ -0,0 +1,78 @@ +import numpy as np +import pytest +from CADETProcess.numerics import round_to_significant_digits + + +def test_basic_functionality(): + values = np.array([123.456, 0.001234, 98765]) + expected = np.array([123.0, 0.00123, 98800.0]) + result = round_to_significant_digits(values, 3) + np.testing.assert_allclose(result, expected, rtol=1e-6) + + +def test_handling_zeros(): + values = np.array([0, 0.0, -0.0]) + expected = np.array([0.0, 0.0, -0.0]) + result = round_to_significant_digits(values, 3) + np.testing.assert_array_equal(result, expected) + + +def test_negative_numbers(): + values = np.array([-123.456, -0.001234, -98765]) + expected = np.array([-123.0, -0.00123, -98800.0]) + result = 
round_to_significant_digits(values, 3) + np.testing.assert_allclose(result, expected, rtol=1e-6) + + +def test_large_numbers(): + values = np.array([1.23e10, 9.87e15]) + expected = np.array([1.23e10, 9.87e15]) + result = round_to_significant_digits(values, 3) + np.testing.assert_allclose(result, expected, rtol=1e-6) + + +def test_small_numbers(): + values = np.array([1.23e-10, 9.87e-15]) + expected = np.array([1.23e-10, 9.87e-15]) + result = round_to_significant_digits(values, 3) + np.testing.assert_allclose(result, expected, rtol=1e-6) + + +def test_mixed_values(): + values = np.array([123.456, 0, -0.001234, 9.8765e-5]) + expected = np.array([123.0, 0.0, -0.00123, 9.88e-5]) + result = round_to_significant_digits(values, 3) + np.testing.assert_allclose(result, expected, rtol=1e-6) + + +def test_invalid_digits(): + with pytest.raises(ValueError, match="Number of significant digits must be greater than 0."): + round_to_significant_digits(np.array([123.456]), 0) + + +def test_empty_array(): + values = np.array([]) + expected = np.array([]) + result = round_to_significant_digits(values, 3) + np.testing.assert_array_equal(result, expected) + + +def test_non_array_input(): + values = [123.456, 0.001234, 98765] # List input + expected = np.array([123.0, 0.00123, 98800.0]) + result = round_to_significant_digits(values, 3) + np.testing.assert_allclose(result, expected, rtol=1e-6) + + +def test_none_digits(): + values = [123.456, 0.001234, 98765] # List input + expected = np.array([123.456, 0.001234, 98765]) + result = round_to_significant_digits(values, None) + np.testing.assert_allclose(result, expected, rtol=1e-6) + + +def test_nan_digits(): + values = np.array([123.456, np.nan, 98765]) + expected = np.array([123.0, np.nan, 98800.0]) + result = round_to_significant_digits(values, 3) + np.testing.assert_allclose(result, expected, rtol=1e-6) diff --git a/tests/test_optimization_problem.py b/tests/test_optimization_problem.py index 452d45d51..ab9538848 100644 --- a/tests/test_optimization_problem.py +++ b/tests/test_optimization_problem.py @@ -112,10 +112,6 @@ def min_results_2(cheap_results): class Test_OptimizationVariable(unittest.TestCase): - - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): self.evaluation_object = EvaluationObject() @@ -166,6 +162,28 @@ def test_index(self): indices=2 ) + def test_significant_digits(self): + var_no_precision = OptimizationVariable( + 'not_rounded_variable', + evaluation_objects=[self.evaluation_object], + parameter_path='scalar_param', + significant_digits=None + ) + var_no_precision.value = 1.1234 + np.testing.assert_equal(var_no_precision.value, 1.1234) + np.testing.assert_equal(self.evaluation_object.scalar_param, 1.1234) + + var_with_precision = OptimizationVariable( + 'rounded_variable', + evaluation_objects=[self.evaluation_object], + parameter_path='scalar_param', + significant_digits=2 + ) + var_with_precision.value = 1.1234 + np.testing.assert_equal(var_with_precision.value, 1.1) + np.testing.assert_equal(self.evaluation_object.scalar_param, 1.1) + + def test_sized_list_single(self): """ Extra test for array with single entry. 
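The round_to_significant_digits implementation itself (CADETProcess/numerics.py) is not part of the hunks shown here. For orientation, a minimal NumPy-based sketch that satisfies the expectations in tests/test_numerics.py above could look as follows; this is an assumption for illustration, not the shipped code:

    import numpy as np

    def round_to_significant_digits(values, digits):
        """Round each value to `digits` significant digits (sketch)."""
        if digits is None:
            return np.asarray(values)
        if digits <= 0:
            raise ValueError("Number of significant digits must be greater than 0.")
        values = np.asarray(values, dtype=float)
        result = values.copy()
        mask = np.isfinite(values) & (values != 0)
        # Scale so that `digits` digits sit left of the decimal point, round,
        # then scale back; zeros and NaNs pass through unchanged.
        exponents = np.floor(np.log10(np.abs(values[mask])))
        factors = 10.0 ** (digits - 1 - exponents)
        result[mask] = np.round(values[mask] * factors) / factors
        return result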
@@ -330,14 +348,10 @@ def test_transform(self): ) -from tests.test_events import TestHandler +from tests.test_events import HandlerFixture class Test_OptimizationVariableEvents(unittest.TestCase): - - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): - evaluation_object = TestHandler() + evaluation_object = HandlerFixture() evt = evaluation_object.add_event( 'single_index_1D', 'performer.array_1d', 1, indices=0, time=1 @@ -571,7 +585,7 @@ def test_polynomial(self): def test_multi_eval_obj(self): """Test setting indexed variables for multiple evaluation objects.""" - evaluation_object_2 = TestHandler() + evaluation_object_2 = HandlerFixture() evt = evaluation_object_2.add_event( 'multi_index_ND', 'performer.ndarray', @@ -669,9 +683,6 @@ def setup_optimization_problem( class Test_OptimizationProblemSimple(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): optimization_problem = OptimizationProblem('simple', use_diskcache=False) @@ -708,9 +719,6 @@ def test_bounds(self): class Test_OptimizationProblemLinCon(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): optimization_problem = setup_optimization_problem( n_lincon=1, use_diskcache=False @@ -794,7 +802,7 @@ def test_initial_values(self): ) np.testing.assert_almost_equal(x0_chebyshev, x0_chebyshev_expected) - x0_seed_1_expected = [[0.5666524, 0.8499365]] + x0_seed_1_expected = [[0.3448127, 0.9138096]] x0_seed_1 = self.optimization_problem.create_initial_values( 1, seed=1 ) @@ -809,16 +817,16 @@ def test_initial_values(self): np.testing.assert_almost_equal(x0_seed_1_random, x0_chebyshev_expected) x0_seed_10_expected = [ - [0.566652437432882, 0.8499365450601801], - [0.16209066464206898, 0.7722304506499793], - [0.4265526985487967, 0.609778640412627], - [0.36121046800146495, 0.9264826793698916], - [0.7875352350909218, 0.8881166149888506], - [0.2685012584445143, 0.9067761747715452], - [0.9701611935982989, 0.9809160496933532], - [0.35086424227614005, 0.4668187637599064], - [0.8928778441932161, 0.9360696751348305], - [0.5365699848069944, 0.6516012021958184] + [0.34481272, 0.91380963], + [0.13935918, 0.7377129 ], + [0.5219434 , 0.63404393], + [0.23933603, 0.8127269 ], + [0.30483107, 0.980534 ], + [0.11605111, 0.86206301], + [0.1869498 , 0.99896377], + [0.55986142, 0.65237364], + [0.49963092, 0.98791549], + [0.6839696 , 0.92668444] ] x0_seed_10 = self.optimization_problem.create_initial_values( 10, seed=1 @@ -832,9 +840,6 @@ def test_initial_values(self): class Test_OptimizationProblemDepVar(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): optimization_problem = OptimizationProblem('simple', use_diskcache=False) @@ -908,7 +913,7 @@ def test_initial_values_without_dependencies(self): variables = self.optimization_problem.get_dependent_values(x0_chebyshev) np.testing.assert_almost_equal(variables, variables_expected) - x0_seed_1_expected = [[0.90164487, 0.27971297, 0.70490538]] + x0_seed_1_expected = [[0.8324936, 0.279713 , 0.6649623]] x0_seed_1 = self.optimization_problem.create_initial_values( 1, seed=1, include_dependent_variables=False ) @@ -930,16 +935,16 @@ def test_initial_values_without_dependencies(self): np.testing.assert_almost_equal(x0_seed_1_random, x0_chebyshev_expected) x0_seed_10_expected = [ - [0.90164487, 0.27971297, 0.70490538], - [0.78125338, 0.17275154, 0.54650281], - [0.97623563, 0.19106333, 
0.79016462], - [0.12826546, 0.03476412, 0.05270397], - [0.89791146, 0.29062957, 0.7429437 ], - [0.8703531 , 0.20575487, 0.68237913], - [0.92572799, 0.01653708, 0.33539715], - [0.96337056, 0.07106034, 0.86232007], - [0.85559046, 0.4824452 , 0.84474955], - [0.8588277 , 0.73874869, 0.80355266] + [0.8324936 , 0.27971297, 0.66496231], + [0.73128644, 0.17275154, 0.5115499 ], + [0.95771264, 0.19106333, 0.79216119], + [0.81089326, 0.29062957, 0.6673637 ], + [0.76359474, 0.20575487, 0.59221373], + [0.9596961 , 0.6718687 , 0.90182574], + [0.89945427, 0.01653708, 0.49987409], + [0.92682306, 0.07106034, 0.8258856 ], + [0.77868893, 0.4824452 , 0.77038195], + [0.59375802, 0.03129941, 0.22275326] ] x0_seed_10 = self.optimization_problem.create_initial_values( 10, seed=1, include_dependent_variables=False @@ -964,7 +969,7 @@ def test_initial_values(self): independent_variables = self.optimization_problem.get_independent_values(x0_chebyshev) np.testing.assert_almost_equal(independent_variables, independent_variables_expected) - x0_seed_1_expected = [[0.9016449, 0.279713 , 0.279713 , 0.7049054]] + x0_seed_1_expected = [[0.8324936, 0.279713 , 0.279713 , 0.6649623]] x0_seed_1 = self.optimization_problem.create_initial_values( 1, seed=1, include_dependent_variables=True ) @@ -982,16 +987,16 @@ def test_initial_values(self): np.testing.assert_almost_equal(x0_seed_1_random, x0_chebyshev_expected) x0_seed_10_expected = [ - [0.90164487, 0.27971297, 0.27971297, 0.70490538], - [0.78125338, 0.17275154, 0.17275154, 0.54650281], - [0.97623563, 0.19106333, 0.19106333, 0.79016462], - [0.12826546, 0.03476412, 0.03476412, 0.05270397], - [0.89791146, 0.29062957, 0.29062957, 0.7429437 ], - [0.8703531 , 0.20575487, 0.20575487, 0.68237913], - [0.92572799, 0.01653708, 0.01653708, 0.33539715], - [0.96337056, 0.07106034, 0.07106034, 0.86232007], - [0.85559046, 0.4824452 , 0.4824452 , 0.84474955], - [0.8588277 , 0.73874869, 0.73874869, 0.80355266] + [0.8324936 , 0.27971297, 0.27971297, 0.66496231], + [0.73128644, 0.17275154, 0.17275154, 0.5115499 ], + [0.95771264, 0.19106333, 0.19106333, 0.79216119], + [0.81089326, 0.29062957, 0.29062957, 0.6673637 ], + [0.76359474, 0.20575487, 0.20575487, 0.59221373], + [0.9596961 , 0.6718687 , 0.6718687 , 0.90182574], + [0.89945427, 0.01653708, 0.01653708, 0.49987409], + [0.92682306, 0.07106034, 0.07106034, 0.8258856 ], + [0.77868893, 0.4824452 , 0.4824452 , 0.77038195], + [0.59375802, 0.03129941, 0.03129941, 0.22275326] ] x0_seed_10 = self.optimization_problem.create_initial_values( 10, seed=1, include_dependent_variables=True @@ -1007,9 +1012,6 @@ def test_initial_values(self): class Test_OptimizationProblemJacobian(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): def single_obj_single_var(x): return x[0]**2 @@ -1095,9 +1097,6 @@ class Test_OptimizationProblemConstraintTransforms(unittest.TestCase): tests if `A_transformed` and `b_transformed` properties are correctly computed. 
""" - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setup(self): settings.working_directory = './test_problem' @@ -1229,9 +1228,6 @@ class Test_OptimizationProblemEvaluator(unittest.TestCase): """ - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): eval_obj = EvaluationObject() @@ -1309,9 +1305,6 @@ def test_cache(self): class Test_MultiEvaluationObjects(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): eval_obj_1 = EvaluationObject(name='foo') eval_obj_2 = EvaluationObject(name='bar') diff --git a/tests/test_optimization_results.py b/tests/test_optimization_results.py index 48ba94183..9c5bcfe50 100644 --- a/tests/test_optimization_results.py +++ b/tests/test_optimization_results.py @@ -1,5 +1,8 @@ +import shutil import unittest +from pathlib import Path +from addict import Dict import numpy as np from CADETProcess.optimization import OptimizationResults @@ -9,13 +12,81 @@ from tests.test_optimization_problem import setup_optimization_problem -def setup_optimization_results( +class OptimizationResultsWithoutNans(OptimizationResults): + """ + Patch class that removes None values in the OptimizationResults during .to_dict() call. This allows + serialization with any version of CADET-Python pending the merge of https://github.com/cadet/CADET-Python/pull/48 + + """ + def to_dict(self) -> dict: + """Convert Results to a dictionary. + + Returns + ------- + addict.Dict + Results as a dictionary with populations stored as list of dictionaries. + """ + data = Dict() + data.system_information = self.system_information + data.optimizer_state = self.optimizer_state + data.population_all_id = str(self.population_all.id) + data.populations = {i: pop.to_dict() for i, pop in enumerate(self.populations)} + data.pareto_fronts = { + i: front.to_dict() for i, front in enumerate(self.pareto_fronts) + } + if self._meta_fronts is not None: + data.meta_fronts = { + i: front.to_dict() for i, front in enumerate(self.meta_fronts) + } + if self.time_elapsed is not None: + data.time_elapsed = self.time_elapsed + data.cpu_time = self.cpu_time + + data = self._remove_none_values(data) + + return data + + @staticmethod + def _remove_none_values(dictionary) -> dict: + """Remove None values from a dictionary. + + Parameters + ---------- + dictionary : dict + Dictionary to remove None values from. + + Returns + ------- + dict + Dictionary with None values removed. 
+ """ + delete_keys = [] + for key, value in dictionary.items(): + if value is None: + delete_keys.append(key) + elif isinstance(value, dict): + OptimizationResultsWithoutNans._remove_none_values(value) + + for key in delete_keys: + del dictionary[key] + + return dictionary + + + +def setup_optimization_problem_and_results( n_gen=3, n_ind=3, n_vars=2, n_obj=1, n_nonlin=0, n_meta=0, rng=None, initialize_data=True): optimization_problem = setup_optimization_problem(n_vars, n_obj, n_nonlin, n_meta) optimizer = U_NSGA3() - optimization_results = OptimizationResults(optimization_problem, optimizer) + optimization_results = OptimizationResultsWithoutNans(optimization_problem, optimizer) + results_dir = Path("tmp") / "optimization_results" + + shutil.rmtree(results_dir, ignore_errors=True) + results_dir.mkdir(exist_ok=True, parents=True) + (results_dir / "figures").mkdir(exist_ok=True, parents=True) + optimization_results.results_directory = results_dir if initialize_data: if rng is None: @@ -28,15 +99,12 @@ def setup_optimization_results( if n_meta > 0: optimization_results.update_meta() - return optimization_results + return optimization_problem, optimization_results class TestOptimizationResults(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): - self.optimization_results = setup_optimization_results() + _, self.optimization_results = setup_optimization_problem_and_results() def test_history(self): n_gen_expected = 3 @@ -66,11 +134,28 @@ def test_history(self): def test_serialization(self): data = self.optimization_results.to_dict() - results_new = setup_optimization_results(initialize_data=False) + _, results_new = setup_optimization_problem_and_results(initialize_data=False) results_new.update_from_dict(data) data_new = results_new.to_dict() np.testing.assert_equal(data, data_new) + def test_io_serialization(self): + optimization_problem, _ = setup_optimization_problem_and_results() + + # save_results() needs to happen after creation of optimization_problem, + # because during optimization_problem creation the results folder gets cleared and that would delete the save + self.optimization_results.save_results("checkpoint") + + optimizer = U_NSGA3() + optimization_results_new = optimizer.load_results( + checkpoint_path=self.optimization_results.results_directory / "checkpoint.h5", + optimization_problem=optimization_problem + ) + np.testing.assert_equal( + desired=self.optimization_results.to_dict(), + actual=OptimizationResultsWithoutNans._remove_none_values(optimization_results_new.to_dict()) + ) + if __name__ == '__main__': unittest.main() diff --git a/tests/test_optimizer_behavior.py b/tests/test_optimizer_behavior.py index acd543019..b0af2265d 100644 --- a/tests/test_optimizer_behavior.py +++ b/tests/test_optimizer_behavior.py @@ -178,7 +178,7 @@ def optimizer(request): # %% Tests - +@pytest.mark.slow def test_convergence(optimization_problem: TestProblem, optimizer: OptimizerBase): # only test problems that the optimizer can handle. The rest of the tests # will be marked as passed @@ -194,7 +194,7 @@ def test_convergence(optimization_problem: TestProblem, optimizer: OptimizerBase else: optimization_problem.test_if_solved(results, MOO_TEST_KWARGS) - +@pytest.mark.slow def test_from_initial_values( optimization_problem: TestProblem, optimizer: OptimizerBase ): @@ -233,7 +233,7 @@ def __call__(self, results): raise RuntimeError("Max number of evaluations reached. 
Aborting!") self.n_calls += 1 - +@pytest.mark.slow def test_resume_from_checkpoint( optimization_problem: TestProblem, optimizer: OptimizerBase ): diff --git a/tests/test_parallelization_adapter.py b/tests/test_parallelization_adapter.py index 788057b1b..9172aaa67 100644 --- a/tests/test_parallelization_adapter.py +++ b/tests/test_parallelization_adapter.py @@ -42,9 +42,6 @@ class TestParallelizationBackend(unittest.TestCase): """Test initializing parallelization backends and n_cores attribute.""" - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def test_n_cores(self): with self.assertRaises(ValueError): sequential_backend = SequentialBackend(n_cores=n_cores) @@ -89,9 +86,6 @@ class TestParallelEvaluation(unittest.TestCase): """ - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def tearDown(self): shutil.rmtree('./tmp', ignore_errors=True) shutil.rmtree('./test_parallelization', ignore_errors=True) @@ -117,7 +111,10 @@ def evaluation_function(x): file_name = simulator.get_tempfile_name() cwd = simulator.temp_dir sim = simulator.create_lwe(cwd / file_name) - sim.run() + if hasattr(sim, "run_simulation"): + return_information = sim.run_simulation() + else: + return_information = sim.run_load() return True @@ -140,9 +137,6 @@ class TestOptimizerParallelizationBackend(unittest.TestCase): """ - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def tearDown(self): settings.working_directory = None diff --git a/tests/test_parameters.py b/tests/test_parameters.py index ab1f6500e..97a11d278 100755 --- a/tests/test_parameters.py +++ b/tests/test_parameters.py @@ -5,7 +5,7 @@ from CADETProcess.dataStructure import ( Structure, Constant, Switch, - Typed, Integer, String, List, + Typed, Integer, Float, String, List, Callable, RangedFloat, UnsignedInteger, SizedList, SizedNdArray, @@ -13,14 +13,12 @@ Polynomial, NdPolynomial, Vector, Matrix, DependentlyModulatedUnsignedList, + Aggregator, SizedAggregator, SizedClassDependentAggregator, ) class TestDescription(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): class Model(Structure): param_with_description = Integer(description='foo') @@ -36,8 +34,6 @@ def test_modified_descriptor(self): class TestParameterDictionaries(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def setUp(self): class Model(Structure): @@ -91,9 +87,6 @@ def test_parameters_dict_setter(self): class TestConstant(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): class Model(Structure): const_int = Constant(value=0) @@ -120,9 +113,6 @@ class NoValue(Structure): class TestSwitch(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): class Model(Structure): switch = Switch(valid=['foo', 'bar'], default='foo') @@ -146,9 +136,6 @@ class InvalidDefault(Structure): class TestTyped(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): class Model(Structure): string_param = String(default='foo') @@ -197,9 +184,6 @@ class WrongDefaultType(Structure): class TestCallable(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): def default_method(x): return x @@ -229,9 +213,6 @@ def test_default(self): class TestRanged(unittest.TestCase): - def __init__(self, 
methodName='runTest'): - super().__init__(methodName) - def setUp(self): class Model(Structure): bound_float_param = RangedFloat(lb=-1, ub=1, default=0) @@ -264,9 +245,6 @@ class InvalidDefault(Structure): class TestSizedUnified(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): class Model(Structure): sized_list = SizedList(size=4, default=0) @@ -344,9 +322,6 @@ class InvalidDefaultRange(Structure): class TestSizedDependent(unittest.TestCase): """Previous test methods for dependently sized parameters.""" - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): class Model(Structure): dep_1 = UnsignedInteger() @@ -448,9 +423,6 @@ class InvalidDefault(Structure): class TestPolynomial(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): class Model(Structure): poly_param = Polynomial(n_coeff=2, default=0) @@ -580,9 +552,6 @@ def test_default(self): class TestModulated(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): class Model(Structure): n_mod = 4 @@ -624,5 +593,115 @@ def test_value(self): self.model.matrix = [1, 2] +class TestAggregator(unittest.TestCase): + def setUp(self): + class DummyInstance(Structure): + float_param = Float(default=1.0) + sized_param = SizedNdArray(size=4) + sized_param_transposed = SizedNdArray(size=2) + + class Model(Structure): + aggregator = Aggregator('float_param', 'container') + sized_aggregator = SizedAggregator('sized_param', 'container') + transposed_sized_aggregator = SizedAggregator( + 'sized_param_transposed', 'container', transpose=True + ) + + def __init__(self): + self.container = [ + DummyInstance( + float_param=i, + sized_param=[float(i*j) for j in range(4)], + sized_param_transposed=[float(i*j) for j in range(2)] + ) + for i in range(3) + ] + + self.model = Model() + + def test_value(self): + # Aggregator + self.assertAlmostEqual(self.model.aggregator, [0.0, 1.0, 2.0]) + + new_value = [1, 2, 3] + self.model.aggregator = new_value + + self.assertAlmostEqual(self.model.aggregator, new_value) + for con, val in zip(self.model.container, new_value): + self.assertAlmostEqual(con.float_param, val) + + # Test setting incorrect types or dimensions: + with self.assertRaises((ValueError, TypeError)): + self.model.aggregator = 3 + + # Test setting slices and indexes + self.model.aggregator[0] = 3.0 + self.assertAlmostEqual(self.model.aggregator[0], 3.0) + + # SizedAggregator + np.testing.assert_almost_equal( + self.model.sized_aggregator, + [ + [0, 0, 0, 0], + [0, 1, 2, 3], + [0, 2, 4, 6], + ] + ) + + new_value = [ + [1., 2., 3., 4.], + [2., 3., 4., 5.], + [3., 4., 5., 6.], + ] + self.model.sized_aggregator = new_value + + np.testing.assert_almost_equal(self.model.sized_aggregator, new_value) + for con, val in zip(self.model.container, new_value): + np.testing.assert_almost_equal(con.sized_param, val) + + # Test setting incorrect types or dimensions: + with self.assertRaises((ValueError, TypeError)): + self.model.sized_aggregator = 3 + + with self.assertRaises((ValueError, TypeError)): + self.model.sized_aggregator = [1., 2., 3., 4.] 
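+
+        # The aggregated view behaves like an array of shape
+        # (n_instances, size); indexing past n_instances - 1 raises an
+        # IndexError, as checked next.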
+ + with self.assertRaises(IndexError): + self.model.sized_aggregator[3] = 3.0 + + # Test setting slices and indexes + new_value = [ + [1.5, 2.5, 3.5, 4.5], + [2.5, 3.5, 4.5, 5.5], + [3.5, 4.5, 5.5, 6.5], + ] + + self.model.sized_aggregator[0] = new_value[0] + np.testing.assert_almost_equal(self.model.sized_aggregator[0], new_value[0]) + + self.model.sized_aggregator[0, 0] = 7.5 + np.testing.assert_almost_equal(self.model.sized_aggregator[0, 0], 7.5) + + # Transposed SizedAggregator + np.testing.assert_almost_equal( + self.model.transposed_sized_aggregator, + [ + [0, 0, 0], + [0, 1, 2] + ] + ) + new_value = [ + [1, 2, 3], + [2, 3, 4] + ] + self.model.transposed_sized_aggregator = new_value + + np.testing.assert_almost_equal( + self.model.transposed_sized_aggregator, new_value + ) + for con, val in zip(self.model.container, np.array(new_value).T): + np.testing.assert_almost_equal(con.sized_param_transposed, val) + + if __name__ == '__main__': unittest.main() diff --git a/tests/test_population.py b/tests/test_population.py index 45fa7e661..a15a570ee 100644 --- a/tests/test_population.py +++ b/tests/test_population.py @@ -22,8 +22,6 @@ def setup_population(n_ind, n_vars, n_obj, n_nonlin=0, n_meta=0, rng=None): class TestPopulation(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def setUp(self): x = [1, 2] @@ -238,8 +236,6 @@ def test_from_dict(self): class TestPareto(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def setUp(self): front = ParetoFront(3) diff --git a/tests/test_process.py b/tests/test_process.py index c489f4dc5..1d8984e51 100644 --- a/tests/test_process.py +++ b/tests/test_process.py @@ -1,24 +1,183 @@ +import numpy as np import copy -import unittest +import pytest +from CADETProcess.processModel import ComponentSystem, FlowSheet, Process, Inlet +from examples.batch_elution.process import process as batch_elution_process -from CADETProcess import CADETProcessError -from examples.batch_elution.process import process +TEST_PROFILE_CYCLE_TIME = 155 * 60 -class Test_process(unittest.TestCase): +def new_inlet_only_process() -> Process: + """Create a new inlet-only process for testing.""" + component_system = ComponentSystem(1) + flow_sheet = FlowSheet(component_system) + inlet = Inlet(component_system, 'inlet') + flow_sheet.add_unit(inlet) + process = Process(flow_sheet, 'inlet_only') + process.cycle_time = TEST_PROFILE_CYCLE_TIME + return process - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def create_process(self): - return copy.deepcopy(process) +def new_batch_elution_process() -> Process: + """Create a deep copy of the batch elution process for testing.""" + return copy.deepcopy(batch_elution_process) - def test_inlet_profile(self): - pass - def test_check_cstr_volume(self): - pass +@pytest.fixture(params=["inlet_only"]) +def process_fixture(request) -> Process: + """ + Pytest fixture that initializes a fresh process for every test. + Supports `inlet_only` (default) and `batch_elution` (for future tests). 
+ """ + if request.param == "inlet_only": + return new_inlet_only_process() + elif request.param == "batch_elution": + return new_batch_elution_process() + else: + raise ValueError(f"Unknown process type: {request.param}") -if __name__ == '__main__': - unittest.main() + +def derivative_of_negative_gaussian( + t: np.ndarray, + center: float, + sigma: float, + amplitude: float, + constant_value: float + ) -> np.ndarray: + """ + Compute the derivative of a negative Gaussian function. + + Used for generating flow rate profiles. + """ + neg_gauss = -amplitude * np.exp(-((t - center) ** 2) / (2 * sigma ** 2)) + neg_gauss_derivative = -((t - center) / (sigma ** 2)) * neg_gauss + return neg_gauss_derivative + constant_value + + +def generate_input_profile( + profile_type: str = "gaussian", + duration: float = TEST_PROFILE_CYCLE_TIME, + n_points: int = 1, + **kwargs + ) -> tuple[np.ndarray, np.ndarray]: + """Generate a time and flow rate profile for different test cases.""" + if profile_type == "gaussian": + time_points = np.linspace(0, duration, n_points) + pulse_center = kwargs.get("pulse_center", duration * 0.6) + sigma = kwargs.get("sigma", duration * 0.1) + amplitude = kwargs.get("amplitude", 1e-10) + constant_value = kwargs.get("constant_value", 1e-5) + input_profile = derivative_of_negative_gaussian( + time_points, pulse_center, sigma, amplitude, constant_value + ) + + elif profile_type == "constant": + time_points = np.linspace(0, duration, n_points) + input_profile = np.full_like(time_points, kwargs.get("value", 1e-5)) + + elif profile_type == "linear": + time_points = np.linspace(0, duration, n_points) + input_profile = np.linspace( + kwargs.get("start", -1e-5), kwargs.get("end", 1e-5), n_points + ) + + elif profile_type == "two_point": + time_points = np.array([0, 1]) # At least two points to prevent interpolation errors + input_profile = np.array( + [kwargs.get("value", 1e-5), kwargs.get("value", 1e-5)] + ) + + else: + raise ValueError(f"Unknown profile type: {profile_type}") + + return time_points, input_profile + + +def test_add_flow_rate_profile_negative_gaussian(process_fixture: Process) -> None: + """Test adding a negative Gaussian flow rate profile.""" + process = process_fixture + time_points, input_profile = generate_input_profile( + "gaussian", n_points=TEST_PROFILE_CYCLE_TIME + ) + + process.add_flow_rate_profile('inlet', time_points, input_profile) + + assert len(process.events) > 0, "No events were added to the process" + assert np.allclose( + input_profile, + process.parameter_timelines['flow_sheet.inlet.flow_rate'].value(time_points), + rtol=1e-2, + atol=1e-8 + ), "Event does not match the expected flow rate profile." + + +def test_add_flow_rate_profile_two_point(process_fixture: Process) -> None: + """Test adding a flow rate profile with only two points.""" + process = process_fixture + time_points, input_profile = generate_input_profile("two_point") + + process.add_flow_rate_profile( + 'inlet', time_points, input_profile, interpolation_method='pchip' + ) + + assert len(process.events) > 0, "No events were added for a two-point profile" + assert np.allclose( + input_profile, + process.parameter_timelines['flow_sheet.inlet.flow_rate'].value(time_points), + rtol=1e-2, + atol=1e-8 + ), "Event does not match the expected flow rate profile." 
+ + +def test_add_flow_rate_profile_constant_function(process_fixture: Process) -> None: + """Test adding a constant flow rate profile.""" + process = process_fixture + time_points, input_profile = generate_input_profile("constant", n_points=50) + + process.add_flow_rate_profile( + 'inlet', time_points, input_profile, interpolation_method='pchip' + ) + + assert len(process.events) > 0, "No events were added for a constant function" + assert np.allclose( + input_profile, + process.parameter_timelines['flow_sheet.inlet.flow_rate'].value(time_points), + rtol=1e-2, + atol=1e-8 + ), "Event does not match the expected flow rate profile." + + +def test_add_flow_rate_profile_unordered_time(process_fixture: Process) -> None: + """Test exception when time points are unordered.""" + process = process_fixture + time_points = np.array([10, 0, 20, 5]) + input_profile = np.array([1e-5, 1e-5, 1e-5, 1e-5]) + + with pytest.raises(ValueError, match="`x` must be strictly increasing sequence."): + process.add_flow_rate_profile( + 'inlet', time_points, input_profile, interpolation_method='pchip' + ) + + +@pytest.mark.parametrize("method", ["cubic", "pchip", None]) +def test_add_flow_rate_profile_different_interpolation_methods( + process_fixture: Process, method: str + ) -> None: + """Test different interpolation methods for adding a flow rate profile.""" + process = process_fixture + time_points, input_profile = generate_input_profile( + "gaussian", n_points=TEST_PROFILE_CYCLE_TIME + ) + + process.add_flow_rate_profile( + 'inlet', time_points, input_profile, interpolation_method=method + ) + + assert len(process.events) > 0, f"No events were added for interpolation method {method}" + assert np.allclose( + input_profile, + process.parameter_timelines['flow_sheet.inlet.flow_rate'].value(time_points), + rtol=1e-2, + atol=1e-8 + ), f"Event does not match the expected flow rate profile for method {method}." 
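Taken together, the helpers above also document the intended user-facing workflow: build a process around an Inlet, sample a flow-rate profile, and hand it to Process.add_flow_rate_profile. A minimal standalone sketch built from the same calls the tests exercise (unit name, cycle time, and sample count are illustrative):

    import numpy as np
    from CADETProcess.processModel import ComponentSystem, FlowSheet, Inlet, Process

    component_system = ComponentSystem(1)
    flow_sheet = FlowSheet(component_system)
    flow_sheet.add_unit(Inlet(component_system, 'inlet'))

    process = Process(flow_sheet, 'demo')
    process.cycle_time = 600

    time_points = np.linspace(0, 600, 61)
    flow_rate = np.full_like(time_points, 1e-5)
    process.add_flow_rate_profile(
        'inlet', time_points, flow_rate, interpolation_method='pchip'
    )

    # The profile is recovered from the generated events via the timeline:
    timeline = process.parameter_timelines['flow_sheet.inlet.flow_rate']
    assert np.allclose(flow_rate, timeline.value(time_points), rtol=1e-2, atol=1e-8)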
diff --git a/tests/test_pymoo.py b/tests/test_pymoo.py index 5c2f9fbd2..cc3428c11 100644 --- a/tests/test_pymoo.py +++ b/tests/test_pymoo.py @@ -8,8 +8,6 @@ class Test_OptimizationProblemSimple(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def tearDown(self): shutil.rmtree('./results_simple', ignore_errors=True) diff --git a/tests/test_reaction.py b/tests/test_reaction.py index 4d2b4eac5..59502438b 100644 --- a/tests/test_reaction.py +++ b/tests/test_reaction.py @@ -13,9 +13,6 @@ class Test_Reaction(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def create_simple_bulk_reaction(self, is_kinetic=True, k_fwd_min=100): # 0: NH4+(aq) <=> NH3(aq) + H+(aq) component_system = ComponentSystem() @@ -25,13 +22,13 @@ def create_simple_bulk_reaction(self, is_kinetic=True, k_fwd_min=100): ) reaction_model = MassActionLaw(component_system, 'simple') reaction_model.add_reaction( - [1, 2, 0], [-1, 1, 1], 10**(-9.2)*1e3, + ['NH4+', 'NH3', 'H+'], [-1, 1, 1], 10**(-9.2)*1e3, is_kinetic=is_kinetic, k_fwd_min=k_fwd_min ) return reaction_model def create_complex_bulk_reaction(self, is_kinetic=True, k_fwd_min=1): - # Reactions + # All Reactions # 0: NH4+(aq) <=> NH3(aq) + H+(aq) # 1: Lys2+(aq) <=> Lys+(aq) + H+(aq) # 2: Lys+(aq) <=> Lys(aq) + H+(aq) @@ -55,32 +52,39 @@ def create_complex_bulk_reaction(self, is_kinetic=True, k_fwd_min=1): charge=[2, 1, 0, -1] ) reaction_model = MassActionLaw(component_system, 'complex') + # 0: NH4+(aq) <=> NH3(aq) + H+(aq) reaction_model.add_reaction( - [1, 2, 0], [-1, 1, 1], 10**(-9.2)*1e3, + ['NH4+','NH3', 'H+'], [-1, 1, 1], 10**(-9.2)*1e3, is_kinetic=is_kinetic, k_fwd_min=k_fwd_min ) + # 1: Lys2+(aq) <=> Lys+(aq) + H+(aq) reaction_model.add_reaction( - [3, 4, 0], [-1, 1, 1], 10**(-2.20)*1e3, + ['Lys2+', 'Lys+', 'H+'], [-1, 1, 1], 10**(-2.20)*1e3, is_kinetic=is_kinetic, k_fwd_min=k_fwd_min ) + # 2: Lys+(aq) <=> Lys(aq) + H+(aq) reaction_model.add_reaction( - [4, 5, 0], [-1, 1, 1], 10**(-8.90)*1e3, + ['Lys+', 'Lys', 'H+'], [-1, 1, 1], 10**(-8.90)*1e3, is_kinetic=is_kinetic, k_fwd_min=k_fwd_min ) + # 3: Lys(aq) <=> Lys-(aq) + H+(aq) reaction_model.add_reaction( - [5, 6, 0], [-1, 1, 1], 10**(-10.28)*1e3, + ['Lys', 'Lys-', 'H+'], [-1, 1, 1], 10**(-10.28)*1e3, is_kinetic=is_kinetic, k_fwd_min=k_fwd_min ) + # 4: Arg2+ <=> Arg+ + H+ reaction_model.add_reaction( - [7, 8, 0], [-1, 1, 1], 10**(-2.18)*1e3, + ['Arg2+', 'Arg+', 'H+'], [-1, 1, 1], 10**(-2.18)*1e3, is_kinetic=is_kinetic, k_fwd_min=k_fwd_min ) + # 5: Arg+ <=> Arg + H+ reaction_model.add_reaction( - [8, 9, 0], [-1, 1, 1], 10**(-9.09)*1e3, + ['Arg+', 'Arg', 'H+'], [-1, 1, 1], 10**(-9.09)*1e3, is_kinetic=is_kinetic, k_fwd_min=k_fwd_min ) + # 6: Arg <=> Arg- + H+ reaction_model.add_reaction( - [9, 10, 0], [-1, 1, 1], 10**(-13.2)*1e3, + ['Arg', 'Arg-', 'H+'], [-1, 1, 1], 10**(-13.2)*1e3, is_kinetic=is_kinetic, k_fwd_min=k_fwd_min ) @@ -158,51 +162,51 @@ def create_cross_phase_reaction(self): # Pore Liquid # 0: NH4+(aq) <=> NH3(aq) + H+(aq) - # 1: Lys2+(aq) <=> Lys+(aq) + H+(aq) - # 2: Lys+(aq) <=> Lys(aq) + H+(aq) - # 3: Lys(aq) <=> Lys-(aq) + H+(aq) reaction_model.add_liquid_reaction( - [0, 1, -1], [-1, 1, 1], 10**(-9.2)*1e3, is_kinetic=False + ['NH4+', 'NH3', 'H+'], [-1, 1, 1], 10**(-9.2)*1e3, is_kinetic=False ) + # 1: Lys2+(aq) <=> Lys+(aq) + H+(aq) reaction_model.add_liquid_reaction( - [2, 3, -1], [-1, 1, 1], 10**(-2.20)*1e3, is_kinetic=False + ['Lys2+', 'Lys+', 'H+'], [-1, 1, 1], 10**(-2.20)*1e3, is_kinetic=False ) + # 2: Lys+(aq) 
<=> Lys(aq) + H+(aq) reaction_model.add_liquid_reaction( - [3, 4, -1], [-1, 1, 1], 10**(-8.90)*1e3, is_kinetic=False + ['Lys+', 'Lys', 'H+'], [-1, 1, 1], 10**(-8.90)*1e3, is_kinetic=False ) + # 3: Lys(aq) <=> Lys-(aq) + H+(aq) reaction_model.add_liquid_reaction( - [4, 5, -1], [-1, 1, 1], 10**(-10.28)*1e3, is_kinetic=False + ['Lys', 'Lys-', 'H+'], [-1, 1, 1], 10**(-10.28)*1e3, is_kinetic=False ) # Adsorption k_fwd_min_ads = 100 # 0: NH4+(aq) + H+(s) <=> NH4+(s) + H+(aq) - # 1: NH4+(s) <=> NH3(aq) + H+(s) reaction_model.add_cross_phase_reaction( - [0, -1, 0, -1], [-1, -1, 1, 1], [0, 1, 1, 0], 1/1.5, + ['NH4+', 'H+', 'NH4+', 'H+'], [-1, -1, 1, 1], [0, 1, 1, 0], 1/1.5, k_fwd_min=k_fwd_min_ads ) + # 1: NH4+(s) <=> NH3(aq) + H+(s) reaction_model.add_cross_phase_reaction( - [0, 1, -1], [-1, 1, 1], [1, 0, 1], 1.5, k_fwd_min=k_fwd_min_ads + ['NH4+', 'NH3', 'H+'], [-1, 1, 1], [1, 0, 1], 1.5, k_fwd_min=k_fwd_min_ads ) # 2: Lys2+(aq) + 2H+(s) <=> Lys2+(s) + 2H+(aq) - # 3: Lys2+(s) <=> Lys+(aq) + H+(s) reaction_model.add_cross_phase_reaction( - [2, -1, 2, -1], [-1, -2, 1, 2], [0, 1, 1, 0], 1/5, + ['Lys2+', 'H+', 'Lys2+', 'H+'], [-1, -2, 1, 2], [0, 1, 1, 0], 1/5, k_fwd_min=k_fwd_min_ads ) + # 3: Lys2+(s) <=> Lys+(aq) + H+(s) reaction_model.add_cross_phase_reaction( - [2, 3, -1], [-1, 1, 1], [1, 0, 1], 5, + ['Lys2+', 'Lys+', 'H+'], [-1, 1, 1], [1, 0, 1], 5, k_fwd_min=k_fwd_min_ads ) # 4: Lys+(aq) + H+(s) <=> Lys+(s) + H+(aq) - # 5: Lys+(s) <=> Lys(aq) + H+(s) reaction_model.add_cross_phase_reaction( - [3, -1, 3, -1], [-1, -1, 1, 1], [0, 1, 1, 0], 1/0.75, + ['Lys+', 'H+', 'Lys+', 'H+'], [-1, -1, 1, 1], [0, 1, 1, 0], 1/0.75, k_fwd_min=k_fwd_min_ads ) + # 5: Lys+(s) <=> Lys(aq) + H+(s) reaction_model.add_cross_phase_reaction( - [3, 4, -1], [-1, 1, 1], [1, 0, 1], 0.75, + ['Lys+', 'Lys', 'H+'], [-1, 1, 1], [1, 0, 1], 0.75, k_fwd_min=k_fwd_min_ads ) diff --git a/tests/test_sections.py b/tests/test_sections.py index f2c08b8d5..e95d9fadc 100644 --- a/tests/test_sections.py +++ b/tests/test_sections.py @@ -7,8 +7,6 @@ class TestGenerateIndices(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def test_generate_indices(self): shape = (3, 3) @@ -41,8 +39,6 @@ def test_generate_indices(self): class TestSection(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def setUp(self): self.constant_section_single = Section(0, 1, 1) @@ -108,8 +104,6 @@ def test_section_integral(self): class TestTimeLine(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def create_timeline_constant_single(self): """Piecewise constant sections with single entry.""" @@ -277,8 +271,6 @@ def test_tl_from_profile(self): class TestMultiTimeLine(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def create_timeline_constant_multi(self): """Piecewise constant sections with multiple entries managed by MultiTimeline.""" diff --git a/tests/test_solution.py b/tests/test_solution.py index deea5f926..5e727b22a 100644 --- a/tests/test_solution.py +++ b/tests/test_solution.py @@ -91,8 +91,6 @@ class TestSolution(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def setUp(self): # 2 Components, constant concentration, constant flow @@ -427,8 +425,6 @@ def test_fraction_mass(self): class TestSliceSolution(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) def setUp(self): # 2 Components, gaussian 
peaks, constant flow @@ -541,9 +537,6 @@ def test_total_concentration(self): class TestPlot(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) - def setUp(self): # 2 Components, gaussian peaks, constant flow self.solution_species = SolutionIO( diff --git a/tests/test_transform.py b/tests/test_transform.py index fef000435..02212afe4 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -1,16 +1,14 @@ import unittest from CADETProcess.transform import ( - NoTransform, NormLinearTransform, NormLogTransform, AutoTransform + NullTransformer, NormLinearTransformer, NormLogTransformer, AutoTransformer ) -class Test_Transform(unittest.TestCase): - def __init__(self, methodName='runTest'): - super().__init__(methodName) +class Test_Transformer(unittest.TestCase): def test_input_range(self): - transform = NormLinearTransform(0, 100) + transform = NormLinearTransformer(0, 100) with self.assertRaises(ValueError): in_ = -10 @@ -21,7 +19,7 @@ def test_input_range(self): out = transform.transform(in_) def test_output_range(self): - transform = NormLinearTransform(0, 100) + transform = NormLinearTransformer(0, 100) with self.assertRaises(ValueError): in_ = -1 @@ -32,7 +30,7 @@ def test_output_range(self): out = transform.untransform(in_) def test_no_transform(self): - transform = NoTransform(0, 100) + transform = NullTransformer(0, 100) self.assertAlmostEqual(transform.lb, 0) self.assertAlmostEqual(transform.ub, 100) @@ -47,7 +45,7 @@ def test_no_transform(self): self.assertAlmostEqual(out_expected, out) def test_linear(self): - transform = NormLinearTransform(0, 100) + transform = NormLinearTransformer(0, 100) self.assertAlmostEqual(transform.lb, 0) self.assertAlmostEqual(transform.ub, 1) @@ -83,7 +81,7 @@ def test_linear(self): def test_log(self): """Missing: Special case for lb_input <= 0""" - transform = NormLogTransform(1, 1000) + transform = NormLogTransformer(1, 1000) self.assertAlmostEqual(transform.lb, 0) self.assertAlmostEqual(transform.ub, 1) @@ -130,12 +128,12 @@ def test_log(self): def test_auto(self): threshold = 1000 - transform = AutoTransform(1, 100, threshold=threshold) + transform = AutoTransformer(1, 100, threshold=threshold) self.assertTrue(transform.use_linear) self.assertFalse(transform.use_log) # Expect Log behaviour - transform = AutoTransform(1, 1001, threshold=threshold) + transform = AutoTransformer(1, 1001, threshold=threshold) self.assertFalse(transform.use_linear) self.assertTrue(transform.use_log) diff --git a/tests/test_unit_operation.py b/tests/test_unit_operation.py index 936135b60..d5fa1919a 100644 --- a/tests/test_unit_operation.py +++ b/tests/test_unit_operation.py @@ -1,20 +1,18 @@ -""" -Todo ----- -Add tests for -- section dependent parameters, polynomial parameters -""" -import unittest - +import pytest import numpy as np -from CADETProcess import CADETProcessError from CADETProcess.processModel import ComponentSystem from CADETProcess.processModel import ( - Inlet, Cstr, - TubularReactor, LumpedRateModelWithPores, LumpedRateModelWithoutPores, MCT + Inlet, + Cstr, + TubularReactor, + LumpedRateModelWithPores, + LumpedRateModelWithoutPores, + GeneralRateModel, + MCT, ) + length = 0.6 diameter = 0.024 @@ -24,287 +22,444 @@ bed_porosity = 0.3 particle_porosity = 0.6 +particle_radius = 1e-4 total_porosity = bed_porosity + (1 - bed_porosity) * particle_porosity const_solid_volume = volume * (1 - total_porosity) init_liquid_volume = volume * total_porosity axial_dispersion = 4.7e-7 +film_diffusion_0 = 0 
+film_diffusion_1 = 1e-6 +pore_diffusion_0 = 0 +pore_diffusion_1 = 1e-11 -channel_cross_section_areas = [0.1,0.1,0.1] +nchannel = 3 +channel_cross_section_areas = [0.1, 0.1, 0.1] exchange_matrix = np.array([ - [[0.0],[0.01],[0.0]], - [[0.02],[0.0],[0.03]], - [[0.0],[0.0],[0.0]] - ]) + [[0.0], [0.01], [0.0]], + [[0.02], [0.0], [0.03]], + [[0.0], [0.0], [0.0]] +]) flow_direction = 1 -class Test_Unit_Operation(unittest.TestCase): - - def __init__(self, methodName='runTest'): - super().__init__(methodName) - - def setUp(self): - self.component_system = ComponentSystem(2) - - def create_source(self): - source = Inlet(self.component_system, name='test') - - return source - - def create_cstr(self): - cstr = Cstr(self.component_system, name='test') - - cstr.const_solid_volume = const_solid_volume - cstr.init_liquid_volume = init_liquid_volume - - cstr.flow_rate = 1 - - return cstr - - def create_tubular_reactor(self): - tube = TubularReactor(self.component_system, name='test') - - tube.length = length - tube.diameter = diameter - tube.axial_dispersion = axial_dispersion - - return tube - - def create_MCT(self, components): - mct = MCT(ComponentSystem(components), nchannel=3, name='test') - - mct.length = length - mct.channel_cross_section_areas = channel_cross_section_areas - mct.axial_dispersion = 0 - mct.flow_direction = flow_direction - - return mct - - def create_lrmwop(self): - lrmwop = LumpedRateModelWithoutPores( - self.component_system, name='test' - ) - - lrmwop.length = length - lrmwop.diameter = diameter - lrmwop.axial_dispersion = axial_dispersion - lrmwop.total_porosity = total_porosity - - return lrmwop - - def create_lrmwp(self): - lrmwp = LumpedRateModelWithPores( - self.component_system, name='test' +@pytest.fixture +def component_system(): + return ComponentSystem(2) + + +@pytest.fixture +def inlet(component_system): + return Inlet(component_system, name="test_inlet") + + +@pytest.fixture +def cstr(component_system): + cstr = Cstr(component_system, name="test_cstr") + cstr.const_solid_volume = const_solid_volume + cstr.init_liquid_volume = init_liquid_volume + cstr.flow_rate = 1 + return cstr + + +@pytest.fixture +def tubular_reactor(component_system): + tubular_reactor = TubularReactor(component_system, name="test_tubular_reactor") + tubular_reactor.length = length + tubular_reactor.diameter = diameter + tubular_reactor.axial_dispersion = axial_dispersion + return tubular_reactor + + +@pytest.fixture +def lrm(component_system): + lrm = LumpedRateModelWithoutPores(component_system, name="test_lrm") + lrm.length = length + lrm.diameter = diameter + lrm.axial_dispersion = axial_dispersion + lrm.total_porosity = total_porosity + return lrm + + +@pytest.fixture +def lrmp(component_system): + lrmp = LumpedRateModelWithPores(component_system, name="test_lrmp") + lrmp.length = length + lrmp.diameter = diameter + lrmp.axial_dispersion = axial_dispersion + lrmp.bed_porosity = bed_porosity + lrmp.particle_radius = particle_radius + lrmp.particle_porosity = particle_porosity + lrmp.film_diffusion = [film_diffusion_0, film_diffusion_1] + return lrmp + + +@pytest.fixture +def grm(components=2): + grm = GeneralRateModel(ComponentSystem(components), name="test_grm") + grm.length = length + grm.diameter = diameter + grm.axial_dispersion = axial_dispersion + grm.bed_porosity = bed_porosity + grm.particle_radius = particle_radius + grm.particle_porosity = particle_porosity + grm.film_diffusion = [film_diffusion_0, film_diffusion_1] + grm.pore_diffusion = [pore_diffusion_0, pore_diffusion_1] + 
return grm + + +@pytest.fixture +def mct(components=1): + mct = MCT(ComponentSystem(components), nchannel=3, name="test_mct") + mct.length = length + mct.channel_cross_section_areas = channel_cross_section_areas + mct.axial_dispersion = axial_dispersion + mct.exchange_matrix = exchange_matrix + return mct + + +@pytest.mark.parametrize("unit_operation, expected_geometry", [ + ("cstr", { + "volume_liquid": total_porosity * volume, + "volume_solid": (1 - total_porosity) * volume, + }), + ("lrm", { + "cross_section_area": cross_section_area, + "total_porosity": total_porosity, + "volume": volume, + "volume_interstitial": total_porosity * volume, + "volume_liquid": total_porosity * volume, + "volume_solid": (1 - total_porosity) * volume, + }), + ("lrmp", { + "cross_section_area": cross_section_area, + "total_porosity": total_porosity, + "volume": volume, + "volume_interstitial": bed_porosity * volume, + "volume_liquid": total_porosity * volume, + "volume_solid": (1 - total_porosity) * volume, + }), + ("mct", { + "volume": length * sum(channel_cross_section_areas), + }), +]) +def test_geometry(unit_operation, expected_geometry, request): + unit = request.getfixturevalue(unit_operation) + + if "total_porosity" in expected_geometry: + assert unit.total_porosity == expected_geometry["total_porosity"] + + if "volume" in expected_geometry: + assert unit.volume == expected_geometry["volume"] + + if "volume_interstitial" in expected_geometry: + assert np.isclose( + unit.volume_interstitial, expected_geometry["volume_interstitial"] ) - lrmwp.length = length - lrmwp.diameter = diameter - lrmwp.axial_dispersion = axial_dispersion - lrmwp.bed_porosity = bed_porosity - lrmwp.particle_porosity = particle_porosity - - return lrmwp - - def test_geometry(self): - cstr = self.create_cstr() - lrmwop = self.create_lrmwop() - lrmwp = self.create_lrmwp() - - self.assertEqual(lrmwop.cross_section_area, cross_section_area) - self.assertEqual(lrmwp.cross_section_area, cross_section_area) - - self.assertEqual(lrmwop.total_porosity, total_porosity) - self.assertEqual(lrmwp.total_porosity, total_porosity) - - self.assertEqual(lrmwop.volume, volume) - self.assertEqual(lrmwp.volume, volume) - - volume_interstitial = total_porosity * volume - self.assertAlmostEqual(lrmwop.volume_interstitial, volume_interstitial) - volume_interstitial = bed_porosity * volume - self.assertAlmostEqual(lrmwp.volume_interstitial, volume_interstitial) - - volume_liquid = total_porosity * volume - self.assertAlmostEqual(cstr.volume_liquid, volume_liquid) - self.assertAlmostEqual(lrmwop.volume_liquid, volume_liquid) - self.assertAlmostEqual(lrmwp.volume_liquid, volume_liquid) - - volume_solid = (1 - total_porosity) * volume - self.assertAlmostEqual(cstr.volume_solid, volume_solid) - self.assertAlmostEqual(lrmwop.volume_solid, volume_solid) - self.assertAlmostEqual(lrmwp.volume_solid, volume_solid) - - lrmwop.cross_section_area = cross_section_area/2 - self.assertAlmostEqual(lrmwop.diameter, diameter/(2**0.5)) - - def test_convection_dispersion(self): - tube = self.create_tubular_reactor() - lrmwp = self.create_lrmwp() - - flow_rate = 0 - tube.length = 1 - tube.cross_section_area = 1 - tube.axial_dispersion = 0 - - with self.assertRaises(ZeroDivisionError): - tube.calculate_interstitial_velocity(flow_rate) - with self.assertRaises(ZeroDivisionError): - tube.calculate_superficial_velocity(flow_rate) - with self.assertRaises(ZeroDivisionError): - tube.NTP(flow_rate) - - flow_rate = 2 - tube.axial_dispersion = 3 - 
self.assertAlmostEqual(tube.calculate_interstitial_velocity(flow_rate), 2) - self.assertAlmostEqual(tube.calculate_interstitial_rt(flow_rate), 0.5) - self.assertAlmostEqual(tube.calculate_superficial_velocity(flow_rate), 2) - self.assertAlmostEqual(tube.calculate_superficial_rt(flow_rate), 0.5) - self.assertAlmostEqual(tube.NTP(flow_rate), 1/3) - - tube.set_axial_dispersion_from_NTP(1/3, 2) - self.assertAlmostEqual(tube.axial_dispersion, 3) - - flow_rate = 2 - lrmwp.length = 1 - lrmwp.bed_porosity = 0.5 - lrmwp.cross_section_area = 1 - self.assertAlmostEqual(lrmwp.calculate_interstitial_velocity(flow_rate), 4) - self.assertAlmostEqual(lrmwp.calculate_interstitial_rt(flow_rate), 0.25) - self.assertAlmostEqual(lrmwp.calculate_superficial_velocity(flow_rate), 2) - self.assertAlmostEqual(lrmwp.calculate_superficial_rt(flow_rate), 0.5) - - def test_poly_properties(self): - source = self.create_source() - - ref = np.array([[1, 0, 0, 0], [1, 0, 0, 0]]) - source.c = 1 - np.testing.assert_equal(source.c, ref) - source.c = [1, 1] - np.testing.assert_equal(source.c, ref) - - ref = np.array([[1, 0, 0, 0], [2, 0, 0, 0]]) - source.c = [1, 2] - np.testing.assert_equal(source.c, ref) - source.c = [[1, 0], [2, 0]] - np.testing.assert_equal(source.c, ref) - - ref = np.array([[1, 2, 0, 0], [3, 4, 0, 0]]) - source.c = [[1, 2], [3, 4]] - np.testing.assert_equal(source.c, ref) - source.c = ref - np.testing.assert_equal(source.c, ref) - - cstr = self.create_cstr() - - ref = np.array([1, 0, 0, 0]) - cstr.flow_rate = 1 - np.testing.assert_equal(cstr.flow_rate, ref) - cstr.flow_rate = [1, 0] - np.testing.assert_equal(cstr.flow_rate, ref) - - ref = np.array([1, 1, 0, 0]) - cstr.flow_rate = [1, 1] - np.testing.assert_equal(cstr.flow_rate, ref) - cstr.flow_rate = ref - np.testing.assert_equal(cstr.flow_rate, ref) - - def test_parameters(self): - """ - Notes - ----- - Currently, only getting parameters is tested. Should also test if - setting works. For this, adsorption parameters should be provided. 
- """ - cstr = self.create_cstr() - parameters_expected = { - 'flow_rate': np.array([1, 0, 0, 0]), - 'init_liquid_volume': init_liquid_volume, - 'flow_rate_filter': 0, - 'c': [0, 0], - 'q': [], - 'const_solid_volume': const_solid_volume, + if "volume_liquid" in expected_geometry: + assert np.isclose(unit.volume_liquid, expected_geometry["volume_liquid"]) + + if "volume_solid" in expected_geometry: + assert np.isclose(unit.volume_solid, expected_geometry["volume_solid"]) + + if "cross_section_area" in expected_geometry: + assert unit.cross_section_area == expected_geometry["cross_section_area"] + + unit.cross_section_area = cross_section_area / 2 + assert np.isclose(unit.diameter, diameter / (2**0.5)) + + +@pytest.mark.parametrize("input_c, expected_c", [ + (1, np.array([[1, 0, 0, 0], [1, 0, 0, 0]])), + ([1, 1], np.array([[1, 0, 0, 0], [1, 0, 0, 0]])), + ([1, 2], np.array([[1, 0, 0, 0], [2, 0, 0, 0]])), + ([[1, 0], [2, 0]], np.array([[1, 0, 0, 0], [2, 0, 0, 0]])), + ([[1, 2], [3, 4]], np.array([[1, 2, 0, 0], [3, 4, 0, 0]])), +]) +def test_polynomial_inlet_concentration(inlet, input_c, expected_c): + inlet.c = input_c + np.testing.assert_equal(inlet.c, expected_c) + + +@pytest.mark.parametrize("unit_operation, input_flow_rate, expected_flow_rate", [ + ("inlet", 1, np.array([1, 0, 0, 0])), + ("inlet", [1, 0], np.array([1, 0, 0, 0])), + ("inlet", [1, 1], np.array([1, 1, 0, 0])), + ("cstr", 1, np.array([1, 0, 0, 0])), + ("cstr", [1, 0], np.array([1, 0, 0, 0])), + ("cstr", [1, 1], np.array([1, 1, 0, 0])), +]) +def test_polynomial_flow_rate( + unit_operation, + input_flow_rate, + expected_flow_rate, + request + ): + unit = request.getfixturevalue(unit_operation) + unit.flow_rate = input_flow_rate + np.testing.assert_equal(unit.flow_rate, expected_flow_rate) + + +@pytest.mark.parametrize("unit_operation, expected_parameters", [ + ("cstr", { + "flow_rate": np.array([1, 0, 0, 0]), + "init_liquid_volume": init_liquid_volume, + "flow_rate_filter": 0, + "c": [0, 0], + "q": None, + "const_solid_volume": const_solid_volume, + }), + ("tubular_reactor", { + "length": length, + "diameter": diameter, + "axial_dispersion": [axial_dispersion, axial_dispersion], + "flow_direction": flow_direction, + "c": [0, 0], + "discretization": { + "ncol": 100, + "use_analytic_jacobian": True, + "reconstruction": "WENO", + "weno": { + "boundary_model": 0, + "weno_eps": 1e-10, + "weno_order": 3 + }, + "consistency_solver": { + "solver_name": "LEVMAR", + "init_damping": 0.01, + "min_damping": 0.0001, + "max_iterations": 50, + "subsolvers": "LEVMAR" + } } - - np.testing.assert_equal(parameters_expected, cstr.parameters) - - sec_dep_parameters_expected = { - 'flow_rate': np.array([1, 0, 0, 0]), - 'flow_rate_filter': 0, + }), + ("lrm", { + "length": length, + "diameter": diameter, + "total_porosity": total_porosity, + "axial_dispersion": [axial_dispersion, axial_dispersion], + "flow_direction": flow_direction, + "c": [0, 0], + "q": None, + "discretization": { + "ncol": 100, + "use_analytic_jacobian": True, + "reconstruction": "WENO", + "weno": { + "boundary_model": 0, + "weno_eps": 1e-10, + "weno_order": 3 + }, + "consistency_solver": { + "solver_name": "LEVMAR", + "init_damping": 0.01, + "min_damping": 0.0001, + "max_iterations": 50, + "subsolvers": "LEVMAR" + } } - np.testing.assert_equal( - sec_dep_parameters_expected, cstr.section_dependent_parameters - ) - - poly_parameters = { - 'flow_rate': np.array([1, 0, 0, 0]), + }), + ("lrmp", { + "length": length, + "diameter": diameter, + "bed_porosity": bed_porosity, + 
"axial_dispersion": [axial_dispersion, axial_dispersion], + "pore_accessibility": [1, 1], + "film_diffusion": [film_diffusion_0, film_diffusion_1], + "particle_radius": particle_radius, + "particle_porosity": particle_porosity, + "flow_direction": flow_direction, + "c": [0, 0], + "cp": [0, 0], + "q": None, + "discretization": { + "ncol": 100, + "par_geom": "SPHERE", + "use_analytic_jacobian": True, + "gs_type": True, + "max_krylov": 0, + "max_restarts": 10, + "schur_safety": 1e-08, + "reconstruction": "WENO", + "weno": { + "boundary_model": 0, + "weno_eps": 1e-10, + "weno_order": 3 + }, + "consistency_solver": { + "solver_name": "LEVMAR", + "init_damping": 0.01, + "min_damping": 0.0001, + "max_iterations": 50, + "subsolvers": "LEVMAR" + } } - np.testing.assert_equal( - poly_parameters, cstr.polynomial_parameters - ) - - self.assertEqual(cstr.required_parameters, ['init_liquid_volume']) - - - def test_MCT(self): - """ - Notes - ----- - Tests Parameters, Volumes and Attributes depending on nchannel. Should be later integrated into general testing workflow. - """ - total_porosity = 1 - - mct = self.create_MCT(1) - - mct.exchange_matrix = exchange_matrix - - parameters_expected = { - 'c': np.array([[0., 0., 0.]]), - 'axial_dispersion' : 0, - 'channel_cross_section_areas' : channel_cross_section_areas, - 'length' : length, - 'exchange_matrix': exchange_matrix, - 'flow_direction' : 1, - 'nchannel' : 3 + }), + ("grm", { + "length": length, + "diameter": diameter, + "bed_porosity": bed_porosity, + "axial_dispersion": [axial_dispersion, axial_dispersion], + "pore_accessibility": [1, 1], + "film_diffusion": [film_diffusion_0, film_diffusion_1], + "particle_radius": particle_radius, + "particle_porosity": particle_porosity, + "pore_diffusion": [pore_diffusion_0, pore_diffusion_1], + "surface_diffusion": None, + "flow_direction": flow_direction, + "c": [0, 0], + "cp": [0, 0], + "q": None, + "discretization": { + "ncol": 100, + "par_geom": "SPHERE", + "npar": 5, + "par_disc_type": "EQUIDISTANT_PAR", + "par_boundary_order": 2, + "fix_zero_surface_diffusion": False, + "use_analytic_jacobian": True, + "gs_type": True, + "max_krylov": 0, + "max_restarts": 10, + "schur_safety": 1e-08, + "reconstruction": "WENO", + "weno": { + "boundary_model": 0, + "weno_eps": 1e-10, + "weno_order": 3 + }, + "consistency_solver": { + "solver_name": "LEVMAR", + "init_damping": 0.01, + "min_damping": 0.0001, + "max_iterations": 50, + "subsolvers": "LEVMAR" + } } - np.testing.assert_equal(parameters_expected, {key: value for key, value in mct.parameters.items() if key != 'discretization'}) - - volume = length*sum(channel_cross_section_areas) - volume_liquid = volume*total_porosity - volume_solid = (total_porosity-1)*volume - - self.assertAlmostEqual(mct.volume_liquid, volume_liquid) - self.assertAlmostEqual(mct.volume_solid, volume_solid) - - with self.assertRaises(ValueError): - mct.exchange_matrix = np.array([[ - [0.0, 0.01, 0.0], - [0.02, 0.0, 0.03], - [0.0, 0.0, 0.0] - ]]) - - mct.nchannel = 2 - with self.assertRaises(ValueError): - mct.exchange_matrix - mct.channel_cross_section_areas - - self.assertTrue(mct.nchannel*mct.component_system.n_comp == mct.c.size) - - mct2 = self.create_MCT(2) + }), + ("mct", { + "nchannel": nchannel, + "length": length, + "channel_cross_section_areas": channel_cross_section_areas, + "axial_dispersion": nchannel * [axial_dispersion], + "exchange_matrix": exchange_matrix, + "flow_direction": 1, + "c": [[0, 0, 0]], + "discretization": { + "ncol": 100, + "use_analytic_jacobian": True, + 
"reconstruction": "WENO", + "weno": { + "boundary_model": 0, + "weno_eps": 1e-10, + "weno_order": 3 + }, + "consistency_solver": { + "solver_name": "LEVMAR", + "init_damping": 0.01, + "min_damping": 0.0001, + "max_iterations": 50, + "subsolvers": "LEVMAR" + } + } + }), +]) +def test_parameters(unit_operation, expected_parameters, request): + unit = request.getfixturevalue(unit_operation) + np.testing.assert_equal(expected_parameters, unit.parameters) + + +@pytest.mark.parametrize("unit_operation, flow_rate, expected_velocity", [ + ("tubular_reactor", 2, 2), + ("lrmp", 2, 4), + ("tubular_reactor", 0, ZeroDivisionError), + ("lrmp", 0, ZeroDivisionError), +]) +def test_interstitial_velocity(unit_operation, flow_rate, expected_velocity, request): + unit = request.getfixturevalue(unit_operation) + unit.length = 1 + unit.cross_section_area = 1 + unit.axial_dispersion = 3 if unit_operation == "tubular_reactor" else [3, 2] + + if unit_operation == "lrmp": + unit.bed_porosity = 0.5 + + if expected_velocity == ZeroDivisionError: + with pytest.raises(ZeroDivisionError): + unit.calculate_interstitial_velocity(flow_rate) + else: + assert np.isclose( + unit.calculate_interstitial_velocity(flow_rate), expected_velocity + ) - with self.assertRaises(ValueError): - mct2.exchange_matrix = np.array([[ - [0.0, 0.01, 0.0], - [0.02, 0.0, 0.03], - [0.0, 0.0, 0.0] - ], - [ - [0.0, 0.01, 0.0], - [0.02, 0.0, 0.03], - [0.0, 0.0, 0.0] - ]]) +@pytest.mark.parametrize("unit_operation, flow_rate, expected_velocity", [ + ("tubular_reactor", 2, 2), + ("lrmp", 2, 2), + ("tubular_reactor", 0, ZeroDivisionError), + ("lrmp", 0, ZeroDivisionError), +]) +def test_superficial_velocity(unit_operation, flow_rate, expected_velocity, request): + unit = request.getfixturevalue(unit_operation) + unit.length = 1 + unit.cross_section_area = 1 + + if unit_operation == "lrmp": + unit.bed_porosity = 0.5 + + if expected_velocity == ZeroDivisionError: + with pytest.raises(ZeroDivisionError): + unit.calculate_superficial_velocity(flow_rate) + else: + assert np.isclose( + unit.calculate_superficial_velocity(flow_rate), expected_velocity + ) -if __name__ == '__main__': - unittest.main() +@pytest.mark.parametrize("unit_operation, flow_rate, expected_ntp", [ + ("tubular_reactor", 2, [1/3, 1/3]), + ("tubular_reactor", 0, ZeroDivisionError), +]) +def test_ntp(unit_operation, flow_rate, expected_ntp, request): + unit = request.getfixturevalue(unit_operation) + unit.length = 1 + unit.cross_section_area = 1 + unit.axial_dispersion = 3 + + if expected_ntp == ZeroDivisionError: + with pytest.raises(ZeroDivisionError): + unit.NTP(flow_rate) + else: + np.testing.assert_almost_equal(unit.NTP(flow_rate), expected_ntp) + + +@pytest.mark.parametrize( + "unit_operation, flow_rate, axial_dispersion, expected_bodenstein", [ + ("tubular_reactor", 2, 3, [2/3, 2/3]), + ("lrmp", 2, [3, 2], [4/3, 2]), + ("lrmp", 2, [1, 2], [4, 2]), + ] +) +def test_bodenstein_number( + unit_operation, + flow_rate, + axial_dispersion, + expected_bodenstein, + request + ): + unit = request.getfixturevalue(unit_operation) + unit.length = 1 + unit.cross_section_area = 1 + unit.axial_dispersion = axial_dispersion + + if unit_operation == "lrmp": + unit.bed_porosity = 0.5 + + np.testing.assert_almost_equal( + unit.calculate_bodenstein_number(flow_rate), expected_bodenstein + ) + + +if __name__ == "__main__": + pytest.main([__file__])