2 changes: 1 addition & 1 deletion Jenkinsfile
@@ -38,7 +38,7 @@ pipeline {
pip3 install -e .
export MPLBACKEND="agg"
export OPENBLAS_NUM_THREADS=1
pytest --cov-report term-missing:skip-covered
pytest --cov-report term-missing:skip-covered --mocks all
'''
}
discoverGitReferenceBuild()
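With --mocks all, the CI run loads every available mock. Locally, the same behaviour can be reproduced with pytest --mocks all, or narrowed to specific packages (e.g. pytest --mocks denoise reconst); the accepted names are defined by the plugin's pytest_addoption hook further down in this diff.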
22 changes: 22 additions & 0 deletions scilpy/denoise/tests/fixtures/mocks.py
@@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-

import nibabel as nib
import numpy as np
import pytest


@pytest.fixture(scope='function')
def bilateral_filtering(mock_creator, expected_results):
"""
Mock to patch the angle aware bilateral filtering function.
Needs to be namespace patched by scripts.
"""
def _mock_side_effect(*args, **kwargs):
if expected_results is None or len(expected_results) == 0:
return None

return nib.load(expected_results).get_fdata(dtype=np.float32)

return mock_creator("scilpy.denoise.bilateral_filtering",
"angle_aware_bilateral_filtering",
side_effect=_mock_side_effect)
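For context, a hypothetical test sketch showing how this mock could be collected and re-targeted to a script's namespace; the script name, patch path and reference file below are illustrative only, not part of this PR:

import pytest


@pytest.fixture
def expected_results():
    # Resolved by the bilateral_filtering fixture above: a (hypothetical)
    # reference image whose data the mocked filter will return.
    return "expected_asym_sh.nii.gz"


def test_asym_filtering_script(script_runner, mock_collector):
    # Re-target the patch to the namespace the script imports the function
    # into, as required by "Needs to be namespace patched by scripts".
    mocks = mock_collector(
        ["bilateral_filtering"],
        patch_path="scripts.scil_execute_angle_aware_bilateral_filtering")

    ret = script_runner.run("scil_execute_angle_aware_bilateral_filtering.py",
                            "in_sh.nii.gz", "out_sh.nii.gz")
    assert ret.success
    if mocks is not None:
        mocks["bilateral_filtering"].assert_called_once()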
31 changes: 31 additions & 0 deletions scilpy/reconst/noddi.py
@@ -0,0 +1,31 @@
# -*- coding: utf-8 -*-

import amico
from os.path import join
import tempfile


def get_evaluator(dwi, scheme_filename, mask, para_diff, iso_diff,
lambda1, lambda2, intra_vol_fraction, intra_orientation_dist,
kernels_dir=None):

with tempfile.TemporaryDirectory() as tmp_dir:
# Setup AMICO
amico.core.setup()
ae = amico.Evaluation('.', '.')
ae.load_data(dwi, scheme_filename, mask_filename=mask)
# Compute the response functions
ae.set_model("NODDI")

ae.model.set(para_diff, iso_diff, intra_vol_fraction,
intra_orientation_dist, False)

ae.set_solver(lambda1=lambda1, lambda2=lambda2)

ae.set_config('ATOMS_path',
kernels_dir or join(tmp_dir, 'kernels',
ae.model.id))

ae.generate_kernels(regenerate=not kernels_dir)

return ae
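For context, a minimal usage sketch of get_evaluator; the file names, diffusivities and regularization weights are example values, mirroring the call made in scil_NODDI_maps.py later in this diff:

import numpy as np

from scilpy.reconst.noddi import get_evaluator

# Discretization grids for the NODDI kernels, as used by the script below.
intra_vol_frac = np.linspace(0.1, 0.99, 12)
intra_orient_distr = np.hstack((np.array([0.03, 0.06]),
                                np.linspace(0.09, 0.99, 10)))

ae = get_evaluator("dwi.nii.gz", "dwi.scheme", "mask.nii.gz",
                   para_diff=1.7e-3, iso_diff=3.0e-3,
                   lambda1=0.5, lambda2=1e-3,
                   intra_vol_fraction=intra_vol_frac,
                   intra_orientation_dist=intra_orient_distr,
                   kernels_dir="kernels/")

ae.load_kernels()
ae.fit()
ae.save_results()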
14 changes: 14 additions & 0 deletions scilpy/reconst/tests/fixtures/mocks.py
@@ -0,0 +1,14 @@
# -*- coding: utf-8 -*-

import pytest


@pytest.fixture(scope='function')
def amico_evaluator(mock_creator):
"""
Mock to patch amico's kernel generation and fitting.
Does not need to be namespace patched by scripts.
"""
return mock_creator("amico", "Evaluation",
mock_attributes=["fit", "generate_kernels",
"load_kernels", "save_results"])
Empty file added scilpy/tests/__init__.py
Empty file.
2 changes: 2 additions & 0 deletions scilpy/tests/arrays.py
@@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-

import copy

import numpy as np
39 changes: 39 additions & 0 deletions scilpy/tests/checks.py
@@ -0,0 +1,39 @@
# -*- coding: utf-8 -*-

import numpy as np


def _nd_array_match(_arr1, _arr2, _rtol=1E-05, _atol=1E-8):
return np.allclose(_arr1, _arr2, rtol=_rtol, atol=_atol)


def _mse_metrics(_arr1, _arr2):
_mse = (_arr1 - _arr2) ** 2.
return np.mean(_mse), np.max(_mse)


def assert_images_close(img1, img2):
dtype = img1.header.get_data_dtype()

assert np.allclose(img1.affine, img2.affine), "Image affines don't match"

assert _nd_array_match(img1.get_fdata(dtype=dtype),
img2.get_fdata(dtype=dtype)), \
"Images data don't match. MSE : {} | max SE : {}".format(
*_mse_metrics(img1.get_fdata(dtype=dtype),
img2.get_fdata(dtype=dtype)))



def assert_images_not_close(img1, img2, affine_must_match=True):
dtype = img1.header.get_data_dtype()

if affine_must_match:
assert np.allclose(img1.affine, img2.affine), \
"Images affines don't match"

assert not _nd_array_match(img1.get_fdata(dtype=dtype),
img2.get_fdata(dtype=dtype)), \
"Images data should not match. MSE : {} | max SE : {}".format(
*_mse_metrics(img1.get_fdata(dtype=dtype),
img2.get_fdata(dtype=dtype)))
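For context, a minimal usage sketch of these helpers with hypothetical file names:

import nibabel as nib

from scilpy.tests.checks import assert_images_close, assert_images_not_close

result = nib.load("out_denoised.nii.gz")
expected = nib.load("expected_denoised.nii.gz")

# Passes only if both the affines and the voxel data match within tolerance.
assert_images_close(result, expected)

# Passes only if the voxel data differ (affines must still match by default).
assert_images_not_close(result, nib.load("in_noisy.nii.gz"))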
1 change: 1 addition & 0 deletions scilpy/tests/dict.py
@@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-

dict_to_average = {
"sub-01": {
175 changes: 175 additions & 0 deletions scilpy/tests/plugin.py
@@ -0,0 +1,175 @@
# -*- coding: utf-8 -*-

from glob import glob
from os.path import realpath
from unittest.mock import DEFAULT
import pytest
import warnings


"""
Scilpy Pytest plugin. As of now, this plugin is only used to handle mocking,
but it can be extended to hook into other parts of the testing process.

Mocking interface
-----------------

Writing mocking fixtures is long and tedious. The interface provided by
unittest.mock, and the overhead added by pytest-mock, are cumbersome. Moreover,
with the default pytest framework, it is impossible to share mocks between
tests that are not located within the same module.

This plugin registers early with pytest and can thus investigate the modules'
structure and load fixtures into the test namespaces before they are executed.

- It first hooks into pytest_addoption to add a new command-line option,
--mocks, which loads mocks for any or all modules in the scilpy package.

- It then hooks into pytest_configure to load the mocks activated by the user
into the test session. Note that this way, all mocks associated with a module
get injected into pytest's namespace, which might not be granular enough for
some use cases (see below).

To ease mock creation and collection in test cases, and to allow for a more
granular selection of mocks from mocking modules, this plugin provides two
fixtures:

- mock_creator : the mock_creator fixture exposes the base interface of
unittest.mock patchers, but with a more convenient syntax. It
is able to patch multiple attributes at once, and can be
configured to create the patched object if it does not exist.

- mock_collector : the mock_collector fixture is a helper function that is used
to collect specific mocks from mocking modules. It is also
used to modify the namespace into which the mocked objects
get patched. This is required for some mocks to be used with
scripts, when their import is relative (e.g. from . import).

Creating a new mock is done using the mock_creator fixture. All mocks must be
placed inside the scilpy library, in the tests directories of their respective
modules, in fixtures/mocks.py. This is how they get discovered by the plugin.

- A mock fixture must have a relevant name (e.g. amico_evaluator patches
several parts of the amico.Evaluation class). Its return value is the result
of calling the mock_creator fixture.

- The mock_creator fixture does not need to be imported; it is provided
automatically by the pytest framework. Simply add mock_creator as a parameter
to the mock fixture function.

Using mocks in tests is done using the mock_collector fixture. Like the
mock_creator, it is provided automatically by the pytest framework. Simply
add mock_collector as a parameter to the test function that requires mocking.
To use the mocks, call the mock_collector fixture with the list of mock names
to use. Additionally, the mock_collector fixture can be used to modify the
namespace into which the mocks are injected, by providing a patch_path argument
as a second parameter. The returned dictionary indexes loaded mocks by their
name and can be used to assert their usage throughout the test case.
"""


AUTOMOCK = DEFAULT

# Load mock modules from all library tests

MOCK_MODULES = list(_module.replace("/", ".").replace(".py", "")
for _module in glob("**/tests/fixtures/mocks.py",
root_dir=realpath('.'),
recursive=True))

MOCK_PACKAGES = list(m.split(".")[-4] for m in MOCK_MODULES)


# Helper function to select mocks

def _get_active_mocks(include_list=None, exclude_list=None):
"""
Returns a list of all packages with active mocks in the current session

Parameters
----------
include_list: iterable or None
List of scilpy packages to consider

exclude_list: iterable or None
List of scilpy packages to exclude from consideration

Returns
-------
list: list of packages with active mocks
"""

def _active_names():
def _exclude(_l):
if exclude_list is None:
return _l
return filter(lambda _i: _i not in exclude_list, _l)

if include_list is None or len(include_list) == 0:
return []

if "all" in include_list:
return _exclude(MOCK_PACKAGES)

return _exclude([_m for _m in include_list if _m in MOCK_PACKAGES])

return list(map(lambda _m: _m[1],
filter(lambda _m: _m[0] in _active_names(),
zip(MOCK_PACKAGES, MOCK_MODULES))))


# Create hooks and fixtures to handle mocking from pytest command line

def pytest_addoption(parser):
parser.addoption(
"--mocks",
nargs='+',
choices=["all"] + MOCK_PACKAGES,
help="Load mocks for scilpy packages to accelerate"
"tests and prevent testing external dependencies")


def pytest_configure(config):
_toggle_mocks = config.getoption("--mocks")
for _mock_mod in _get_active_mocks(_toggle_mocks):
config.pluginmanager.import_plugin(_mock_mod)


@pytest.fixture
def mock_collector(request):
"""
Pytest fixture to collect a specific set of mocks for a test case
"""
def _collector(mock_names, patch_path=None):
try:
return {_name: request.getfixturevalue(_name)(patch_path)
for _name in mock_names}
except pytest.FixtureLookupError:
warnings.warn(f"Some fixtures in {mock_names} cannot be found.")
return None
return _collector


@pytest.fixture
def mock_creator(mocker):
"""
Pytest fixture to create a namespace patchable mock
"""
def _mocker(base_module, object_name, side_effect=None,
mock_attributes=None):

def _patcher(module_name=None):
_base = base_module if module_name is None else module_name
_mock_target = "{}.{}".format(_base, object_name)

if mock_attributes is not None:
return mocker.patch.multiple(_mock_target,
**{a: AUTOMOCK
for a in mock_attributes})

return mocker.patch(_mock_target, side_effect=side_effect,
create=True)

return _patcher

return _mocker
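For context, an end-to-end sketch of the workflow described in the module docstring above; the package, module, fixture and script names are illustrative only:

# scilpy/somepackage/tests/fixtures/mocks.py -- declare a mock fixture so the
# plugin can discover it:
import pytest


@pytest.fixture(scope='function')
def heavy_function(mock_creator):
    """Mock to patch a (hypothetical) expensive function."""
    return mock_creator("scilpy.somepackage.heavy_module", "heavy_function",
                        side_effect=lambda *args, **kwargs: None)


# scripts/tests/test_some_script.py -- collect the mock, re-targeted to the
# script's namespace, and assert it was used:
def test_some_script(script_runner, mock_collector):
    mocks = mock_collector(["heavy_function"], patch_path="scripts.some_script")

    ret = script_runner.run("some_script.py", "in.nii.gz", "out.nii.gz")
    assert ret.success
    if mocks is not None:
        mocks["heavy_function"].assert_called_once()

Running pytest --mocks somepackage (or --mocks all, as the Jenkinsfile now does) is what makes the heavy_function fixture visible to the test session.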
1 change: 0 additions & 1 deletion scilpy/version.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-

import itertools
import glob
import os

51 changes: 18 additions & 33 deletions scripts/scil_NODDI_maps.py
@@ -16,7 +16,6 @@
import sys
import tempfile

import amico
from dipy.io.gradients import read_bvals_bvecs
import numpy as np

@@ -27,6 +26,7 @@
assert_output_dirs_exist_and_empty,
redirect_stdout_c)
from scilpy.gradients.bvec_bval_tools import fsl2mrtrix, identify_shells
from scilpy.reconst.noddi import get_evaluator

EPILOG = """
Reference:
@@ -126,48 +126,33 @@ def main():
'at {}.'.format(len(shells_centroids), shells_centroids))

with redirected_stdout:
# Load the data
amico.core.setup()
ae = amico.Evaluation('.', '.')
ae.load_data(args.in_dwi,
tmp_scheme_filename,
mask_filename=args.mask)
# Compute the response functions
ae.set_model("NODDI")

intra_vol_frac = np.linspace(0.1, 0.99, 12)
intra_orient_distr = np.hstack((np.array([0.03, 0.06]),
np.linspace(0.09, 0.99, 10)))

ae.model.set(args.para_diff, args.iso_diff,
intra_vol_frac, intra_orient_distr,
False)
ae.set_solver(lambda1=args.lambda1, lambda2=args.lambda2)

# The kernels are, by default, set to be in the current directory
# Depending on the choice, manually change the saving location
if args.save_kernels:
kernels_dir = os.path.join(args.save_kernels)
regenerate_kernels = True
elif args.load_kernels:
kernels_dir = os.path.join(args.load_kernels)
regenerate_kernels = False
else:
kernels_dir = os.path.join(tmp_dir.name, 'kernels', ae.model.id)
regenerate_kernels = True

ae.set_config('ATOMS_path', kernels_dir)
ae.set_config('OUTPUT_path', args.out_dir)
ae.generate_kernels(regenerate=regenerate_kernels)
# Each of these is either None or a path; the first non-empty value is
# kept. If it is a valid kernels path, everything will work in get_evaluator.
kernels_dir = (args.save_kernels or
args.load_kernels or
os.path.join(tmp_dir.name, 'kernels'))

# Load the data
amico = get_evaluator(args.in_dwi, tmp_scheme_filename, args.mask,
args.para_diff, args.iso_diff,
args.lambda1, args.lambda2,
intra_vol_frac, intra_orient_distr,
kernels_dir=kernels_dir)

if args.compute_only:
return

ae.load_kernels()
amico.load_kernels()

# Model fit
ae.fit()
amico.fit()

# Save the results
ae.save_results()
amico.save_results()

tmp_dir.cleanup()
