Skip to content

Commit 755039f

Browse files
authored
Replace Nosetest library calls with Pytest in code (nilearn#422)
* Removed use of nose tester from test_check_events_file_uses_tab_separators.py & test_datasets.py * Removed use of nose tester from test_dmtx.py * Removed use of nose tester from test_first_level_model.py * Removed use of nose tester from test_model.py * Removed use of nose tester from test_second_level_model.py * Removed use of nose tester from 3 modules * Removed assert_true & assert_raises from test_utils.py * Removed assert_true & assert_raises from test_thresholding.py * Removed more nose test based calls from * Placate Flake8 * Fixed incorrect word * Pinned Pytest version to 3.9 in certain builds to use tmp_path fixture * Pinned Pytest version to 3.9 in certain builds to use tmp_path fixture - 2 * Tweak version spec syntax * request_mocker fixture available for module wide access * Use pip to force install compatible pytest version - Removed compatibility shim for tmp_path. * Corrected syntax of specifying Pypi version * Testing with pytest 4.0 * Pytest installation spec are no longer at multiple places * Pytest installation spec in .travis.yml ensuring one place * Removed pathlib import (unnecessary, & Py2 incompatible) * Pinned Pytest version to 3.9 in certain builds to use tmp_path fixture -3 * Replaced lingering old tmpdir with pytest's tmp_path * Replaced assert_equal calls with Pytest compatible assert * Converted Path object to str to comply with existing tests * Now using pytest to skip a test to fix incorrect call during testing * Py2 DeprecationWarning not raised with Pytest==3.9.1 so increasing it * Added proper spacing
1 parent 3bafb76 commit 755039f

17 files changed

+485
-494
lines changed

.travis.yml

+3-1
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,8 @@ matrix:
2929
- env: DISTRIB="conda" PYTHON_VERSION="2.7"
3030
NUMPY_VERSION="1.11.2" SCIPY_VERSION="0.19"
3131
SCIKIT_LEARN_VERSION="0.18" MATPLOTLIB_VERSION="1.5.1"
32-
NIBABEL_VERSION="2.0.2" PANDAS_VERSION="*" COVERAGE="true"
32+
NIBABEL_VERSION="2.0.2" PANDAS_VERSION="*"
33+
COVERAGE="true"
3334
# Python 3.4 with intermediary versions
3435
- env: DISTRIB="conda" PYTHON_VERSION="3.4"
3536
NUMPY_VERSION="1.11.0" SCIPY_VERSION="0.17"
@@ -47,6 +48,7 @@ matrix:
4748
PANDAS_VERSION="*" BOTO3=true
4849
install:
4950
- source continuous_integration/install.sh
51+
- pip install pytest>3.9 pytest-cov
5052

5153
before_script:
5254
- make clean

continuous_integration/install.sh

+2-3
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ create_new_venv() {
2222
deactivate
2323
virtualenv --system-site-packages testvenv
2424
source testvenv/bin/activate
25-
pip install nose pytest pytest-cov
25+
pip install nose pytest==3.9.1 pytest-cov
2626
}
2727

2828
print_conda_requirements() {
@@ -33,7 +33,7 @@ print_conda_requirements() {
3333
# if yes which version to install. For example:
3434
# - for numpy, NUMPY_VERSION is used
3535
# - for scikit-learn, SCIKIT_LEARN_VERSION is used
36-
TO_INSTALL_ALWAYS="pip nose pytest pytest-cov libgfortran=3.0=0 nomkl"
36+
TO_INSTALL_ALWAYS="pip nose libgfortran=3.0=0 nomkl"
3737
REQUIREMENTS="$TO_INSTALL_ALWAYS"
3838
TO_INSTALL_MAYBE="python numpy scipy matplotlib scikit-learn pandas"
3939
for PACKAGE in $TO_INSTALL_MAYBE; do
@@ -112,7 +112,6 @@ elif [[ "$DISTRIB" == "conda" ]]; then
112112
fi
113113
pip install nilearn
114114

115-
116115
else
117116
echo "Unrecognized distribution ($DISTRIB); cannot setup travis environment."
118117
exit 1

nistats/conftest.py

+10
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,10 @@
22

33
import numpy as np
44
import pytest
5+
56
from _pytest.doctest import DoctestItem
7+
from nilearn.datasets import func, utils
8+
from nilearn.datasets.tests import test_utils as tst
69

710
try:
811
import matplotlib
@@ -30,3 +33,10 @@ def pytest_collection_modifyitems(items):
3033
for item in items:
3134
if isinstance(item, DoctestItem):
3235
item.add_marker(skip_marker)
36+
37+
38+
@pytest.fixture()
39+
def request_mocker():
40+
tst.setup_mock(utils, func)
41+
yield
42+
tst.teardown_mock(utils, func)

nistats/tests/test_check_events_file_uses_tab_separators.py

+9-12
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,7 @@
11
import pandas as pd
2+
import pytest
23

34
from nibabel.tmpdirs import InTemporaryDirectory
4-
from nose.tools import (assert_raises,
5-
assert_true,
6-
)
7-
85
from nistats.utils import _check_events_file_uses_tab_separators
96

107

@@ -34,11 +31,11 @@ def _create_test_file(temp_csv, test_data, delimiter):
3431

3532
def _run_test_for_invalid_separator(filepath, delimiter_name):
3633
if delimiter_name not in ('tab', 'comma'):
37-
with assert_raises(ValueError):
34+
with pytest.raises(ValueError):
3835
_check_events_file_uses_tab_separators(events_files=filepath)
3936
else:
4037
result = _check_events_file_uses_tab_separators(events_files=filepath)
41-
assert_true(result is None)
38+
assert result is None
4239

4340

4441
def test_for_invalid_separator():
@@ -59,7 +56,7 @@ def test_with_2D_dataframe():
5956
events_pandas_dataframe = pd.DataFrame(data_for_pandas_dataframe)
6057
result = _check_events_file_uses_tab_separators(
6158
events_files=events_pandas_dataframe)
62-
assert_true(result is None)
59+
assert result is None
6360

6461

6562
def test_with_1D_dataframe():
@@ -68,19 +65,19 @@ def test_with_1D_dataframe():
6865
events_pandas_dataframe = pd.DataFrame(dataframe_)
6966
result = _check_events_file_uses_tab_separators(
7067
events_files=events_pandas_dataframe)
71-
assert_true(result is None)
68+
assert result is None
7269

7370
def test_for_invalid_filepath():
7471
filepath = 'junk_file_path.csv'
7572
result = _check_events_file_uses_tab_separators(events_files=filepath)
76-
assert_true(result is None)
73+
assert result is None
7774

7875

7976
def test_for_pandas_dataframe():
8077
events_pandas_dataframe = pd.DataFrame([['a', 'b', 'c'], [0, 1, 2]])
8178
result = _check_events_file_uses_tab_separators(
8279
events_files=events_pandas_dataframe)
83-
assert_true(result is None)
80+
assert result is None
8481

8582

8683
def test_binary_opening_an_image():
@@ -91,7 +88,7 @@ def test_binary_opening_an_image():
9188
temp_img_file = 'temp_img.gif'
9289
with open(temp_img_file, 'wb') as temp_img_obj:
9390
temp_img_obj.write(img_data)
94-
with assert_raises(ValueError):
91+
with pytest.raises(ValueError):
9592
_check_events_file_uses_tab_separators(
9693
events_files=temp_img_file)
9794

@@ -102,7 +99,7 @@ def test_binary_bytearray_of_ints_data():
10299
temp_bin_file = 'temp_bin.bin'
103100
with open(temp_bin_file, 'wb') as temp_bin_obj:
104101
temp_bin_obj.write(temp_data_bytearray_from_ints)
105-
with assert_raises(ValueError):
102+
with pytest.raises(ValueError):
106103
_check_events_file_uses_tab_separators(
107104
events_files=temp_bin_file)
108105

nistats/tests/test_datasets.py

+56-81
Original file line numberDiff line numberDiff line change
@@ -3,15 +3,10 @@
33

44
import numpy as np
55
import pandas as pd
6+
67
from nibabel.tmpdirs import TemporaryDirectory
78
from nilearn._utils.compat import _basestring
8-
from nilearn.datasets import func, utils
9-
from nilearn.datasets.tests import test_utils as tst
109
from nilearn.datasets.utils import _get_dataset_dir
11-
from nose import with_setup
12-
from nose.tools import (assert_equal,
13-
assert_true,
14-
)
1510

1611
from nistats import datasets
1712
from nistats.datasets import fetch_openneuro_dataset_index, \
@@ -21,26 +16,16 @@
2116
datadir = os.path.join(currdir, 'data')
2217

2318

24-
def setup_mock():
25-
return tst.setup_mock(utils, func)
26-
27-
28-
def teardown_mock():
29-
return tst.teardown_mock(utils, func)
30-
31-
32-
@with_setup(setup_mock, teardown_mock)
33-
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
34-
def test_fetch_bids_langloc_dataset():
35-
data_dir = os.path.join(tst.tmpdir, 'bids_langloc_example')
19+
def test_fetch_bids_langloc_dataset(request_mocker, tmp_path):
20+
data_dir = str(tmp_path / 'bids_langloc_example')
3621
os.mkdir(data_dir)
3722
main_folder = os.path.join(data_dir, 'bids_langloc_dataset')
3823
os.mkdir(main_folder)
3924

40-
datadir, dl_files = datasets.fetch_bids_langloc_dataset(tst.tmpdir)
25+
datadir, dl_files = datasets.fetch_bids_langloc_dataset(str(tmp_path))
4126

42-
assert_true(isinstance(datadir, _basestring))
43-
assert_true(isinstance(dl_files, list))
27+
assert isinstance(datadir, _basestring)
28+
assert isinstance(dl_files, list)
4429

4530

4631
def test_select_from_index():
@@ -60,35 +45,35 @@ def test_select_from_index():
6045

6146
# Only 1 subject and not subject specific files get downloaded
6247
new_urls = datasets.select_from_index(urls, n_subjects=1)
63-
assert_true(len(new_urls) == 6)
64-
assert_true(data_prefix + '/sub-yyy.html' not in new_urls)
48+
assert len(new_urls) == 6
49+
assert data_prefix + '/sub-yyy.html' not in new_urls
6550

6651
# 2 subjects and not subject specific files get downloaded
6752
new_urls = datasets.select_from_index(urls, n_subjects=2)
68-
assert_true(len(new_urls) == 9)
69-
assert_true(data_prefix + '/sub-yyy.html' in new_urls)
53+
assert len(new_urls) == 9
54+
assert data_prefix + '/sub-yyy.html' in new_urls
7055
# ALL subjects and not subject specific files get downloaded
7156
new_urls = datasets.select_from_index(urls, n_subjects=None)
72-
assert_true(len(new_urls) == 9)
57+
assert len(new_urls) == 9
7358

7459
# test inclusive filters. Only files with task-rest
7560
new_urls = datasets.select_from_index(
7661
urls, inclusion_filters=['*task-rest*'])
77-
assert_true(len(new_urls) == 2)
78-
assert_true(data_prefix + '/stuff.html' not in new_urls)
62+
assert len(new_urls) == 2
63+
assert data_prefix + '/stuff.html' not in new_urls
7964

8065
# test exclusive filters. only files without ses-01
8166
new_urls = datasets.select_from_index(
8267
urls, exclusion_filters=['*ses-01*'])
83-
assert_true(len(new_urls) == 6)
84-
assert_true(data_prefix + '/stuff.html' in new_urls)
68+
assert len(new_urls) == 6
69+
assert data_prefix + '/stuff.html' in new_urls
8570

8671
# test filter combination. only files with task-rest and without ses-01
8772
new_urls = datasets.select_from_index(
8873
urls, inclusion_filters=['*task-rest*'],
8974
exclusion_filters=['*ses-01*'])
90-
assert_true(len(new_urls) == 1)
91-
assert_true(data_prefix + '/sub-xxx/ses-02_task-rest.txt' in new_urls)
75+
assert len(new_urls) == 1
76+
assert data_prefix + '/sub-xxx/ses-02_task-rest.txt' in new_urls
9277

9378

9479
def test_fetch_openneuro_dataset_index():
@@ -115,13 +100,11 @@ def test_fetch_openneuro_dataset_index():
115100
assert urls == mock_json_content
116101

117102

118-
@with_setup(setup_mock, teardown_mock)
119-
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
120-
def test_fetch_openneuro_dataset():
103+
def test_fetch_openneuro_dataset(request_mocker, tmp_path):
121104
dataset_version = 'ds000030_R1.0.4'
122105
data_prefix = '{}/{}/uncompressed'.format(
123106
dataset_version.split('_')[0], dataset_version)
124-
data_dir = _get_dataset_dir(data_prefix, data_dir=tst.tmpdir,
107+
data_dir = _get_dataset_dir(data_prefix, data_dir=str(tmp_path),
125108
verbose=1)
126109
url_file = os.path.join(data_dir, 'urls.json')
127110
# Prepare url files for subject and filter tests
@@ -138,16 +121,16 @@ def test_fetch_openneuro_dataset():
138121

139122
# Only 1 subject and not subject specific files get downloaded
140123
datadir, dl_files = datasets.fetch_openneuro_dataset(
141-
urls, tst.tmpdir, dataset_version)
142-
assert_true(isinstance(datadir, _basestring))
143-
assert_true(isinstance(dl_files, list))
144-
assert_true(len(dl_files) == 9)
124+
urls, str(tmp_path), dataset_version)
125+
assert isinstance(datadir, _basestring)
126+
assert isinstance(dl_files, list)
127+
assert len(dl_files) == 9
145128

146129

147130
def test_fetch_localizer():
148131
dataset = datasets.fetch_localizer_first_level()
149-
assert_true(isinstance(dataset['events'], _basestring))
150-
assert_true(isinstance(dataset.epi_img, _basestring))
132+
assert isinstance(dataset['events'], _basestring)
133+
assert isinstance(dataset.epi_img, _basestring)
151134

152135

153136
def _mock_original_spm_auditory_events_file():
@@ -174,10 +157,8 @@ def _mock_bids_compliant_spm_auditory_events_file():
174157
return actual_events_data_string, events_filepath
175158

176159

177-
@with_setup(setup_mock, teardown_mock)
178-
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
179-
def test_fetch_language_localizer_demo_dataset():
180-
data_dir = tst.tmpdir
160+
def test_fetch_language_localizer_demo_dataset(request_mocker, tmp_path):
161+
data_dir = str(tmp_path)
181162
expected_data_dir, expected_files= _mock_language_localizer_demo_dataset(
182163
data_dir)
183164
actual_data_dir, actual_subdirs = fetch_language_localizer_demo_dataset(
@@ -240,26 +221,24 @@ def test_make_spm_auditory_events_file():
240221
actual_events_data_string = replace_win_line_ends(actual_events_data_string)
241222
expected_events_data_string = replace_win_line_ends(expected_events_data_string)
242223

243-
assert_equal(actual_events_data_string, expected_events_data_string)
224+
assert actual_events_data_string == expected_events_data_string
244225

245226

246-
@with_setup(setup_mock, teardown_mock)
247-
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
248-
def test_fetch_spm_auditory():
227+
def test_fetch_spm_auditory(request_mocker, tmp_path):
249228
import nibabel as nib
250229
import shutil
251230
saf = ["fM00223/fM00223_%03i.img" % index for index in range(4, 100)]
252231
saf_ = ["fM00223/fM00223_%03i.hdr" % index for index in range(4, 100)]
253232

254-
data_dir = os.path.join(tst.tmpdir, 'spm_auditory')
233+
data_dir = str(tmp_path / 'spm_auditory')
255234
os.mkdir(data_dir)
256235
subject_dir = os.path.join(data_dir, 'sub001')
257236
os.mkdir(subject_dir)
258237
os.mkdir(os.path.join(subject_dir, 'fM00223'))
259238
os.mkdir(os.path.join(subject_dir, 'sM00223'))
260239

261-
path_img = os.path.join(tst.tmpdir, 'tmp.img')
262-
path_hdr = os.path.join(tst.tmpdir, 'tmp.hdr')
240+
path_img = str(tmp_path / 'tmp.img')
241+
path_hdr = str(tmp_path / 'tmp.hdr')
263242
nib.save(nib.Nifti1Image(np.zeros((2, 3, 4)), np.eye(4)), path_img)
264243
shutil.copy(path_img, os.path.join(subject_dir, "sM00223/sM00223_002.img"))
265244
shutil.copy(path_hdr, os.path.join(subject_dir, "sM00223/sM00223_002.hdr"))
@@ -268,16 +247,14 @@ def test_fetch_spm_auditory():
268247
for file_ in saf_:
269248
shutil.copy(path_hdr, os.path.join(subject_dir, file_))
270249

271-
dataset = datasets.fetch_spm_auditory(data_dir=tst.tmpdir)
272-
assert_true(isinstance(dataset.anat, _basestring))
273-
assert_true(isinstance(dataset.func[0], _basestring))
274-
assert_equal(len(dataset.func), 96)
250+
dataset = datasets.fetch_spm_auditory(data_dir=str(tmp_path))
251+
assert isinstance(dataset.anat, _basestring)
252+
assert isinstance(dataset.func[0], _basestring)
253+
assert len(dataset.func) == 96
275254

276255

277-
@with_setup(setup_mock, teardown_mock)
278-
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
279-
def test_fetch_spm_multimodal():
280-
data_dir = os.path.join(tst.tmpdir, 'spm_multimodal_fmri')
256+
def test_fetch_spm_multimodal(request_mocker, tmp_path):
257+
data_dir = str(tmp_path / 'spm_multimodal_fmri')
281258
os.mkdir(data_dir)
282259
subject_dir = os.path.join(data_dir, 'sub001')
283260
os.mkdir(subject_dir)
@@ -293,23 +270,21 @@ def test_fetch_spm_multimodal():
293270
open(os.path.join(dir_, 'fMETHODS-000%i-%i-01.img' %
294271
(session + 5, i)), 'a').close()
295272

296-
dataset = datasets.fetch_spm_multimodal_fmri(data_dir=tst.tmpdir)
297-
assert_true(isinstance(dataset.anat, _basestring))
298-
assert_true(isinstance(dataset.func1[0], _basestring))
299-
assert_equal(len(dataset.func1), 390)
300-
assert_true(isinstance(dataset.func2[0], _basestring))
301-
assert_equal(len(dataset.func2), 390)
302-
assert_equal(dataset.slice_order, 'descending')
303-
assert_true(dataset.trials_ses1, _basestring)
304-
assert_true(dataset.trials_ses2, _basestring)
273+
dataset = datasets.fetch_spm_multimodal_fmri(data_dir=str(tmp_path))
274+
assert isinstance(dataset.anat, _basestring)
275+
assert isinstance(dataset.func1[0], _basestring)
276+
assert len(dataset.func1) == 390
277+
assert isinstance(dataset.func2[0], _basestring)
278+
assert len(dataset.func2) == 390
279+
assert dataset.slice_order == 'descending'
280+
assert isinstance(dataset.trials_ses1, _basestring)
281+
assert isinstance(dataset.trials_ses2, _basestring)
305282

306283

307-
@with_setup(setup_mock, teardown_mock)
308-
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
309-
def test_fiac():
284+
def test_fiac(request_mocker, tmp_path):
310285
# Create dummy 'files'
311-
fiac_dir = os.path.join(tst.tmpdir, 'fiac_nistats', 'nipy-data-0.2',
312-
'data', 'fiac')
286+
fiac_dir = str(tmp_path / 'fiac_nistats' / 'nipy-data-0.2' /
287+
'data' / 'fiac')
313288
fiac0_dir = os.path.join(fiac_dir, 'fiac0')
314289
os.makedirs(fiac0_dir)
315290
for session in [1, 2]:
@@ -321,9 +296,9 @@ def test_fiac():
321296
mask = os.path.join(fiac0_dir, 'mask.nii.gz')
322297
open(mask, 'a').close()
323298

324-
dataset = datasets.fetch_fiac_first_level(data_dir=tst.tmpdir)
325-
assert_true(isinstance(dataset.func1, _basestring))
326-
assert_true(isinstance(dataset.func2, _basestring))
327-
assert_true(isinstance(dataset.design_matrix1, _basestring))
328-
assert_true(isinstance(dataset.design_matrix2, _basestring))
329-
assert_true(isinstance(dataset.mask, _basestring))
299+
dataset = datasets.fetch_fiac_first_level(data_dir=str(tmp_path))
300+
assert isinstance(dataset.func1, _basestring)
301+
assert isinstance(dataset.func2, _basestring)
302+
assert isinstance(dataset.design_matrix1, _basestring)
303+
assert isinstance(dataset.design_matrix2, _basestring)
304+
assert isinstance(dataset.mask, _basestring)

0 commit comments

Comments
 (0)