Commit cf4e25e
Merge pull request #289 from matthew-brett/nasty-windows-fixes

MRG: nasty windows fixes

An absolutely horrible bug in MSVC when casting from float to uint64 caused some test failures on the buildbots. While debugging, it did seem the code could work more robustly in a couple of cases; otherwise, we now test for this horrible misfeature when it is present. Also some formatting changes, a fix for ordereddict testing on Windows, and dropped references to temporary files to satisfy Windows' more stringent file locking.

2 parents c54080f + 42e0315 commit cf4e25e
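For context, a sketch of the misfeature the commit message describes, using the same probe value the nibabel/casting.py change below adds; on an affected MSVC build the cast does not round-trip:

    import numpy as np

    # 2**63 + 2**11 is exactly representable in float64 (the spacing of
    # float64 values near 2**63 is 2**11), so a correct cast back to
    # uint64 must return the same value.
    val = 2**63 + 2**11
    trunc = np.float64(val).astype(np.uint64) != val
    print(trunc)  # True on affected MSVC <= 2010 builds, False elsewhere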

10 files changed (+192, -94 lines)

.travis.yml (+2, -1)

@@ -27,8 +27,9 @@ matrix:
       env:
         - DEPENDS="numpy==1.5.1 pydicom==0.9.7"
 before_install:
-    - virtualenv venv
+    - virtualenv --python=python venv
     - source venv/bin/activate
+    - python --version # just to check
     - pip install nose # always
     - pip install --no-index -f http://travis-wheels.scikit-image.org $DEPENDS
     # pydicom <= 0.9.8 doesn't install on python 3

bin/parrec2nii (+1, -1)

@@ -198,7 +198,7 @@ def proc_file(infile, opts):
        # container for potential NIfTI1 header extensions
        if opts.store_header:
            # dump the full PAR header content into an extension
-            with open(infile, 'r') as fobj:
+            with open(infile, 'rb') as fobj:  # contents must be bytes
                hdr_dump = fobj.read()
            dump_ext = nifti1.Nifti1Extension('comment', hdr_dump)
            nhdr.extensions.append(dump_ext)
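The mode change matters because the extension content is written raw into the NIfTI file, so it must be bytes rather than str. A minimal self-contained sketch of the difference ('example.PAR' is a placeholder file name):

    # Write a stand-in file, then read it back in binary mode. Binary
    # mode gives bytes on Python 2 and 3 alike; text mode would give
    # str/unicode, which cannot be stored verbatim in the extension.
    with open('example.PAR', 'wb') as fobj:
        fobj.write(b'# PAR header dump\n')
    with open('example.PAR', 'rb') as fobj:
        hdr_dump = fobj.read()
    assert isinstance(hdr_dump, bytes)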

nibabel/arraywriters.py (+39, -33)

@@ -411,15 +411,16 @@ def _do_scaling(self):
     def _iu2iu(self):
         # (u)int to (u)int scaling
         mn, mx = self.finite_range()
-        if self._out_dtype.kind == 'u':
+        out_dt = self._out_dtype
+        if out_dt.kind == 'u':
             # We're checking for a sign flip. This can only work for uint
             # output, because, for int output, the abs min of the type is
-            # greater than the abs max, so the data either fit into the range
-            # (tested for in _do_scaling), or this test can't pass
-            # Need abs that deals with max neg ints. abs problem only arises
-            # when all the data is set to max neg integer value
-            imax = np.iinfo(self._out_dtype).max
-            if mx <= 0 and int_abs(mn) <= imax: # sign flip enough?
+            # greater than the abs max, so the data either fits into the range
+            # (tested for in _do_scaling), or this test can't pass. Need abs
+            # that deals with max neg ints. abs problem only arises when all
+            # the data is set to max neg integer value
+            o_min, o_max = shared_range(self.scaler_dtype, out_dt)
+            if mx <= 0 and int_abs(mn) <= as_int(o_max): # sign flip enough?
                 # -1.0 * arr will be in scaler_dtype precision
                 self.slope = -1.0
                 return
@@ -563,15 +564,19 @@ def _iu2iu(self):
         # range may be greater than the largest integer for this type.
         # as_int needed to work round numpy 1.4.1 int casting bug
         out_dtype = self._out_dtype
-        t_min, t_max = np.iinfo(out_dtype).min, np.iinfo(out_dtype).max
-        type_range = as_int(t_max) - as_int(t_min)
+        # Options in this method are scaling using intercept only. These will
+        # have to pass through ``self.scaler_dtype`` (because the intercept is
+        # in this type).
+        o_min, o_max = [as_int(v)
+                        for v in shared_range(self.scaler_dtype, out_dtype)]
+        type_range = o_max - o_min
         mn2mx = mx - mn
         if mn2mx <= type_range: # might offset be enough?
-            if t_min == 0: # uint output - take min to 0
+            if o_min == 0: # uint output - take min to 0
                 # decrease offset with floor_exact, meaning mn >= t_min after
                 # subtraction. But we may have pushed the data over t_max,
                 # which we check below
-                inter = floor_exact(mn - t_min, self.scaler_dtype)
+                inter = floor_exact(mn - o_min, self.scaler_dtype)
             else: # int output - take midpoint to 0
                 # ceil below increases inter, pushing scale up to 0.5 towards
                 # -inf, because ints have abs min == abs max + 1
@@ -581,15 +586,16 @@ def _iu2iu(self):
                 inter = floor_exact(midpoint, self.scaler_dtype)
             # Need to check still in range after floor_exact-ing
             int_inter = as_int(inter)
-            assert mn - int_inter >= t_min
-            if mx - int_inter <= t_max:
+            assert mn - int_inter >= o_min
+            if mx - int_inter <= o_max:
                 self.inter = inter
                 return
         # Try slope options (sign flip) and then range scaling
         super(SlopeInterArrayWriter, self)._iu2iu()

     def _range_scale(self, in_min, in_max):
-        """ Calculate scaling, intercept based on data range and output type """
+        """ Calculate scaling, intercept based on data range and output type
+        """
         if in_max == in_min: # Only one number in array
             self.slope = 1.
             self.inter = in_min
@@ -604,10 +610,10 @@ def _range_scale(self, in_min, in_max):
             in_min, in_max = np.array([in_min, in_max], dtype=big_float)
             in_range = np.diff([in_min, in_max])
         else: # max possible (u)int range is 2**64-1 (int64, uint64)
-            # int_to_float covers this range. On windows longdouble is the same
-            # as double so in_range will be 2**64 - thus overestimating slope
-            # slightly. Casting to int needed to allow in_max-in_min to be larger than
-            # the largest (u)int value
+            # int_to_float covers this range. On windows longdouble is the
+            # same as double so in_range will be 2**64 - thus overestimating
+            # slope slightly. Casting to int needed to allow in_max-in_min to
+            # be larger than the largest (u)int value
             in_min, in_max = as_int(in_min), as_int(in_max)
             in_range = int_to_float(in_max - in_min, big_float)
         # Cast to float for later processing.
@@ -624,13 +630,13 @@ def _range_scale(self, in_min, in_max):
         # raise an error when writing
         out_min, out_max = shared_range(working_dtype, out_dtype)
         out_min, out_max = np.array((out_min, out_max), dtype = big_float)
-        # We want maximum precision for the calculations. Casting will
-        # not lose precision because min/max are of fp type.
+        # We want maximum precision for the calculations. Casting will not lose
+        # precision because min/max are of fp type.
         assert [v.dtype.kind for v in (out_min, out_max)] == ['f', 'f']
         out_range = out_max - out_min
         """
-        Think of the input values as a line starting (left) at in_min and ending
-        (right) at in_max.
+        Think of the input values as a line starting (left) at in_min and
+        ending (right) at in_max.

         The output values will be a line starting at out_min and ending at
         out_max.
@@ -666,20 +672,20 @@ def _range_scale(self, in_min, in_max):
         We can't change the range of the saved data (the whole range of the
         integer type) or the range of the output data (the values we input). We
         can change the intermediate values ``saved_data * slope`` by choosing
-        the sign of the slope to match the in_min or in_max to the left or right
-        end of the saved data range.
+        the sign of the slope to match the in_min or in_max to the left or
+        right end of the saved data range.

-        If the out_dtype is signed int, then abs(out_min) = abs(out_max) + 1 and
-        the absolute value and therefore precision for values at the left and
-        right of the saved data range are very similar (e.g. -128 * slope, 127 *
-        slope respectively).
+        If the out_dtype is signed int, then abs(out_min) = abs(out_max) + 1
+        and the absolute value and therefore precision for values at the left
+        and right of the saved data range are very similar (e.g. -128 * slope,
+        127 * slope respectively).

-        If the out_dtype is unsigned int, then the absolute value at the left is
-        0 and the precision is much higher than for the right end of the range
-        (e.g. 0 * slope, 255 * slope).
+        If the out_dtype is unsigned int, then the absolute value at the left
+        is 0 and the precision is much higher than for the right end of the
+        range (e.g. 0 * slope, 255 * slope).

-        If the out_dtype is unsigned int then we choose the sign of the slope to
-        match the smaller of the in_min, in_max to the zero end of the saved
+        If the out_dtype is unsigned int then we choose the sign of the slope
+        to match the smaller of the in_min, in_max to the zero end of the saved
         range.
         """
         if out_min == 0 and np.abs(in_max) < np.abs(in_min):
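A worked sketch of the sign-flip rule in the docstring above (illustrative numbers, not nibabel's exact computation): with input range [-10, 2] and uint8 output, abs(in_max) < abs(in_min), so a negative slope maps in_max onto the high-precision zero end of the saved range.

    import numpy as np

    in_min, in_max = -10.0, 2.0    # input data range
    out_min, out_max = 0, 255      # uint8 saved range
    # Negative slope: saved 0 recovers in_max, saved 255 recovers in_min
    slope = (in_min - in_max) / (out_max - out_min)
    inter = in_max
    assert np.isclose(0 * slope + inter, in_max)    # zero end -> 2.0
    assert np.isclose(255 * slope + inter, in_min)  # far end -> -10.0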

nibabel/casting.py (+12, -2)

@@ -14,6 +14,14 @@ class CastingError(Exception):
     pass


+# Test for VC truncation when casting floats to uint64
+# Christoph Gohlke says this is so for MSVC <= 2010 because VC is using x87
+# instructions; see:
+# https://github.com/scipy/scipy/blob/99bb8411f6391d921cb3f4e56619291e91ddf43b/scipy/ndimage/tests/test_datatypes.py#L51
+_test_val = 2**63 + 2**11  # Should be exactly representable in float64
+TRUNC_UINT64 = np.float64(_test_val).astype(np.uint64) != _test_val
+
+
 def float_to_int(arr, int_type, nan2zero=True, infmax=False):
     """ Convert floating point array `arr` to type `int_type`
@@ -107,8 +115,8 @@ def shared_range(flt_type, int_type):
     """ Min and max in float type that are >=min, <=max in integer type

     This is not as easy as it sounds, because the float type may not be able to
-    exactly represent the max or min integer values, so we have to find the next
-    exactly representable floating point value to do the thresholding.
+    exactly represent the max or min integer values, so we have to find the
+    next exactly representable floating point value to do the thresholding.

     Parameters
     ----------
@@ -151,6 +159,8 @@ def shared_range(flt_type, int_type):
     mx = floor_exact(ii.max, flt_type)
     if mx == np.inf:
         mx = fi.max
+    elif TRUNC_UINT64 and int_type == np.uint64:
+        mx = min(mx, flt_type(2**63))
     _SHARED_RANGES[key] = (mn, mx)
     return mn, mx
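A short sketch of the effect (assuming this commit's nibabel): on builds where the probe detects truncation, the shared float64/uint64 maximum is pinned at 2**63 so the threshold still casts safely back to uint64.

    import numpy as np
    from nibabel.casting import shared_range, TRUNC_UINT64

    mn, mx = shared_range(np.float64, np.uint64)
    # mx is the largest float64 that casts safely to uint64; with
    # TRUNC_UINT64 True it is capped at 2**63, otherwise it is the
    # largest float64 not above 2**64 - 1.
    print(TRUNC_UINT64, mn, mx)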

nibabel/externals/tests/test_ordereddict.py (+2, -2)

@@ -12,9 +12,9 @@

 def test_right_version():
     # If Python < 2.7, use our own copy, else use system copy
-    class_path = inspect.getfile(OrderedDict)
-    rel_dir = dirname(relpath(realpath(class_path), MY_PATH))
     if sys.version_info[:2] < (2, 7):
+        class_path = realpath(inspect.getfile(OrderedDict))
+        rel_dir = dirname(relpath(class_path, MY_PATH))
         assert_equal(rel_dir, '..')
     else:
         import collections
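One plausible Windows failure the reordering avoids (an assumption; the commit message only says the test needed fixing on Windows): when the system OrderedDict lives on a different drive from the test file, relpath cannot form a relative path at all, so computing rel_dir only for the bundled copy sidesteps the problem.

    import ntpath  # Windows path rules, importable on any platform

    try:
        ntpath.relpath('D:\\Python27\\Lib\\collections.py', 'C:\\repo\\tests')
    except ValueError as err:
        print(err)  # paths on different drives have no relative path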

nibabel/tests/test_arraywriters.py (+32, -9)

@@ -135,17 +135,40 @@ def test_no_scaling():
         kwargs = (dict(check_scaling=False) if awt == ArrayWriter
                   else dict(calc_scale=False))
         aw = awt(arr, out_dtype, **kwargs)
-        with suppress_warnings(): # cast to real from cplx
+        with suppress_warnings():
             back_arr = round_trip(aw)
-        exp_back = arr.astype(float)
+        exp_back = arr.copy()
+        # If converting to floating point type, casting is direct.
+        # Otherwise we will need to do float-(u)int casting at some point.
         if out_dtype in IUINT_TYPES:
-            with np.errstate(invalid='ignore'):
-                exp_back = np.round(exp_back)
-            if hasattr(aw, 'slope') and in_dtype in FLOAT_TYPES:
-                # Finite scaling sets infs to min / max
-                exp_back = np.clip(exp_back, 0, 1)
-            else:
-                exp_back = np.clip(exp_back, *shared_range(float, out_dtype))
+            if in_dtype in CFLOAT_TYPES:
+                # Working precision is (at least) float
+                with suppress_warnings():
+                    exp_back = exp_back.astype(float)
+                # Float to iu conversion will always round, clip
+                with np.errstate(invalid='ignore'):
+                    exp_back = np.round(exp_back)
+                if hasattr(aw, 'slope') and in_dtype in FLOAT_TYPES:
+                    # Finite scaling sets infs to min / max
+                    exp_back = np.clip(exp_back, 0, 1)
+                else:
+                    # Clip to shared range of working precision
+                    exp_back = np.clip(exp_back,
+                                       *shared_range(float, out_dtype))
+            else: # iu input and output type
+                # No scaling, never gets converted to float.
+                # Does get clipped to range of output type
+                mn_out, mx_out = _dt_min_max(out_dtype)
+                if (mn_in, mx_in) != (mn_out, mx_out):
+                    # Use smaller of input, output range to avoid np.clip
+                    # upcasting the array because of large clip limits.
+                    exp_back = np.clip(exp_back,
+                                       max(mn_in, mn_out),
+                                       min(mx_in, mx_out))
+        elif in_dtype in COMPLEX_TYPES:
+            # always cast to real from complex
+            with suppress_warnings():
+                exp_back = exp_back.astype(float)
         exp_back = exp_back.astype(out_dtype)
         # Sometimes working precision is float32 - allow for small differences
         assert_allclose_safely(back_arr, exp_back)
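The upcasting pitfall the new comment guards against can be shown in isolation (a sketch; the exact result dtype may vary by numpy version): clip limits wider than the array's dtype make np.clip return an upcast copy instead of preserving the integer type.

    import numpy as np

    arr = np.array([1, 2, 3], dtype=np.int8)
    wide = np.clip(arr, -1e10, 1e10)   # limits far exceed int8 range
    print(wide.dtype)                  # upcast (e.g. float64), not int8
    tight = np.clip(arr, -128, 127)    # limits within int8 range
    print(tight.dtype)                 # int8 preserved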

nibabel/tests/test_nifti1.py (+1, -1)

@@ -627,7 +627,7 @@ def test_float128(self):
         assert_raises(HeaderDataError, hdr.set_data_dtype, np.longdouble)


-class TestNifti1Pair(tana.TestAnalyzeImage, tspm.ScalingMixin):
+class TestNifti1Pair(tana.TestAnalyzeImage, tspm.ImageScalingMixin):
     # Run analyze-flavor spatialimage tests
     image_class = Nifti1Pair
     supported_np_types = TestNifti1PairHeader.supported_np_types

nibabel/tests/test_scripts.py (+10, -0)

@@ -96,22 +96,29 @@ def check_conversion(cmd, pr_data, out_fname):
     assert_true(np.allclose(data, pr_data))
     assert_true(np.allclose(img.header['cal_min'], data.min()))
     assert_true(np.allclose(img.header['cal_max'], data.max()))
+    del img, data # for windows to be able to later delete the file
     # Check minmax options
     run_command(cmd + ['--minmax', '1', '2'])
     img = load(out_fname)
+    data = img.get_data()
     assert_true(np.allclose(data, pr_data))
     assert_true(np.allclose(img.header['cal_min'], 1))
     assert_true(np.allclose(img.header['cal_max'], 2))
+    del img, data # for windows
     run_command(cmd + ['--minmax', 'parse', '2'])
     img = load(out_fname)
+    data = img.get_data()
     assert_true(np.allclose(data, pr_data))
     assert_true(np.allclose(img.header['cal_min'], data.min()))
     assert_true(np.allclose(img.header['cal_max'], 2))
+    del img, data # for windows
     run_command(cmd + ['--minmax', '1', 'parse'])
     img = load(out_fname)
+    data = img.get_data()
     assert_true(np.allclose(data, pr_data))
     assert_true(np.allclose(img.header['cal_min'], 1))
     assert_true(np.allclose(img.header['cal_max'], data.max()))
+    del img, data


 @script_test
@@ -134,6 +141,8 @@ def test_parrec2nii():
     assert_almost_equal(img.header.get_zooms(), eg_dict['zooms'])
     # Standard save does not save extensions
     assert_equal(len(img.header.extensions), 0)
+    # Delete previous img, data to make Windows happier
+    del img, data
     # Does not overwrite unless option given
     code, stdout, stderr = run_command(
         ['parrec2nii', fname], check_code=False)
@@ -161,6 +170,7 @@ def test_parrec2nii():
     run_command(base_cmd + ['--store-header'])
     img = load(out_froot)
     assert_equal(len(img.header.extensions), 1)
+    del img # To help windows delete the file


 @script_test
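The del calls work around Windows' refusal to delete files that still have open handles; nibabel images can hold memory maps onto the file on disk. A standalone sketch of the same failure mode:

    import os
    import tempfile
    import numpy as np

    fname = os.path.join(tempfile.mkdtemp(), 'arr.dat')
    np.zeros(10).tofile(fname)
    mm = np.memmap(fname, dtype=float, mode='r')
    # os.unlink(fname) here would fail on Windows, because the live
    # memmap keeps the file handle open
    del mm              # drop the last reference to the mapped file
    os.unlink(fname)    # now deletion succeeds on all platforms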
