diff --git a/nibabel/arraywriters.py b/nibabel/arraywriters.py
index bdd2d548f..751eb6ad1 100644
--- a/nibabel/arraywriters.py
+++ b/nibabel/arraywriters.py
@@ -30,15 +30,7 @@ def __init__(self, array, out_dtype=None)
 """
 import numpy as np
 
-from .casting import (
-    as_int,
-    best_float,
-    floor_exact,
-    int_abs,
-    int_to_float,
-    shared_range,
-    type_info,
-)
+from .casting import best_float, floor_exact, int_abs, shared_range, type_info
 from .volumeutils import array_to_file, finite_range
 
 
@@ -152,9 +144,8 @@ def scaling_needed(self):
         # No scaling needed if data already fits in output type
         # But note - we need to convert to ints, to avoid conversion to float
         # during comparisons, and therefore int -> float conversions which are
-        # not exact. Only a problem for uint64 though. We need as_int here to
-        # work around a numpy 1.4.1 bug in uint conversion
-        if as_int(mn) >= as_int(info.min) and as_int(mx) <= as_int(info.max):
+        # not exact. Only a problem for uint64 though.
+        if int(mn) >= int(info.min) and int(mx) <= int(info.max):
             return False
         return True
 
@@ -392,7 +383,7 @@ def _do_scaling(self):
         out_max, out_min = info.max, info.min
         # If left as int64, uint64, comparisons will default to floats, and
         # these are inexact for > 2**53 - so convert to int
-        if as_int(mx) <= as_int(out_max) and as_int(mn) >= as_int(out_min):
+        if int(mx) <= int(out_max) and int(mn) >= int(out_min):
             # already in range
             return
         # (u)int to (u)int scaling
@@ -410,7 +401,7 @@ def _iu2iu(self):
         # that deals with max neg ints. abs problem only arises when all
         # the data is set to max neg integer value
         o_min, o_max = shared_range(self.scaler_dtype, out_dt)
-        if mx <= 0 and int_abs(mn) <= as_int(o_max):  # sign flip enough?
+        if mx <= 0 and int_abs(mn) <= int(o_max):  # sign flip enough?
             # -1.0 * arr will be in scaler_dtype precision
             self.slope = -1.0
             return
@@ -427,7 +418,7 @@ def _range_scale(self, in_min, in_max):
             # not lose precision because min/max are of fp type.
             out_min, out_max = np.array((out_min, out_max), dtype=big_float)
         else:  # (u)int
-            out_min, out_max = (int_to_float(v, big_float) for v in (out_min, out_max))
+            out_min, out_max = (big_float(v) for v in (out_min, out_max))
         if self._out_dtype.kind == 'u':
             if in_min < 0 and in_max > 0:
                 raise WriterError(
@@ -546,14 +537,13 @@ def to_fileobj(self, fileobj, order='F'):
 
     def _iu2iu(self):
         # (u)int to (u)int
-        mn, mx = (as_int(v) for v in self.finite_range())
+        mn, mx = (int(v) for v in self.finite_range())
         # range may be greater than the largest integer for this type.
-        # as_int needed to work round numpy 1.4.1 int casting bug
         out_dtype = self._out_dtype
         # Options in this method are scaling using intercept only. These will
         # have to pass through ``self.scaler_dtype`` (because the intercept is
         # in this type).
-        o_min, o_max = (as_int(v) for v in shared_range(self.scaler_dtype, out_dtype))
+        o_min, o_max = (int(v) for v in shared_range(self.scaler_dtype, out_dtype))
         type_range = o_max - o_min
         mn2mx = mx - mn
         if mn2mx <= type_range:  # might offset be enough?
@@ -565,12 +555,12 @@ def _iu2iu(self):
         else:  # int output - take midpoint to 0
             # ceil below increases inter, pushing scale up to 0.5 towards
             # -inf, because ints have abs min == abs max + 1
-            midpoint = mn + as_int(np.ceil(mn2mx / 2.0))
+            midpoint = mn + int(np.ceil(mn2mx / 2.0))
             # Floor exact decreases inter, so pulling scaled values more
             # positive. This may make mx - inter > t_max
             inter = floor_exact(midpoint, self.scaler_dtype)
             # Need to check still in range after floor_exact-ing
-            int_inter = as_int(inter)
+            int_inter = int(inter)
             assert mn - int_inter >= o_min
             if mx - int_inter <= o_max:
                 self.inter = inter
@@ -594,14 +584,13 @@ def _range_scale(self, in_min, in_max):
             in_min, in_max = np.array([in_min, in_max], dtype=big_float)
             in_range = np.diff([in_min, in_max])
         else:  # max possible (u)int range is 2**64-1 (int64, uint64)
-            # int_to_float covers this range. On windows longdouble is the
-            # same as double so in_range will be 2**64 - thus overestimating
-            # slope slightly. Casting to int needed to allow in_max-in_min to
-            # be larger than the largest (u)int value
-            in_min, in_max = as_int(in_min), as_int(in_max)
-            in_range = int_to_float(in_max - in_min, big_float)
+            # On windows longdouble is the same as double so in_range will be 2**64 -
+            # thus overestimating slope slightly. Casting to int needed to allow
+            # in_max-in_min to be larger than the largest (u)int value
+            in_min, in_max = int(in_min), int(in_max)
+            in_range = big_float(in_max - in_min)
             # Cast to float for later processing.
-            in_min, in_max = (int_to_float(v, big_float) for v in (in_min, in_max))
+            in_min, in_max = (big_float(v) for v in (in_min, in_max))
         if out_dtype.kind == 'f':
             # Type range, these are also floats
             info = type_info(out_dtype)
diff --git a/nibabel/casting.py b/nibabel/casting.py
index 743ce4706..f3e04f30f 100644
--- a/nibabel/casting.py
+++ b/nibabel/casting.py
@@ -10,6 +10,8 @@
 
 import numpy as np
 
+from .deprecated import deprecate_with_version
+
 
 class CastingError(Exception):
     pass
@@ -402,6 +404,7 @@ def _check_maxexp(np_type, maxexp):
     return np.isfinite(two ** (maxexp - 1)) and not np.isfinite(two**maxexp)
 
 
+@deprecate_with_version('as_int() is deprecated. Use int() instead.', '5.2.0', '7.0.0')
 def as_int(x, check=True):
     """Return python integer representation of number
 
@@ -411,9 +414,6 @@ def as_int(x, check=True):
     It is also useful to work around a numpy 1.4.1 bug in conversion of uints
     to python ints.
 
-    This routine will still raise an OverflowError for values that are outside
-    the range of float64.
-
     Parameters
     ----------
     x : object
@@ -439,30 +439,13 @@ def as_int(x, check=True):
     >>> as_int(2.1, check=False)
     2
     """
-    x = np.array(x)
-    if x.dtype.kind in 'iu':
-        # This works around a nasty numpy 1.4.1 bug such that:
-        # >>> int(np.uint32(2**32-1)
-        # -1
-        return int(str(x))
     ix = int(x)
-    if ix == x:
-        return ix
-    fx = np.floor(x)
-    if check and fx != x:
+    if check and ix != x:
         raise FloatingError(f'Not an integer: {x}')
-    if not fx.dtype.type == np.longdouble:
-        return int(x)
-    # Subtract float64 chunks until we have all of the number. If the int is
-    # too large, it will overflow
-    ret = 0
-    while fx != 0:
-        f64 = np.float64(fx)
-        fx -= f64
-        ret += int(f64)
-    return ret
+    return ix
 
 
+@deprecate_with_version('int_to_float(..., dt) is deprecated. Use dt() instead.', '5.2.0', '7.0.0')
 def int_to_float(val, flt_type):
     """Convert integer `val` to floating point type `flt_type`
 
@@ -484,18 +467,13 @@ def int_to_float(val, flt_type):
     Returns
     -------
     f : numpy scalar of type `flt_type`
+
+    Examples
+    --------
+    >>> int_to_float(1, np.float32)
+    1.0
     """
-    if flt_type is not np.longdouble:
-        return flt_type(val)
-    # The following works around a nasty numpy 1.4.1 bug such that:
-    # >>> int(np.uint32(2**32-1)
-    val = int(val)
-    faval = np.longdouble(0)
-    while val != 0:
-        f64 = np.float64(val)
-        faval += f64
-        val -= int(f64)
-    return faval
+    return flt_type(val)
 
 
 def floor_exact(val, flt_type):
@@ -542,14 +520,14 @@ def floor_exact(val, flt_type):
     val = int(val)
     flt_type = np.dtype(flt_type).type
     sign = 1 if val > 0 else -1
-    try:  # int_to_float deals with longdouble safely
-        fval = int_to_float(val, flt_type)
+    try:
+        fval = flt_type(val)
     except OverflowError:
         return sign * np.inf
     if not np.isfinite(fval):
         return fval
     info = type_info(flt_type)
-    diff = val - as_int(fval)
+    diff = val - int(fval)
     if diff >= 0:  # floating point value <= val
         return fval
     # Float casting made the value go up
diff --git a/nibabel/conftest.py b/nibabel/conftest.py
index cf0139232..5eba256fa 100644
--- a/nibabel/conftest.py
+++ b/nibabel/conftest.py
@@ -1,3 +1,5 @@
+import sys
+
 import numpy as np
 import pytest
 
@@ -12,3 +14,18 @@ def legacy_printoptions():
 
     if Version(np.__version__) >= Version('1.22'):
         np.set_printoptions(legacy='1.21')
+
+
+@pytest.fixture
+def max_digits():
+    # Set maximum number of digits for int/str conversion for
+    # duration of a test
+    try:
+        orig_max_str_digits = sys.get_int_max_str_digits()
+        yield sys.set_int_max_str_digits
+        sys.set_int_max_str_digits(orig_max_str_digits)
+    except AttributeError:  # pragma: no cover
+        # Nothing to do for versions of Python that lack these methods
+        # They were added as DoS protection in Python 3.11 and backported to
+        # some other versions.
+        yield lambda x: None
diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py
index 4e024d6e3..cb7b8d686 100644
--- a/nibabel/tests/test_analyze.py
+++ b/nibabel/tests/test_analyze.py
@@ -26,7 +26,7 @@
 from .. import imageglobals
 from ..analyze import AnalyzeHeader, AnalyzeImage
 from ..arraywriters import WriterError
-from ..casting import as_int, sctypes_aliases
+from ..casting import sctypes_aliases
 from ..nifti1 import Nifti1Header
 from ..optpkg import optional_package
 from ..spatialimages import HeaderDataError, HeaderTypeError, supported_np_types
@@ -308,8 +308,7 @@ def test_shapes(self):
         assert hdr.get_data_shape() == shape
         # Check max works, but max+1 raises error
         dim_dtype = hdr.structarr['dim'].dtype
-        # as_int for safety to deal with numpy 1.4.1 int conversion errors
-        mx = as_int(np.iinfo(dim_dtype).max)
+        mx = int(np.iinfo(dim_dtype).max)
         shape = (mx,)
         hdr.set_data_shape(shape)
         assert hdr.get_data_shape() == shape
diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py
index d04b996bb..f345952aa 100644
--- a/nibabel/tests/test_casting.py
+++ b/nibabel/tests/test_casting.py
@@ -10,12 +10,10 @@
 from ..casting import (
     CastingError,
     able_int_type,
-    as_int,
     best_float,
     float_to_int,
     floor_log2,
     int_abs,
-    int_to_float,
     longdouble_precision_improved,
     sctypes,
     shared_range,
@@ -42,7 +40,7 @@ def test_shared_range():
             if casted_mx != imax:
                 # The shared_range have told us that they believe the imax does
                 # not have an exact representation.
-                fimax = int_to_float(imax, ft)
+                fimax = ft(imax)
                 if np.isfinite(fimax):
                     assert int(fimax) != imax
                 # Therefore the imax, cast back to float, and to integer, will
@@ -68,7 +66,7 @@ def test_shared_range():
             if casted_mn != imin:
                 # The shared_range have told us that they believe the imin does
                 # not have an exact representation.
-                fimin = int_to_float(imin, ft)
+                fimin = ft(imin)
                 if np.isfinite(fimin):
                     assert int(fimin) != imin
                 # Therefore the imin, cast back to float, and to integer, will
@@ -101,11 +99,6 @@ def test_casting():
             mn, mx = shared_range(ft, it)
             with np.errstate(invalid='ignore'):
                 iarr = float_to_int(farr, it)
-            # Dammit - for long doubles we need to jump through some hoops not
-            # to round to numbers outside the range
-            if ft is np.longdouble:
-                mn = as_int(mn)
-                mx = as_int(mx)
             exp_arr = np.array([mn, mx, mn, mx, 0, 0, 11], dtype=it)
             assert_array_equal(iarr, exp_arr)
             # Now test infmax version
@@ -149,7 +142,7 @@ def test_int_abs():
         assert udtype.kind == 'u'
         assert idtype.itemsize == udtype.itemsize
         mn, mx = in_arr
-        e_mn = as_int(mx) + 1  # as_int needed for numpy 1.4.1 casting
+        e_mn = int(mx) + 1
         assert int_abs(mx) == mx
         assert int_abs(mn) == e_mn
         assert_array_equal(int_abs(in_arr), [e_mn, mx])
diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py
index 82fdc4402..3e6e7f426 100644
--- a/nibabel/tests/test_floating.py
+++ b/nibabel/tests/test_floating.py
@@ -11,12 +11,10 @@
     FloatingError,
     _check_maxexp,
     _check_nmant,
-    as_int,
     ceil_exact,
     floor_exact,
     floor_log2,
     have_binary128,
-    int_to_float,
     longdouble_precision_improved,
     ok_floats,
     on_powerpc,
@@ -128,110 +126,31 @@ def test_check_nmant_nexp():
         assert _check_maxexp(t, ti['maxexp'])
 
 
-def test_as_int():
-    # Integer representation of number
-    assert as_int(2.0) == 2
-    assert as_int(-2.0) == -2
-    with pytest.raises(FloatingError):
-        as_int(2.1)
-    with pytest.raises(FloatingError):
-        as_int(-2.1)
-    assert as_int(2.1, False) == 2
-    assert as_int(-2.1, False) == -2
-    v = np.longdouble(2**64)
-    assert as_int(v) == 2**64
-    # Have all long doubles got 63+1 binary bits of precision? Windows 32-bit
-    # longdouble appears to have 52 bit precision, but we avoid that by checking
-    # for known precisions that are less than that required
-    try:
-        nmant = type_info(np.longdouble)['nmant']
-    except FloatingError:
-        nmant = 63  # Unknown precision, let's hope it's at least 63
-    v = np.longdouble(2) ** (nmant + 1) - 1
-    assert as_int(v) == 2 ** (nmant + 1) - 1
-    # Check for predictable overflow
-    nexp64 = floor_log2(type_info(np.float64)['max'])
-    with np.errstate(over='ignore'):
-        val = np.longdouble(2**nexp64) * 2  # outside float64 range
-    assert val > np.finfo('float64').max
-    # TODO: Should this actually still overflow? Does it matter?
-    if FP_OVERFLOW_WARN:
-        ctx = pytest.raises(OverflowError)
-    else:
-        ctx = nullcontext()
-    out_val = None
-    with ctx:
-        out_val = as_int(val)
-    if out_val is not None:
-        assert out_val == val
-    with ctx:
-        out_val = as_int(-val)
-    if out_val is not None:
-        assert out_val == -val
-
-
-def test_int_to_float():
-    # Convert python integer to floating point
-    # Standard float types just return cast value
-    for ie3 in IEEE_floats:
-        nmant = type_info(ie3)['nmant']
-        for p in range(nmant + 3):
-            i = 2**p + 1
-            assert int_to_float(i, ie3) == ie3(i)
-            assert int_to_float(-i, ie3) == ie3(-i)
-        # IEEEs in this case are binary formats only
-        nexp = floor_log2(type_info(ie3)['max'])
-        # Values too large for the format
-        smn, smx = -(2 ** (nexp + 1)), 2 ** (nexp + 1)
-        if ie3 is np.float64:
-            with pytest.raises(OverflowError):
-                int_to_float(smn, ie3)
-            with pytest.raises(OverflowError):
-                int_to_float(smx, ie3)
-        else:
-            assert int_to_float(smn, ie3) == ie3(smn)
-            assert int_to_float(smx, ie3) == ie3(smx)
-    # Longdoubles do better than int, we hope
-    LD = np.longdouble
-    # up to integer precision of float64 nmant, we get the same result as for
-    # casting directly
+def test_int_longdouble_np_regression():
+    # Test longdouble conversion from int works as expected
+    # Previous versions of numpy would fail, and we used a custom int_to_float()
+    # function. This test remains to ensure we don't need to bring it back.
     nmant = type_info(np.float64)['nmant']
-    for p in range(nmant + 2):  # implicit
-        i = 2**p - 1
-        assert int_to_float(i, LD) == LD(i)
-        assert int_to_float(-i, LD) == LD(-i)
-    # Above max of float64, we're hosed
-    nexp64 = floor_log2(type_info(np.float64)['max'])
-    smn64, smx64 = -(2 ** (nexp64 + 1)), 2 ** (nexp64 + 1)
-    # The algorithm here implemented goes through float64, so supermax and
-    # supermin will cause overflow errors
-    with pytest.raises(OverflowError):
-        int_to_float(smn64, LD)
-    with pytest.raises(OverflowError):
-        int_to_float(smx64, LD)
-    try:
-        nmant = type_info(np.longdouble)['nmant']
-    except FloatingError:  # don't know where to test
-        return
     # test we recover precision just above nmant
     i = 2 ** (nmant + 1) - 1
-    assert as_int(int_to_float(i, LD)) == i
-    assert as_int(int_to_float(-i, LD)) == -i
+    assert int(np.longdouble(i)) == i
+    assert int(np.longdouble(-i)) == -i
     # If longdouble can cope with 2**64, test
     if nmant >= 63:
         # Check conversion to int; the line below causes an error subtracting
         # ints / uint64 values, at least for Python 3.3 and numpy dev 1.8
         big_int = np.uint64(2**64 - 1)
-        assert as_int(int_to_float(big_int, LD)) == big_int
+        assert int(np.longdouble(big_int)) == big_int
 
 
-def test_as_int_np_fix():
-    # Test as_int works for integers. We need as_int for integers because of a
+def test_int_np_regression():
+    # Test int works as expected for integers.
+    # We previously used a custom as_int() for integers because of a
     # numpy 1.4.1 bug such that int(np.uint32(2**32-1) == -1
     for t in sctypes['int'] + sctypes['uint']:
         info = np.iinfo(t)
         mn, mx = np.array([info.min, info.max], dtype=t)
-        assert (mn, mx) == (as_int(mn), as_int(mx))
+        assert (mn, mx) == (int(mn), int(mx))
 
 
 def test_floor_exact_16():
@@ -253,7 +172,9 @@ def test_floor_exact_64():
         assert floor_exact(test_val, np.float64) == 2 ** (e + 1) - int(gap)
 
 
-def test_floor_exact():
+def test_floor_exact(max_digits):
+    max_digits(4950)  # max longdouble is ~10**4932
+
     to_test = IEEE_floats + [float]
     try:
         type_info(np.longdouble)['nmant']
@@ -264,16 +185,16 @@ def test_floor_exact():
         to_test.append(np.longdouble)
     # When numbers go above int64 - I believe, numpy comparisons break down,
     # so we have to cast to int before comparison
-    int_flex = lambda x, t: as_int(floor_exact(x, t))
-    int_ceex = lambda x, t: as_int(ceil_exact(x, t))
+    int_flex = lambda x, t: int(floor_exact(x, t))
+    int_ceex = lambda x, t: int(ceil_exact(x, t))
     for t in to_test:
         # A number bigger than the range returns the max
        info = type_info(t)
-        assert floor_exact(2**5000, t) == np.inf
-        assert ceil_exact(2**5000, t) == np.inf
+        assert floor_exact(10**4933, t) == np.inf
+        assert ceil_exact(10**4933, t) == np.inf
         # A number more negative returns -inf
-        assert floor_exact(-(2**5000), t) == -np.inf
-        assert ceil_exact(-(2**5000), t) == -np.inf
+        assert floor_exact(-(10**4933), t) == -np.inf
+        assert ceil_exact(-(10**4933), t) == -np.inf
         # Check around end of integer precision
         nmant = info['nmant']
         for i in range(nmant + 1):
@@ -302,7 +223,7 @@ def test_floor_exact():
         for i in range(5):
            iv = 2 ** (nmant + 1 + i)
             gap = 2 ** (i + 1)
-            assert as_int(t(iv) + t(gap)) == iv + gap
+            assert int(t(iv) + t(gap)) == iv + gap
             for j in range(1, gap):
                 assert int_flex(iv + j, t) == iv
                 assert int_flex(iv + gap + j, t) == iv + gap
diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py
index db99ae3a4..b11a62180 100644
--- a/nibabel/tests/test_removalschedule.py
+++ b/nibabel/tests/test_removalschedule.py
@@ -17,6 +17,8 @@
     (
         '8.0.0',
         [
+            ('nibabel.casting', 'as_int'),
+            ('nibabel.casting', 'int_to_float'),
             ('nibabel.tmpdirs', 'TemporaryDirectory'),
         ],
     ),
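
Reviewer note: the whole patch reduces to the rule "replace as_int(x) with int(x)
and int_to_float(v, dt) with dt(v)". Below is a minimal sketch of that migration,
assuming a checkout with this diff applied and that deprecate_with_version emits
its default DeprecationWarning; it is an illustration, not part of the patch.

    import warnings

    import numpy as np

    from nibabel.casting import as_int, int_to_float

    # The shims now delegate to the builtins, so old and new spellings agree.
    x = np.uint64(2**64 - 1)
    assert int(x) == 2**64 - 1  # exact; the numpy 1.4.1 uint bug is long gone
    assert int_to_float(x, np.longdouble) == np.longdouble(x)

    # Calling a shim warns, giving downstream code until 7.0.0 to migrate.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        assert as_int(np.int32(7)) == 7
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)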
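The floor_exact() change is behavior-preserving: flt_type(val) replaces
int_to_float(val, flt_type) inside the existing try/except OverflowError, so
out-of-range values still saturate to +/-inf. A quick sketch of the invariants
the updated tests rely on (float64 is used here so no string-digit limit is
involved; the probe values are illustrative):

    import numpy as np

    from nibabel.casting import ceil_exact, floor_exact

    # 2**53 + 1 is the first integer float64 cannot represent exactly;
    # floor_exact rounds toward -inf, ceil_exact toward +inf.
    assert floor_exact(2**53 + 1, np.float64) == 2**53
    assert ceil_exact(2**53 + 1, np.float64) == 2**53 + 2

    # Values beyond the type's range saturate instead of raising.
    assert floor_exact(10**4933, np.float64) == np.inf
    assert floor_exact(-(10**4933), np.float64) == -np.inf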
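The new max_digits fixture exists because the new "bigger than longdouble"
probe, 10**4933 (4934 digits), trips the int/str conversion cap that Python
3.11 added as DoS protection; numpy's Python-int-to-longdouble conversion
appears to pass through the decimal string, so the cap must be raised before
floor_exact(10**4933, np.longdouble) can saturate to inf. A standalone sketch
of the pattern the fixture wraps, under that assumption:

    import sys

    # Python >= 3.11 caps int <-> str conversion length (default 4300 digits).
    if hasattr(sys, 'set_int_max_str_digits'):
        orig = sys.get_int_max_str_digits()
        sys.set_int_max_str_digits(4950)  # max longdouble is ~10**4932
        try:
            assert len(str(10**4933)) == 4934  # now permitted
        finally:
            sys.set_int_max_str_digits(orig)  # restore, as the fixture does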