RF: Deprecate longdouble<->int hacks #1272


Merged · 4 commits · Nov 6, 2023
nibabel/arraywriters.py (16 additions, 27 deletions)
@@ -30,15 +30,7 @@ def __init__(self, array, out_dtype=None)
"""
import numpy as np

from .casting import (
as_int,
best_float,
floor_exact,
int_abs,
int_to_float,
shared_range,
type_info,
)
from .casting import best_float, floor_exact, int_abs, shared_range, type_info
from .volumeutils import array_to_file, finite_range
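
The premise behind dropping these imports: on the NumPy versions nibabel now supports, builtin int() is exact for every integer scalar and for whole-number longdoubles, so the as_int()/int_to_float() workarounds are dead weight. A minimal sanity check, assuming a reasonably modern NumPy:

```python
import numpy as np

# int() on a numpy scalar returns an exact Python int on modern NumPy;
# the numpy 1.4.1 uint conversion bug that as_int() papered over is long gone.
assert int(np.uint64(2**64 - 1)) == 2**64 - 1

# Powers of two are exact in every float format, including longdouble:
assert int(np.longdouble(2) ** 64) == 2**64
```
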


@@ -152,9 +144,8 @@ def scaling_needed(self):
# No scaling needed if data already fits in output type
# But note - we need to convert to ints, to avoid conversion to float
# during comparisons, and therefore int -> float conversions which are
# not exact. Only a problem for uint64 though. We need as_int here to
# work around a numpy 1.4.1 bug in uint conversion
if as_int(mn) >= as_int(info.min) and as_int(mx) <= as_int(info.max):
# not exact. Only a problem for uint64 though.
if int(mn) >= int(info.min) and int(mx) <= int(info.max):
return False
return True
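
A sketch of the failure mode the comment above guards against: mixed int/float comparisons promote to float64, which is lossy above 2**53, so the endpoints are converted to Python ints first.

```python
import numpy as np

big = np.uint64(2**63 - 1)
# Promoted to float64, the value rounds up to the nearest representable:
assert float(big) == 2**63
# Converted with int(), comparisons stay exact:
assert int(big) == 2**63 - 1
```
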

@@ -392,7 +383,7 @@ def _do_scaling(self):
out_max, out_min = info.max, info.min
# If left as int64, uint64, comparisons will default to floats, and
# these are inexact for > 2**53 - so convert to int
if as_int(mx) <= as_int(out_max) and as_int(mn) >= as_int(out_min):
if int(mx) <= int(out_max) and int(mn) >= int(out_min):
# already in range
return
# (u)int to (u)int scaling
@@ -410,7 +401,7 @@ def _iu2iu(self):
# that deals with max neg ints. abs problem only arises when all
# the data is set to max neg integer value
o_min, o_max = shared_range(self.scaler_dtype, out_dt)
if mx <= 0 and int_abs(mn) <= as_int(o_max): # sign flip enough?
if mx <= 0 and int_abs(mn) <= int(o_max): # sign flip enough?
# -1.0 * arr will be in scaler_dtype precision
self.slope = -1.0
return
@@ -427,7 +418,7 @@ def _range_scale(self, in_min, in_max):
# not lose precision because min/max are of fp type.
out_min, out_max = np.array((out_min, out_max), dtype=big_float)
else: # (u)int
out_min, out_max = (int_to_float(v, big_float) for v in (out_min, out_max))
out_min, out_max = (big_float(v) for v in (out_min, out_max))
if self._out_dtype.kind == 'u':
if in_min < 0 and in_max > 0:
raise WriterError(
@@ -546,14 +537,13 @@ def to_fileobj(self, fileobj, order='F'):

def _iu2iu(self):
# (u)int to (u)int
mn, mx = (as_int(v) for v in self.finite_range())
mn, mx = (int(v) for v in self.finite_range())
# range may be greater than the largest integer for this type.
# as_int needed to work round numpy 1.4.1 int casting bug
out_dtype = self._out_dtype
# Options in this method are scaling using intercept only. These will
# have to pass through ``self.scaler_dtype`` (because the intercept is
# in this type).
o_min, o_max = (as_int(v) for v in shared_range(self.scaler_dtype, out_dtype))
o_min, o_max = (int(v) for v in shared_range(self.scaler_dtype, out_dtype))
type_range = o_max - o_min
mn2mx = mx - mn
if mn2mx <= type_range: # might offset be enough?
@@ -565,12 +555,12 @@
else: # int output - take midpoint to 0
# ceil below increases inter, pushing scale up to 0.5 towards
# -inf, because ints have abs min == abs max + 1
midpoint = mn + as_int(np.ceil(mn2mx / 2.0))
midpoint = mn + int(np.ceil(mn2mx / 2.0))
# Floor exact decreases inter, so pulling scaled values more
# positive. This may make mx - inter > t_max
inter = floor_exact(midpoint, self.scaler_dtype)
# Need to check still in range after floor_exact-ing
int_inter = as_int(inter)
int_inter = int(inter)
assert mn - int_inter >= o_min
if mx - int_inter <= o_max:
self.inter = inter
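
A toy walk-through of the int-output branch above, with numbers picked so every step is exact (floor_exact is nibabel's own helper; the uint16-to-int8 pairing is illustrative):

```python
import numpy as np
from nibabel.casting import floor_exact

# uint16 data in [100, 355] written as int8: the 255-wide data range fits
# int8's 255-wide type range, so an intercept alone suffices.
mn, mx = 100, 355
mn2mx = mx - mn                            # 255
midpoint = mn + int(np.ceil(mn2mx / 2.0))  # 100 + 128 == 228
inter = floor_exact(midpoint, np.float32)  # 228.0, exactly representable
assert mn - int(inter) == -128 and mx - int(inter) == 127
```
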
@@ -594,14 +584,13 @@ def _range_scale(self, in_min, in_max):
in_min, in_max = np.array([in_min, in_max], dtype=big_float)
in_range = np.diff([in_min, in_max])
else: # max possible (u)int range is 2**64-1 (int64, uint64)
# int_to_float covers this range. On windows longdouble is the
# same as double so in_range will be 2**64 - thus overestimating
# slope slightly. Casting to int needed to allow in_max-in_min to
# be larger than the largest (u)int value
in_min, in_max = as_int(in_min), as_int(in_max)
in_range = int_to_float(in_max - in_min, big_float)
# On windows longdouble is the same as double so in_range will be 2**64 -
# thus overestimating slope slightly. Casting to int needed to allow
# in_max-in_min to be larger than the largest (u)int value
in_min, in_max = int(in_min), int(in_max)
in_range = big_float(in_max - in_min)
# Cast to float for later processing.
in_min, in_max = (int_to_float(v, big_float) for v in (in_min, in_max))
in_min, in_max = (big_float(v) for v in (in_min, in_max))
if out_dtype.kind == 'f':
# Type range, these are also floats
info = type_info(out_dtype)
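
The pattern this hunk settles on, sketched standalone: do the arithmetic exactly with Python ints, then cast once to the working float. The values and the uint8 target below are illustrative, and best_float() is stubbed with longdouble.

```python
import numpy as np

big_float = np.longdouble              # stand-in for nibabel's best_float()
in_min, in_max = 0, 2**64 - 1          # full uint64 input range
in_range = big_float(in_max - in_min)  # subtraction done exactly as ints;
                                       # on Windows longdouble == double, so
                                       # this rounds up to 2**64, slightly
                                       # overestimating the slope
in_min, in_max = big_float(in_min), big_float(in_max)
slope = in_range / 255                 # e.g. mapping onto uint8
```
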
nibabel/casting.py (15 additions, 37 deletions)
@@ -10,6 +10,8 @@

import numpy as np

from .deprecated import deprecate_with_version


class CastingError(Exception):
pass
@@ -402,6 +404,7 @@ def _check_maxexp(np_type, maxexp):
return np.isfinite(two ** (maxexp - 1)) and not np.isfinite(two**maxexp)


@deprecate_with_version('as_int() is deprecated. Use int() instead.', '5.2.0', '7.0.0')
def as_int(x, check=True):
"""Return python integer representation of number

@@ -411,9 +414,6 @@
It is also useful to work around a numpy 1.4.1 bug in conversion of uints
to python ints.

This routine will still raise an OverflowError for values that are outside
the range of float64.

Parameters
----------
x : object
@@ -439,30 +439,13 @@
>>> as_int(2.1, check=False)
2
"""
x = np.array(x)
if x.dtype.kind in 'iu':
# This works around a nasty numpy 1.4.1 bug such that:
# >>> int(np.uint32(2**32-1)
# -1
return int(str(x))
ix = int(x)
if ix == x:
return ix
fx = np.floor(x)
if check and fx != x:
if check and ix != x:
raise FloatingError(f'Not an integer: {x}')
if not fx.dtype.type == np.longdouble:
return int(x)
# Subtract float64 chunks until we have all of the number. If the int is
# too large, it will overflow
ret = 0
while fx != 0:
f64 = np.float64(fx)
fx -= f64
ret += int(f64)
return ret
return ix
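
How the deprecation plays out for callers, roughly: deprecate_with_version keeps the wrapper working but warning until the removal version, then raises nibabel's ExpiredDeprecationError. A sketch (the DeprecationWarning category is the decorator's default):

```python
import warnings

import numpy as np
from nibabel.casting import as_int

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    assert as_int(np.float64(3.0)) == 3   # still works until 7.0.0...
assert issubclass(caught[0].category, DeprecationWarning)  # ...but warns
```
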


@deprecate_with_version('int_to_float(..., dt) is deprecated. Use dt() instead.', '5.2.0', '7.0.0')
def int_to_float(val, flt_type):
"""Convert integer `val` to floating point type `flt_type`

@@ -484,18 +467,13 @@
-------
f : numpy scalar
of type `flt_type`

Examples
--------
>>> int_to_float(1, np.float32)
1.0
"""
if flt_type is not np.longdouble:
return flt_type(val)
# The following works around a nasty numpy 1.4.1 bug such that:
# >>> int(np.uint32(2**32-1)
val = int(val)
faval = np.longdouble(0)
while val != 0:
f64 = np.float64(val)
faval += f64
val -= int(f64)
return faval
return flt_type(val)
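
With the chunked float64 accumulation gone, int_to_float(val, dt) is literally dt(val): modern NumPy converts arbitrary-size Python ints to longdouble directly. A quick check; the exactness assertion assumes 80-bit extended longdouble, so not Windows:

```python
import numpy as np

val = 2**64 - 1
ld = np.longdouble(val)  # no float64 round-trip on modern NumPy
assert int(ld) == val    # exact with a 64-bit significand; would round to
                         # 2**64 where longdouble is really float64
```
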


def floor_exact(val, flt_type):
@@ -542,14 +520,14 @@ def floor_exact(val, flt_type):
val = int(val)
flt_type = np.dtype(flt_type).type
sign = 1 if val > 0 else -1
try: # int_to_float deals with longdouble safely
fval = int_to_float(val, flt_type)
try:
fval = flt_type(val)
except OverflowError:
return sign * np.inf
if not np.isfinite(fval):
return fval
info = type_info(flt_type)
diff = val - as_int(fval)
diff = val - int(fval)
if diff >= 0: # floating point value <= val
return fval
# Float casting made the value go up
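
floor_exact's behavior is unchanged (it returns the largest value of the float type not exceeding val); only its internals now use builtin conversions. For example, around the edge that the final diff check handles:

```python
import numpy as np
from nibabel.casting import floor_exact

# float32 has a 24-bit significand, so 2**24 + 1 is not representable:
assert floor_exact(2**24 + 1, np.float32) == 2**24            # floors down
assert floor_exact(-(2**24) - 1, np.float32) == -(2**24) - 2  # towards -inf
```
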
nibabel/conftest.py (17 additions, 0 deletions)
@@ -1,3 +1,5 @@
import sys

import numpy as np
import pytest

@@ -12,3 +14,18 @@ def legacy_printoptions():

if Version(np.__version__) >= Version('1.22'):
np.set_printoptions(legacy='1.21')


@pytest.fixture
def max_digits():
# Set maximum number of digits for int/str conversion for
# duration of a test
try:
orig_max_str_digits = sys.get_int_max_str_digits()
yield sys.set_int_max_str_digits
sys.set_int_max_str_digits(orig_max_str_digits)
except AttributeError: # pragma: no cover
# Nothing to do for versions of Python that lack these methods
# They were added as DoS protection in Python 3.11 and backported to
# some other versions.
yield lambda x: None
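
Background for the fixture: Python 3.11 added a cap (4300 digits by default) on int/str conversion as a DoS guard, and tests that stringify huge integers can trip it. A hypothetical test using the fixture:

```python
def test_huge_int_repr(max_digits):
    # Raise the conversion cap for this test only; the fixture restores it
    # afterwards (and is a no-op before Python 3.11).
    max_digits(50_000)
    assert len(str(10**10_000)) == 10_001
```
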
nibabel/tests/test_analyze.py (2 additions, 3 deletions)
@@ -26,7 +26,7 @@
from .. import imageglobals
from ..analyze import AnalyzeHeader, AnalyzeImage
from ..arraywriters import WriterError
from ..casting import as_int, sctypes_aliases
from ..casting import sctypes_aliases
from ..nifti1 import Nifti1Header
from ..optpkg import optional_package
from ..spatialimages import HeaderDataError, HeaderTypeError, supported_np_types
@@ -308,8 +308,7 @@ def test_shapes(self):
assert hdr.get_data_shape() == shape
# Check max works, but max+1 raises error
dim_dtype = hdr.structarr['dim'].dtype
# as_int for safety to deal with numpy 1.4.1 int conversion errors
mx = as_int(np.iinfo(dim_dtype).max)
mx = int(np.iinfo(dim_dtype).max)
shape = (mx,)
hdr.set_data_shape(shape)
assert hdr.get_data_shape() == shape
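
For orientation: the Analyze header 'dim' field is int16, so the largest dimension the test can set is 32767. Standalone, the now-plain conversion is just:

```python
import numpy as np

dim_dtype = np.dtype('int16')        # dtype of the Analyze header 'dim' field
mx = int(np.iinfo(dim_dtype).max)    # 32767, the largest legal dimension
assert mx == 2**15 - 1
```
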
nibabel/tests/test_casting.py (3 additions, 10 deletions)
@@ -10,12 +10,10 @@
from ..casting import (
CastingError,
able_int_type,
as_int,
best_float,
float_to_int,
floor_log2,
int_abs,
int_to_float,
longdouble_precision_improved,
sctypes,
shared_range,
@@ -42,7 +40,7 @@ def test_shared_range():
if casted_mx != imax:
# The shared_range have told us that they believe the imax does
# not have an exact representation.
fimax = int_to_float(imax, ft)
fimax = ft(imax)
if np.isfinite(fimax):
assert int(fimax) != imax
# Therefore the imax, cast back to float, and to integer, will
@@ -68,7 +66,7 @@ def test_shared_range():
if casted_mn != imin:
# The shared_range have told us that they believe the imin does
# not have an exact representation.
fimin = int_to_float(imin, ft)
fimin = ft(imin)
if np.isfinite(fimin):
assert int(fimin) != imin
# Therefore the imin, cast back to float, and to integer, will
@@ -101,11 +99,6 @@ def test_casting():
mn, mx = shared_range(ft, it)
with np.errstate(invalid='ignore'):
iarr = float_to_int(farr, it)
# Dammit - for long doubles we need to jump through some hoops not
# to round to numbers outside the range
if ft is np.longdouble:
mn = as_int(mn)
mx = as_int(mx)
exp_arr = np.array([mn, mx, mn, mx, 0, 0, 11], dtype=it)
assert_array_equal(iarr, exp_arr)
# Now test infmax version
@@ -149,7 +142,7 @@ def test_int_abs():
assert udtype.kind == 'u'
assert idtype.itemsize == udtype.itemsize
mn, mx = in_arr
e_mn = as_int(mx) + 1 # as_int needed for numpy 1.4.1 casting
e_mn = int(mx) + 1
assert int_abs(mx) == mx
assert int_abs(mn) == e_mn
assert_array_equal(int_abs(in_arr), [e_mn, mx])
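
For reference, the two helpers these tests still exercise, now fed builtin conversions (the float32/int32 pairing below is illustrative):

```python
import numpy as np
from nibabel.casting import int_abs, shared_range

# shared_range: the (min, max) safely castable between the float and int type.
mn, mx = shared_range(np.float32, np.int32)
assert int(np.float32(mx)) <= np.iinfo(np.int32).max

# int_abs: absolute value without overflow at the most negative integer.
assert int_abs(np.int8(-128)) == 128
```
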