
Commit dabfc46

Merge pull request #1250 from mscheltienne/np.sctypes

Replace np.sctypes for numpy 2.0 compat

2 parents 67ea997 + a71eebf · commit dabfc46

30 files changed (+188 / -108 lines)
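
The incompatibility being fixed: NumPy 2.0 removes the np.sctypes dictionary (along with several other aliases dropped in its namespace cleanup), so any code that still reads it fails outright. A minimal sketch of the failure mode, assuming NumPy >= 2.0 is installed:

    import numpy as np

    # NumPy 1.x: a dict of scalar types keyed by kind, e.g.
    # np.sctypes['float'] -> [float16, float32, float64, float128]
    # NumPy 2.0: the attribute no longer exists and raises AttributeError.
    try:
        float_types = np.sctypes['float']
    except AttributeError:
        float_types = None  # every nibabel call site patched below would hit this

The commit therefore vendors an equivalent sctypes mapping in nibabel/casting.py and points the remaining call sites at it.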

.zenodo.json (+5)

@@ -381,6 +381,11 @@
         {
             "name": "Suter, Peter"
         }
+        {
+            "affiliation": "Human Neuroscience Platform, Fondation Campus Biotech Geneva, Geneva, Switzerland",
+            "name": "Mathieu Scheltienne",
+            "orcid": "0000-0001-8316-7436"
+        },
     ],
     "keywords": [
         "neuroimaging"

doc/source/index.rst (+1)

@@ -125,6 +125,7 @@ contributed code and discussion (in rough order of appearance):
 * Jacob Roberts
 * Horea Christian
 * Fabian Perez
+* Mathieu Scheltienne

 License reprise
 ===============

nibabel/casting.py (+45 / -11)

@@ -6,7 +6,6 @@
 from __future__ import annotations

 import warnings
-from numbers import Integral
 from platform import machine, processor

 import numpy as np

@@ -23,6 +22,43 @@ class CastingError(Exception):
 _test_val = 2**63 + 2**11  # Should be exactly representable in float64
 TRUNC_UINT64 = np.float64(_test_val).astype(np.uint64) != _test_val

+# np.sctypes is deprecated in numpy 2.0 and np.core.sctypes should not be used instead.
+sctypes = {
+    'int': [
+        getattr(np, dtype) for dtype in ('int8', 'int16', 'int32', 'int64') if hasattr(np, dtype)
+    ],
+    'uint': [
+        getattr(np, dtype)
+        for dtype in ('uint8', 'uint16', 'uint32', 'uint64')
+        if hasattr(np, dtype)
+    ],
+    'float': [
+        getattr(np, dtype)
+        for dtype in ('float16', 'float32', 'float64', 'float96', 'float128')
+        if hasattr(np, dtype)
+    ],
+    'complex': [
+        getattr(np, dtype)
+        for dtype in ('complex64', 'complex128', 'complex192', 'complex256')
+        if hasattr(np, dtype)
+    ],
+    'others': [bool, object, bytes, str, np.void],
+}
+sctypes_aliases = {
+    getattr(np, dtype)
+    for dtype in (
+        'int8', 'byte', 'int16', 'short', 'int32', 'intc', 'int_', 'int64', 'longlong',
+        'uint8', 'ubyte', 'uint16', 'ushort', 'uint32', 'uintc', 'uint', 'uint64', 'ulonglong',  # noqa: E501
+        'float16', 'half', 'float32', 'single', 'float64', 'double', 'float96', 'float128', 'longdouble',  # noqa: E501
+        'complex64', 'csingle', 'complex128', 'cdouble', 'complex192', 'complex256', 'clongdouble',  # noqa: E501
+        # other names of the built-in scalar types
+        'int_', 'float_', 'complex_', 'bytes_', 'str_', 'bool_', 'datetime64', 'timedelta64',  # noqa: E501
+        # other
+        'object_', 'void',
+    )
+    if hasattr(np, dtype)
+}  # fmt:skip

 def float_to_int(arr, int_type, nan2zero=True, infmax=False):
     """Convert floating point array `arr` to type `int_type`

@@ -252,7 +288,7 @@ def type_info(np_type):
         return ret
     info_64 = np.finfo(np.float64)
     if dt.kind == 'c':
-        assert np_type is np.longcomplex
+        assert np_type is np.clongdouble
         vals = (nmant, nexp, width / 2)
     else:
         assert np_type is np.longdouble

@@ -280,7 +316,7 @@ def type_info(np_type):
     # Oh dear, we don't recognize the type information. Try some known types
     # and then give up. At this stage we're expecting exotic longdouble or
     # their complex equivalent.
-    if np_type not in (np.longdouble, np.longcomplex) or width not in (16, 32):
+    if np_type not in (np.longdouble, np.clongdouble) or width not in (16, 32):
         raise FloatingError(f'We had not expected type {np_type}')
     if vals == (1, 1, 16) and on_powerpc() and _check_maxexp(np.longdouble, 1024):
         # double pair on PPC. The _check_nmant routine does not work for this

@@ -290,13 +326,13 @@ def type_info(np_type):
         # Got float64 despite everything
         pass
     elif _check_nmant(np.longdouble, 112) and _check_maxexp(np.longdouble, 16384):
-        # binary 128, but with some busted type information. np.longcomplex
+        # binary 128, but with some busted type information. np.clongdouble
         # seems to break here too, so we need to use np.longdouble and
         # complexify
         two = np.longdouble(2)
        # See: https://matthew-brett.github.io/pydagogue/floating_point.html
         max_val = (two**113 - 1) / (two**112) * two**16383
-        if np_type is np.longcomplex:
+        if np_type is np.clongdouble:
             max_val += 0j
         ret = dict(
             min=-max_val,

@@ -453,9 +489,7 @@ def int_to_float(val, flt_type):
         return flt_type(val)
     # The following works around a nasty numpy 1.4.1 bug such that:
     # >>> int(np.uint32(2**32-1)
-    # -1
-    if not isinstance(val, Integral):
-        val = int(str(val))
+    val = int(val)
     faval = np.longdouble(0)
     while val != 0:
         f64 = np.float64(val)

@@ -714,7 +748,7 @@ def ok_floats():
     Remove longdouble if it has no higher precision than float64
     """
     # copy float list so we don't change the numpy global
-    floats = np.sctypes['float'][:]
+    floats = sctypes['float'][:]
     if best_float() != np.longdouble and np.longdouble in floats:
         floats.remove(np.longdouble)
     return sorted(floats, key=lambda f: type_info(f)['nmant'])

@@ -750,10 +784,10 @@ def able_int_type(values):
     mn = min(values)
     mx = max(values)
     if mn >= 0:
-        for ityp in np.sctypes['uint']:
+        for ityp in sctypes['uint']:
             if mx <= np.iinfo(ityp).max:
                 return ityp
-    for ityp in np.sctypes['int']:
+    for ityp in sctypes['int']:
         info = np.iinfo(ityp)
         if mn >= info.min and mx <= info.max:
             return ityp
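
For orientation, the new module-level constants are meant as drop-ins: sctypes mirrors the removed np.sctypes, built only from scalar-type names that exist on the running platform (float96/float128 and complex192/complex256 are platform dependent), and its 'float' list ends with the widest available float, which is what quaternions.py below uses in place of the removed np.maximum_sctype. sctypes_aliases is a flat set of every recognized scalar type, used later in the commit to replace set(np.sctypeDict.values()). A rough usage sketch, assuming a nibabel checkout that includes this commit:

    import numpy as np
    from nibabel.casting import sctypes, sctypes_aliases

    # Lists run narrowest to widest, so the last float entry is the widest one
    # NumPy exposes here (np.longdouble/float128 on typical x86-64 Linux builds,
    # np.float64 where extended precision is unavailable).
    widest = sctypes['float'][-1]
    print(np.finfo(widest).nmant)

    # The alias set covers canonical names and their synonyms.
    assert np.int64 in sctypes_aliases
    assert np.longlong in sctypes_aliases

Because the per-kind lists keep that ascending order, able_int_type and ok_floats can keep scanning from the smallest to the widest type exactly as they did with np.sctypes.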

nibabel/conftest.py (+9)

@@ -1,5 +1,14 @@
+import numpy as np
 import pytest

 # Ignore warning requesting help with nicom
 with pytest.warns(UserWarning):
     import nibabel.nicom
+
+
+@pytest.fixture(scope='session', autouse=True)
+def legacy_printoptions():
+    from packaging.version import Version
+
+    if Version(np.__version__) >= Version('1.22'):
+        np.set_printoptions(legacy='1.21')
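
A note on the new autouse fixture: it pins printing to NumPy's pre-1.22 legacy style for the whole test session, presumably so that doctests comparing printed output keep passing as NumPy's formatting evolves; NumPy 2.0 in particular changes scalar reprs, which the legacy mode rolls back. A hedged illustration (the exact strings shown are the assumption here):

    import numpy as np

    repr(np.float64(3.0))   # NumPy 2.0 default: 'np.float64(3.0)'
    np.set_printoptions(legacy='1.21')
    repr(np.float64(3.0))   # legacy 1.21 mode: '3.0'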

nibabel/ecat.py (+1 / -1)

@@ -923,7 +923,7 @@ def _write_data(self, data, stream, pos, dtype=None, endianness=None):
             endianness = native_code

         stream.seek(pos)
-        make_array_writer(data.newbyteorder(endianness), dtype).to_fileobj(stream)
+        make_array_writer(data.view(data.dtype.newbyteorder(endianness)), dtype).to_fileobj(stream)

     def to_file_map(self, file_map=None):
         """Write ECAT7 image to `file_map` or contained ``self.file_map``

nibabel/freesurfer/io.py (+1 / -1)

@@ -31,7 +31,7 @@ def _fread3(fobj):
     n : int
        A 3 byte int
     """
-    b1, b2, b3 = np.fromfile(fobj, '>u1', 3)
+    b1, b2, b3 = np.fromfile(fobj, '>u1', 3).astype(np.int64)
     return (b1 << 16) + (b2 << 8) + b3
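
The added .astype(np.int64) matters under NumPy 2.0's NEP 50 promotion rules: shifting a uint8 value by a Python int no longer upcasts the result, so b1 << 16 would wrap around inside uint8. A sketch of the difference (the exact NumPy 1.x result type is the assumption; the widened form works under either rule set):

    import numpy as np

    b1 = np.uint8(200)

    # NumPy 1.x: the expression promoted to a wider integer and gave 13107200.
    # NumPy 2.0 (NEP 50): the result keeps the uint8 dtype, the high bits are
    # lost, and the reconstructed 3-byte integer is wrong.
    unsafe = b1 << 16

    # Widening first gives the intended value on both versions.
    safe = np.int64(b1) << 16
    assert safe == 200 * 2**16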

nibabel/freesurfer/tests/test_io.py (+5 / -4)

@@ -4,14 +4,13 @@
 import struct
 import time
 import unittest
-import warnings
 from os.path import isdir
 from os.path import join as pjoin
 from pathlib import Path

 import numpy as np
 import pytest
-from numpy.testing import assert_allclose, assert_array_equal
+from numpy.testing import assert_allclose

 from ...fileslice import strided_scalar
 from ...testing import clear_and_catch_warnings

@@ -105,8 +104,10 @@ def test_geometry():
     assert np.array_equal(faces, faces2)

     # Validate byte ordering
-    coords_swapped = coords.byteswap().newbyteorder()
-    faces_swapped = faces.byteswap().newbyteorder()
+    coords_swapped = coords.byteswap()
+    coords_swapped = coords_swapped.view(coords_swapped.dtype.newbyteorder())
+    faces_swapped = faces.byteswap()
+    faces_swapped = faces_swapped.view(faces_swapped.dtype.newbyteorder())
     assert np.array_equal(coords_swapped, coords)
     assert np.array_equal(faces_swapped, faces)

nibabel/freesurfer/tests/test_mghformat.py (+1 / -1)

@@ -345,7 +345,7 @@ def test_mghheader_default_structarr():
     for endianness in (None,) + BIG_CODES:
         hdr2 = MGHHeader.default_structarr(endianness=endianness)
         assert hdr2 == hdr
-        assert hdr2.newbyteorder('>') == hdr
+        assert hdr2.view(hdr2.dtype.newbyteorder('>')) == hdr

     for endianness in LITTLE_CODES:
         with pytest.raises(ValueError):

nibabel/nifti1.py (+9 / -5)

@@ -89,7 +89,7 @@
 if have_binary128():
     # Only enable 128 bit floats if we really have IEEE binary 128 longdoubles
     _float128t: type[np.generic] = np.longdouble
-    _complex256t: type[np.generic] = np.longcomplex
+    _complex256t: type[np.generic] = np.clongdouble
 else:
     _float128t = np.void
     _complex256t = np.void

@@ -2443,10 +2443,14 @@ def _get_analyze_compat_dtype(arr):
         return np.dtype('int16' if arr.max() <= np.iinfo(np.int16).max else 'int32')

     mn, mx = arr.min(), arr.max()
-    if np.can_cast(mn, np.int32) and np.can_cast(mx, np.int32):
-        return np.dtype('int32')
-    if np.can_cast(mn, np.float32) and np.can_cast(mx, np.float32):
-        return np.dtype('float32')
+    if arr.dtype.kind in 'iu':
+        info = np.iinfo('int32')
+        if mn >= info.min and mx <= info.max:
+            return np.dtype('int32')
+    elif arr.dtype.kind == 'f':
+        info = np.finfo('float32')
+        if mn >= info.min and mx <= info.max:
+            return np.dtype('float32')

     raise ValueError(
         f'Cannot find analyze-compatible dtype for array with dtype={dtype} (min={mn}, max={mx})'
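
The rewritten branch stops feeding array scalars to np.can_cast: with value-based casting gone in NumPy 2.0, can_cast answers only whether one dtype can be cast to another (and no longer accepts plain Python scalars), which breaks the old "does this value fit?" usage. Comparing the actual min/max against np.iinfo/np.finfo bounds keeps that intent explicit. A small sketch of the pattern, using a hypothetical helper name rather than the nibabel one:

    import numpy as np

    def value_fits(value, dtype) -> bool:
        # NumPy-2.0-safe replacement for value-based np.can_cast(value, dtype)
        dtype = np.dtype(dtype)
        info = np.iinfo(dtype) if dtype.kind in 'iu' else np.finfo(dtype)
        return info.min <= value <= info.max

    assert value_fits(np.int64(1000), np.int32)
    assert not value_fits(np.int64(2**40), np.int32)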

nibabel/quaternions.py (+3 / -1)

@@ -29,7 +29,9 @@

 import numpy as np

-MAX_FLOAT = np.maximum_sctype(float)
+from .casting import sctypes
+
+MAX_FLOAT = sctypes['float'][-1]
 FLOAT_EPS = np.finfo(float).eps
nibabel/spatialimages.py (+2 / -1)

@@ -139,6 +139,7 @@
 import numpy as np

 from .arrayproxy import ArrayLike
+from .casting import sctypes_aliases
 from .dataobj_images import DataobjImage
 from .filebasedimages import FileBasedHeader, FileBasedImage
 from .fileholders import FileMap

@@ -333,7 +334,7 @@ def _supported_np_types(klass: type[HasDtype]) -> set[type[np.generic]]:
     else:
         raise e
     supported = set()
-    for np_type in set(np.sctypeDict.values()):
+    for np_type in sctypes_aliases:
         try:
             obj.set_data_dtype(np_type)
         except HeaderDataError:

nibabel/streamlines/trk.py (+1 / -1)

@@ -577,7 +577,7 @@ def _read_header(fileobj):
             endianness = swapped_code

             # Swap byte order
-            header_rec = header_rec.newbyteorder()
+            header_rec = header_rec.view(header_rec.dtype.newbyteorder())
             if header_rec['hdr_size'] != TrkFile.HEADER_SIZE:
                 msg = (
                     f"Invalid hdr_size: {header_rec['hdr_size']} "

nibabel/tests/test_analyze.py (+2 / -5)

@@ -26,8 +26,7 @@
 from .. import imageglobals
 from ..analyze import AnalyzeHeader, AnalyzeImage
 from ..arraywriters import WriterError
-from ..casting import as_int
-from ..loadsave import read_img_data
+from ..casting import as_int, sctypes_aliases
 from ..nifti1 import Nifti1Header
 from ..optpkg import optional_package
 from ..spatialimages import HeaderDataError, HeaderTypeError, supported_np_types

@@ -52,9 +51,7 @@
 def add_duplicate_types(supported_np_types):
     # Update supported numpy types with named scalar types that map to the same set of dtypes
     dtypes = {np.dtype(t) for t in supported_np_types}
-    supported_np_types.update(
-        scalar for scalar in set(np.sctypeDict.values()) if np.dtype(scalar) in dtypes
-    )
+    supported_np_types.update(scalar for scalar in sctypes_aliases if np.dtype(scalar) in dtypes)


 class TestAnalyzeHeader(tws._TestLabeledWrapStruct):

nibabel/tests/test_arrayproxy.py (-1)

@@ -12,7 +12,6 @@
 import contextlib
 import gzip
 import pickle
-import warnings
 from io import BytesIO
 from unittest import mock

nibabel/tests/test_arraywriters.py (+7 / -6)

@@ -20,14 +20,14 @@
     get_slope_inter,
     make_array_writer,
 )
-from ..casting import int_abs, on_powerpc, shared_range, type_info
+from ..casting import int_abs, sctypes, shared_range, type_info
 from ..testing import assert_allclose_safely, suppress_warnings
 from ..volumeutils import _dt_min_max, apply_read_scaling, array_from_file

-FLOAT_TYPES = np.sctypes['float']
-COMPLEX_TYPES = np.sctypes['complex']
-INT_TYPES = np.sctypes['int']
-UINT_TYPES = np.sctypes['uint']
+FLOAT_TYPES = sctypes['float']
+COMPLEX_TYPES = sctypes['complex']
+INT_TYPES = sctypes['int']
+UINT_TYPES = sctypes['uint']
 CFLOAT_TYPES = FLOAT_TYPES + COMPLEX_TYPES
 IUINT_TYPES = INT_TYPES + UINT_TYPES
 NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES

@@ -61,7 +61,8 @@ def test_arraywriters():
         assert aw.out_dtype == arr.dtype
         assert_array_equal(arr, round_trip(aw))
         # Byteswapped should be OK
-        bs_arr = arr.byteswap().newbyteorder('S')
+        bs_arr = arr.byteswap()
+        bs_arr = bs_arr.view(bs_arr.dtype.newbyteorder('S'))
         bs_aw = klass(bs_arr)
         bs_aw_rt = round_trip(bs_aw)
         # assert against original array because POWER7 was running into

nibabel/tests/test_casting.py (+9 / -8)

@@ -5,7 +5,7 @@

 import numpy as np
 import pytest
-from numpy.testing import assert_array_almost_equal, assert_array_equal
+from numpy.testing import assert_array_equal

 from ..casting import (
     CastingError,

@@ -17,15 +17,16 @@
     int_abs,
     int_to_float,
     longdouble_precision_improved,
+    sctypes,
     shared_range,
     ulp,
 )
 from ..testing import suppress_warnings


 def test_shared_range():
-    for ft in np.sctypes['float']:
-        for it in np.sctypes['int'] + np.sctypes['uint']:
+    for ft in sctypes['float']:
+        for it in sctypes['int'] + sctypes['uint']:
             # Test that going a bit above or below the calculated min and max
             # either generates the same number when cast, or the max int value
             # (if this system generates that) or something smaller (because of

@@ -54,7 +55,7 @@ def test_shared_range():
                 assert np.all((bit_bigger == casted_mx) | (bit_bigger == imax))
             else:
                 assert np.all(bit_bigger <= casted_mx)
-            if it in np.sctypes['uint']:
+            if it in sctypes['uint']:
                 assert mn == 0
                 continue
             # And something larger for the minimum

@@ -90,8 +91,8 @@ def test_shared_range_inputs():


 def test_casting():
-    for ft in np.sctypes['float']:
-        for it in np.sctypes['int'] + np.sctypes['uint']:
+    for ft in sctypes['float']:
+        for it in sctypes['int'] + sctypes['uint']:
             ii = np.iinfo(it)
             arr = [ii.min - 1, ii.max + 1, -np.inf, np.inf, np.nan, 0.2, 10.6]
             farr_orig = np.array(arr, dtype=ft)

@@ -140,7 +141,7 @@ def test_casting():


 def test_int_abs():
-    for itype in np.sctypes['int']:
+    for itype in sctypes['int']:
         info = np.iinfo(itype)
         in_arr = np.array([info.min, info.max], dtype=itype)
         idtype = np.dtype(itype)

@@ -188,7 +189,7 @@ def test_able_int_type():

 def test_able_casting():
     # Check the able_int_type function guesses numpy out type
-    types = np.sctypes['int'] + np.sctypes['uint']
+    types = sctypes['int'] + sctypes['uint']
     for in_type in types:
         in_info = np.iinfo(in_type)
         in_mn, in_mx = in_info.min, in_info.max
