Add Cupy space #1401

Status: Open. Wants to merge 38 commits into base: master.

Changes from all commits (38)
2898db6  ENH: add cupy tensor space (Oct 6, 2017)
f8fed7a  MAINT: improve import of cupy (Nov 23, 2017)
1c64fcb  WIP: fix cupy->numpy transfers (Nov 23, 2017)
8564f2f  WIP: fix custom cupy kernels (Nov 23, 2017)
f2085f8  MAINT: minor fixes (Nov 23, 2017)
0654031  WIP: implement cupy space unit tests (Nov 23, 2017)
dab3926  WIP: fix cublas stuff (Nov 24, 2017)
ffde709  WIP: fix cublas, weight propagation, implement (add,multiply).(reduce… (Nov 25, 2017)
3298d74  TST: ignore cupy_tensors in doctests if cupy is not available (Nov 25, 2017)
dd7f635  MAINT: fix various numpy and cupy tensor things (Nov 25, 2017)
5ab4756  ENH: allow indexing of CupyTensor with CupyTensor (Nov 25, 2017)
fefb722  MAINT: minor stuff (Nov 25, 2017)
6648425  MAINT: change weighted inf-norm to ignore weight (Nov 25, 2017)
a1cd482  ENH: make comparison of cupy arrays in tests faster (Nov 25, 2017)
cad427c  WIP: fix tensor tests for cupy (Nov 25, 2017)
658051f  MAINT: adapt all_almost_equal to cupy (Nov 26, 2017)
8a7dc33  WIP: fix cupy tensor tests (Nov 26, 2017)
d456e6e  WIP: fix cupy tests (Nov 27, 2017)
f0136b5  ENH: make ufuncs and ufunc_ops work with 2 outputs (Nov 27, 2017)
1e89425  MAINT: make weighting methods numpy-independent (Nov 27, 2017)
41fe853  WIP: fix cupy tests (Nov 27, 2017)
039f76f  WIP: fix cupy stuff (Nov 28, 2017)
07822cc  WIP: fix cupy tensor tests (Nov 28, 2017)
47f74c9  WIP: fix pspace noise_array and out aliasing issue (Nov 29, 2017)
cc95a04  ENH: implement Numpy-style broadcasting in pspace ufuncs (Nov 29, 2017)
3084f1d  WIP: simplify tests and add utils for array modules (Nov 29, 2017)
e3e364b  ENH: add impl to writable_array (Nov 29, 2017)
d0c9705  ENH: add impl to asarray, tests, and fix real and imag for cupy (Nov 29, 2017)
f4f376c  WIP: add asarray helper, fix diff_ops for cupy (Nov 30, 2017)
d0fc8e2  MAINT: fix utils (Nov 30, 2017)
c632802  MAINT: exclude unsupported ufunc operators (Nov 30, 2017)
7879a50  MAINT: fix bad parens in test (Nov 30, 2017)
4d1183e  BUG: fix indexing with cupy tensor (Nov 30, 2017)
4ac503d  MAINT: minor fix in diff_ops (Nov 30, 2017)
5c638d6  TST: mark cupy-only test as skip if cupy is not available (Dec 1, 2017)
3cdde9f  MAINT: fix Py2-incompatible doctest in utility (Dec 1, 2017)
87f8401  MAINT: Minor improvements related to cupy (adler-j, Feb 14, 2018)
958dacc  WIP: Fixes to tests due to cupy rebase (adler-j, Jun 28, 2018)
4 changes: 2 additions & 2 deletions odl/contrib/mrc/test/uncompr_bin_test.py
@@ -35,9 +35,9 @@
 # --- Tests --- #
 
 
-def test_uncompr_bin_io_without_header(shape, floating_dtype, order):
+def test_uncompr_bin_io_without_header(shape, odl_floating_dtype, order):
     """Test I/O bypassing the header processing."""
-    dtype = np.dtype(floating_dtype)
+    dtype = np.dtype(odl_floating_dtype)
     with tempfile.NamedTemporaryFile() as named_file:
         file = named_file.file
 
86 changes: 45 additions & 41 deletions odl/discr/diff_ops.py
@@ -14,7 +14,8 @@
 from odl.discr.lp_discr import DiscreteLp
 from odl.operator.tensor_ops import PointwiseTensorFieldOperator
 from odl.space import ProductSpace
-from odl.util import writable_array, signature_string, indent
+from odl.util import (
+    writable_array, asarray, array_module, signature_string, indent)
 
 
 __all__ = ('PartialDerivative', 'Gradient', 'Divergence', 'Laplacian')
@@ -137,9 +138,9 @@ def _call(self, x, out=None):
         if out is None:
             out = self.range.element()
 
-        # TODO: this pipes CUDA arrays through NumPy. Write native operator.
-        with writable_array(out) as out_arr:
-            finite_diff(x.asarray(), axis=self.axis, dx=self.dx,
+        impl = self.domain.impl
+        with writable_array(out, impl=impl) as out_arr:
+            finite_diff(x, axis=self.axis, dx=self.dx, impl=impl,
                         method=self.method, pad_mode=self.pad_mode,
                         pad_const=self.pad_const, out=out_arr)
         return out
@@ -190,8 +191,7 @@ def __repr__(self):
 
     def __str__(self):
         """Return ``str(self)``."""
-        dom_ran_str = '\n-->\n'.join([repr(self.domain), repr(self.range)])
-        return '{}:\n{}'.format(self.__class__.__name__, indent(dom_ran_str))
+        return repr(self)
 
 
 class Gradient(PointwiseTensorFieldOperator):
@@ -347,16 +347,15 @@ def _call(self, x, out=None):
         if out is None:
             out = self.range.element()
 
-        x_arr = x.asarray()
         ndim = self.domain.ndim
         dx = self.domain.cell_sides
+        impl = self.domain.impl
 
         for axis in range(ndim):
-            with writable_array(out[axis]) as out_arr:
-                finite_diff(x_arr, axis=axis, dx=dx[axis], method=self.method,
-                            pad_mode=self.pad_mode,
-                            pad_const=self.pad_const,
-                            out=out_arr)
+            with writable_array(out[axis], impl=impl) as out_arr:
+                finite_diff(x, axis=axis, dx=dx[axis], impl=impl,
+                            method=self.method, pad_mode=self.pad_mode,
+                            pad_const=self.pad_const, out=out_arr)
         return out
 
     def derivative(self, point=None):
@@ -414,8 +413,7 @@ def __repr__(self):
 
     def __str__(self):
         """Return ``str(self)``."""
-        dom_ran_str = '\n-->\n'.join([repr(self.domain), repr(self.range)])
-        return '{}:\n{}'.format(self.__class__.__name__, indent(dom_ran_str))
+        return repr(self)
 
 
 class Divergence(PointwiseTensorFieldOperator):
@@ -494,10 +492,12 @@ def __init__(self, domain=None, range=None, method='forward',
         ...                          [2., 3., 4., 5., 6.]])
         >>> f = div.domain.element([data, data])
         >>> div_f = div(f)
-        >>> print(div_f)
-        [[ 2., 2., 2., 2., -3.],
-         [ 2., 2., 2., 2., -4.],
-         [ -1., -2., -3., -4., -12.]]
+        >>> div_f
+        uniform_discr([ 0., 0.], [ 3., 5.], (3, 5)).element(
+            [[ 2., 2., 2., 2., -3.],
+             [ 2., 2., 2., 2., -4.],
+             [ -1., -2., -3., -4., -12.]]
+        )
 
         Verify adjoint:
 
@@ -559,11 +559,13 @@ def _call(self, x, out=None):
 
         ndim = self.range.ndim
         dx = self.range.cell_sides
+        impl = self.range.impl
 
-        tmp = np.empty(out.shape, out.dtype, order=out.space.default_order)
-        with writable_array(out) as out_arr:
+        tmp = array_module(impl).empty(
+            out.shape, out.dtype, order=out.space.default_order)
+        with writable_array(out, impl=impl) as out_arr:
             for axis in range(ndim):
-                finite_diff(x[axis], axis=axis, dx=dx[axis],
+                finite_diff(x[axis], axis=axis, dx=dx[axis], impl=impl,
                             method=self.method, pad_mode=self.pad_mode,
                             pad_const=self.pad_const,
                             out=tmp)
@@ -623,8 +625,7 @@ def __repr__(self):
 
     def __str__(self):
         """Return ``str(self)``."""
-        dom_ran_str = '\n-->\n'.join([repr(self.domain), repr(self.range)])
-        return '{}:\n{}'.format(self.__class__.__name__, indent(dom_ran_str))
+        return repr(self)
 
 
 class Laplacian(PointwiseTensorFieldOperator):
@@ -714,24 +715,23 @@ def _call(self, x, out=None):
         else:
             out.set_zero()
 
-        x_arr = x.asarray()
-        out_arr = out.asarray()
-        tmp = np.empty(out.shape, out.dtype, order=out.space.default_order)
-
         ndim = self.domain.ndim
         dx = self.domain.cell_sides
+        impl = self.domain.impl
+        tmp = array_module(impl).empty(
+            out.shape, out.dtype, order=out.space.default_order)
 
-        with writable_array(out) as out_arr:
+        with writable_array(out, impl=impl) as out_arr:
             for axis in range(ndim):
                 # TODO: this can be optimized
-                finite_diff(x_arr, axis=axis, dx=dx[axis] ** 2,
+                finite_diff(x, axis=axis, dx=dx[axis] ** 2, impl=impl,
                             method='forward',
                             pad_mode=self.pad_mode,
                             pad_const=self.pad_const, out=tmp)
 
                 out_arr += tmp
 
-                finite_diff(x_arr, axis=axis, dx=dx[axis] ** 2,
+                finite_diff(x, axis=axis, dx=dx[axis] ** 2, impl=impl,
                             method='backward',
                             pad_mode=self.pad_mode,
                             pad_const=self.pad_const, out=tmp)
@@ -781,11 +781,11 @@ def __repr__(self):
 
     def __str__(self):
         """Return ``str(self)``."""
-        dom_ran_str = '\n-->\n'.join([repr(self.domain), repr(self.range)])
-        return '{}:\n{}'.format(self.__class__.__name__, indent(dom_ran_str))
+        return repr(self)
 
 
-def finite_diff(f, axis, dx=1.0, method='forward', out=None, **kwargs):
+def finite_diff(f, axis, dx=1.0, method='forward', out=None, impl='numpy',
+                **kwargs):
     """Calculate the partial derivative of ``f`` along a given ``axis``.
 
     In the interior of the domain of f, the partial derivative is computed
@@ -819,6 +819,9 @@ def finite_diff(f, axis, dx=1.0, method='forward', out=None, **kwargs):
     out : `numpy.ndarray`, optional
         An N-dimensional array to which the output is written. Has to have
         the same shape as the input array ``f``.
+    impl : str, optional
+        Implementation backend for array manipulations. Usually handled by
+        the calling code.
     pad_mode : string, optional
         The padding mode to use outside the domain.
 
@@ -883,7 +886,7 @@ def finite_diff(f, axis, dx=1.0, method='forward', out=None, **kwargs):
     >>> out is finite_diff(f, axis=0, out=out)
     True
     """
-    f_arr = np.asarray(f)
+    f_arr = asarray(f, impl=impl)
    ndim = f_arr.ndim
 
     if f_arr.shape[axis] < 2:
@@ -912,8 +915,9 @@ def finite_diff(f, axis, dx=1.0, method='forward', out=None, **kwargs):
     pad_const = kwargs.pop('pad_const', 0)
     pad_const = f.dtype.type(pad_const)
 
+    arrmod = array_module(impl)
     if out is None:
-        out = np.empty_like(f_arr)
+        out = arrmod.empty_like(f_arr)
     else:
         if out.shape != f.shape:
             raise ValueError('expected output shape {}, got {}'
@@ -931,24 +935,24 @@ def finite_diff(f, axis, dx=1.0, method='forward', out=None, **kwargs):
 
     # create slice objects: initially all are [:, :, ..., :]
 
-    # Swap axes so that the axis of interest is first. This is a O(1)
+    # Swap axes so that the axis of interest is first. This is an O(1)
     # operation and is done to simplify the code below.
-    out, out_in = np.swapaxes(out, 0, axis), out
-    f_arr = np.swapaxes(f_arr, 0, axis)
+    out, out_in = arrmod.swapaxes(out, 0, axis), out
+    f_arr = arrmod.swapaxes(f_arr, 0, axis)
 
     # Interior of the domain of f
     if method == 'central':
         # 1D equivalent: out[1:-1] = (f[2:] - f[:-2])/2.0
-        np.subtract(f_arr[2:], f_arr[:-2], out=out[1:-1])
+        arrmod.subtract(f_arr[2:], f_arr[:-2], out=out[1:-1])
         out[1:-1] /= 2.0
 
     elif method == 'forward':
         # 1D equivalent: out[1:-1] = (f[2:] - f[1:-1])
-        np.subtract(f_arr[2:], f_arr[1:-1], out=out[1:-1])
+        arrmod.subtract(f_arr[2:], f_arr[1:-1], out=out[1:-1])
 
     elif method == 'backward':
         # 1D equivalent: out[1:-1] = (f[1:-1] - f[:-2])
-        np.subtract(f_arr[1:-1], f_arr[:-2], out=out[1:-1])
+        arrmod.subtract(f_arr[1:-1], f_arr[:-2], out=out[1:-1])
 
     # Boundaries
     if pad_mode == 'constant':
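Note on the pattern above: the operators and finite_diff now route all array creation and manipulation through array_module(impl) and asarray(f, impl=impl) from odl.util, so a single code path serves both NumPy and CuPy. The helper itself is not shown in this diff; a minimal sketch of what it could look like (the name is taken from the import above, the body is an assumption):

    # Hypothetical sketch of the `array_module` helper used in the diff;
    # the real implementation lives in `odl.util` and may differ.
    import numpy as np


    def array_module(impl):
        """Return the array namespace (numpy or cupy) for a backend name."""
        if impl == 'numpy':
            return np
        elif impl == 'cupy':
            import cupy  # deferred so that numpy-only installs keep working
            return cupy
        else:
            raise ValueError('`impl` {!r} not understood'.format(impl))

Dispatching through a module object works here because CuPy mirrors the NumPy API for every function the diff uses (empty, empty_like, swapaxes, subtract).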
16 changes: 9 additions & 7 deletions odl/discr/discretization.py
@@ -331,16 +331,18 @@ def copy(self):
         """Create an identical (deep) copy of this element."""
         return self.space.element(self.tensor.copy())
 
-    def asarray(self, out=None):
-        """Extract the data of this array as a numpy array.
+    def asarray(self, out=None, impl='numpy'):
+        """Extract the data of this element as an array.
 
         Parameters
         ----------
-        out : `numpy.ndarray`, optional
-            Array in which the result should be written in-place.
-            Has to be contiguous and of the correct dtype.
+        out : ndarray, optional
+            Array into which the result should be written. Must be contiguous
+            and of the correct dtype.
+        impl : str, optional
+            Array backend for the output, used when ``out`` is not given.
         """
-        return self.tensor.asarray(out=out)
+        return self.tensor.asarray(out=out, impl=impl)
 
     def astype(self, dtype):
         """Return a copy of this element with new ``dtype``.
@@ -409,7 +411,7 @@ def __setitem__(self, indices, values):
             indices = indices.tensor
         if isinstance(values, type(self)):
            values = values.tensor
-        self.tensor.__setitem__(indices, values)
+        self.tensor[indices] = values
 
     def sampling(self, ufunc, **kwargs):
         """Sample a continuous function and assign to this element.
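With the extra impl argument, element data can be extracted directly into the desired backend. A hypothetical usage sketch (assumes CuPy is installed; the 'cupy' backend name follows the space implementation added in this PR):

    import odl

    space = odl.uniform_discr(0, 1, 5)
    x = space.element([1, 2, 3, 4, 5])

    x_np = x.asarray()             # numpy.ndarray, the default (impl='numpy')
    x_cp = x.asarray(impl='cupy')  # cupy.ndarray, data copied to the GPU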
23 changes: 0 additions & 23 deletions odl/discr/lp_discr.py
@@ -755,29 +755,6 @@ def conj(self, out=None):
             self.tensor.conj(out=out.tensor)
         return out
 
-    def __setitem__(self, indices, values):
-        """Set values of this element.
-
-        Parameters
-        ----------
-        indices : int or `slice`
-            The position(s) that should be set
-        values : scalar or `array-like`
-            Value(s) to be assigned.
-            If ``indices`` is an integer, ``values`` must be a scalar
-            value.
-            If ``indices`` is a slice, ``values`` must be
-            broadcastable to the size of the slice (same size,
-            shape ``(1,)`` or scalar).
-            For ``indices == slice(None)``, i.e. in the call
-            ``vec[:] = values``, a multi-dimensional array of correct
-            shape is allowed as ``values``.
-        """
-        if values in self.space:
-            self.tensor[indices] = values.tensor
-        else:
-            super(DiscreteLpElement, self).__setitem__(indices, values)
-
     def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
         """Interface to Numpy's ufunc machinery.
 
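The removed override is now redundant: the base-class __setitem__ in the discretization.py hunk above already unwraps element values to their backing tensors. A quick sketch of the behavior that must keep working after the removal (usage example, not part of the diff):

    import odl

    space = odl.uniform_discr(0, 1, 4)
    x = space.element([1, 2, 3, 4])
    y = space.element([5, 6, 7, 8])

    x[:] = y    # base class assigns via x.tensor[indices] = y.tensor
    x[0] = 0.0  # scalar assignment takes the same path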
8 changes: 4 additions & 4 deletions odl/set/space.py
@@ -374,10 +374,10 @@ def __pow__(self, shape):
         >>> r2 ** 4
         ProductSpace(rn(2), 4)
 
-        Multiple powers work as expected:
+        Multiple powers work as expected (first entry is outermost power):
 
-        >>> r2 ** (4, 2)
-        ProductSpace(ProductSpace(rn(2), 4), 2)
+        >>> r2 ** (3, 4)
+        ProductSpace(ProductSpace(rn(2), 4), 3)
         """
         from odl.space import ProductSpace
 
@@ -387,7 +387,7 @@ def __pow__(self, shape):
         shape = tuple(shape)
 
         pspace = self
-        for n in shape:
+        for n in reversed(shape):
             pspace = ProductSpace(pspace, n)
 
         return pspace
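The switch to reversed(shape) makes the nesting match the doctest above: the first entry of the tuple becomes the outermost power. A short sanity check (usage sketch):

    import odl

    r2 = odl.rn(2)

    # r2 ** (3, 4) builds ProductSpace(r2, 4) first (innermost), then
    # wraps it in a ProductSpace of size 3 (outermost).
    assert r2 ** (3, 4) == odl.ProductSpace(odl.ProductSpace(r2, 4), 3)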
3 changes: 3 additions & 0 deletions odl/space/__init__.py
@@ -19,6 +19,9 @@
 from .npy_tensors import *
 __all__ += npy_tensors.__all__
 
+from .cupy_tensors import *
+__all__ += cupy_tensors.__all__
+
 from .pspace import *
 __all__ += pspace.__all__
 
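Since cupy_tensors is imported unconditionally here, the module itself has to cope with CuPy being absent (compare the commits "improve import of cupy" and "ignore cupy_tensors in doctests if cupy is not available"). A hypothetical guard pattern inside cupy_tensors.py; the actual module may handle this differently:

    try:
        import cupy
        CUPY_AVAILABLE = True
    except ImportError:
        cupy = None
        CUPY_AVAILABLE = False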
27 changes: 6 additions & 21 deletions odl/space/base_tensors.py
@@ -9,7 +9,6 @@
 """Base classes for implementations of tensor spaces."""
 
 from __future__ import print_function, division, absolute_import
-from builtins import object
 from numbers import Integral
 import numpy as np
 
@@ -18,7 +17,8 @@
 from odl.util import (
     is_numeric_dtype, is_real_dtype, is_floating_dtype,
     is_real_floating_dtype, is_complex_floating_dtype, safe_int_conv,
-    array_str, dtype_str, signature_string, indent, writable_array)
+    array_str, dtype_str, signature_string, indent, writable_array,
+    none_context)
 from odl.util.ufuncs import TensorSpaceUfuncs
 from odl.util.utility import TYPE_MAP_R2C, TYPE_MAP_C2R
 
@@ -815,26 +815,11 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
 
         # --- Evaluate ufunc --- #
 
-        # Trivial context used to create a single code path for the ufunc
-        # evaluation. For `None` output parameter(s), this is used instead of
-        # `writable_array`.
-        class CtxNone(object):
-            """Trivial context manager class.
-
-            When used as ::
-
-                with CtxNone() as obj:
-                    # do stuff with `obj`
-
-            the returned ``obj`` is ``None``.
-            """
-            __enter__ = __exit__ = lambda *_: None
-
         if method == '__call__':
             if ufunc.nout == 1:
                 # Make context for output (trivial one returns `None`)
                 if out is None:
-                    out_ctx = CtxNone()
+                    out_ctx = none_context()
                 else:
                     out_ctx = writable_array(out, **array_kwargs)
 
@@ -851,11 +836,11 @@ class CtxNone(object):
                 if out1 is not None:
                     out1_ctx = writable_array(out1, **array_kwargs)
                 else:
-                    out1_ctx = CtxNone()
+                    out1_ctx = none_context()
                 if out2 is not None:
                     out2_ctx = writable_array(out2, **array_kwargs)
                 else:
-                    out2_ctx = CtxNone()
+                    out2_ctx = none_context()
 
                 # Evaluate ufunc
                 with out1_ctx as out1_arr, out2_ctx as out2_arr:
@@ -872,7 +857,7 @@ class CtxNone(object):
         else:  # method != '__call__'
             # Make context for output (trivial one returns `None`)
             if out is None:
-                out_ctx = CtxNone()
+                out_ctx = none_context()
             else:
                 out_ctx = writable_array(out, **array_kwargs)
 
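none_context replaces the ad-hoc CtxNone class with a shared helper in odl.util, keeping a single code path whether or not an out array is given. Its implementation is not part of this diff; a minimal sketch of what it could look like, matching the semantics of the removed CtxNone (the body is an assumption):

    from contextlib import contextmanager


    @contextmanager
    def none_context():
        """Trivial context manager that yields None.

        Allows writing ``with ctx as arr: ...`` uniformly, where ``ctx``
        is either ``writable_array(out)`` or this trivial stand-in when
        ``out`` is None.
        """
        yield None

Python 3.7 later added contextlib.nullcontext for exactly this purpose, but the codebase still supported Python 2 at the time of this PR.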