diff --git a/odl_cpp_utils b/odl_cpp_utils index 9e70607..fc8f04c 160000 --- a/odl_cpp_utils +++ b/odl_cpp_utils @@ -1 +1 @@ -Subproject commit 9e7060773aaf23ba3d146be976cda0e6c0ef3961 +Subproject commit fc8f04c2e798fd58f39c21402e35aa7460ee1944 diff --git a/odlcuda/cu_ntuples.py b/odlcuda/cu_ntuples.py index 8bad138..ad16f4a 100644 --- a/odlcuda/cu_ntuples.py +++ b/odlcuda/cu_ntuples.py @@ -21,13 +21,13 @@ import numpy as np from odl.set.sets import RealNumbers -from odl.space.base_ntuples import FnBase, FnBaseVector +from odl.space.base_tensors import TensorSpace, Tensor from odl.space.weighting import ( - Weighting, ArrayWeighting, ConstWeighting, NoWeighting, + Weighting, ArrayWeighting, ConstWeighting, CustomInner, CustomNorm, CustomDist) -from odl.util.utility import dtype_repr, signature_string +from odl.util.utility import dtype_str, signature_string -from odlcuda.ufuncs import CudaFnUfuncs +from odlcuda.ufuncs import CudaTensorSpaceUfuncs try: import odlcuda.odlcuda_ as backend @@ -37,9 +37,9 @@ CUDA_AVAILABLE = False -__all__ = ('CudaFn', 'CudaFnVector', +__all__ = ('CudaTensorSpace', 'CudaTensor', 'CUDA_DTYPES', 'CUDA_AVAILABLE', - 'CudaFnConstWeighting', 'CudaFnArrayWeighting') + 'CudaTensorSpaceConstWeighting', 'CudaTensorSpaceArrayWeighting') def _get_int_type(): @@ -81,19 +81,22 @@ def _add_if_exists(dtype, name): CUDA_DTYPES = list(set(CUDA_DTYPES)) # Remove duplicates -class CudaFn(FnBase): +IMPL_NAME = 'odlcuda' - """The space `FnBase`, implemented in CUDA. + +class CudaTensorSpace(TensorSpace): + + """The space `TensorSpace`, implemented in CUDA. Requires the compiled ODL extension ``odlcuda``. """ - def __init__(self, size, dtype='float32', **kwargs): + def __init__(self, shape, dtype='float32', **kwargs): """Initialize a new instance. Parameters ---------- - size : positive `int` + shape : positive `int` The number of dimensions of the space dtype : `object` The data type of the storage array. Can be provided in any @@ -107,7 +110,7 @@ def __init__(self, size, dtype='float32', **kwargs): Use weighted inner product, norm, and dist. The following types are supported as ``weight``: - `FnWeightingBase` : + `Weighting` : Use this weighting as-is. Compatibility with this space's elements is not checked during init. @@ -120,7 +123,7 @@ def __init__(self, size, dtype='float32', **kwargs): main memory, which results in slower space functions due to a copy during evaluation. - `CudaFnVector` : + `CudaTensor` : same as 1-dim. array-like, except that copying is avoided if the ``dtype`` of the vector is the same as this space's ``dtype``. @@ -141,7 +144,7 @@ def __init__(self, size, dtype='float32', **kwargs): dist : `callable`, optional The distance function defining a metric on the space. - It must accept two `FnVector` arguments and + It must accept two `CudaTensor` arguments and fulfill the following mathematical conditions for any three vectors ``x, y, z``: @@ -155,7 +158,7 @@ def __init__(self, size, dtype='float32', **kwargs): norm : `callable`, optional The norm implementation. It must accept an - `FnVector` argument, return a `float` and satisfy the + `CudaTensor` argument, return a `float` and satisfy the following conditions for all vectors ``x, y`` and scalars ``s``: @@ -171,7 +174,7 @@ def __init__(self, size, dtype='float32', **kwargs): inner : `callable`, optional The inner product implementation. 
It must accept two - `FnVector` arguments, return a element from + `CudaTensor` arguments, return an element from the field of the space (real or complex number) and satisfy the following conditions for all vectors ``x, y, z`` and scalars ``s``: @@ -187,7 +190,7 @@ def __init__(self, size, dtype='float32', **kwargs): raise TypeError('data type {!r} not supported in CUDA' ''.format(dtype)) - super(CudaFn, self).__init__(size, dtype) + super(CudaTensorSpace, self).__init__(shape, dtype) self._vector_impl = _TYPE_MAP_NPY2CUDA[self.dtype] dist = kwargs.pop('dist', None) @@ -204,29 +207,40 @@ def __init__(self, size, dtype='float32', **kwargs): if isinstance(weighting, Weighting): self.__weighting = weighting elif np.isscalar(weighting): - self.__weighting = CudaFnConstWeighting( + self.__weighting = CudaTensorSpaceConstWeighting( weighting, exponent=exponent) - elif isinstance(weighting, CudaFnVector): - self.__weighting = CudaFnArrayWeighting( + elif isinstance(weighting, CudaTensor): + self.__weighting = CudaTensorSpaceArrayWeighting( weighting, exponent=exponent) else: - # Must make a CudaFnVector from the array + # Must make a CudaTensor from the array weighting = self.element(np.asarray(weighting)) if weighting.ndim == 1: - self.__weighting = CudaFnArrayWeighting( + self.__weighting = CudaTensorSpaceArrayWeighting( weighting, exponent=exponent) else: raise ValueError('invalid weighting argument {!r}' ''.format(weighting)) elif dist is not None: - self.__weighting = CudaFnCustomDist(dist) + self.__weighting = CudaTensorSpaceCustomDist(dist) elif norm is not None: - self.__weighting = CudaFnCustomNorm(norm) + self.__weighting = CudaTensorSpaceCustomNorm(norm) elif inner is not None: # Use fast dist implementation - self.__weighting = CudaFnCustomInner(inner) + self.__weighting = CudaTensorSpaceCustomInner(inner) else: # all None -> no weighing - self.__weighting = CudaFnNoWeighting(exponent) + self.__weighting = CudaTensorSpaceConstWeighting( + 1.0, exponent=exponent) + + @property + def impl(self): + """Name of the implementation back-end: ``'odlcuda'``.""" + return 'odlcuda' + + @property + def default_order(self): + """Default storage order for new elements in this space: ``'F'``.""" + return 'F' @property def exponent(self): @@ -240,8 +254,9 @@ def weighting(self): @property def is_weighted(self): - """Return `True` if the weighting is not `CudaFnNoWeighting`.""" - return not isinstance(self.weighting, CudaFnNoWeighting) + """Return `True` if the space is not weighted by constant 1.0.""" + return not (isinstance(self.weighting, CudaTensorSpaceConstWeighting) + and self.weighting.const == 1.0) def element(self, inp=None, data_ptr=None): """Create a new element. @@ -251,7 +266,7 @@ def element(self, inp=None, data_ptr=None): inp : `array-like` or scalar, optional Input to initialize the new element. - If ``inp`` is a `numpy.ndarray` of shape ``(size,)`` + If ``inp`` is a `numpy.ndarray` of shape ``self.shape`` and the same data type as this space, the array is wrapped, not copied. Other array-like objects are copied (with broadcasting @@ -270,25 +285,25 @@ def element(self, inp=None, data_ptr=None): Returns ------- - element : `CudaFnVector` + element : `CudaTensor` The new element Notes ----- - This method preserves "array views" of correct size and type, + This method preserves "array views" of correct shape and type, see the examples below. TODO: No, it does not yet!
Examples -------- - >>> uc3 = CudaFn(3, 'uint8') + >>> uc3 = CudaTensorSpace(3, 'uint8') >>> x = uc3.element(np.array([1, 2, 3], dtype='uint8')) >>> x - CudaFn(3, 'uint8').element([1, 2, 3]) + tensor_space(3, dtype='uint8', impl='odlcuda').element([1, 2, 3]) >>> y = uc3.element([1, 2, 3]) >>> y - CudaFn(3, 'uint8').element([1, 2, 3]) + tensor_space(3, dtype='uint8', impl='odlcuda').element([1, 2, 3]) """ if inp is None: if data_ptr is None: @@ -312,9 +327,9 @@ def element(self, inp=None, data_ptr=None): else: # Array-like input. Need to go through a NumPy array arr = np.array(inp, copy=False, dtype=self.dtype, ndmin=1) - if arr.shape != (self.size,): + if arr.shape != self.shape: raise ValueError('expected input shape {}, got {}' - ''.format((self.size,), arr.shape)) + ''.format(self.shape, arr.shape)) elem = self.element() elem[:] = arr return elem @@ -330,9 +345,9 @@ def _lincomb(self, a, x1, b, x2, out): ---------- a, b : `field` element Scalar to multiply ``x`` and ``y`` with. - x, y : `CudaFnVector` + x, y : `CudaTensor` The summands. - out : `CudaFnVector` + out : `CudaTensor` The Vector that the result is written to. Returns @@ -341,14 +356,14 @@ def _lincomb(self, a, x1, b, x2, out): Examples -------- - >>> r3 = CudaFn(3) + >>> r3 = CudaTensorSpace(3) >>> x = r3.element([1, 2, 3]) >>> y = r3.element([4, 5, 6]) >>> out = r3.element() >>> r3.lincomb(2, x, 3, y, out) # out is returned - CudaFn(3).element([14.0, 19.0, 24.0]) + rn(3, impl='odlcuda').element([ 14., 19., 24.]) >>> out - CudaFn(3).element([14.0, 19.0, 24.0]) + rn(3, impl='odlcuda').element([ 14., 19., 24.]) """ out.data.lincomb(a, x1.data, b, x2.data) @@ -357,7 +372,7 @@ def _inner(self, x1, x2): Parameters ---------- - x1, x2 : `CudaFnVector` + x1, x2 : `CudaTensor` Returns ------- @@ -367,7 +382,7 @@ def _inner(self, x1, x2): Examples -------- - >>> uc3 = CudaFn(3, 'uint8') + >>> uc3 = CudaTensorSpace(3, 'uint8') >>> x = uc3.element([1, 2, 3]) >>> y = uc3.element([3, 1, 5]) >>> uc3.inner(x, y) @@ -380,7 +395,7 @@ def _integral(self, x): Parameters ---------- - x : `CudaFnVector` + x : `CudaTensor` The vector whose integral should be computed. 
Returns @@ -390,9 +405,9 @@ def _integral(self, x): Examples -------- - >>> r3 = CudaFn(2, dtype='float32') + >>> r3 = CudaTensorSpace(2, dtype='float32') >>> x = r3.element([3, -1]) - >>> r3.integral(x) + >>> r3._integral(x) 2.0 Notes @@ -409,7 +424,7 @@ def _dist(self, x1, x2): Parameters ---------- - x1, x2 : `CudaFnVector` + x1, x2 : `CudaTensor` The vectors whose mutual distance is calculated Returns @@ -419,7 +434,7 @@ def _dist(self, x1, x2): Examples -------- - >>> r2 = CudaFn(2) + >>> r2 = CudaTensorSpace(2) >>> x = r2.element([3, 8]) >>> y = r2.element([0, 4]) >>> r2.dist(x, y) @@ -435,7 +450,7 @@ def _norm(self, x): Parameters ---------- - x : `CudaFnVector` + x : `CudaTensor` Returns ------- @@ -444,7 +459,7 @@ def _norm(self, x): Examples -------- - >>> uc3 = CudaFn(3, 'uint8') + >>> uc3 = CudaTensorSpace(3, 'uint8') >>> x = uc3.element([2, 3, 6]) >>> uc3.norm(x) 7.0 @@ -461,9 +476,9 @@ def _multiply(self, x1, x2, out): Parameters ---------- - x1, x2 : `CudaFnVector` + x1, x2 : `CudaTensor` Factors in product - out : `CudaFnVector` + out : `CudaTensor` Element to which the result is written Returns @@ -473,14 +488,14 @@ def _multiply(self, x1, x2, out): Examples -------- - >>> rn = CudaFn(3) + >>> rn = CudaTensorSpace(3) >>> x1 = rn.element([5, 3, 2]) >>> x2 = rn.element([1, 2, 3]) >>> out = rn.element() >>> rn.multiply(x1, x2, out) # out is returned - CudaFn(3).element([5.0, 6.0, 6.0]) + rn(3, impl='odlcuda').element([ 5., 6., 6.]) >>> out - CudaFn(3).element([5.0, 6.0, 6.0]) + rn(3, impl='odlcuda').element([ 5., 6., 6.]) """ out.data.multiply(x1.data, x2.data) @@ -494,9 +509,9 @@ def _divide(self, x1, x2, out): Parameters ---------- - x1, x2 : `CudaFnVector` + x1, x2 : `CudaTensor` Factors in the product - out : `CudaFnVector` + out : `CudaTensor` Element to which the result is written Returns @@ -506,14 +521,14 @@ def _divide(self, x1, x2, out): Examples -------- - >>> rn = CudaFn(3) + >>> rn = CudaTensorSpace(3) >>> x1 = rn.element([5, 3, 2]) >>> x2 = rn.element([1, 2, 2]) >>> out = rn.element() >>> rn.divide(x1, x2, out) # out is returned - CudaFn(3).element([5.0, 1.5, 1.0]) + rn(3, impl='odlcuda').element([ 5. , 1.5, 1. ]) >>> out - CudaFn(3).element([5.0, 1.5, 1.0]) + rn(3, impl='odlcuda').element([ 5. , 1.5, 1. ]) """ out.data.divide(x1.data, x2.data) @@ -532,7 +547,7 @@ def __eq__(self, other): ------- equals : `bool` `True` if other is an instance of this space's type - with the same ``size``, ``dtype`` and space functions, + with the same ``shape``, ``dtype`` and space functions, otherwise `False`. 
Examples @@ -543,8 +558,8 @@ def __eq__(self, other): >>> from functools import partial >>> dist2 = partial(dist, p=2) - >>> r3 = CudaFn(3, dist=dist2) - >>> r3_same = CudaFn(3, dist=dist2) + >>> r3 = CudaTensorSpace(3, dist=dist2) + >>> r3_same = CudaTensorSpace(3, dist=dist2) >>> r3 == r3_same True @@ -552,29 +567,29 @@ def __eq__(self, other): same applies for ``norm`` and ``inner``: >>> dist1 = partial(dist, p=1) - >>> r3_1 = CudaFn(3, dist=dist1) - >>> r3_2 = CudaFn(3, dist=dist2) + >>> r3_1 = CudaTensorSpace(3, dist=dist1) + >>> r3_2 = CudaTensorSpace(3, dist=dist2) >>> r3_1 == r3_2 False Be careful with Lambdas - they result in non-identical function objects: - >>> r3_lambda1 = CudaFn(3, dist=lambda x, y: norm(x-y, p=1)) - >>> r3_lambda2 = CudaFn(3, dist=lambda x, y: norm(x-y, p=1)) + >>> r3_lambda1 = CudaTensorSpace(3, dist=lambda x, y: norm(x-y, p=1)) + >>> r3_lambda2 = CudaTensorSpace(3, dist=lambda x, y: norm(x-y, p=1)) >>> r3_lambda1 == r3_lambda2 False """ if other is self: return True - return (super(CudaFn, self).__eq__(other) and + return (super(CudaTensorSpace, self).__eq__(other) and self.weighting == other.weighting) @property def impl(self): - """Name of the implementation: ``'cuda'``.""" - return 'cuda' + """Name of the implementation: ``'odlcuda'``.""" + return 'odlcuda' @staticmethod def available_dtypes(): @@ -583,7 +598,7 @@ def available_dtypes(): @staticmethod def default_dtype(field=None): - """Return the default of `CudaFn` data type for a given field. + """Return the default of `CudaTensorSpace` data type for a given field. Parameters ---------- @@ -611,11 +626,11 @@ def __repr__(self): elif self.is_complex: ctor = 'cn' else: - ctor = 'fn' + ctor = 'tensor_space' posargs = [self.size] - default_dtype_str = dtype_repr(self.default_dtype(self.field)) - optargs = [('dtype', dtype_repr(self.dtype), default_dtype_str), + default_dtype_str = dtype_str(self.default_dtype(self.field)) + optargs = [('dtype', dtype_str(self.dtype), default_dtype_str), ('impl', self.impl, 'numpy')] inner_str = signature_string(posargs, optargs) @@ -632,22 +647,22 @@ def __str__(self): @property def element_type(self): - """ `CudaFnVector` """ - return CudaFnVector + """ `CudaTensor` """ + return CudaTensor -class CudaFnVector(FnBaseVector): +class CudaTensor(Tensor): - """Representation of a `CudaFn` element.""" + """Representation of a `CudaTensorSpace` element.""" def __init__(self, space, data): """Initialize a new instance.""" - super(CudaFnVector, self).__init__(space) + super(CudaTensor, self).__init__(space) self.__data = data @property def data(self): - """The data container of this vector, type ``CudaFnImplVector``.""" + """The data container of this vector, type ``CudaTensorSpaceImplVector``.""" return self.__data @property @@ -666,7 +681,7 @@ def __eq__(self, other): Examples -------- - >>> r3 = CudaFn(3, 'float32') + >>> r3 = CudaTensorSpace(3, 'float32') >>> x = r3.element([1, 2, 3]) >>> x == x True @@ -676,7 +691,7 @@ def __eq__(self, other): >>> y = r3.element([0, 0, 0]) >>> x == y False - >>> r3_2 = CudaFn(3, 'uint8') + >>> r3_2 = CudaTensorSpace(3, 'uint8') >>> z = r3_2.element([1, 2, 3]) >>> x != z True @@ -693,15 +708,15 @@ def copy(self): Returns ------- - copy : `CudaFnVector` + copy : `CudaTensor` The deep copy Examples -------- - >>> vec1 = CudaFn(3, 'uint8').element([1, 2, 3]) + >>> vec1 = CudaTensorSpace(3, 'uint8').element([1, 2, 3]) >>> vec2 = vec1.copy() >>> vec2 - CudaFn(3, 'uint8').element([1, 2, 3]) + tensor_space(3, dtype='uint8', 
impl='odlcuda').element([1, 2, 3]) >>> vec1 == vec2 True >>> vec1 is vec2 @@ -732,7 +747,7 @@ def asarray(self, start=None, stop=None, step=None, out=None): Examples -------- - >>> uc3 = CudaFn(3, 'uint8') + >>> uc3 = CudaTensorSpace(3, 'uint8') >>> y = uc3.element([1, 2, 3]) >>> y.asarray() array([1, 2, 3], dtype=uint8) @@ -767,34 +782,34 @@ def __getitem__(self, indices): Returns ------- - values : scalar or `CudaFnVector` + values : scalar or `CudaTensor` The value(s) at the index (indices) Examples -------- - >>> uc3 = CudaFn(3, 'uint8') + >>> uc3 = CudaTensorSpace(3, 'uint8') >>> y = uc3.element([1, 2, 3]) >>> y[0] 1 >>> z = y[1:3] >>> z - CudaFn(2, 'uint8').element([2, 3]) + tensor_space(2, dtype='uint8', impl='odlcuda').element([2, 3]) >>> y[::2] - CudaFn(2, 'uint8').element([1, 3]) + tensor_space(2, dtype='uint8', impl='odlcuda').element([1, 3]) >>> y[::-1] - CudaFn(3, 'uint8').element([3, 2, 1]) + tensor_space(3, dtype='uint8', impl='odlcuda').element([3, 2, 1]) The returned value is a view, modifications are reflected in the original data: >>> z[:] = [4, 5] >>> y - CudaFn(3, 'uint8').element([1, 4, 5]) + tensor_space(3, dtype='uint8', impl='odlcuda').element([1, 4, 5]) """ if isinstance(indices, slice): data = self.data.getslice(indices) - return type(self.space)(data.size, data.dtype).element(data) + return type(self.space)(data.shape, data.dtype).element(data) else: return self.data.__getitem__(indices) @@ -808,13 +823,13 @@ def __setitem__(self, indices, values): ---------- indices : `int` or `slice` The position(s) that should be set - values : scalar, `array-like` or `CudaFnVector` + values : scalar, `array-like` or `CudaTensor` The value(s) that are to be assigned. If ``index`` is an `int`, ``value`` must be single value. If ``index`` is a `slice`, ``value`` must be broadcastable - to the size of the slice (same size, shape (1,) + to the shape of the slice (same shape, (1,) or single value). Returns @@ -823,23 +838,23 @@ def __setitem__(self, indices, values): Examples -------- - >>> uc3 = CudaFn(3, 'uint8') + >>> uc3 = CudaTensorSpace(3, 'uint8') >>> y = uc3.element([1, 2, 3]) >>> y[0] = 5 >>> y - CudaFn(3, 'uint8').element([5, 2, 3]) + tensor_space(3, dtype='uint8', impl='odlcuda').element([5, 2, 3]) >>> y[1:3] = [7, 8] >>> y - CudaFn(3, 'uint8').element([5, 7, 8]) + tensor_space(3, dtype='uint8', impl='odlcuda').element([5, 7, 8]) >>> y[:] = np.array([0, 0, 0]) >>> y - CudaFn(3, 'uint8').element([0, 0, 0]) + tensor_space(3, dtype='uint8', impl='odlcuda').element([0, 0, 0]) Scalar assignment >>> y[:] = 5 >>> y - CudaFn(3, 'uint8').element([5, 5, 5]) + tensor_space(3, dtype='uint8', impl='odlcuda').element([5, 5, 5]) """ if (isinstance(values, type(self)) and indices in (slice(None), Ellipsis)): @@ -859,24 +874,24 @@ def __setitem__(self, indices, values): @property def ufuncs(self): - """`CudaFnUfuncs`, access to numpy style ufuncs. + """`CudaTensorSpaceUfuncs`, access to numpy style ufuncs. 
Examples -------- - >>> r2 = CudaFn(2) + >>> r2 = CudaTensorSpace(2) >>> x = r2.element([1, -2]) >>> x.ufuncs.absolute() - CudaFn(2).element([1.0, 2.0]) + rn(2, impl='odlcuda').element([ 1., 2.]) These functions can also be used with broadcasting >>> x.ufuncs.add(3) - CudaFn(2).element([4.0, 1.0]) + array([ 4., 1.], dtype=float32) and non-space elements >>> x.ufuncs.subtract([3, 3]) - CudaFn(2).element([-2.0, -5.0]) + array([-2., -5.]) There is also support for various reductions (sum, prod, min, max) @@ -889,9 +904,9 @@ def ufuncs(self): >>> out = r2.element() >>> result = x.ufuncs.add(y, out=out) >>> result - CudaFn(2).element([4.0, 2.0]) - >>> result is out - True + array([ 4., 2.], dtype=float32) + >>> out + rn(2, impl='odlcuda').element([ 4., 2.]) Notes ----- @@ -900,31 +915,31 @@ def ufuncs(self): See also -------- - odl.util.ufuncs.FnBaseUfuncs - Base class for ufuncs in `FnBase` spaces. + odl.util.ufuncs.TensorSpaceUfuncs + Base class for ufuncs in `TensorSpace` spaces. """ - return CudaFnUfuncs(self) + return CudaTensorSpaceUfuncs(self) def _weighting(weighting, exponent): """Return a weighting whose type is inferred from the arguments.""" if np.isscalar(weighting): - weighting = CudaFnConstWeighting( + weighting = CudaTensorSpaceConstWeighting( weighting, exponent) - elif isinstance(weighting, CudaFnVector): - weighting = CudaFnArrayWeighting( + elif isinstance(weighting, CudaTensor): + weighting = CudaTensorSpaceArrayWeighting( weighting, exponent=exponent) else: weight_ = np.asarray(weighting) if weight_.dtype == object: raise ValueError('bad weighting {}'.format(weighting)) if weight_.ndim == 1: - weighting = CudaFnArrayWeighting( + weighting = CudaTensorSpaceArrayWeighting( weight_, exponent) elif weight_.ndim == 2: raise NotImplementedError('matrix weighting not implemented ' 'for CUDA spaces') -# weighting = CudaFnMatrixWeighting( +# weighting = CudaTensorSpaceMatrixWeighting( # weight_, exponent) else: raise ValueError('array-like weight must have 1 or 2 dimensions, ' @@ -978,9 +993,9 @@ def _inner_diagweight(x1, x2, w): return x1.data.inner_weight(x2.data, w.data) -class CudaFnArrayWeighting(ArrayWeighting): +class CudaTensorSpaceArrayWeighting(ArrayWeighting): - """Vector weighting for `CudaFn`. + """Vector weighting for `CudaTensorSpace`. For exponent 2.0, a new weighted inner product with vector ``w`` is defined as:: @@ -1016,25 +1031,25 @@ def __init__(self, vector, exponent=2.0): Parameters ---------- - vector : `CudaFnVector` + vector : `CudaTensor` Weighting vector of the inner product, norm and distance exponent : positive `float` Exponent of the norm. For values other than 2.0, the inner product is not defined. """ - if not isinstance(vector, CudaFnVector): - raise TypeError('vector {!r} is not a CudaFnVector instance' + if not isinstance(vector, CudaTensor): + raise TypeError('vector {!r} is not a CudaTensor instance' ''.format(vector)) - super(CudaFnArrayWeighting, self).__init__( - vector, impl='cuda', exponent=exponent) + super(CudaTensorSpaceArrayWeighting, self).__init__( + vector, impl='odlcuda', exponent=exponent) def inner(self, x1, x2): """Calculate the vector weighted inner product of two vectors. 
Parameters ---------- - x1, x2 : `CudaFnVector` + x1, x2 : `CudaTensor` Vectors whose inner product is calculated Returns @@ -1054,7 +1069,7 @@ def norm(self, x): Parameters ---------- - x : `CudaFnVector` + x : `CudaTensor` Vector whose norm is calculated Returns @@ -1072,7 +1087,7 @@ def dist(self, x1, x2): Parameters ---------- - x1, x2 : `CudaFnVector` + x1, x2 : `CudaTensor` Vectors whose mutual distance is calculated Returns @@ -1086,9 +1101,9 @@ def dist(self, x1, x2): return _pdist_diagweight(x1, x2, self.exponent, self.array) -class CudaFnConstWeighting(ConstWeighting): +class CudaTensorSpaceConstWeighting(ConstWeighting): - """Weighting of `CudaFn` by a constant. + """Weighting of `CudaTensorSpace` by a constant. For exponent 2.0, a new weighted inner product with constant ``c`` is defined as:: @@ -1128,15 +1143,15 @@ def __init__(self, constant, exponent=2.0): Exponent of the norm. For values other than 2.0, the inner product is not defined. """ - super(CudaFnConstWeighting, self).__init__( - constant, impl='cuda', exponent=exponent) + super(CudaTensorSpaceConstWeighting, self).__init__( + constant, impl='odlcuda', exponent=exponent) def inner(self, x1, x2): """Calculate the constant-weighted inner product of two vectors. Parameters ---------- - x1, x2 : `CudaFnVector` + x1, x2 : `CudaTensor` Vectors whose inner product is calculated Returns @@ -1156,7 +1171,7 @@ def norm(self, x): Parameters ---------- - x1 : `CudaFnVector` + x1 : `CudaTensor` Vector whose norm is calculated Returns @@ -1177,7 +1192,7 @@ def dist(self, x1, x2): Parameters ---------- - x1, x2 : `CudaFnVector` + x1, x2 : `CudaTensor` Vectors whose mutual distance is calculated Returns @@ -1192,54 +1207,9 @@ def dist(self, x1, x2): _pdist_default(x1, x2, self.exponent)) -class CudaFnNoWeighting(NoWeighting, CudaFnConstWeighting): - - """Weighting of `CudaFn` with constant 1. - - For exponent 2.0, the unweighted inner product is defined as:: - - := b^H a - - with ``b^H`` standing for transposed complex conjugate. - - For other exponents, only norm and dist are defined. - """ - - # Implement singleton pattern for efficiency in the default case - _instance = None - - def __new__(cls, *args, **kwargs): - """Implement singleton pattern if ``exponent==2.0``.""" - if len(args) == 0: - exponent = kwargs.pop('exponent', 2.0) - else: - exponent = args[0] - args = args[1:] - - if exponent == 2.0: - if not cls._instance: - cls._instance = super(CudaFnConstWeighting, cls).__new__( - cls, *args, **kwargs) - return cls._instance - else: - return super(CudaFnConstWeighting, cls).__new__( - cls, *args, **kwargs) - - def __init__(self, exponent=2.0): - """Initialize a new instance. - - Parameters - ---------- - exponent : positive `float` - Exponent of the norm. For values other than 2.0, the inner - product is not defined. - """ - super(CudaFnNoWeighting, self).__init__(exponent=exponent, impl='cuda') - - -class CudaFnCustomInner(CustomInner): +class CudaTensorSpaceCustomInner(CustomInner): - """Class for handling a user-specified inner product on `CudaFn`.""" + """Class for handling a user-specified inner product on `CudaTensorSpace`.""" def __init__(self, inner): """Initialize a new instance. @@ -1248,7 +1218,7 @@ def __init__(self, inner): ---------- inner : `callable` The inner product implementation. 
It must accept two - `FnVector` arguments, return an element from + `CudaTensor` arguments, return an element from their space's field (real or complex number) and satisfy the following conditions for all vectors ``x, y, z`` and scalars ``s``: @@ -1256,12 +1226,12 @@ def __init__(self, inner): - ``<s*x + y, z> = s * <x, z> + <y, z>`` - ``<x, x> = 0`` if and only if ``x = 0`` """ - super(CudaFnCustomInner, self).__init__(inner, impl='cuda') + super(CudaTensorSpaceCustomInner, self).__init__(inner, impl='odlcuda') -class CudaFnCustomNorm(CustomNorm): +class CudaTensorSpaceCustomNorm(CustomNorm): - """Class for handling a user-specified norm in `CudaFn`. + """Class for handling a user-specified norm in `CudaTensorSpace`. Note that this removes ``inner``. """ @@ -1272,7 +1242,7 @@ def __init__(self, norm): Parameters ---------- norm : `callable` - The norm implementation. It must accept a `CudaFnVector` + The norm implementation. It must accept a `CudaTensor` argument, return a `float` and satisfy the following conditions for all vectors ``x, y`` and scalars ``s``: @@ -1281,12 +1251,12 @@ def __init__(self, norm): - ``||s * x|| = |s| * ||x||`` - ``||x + y|| <= ||x|| + ||y||`` """ - super(CudaFnCustomNorm, self).__init__(norm, impl='cuda') + super(CudaTensorSpaceCustomNorm, self).__init__(norm, impl='odlcuda') -class CudaFnCustomDist(CustomDist): +class CudaTensorSpaceCustomDist(CustomDist): - """Class for handling a user-specified distance in `CudaFn`. + """Class for handling a user-specified distance in `CudaTensorSpace`. Note that this removes ``inner`` and ``norm``. """ @@ -1297,8 +1267,8 @@ def __init__(self, dist): Parameters ---------- dist : `callable` - The distance function defining a metric on `Fn`. It must - accept two `FnVector` arguments, return a `float` and and + The distance function defining a metric on `CudaTensorSpace`. + It must accept two `CudaTensor` arguments, return a `float` and fulfill the following mathematical conditions for any three vectors ``x, y, z``: @@ -1307,7 +1277,7 @@ def __init__(self, dist): - ``dist(x, y) = dist(y, x)`` - ``dist(x, y) <= dist(x, z) + dist(z, y)`` """ - super(CudaFnCustomDist, self).__init__(dist, impl='cuda') + super(CudaTensorSpaceCustomDist, self).__init__(dist, impl='odlcuda') if __name__ == '__main__': diff --git a/odlcuda/odl_plugin.py b/odlcuda/odl_plugin.py index 846c3a9..8934cfd 100644 --- a/odlcuda/odl_plugin.py +++ b/odlcuda/odl_plugin.py @@ -3,9 +3,9 @@ from odlcuda import cu_ntuples -def fn_impls(): - return {'cuda': cu_ntuples.CudaFn} +def tensor_space_impls(): + return {'odlcuda': cu_ntuples.CudaTensorSpace} -def fn_impl_names(): - return tuple(fn_impls().keys()) +def tensor_space_impl_names(): + return tuple(tensor_space_impls().keys()) diff --git a/odlcuda/ufuncs.py b/odlcuda/ufuncs.py index adf112d..b03a49c 100644 --- a/odlcuda/ufuncs.py +++ b/odlcuda/ufuncs.py @@ -18,7 +18,7 @@ """Ufuncs for ODL vectors. These functions are internal and should only be used as methods on -`FnBaseVector` type spaces. +`TensorSpace` type spaces. See `numpy.ufuncs `_ @@ -27,23 +27,23 @@ Notes ----- The default implementation of these methods make heavy use of the -``FnBaseVector.__array__`` to extract a `numpy.ndarray` from the vector, -and then apply a ufunc to it. Afterwards, ``FnBaseVector.__array_wrap__`` +``Tensor.__array__`` to extract a `numpy.ndarray` from the vector, +and then apply a ufunc to it. Afterwards, ``Tensor.__array_wrap__`` is used to re-wrap the data into the appropriate space.
""" -from odl.util.ufuncs import FnBaseUfuncs +from odl.util.ufuncs import TensorSpaceUfuncs -__all__ = ('CudaFnUfuncs',) +__all__ = ('CudaTensorSpaceUfuncs',) # Optimizations for CUDA def _make_nullary_fun(name): def fun(self): - return getattr(self.vector.data, name)() + return getattr(self.elem.data, name)() - fun.__doc__ = getattr(FnBaseUfuncs, name).__doc__ + fun.__doc__ = getattr(TensorSpaceUfuncs, name).__doc__ fun.__name__ = name return fun @@ -51,20 +51,20 @@ def fun(self): def _make_unary_fun(name): def fun(self, out=None): if out is None: - out = self.vector.space.element() - getattr(self.vector.data, name)(out.data) + out = self.elem.space.element() + getattr(self.elem.data, name)(out.data) return out - fun.__doc__ = getattr(FnBaseUfuncs, name).__doc__ + fun.__doc__ = getattr(TensorSpaceUfuncs, name).__doc__ fun.__name__ = name return fun -class CudaFnUfuncs(FnBaseUfuncs): +class CudaTensorSpaceUfuncs(TensorSpaceUfuncs): - """Ufuncs for `CudaFnVector` objects. + """Ufuncs for `CudaTensor` objects. - Internal object, should not be created except in `CudaFnVector`. + Internal object, should not be created except in `CudaTensor`. """ # Ufuncs diff --git a/test/cu_ntuples_test.py b/test/cu_ntuples_test.py index dc46d76..8a78c96 100644 --- a/test/cu_ntuples_test.py +++ b/test/cu_ntuples_test.py @@ -21,9 +21,10 @@ import odl from odlcuda.cu_ntuples import ( - CudaFn, CudaFnVector, - CudaFnNoWeighting, CudaFnConstWeighting, CudaFnArrayWeighting, - CudaFnCustomInner, CudaFnCustomNorm, CudaFnCustomDist, + CudaTensorSpace, CudaTensor, + CudaTensorSpaceConstWeighting, CudaTensorSpaceArrayWeighting, + CudaTensorSpaceCustomInner, CudaTensorSpaceCustomNorm, + CudaTensorSpaceCustomDist, CUDA_DTYPES) from odl.util.testutils import ( @@ -48,14 +49,14 @@ def _pos_vector(fn): @pytest.fixture(scope="module", ids=spc_ids, params=spc_params) def fn(request): size, dtype = request.param.split() - return CudaFn(int(size), dtype=dtype) + return CudaTensorSpace(int(size), dtype=dtype) exponent = simple_fixture('exponent', [2.0, 1.0, float('inf'), 0.5, 1.5, 3.0]) -dtype = simple_fixture('dtype', CudaFn.available_dtypes(), +dtype = simple_fixture('dtype', CudaTensorSpace.available_dtypes(), fmt=" {name}='{value.name}' ") -ufunc = simple_fixture('ufunc', odl.util.ufuncs.UFUNCS) -reduction = simple_fixture('reduction', odl.util.ufuncs.REDUCTIONS) +ufunc = simple_fixture('ufunc', [p[0] for p in odl.util.ufuncs.UFUNCS]) +reduction = simple_fixture('reduction', ['sum', 'prod', 'min', 'max']) # --- CUDA space tests --- # @@ -63,38 +64,38 @@ def fn(request): def test_init_cuda_fn(dtype): # verify that the code runs - CudaFn(3, dtype=dtype).element() + CudaTensorSpace(3, dtype=dtype).element() def test_init_exponent(exponent, dtype): - CudaFn(3, dtype=dtype, exponent=exponent) + CudaTensorSpace(3, dtype=dtype, exponent=exponent) def test_init_cuda_fn_bad_dtype(): with pytest.raises(TypeError): - CudaFn(3, dtype=np.ndarray) + CudaTensorSpace(3, dtype=np.ndarray) with pytest.raises(TypeError): - CudaFn(3, dtype=str) + CudaTensorSpace(3, dtype=str) with pytest.raises(TypeError): - CudaFn(3, dtype=np.matrix) + CudaTensorSpace(3, dtype=np.matrix) def test_init_weighting(exponent): const = 1.5 - weight_vec = _pos_vector(CudaFn(3)) - weight_elem = CudaFn(3, dtype='float32').element(weight_vec) + weight_vec = _pos_vector(CudaTensorSpace(3)) + weight_elem = CudaTensorSpace(3, dtype='float32').element(weight_vec) - f3_none = CudaFn(3, dtype='float32', exponent=exponent) - f3_const = CudaFn(3, dtype='float32', 
weighting=const, exponent=exponent) - f3_vec = CudaFn(3, dtype='float32', weighting=weight_vec, + f3_none = CudaTensorSpace(3, dtype='float32', exponent=exponent) + f3_const = CudaTensorSpace(3, dtype='float32', weighting=const, exponent=exponent) + f3_vec = CudaTensorSpace(3, dtype='float32', weighting=weight_vec, exponent=exponent) - f3_elem = CudaFn(3, dtype='float32', weighting=weight_elem, + f3_elem = CudaTensorSpace(3, dtype='float32', weighting=weight_elem, exponent=exponent) - weighting_none = CudaFnNoWeighting(exponent=exponent) - weighting_const = CudaFnConstWeighting(const, exponent=exponent) - weighting_vec = CudaFnArrayWeighting(weight_vec, exponent=exponent) - weighting_elem = CudaFnArrayWeighting(weight_elem, exponent=exponent) + weighting_none = CudaTensorSpaceConstWeighting(1.0, exponent=exponent) + weighting_const = CudaTensorSpaceConstWeighting(const, exponent=exponent) + weighting_vec = CudaTensorSpaceArrayWeighting(weight_vec, exponent=exponent) + weighting_elem = CudaTensorSpaceArrayWeighting(weight_elem, exponent=exponent) assert f3_none.weighting == weighting_none assert f3_const.weighting == weighting_const @@ -128,22 +129,22 @@ def test_vector_cuda(): # Rn inp = [1.0, 2.0, 3.0] - x = odl.vector(inp, dtype='float32', impl='cuda') - assert isinstance(x, CudaFnVector) + x = odl.vector(inp, dtype='float32', impl='odlcuda') + assert isinstance(x, CudaTensor) assert x.dtype == np.dtype('float32') assert all_equal(x, inp) - x = odl.vector([1.0, 2.0, float('inf')], dtype='float32', impl='cuda') + x = odl.vector([1.0, 2.0, float('inf')], dtype='float32', impl='odlcuda') assert x.dtype == np.dtype('float32') - assert isinstance(x, CudaFnVector) + assert isinstance(x, CudaTensor) - x = odl.vector([1.0, 2.0, float('nan')], dtype='float32', impl='cuda') + x = odl.vector([1.0, 2.0, float('nan')], dtype='float32', impl='odlcuda') assert x.dtype == np.dtype('float32') - assert isinstance(x, CudaFnVector) + assert isinstance(x, CudaTensor) - x = odl.vector([1, 2, 3], dtype='float32', impl='cuda') + x = odl.vector([1, 2, 3], dtype='float32', impl='odlcuda') assert x.dtype == np.dtype('float32') - assert isinstance(x, CudaFnVector) + assert isinstance(x, CudaTensor) def test_zero(fn): @@ -161,7 +162,7 @@ def test_list_init(fn): def test_ndarray_init(): - r3 = CudaFn(3) + r3 = CudaTensorSpace(3) x0 = np.array([1., 2., 3.]) x = r3.element(x0) @@ -177,7 +178,7 @@ def test_ndarray_init(): def test_getitem(): - r3 = CudaFn(3) + r3 = CudaTensorSpace(3) y = [1, 2, 3] x = r3.element(y) @@ -186,7 +187,7 @@ def test_getitem(): def test_iterator(): - r3 = CudaFn(3) + r3 = CudaTensorSpace(3) y = [1, 2, 3] x = r3.element(y) @@ -194,7 +195,7 @@ def test_iterator(): def test_getitem_index_error(): - r3 = CudaFn(3) + r3 = CudaTensorSpace(3) x = r3.element([1, 2, 3]) with pytest.raises(IndexError): @@ -205,7 +206,7 @@ def test_getitem_index_error(): def test_setitem(): - r3 = CudaFn(3) + r3 = CudaTensorSpace(3) x = r3.element([42, 42, 42]) for index in [0, 1, 2, -1, -2, -3]: @@ -214,7 +215,7 @@ def test_setitem(): def test_setitem_index_error(): - r3 = CudaFn(3) + r3 = CudaTensorSpace(3) x = r3.element([1, 2, 3]) with pytest.raises(IndexError): @@ -226,7 +227,7 @@ def test_setitem_index_error(): def _test_getslice(slice): # Validate get against python list behaviour - r6 = CudaFn(6) + r6 = CudaTensorSpace(6) y = [0, 1, 2, 3, 4, 5] x = r6.element(y) @@ -247,7 +248,7 @@ def test_getslice(): def test_slice_of_slice(): # Verify that creating slices from slices works as expected - r10 = CudaFn(10) + r10 
= CudaTensorSpace(10) xh = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] xd = r10.element(xh) @@ -264,7 +265,7 @@ def test_slice_of_slice(): def test_slice_is_view(): # Verify that modifications of a view modify the original data - r10 = CudaFn(10) + r10 = CudaTensorSpace(10) xh = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) xd = r10.element(xh) @@ -279,7 +280,7 @@ def test_slice_is_view(): def test_getslice_index_error(): - r3 = CudaFn(3) + r3 = CudaTensorSpace(3) xd = r3.element([1, 2, 3]) # Bad slice @@ -289,7 +290,7 @@ def test_getslice_index_error(): def _test_setslice(slice): # Validate set against python list behaviour - r6 = CudaFn(6) + r6 = CudaTensorSpace(6) z = [7, 8, 9, 10, 11, 10] y = [0, 1, 2, 3, 4, 5] x = r6.element(y) @@ -312,7 +313,7 @@ def test_setslice(): def test_setslice_index_error(): - r3 = CudaFn(3) + r3 = CudaTensorSpace(3) xd = r3.element([1, 2, 3]) # Bad slice @@ -331,7 +332,7 @@ def test_setslice_index_error(): def test_inner(): - r3 = CudaFn(3) + r3 = CudaTensorSpace(3) x = r3.element([1, 2, 3]) y = r3.element([5, 3, 9]) @@ -341,7 +342,7 @@ def test_inner(): assert almost_equal(r3.inner(x, y), correct_inner) # Exponent != 2 -> no inner product - r3 = CudaFn(3, exponent=1) + r3 = CudaTensorSpace(3, exponent=1) x = r3.element([1, 2, 3]) y = r3.element([5, 3, 9]) @@ -352,7 +353,7 @@ def test_inner(): def test_norm(exponent): - r3 = CudaFn(3, exponent=exponent) + r3 = CudaTensorSpace(3, exponent=exponent) xarr, x = noise_elements(r3) correct_norm = np.linalg.norm(xarr, ord=exponent) @@ -368,7 +369,7 @@ def test_norm(exponent): def test_dist(exponent): - r3 = CudaFn(3, exponent=exponent) + r3 = CudaTensorSpace(3, exponent=exponent) [xarr, yarr], [x, y] = noise_elements(r3, n=2) correct_dist = np.linalg.norm(xarr - yarr, ord=exponent) @@ -386,7 +387,7 @@ def test_dist(exponent): def test_astype(): # Complex not implemented - rn = CudaFn(3, weighting=1.5) + rn = CudaTensorSpace(3, weighting=1.5) assert rn.astype('float32') == rn with pytest.raises(TypeError): @@ -605,7 +606,7 @@ def idiv_aliased(x): def test_incompatible_operations(): - r3 = CudaFn(3) + r3 = CudaTensorSpace(3) R3h = odl.rn(3) xA = r3.zero() xB = R3h.zero() @@ -655,7 +656,7 @@ def test_transpose(fn): def test_modify(): - r3 = CudaFn(3) + r3 = CudaTensorSpace(3) xd = r3.element([1, 2, 3]) yd = r3.element(data_ptr=xd.data_ptr) @@ -665,8 +666,8 @@ def test_modify(): def test_sub_vector(): - r6 = CudaFn(6) - r3 = CudaFn(3) + r6 = CudaTensorSpace(6) + r3 = CudaTensorSpace(3) xd = r6.element([1, 2, 3, 4, 5, 6]) yd = r3.element(data_ptr=xd.data_ptr) @@ -676,8 +677,8 @@ def test_sub_vector(): def test_offset_sub_vector(): - r6 = CudaFn(6) - r3 = CudaFn(3) + r6 = CudaTensorSpace(6) + r3 = CudaTensorSpace(3) xd = r6.element([1, 2, 3, 4, 5, 6]) yd = r3.element(data_ptr=xd.data_ptr + 3 * xd.space.dtype.itemsize) @@ -689,9 +690,9 @@ def test_offset_sub_vector(): def _test_dtype(dt): if dt not in CUDA_DTYPES: with pytest.raises(TypeError): - r3 = CudaFn(3, dt) + r3 = CudaTensorSpace(3, dt) else: - r3 = CudaFn(3, dt) + r3 = CudaTensorSpace(3, dt) x = r3.element([1, 2, 3]) y = r3.element([4, 5, 6]) z = x + y @@ -710,16 +711,16 @@ def test_dtypes(): def test_const_init(exponent): const = 1.5 - CudaFnConstWeighting(const, exponent=exponent) + CudaTensorSpaceConstWeighting(const, exponent=exponent) def test_const_equals(exponent): constant = 1.5 - weighting = CudaFnConstWeighting(constant, exponent=exponent) - weighting2 = CudaFnConstWeighting(constant, exponent=exponent) - other_weighting = CudaFnConstWeighting(2.5, exponent=exponent) - 
wrong_exp = CudaFnConstWeighting(constant, exponent=exponent + 1) + weighting = CudaTensorSpaceConstWeighting(constant, exponent=exponent) + weighting2 = CudaTensorSpaceConstWeighting(constant, exponent=exponent) + other_weighting = CudaTensorSpaceConstWeighting(2.5, exponent=exponent) + wrong_exp = CudaTensorSpaceConstWeighting(constant, exponent=exponent + 1) assert weighting == weighting assert weighting == weighting2 @@ -731,22 +732,22 @@ def test_const_equals(exponent): def test_const_inner(): - rn = CudaFn(5) + rn = CudaTensorSpace(5) [xarr, yarr], [x, y] = noise_elements(rn, 2) constant = 1.5 - weighting = CudaFnConstWeighting(constant) + weighting = CudaTensorSpaceConstWeighting(constant) true_inner = constant * np.vdot(yarr, xarr) assert almost_equal(weighting.inner(x, y), true_inner) def test_const_norm(exponent): - rn = CudaFn(5) + rn = CudaTensorSpace(5) xarr, x = noise_elements(rn) constant = 1.5 - weighting = CudaFnConstWeighting(constant, exponent=exponent) + weighting = CudaTensorSpaceConstWeighting(constant, exponent=exponent) factor = 1 if exponent == float('inf') else constant ** (1 / exponent) true_norm = factor * np.linalg.norm(xarr, ord=exponent) @@ -760,11 +761,11 @@ def test_const_norm(exponent): def test_const_dist(exponent): - rn = CudaFn(5) + rn = CudaTensorSpace(5) [xarr, yarr], [x, y] = noise_elements(rn, n=2) constant = 1.5 - weighting = CudaFnConstWeighting(constant, exponent=exponent) + weighting = CudaTensorSpaceConstWeighting(constant, exponent=exponent) factor = 1 if exponent == float('inf') else constant ** (1 / exponent) true_dist = factor * np.linalg.norm(xarr - yarr, ord=exponent) @@ -778,46 +779,46 @@ def test_const_dist(exponent): def test_vector_init(): - rn = CudaFn(5) + rn = CudaTensorSpace(5) weight_vec = _pos_vector(rn) - CudaFnArrayWeighting(weight_vec) - CudaFnArrayWeighting(rn.element(weight_vec)) + CudaTensorSpaceArrayWeighting(weight_vec) + CudaTensorSpaceArrayWeighting(rn.element(weight_vec)) def test_vector_is_valid(): - rn = CudaFn(5) + rn = CudaTensorSpace(5) weight = _pos_vector(rn) - weighting = CudaFnArrayWeighting(weight) + weighting = CudaTensorSpaceArrayWeighting(weight) assert weighting.is_valid() # Invalid weight[0] = 0 - weighting = CudaFnArrayWeighting(weight) + weighting = CudaTensorSpaceArrayWeighting(weight) assert not weighting.is_valid() def test_vector_equals(): - rn = CudaFn(5) + rn = CudaTensorSpace(5) weight = _pos_vector(rn) - weighting = CudaFnArrayWeighting(weight) - weighting2 = CudaFnArrayWeighting(weight) + weighting = CudaTensorSpaceArrayWeighting(weight) + weighting2 = CudaTensorSpaceArrayWeighting(weight) assert weighting == weighting2 def test_vector_inner(): - rn = CudaFn(5) + rn = CudaTensorSpace(5) [xarr, yarr], [x, y] = noise_elements(rn, 2) - weight = _pos_vector(CudaFn(5)) + weight = _pos_vector(CudaTensorSpace(5)) - weighting = CudaFnArrayWeighting(weight) + weighting = CudaTensorSpaceArrayWeighting(weight) true_inner = np.vdot(yarr, xarr * weight.asarray()) @@ -825,16 +826,16 @@ def test_vector_inner(): # Exponent != 2 -> no inner product, should raise with pytest.raises(NotImplementedError): - CudaFnArrayWeighting(weight, exponent=1.0).inner(x, y) + CudaTensorSpaceArrayWeighting(weight, exponent=1.0).inner(x, y) def test_vector_norm(exponent): - rn = CudaFn(5) + rn = CudaTensorSpace(5) xarr, x = noise_elements(rn) - weight = _pos_vector(CudaFn(5)) + weight = _pos_vector(CudaTensorSpace(5)) - weighting = CudaFnArrayWeighting(weight, exponent=exponent) + weighting = 
CudaTensorSpaceArrayWeighting(weight, exponent=exponent) if exponent in (1.0, float('inf')): true_norm = np.linalg.norm(weight.asarray() * xarr, ord=exponent) @@ -851,12 +852,12 @@ def test_vector_norm(exponent): def test_vector_dist(exponent): - rn = CudaFn(5) + rn = CudaTensorSpace(5) [xarr, yarr], [x, y] = noise_elements(rn, n=2) - weight = _pos_vector(CudaFn(5)) + weight = _pos_vector(CudaTensorSpace(5)) - weighting = CudaFnArrayWeighting(weight, exponent=exponent) + weighting = CudaTensorSpaceArrayWeighting(weight, exponent=exponent) if exponent in (1.0, float('inf')): true_dist = np.linalg.norm(weight.asarray() * (xarr - yarr), @@ -879,9 +880,9 @@ def test_custom_inner(fn): def inner(x, y): return np.vdot(y, x) - w = CudaFnCustomInner(inner) - w_same = CudaFnCustomInner(inner) - w_other = CudaFnCustomInner(np.dot) + w = CudaTensorSpaceCustomInner(inner) + w_same = CudaTensorSpaceCustomInner(inner) + w_other = CudaTensorSpaceCustomInner(np.dot) assert w == w assert w == w_same @@ -899,7 +900,7 @@ def inner(x, y): assert almost_equal(w.dist(x, y), true_dist, places=3) with pytest.raises(TypeError): - CudaFnCustomInner(1) + CudaTensorSpaceCustomInner(1) def test_custom_norm(fn): @@ -910,9 +911,9 @@ def test_custom_norm(fn): def other_norm(x): return np.linalg.norm(x, ord=1) - w = CudaFnCustomNorm(norm) - w_same = CudaFnCustomNorm(norm) - w_other = CudaFnCustomNorm(other_norm) + w = CudaTensorSpaceCustomNorm(norm) + w_same = CudaTensorSpaceCustomNorm(norm) + w_other = CudaTensorSpaceCustomNorm(other_norm) assert w == w assert w == w_same @@ -928,7 +929,7 @@ def other_norm(x): assert almost_equal(w.dist(x, y), true_dist) with pytest.raises(TypeError): - CudaFnCustomNorm(1) + CudaTensorSpaceCustomNorm(1) def test_custom_dist(fn): @@ -940,9 +941,9 @@ def dist(x, y): def other_dist(x, y): return np.linalg.norm(x - y, ord=1) - w = CudaFnCustomDist(dist) - w_same = CudaFnCustomDist(dist) - w_other = CudaFnCustomDist(other_dist) + w = CudaTensorSpaceCustomDist(dist) + w_same = CudaTensorSpaceCustomDist(dist) + w_other = CudaTensorSpaceCustomDist(other_dist) assert w == w assert w == w_same @@ -958,36 +959,37 @@ def other_dist(x, y): assert almost_equal(w.dist(x, y), true_dist) with pytest.raises(TypeError): - CudaFnCustomDist(1) + CudaTensorSpaceCustomDist(1) def test_ufuncs(fn, ufunc): - name, n_args, n_out, _ = ufunc if (np.issubsctype(fn.dtype, np.floating) and - name in ['bitwise_and', - 'bitwise_or', - 'bitwise_xor', - 'invert', - 'left_shift', - 'right_shift']): + ufunc in ['bitwise_and', + 'bitwise_or', + 'bitwise_xor', + 'invert', + 'left_shift', + 'right_shift']): # Skip integer only methods if floating point type return - # Get the ufunc from numpy as reference - ufunc = getattr(np, name) + # Get the ufunc from numpy as reference + func = getattr(np, ufunc) + n_in = func.nin + n_out = func.nout # Create some data - arrays, vectors = noise_elements(fn, n_args + n_out) - in_arrays = arrays[:n_args] - out_arrays = arrays[n_args:] + arrays, vectors = noise_elements(fn, n_in + n_out) + in_arrays = arrays[:n_in] + out_arrays = arrays[n_in:] data_vector = vectors[0] - in_vectors = vectors[1:n_args] - out_vectors = vectors[n_args:] + in_vectors = vectors[1:n_in] + out_vectors = vectors[n_in:] # Out of place: with np.errstate(all='ignore'): # avoid pytest warnings - npy_result = ufunc(*in_arrays) - vec_fun = getattr(data_vector.ufuncs, name) + npy_result = func(*in_arrays) + vec_fun = getattr(data_vector.ufuncs, ufunc) odl_result = vec_fun(*in_vectors) assert all_almost_equal(odl_result, 
npy_result) @@ -1000,8 +1002,8 @@ def test_ufuncs(fn, ufunc): # In place: with np.errstate(all='ignore'): # avoid pytest warnings - npy_result = ufunc(*(in_arrays + out_arrays)) - vec_fun = getattr(data_vector.ufuncs, name) + npy_result = func(*(in_arrays + out_arrays)) + vec_fun = getattr(data_vector.ufuncs, ufunc) odl_result = vec_fun(*(in_vectors + out_vectors)) assert all_almost_equal(odl_result, npy_result) @@ -1014,16 +1016,14 @@ def test_ufuncs(fn, ufunc): def test_reductions(fn, reduction): - name, _ = reduction - - reduction = getattr(np, name) + func = getattr(np, reduction) # Create some data x_arr, x = noise_elements(fn, 1) with np.errstate(all='ignore'): # avoid pytest warnings - npy_result = reduction(x_arr) - odl_result = getattr(x.ufuncs, name)() + npy_result = func(x_arr) + odl_result = getattr(x.ufuncs, reduction)() assert almost_equal(odl_result, npy_result)
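
Usage sketch (reviewer note, not part of the patch): the snippet below mirrors the doctests updated above and is only a minimal sketch; it assumes a CUDA-capable machine with the compiled odlcuda extension importable. All names are taken from this patch (CudaTensorSpace, CudaTensorSpaceConstWeighting, impl='odlcuda').

# Minimal sketch of the renamed API; assumes a CUDA device and a built
# odlcuda backend on the Python path.
import numpy as np
from odlcuda.cu_ntuples import CudaTensorSpace, CudaTensorSpaceConstWeighting

r3 = CudaTensorSpace(3, dtype='float32')   # formerly CudaFn(3)
x = r3.element([1, 2, 3])
y = r3.element(np.array([4, 5, 6], dtype='float32'))

out = r3.element()
r3.lincomb(2, x, 3, y, out)                # out becomes [14., 19., 24.]

# The removed CudaFnNoWeighting default is now a constant weighting of 1.0.
assert r3.weighting == CudaTensorSpaceConstWeighting(1.0)
assert not r3.is_weighted
assert r3.impl == 'odlcuda'                # was 'cuda' before this patch

w3 = CudaTensorSpace(3, dtype='float32', weighting=1.5)
assert w3.weighting == CudaTensorSpaceConstWeighting(1.5)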