port scalarmath tests #16
@@ -81,6 +81,18 @@ def base(self):
     def T(self):
         return self.transpose()
 
+    @property
+    def real(self):
+        return asarray(self._tensor.real)
+
+    @property
+    def imag(self):
+        try:
+            return asarray(self._tensor.imag)
+        except RuntimeError:
+            zeros = torch.zeros_like(self._tensor)
+            return ndarray._from_tensor_and_base(zeros, None)
+
     # ctors
     def astype(self, dtype):
         newt = ndarray()
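For context: `torch.Tensor.imag` raises a RuntimeError for non-complex dtypes, whereas NumPy returns an all-zeros imaginary part, which is the gap the fallback above bridges. A minimal illustration in plain torch (everything here is illustrative, not part of the diff):

    import torch

    t = torch.tensor([1.0, 2.0])       # real-valued tensor
    try:
        _ = t.imag                     # RuntimeError for non-complex dtypes
    except RuntimeError:
        print(torch.zeros_like(t))     # tensor([0., 0.]) -- what .imag returns here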
@@ -102,6 +114,13 @@ def __str__(self):
 
     ### comparisons ###
     def __eq__(self, other):
-        return asarray(self._tensor == asarray(other).get())
+        try:
+            t_other = asarray(other).get()
+        except RuntimeError:
+            # Failed to convert other to array: definitely not equal.
+            # TODO: generalize, delegate to ufuncs
+            falsy = torch.full(self.shape, fill_value=False, dtype=bool)
+            return asarray(falsy)
+        return asarray(self._tensor == t_other)
 
     def __neq__(self, other):
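A usage sketch of the fallback path, assuming the module's `asarray` as defined in this diff: comparing against something torch cannot convert produces an all-False array rather than an exception, mirroring NumPy's `==` semantics. The `object()` operand is just an illustrative non-convertible value:

    a = asarray([1, 2, 3])
    result = a == object()    # torch.as_tensor(object()) raises RuntimeError inside
    print(result.get())       # tensor([False, False, False])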
@@ -119,7 +138,6 @@ def __ge__(self, other):
     def __le__(self, other):
         return asarray(self._tensor <= asarray(other).get())
 
-
     def __bool__(self):
         try:
             return bool(self._tensor)
@@ -141,6 +159,9 @@ def __hash__(self):
     def __float__(self):
         return float(self._tensor)
 
+    def __int__(self):
+        return int(self._tensor)
+
     # XXX : are single-element ndarrays scalars?
     def is_integer(self):
         if self.shape == ():
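Both conversions delegate to torch, which only accepts one-element tensors, so they behave like the NumPy scalar protocol that the ported scalarmath tests exercise. A small sketch, assuming the module's `asarray`:

    print(float(asarray(2)))      # 2.0
    print(int(asarray(2.9)))      # 2 -- truncates toward zero, as in Python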
@@ -167,7 +188,10 @@ def __iadd__(self, other):
 
     def __sub__(self, other):
         other_tensor = asarray(other).get()
-        return asarray(self._tensor.__sub__(other_tensor))
+        try:
+            return asarray(self._tensor.__sub__(other_tensor))
+        except RuntimeError as e:
+            raise TypeError(e.args)
 
     def __mul__(self, other):
         other_tensor = asarray(other).get()
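The translation matters because torch reports unsupported operand combinations as RuntimeError, while NumPy (and hence the ported tests) expects TypeError. Boolean subtraction is the canonical case; a sketch assuming the wrapper above:

    a = asarray(True)
    try:
        a - a       # torch: "Subtraction, the `-` operator, with a bool tensor ..."
    except TypeError:
        print("TypeError, as the ported scalarmath tests expect")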
@@ -177,10 +201,30 @@ def __rmul__(self, other):
         other_tensor = asarray(other).get()
         return asarray(self._tensor.__rmul__(other_tensor))
 
+    def __floordiv__(self, other):
+        other_tensor = asarray(other).get()
+        return asarray(self._tensor.__floordiv__(other_tensor))
+
+    def __ifloordiv__(self, other):
+        other_tensor = asarray(other).get()
+        return asarray(self._tensor.__ifloordiv__(other_tensor))
+
+    def __truediv__(self, other):
+        other_tensor = asarray(other).get()
+        return asarray(self._tensor.__truediv__(other_tensor))
+
+    def __itruediv__(self, other):
+        other_tensor = asarray(other).get()
+        return asarray(self._tensor.__itruediv__(other_tensor))
+
+    def __mod__(self, other):
+        other_tensor = asarray(other).get()
+        return asarray(self._tensor.__mod__(other_tensor))
+
+    def __imod__(self, other):
+        other_tensor = asarray(other).get()
+        return asarray(self._tensor.__imod__(other_tensor))
+
     def __or__(self, other):
         other_tensor = asarray(other).get()
         return asarray(self._tensor.__or__(other_tensor))
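Each new method follows the same three-step delegation: coerce `other` to a tensor, call the matching torch dunder, wrap the result back into an ndarray. A short usage sketch, assuming the module's `asarray`:

    a = asarray([7, 8, 9])
    print((a // 2).get())     # tensor([3, 4, 4])
    print((a % 2).get())      # tensor([1, 0, 1])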
@@ -189,10 +233,22 @@ def __ior__(self, other):
         other_tensor = asarray(other).get()
         return asarray(self._tensor.__ior__(other_tensor))
 
+    def __invert__(self):
+        return asarray(self._tensor.__invert__())
+
+    def __abs__(self):
+        return asarray(self._tensor.__abs__())
+
+    def __neg__(self):
+        try:
+            return asarray(self._tensor.__neg__())
+        except RuntimeError as e:
+            raise TypeError(e.args)
+
     def __pow__(self, exponent):
         exponent_tensor = asarray(exponent).get()
         return asarray(self._tensor.__pow__(exponent_tensor))
 
     ### methods to match namespace functions
 
     def squeeze(self, axis=None):
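`__neg__` needs the same RuntimeError-to-TypeError translation as `__sub__`: negating a boolean is the unary case where torch and NumPy agree it is an error but disagree on the exception type. A sketch under the same assumptions:

    try:
        -asarray(True)    # torch: "Negation, the `-` operator, on a bool tensor ..."
    except TypeError:
        print("TypeError, matching NumPy's boolean-negative error")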
@@ -301,7 +357,7 @@ def mean(self, axis=None, dtype=None, out=None, keepdims=NoValue, *, where=NoValue):
 
         if dtype is None:
             dtype = self.dtype
-        if not _dtypes.is_floating(dtype):
+        if _dtypes.is_integer(dtype):
             dtype = _dtypes.default_float_type()
         torch_dtype = _dtypes.torch_dtype_from(dtype)
 
@@ -321,7 +377,7 @@ def sum(self, axis=None, dtype=None, out=None, keepdims=NoValue,
 
         if dtype is None:
             dtype = self.dtype
-        if not _dtypes.is_floating(dtype):
+        if _dtypes.is_integer(dtype):
             dtype = _dtypes.default_float_type()
         torch_dtype = _dtypes.torch_dtype_from(dtype)
 
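In both reductions the old `not is_floating` test would also have caught complex dtypes and forced them through the default float, silently dropping imaginary parts; checking `is_integer` upcasts only where NumPy does. A sketch of the intended behavior, assuming complex inputs round-trip through this module's `asarray`:

    print(asarray([1, 2, 3]).mean())       # 2.0 -- ints upcast to the default float
    print(asarray([1+1j, 3+3j]).mean())    # (2+2j) -- complex dtype preserved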
@@ -343,67 +399,80 @@ def __setitem__(self, index, value):
         return self._tensor.__setitem__(index, value)
 
 
-def asarray(a, dtype=None, order=None, *, like=None):
-    _util.subok_not_ok(like)
-    if order is not None:
-        raise NotImplementedError
+# This is ideally the only place which talks to ndarray directly.
+# The rest goes through asarray (preferred) or array.
+
+def array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0,
+          like=None):
+    _util.subok_not_ok(like, subok)
+    if order != 'K':
+        raise NotImplementedError
 
-    if isinstance(a, ndarray):
-        if dtype is not None and dtype != a.dtype:
-            a = a.astype(dtype)
-        return a
+    # a happy path
+    if isinstance(object, ndarray):
+        if copy is False and dtype is None and ndmin <= object.ndim:
+            return object
 
-    if isinstance(a, (list, tuple)):
-        # handle lists of ndarrays, [1, [2, 3], ndarray(4)] etc
+    # lists of ndarrays: [1, [2, 3], ndarray(4)] convert to lists of lists
+    if isinstance(object, (list, tuple)):
         a1 = []
-        for elem in a:
+        for elem in object:
             if isinstance(elem, ndarray):
                 a1.append(elem.get().tolist())
             else:
                 a1.append(elem)
-    else:
-        a1 = a
+        object = a1
 
-    torch_dtype = _dtypes.torch_dtype_from(dtype)
-
-    # This and array(...) are the only places which talk to ndarray directly.
-    # The rest goes through asarray (preferred) or array.
-    out = ndarray()
-    tt = torch.as_tensor(a1, dtype=torch_dtype)
-    out._tensor = tt
-    return out
+    # get the tensor from "object"
+    if isinstance(object, ndarray):
+        tensor = object._tensor
+        base = object
+    elif isinstance(object, torch.Tensor):
+        tensor = object
+        base = None
+    else:
+        tensor = torch.as_tensor(object)
+        base = None
+
+        # At this point, `tensor.dtype` is the pytorch default. Our default may
+        # differ, so need to typecast. However, we cannot just do `tensor.to`,
+        # because if our desired dtype is wider than pytorch's, `tensor`
+        # may have lost precision:
+
+        # int(torch.as_tensor(1e12)) - 1e12 equals -4096 (try it!)
+
+        # Therefore, we treat `tensor.dtype` as a hint, and convert the
+        # original object *again*, this time with an explicit dtype.
+        dtyp = _dtypes.dtype_from_torch(tensor.dtype)
+        default = _dtypes.get_default_dtype_for(dtyp)
+        torch_dtype = _dtypes.torch_dtype_from(default)
Comment on lines +446 to +448

Comment: we may want to overload

Comment: Also, I get the feeling that if PyTorch defaults are the same as those in NumPy (via

Reply: Let's maybe not rely on this, not just yet at least. When we have a more complete coverage, let's experiment with how much we can peel out.
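The `-4096` in the comment is reproducible: torch's default dtype is float32, whose 24-bit mantissa cannot represent 10^12 exactly, so the nearest representable value is 4096 low. A quick check in plain torch:

    import torch

    t = torch.as_tensor(1e12)     # float32 by default
    print(int(t) - int(1e12))     # -4096: precision was lost at construction
    t64 = torch.as_tensor(1e12, dtype=torch.float64)
    print(int(t64) - int(1e12))   # 0: re-converting with an explicit dtype is safe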
+        tensor = torch.as_tensor(object, dtype=torch_dtype)
 
-def array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0,
-          like=None):
-    _util.subok_not_ok(like, subok)
-    if order != 'K':
-        raise NotImplementedError
-
-    if isinstance(object, (list, tuple)):
-        obj = asarray(object)
-        return array(obj, dtype, copy=copy, order=order, subok=subok,
-                     ndmin=ndmin, like=like)
-
-    if isinstance(object, ndarray):
-        result = object._tensor
-
-        if dtype != object.dtype:
-            torch_dtype = _dtypes.torch_dtype_from(dtype)
-            result = result.to(torch_dtype)
-    else:
-        torch_dtype = _dtypes.torch_dtype_from(dtype)
-        result = torch.as_tensor(object, dtype=torch_dtype)
+    # type cast if requested
+    if dtype is not None:
+        torch_dtype = _dtypes.torch_dtype_from(dtype)
+        tensor = tensor.to(torch_dtype)
+        base = None
 
-    if copy:
-        result = result.clone()
+    # adjust ndim if needed
+    ndim_extra = ndmin - tensor.ndim
+    if ndim_extra > 0:
+        tensor = tensor.view((1,)*ndim_extra + tensor.shape)
+        base = None
 
-    ndim_extra = ndmin - result.ndim
-    if ndim_extra > 0:
-        result = result.reshape((1,)*ndim_extra + result.shape)
-    out = ndarray()
-    out._tensor = result
-    return out
+    # copy if requested
+    if copy:
+        tensor = tensor.clone()
+        base = None
+
+    return ndarray._from_tensor_and_base(tensor, base)
 
 
+def asarray(a, dtype=None, order=None, *, like=None):
+    if order is None:
+        order = 'K'
+    return array(a, dtype=dtype, order=order, like=like, copy=False, ndmin=0)
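Putting the rewrite together: `asarray` is now the thin no-copy entry point and `array` owns all the logic, with the `base` bookkeeping recording when a result aliases its input. A hedged usage sketch, assuming the definitions above:

    t = torch.arange(3)
    a = asarray(t)                 # wraps t without cloning: a.get() is t itself
    b = array(t)                   # copy=True by default: a detached clone
    c = array([1, 2], ndmin=2)     # shape (1, 2) via the ndim_extra view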
Comment on ndarray.imag:

Comment: Why not simply return asarray(zeros)?

Reply: At some point we'll need to rationalize these two forms, agree. Basically, asarray is anything array-like in, array out; here we explicitly construct the tensor, so my fingers naturally typed this line. The line above, with asarray(self._tensor.imag), should be changed to follow line 94.

Reply: I mean, asarray(Tensor) has the same semantics as _from_tensor_and_base(Tensor, None), so we can decide to always prefer the first one over the latter for conciseness and consistency.

Reply: Not always.

Reply: Note that I suggested doing so when using torch.Tensors.
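As the thread notes, for a plain torch.Tensor the two spellings do coincide under the new array(): copy=False and dtype=None take the Tensor branch with base=None and no clone. A minimal check (sketch, assuming the definitions in this diff):

    zeros = torch.zeros(3)
    x = asarray(zeros)                              # Tensor path: base=None, no copy
    y = ndarray._from_tensor_and_base(zeros, None)
    assert x.get() is y.get() is zeros              # same underlying tensor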