@honno
Created February 20, 2023 11:21
diff --git a/torch_np/tests/numpy_tests/core/test_indexing.py b/torch_np/tests/numpy_tests/core/test_indexing.py
index b2918fd..a1b6c6d 100644
--- a/torch_np/tests/numpy_tests/core/test_indexing.py
+++ b/torch_np/tests/numpy_tests/core/test_indexing.py
@@ -2,6 +2,7 @@ import sys
import warnings
import functools
import operator
+import re
import pytest
from pytest import raises as assert_raises
@@ -15,6 +16,11 @@ from torch_np.testing import (
)
+xfail_neg_step = pytest.mark.xfail(
+ reason="torch does not support indexing with negative slice steps"
+)
+
+
class TestIndexing:
def test_index_no_floats(self):
a = np.array([[[5]]])
@@ -39,8 +45,21 @@ class TestIndexing:
assert_raises(IndexError, lambda: a[0, 0, -1.4])
assert_raises(IndexError, lambda: a[-1.4, 0, 0])
assert_raises(IndexError, lambda: a[0, -1.4, 0])
- assert_raises(IndexError, lambda: a[0.0:, 0.0])
- assert_raises(IndexError, lambda: a[0.0:, 0.0,:])
+ # Note torch validates index arguments "depth-first", so will prioritise
+ # raising TypeError over IndexError, e.g.
+ #
+ # >>> a = np.array([[[5]]])
+ # >>> a[0.0:, 0.0]
+ # IndexError: only integers, slices (`:`), ellipsis (`...`),
+ # numpy.newaxis (`None`) and integer or boolean arrays are
+ # valid indices
+ # >>> t = torch.as_tensor([[[5]]]) # identical to a
+ # >>> t[0.0:, 0.0]
+ # TypeError: slice indices must be integers or None or have an
+ # __index__ method
+ #
+ assert_raises((IndexError, TypeError), lambda: a[0.0:, 0.0])
+ assert_raises((IndexError, TypeError), lambda: a[0.0:, 0.0,:])
def test_slicing_no_floats(self):
a = np.array([[5]])
@@ -73,11 +92,18 @@ class TestIndexing:
# should still get the DeprecationWarning if step = 0.
assert_raises(TypeError, lambda: a[::0.0])
+ @pytest.mark.xfail(reason="torch allows slicing with non-0d array components")
def test_index_no_array_to_index(self):
# No non-scalar arrays.
a = np.array([[[1]]])
assert_raises(TypeError, lambda: a[a:a:a])
+ # Conversely, using scalars doesn't raise in NumPy, e.g.
+ #
+ # >>> i = np.int64(1)
+ # >>> a[i:i:i]
+ # array([], shape=(0, 1, 1), dtype=int64)
+ #
def test_none_index(self):
# `None` index adds newaxis
@@ -91,19 +117,29 @@ class TestIndexing:
assert_equal(a[()], a)
assert_(a[()].base is a)
a = np.array(0)
+ pytest.xfail(
+ "torch doesn't have scalar types with distinct instancing behaviours"
+ )
assert_(isinstance(a[()], np.int_))
+ @pytest.mark.xfail(reason="torch does not have an equivalent to np.void")
def test_void_scalar_empty_tuple(self):
s = np.zeros((), dtype='V4')
assert_equal(s[()].dtype, s.dtype)
assert_equal(s[()], s)
assert_equal(type(s[...]), np.ndarray)
+ @pytest.mark.xfail(
+ reason=(
+ "torch does not support integer indexing int tensors with uints - "
+ "uint8 tensor indexes are treated as boolean masks (deprecated)"
+ )
+ )
def test_same_kind_index_casting(self):
# Indexes should be cast with same-kind and not safe, even if that
# is somewhat unsafe. So test various different code paths.
index = np.arange(5)
- u_index = index.astype(np.uintp)
+ u_index = index.astype(np.uintp) # i.e. cast to default uint indexing dtype
arr = np.arange(10)
assert_array_equal(arr[index], arr[u_index])
@@ -169,7 +205,8 @@ class TestIndexing:
# Index out of bounds produces IndexError
assert_raises(IndexError, a.__getitem__, 1 << 30)
# Index overflow produces IndexError
- assert_raises(IndexError, a.__getitem__, 1 << 64)
+ # Note torch raises RuntimeError here
+ assert_raises((IndexError, RuntimeError), a.__getitem__, 1 << 64)
def test_single_bool_index(self):
# Single boolean index
@@ -212,10 +249,11 @@ class TestIndexing:
def f(a, v):
a[a > -1] = v
- assert_raises(ValueError, f, a, [])
- assert_raises(ValueError, f, a, [1, 2, 3])
- assert_raises(ValueError, f, a[:1], [1, 2, 3])
+ assert_raises((ValueError, RuntimeError), f, a, [])
+ assert_raises((ValueError, RuntimeError), f, a, [1, 2, 3])
+ assert_raises((ValueError, RuntimeError), f, a[:1], [1, 2, 3])
+ @pytest.mark.xfail(reason="torch does not support object dtype")
def test_boolean_assignment_needs_api(self):
# See also gh-7666
# This caused a segfault on Python 2 due to the GIL not being
@@ -258,6 +296,7 @@ class TestIndexing:
assert_equal(a[b], [1, 3])
assert_equal(a[None, b], [[1, 3]])
+ @xfail_neg_step
def test_reverse_strides_and_subspace_bufferinit(self):
# This tests that the strides are not reversed for simple and
# subspace fancy indexing.
@@ -275,6 +314,7 @@ class TestIndexing:
a[b, :] = c
assert_equal(a[0], [0, 1])
+ @xfail_neg_step
def test_reversed_strides_result_allocation(self):
# Test a bug when calculating the output strides for a result array
# when the subspace size was 1 (and test other cases as well)
@@ -295,6 +335,7 @@ class TestIndexing:
assert_equal(a, b)
+ @pytest.mark.xfail(reason="torch does not limit dims to 32")
def test_too_many_fancy_indices_special_case(self):
# Just documents behaviour, this is a small limitation.
a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS
@@ -331,11 +372,11 @@ class TestIndexing:
ind = np.ones(20, dtype=np.intp)
ind[-1] = 10
assert_raises(IndexError, a.__getitem__, ind)
- assert_raises(IndexError, a.__setitem__, ind, 0)
+ assert_raises((IndexError, RuntimeError), a.__setitem__, ind, 0)
ind = np.ones(20, dtype=np.intp)
ind[0] = 11
assert_raises(IndexError, a.__getitem__, ind)
- assert_raises(IndexError, a.__setitem__, ind, 0)
+ assert_raises((IndexError, RuntimeError), a.__setitem__, ind, 0)
def test_trivial_fancy_not_possible(self):
# Test that the fast path for trivial assignment is not incorrectly
@@ -352,6 +393,7 @@ class TestIndexing:
res[3] = -1
assert_array_equal(a, res)
+ @pytest.mark.xfail(reason="XXX: wrapping view stuff is TBD")
def test_nonbaseclass_values(self):
class SubClass(np.ndarray):
def __array_finalize__(self, old):
@@ -373,6 +415,7 @@ class TestIndexing:
a[...] = s
assert_((a == 1).all())
+ @pytest.mark.xfail(reason="XXX: wrapping view stuff is TBD")
def test_array_like_values(self):
# Similar to the above test, but use a memoryview instead
a = np.zeros((5, 5))
@@ -387,6 +430,7 @@ class TestIndexing:
a[...] = memoryview(s)
assert_array_equal(a, s)
+ @pytest.mark.xfail(reason="XXX: recarray stuff is TBD")
def test_subclass_writeable(self):
d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)],
dtype=[('target', 'S20'), ('V_mag', '>f4')])
@@ -397,6 +441,7 @@ class TestIndexing:
assert_(d[...].flags.writeable)
assert_(d[0].flags.writeable)
+ @pytest.mark.xfail(reason="can't determine f-style contiguous in torch")
def test_memory_order(self):
# This is not necessary to preserve. Memory layouts for
# more complex indices are not as simple.
@@ -408,6 +453,7 @@ class TestIndexing:
a = a.reshape(-1, 1)
assert_(a[b, 0].flags.f_contiguous)
+ @pytest.mark.xfail(reason="torch has no type distinct from a 0-d array")
def test_scalar_return_type(self):
# Full scalar indices should return scalars and object
# arrays should not call PyArray_Return on their items
@@ -442,6 +488,12 @@ class TestIndexing:
assert_(isinstance(a[z, np.array(0)], np.ndarray))
assert_(isinstance(a[z, ArrayLike()], np.ndarray))
+ @pytest.mark.xfail(
+ reason=(
+ "torch does not support integer indexing int tensors with uints - "
+ "uint8 tensor indexes are treated as boolean masks (deprecated)"
+ )
+ )
def test_small_regressions(self):
# Reference count of intp for index checks
a = np.array([0])
@@ -458,6 +510,7 @@ class TestIndexing:
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)
+ @pytest.mark.xfail(reason="XXX: wrapping view stuff is TBD")
def test_unaligned(self):
v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]
d = v.view(np.dtype("S8"))
@@ -492,8 +545,9 @@ class TestIndexing:
# Unlike the non nd-index:
assert_(arr[index,].shape != (1,))
+ @pytest.mark.xfail(reason="XXX: low-prio behaviour to support")
def test_broken_sequence_not_nd_index(self):
- # See gh-5063:
+ # See https://github.com/numpy/numpy/issues/5063
# If we have an object which claims to be a sequence, but fails
# on item getting, this should not be converted to an nd-index (tuple)
# If this object happens to be a valid index otherwise, it should work
@@ -531,6 +585,7 @@ class TestIndexing:
zind = np.zeros(4, dtype=np.intp)
assert_array_equal(x2[ind, zind], x2[ind.copy(), zind])
+ @xfail_neg_step
def test_indexing_array_negative_strides(self):
# From gh-8264,
# core dumps if negative strides are used in iteration
@@ -541,6 +596,7 @@ class TestIndexing:
arr[slices] = 10
assert_array_equal(arr, 10.)
+ @pytest.mark.xfail(reason="torch does not support character/string dtypes")
def test_character_assignment(self):
# This is an example a function going through CopyObject which
# used to have an untested special path for scalars
@@ -558,12 +614,17 @@ class TestIndexing:
# These are limitations based on the number of arguments we can process.
# For `num=32` (and all boolean cases), the result is actually defined;
# but the use of NpyIter (NPY_MAXARGS) limits it for technical reasons.
+ if not (isinstance(index, np.ndarray) and original_ndim < num):
+ # non-xfail cases fail because of assigning too many indices
+ pytest.xfail("torch does not limit dims to 32")
arr = np.ones((1,) * original_ndim)
with pytest.raises(IndexError):
arr[(index,) * num]
with pytest.raises(IndexError):
arr[(index,) * num] = 1.
+
+ @pytest.mark.xfail(reason="XXX: wrapping view stuff is TBD")
@pytest.mark.skipif(IS_WASM, reason="no threading")
def test_structured_advanced_indexing(self):
# Test that copyswap(n) used by integer array indexing is threadsafe
@@ -593,10 +654,17 @@ class TestIndexing:
a = np.arange(25).reshape((5, 5))
assert_equal(a[[0, 1]], np.array([a[0], a[1]]))
assert_equal(a[[0, 1], [0, 1]], np.array([0, 6]))
+ pytest.xfail(
+ "torch happily consumes non-tuple sequences with multi-axis "
+ "indices (i.e. slices) as an index, whereas NumPy invalidates "
+ "them, assumedly to keep things simple. This invalidation "
+ "behaviour is just too niche to bother emulating."
+ )
assert_raises(IndexError, a.__getitem__, [slice(None)])
class TestFieldIndexing:
+ @pytest.mark.xfail(reason="torch has no type distinct from a 0-d array")
def test_scalar_return_type(self):
# Field access on an array should return an array, even if it
# is 0-d.
@@ -626,20 +694,20 @@ class TestBroadcastedAssignments:
a = np.zeros(5)
# Too large and not only ones.
- assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1)))
- assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1)))
- assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1)))
+ assert_raises((ValueError, RuntimeError), assign, a, s_[...], np.ones((2, 1)))
+ assert_raises((ValueError, RuntimeError), assign, a, s_[[1, 2, 3],], np.ones((2, 1)))
+ assert_raises((ValueError, RuntimeError), assign, a, s_[[[1], [2]],], np.ones((2,2,1)))
def test_simple_broadcasting_errors(self):
assign = self.assign
s_ = np.s_
a = np.zeros((5, 1))
- assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2)))
- assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0)))
- assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2)))
- assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0)))
- assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1)))
+ assert_raises((ValueError, RuntimeError), assign, a, s_[...], np.zeros((5, 2)))
+ assert_raises((ValueError, RuntimeError), assign, a, s_[...], np.zeros((5, 0)))
+ assert_raises((ValueError, RuntimeError), assign, a, s_[:, [0]], np.zeros((5, 2)))
+ assert_raises((ValueError, RuntimeError), assign, a, s_[:, [0]], np.zeros((5, 0)))
+ assert_raises((ValueError, RuntimeError), assign, a, s_[[0], :], np.zeros((2, 1)))
@pytest.mark.parametrize("index", [
(..., [1, 2], slice(None)),
@@ -649,13 +717,15 @@ class TestBroadcastedAssignments:
values = np.zeros((100, 100)) # will never broadcast below
arr = np.zeros((3, 4, 5, 6, 7))
- # We currently report without any spaces (could be changed)
- shape_str = str(arr[index].shape).replace(" ", "")
- with pytest.raises(ValueError) as e:
+ with pytest.raises((ValueError, RuntimeError)) as e:
arr[index] = values
- assert str(e.value).endswith(shape_str)
+ shape = arr[index].shape
+ r_inner_shape = (
+ "".join(f"{side}, ?" for side in shape[:-1]) + str(shape[-1])
+ )
+ assert re.search(fr"[\(\[]{r_inner_shape}[\]\)]$", str(e.value))
def test_index_is_larger(self):
# Simple case of fancy index broadcasting of the index.
@@ -664,6 +734,7 @@ class TestBroadcastedAssignments:
assert_((a[:3, :3] == [2, 3, 4]).all())
+ @xfail_neg_step
def test_broadcast_subspace(self):
a = np.zeros((100, 100))
v = np.arange(100)[:,None]
@@ -672,6 +743,7 @@ class TestBroadcastedAssignments:
assert_((a[::-1] == v).all())
[email protected](reason="XXX: wrapping view stuff is TBD")
class TestSubclasses:
def test_basic(self):
# Test that indexing in various ways produces SubClass instances,
@@ -737,6 +809,9 @@ class TestSubclasses:
class TestFancyIndexingCast:
+ @pytest.mark.xfail(
+ reason="XXX: low-prio to support assigning complex values on floating arrays"
+ )
def test_boolean_index_cast_assign(self):
# Setup the boolean index and float arrays.
shape = (8, 63)
@@ -759,6 +834,7 @@ class TestFancyIndexingCast:
assert_equal(zero_array[0, 1], 0)
class TestFancyIndexingEquivalence:
+ @pytest.mark.xfail(reason="torch does not support object dtype")
def test_object_assign(self):
# Check that the field and object special case using copyto is active.
# The right hand side cannot be converted to an array here.
@@ -791,6 +867,7 @@ class TestFancyIndexingEquivalence:
arr[[0], ...] = [[[1], [2], [3], [4]]]
assert_array_equal(arr, cmp_arr)
+ @pytest.mark.xfail(reason="torch does not support character/string dtypes")
def test_cast_equivalence(self):
# Yes, normal slicing uses unsafe casting.
a = np.arange(5)
@@ -806,6 +883,7 @@ class TestFancyIndexingEquivalence:
assert_array_equal(a, b[0])
[email protected](reason="XXX: requires broadcast() and broadcast_to()")
class TestMultiIndexingAutomated:
"""
These tests use code to mimic the C-Code indexing for selection.
@@ -847,7 +925,7 @@ class TestMultiIndexingAutomated:
np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast
np.array([0, 1, -2]),
np.array([[2], [0], [1]]),
- np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),
+ np.array([[0, -1], [0, 1]], dtype=np.dtype('intp')),
np.array([2, -1], dtype=np.int8),
np.zeros([1]*31, dtype=int), # trigger too large array.
np.array([0., 1.])] # invalid datatype
@@ -1147,6 +1225,7 @@ class TestMultiIndexingAutomated:
def _compare_index_result(self, arr, index, mimic_get, no_copy):
"""Compare mimicked result to indexing result.
"""
+ pytest.xfail("XXX: wrapping view stuff is TBD")
arr = arr.copy()
indexed_arr = arr[index]
assert_array_equal(indexed_arr, mimic_get)
@@ -1263,9 +1342,15 @@ class TestFloatNonIntegerArgument:
assert_raises(TypeError, np.reshape, a, (1., 1., -1))
assert_raises(TypeError, np.reshape, a, (np.array(1.), -1))
+ pytest.xfail("XXX: take not implemented")
assert_raises(TypeError, np.take, a, [0], 1.)
assert_raises(TypeError, np.take, a, [0], np.float64(1.))
+ @pytest.mark.xfail(
+ reason=(
+ "torch doesn't have scalar types with distinct element-wise behaviours"
+ )
+ )
def test_non_integer_sequence_multiplication(self):
# NumPy scalar sequence multiply should not work with non-integers
def mult(a, b):
@@ -1289,13 +1374,17 @@ class TestBooleanIndexing:
a = np.array([[[1]]])
assert_raises(TypeError, np.reshape, a, (True, -1))
- assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1))
# Note that operator.index(np.array(True)) does not work, a boolean
# array is thus also deprecated, but not with the same message:
- assert_raises(TypeError, operator.index, np.array(True))
assert_warns(DeprecationWarning, operator.index, np.True_)
+
+ pytest.xfail("XXX: take not implemented")
assert_raises(TypeError, np.take, args=(a, [0], False))
+ pytest.xfail("torch consumes boolean tensors as ints, no bother raising here")
+ assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1))
+ assert_raises(TypeError, operator.index, np.array(True))
+
def test_boolean_indexing_weirdness(self):
# Weird boolean indexing things
a = np.ones((2, 3, 4))
@@ -1310,32 +1399,24 @@ class TestBooleanIndexing:
# This used to incorrectly work (and give an array of shape (0,))
idx1 = np.array([[False]*9])
- assert_raises_regex(IndexError,
- "boolean index did not match indexed array along dimension 0; "
- "dimension is 3 but corresponding boolean dimension is 1",
- lambda: a[idx1])
+ with pytest.raises(IndexError):
+ a[idx1]
# This used to incorrectly give a ValueError: operands could not be broadcast together
idx2 = np.array([[False]*8 + [True]])
- assert_raises_regex(IndexError,
- "boolean index did not match indexed array along dimension 0; "
- "dimension is 3 but corresponding boolean dimension is 1",
- lambda: a[idx2])
+ with pytest.raises(IndexError):
+ a[idx2]
# This is the same as it used to be. The above two should work like this.
idx3 = np.array([[False]*10])
- assert_raises_regex(IndexError,
- "boolean index did not match indexed array along dimension 0; "
- "dimension is 3 but corresponding boolean dimension is 1",
- lambda: a[idx3])
+ with pytest.raises(IndexError):
+ a[idx3]
# This used to give ValueError: non-broadcastable operand
a = np.ones((1, 1, 2))
idx = np.array([[[True], [False]]])
- assert_raises_regex(IndexError,
- "boolean index did not match indexed array along dimension 1; "
- "dimension is 1 but corresponding boolean dimension is 2",
- lambda: a[idx])
+ with pytest.raises(IndexError):
+ a[idx]
class TestArrayToIndexDeprecation:
@@ -1346,9 +1427,17 @@ class TestArrayToIndexDeprecation:
# so no exception is expected. The raising is effectively tested above.
a = np.array([[[1]]])
+ pytest.xfail("XXX: take not implemented")
+ assert_raises(TypeError, np.take, a, [0], a)
+
+ pytest.xfail(
+ "Multi-dimensional tensors are indexable just as long as they only "
+ "contain a single element, no bother raising here"
+ )
assert_raises(TypeError, operator.index, np.array([1]))
+
+ pytest.xfail("torch consumes tensors as ints, no bother raising here")
assert_raises(TypeError, np.reshape, a, (a, -1))
- assert_raises(TypeError, np.take, a, [0], a)
class TestNonIntegerArrayLike:
@@ -1358,6 +1447,13 @@ class TestNonIntegerArrayLike:
an integer.
"""
+ @pytest.mark.xfail(
+ reason=(
+ "torch consumes floats by way of falling back on its deprecated "
+ "__index__ behaviour, no bother raising here"
+ )
+ )
+ @pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_basic(self):
a = np.arange(10)
@@ -1372,6 +1468,12 @@ class TestMultipleEllipsisError:
"""An index can only have a single ellipsis.
"""
+ @pytest.mark.xfail(
+ reason=(
+ "torch currently consumes multiple ellipsis, no bother raising "
+ "here. See https://github.com/pytorch/pytorch/issues/59787#issue-917252204"
+ )
+ )
def test_basic(self):
a = np.arange(10)
assert_raises(IndexError, lambda: a[..., ...])
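
For context, below is a minimal sketch of the behavioural divergences that the patch works around, assuming `numpy` and `torch` are importable (exact exception messages vary by version). It illustrates why the tests now accept (IndexError, TypeError) and (ValueError, RuntimeError) tuples and why negative-step slicing is marked with `xfail_neg_step`. It is not part of the patch above.

# Illustrative sketch only -- not part of the patch.
import numpy as np
import torch

a = np.array([[[5]]])
t = torch.as_tensor([[[5]]])

# Float slice/index arguments: NumPy raises IndexError, torch raises TypeError,
# hence assert_raises((IndexError, TypeError), ...).
for label, obj in (("numpy", a), ("torch", t)):
    try:
        obj[0.0:, 0.0]
    except Exception as exc:
        print(label, "->", type(exc).__name__)

# Broadcast mismatch on assignment: NumPy raises ValueError, torch typically
# raises RuntimeError, hence assert_raises((ValueError, RuntimeError), ...).
for label, arr, value in (
    ("numpy", np.zeros(5), np.ones((2, 1))),
    ("torch", torch.zeros(5), torch.ones(2, 1)),
):
    try:
        arr[...] = value
    except Exception as exc:
        print(label, "->", type(exc).__name__)

# Negative slice steps: supported by NumPy, rejected by torch -- the reason
# for the xfail_neg_step marker.
print(np.arange(5)[::-1])
try:
    torch.arange(5)[::-1]
except Exception as exc:
    print("torch ->", type(exc).__name__)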