import torch
from torch import sparse

import functools
import itertools
import random
import unittest
from torch.autograd.gradcheck import gradcheck
from common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, do_test_empty_full, load_tests
from common_cuda import TEST_CUDA
from numbers import Number

# Re-export load_tests from common_utils so automatic test sharding sees it
# as used (this also silences flake warnings).
load_tests = load_tests


def cpu_only(inner):
    @functools.wraps(inner)
    def outer(self, *args, **kwargs):
        if self.is_cuda:
            raise unittest.SkipTest("Test is CPU-only")
        inner(self, *args, **kwargs)
    return outer


def cuda_only(inner):
    @functools.wraps(inner)
    def outer(self, *args, **kwargs):
        if not self.is_cuda:
            raise unittest.SkipTest("Test is GPU-only")
        inner(self, *args, **kwargs)
    return outer
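
# Usage sketch (illustrative, not from the original file): these decorators
# are meant for methods of the TestCase subclasses below, which expose an
# `is_cuda` flag that the CUDA variants override.
#
#     class TestSparse(TestCase):
#         @cpu_only
#         def test_cpu_specific(self):
#             ...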


class TestSparse(TestCase):

    def setUp(self):
        # These parameters control the various ways we can run the test.
        # The CUDA subclasses below override them in their own setUp.
        self.is_cuda = False
        self.is_uncoalesced = False
        self.device = 'cpu'
        self.value_dtype = torch.float64
        self.index_tensor = lambda *args: torch.tensor(*args, dtype=torch.int64)
        self.value_empty = lambda *args: torch.empty(*args, dtype=self.value_dtype)
        self.value_tensor = lambda *args: torch.tensor(*args, dtype=self.value_dtype)

        def sparse_empty_factory(*args, **kwargs):
            kwargs['dtype'] = kwargs.get('dtype', self.value_dtype)
            kwargs['layout'] = kwargs.get('layout', torch.sparse_coo)
            kwargs['device'] = kwargs.get('device', self.device)
            return torch.empty(*args, **kwargs)
        self.sparse_empty = sparse_empty_factory

        def sparse_tensor_factory(*args, **kwargs):
            kwargs['dtype'] = kwargs.get('dtype', self.value_dtype)
            kwargs['device'] = kwargs.get('device', self.device)
            return torch.sparse_coo_tensor(*args, **kwargs)
        self.sparse_tensor = sparse_tensor_factory
        self.legacy_sparse_tensor = torch.sparse.DoubleTensor

        super(TestSparse, self).setUp()

    def _gen_sparse(self, sparse_dim, nnz, with_size):
        if isinstance(with_size, Number):
            with_size = [with_size] * sparse_dim
        return self.genSparseTensor(with_size, sparse_dim, nnz,
                                    self.is_uncoalesced, self.device)
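
    # COO layout sketch (illustrative, not part of the original test): a
    # tensor with sparse_dim=2 and nnz=3 stores an indices tensor of shape
    # (sparse_dim, nnz) and a values tensor of shape (nnz,):
    #
    #     i = torch.tensor([[0, 1, 1],
    #                       [2, 0, 2]])
    #     v = torch.tensor([3., 4., 5.])
    #     x = torch.sparse_coo_tensor(i, v, (2, 3))
    #     x.to_dense()  # -> [[0., 0., 3.], [4., 0., 5.]]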

    def assert_uncoalesced(self, x):
        """
        Test if a CPU tensor is uncoalesced. This is used to ensure
        correctness of the uncoalesced tensor generation algorithm.
        """
        assert not x.is_coalesced()
        existing_indices = set()
        for i in range(x._nnz()):
            index = str(x._indices()[:, i])
            if index in existing_indices:
                return True
            existing_indices.add(index)
        return False
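
    # What "uncoalesced" means (a minimal sketch, assuming CPU float tensors):
    # the same index may appear several times, and the logical value at that
    # index is the sum of the duplicates, which coalesce() materializes.
    #
    #     i = torch.tensor([[0, 0, 1]])
    #     v = torch.tensor([1., 2., 3.])
    #     x = torch.sparse_coo_tensor(i, v, (2,))
    #     x.is_coalesced()         # False
    #     x.coalesce()._values()   # tensor([3., 3.])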

    def randn(self, *args, **kwargs):
        """
        Variant of torch.randn that also works in the TEST_CUDA case.
        """
        return self.value_empty(*args, **kwargs).normal_()

    def test_print(self):
        shape_sparse_dim_nnz = [
            ((100, 20, 3), 2, 0),
        ]

        printed = []
        for shape, sparse_dim, nnz in shape_sparse_dim_nnz:
            indices_shape = torch.Size((sparse_dim, nnz))
            values_shape = torch.Size((nnz,) + shape[sparse_dim:])
            printed.append("# shape: {}".format(torch.Size(shape)))
            printed.append("# nnz: {}".format(nnz))
            printed.append("# sparse_dim: {}".format(sparse_dim))
            printed.append("# indices shape: {}".format(indices_shape))
            printed.append("# values shape: {}".format(values_shape))

            indices = torch.arange(indices_shape.numel(), dtype=self.index_tensor(0).dtype,
                                   device=self.device).view(indices_shape)
            for d in range(sparse_dim):
                indices[d].clamp_(max=(shape[d] - 1))  # make it a valid index
            if self.is_uncoalesced and indices.size(1) > 0:
                indices[:, -1] = indices[:, 0]  # make it uncoalesced
            values_numel = values_shape.numel()
            values = torch.arange(values_numel, dtype=self.value_dtype,
                                  device=self.device).view(values_shape).div_(values_numel / 2.)
            sp_tensor = self.sparse_tensor(indices, values, shape)
            dtypes = [torch.int32]
            if values.dtype == torch.double:
                dtypes.append(torch.float)
            else:
                dtypes.append(torch.double)
            for dtype in dtypes:
                printed.append("########## {} ##########".format(dtype))
                x = sp_tensor.detach().to(dtype)
                printed.append("# sparse tensor")
                printed.append(str(x))
                if x.dtype.is_floating_point:
                    printed.append("# after requires_grad_")
                    printed.append(str(x.requires_grad_()))
                    printed.append("# after addition")
                    printed.append(str(x + x))
                printed.append("# _indices")
                printed.append(str(x._indices()))
                printed.append("# _values")
                printed.append(str(x._values()))
            printed.append('')
        self.assertExpected('\n'.join(printed))

    def test_basic(self):
        def test_shape(sparse_dims, nnz, with_size):
            if isinstance(with_size, Number):
                with_size = [with_size] * sparse_dims
            x, i, v = self._gen_sparse(sparse_dims, nnz, with_size)

            if self.is_uncoalesced:
                with self.assertRaisesRegex(RuntimeError,
                                            "Cannot get indices on an uncoalesced tensor"):
                    x.indices()
                with self.assertRaisesRegex(RuntimeError,
                                            "Cannot get values on an uncoalesced tensor"):
                    x.values()

        test_shape(3, 10, 100)
        test_shape(3, 10, [100, 100, 100])
        test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
        test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])

        # Make sure that coalesce handles duplicate indices correctly
        i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]])
        v = self.value_tensor([[idx**2, idx] for idx in range(i.size(1))])
        x = self.sparse_tensor(i, v, torch.Size([10, 2]))
        self.assertEqual(x.coalesce()._nnz(), 9)

    def test_coalesce(self):
        for empty_i, empty_v, empty_nnz in itertools.product([True, False], repeat=3):
            sparse_size = [] if empty_i else [2, 1]
            dense_size = [1, 0, 2] if empty_v else [1, 2]
            nnz = 0 if empty_nnz else 5

            t, _, _ = self._gen_sparse(len(sparse_size), nnz, sparse_size + dense_size)
            self.safeCoalesce(t)  # this tests correctness

    def test_ctor_size_checks(self):
        indices = self.index_tensor([
            [0, 0, 0],
            [0, 3, 0],
            [0, 0, 0],
            [0, 0, 0],
        ])
        values = self.value_tensor([2, 1, 3])

        # indices inconsistent with size
        self.assertRaises(
            RuntimeError,
            lambda: self.sparse_tensor(indices, values, torch.Size([2, 1, 1])))

        # values inconsistent with size
        values = self.value_tensor([
            [2, 1, 2, 1],
            [1, 0, 0, 0],
            [1, 0, 1, 0],
        ])
        self.assertRaises(
            RuntimeError,
            lambda: self.sparse_tensor(indices, values, torch.Size([2, 4, 2, 1])))

    def test_to_dense(self):
        def test_tensor(x, res):
            x.to_dense()  # calling to_dense several times checks for memory corruption
            x.to_dense()
            x.to_dense()
            self.assertEqual(res, x.to_dense())
            self.assertEqual(res, self.safeToDense(x))

            def fn(x):
                return x.to_dense()
            x.requires_grad_(True)
            gradcheck(fn, (x,), check_sparse_nnz=True)

    def test_to_sparse(self):
        shape = [10, 5, 19, 8]
        max_nnz = 1
        for dim, dim_sz in enumerate(shape, 1):
            max_nnz *= dim_sz
            rnnz = torch.randint(2, max_nnz, (1,)).item()
            for nnz in [0, 1, rnnz]:
                expected, _, _ = self._gen_sparse(dim, nnz, shape)
                d = expected.to_dense()
                result = d.to_sparse(dim)
                self.assertEqual(d, result.to_dense())
                self.assertEqual(expected.size(), result.size())
                self.assertEqual(dim, result.sparse_dim())

        sp, _, _ = self._gen_sparse(2, 10, [3, 3, 3])
        self.assertRaises(RuntimeError, lambda: sp.to_sparse())

    def test_scalar(self):
        # tensor with one value
        a = self.sparse_tensor(self.index_tensor([]).unsqueeze(1), 12.3, [])
        self.assertEqual(1, a._values().numel())
        a_coalesced = a.coalesce()
        self.assertTrue(a_coalesced.is_coalesced())
        self.assertEqual(self.value_tensor(12.3), a.to_dense())

        # tensor with multiple values
        a = self.sparse_tensor(self.index_tensor([]).unsqueeze(1).expand(0, 2), [12.3, 12.3], [])
        self.assertEqual(2, a._values().numel())
        a_coalesced = a.coalesce()
        self.assertTrue(a_coalesced.is_coalesced())
        self.assertEqual(self.value_tensor(12.3 * 2), a.to_dense())

        # tensor without value
        a = self.sparse_empty(())
        self.assertEqual(0, a._values().numel())
        a_coalesced = a.coalesce()
        self.assertTrue(a_coalesced.is_coalesced())
        self.assertEqual(self.value_tensor(0), a.to_dense())
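
    # Scalar sparse tensors, as exercised above (sketch): indices have shape
    # (0, nnz) and the size argument is the empty list.
    #
    #     a = torch.sparse_coo_tensor(torch.zeros(0, 2, dtype=torch.long),
    #                                 torch.tensor([12.3, 12.3]), [])
    #     a.coalesce()._values()  # tensor([24.6000]), both entries summed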

    def test_shared(self):
        i = self.index_tensor([[2]])
        v = self.value_tensor([5])
        x = self.sparse_tensor(i, v, torch.Size([3]))
        v[0] = 6
        self.assertEqual(self.value_tensor([0, 0, 6]), self.safeToDense(x))
        i[0][0] = 0
        self.assertEqual(self.value_tensor([6, 0, 0]), self.safeToDense(x))

    def test_to_dense_hybrid(self):
        def test_tensor(x, res):
            x.to_dense()  # calling to_dense several times checks for memory corruption
            x.to_dense()
            x.to_dense()
            self.assertEqual(res, x.to_dense())

            def fn(x):
                return x.to_dense()
            x.requires_grad_(True)
            gradcheck(fn, (x,), check_sparse_nnz=True)

    def test_contig(self):
        def test_tensor(x, exp_i, exp_v):
            x = self.safeCoalesce(x)
            self.assertEqual(exp_i, x._indices())
            self.assertEqual(exp_v, x._values())

        i = self.index_tensor([
            [1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
            [92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
        ])
        v = self.value_tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
        x = self.sparse_tensor(i, v, torch.Size([100, 100]))
        exp_i = self.index_tensor([
            [0, 1, 6, 14, 27, 35, 39, 40, 66, 71],
            [31, 92, 65, 50, 34, 62, 22, 56, 74, 89],
        ])
        exp_v = self.value_tensor([2, 1, 6, 4, 10, 3, 5, 9, 8, 7])
        test_tensor(x, exp_i, exp_v)

        test_tensor(x, exp_i, exp_v)

        test_tensor(x, exp_i, exp_v)

        test_tensor(x, exp_i, exp_v)

        test_tensor(x, exp_i, exp_v)

    def test_contig_hybrid(self):
        def test_tensor(x, exp_i, exp_v):
            x = self.safeCoalesce(x)
            self.assertEqual(exp_i, x._indices())
            self.assertEqual(exp_v, x._values())

        i = self.index_tensor([
            [1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
            [92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
        ])
        v = self.value_tensor([
            [1, 2], [2, 3], [3, 4], [4, 5], [5, 6],
            [6, 7], [7, 8], [8, 9], [9, 10], [10, 11],
        ])
        x = self.sparse_tensor(i, v, torch.Size([100, 100, 2]))
        exp_i = self.index_tensor([
            [0, 1, 6, 14, 27, 35, 39, 40, 66, 71],
            [31, 92, 65, 50, 34, 62, 22, 56, 74, 89],
        ])
        exp_v = self.value_tensor([
            [2, 3], [1, 2], [6, 7], [4, 5], [10, 11],
            [3, 4], [5, 6], [9, 10], [8, 9], [7, 8],
        ])
        test_tensor(x, exp_i, exp_v)

        v = self.value_tensor([[3, 3, 3], [2, 2, 2], [4, 4, 4], [1, 1, 1]])
        exp_v = self.value_tensor([[2, 2, 2], [1, 1, 1], [3, 3, 3], [4, 4, 4]])
        test_tensor(x, exp_i, exp_v)

        test_tensor(x, exp_i, exp_v)

        v = self.value_tensor([[3, 2, 3], [2, 1, 1], [4, 3, 4], [1, 1, 1]])
        test_tensor(x, exp_i, exp_v)

        test_tensor(x, exp_i, exp_v)
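
    # Hybrid sparse/dense layout used above (sketch): indices of shape
    # (2, nnz) with values of shape (nnz, 2) give sparse_dim=2 and
    # dense_dim=1, i.e. each nonzero element is a length-2 dense vector.
    #
    #     i = torch.tensor([[0], [1]])
    #     v = torch.tensor([[10., 11.]])
    #     x = torch.sparse_coo_tensor(i, v, (2, 2, 2))
    #     x.sparse_dim(), x.dense_dim()  # (2, 1)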

    def test_clone(self):
        def test_shape(sparse_dims, nnz, with_size):
            x = self._gen_sparse(sparse_dims, nnz, with_size)[0]
            if self.is_uncoalesced:
                self.assertFalse(x.is_coalesced())
                y = x.clone()
                self.assertFalse(y.is_coalesced())

            x = x.coalesce()
            self.assertTrue(x.is_coalesced())
            y = x.clone()
            self.assertTrue(y.is_coalesced())

        test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
        test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])

    def test_Sparse_to_Sparse_copy_(self):
        # This is for testing torch.copy_(SparseTensor, SparseTensor)
        sparse_dims = 3
        nnz = 10
        sizes = [2, 3, 4, 5]  # hybrid sparse
        x1, _, _ = self._gen_sparse(sparse_dims, nnz, sizes)
        x2, _, _ = self._gen_sparse(sparse_dims, nnz + 10, sizes)

        # test copy with the same dtype
        x2_dense = x2.to_dense()
        x1.copy_(x2)
        self.assertEqual(x2_dense, x1.to_dense())

        # test copy with different dtypes: the destination dtype is preserved
        x1 = x1.to(torch.float32)
        x2 = x2.to(torch.float64)
        x1_dtype = x1.dtype
        x1.copy_(x2)
        self.assertEqual(x1_dtype, x1.dtype)

        # copy_ does not broadcast
        self.assertRaises(RuntimeError, lambda: x1.copy_(x2.narrow_copy(0, 0, 1)))
        # copying from a dense tensor is an error
        self.assertRaises(RuntimeError, lambda: x1.copy_(torch.randn(5, 5)))

        # test autograd
        x1, _, _ = self._gen_sparse(sparse_dims, nnz, sizes)
        x2, _, _ = self._gen_sparse(sparse_dims, nnz + 10, sizes)
        x2.requires_grad_(True)
        x1.copy_(x2)
        y = x1 * 2
        x2_clone = x2.clone()
        y.backward(x2_clone)
        expected_grad = x2_clone * 2
        self.assertEqual(expected_grad.to_dense(), x2.grad.to_dense())

    @unittest.skipIf(torch.cuda.device_count() < 2, "no multi-GPU")
    def test_Sparse_to_Sparse_copy_multi_gpu(self):
        # This is for testing torch.copy_(SparseTensor, SparseTensor) across GPU devices
        sparse_dims = 3
        nnz = 10
        sizes = [2, 3, 4, 5]  # hybrid sparse
        x1, _, _ = self._gen_sparse(sparse_dims, nnz, sizes)
        x2, _, _ = self._gen_sparse(sparse_dims, nnz + 10, sizes)
        x1 = x1.to('cuda:0')

        def test_cross_device(x1, x2):
            x1_device = x1.device
            x1.copy_(x2)
            self.assertEqual(x2.to('cuda:0').to_dense(), x1.to_dense())
            self.assertEqual(x1_device, x1.device)

        test_cross_device(x1, x2.to('cuda:1'))
        test_cross_device(x1, x2.to('cpu'))

        # test autograd
        x2 = x2.to('cuda:1')
        x2.requires_grad_(True)
        x1.copy_(x2)
        y = x1 * 2
        x2_clone = x2.clone().to('cuda:0')
        y.backward(x2_clone)
        expected_grad = x2_clone * 2
        self.assertEqual(expected_grad.to_dense(), x2.grad.to('cuda:0').to_dense())

    @cuda_only
    def test_cuda_empty(self):
        def test_tensor(x):
            y = x.cuda(0)
            self.assertEqual(x.sparse_dim(), y.sparse_dim())
            self.assertEqual(x.dense_dim(), y.dense_dim())

        x = torch.sparse.FloatTensor(2, 3, 4)
        test_tensor(x)

        x = torch.sparse.FloatTensor(2, 3, 4, 0)
        test_tensor(x)

    def test_transpose(self):
        def test_shape(sparse_dims, nnz, with_size):
            x = self._gen_sparse(sparse_dims, nnz, with_size)[0]
            y = self.safeToDense(x)

            for i, j in itertools.combinations(range(4), 2):
                x = x.transpose_(i, j)
                y = y.transpose(i, j)
                self.assertEqual(self.safeToDense(x), y)

                x = x.transpose(i, j)
                y = y.transpose(i, j)
                self.assertEqual(self.safeToDense(x), y)

        test_shape(4, 3, [7, 7, 7, 3, 3, 3, 0])
        test_shape(4, 0, [0, 0, 7, 3, 3, 3, 0])

    def test_coalesce_transpose_mm(self):
        def test_shape(di, dj, dk, nnz):
            x, _, _ = self._gen_sparse(2, nnz, [dj, di])
            y = torch.randn(dj, dk)

            x_coalesced = x.coalesce()
            self.assertTrue(x_coalesced.is_coalesced())

            x_coalesced_t = x_coalesced.t()
            # transpose preserves the coalesced flag only when there are no
            # indices to reorder
            self.assertEqual(x_coalesced_t.is_coalesced(), di * nnz == 0)

            res = torch.mm(x_coalesced_t, y)
            expected = torch.mm(self.safeToDense(x_coalesced_t), y)
            self.assertEqual(res, expected)

        test_shape(10, 20, 30, 20)
        test_shape(0, 20, 30, 0)
        test_shape(10, 0, 30, 0)
        test_shape(10, 20, 0, 0)
        test_shape(10, 20, 0, 20)
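
    # Why transposition usually destroys the coalesced flag (note): coalesced
    # tensors keep their indices sorted lexicographically by index row, and
    # swapping two index rows changes the sort key. Only when there are no
    # indices to reorder (di * nnz == 0 above) can the flag be preserved.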

    def test_t_empty(self):
        def test_in_place(x):
            shape_original = x.shape
            x.t_()
            self.assertEqual(torch.Size([shape_original[1], shape_original[0]]), x.size())

        def test_not_in_place(x):
            shape_original = x.shape
            y = x.t()
            self.assertEqual(torch.Size([shape_original[1], shape_original[0]]), y.size())

        x = self.sparse_empty(2, 3)
        test_in_place(x)
        test_not_in_place(x)

    def test_add_zeros(self):
        def test_shape(sparse_dims, nnz, sizes):
            x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes)
            zeros = torch.zeros(sizes, layout=torch.sparse_coo).to(x.device)
            r1 = zeros + x
            r2 = x + zeros
            self.assertEqual(r1, x)
            self.assertEqual(r2, x)

        test_shape(1, 20, [1])
        test_shape(4, 20, [3, 17, 19, 5])
        test_shape(2, 20, [3, 17, 19, 5])
        test_shape(2, 20, [3, 17, 19, 0])

    def test_cat(self):
        # shapes: list of tuples (sparse_dims, nnz, sizes)
        def test_shapes(shapes, dim, fail_message=None):
            inputs = [self._gen_sparse(shape[0], shape[1], shape[2])[0]
                      for shape in shapes]
            if fail_message:
                with self.assertRaisesRegex(RuntimeError, fail_message):
                    torch.cat(inputs, dim)
            else:
                result = torch.cat(inputs, dim)
                dense_result = torch.cat([t.to_dense() for t in inputs], dim)
                self.assertEqual(dense_result, result.to_dense())

        test_shapes(
            [(3, 10, [2, 3, 4]), (3, 10, [2, 1, 4]), (3, 10, [2, 4, 4])], 1)

        # mismatched sizes
        test_shapes([(3, 10, [2, 3, 4]), (3, 10, [2, 1, 4])], 0,
                    "All tensors must have the same shape: \\[2, 3, 4].*\\[2, 1, 4]")
        # hybrid sparse/dense
        test_shapes(
            [(2, 10, [2, 3, 4]), (2, 10, [2, 1, 4]), (2, 10, [2, 4, 4])], 1)
        # cat along a dense dim
        test_shapes([(2, 10, [2, 3, 4]), (2, 10, [2, 3, 7])], 2)
        test_shapes([(1, 10, [2, 3, 4]), (1, 10, [2, 3, 4])], 1)
        test_shapes([(1, 10, [2, 3, 4]), (1, 10, [2, 3, 4])], 2)
        # mismatched sparse/dense dims
        test_shapes([(2, 10, [2, 3, 4]), (3, 10, [2, 3, 4])], 0,
                    "All tensors must have the same.*2, 1, but tensor at position 1 has 3, 0.")
        # wrapped (negative) dimension
        test_shapes(
            [(3, 10, [2, 3, 4]), (3, 10, [2, 1, 4]), (3, 10, [2, 4, 4])], -2)

        # mixing sparse and dense inputs is an error
        sp = self._gen_sparse(3, 10, [2, 3, 4])[0]
        dn = sp.to_dense()
        with self.assertRaisesRegex(RuntimeError,
                                    "Concatenating sparse tensors, but a dense tensor was found at position 1."):
            torch.cat((sp, dn))

    def test_unsqueeze(self):
        def test_shape(sparse_dims, nnz, sizes, unsqueeze_dim, fail_message=None):
            x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes)
            if fail_message:
                with self.assertRaisesRegex(IndexError, fail_message):
                    torch.unsqueeze(x, unsqueeze_dim)
            else:
                result = torch.unsqueeze(x, unsqueeze_dim)
                dense_result = torch.unsqueeze(x.to_dense(), unsqueeze_dim)
                self.assertEqual(dense_result, result.to_dense())

        # basic case
        test_shape(3, 10, [5, 7, 11], 0)

        # hybrid sparse/dense, unsqueeze along a sparse dim
        test_shape(3, 10, [5, 7, 11, 13, 17], 0)
        test_shape(3, 10, [5, 7, 11, 13, 17], 3)

        # unsqueeze along a dense dim
        test_shape(3, 10, [5, 7, 11, 13, 17], 4)
        test_shape(3, 10, [5, 7, 11, 13, 17], 5)

        # wrapped (negative) dimensions
        test_shape(3, 10, [5, 7, 11, 13, 17], -1)
        test_shape(3, 10, [5, 7, 11, 13, 17], -6)

        # bounds
        test_shape(3, 10, [5, 7, 11, 13, 17], -7, "Dimension out of range")
        test_shape(3, 10, [5, 7, 11, 13, 17], 6, "Dimension out of range")

    def test_addmm(self):
        def test_shape(di, dj, dk, nnz):
            x = self._gen_sparse(2, nnz, [di, dj])[0]
            t = torch.randn(di, dk)
            y = torch.randn(dj, dk)
            alpha = random.random()
            beta = random.random()

            res = torch.addmm(alpha, t, beta, x, y)
            expected = torch.addmm(alpha, t, beta, self.safeToDense(x), y)
            self.assertEqual(res, expected)

            res = torch.addmm(t, x, y)
            expected = torch.addmm(t, self.safeToDense(x), y)
            self.assertEqual(res, expected)

        test_shape(10, 100, 100, 20)
        test_shape(100, 1000, 200, 20)
        test_shape(64, 10000, 300, 20)
        test_shape(0, 100, 100, 0)
        test_shape(10, 0, 100, 0)
        test_shape(10, 100, 0, 0)
        test_shape(10, 100, 0, 20)
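
    # Note on the legacy signature exercised above: torch.addmm(a, t, b, x, y)
    # is the old positional spelling of a * t + b * (x @ y); the keyword form
    # is torch.addmm(t, x, y, beta=a, alpha=b).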

    @cpu_only
    def test_saddmm(self):
        def test_shape(di, dj, dk, nnz):
            x = self._gen_sparse(2, nnz, [di, dj])[0]
            t = self._gen_sparse(2, nnz, [di, dk])[0]
            y = torch.randn(dj, dk)
            alpha = random.random()
            beta = random.random()

            res = torch.saddmm(alpha, t, beta, x, y)
            expected = torch.addmm(alpha, self.safeToDense(t), beta, self.safeToDense(x), y)
            self.assertEqual(self.safeToDense(res), expected)

            res = torch.saddmm(t, x, y)
            expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y)
            self.assertEqual(self.safeToDense(res), expected)

            res = torch.smm(x, y)
            expected = torch.mm(self.safeToDense(x), y)
            self.assertEqual(self.safeToDense(res), expected)

        test_shape(7, 5, 3, 20)
        test_shape(1000, 100, 100, 20)
        test_shape(3000, 64, 300, 20)
        test_shape(0, 100, 100, 0)
        test_shape(1000, 0, 100, 0)
        test_shape(1000, 100, 0, 0)

    def test_sparse_addmm(self):
        def test_shape(m, n, p, nnz, broadcast):
            if broadcast:
                D1 = torch.randn((), device=self.device).requires_grad_(True)
            else:
                D1 = torch.randn(n, p, device=self.device).requires_grad_(True)
            D2 = torch.randn(m, p, device=self.device).requires_grad_(True)
            S = self._gen_sparse(2, nnz, [n, m])[0]
            S_dense = S.to_dense().requires_grad_(True)
            S.requires_grad_(True)
            self.assertEqual(torch.sparse.addmm(D1, S, D2), torch.addmm(D1, S_dense, D2))

            def fn(S, D1, D2):
                return torch.sparse.addmm(D1, S, D2)
            gradcheck(fn, (S, D1, D2), check_sparse_nnz=True)

        test_shape(7, 8, 9, 20, False)
        test_shape(7, 8, 9, 20, True)

    def test_sparse_mm(self):
        def test_shape(d1, d2, d3, nnz):
            D = torch.randn(d2, d3, device=self.device).requires_grad_(True)
            S = self._gen_sparse(2, nnz, [d1, d2])[0]
            S_dense = S.to_dense().requires_grad_(True)
            S.requires_grad_(True)
            self.assertEqual(torch.sparse.mm(S, D), torch.mm(S_dense, D))

            def fn(S, D):
                return torch.sparse.mm(S, D)
            gradcheck(fn, (S, D), check_sparse_nnz=True)

        test_shape(7, 8, 9, 20)
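
    # Usage sketch for torch.sparse.mm (illustrative): S must be a 2-D sparse
    # matrix, D a 2-D dense matrix, and the result is dense.
    #
    #     S = torch.sparse_coo_tensor([[0, 1], [1, 0]], [2., 3.], (2, 2))
    #     D = torch.eye(2)
    #     torch.sparse.mm(S, D)  # equals S.to_dense() @ D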

    def test_dsmm(self):
        def test_shape(di, dj, dk, nnz):
            x = self._gen_sparse(2, nnz, [di, dj])[0]
            y = self.randn(dj, dk)

            res = torch.dsmm(x, y)
            expected = torch.mm(self.safeToDense(x), y)
            self.assertEqual(res, expected)

        test_shape(7, 5, 3, 20)
        test_shape(1000, 100, 100, 20)
        test_shape(3000, 64, 300, 20)
        test_shape(0, 100, 100, 0)
        test_shape(1000, 0, 100, 0)
        test_shape(1000, 100, 0, 0)
        test_shape(1000, 100, 0, 20)

    def test_hsmm(self):
        def test_shape(di, dj, dk, nnz):
            x = self._gen_sparse(2, nnz, [di, dj])[0]
            y = self.randn(dj, dk)

            res = torch.hsmm(x, y)
            expected = torch.mm(self.safeToDense(x), y)
            self.assertEqual(res.to_dense(), expected)

        test_shape(7, 5, 3, 20)
        test_shape(1000, 100, 100, 20)
        test_shape(3000, 64, 300, 20)
        test_shape(0, 100, 100, 0)
        test_shape(1000, 0, 100, 0)
        test_shape(1000, 100, 0, 0)
        test_shape(1000, 100, 0, 20)

    def _test_spadd_shape(self, nnz, shape_i, shape_v=None):
        shape = shape_i + (shape_v or [])
        x, _, _ = self._gen_sparse(len(shape_i), nnz, shape)
        y = self.randn(*shape)
        r = random.random()

        res = torch.add(y, r, x)
        expected = y + r * self.safeToDense(x)
        self.assertEqual(res, expected)

        # Non contiguous dense tensor
        s = list(shape)
        s[0], s[-1] = s[-1], s[0]
        y = self.randn(*s)
        y.transpose_(0, len(s) - 1)
        r = random.random()

        res = torch.add(y, r, x)
        expected = y + r * self.safeToDense(x)
        self.assertEqual(res, expected)

        x, i, v = self._gen_sparse(len(shape_i), nnz, shape)
        nnz = i.size(1)

        # Non contiguous sparse indices tensor
        x_ = self.sparse_tensor(i[:, ::2], v[:int(nnz / 2)], x.shape)
        res = torch.add(y, r, x_)
        expected = y + r * self.safeToDense(x_)
        self.assertEqual(res, expected)

        # Non contiguous sparse values tensor
        x_ = self.sparse_tensor(i[:, :int(nnz / 2)], v[::2], x.shape)
        res = torch.add(y, r, x_)
        expected = y + r * self.safeToDense(x_)
        self.assertEqual(res, expected)

        # Non contiguous sparse indices and values tensors
        x_ = self.sparse_tensor(i[:, 1::2], v[1::2], x.shape)
        res = torch.add(y, r, x_)
        expected = y + r * self.safeToDense(x_)
        self.assertEqual(res, expected)
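
    # Note: torch.add(y, r, x) above is the legacy spelling of y + r * x with
    # a scalar r scaling the sparse operand; the modern form is
    # torch.add(y, x, alpha=r).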

    def test_spadd(self):
        self._test_spadd_shape(10, [5, 6])
        self._test_spadd_shape(10, [10, 10, 10])
        self._test_spadd_shape(10, [50, 30, 20])
        self._test_spadd_shape(10, [5, 5, 5, 5, 5, 5])
        self._test_spadd_shape(0, [0, 30, 20])
        self._test_spadd_shape(0, [50, 0, 20])
        self._test_spadd_shape(0, [50, 30, 0])

    def test_spadd_hybrid(self):
        self._test_spadd_shape(10, [5, 6], [2, 3])
        self._test_spadd_shape(10, [10, 10, 10], [3])
        self._test_spadd_shape(10, [50, 30, 20], [2])
        self._test_spadd_shape(10, [5, 5, 5, 5, 5, 5], [2])
        self._test_spadd_shape(0, [0, 30, 20], [2, 0])
        self._test_spadd_shape(0, [50, 0, 20], [2, 0])
        self._test_spadd_shape(0, [50, 30, 0], [2, 0])

    def test_norm(self):
        def test_shape(sparse_dims, nnz, with_size):
            x, _, _ = self._gen_sparse(sparse_dims, nnz, with_size)
            y = x.coalesce()
            self.assertEqual(x.norm(), y._values().norm())

        test_shape(3, 10, 100)
        test_shape(4, 10, [100, 100, 100, 5, 5, 5, 0])
        test_shape(4, 0, [0, 0, 100, 5, 5, 5, 0])

    def test_sparse_sum(self):

        def run_tests(S, td=None):
            D = S.coalesce().to_dense().detach().requires_grad_(True)
            if td is None:
                S_sum = torch.sparse.sum(S)
                D_sum = D.sum()
                self.assertEqual(S_sum, D_sum)

                def fn(S):
                    res = torch.sparse.sum(S)
                    if res.is_sparse:
                        res = res.to_dense()
                    return res
                gradcheck(fn, (S,), check_sparse_nnz=True)
            else:
                S_sum = torch.sparse.sum(S, td)
                D_sum = D.sum(td)
                self.assertEqual(S_sum.to_dense() if S_sum.is_sparse else S_sum, D_sum)

                def fn(S):
                    res = torch.sparse.sum(S, td)
                    if res.is_sparse:
                        res = res.to_dense()
                    return res
                gradcheck(fn, (S,), check_sparse_nnz=True)

        nnz = 10
        sparse_dims = 2
        with_size = [5, 5, 1, 4]  # use a dense dim = 1 to test for squeeze
        test_dims = []
        for i in range(1, 5):
            test_dims += itertools.combinations(range(len(with_size)), i)

        # negative dims wrap around
        x = torch.tensor([[0., 1., 0., 2.]]).to_sparse()
        self.assertEqual(torch.sparse.sum(x, dim=0), torch.sparse.sum(x, dim=-2))

        # SparseTensor.sum() is not supported; use torch.sparse.sum instead
        S = self._gen_sparse(sparse_dims, nnz, with_size)[0]
        self.assertRaises(RuntimeError, lambda: S.sum())

        # sum an empty tensor
        empty_S = torch.sparse_coo_tensor(size=with_size)
        empty_S.requires_grad_(True)
        empty_S_sum = torch.sparse.sum(empty_S)
        empty_S_sum.backward()
        self.assertEqual(empty_S.grad.to_dense(), empty_S.clone().detach().to_dense())

        # sum over all dims, then over every subset of dims
        S = self._gen_sparse(sparse_dims, nnz, with_size)[0]
        run_tests(S.requires_grad_(True))

        for test_dim in test_dims:
            S = self._gen_sparse(sparse_dims, nnz, with_size)[0]
            run_tests(S.requires_grad_(True), test_dim)
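
    # torch.sparse.sum behavior (sketch): summing over all dims returns a
    # dense 0-dim tensor, while summing over a subset of sparse dims returns
    # a sparse tensor; hence the `.to_dense() if ... is_sparse` dance in
    # run_tests above.
    #
    #     S = torch.sparse_coo_tensor([[0, 1], [1, 0]], [2., 3.], (2, 2))
    #     torch.sparse.sum(S)        # tensor(5.)
    #     torch.sparse.sum(S, [0])   # sparse, to_dense() -> tensor([3., 2.])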

    def _test_basic_ops_shape(self, nnz_x1, nnz_x2, shape_i, shape_v=None):
        shape = shape_i + (shape_v or [])
        x1, _, _ = self._gen_sparse(len(shape_i), nnz_x1, shape)
        x2, _, _ = self._gen_sparse(len(shape_i), nnz_x2, shape)

        y = x1.clone()
        y.zero_()
        expected = torch.zeros(x1.size())
        self.assertEqual(self.safeToDense(y), expected)

        y = x1.coalesce()
        z = x1.coalesce()
        self.assertTrue(y.is_coalesced())
        y._values().add_(1)
        if not x1.is_coalesced():
            # coalesce must be out of place when the input is uncoalesced
            self.assertEqual(z._values() + 1, y._values())

    def test_basic_ops(self):
        self._test_basic_ops_shape(9, 12, [5, 6])
        self._test_basic_ops_shape(9, 12, [10, 10, 10])
        self._test_basic_ops_shape(9, 12, [50, 30, 20])
        self._test_basic_ops_shape(9, 12, [5, 5, 5, 5, 5, 5])
        self._test_basic_ops_shape(0, 12, [10, 10, 10])
        self._test_basic_ops_shape(9, 0, [10, 10, 10])
        self._test_basic_ops_shape(0, 0, [10, 10, 10])
        self._test_basic_ops_shape(0, 0, [10, 10, 0])

    def test_basic_ops_hybrid(self):
        self._test_basic_ops_shape(9, 12, [5, 6], [2, 3])
        self._test_basic_ops_shape(9, 12, [10, 10, 10], [3])
        self._test_basic_ops_shape(9, 12, [50, 30, 20], [2])
        self._test_basic_ops_shape(9, 12, [5, 5, 5, 5, 5, 5], [2])
        self._test_basic_ops_shape(0, 12, [10, 10, 10], [2])
        self._test_basic_ops_shape(9, 0, [10, 10, 10], [2])
        self._test_basic_ops_shape(0, 0, [10, 10, 10], [2])
        self._test_basic_ops_shape(0, 0, [10, 10, 0], [2, 0])

    def test_add_dense_sparse_mismatch(self):
        def test_shape(dense_size, sparse_dims_shape, dense_dims_shape, sparse_size):
            x = torch.zeros(dense_size, dtype=self.value_dtype, device=self.device)
            sparse_y = self.sparse_tensor(torch.zeros(sparse_dims_shape, dtype=torch.int64, device=self.device),
                                          torch.randn(dense_dims_shape, dtype=self.value_dtype, device=self.device),
                                          torch.Size(sparse_size))
            with self.assertRaisesRegex(
                    RuntimeError,
                    "add: expected 'self' and 'other' to have same size"):
                x + sparse_y

        test_shape([3, 4], [1, 4], [4, 4, 4], [3, 4, 4])
        test_shape([3, 4, 0], [1, 4], [4, 4, 4, 0], [3, 4, 4, 0])

    def _test_sparse_mask_shape(self, nnz_x1, nnz_x2, shape_i, shape_v=None):
        shape = shape_i + (shape_v or [])
        x1, _, _ = self._gen_sparse(len(shape_i), nnz_x1, shape)
        x2, _, _ = self._gen_sparse(len(shape_i), nnz_x2, shape)

        y1 = x1 + x2
        y2 = x1.clone()
        y2.add_(x2)
        expected = self.safeToDense(x1) + self.safeToDense(x2)
        self.assertEqual(self.safeToDense(y1), expected)
        self.assertEqual(self.safeToDense(y2), expected)

    def _test_sparse_mask_fixed(self):
        i = self.index_tensor([
            [1, 3, 0, 4],
            [2, 1, 2, 3],
        ])
        v = self.value_tensor([1, 2, 3, 4])
        x = self.sparse_tensor(i, v, torch.Size([5, 4])).coalesce()
        dense = self.value_tensor([
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [9, 10, 11, 12],
            [13, 14, 15, 16],
            [17, 18, 19, 20],
        ])
        exp_v = self.value_tensor([7, 14, 3, 20])
        res = dense.sparse_mask(x)
        expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4]))
        self.assertEqual(res, expected)

        v = self.value_empty(4, 0)
        x = self.sparse_tensor(i, v, torch.Size([5, 4, 0])).coalesce()
        dense = self.value_empty(5, 4, 0)
        exp_v = self.value_empty(4, 0)
        res = dense.sparse_mask(x)
        expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4, 0]))
        self.assertEqual(res, expected)

    def test_sparse_mask(self):
        self._test_sparse_mask_fixed()

        self._test_sparse_mask_shape(9, 12, [5, 6])
        self._test_sparse_mask_shape(9, 12, [10, 10, 10])
        self._test_sparse_mask_shape(9, 12, [50, 30, 20])
        self._test_sparse_mask_shape(9, 12, [5, 5, 5, 5, 5, 5])
        self._test_sparse_mask_shape(0, 12, [10, 10, 10])
        self._test_sparse_mask_shape(9, 0, [10, 10, 10])
        self._test_sparse_mask_shape(0, 0, [10, 10, 10])
        self._test_sparse_mask_shape(0, 0, [10, 10, 0])

    def _test_sparse_mask_hybrid_fixed(self):
        i = self.index_tensor([
            [1, 3, 0, 4],
            [2, 1, 2, 3],
        ])
        v = self.value_tensor([[1, 2], [2, 3], [3, 4], [4, 5]])
        x = self.sparse_tensor(i, v, torch.Size([5, 4, 2])).coalesce()
        dense = self.value_tensor([
            [[1, 3], [2, 2], [3, 3], [4, 2]],
            [[5, 7], [6, 7], [7, 9], [8, 9]],
            [[9, 2], [10, 4], [11, 1], [12, 3]],
            [[13, 5], [14, 1], [15, 1], [16, 6]],
            [[17, 7], [18, 2], [19, 7], [20, 1]],
        ])
        res = dense.sparse_mask(x)
        exp_v = self.value_tensor([[7, 9], [14, 1], [3, 3], [20, 1]])
        expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4, 2]))
        self.assertEqual(res, expected)

        v = self.value_empty(4, 2, 0)
        x = self.sparse_tensor(i, v, torch.Size([5, 4, 2, 0])).coalesce()
        dense = self.value_empty(5, 4, 2, 0)
        res = dense.sparse_mask(x)
        exp_v = self.value_empty(4, 2, 0)
        expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4, 2, 0]))
        self.assertEqual(res, expected)

    def test_sparse_mask_hybrid(self):
        self._test_sparse_mask_hybrid_fixed()

        self._test_sparse_mask_shape(9, 12, [5, 6], [2, 3])
        self._test_sparse_mask_shape(9, 12, [10, 10, 10], [3])
        self._test_sparse_mask_shape(9, 12, [50, 30, 20], [2])
        self._test_sparse_mask_shape(9, 12, [5, 5, 5, 5, 5, 5], [2])
        self._test_sparse_mask_shape(0, 12, [10, 10, 10], [2])
        self._test_sparse_mask_shape(9, 0, [10, 10, 10], [2])
        self._test_sparse_mask_shape(0, 0, [10, 10, 10], [2])
        self._test_sparse_mask_shape(0, 0, [10, 10, 0], [2, 0])

    def _test_zeros(self, nnzs, shape, out_shape_i, out_shape_v=None):
        out_shape = out_shape_i + (out_shape_v or [])
        for nnz in nnzs:
            out, _, _ = self._gen_sparse(len(out_shape_i), nnz, out_shape)
            torch.zeros(*shape, out=out)
            self.assertEqual(tuple(out.size()), tuple(shape))
            self.assertTrue(out._indices().numel() == out._values().numel() == 0)
            self.assertEqual(out.sparse_dim(), len(shape))
            self.assertEqual(out.dense_dim(), 0)

    def test_zeros(self):
        def test_shape(i_shapes, v_shapes, shape, nnzs):
            for i_dim in range(1, len(i_shapes) + 1):
                for v_dim in range(len(v_shapes) + 1):
                    self._test_zeros(nnzs, shape, i_shapes[:i_dim], v_shapes[:v_dim])

        test_shape([2, 3, 4], [3, 4, 5, 6], [2, 3, 4], [9, 12])
        test_shape([0, 3, 4], [3, 4, 5, 6], [2, 3, 4], [0])
        test_shape([2, 3, 4], [0, 4, 5, 6], [2, 3, 4], [9, 12])
        test_shape([2, 3, 4], [3, 4, 5, 6], [2, 3, 0], [9, 12])
        test_shape([0, 3, 4], [3, 4, 5, 6], [2, 3, 0], [0])
        test_shape([2, 3, 4], [0, 4, 5, 6], [2, 3, 0], [9, 12])

    def _test_zeros_like(self, nnzs, template_shape_i, template_shape_v=None):
        template_shape_v = template_shape_v or []
        template_shape = template_shape_i + template_shape_v
        for nnz in nnzs:
            t, _, _ = self._gen_sparse(len(template_shape_i), nnz, template_shape)
            res = torch.zeros_like(t)
            self.assertEqual(tuple(res.size()), tuple(template_shape))
            self.assertTrue(res._indices().numel() == res._values().numel() == 0)
            self.assertEqual(res.sparse_dim(), len(template_shape_i))
            self.assertEqual(res.dense_dim(), len(template_shape_v))

    def test_zeros_like(self):
        def test_shape(i_shapes, v_shapes, nnzs):
            for i_dim in range(1, len(i_shapes) + 1):
                for v_dim in range(len(v_shapes) + 1):
                    self._test_zeros_like(nnzs, i_shapes[:i_dim], v_shapes[:v_dim])

        test_shape([2, 3, 4], [3, 4, 5, 6], [9, 12])
        test_shape([0, 3, 4], [3, 4, 5, 6], [0])
        test_shape([2, 3, 4], [0, 4, 5, 6], [9, 12])
        test_shape([2, 3, 4], [3, 4, 5, 6], [9, 12])
        test_shape([0, 3, 4], [3, 4, 5, 6], [0])
        test_shape([2, 3, 4], [0, 4, 5, 6], [9, 12])

    def _test_narrow(self, input, narrow_args):
        expected = input.to_dense().narrow(*narrow_args)
        self.assertEqual(expected, input.narrow_copy(*narrow_args).to_dense())

    def _all_narrow_combs(self, shape):
        for dim, dim_sz in enumerate(shape):
            for start in range(dim_sz):
                for length in range(dim_sz - start):
                    yield [dim, start, length]
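
    # Example (illustrative): for shape == [2, 3] this generates every valid
    # [dim, start, length] triple for narrow(), e.g. [0, 0, 0], [0, 0, 1],
    # [0, 1, 0], [1, 0, 2], ...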

    def test_narrow(self):
        shape = [3, 3, 4, 2]
        input, _, _ = self._gen_sparse(4, 19, shape)
        for narrow_args in self._all_narrow_combs(shape):
            self._test_narrow(input, narrow_args)

        self.assertRaises(RuntimeError, lambda: input.narrow_copy(-1, 0, 3))  # dim < 0
        self.assertRaises(RuntimeError, lambda: input.narrow_copy(10, 0, 3))  # dim > input.dim()
        self.assertRaises(RuntimeError, lambda: input.narrow_copy(0, shape[0] + 1, 3))  # start > size of dim
        self.assertRaises(RuntimeError, lambda: input.narrow_copy(0, 2, shape[0]))  # start+length > size of dim

        with_dense, _, _ = self._gen_sparse(2, 7, shape)
        for narrow_args in self._all_narrow_combs(shape):
            self._test_narrow(with_dense, narrow_args)

        self.assertRaises(RuntimeError, lambda: with_dense.narrow_copy(10, 0, 3))  # dim > sparse_dim + dense_dim

    def _test_log1p_tensor(self, input, dense_tensor):
        expected_output = dense_tensor.log1p()
        self.assertEqual(expected_output, input.log1p().to_dense())
        self.assertEqual(expected_output, input.coalesce().log1p_().to_dense())

        # test in-place op on uncoalesced input
        with self.assertRaisesRegex(RuntimeError,
                                    "in-place on uncoalesced tensors is not supported yet"):
            input.log1p_()

        input.requires_grad_()
        self.assertTrue(input.requires_grad)

        # test autograd
        x = input.clone()
        y = input.log1p()
        with self.assertRaisesRegex(RuntimeError,
                                    "log1p of a sparse tensor is made to be non-differentiable"):
            y.backward(x)

    def test_log1p(self):
        input = torch.sparse_coo_tensor(
            torch.LongTensor([[0], [1], [2]]).transpose(1, 0).clone().detach(),
            torch.FloatTensor([3, 4, 5]),
            torch.Size([3]))
        self._test_log1p_tensor(input, torch.as_tensor([3., 4., 5.]))

        # test uncoalesced input
        input_uncoalesced = torch.sparse_coo_tensor(
            torch.LongTensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0).clone().detach(),
            torch.FloatTensor([2, 3, 4, 1, 1, 1]),
            torch.Size([3]))
        self._test_log1p_tensor(input_uncoalesced, torch.as_tensor([3., 4., 5.]))

        # test on empty sparse tensors
        input = torch.sparse_coo_tensor(
            torch.zeros([2, 0]),
            torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
            torch.Size([0, 0, 5, 5, 5, 5, 5, 5, 0]))
        self._test_log1p_tensor(input, torch.zeros([0, 0, 5, 5, 5, 5, 5, 5, 0]))

        input = torch.sparse_coo_tensor(
            torch.zeros([1, 5]),
            torch.zeros([5, 6, 0]),
            torch.Size([5, 6, 0]))
        self._test_log1p_tensor(input, torch.zeros([5, 6, 0]))

    def test_sparse_add_coalesce(self):
        i = self.index_tensor([[1, 2, 1]])
        v = self.value_tensor([3, 4, 5])
        x = self.sparse_tensor(i, v, torch.Size([3]))
        y = self.sparse_tensor(i, v, torch.Size([3]))
        z = x + y

        self.assertFalse(z._indices().numel() != 2 and z.is_coalesced())

        i = self.index_tensor([[1, 2, 1]])
        v = self.value_empty(3, 0)
        x = self.sparse_tensor(i, v, torch.Size([3, 0]))
        y = self.sparse_tensor(i, v, torch.Size([3, 0]))
        z = x + y

        self.assertFalse(z._indices().numel() != 2 and z.is_coalesced())

    @cuda_only
    def test_storage_not_null(self):
        x = torch.cuda.sparse.FloatTensor(2)
        self.assertNotEqual(x.get_device(), -1)

        x = torch.cuda.sparse.FloatTensor(2, 0)
        self.assertNotEqual(x.get_device(), -1)

    @cuda_only
    def test_same_gpu(self):
        def check_device(x, device_id):
            self.assertEqual(x.get_device(), device_id)
            self.assertEqual(x._values().get_device(), device_id)
            self.assertEqual(x._indices().get_device(), device_id)

    def _test_new_device(self, size, device):
        with torch.cuda.device(device):
            x = torch.cuda.sparse.DoubleTensor(*size)
        self.assertEqual(x.get_device(), device)
        x1 = x.new()
        x2 = x.new(2, 3)
        self.assertEqual(x1.get_device(), device)
        self.assertEqual(x2.get_device(), device)

    @cuda_only
    def test_new_device_single_gpu(self):
        self._test_new_device((), 0)
        self._test_new_device((30, 20), 0)
        self._test_new_device((30, 20, 10), 0)

    @cuda_only
    @unittest.skipIf(torch.cuda.device_count() < 2, "only one GPU detected")
    def test_new_device_multi_gpu(self):
        self._test_new_device((), 1)
        self._test_new_device((30, 20), 1)
        self._test_new_device((30, 20, 10), 1)

    def test_new(self):
        def test_shape(sparse_dims, nnz, with_size):
            x, indices, values = self._gen_sparse(sparse_dims, nnz, with_size)
            self.assertEqual(x.new(indices, values, x.size()), x)

        test_shape(3, 10, 100)
        test_shape(3, 0, [100, 100, 0])

    def test_factory(self):
        for test_empty_tensor in [True, False]:
            if test_empty_tensor:
                default_size = torch.Size([1, 3, 0])
                size = torch.Size([3, 3, 0])
            else:
                default_size = torch.Size([1, 3])
                size = torch.Size([3, 3])
            for include_size in [True, False]:
                for use_tensor_idx in [True, False]:
                    for use_tensor_val in [True, False]:
                        for use_cuda in ([False] if not torch.cuda.is_available() else [True, False]):
                            # CUDA sparse tensors have to include the size
                            include_size = include_size or use_cuda
                            dtype = torch.float64
                            long_dtype = torch.int64
                            device = torch.device('cpu') if not use_cuda else \
                                torch.device(torch.cuda.device_count() - 1)
                            indices = torch.tensor(([0], [2]), dtype=long_dtype) if use_tensor_idx else ([0], [2])
                            if test_empty_tensor:
                                values = torch.empty(1, 0, dtype=dtype)
                            else:
                                if use_tensor_val:
                                    values = torch.tensor([1.], dtype=dtype)
                                else:
                                    values = 1.
                            if include_size:
                                sparse_tensor = torch.sparse_coo_tensor(indices, values, size, dtype=dtype,
                                                                        device=device, requires_grad=True)
                            else:
                                sparse_tensor = torch.sparse_coo_tensor(indices, values, dtype=dtype,
                                                                        device=device, requires_grad=True)
                            self.assertEqual(indices, sparse_tensor._indices())
                            self.assertEqual(values, sparse_tensor._values())
                            self.assertEqual(size if include_size else default_size, sparse_tensor.size())
                            self.assertEqual(dtype, sparse_tensor.dtype)
                            if use_cuda:
                                self.assertEqual(device, sparse_tensor._values().device)
                            self.assertEqual(True, sparse_tensor.requires_grad)
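
    # Size-inference sketch (illustrative): with no explicit size, each sparse
    # dim is inferred as max index + 1 and trailing dims come from the values,
    # which is what `default_size` above encodes.
    #
    #     t = torch.sparse_coo_tensor(([0], [2]), [1.])
    #     t.size()  # torch.Size([1, 3])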

    def test_factory_size_check(self):
        indices = self.index_tensor([
            [1, 2],
            [0, 2],
        ])
        values = self.value_tensor([.5, .5])
        sizes = torch.Size([2, 3])
        with self.assertRaisesRegex(RuntimeError, "size is inconsistent with indices"):
            torch.sparse_coo_tensor(indices, values, sizes)

        indices.fill_(-1)
        with self.assertRaisesRegex(RuntimeError, "found negative index"):
            torch.sparse_coo_tensor(indices, values, sizes)

        indices = self.index_tensor([
            [1, 2],
            [0, 2],
        ])
        values = self.value_empty(2, 1, 0)
        sizes = torch.Size([2, 3, 1, 0])
        with self.assertRaisesRegex(RuntimeError, "size is inconsistent with indices"):
            torch.sparse_coo_tensor(indices, values, sizes)

        values = self.value_empty(2, 2, 2)
        sizes = torch.Size([0, 0, 2, 2])
        with self.assertRaisesRegex(RuntimeError, "size is inconsistent with indices"):
            torch.sparse_coo_tensor(indices, values, sizes)

        values = self.value_tensor([[1, 1, 1], [1, 1, 1]])
        sizes = torch.Size([3, 3, 2])
        with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
            torch.sparse_coo_tensor(indices, values, sizes)

        values = self.value_empty(2, 1, 0)
        sizes = torch.Size([3, 3, 2, 0])
        with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
            torch.sparse_coo_tensor(indices, values, sizes)

    def test_factory_default(self):
        tensor = self.legacy_sparse_tensor()
        expected_indices = self.index_tensor([[]])
        expected_size = torch.Size([0])
        self.assertEqual(tensor._indices(), expected_indices)
        self.assertEqual(tensor.shape, expected_size)

    def test_factory_empty_indices(self):
        device = 'cuda' if self.is_cuda else 'cpu'
        tensor = self.legacy_sparse_tensor()
        expected_indices = torch.empty((1, 0), dtype=torch.long, device=device)
        self.assertEqual(tensor._indices(), expected_indices)

        tensor = torch.sparse_coo_tensor(torch.Size([2, 0]), device=device)
        expected_indices = torch.empty((2, 0), dtype=torch.long, device=device)
        self.assertEqual(tensor._indices(), expected_indices)

        tensor = torch.sparse_coo_tensor(torch.Size([2, 2, 0]), device=device)
        expected_indices = torch.empty((3, 0), dtype=torch.long, device=device)
        self.assertEqual(tensor._indices(), expected_indices)

        tensor = torch.sparse_coo_tensor(torch.Size([2, 2, 0, 0]), device=device)
        expected_indices = torch.empty((4, 0), dtype=torch.long, device=device)
        self.assertEqual(tensor._indices(), expected_indices)

    def test_factory_nnz(self):
        indices = self.index_tensor([[0]])  # (sparse_dim, nnz): (1, 1)
        values = self.value_tensor([[1, 1], [1, 1]])  # (nnz, ...): (2, 2)
        sizes = torch.Size([2, 2])
        with self.assertRaisesRegex(RuntimeError, "indices and values must have same nnz"):
            torch.sparse_coo_tensor(indices, values, sizes)

        indices = self.index_tensor([[0]])  # (sparse_dim, nnz): (1, 1)
        values = self.value_empty(2, 0)  # (nnz, ...): (2, 0)
        sizes = torch.Size([2, 0])
        with self.assertRaisesRegex(RuntimeError, "indices and values must have same nnz"):
            torch.sparse_coo_tensor(indices, values, sizes)

    def test_factory_nnz_zero(self):
        def test_shape(i_shape, v_shape, size, expected_size):
            device = 'cuda' if self.is_cuda else 'cpu'
            if size:
                t = torch.sparse_coo_tensor(torch.empty(i_shape), torch.empty(v_shape), torch.Size(size),
                                            device=device)
            else:
                t = torch.sparse_coo_tensor(torch.empty(i_shape), torch.empty(v_shape), device=device)
            expected_indices = torch.empty(i_shape, device=device)
            expected_values = torch.empty(v_shape, device=device)
            expected_size = torch.Size(expected_size)
            self.assertEqual(t._indices(), expected_indices)
            self.assertEqual(t._values(), expected_values)
            self.assertEqual(t.size(), expected_size)

        test_shape([1, 0], [0, 2, 4, 0], None, [0, 2, 4, 0])
        test_shape([3, 0], [0, 2, 4, 0], None, [0, 0, 0, 2, 4, 0])
        test_shape([1, 0], [0, 2, 4, 0], [0, 2, 4, 0], [0, 2, 4, 0])
        test_shape([3, 0], [0, 2, 4, 0], [0, 0, 0, 2, 4, 0], [0, 0, 0, 2, 4, 0])
        test_shape([3, 0], [0, 2, 4, 0], [1, 2, 3, 2, 4, 0], [1, 2, 3, 2, 4, 0])

    def test_factory_dense_dim(self):
        indices = self.index_tensor([[0]])
        values = self.value_tensor([[[1, 1, 1], [1, 1, 1]]])
        sizes = torch.Size([1, 3, 4])
        with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
            torch.sparse_coo_tensor(indices, values, sizes)

        indices = self.index_tensor([[0]])
        values = self.value_empty(1, 2, 3, 0)
        sizes = torch.Size([1, 3, 4, 0])
        with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
            torch.sparse_coo_tensor(indices, values, sizes)

    def test_factory_type_inference(self):
        t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.tensor([1.], dtype=torch.float32))
        self.assertEqual(torch.float32, t.dtype)
        t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.tensor([1.], dtype=torch.float64))
        self.assertEqual(torch.float64, t.dtype)
        t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.tensor([1]))
        self.assertEqual(torch.int64, t.dtype)

        t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.FloatTensor(1, 0))
        self.assertEqual(torch.float32, t.dtype)
        t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.DoubleTensor(1, 0))
        self.assertEqual(torch.float64, t.dtype)
        t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.LongTensor(1, 0))
        self.assertEqual(torch.int64, t.dtype)

    @cuda_only
    def test_factory_device_type_inference(self):
        for indices_device in ['cuda', 'cpu']:
            for values_device in ['cuda', 'cpu']:
                for sparse_device in ['cuda', 'cpu', None]:
                    for test_empty_tensor in [True, False]:
                        if test_empty_tensor:
                            t = torch.sparse_coo_tensor(torch.tensor(([0], [2]), device=indices_device),
                                                        torch.empty(1, 0).to(values_device),
                                                        (1, 3, 0), device=sparse_device)
                        else:
                            t = torch.sparse_coo_tensor(torch.tensor(([0], [2]), device=indices_device),
                                                        torch.tensor([1.], device=values_device),
                                                        (1, 3), device=sparse_device)
                        should_be_cuda = sparse_device == 'cuda' or (sparse_device is None and values_device == 'cuda')
                        self.assertEqual(should_be_cuda, t.is_cuda)

    @cpu_only
    def test_factory_copy(self):
        def test_tensor(indices, values, indices_equal, values_equal):
            sparse_tensor = torch.sparse_coo_tensor(indices, values, dtype=torch.float64)
            if indices_equal:
                self.assertEqual(indices.data_ptr(), sparse_tensor._indices().data_ptr())
            else:
                self.assertNotEqual(indices.data_ptr(), sparse_tensor._indices().data_ptr())
            if values_equal:
                self.assertEqual(values.data_ptr(), sparse_tensor._values().data_ptr())
            else:
                self.assertNotEqual(values.data_ptr(), sparse_tensor._values().data_ptr())

        # both correct
        indices = torch.tensor(([0], [2]), dtype=torch.int64)
        values = torch.tensor([1.], dtype=torch.float64)
        test_tensor(indices, values, True, True)

        indices = torch.tensor(([0], [2]), dtype=torch.int64)
        values = torch.DoubleTensor(1, 0)
        test_tensor(indices, values, True, True)

        # only indices correct
        indices = torch.tensor(([0], [2]), dtype=torch.int64)
        values = torch.tensor([1.], dtype=torch.float32)
        test_tensor(indices, values, True, False)

        indices = torch.tensor(([0], [2]), dtype=torch.int64)
        values = torch.FloatTensor(1, 0)
        test_tensor(indices, values, True, True)  # An empty tensor's data_ptr is always equal to 0

        # only values correct
        indices = torch.tensor(([0], [2]), dtype=torch.int32)
        values = torch.tensor([1.], dtype=torch.float64)
        test_tensor(indices, values, False, True)

        indices = torch.tensor(([0], [2]), dtype=torch.int32)
        values = torch.DoubleTensor(1, 0)
        test_tensor(indices, values, False, True)

        # neither correct
        indices = torch.tensor(([0], [2]), dtype=torch.int32)
        values = torch.tensor([1.], dtype=torch.float32)
        test_tensor(indices, values, False, False)

        indices = torch.tensor(([0], [2]), dtype=torch.int32)
        values = torch.FloatTensor(1, 0)
        test_tensor(indices, values, False, True)  # An empty tensor's data_ptr is always equal to 0
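
    # The pattern above in one sentence: torch.sparse_coo_tensor only copies
    # an input when it has to convert it, so indices/values that already have
    # the requested dtype are aliased, and comparing data_ptr() reveals which
    # path was taken. Empty tensors are the exception, since their data_ptr()
    # is always 0.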

    def test_constructor_device_legacy(self):
        i = torch.tensor([[0, 1, 1], [2, 0, 2]])
        v = torch.tensor([3., 4., 5.])
        size = torch.Size([2, 3])

        self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(i, v, device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(i, v, size, device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(torch.Size([2, 3, 4]), device='cuda'))

        x = torch.sparse_coo_tensor(i, v, size, device='cpu')
        self.assertRaises(RuntimeError, lambda: x.new(device='cuda'))
        self.assertRaises(RuntimeError, lambda: x.new(i, v, device='cuda'))
        self.assertRaises(RuntimeError, lambda: x.new(i, v, size, device='cuda'))
        self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cuda'))

        if torch.cuda.is_available():
            self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(device='cpu'))
            self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(i, v, device='cpu'))
            self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(i, v, size, device='cpu'))
            self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(torch.Size([2, 3, 4]), device='cpu'))

            x = torch.sparse_coo_tensor(i, v, size, device='cuda')
            self.assertRaises(RuntimeError, lambda: x.new(device='cpu'))
            self.assertRaises(RuntimeError, lambda: x.new(i, v, device='cpu'))
            self.assertRaises(RuntimeError, lambda: x.new(i, v, size, device='cpu'))
            self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cpu'))

    def test_dtypes(self):
        all_sparse_dtypes = torch.testing.get_all_dtypes()
        do_test_dtypes(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cpu'))
        if torch.cuda.is_available():
            do_test_dtypes(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cuda:0'))

    def test_empty_full(self):
        all_sparse_dtypes = torch.testing.get_all_dtypes()
        do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cpu'))
        if torch.cuda.device_count() > 0:
            do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, None)
            do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cuda:0'))

    def test_is_sparse(self):
        x = torch.randn(3, 3)
        self.assertFalse(x.is_sparse)

        x = torch.randn(3, 3, 0)
        self.assertFalse(x.is_sparse)

        x = self.legacy_sparse_tensor()
        self.assertTrue(x.is_sparse)

        x = self.sparse_empty(1, 0)
        self.assertTrue(x.is_sparse)

    def test_resize_as(self):
        def do_test(t):
            y = t.new().resize_as_(t).zero_()
            self.assertEqual(y.shape, t.shape)
            # resize_as_ must preserve sparse_dim/dense_dim so that y can be
            # added to t
            self.assertEqual(t, t + y)

        do_test(self.legacy_sparse_tensor())
        do_test(self.sparse_empty(3, 0))
        do_test(self.sparse_empty(3, 3))

    def _test_resize_shape(self, x_i, x_v, x_size, y_i, y_v, y_size):
        x_v_numel = torch.zeros(x_v).numel()
        y_v_numel = torch.zeros(y_v).numel()
        x = torch.sparse_coo_tensor(torch.zeros(x_i),
                                    torch.arange(x_v_numel).resize_(x_v).to(torch.float),
                                    torch.Size(x_size))
        x_dense = x.to_dense()
        y = torch.sparse_coo_tensor(torch.zeros(y_i),
                                    torch.ones(y_v).to(torch.float),
                                    torch.Size(y_size))
        y_dense = y.to_dense()
        x.resize_as_(y)
        x_dense.resize_as_(y_dense)
        self.assertEqual(x.shape, y.shape)
        self.assertEqual(x.shape, x_dense.shape)

        # make sure the original data is preserved after resizing
        self.assertEqual(x.to_dense().view(-1)[0:x_v_numel].view(x_v),
                         x_dense.view(-1)[0:x_v_numel].view(x_v))

    def test_resize(self):
        # 1. expand the size of some dense dimensions [supported]
        self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                [1, 1], [1, 2, 4], [2, 2, 4])

        self._test_resize_shape([1, 1], [1, 2, 0], [2, 2, 0],
                                [1, 1], [1, 2, 4], [2, 2, 4])

        # 2. expand the size of some sparse dimensions [supported]
        self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                [1, 1], [1, 2, 3], [4, 2, 3])

        # 3. change the shapes of both sparse and dense dimensions when nnz is zero [supported]
        self._test_resize_shape([1, 0], [0, 2, 3], [2, 2, 3],
                                [2, 0], [0, 2, 4, 5], [1, 1, 2, 4, 5])

        self._test_resize_shape([1, 0], [0, 2, 3], [2, 2, 3],
                                [2, 0], [0, 2, 4, 0], [1, 1, 2, 4, 0])

        # 4. add dims to dense dimensions [not supported]
        with self.assertRaisesRegex(RuntimeError, "changing the number of dense dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [1, 1], [1, 2, 3, 4], [2, 2, 3, 4])

        with self.assertRaisesRegex(RuntimeError, "changing the number of dense dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [1, 1], [1, 2, 3, 0], [2, 2, 3, 0])

        # 5. remove dims from dense dimensions [not supported]
        with self.assertRaisesRegex(RuntimeError, "changing the number of dense dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [1, 1], [1, 2], [2, 2])

        # 6. change the number of sparse dimensions on a non-empty sparse tensor [not supported]
        with self.assertRaisesRegex(RuntimeError, "changing the number of sparse dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [2, 1], [1, 2, 3], [1, 2, 2, 3])

        # 7. shrink the size of some sparse dimensions on a non-empty sparse tensor [not supported]
        with self.assertRaisesRegex(RuntimeError, "shrinking the size of sparse dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [1, 1], [1, 2, 3], [1, 2, 3])

        # 8. shrink the size of some dense dimensions on a non-empty sparse tensor [not supported]
        with self.assertRaisesRegex(RuntimeError, "shrinking the size of dense dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [1, 1], [1, 2, 2], [2, 2, 2])

        with self.assertRaisesRegex(RuntimeError, "shrinking the size of dense dimensions"):
            self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
                                    [1, 1], [1, 2, 0], [2, 2, 0])

    def test_is_nonzero(self):
        self.assertTrue(torch.sparse_coo_tensor(([0],), 1., (1,)).is_nonzero())
        self.assertFalse(torch.sparse_coo_tensor(([0],), 0., (1,)).is_nonzero())
        self.assertFalse(torch.sparse_coo_tensor(([0], [0]), 0., (1, 1)).is_nonzero())
        self.assertFalse(torch.sparse_coo_tensor(([0, 0],), (0., 0.), (1,)).is_nonzero())
        self.assertFalse(torch.sparse_coo_tensor(([0, 0],), (-1., 1.), (1,)).is_nonzero())
        self.assertTrue(torch.sparse_coo_tensor(torch.zeros(0, 1), 12.3, []).is_nonzero())
        with self.assertRaisesRegex(RuntimeError,
                                    "bool value of Tensor with no values is ambiguous"):
            torch.sparse_coo_tensor(([0, 1],), self.value_empty(2, 0), (4, 0)).is_nonzero()

    def test_allow_tensor_metadata_change(self):
        def do_test(t):
            with self.assertRaisesRegex(
                    RuntimeError,
                    "raw_resize_ is not allowed on Tensor created from .data or .detach()"):
                t.transpose_(0, 1)
            with self.assertRaisesRegex(
                    RuntimeError,
                    "resize_ is not allowed on Tensor created from .data or .detach()"):
                t.resize_as_(self.sparse_empty(3, 3))
            with self.assertRaisesRegex(
                    RuntimeError,
                    "resize_and_clear_ is not allowed on Tensor created from .data or .detach()"):
                t.mul_(t)
            with self.assertRaisesRegex(
                    RuntimeError,
                    "set_coalesced is not allowed on Tensor created from .data or .detach()"):
                t._coalesced_(True)
            with self.assertRaisesRegex(
                    RuntimeError,
                    "set_indices_and_values_unsafe is not allowed on Tensor created from .data or .detach()"):
                t.add_(t)
            with self.assertRaisesRegex(
                    RuntimeError,
                    "resize_and_clear_ is not allowed on Tensor created from .data or .detach()"):
                t.zero_()
            with self.assertRaisesRegex(
                    RuntimeError,
                    "resize_ is not allowed on Tensor created from .data or .detach()"):
                t.copy_(self.sparse_empty(3, 3))

        do_test(self.sparse_empty(3, 0).data)
        do_test(self.sparse_empty(3, 0).detach())


class TestUncoalescedSparse(TestSparse):
    def setUp(self):
        super(TestUncoalescedSparse, self).setUp()
        self.is_uncoalesced = True


@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
class TestCudaSparse(TestSparse):
    def setUp(self):
        super(TestCudaSparse, self).setUp()
        self.is_cuda = True
        self.device = 'cuda'
        self.legacy_sparse_tensor = torch.cuda.sparse.DoubleTensor


@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
class TestCudaUncoalescedSparse(TestCudaSparse):
    def setUp(self):
        super(TestCudaUncoalescedSparse, self).setUp()
        self.is_uncoalesced = True


class TestSparseOneOff(TestCase):
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_cuda_from_cpu(self):
        with self.assertRaisesRegex(
                RuntimeError,
                "backend of indices \\(CUDA\\) must match backend of values \\(CPU\\)"):
            torch.sparse.FloatTensor(torch.zeros(1, 4).long().cuda(),
                                     torch.randn(4, 4, 4),
                                     [3, 4, 4])

        with self.assertRaisesRegex(
                RuntimeError,
                "backend of indices \\(CUDA\\) must match backend of values \\(CPU\\)"):
            torch.sparse.FloatTensor(torch.zeros(1, 4).long().cuda(),
                                     torch.randn(4, 4, 4, 0),
                                     [3, 4, 4, 0])

        with self.assertRaisesRegex(
                RuntimeError,
                "backend of indices \\(CUDA\\) must match backend of values \\(CPU\\)"):
            torch.sparse.FloatTensor(torch.LongTensor(1, 0).cuda(),
                                     torch.randn(0, 4, 4, 0),
                                     [0, 4, 4, 0])

    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_cuda_sparse_cpu_dense_add(self):
        x = torch.zeros(3, 4, 4)
        sparse_y = torch.cuda.sparse.FloatTensor(torch.zeros(1, 4).long().cuda(),
                                                 torch.randn(4, 4, 4).cuda(),
                                                 [3, 4, 4])
        with self.assertRaisesRegex(
                RuntimeError,
                "add: expected 'other' to be a CPU tensor\\, but got a CUDA tensor"):
            x + sparse_y

        x = torch.zeros(3, 4, 4, 0)
        sparse_y = torch.cuda.sparse.FloatTensor(torch.zeros(1, 4).long().cuda(),
                                                 torch.randn(4, 4, 4, 0).cuda(),
                                                 [3, 4, 4, 0])
        with self.assertRaisesRegex(
                RuntimeError,
                "add: expected 'other' to be a CPU tensor\\, but got a CUDA tensor"):
            x + sparse_y

        x = torch.zeros(0, 4, 4, 0)
        sparse_y = torch.cuda.sparse.FloatTensor(torch.LongTensor(1, 0).cuda(),
                                                 torch.randn(0, 4, 4, 0).cuda(),
                                                 [0, 4, 4, 0])
        with self.assertRaisesRegex(
                RuntimeError,
                "add: expected 'other' to be a CPU tensor\\, but got a CUDA tensor"):
            x + sparse_y


if __name__ == '__main__':
    run_tests()