import unittest
import sys

import torch
import common_utils as common
from common_utils import TEST_NUMBA, TEST_NUMPY, IS_WINDOWS
from common_cuda import TEST_NUMBA_CUDA, TEST_CUDA, TEST_MULTIGPU

if TEST_NUMPY:
    import numpy

if TEST_NUMBA:
    import numba

class TestNumbaIntegration(common.TestCase):
    @unittest.skipIf(not TEST_NUMPY, "No numpy")
    @unittest.skipIf(not TEST_CUDA, "No cuda")
    def test_cuda_array_interface(self):
        """torch.Tensor exposes __cuda_array_interface__ for cuda tensors.

        An object t is considered a cuda-tensor if:
            hasattr(t, '__cuda_array_interface__')

        A cuda-tensor provides a tensor description dict:
            shape: (integer, ...) Tensor shape.
            strides: (integer, ...) Tensor strides, in bytes.
            typestr: (str) A numpy-style typestr.
            data: (int, boolean) A (data_ptr, read-only) tuple.
            version: (int) Version 0

        See:
        https://numba.pydata.org/numba-doc/latest/cuda/cuda_array_interface.html
        """
        types = [torch.DoubleTensor, torch.FloatTensor, torch.HalfTensor,
                 torch.LongTensor, torch.IntTensor, torch.ShortTensor,
                 torch.CharTensor, torch.ByteTensor]
        dtypes = [numpy.float64, numpy.float32, numpy.float16, numpy.int64,
                  numpy.int32, numpy.int16, numpy.int8, numpy.uint8]
        for tp, npt in zip(types, dtypes):
            # CPU tensors do not expose the interface.
            cput = tp(10)
            self.assertFalse(hasattr(cput, "__cuda_array_interface__"))
            self.assertRaises(AttributeError, lambda: cput.__cuda_array_interface__)
            # Sparse CPU/CUDA tensors do not expose the interface.
            if tp not in (torch.HalfTensor,):
                indices_t = torch.empty(1, cput.size(0), dtype=torch.long).clamp_(min=0)
                sparse_t = torch.sparse_coo_tensor(indices_t, cput)

                self.assertFalse(hasattr(sparse_t, "__cuda_array_interface__"))
                self.assertRaises(
                    AttributeError, lambda: sparse_t.__cuda_array_interface__
                )

                sparse_cuda_t = torch.sparse_coo_tensor(indices_t, cput).cuda()

                self.assertFalse(hasattr(sparse_cuda_t, "__cuda_array_interface__"))
                self.assertRaises(
                    AttributeError, lambda: sparse_cuda_t.__cuda_array_interface__
                )
            # CUDA tensors have the attribute and the v0 interface.
            cudat = tp(10).cuda()

            self.assertTrue(hasattr(cudat, "__cuda_array_interface__"))

            ar_dict = cudat.__cuda_array_interface__

            self.assertEqual(
                set(ar_dict.keys()), {"shape", "strides", "typestr", "data", "version"}
            )

            self.assertEqual(ar_dict["shape"], (10,))
            self.assertEqual(ar_dict["strides"], (cudat.storage().element_size(),))
            # typestr comes from numpy, with cuda-native little-endian byte order.
            self.assertEqual(ar_dict["typestr"], numpy.dtype(npt).newbyteorder("<").str)
            self.assertEqual(ar_dict["data"], (cudat.data_ptr(), False))
            self.assertEqual(ar_dict["version"], 0)
    @unittest.skipIf(not TEST_CUDA, "No cuda")
    @unittest.skipIf(not TEST_NUMBA_CUDA, "No numba.cuda")
    def test_array_adaptor(self):
        """Torch __cuda_array_interface__ exposes tensor data to numba.cuda."""
        torch_dtypes = [torch.float16, torch.float32, torch.float64,
                        torch.uint8, torch.int8, torch.int16,
                        torch.int32, torch.int64]

        for dt in torch_dtypes:
            if dt == torch.int8 and not IS_WINDOWS:
                # "CharTensor" numpy conversion is not supported; nothing
                # further to check for this dtype.
                with self.assertRaises(TypeError):
                    torch.arange(10).to(dt).numpy()

                continue

            # CPU tensors of all types do not register as cuda arrays, and
            # conversion attempts raise a TypeError.
            cput = torch.arange(10).to(dt)
            npt = cput.numpy()

            self.assertTrue(not numba.cuda.is_cuda_array(cput))
            with self.assertRaises(TypeError):
                numba.cuda.as_cuda_array(cput)
            # Any cuda tensor is a cuda array.
            cudat = cput.to(device="cuda")
            self.assertTrue(numba.cuda.is_cuda_array(cudat))

            numba_view = numba.cuda.as_cuda_array(cudat)
            self.assertIsInstance(numba_view, numba.cuda.devicearray.DeviceNDArray)

            # The view reports the numpy dtype and strides of the cpu tensor,
            # and the shape of the cuda tensor itself.
            self.assertEqual(numba_view.dtype, npt.dtype)
            self.assertEqual(numba_view.strides, npt.strides)
            self.assertEqual(numba_view.shape, cudat.shape)
            # Compare on cuda throughout: float16 comparisons are not
            # supported cpu-side.

            # The view sees the same data the tensor holds.
            self.assertEqual(cudat, torch.tensor(numba_view.copy_to_host()).to("cuda"))

            # Writes to the torch.Tensor show up in the numba view of the
            # same memory.
            cudat[:5] = 11
            self.assertEqual(cudat, torch.tensor(numba_view.copy_to_host()).to("cuda"))
            # Strided tensors are supported.
            strided_cudat = cudat[::2]
            strided_npt = cput[::2].numpy()
            strided_numba_view = numba.cuda.as_cuda_array(strided_cudat)

            self.assertEqual(strided_numba_view.dtype, strided_npt.dtype)
            self.assertEqual(strided_numba_view.strides, strided_npt.strides)
            self.assertEqual(strided_numba_view.shape, strided_cudat.shape)
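
    # Illustrative sketch, not part of the original suite: the point of the
    # adaptor is that a numba kernel can run directly on torch-owned memory.
    # The method and kernel names are hypothetical additions.
    def _example_kernel_on_torch_memory(self):
        @numba.cuda.jit
        def add_one(arr):
            i = numba.cuda.grid(1)
            if i < arr.size:
                arr[i] += 1.0

        t = torch.zeros(16, device="cuda")
        # as_cuda_array aliases t's memory, so the kernel's writes land in t.
        add_one[1, 16](numba.cuda.as_cuda_array(t))
        self.assertEqual(t, torch.ones(16, device="cuda"))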
    @unittest.skipIf(not TEST_CUDA, "No cuda")
    @unittest.skipIf(not TEST_NUMBA_CUDA, "No numba.cuda")
    def test_conversion_errors(self):
        """Numba properly detects the array interface for torch.Tensor variants."""

        # CPU tensors are not cuda arrays.
        cput = torch.arange(100)

        self.assertFalse(numba.cuda.is_cuda_array(cput))
        with self.assertRaises(TypeError):
            numba.cuda.as_cuda_array(cput)
        # Sparse tensors are not cuda arrays, regardless of device.
        sparset = torch.sparse_coo_tensor(cput[None, :], cput)

        self.assertFalse(numba.cuda.is_cuda_array(sparset))
        with self.assertRaises(TypeError):
            numba.cuda.as_cuda_array(sparset)

        sparse_cuda_t = sparset.cuda()

        self.assertFalse(numba.cuda.is_cuda_array(sparse_cuda_t))
        with self.assertRaises(TypeError):
            numba.cuda.as_cuda_array(sparse_cuda_t)
        # Device status takes precedence over gradient status: CPU tensors
        # that require grad are still not cuda arrays.
        cpu_gradt = torch.zeros(100).requires_grad_(True)

        self.assertFalse(numba.cuda.is_cuda_array(cpu_gradt))
        with self.assertRaises(TypeError):
            numba.cuda.as_cuda_array(cpu_gradt)

        # CUDA tensors that require grad refuse to expose their data pointer,
        # raising RuntimeError rather than TypeError, so the expected failure
        # mode depends on the Python version.
        cuda_gradt = torch.zeros(100).requires_grad_(True).cuda()
        if sys.version_info.major > 2:
            # Python 3: both inspection and conversion raise RuntimeError.
            with self.assertRaises(RuntimeError):
                numba.cuda.is_cuda_array(cuda_gradt)
            with self.assertRaises(RuntimeError):
                numba.cuda.as_cuda_array(cuda_gradt)
        else:
            # Python 2: allow either failure type.
            try:
                was_cuda_array = numba.cuda.is_cuda_array(cuda_gradt)
                was_runtime_error = False
            except RuntimeError:
                was_cuda_array = False
                was_runtime_error = True

            self.assertFalse(was_cuda_array)

            if not was_runtime_error:
                with self.assertRaises(TypeError):
                    numba.cuda.as_cuda_array(cuda_gradt)
            else:
                with self.assertRaises(RuntimeError):
                    numba.cuda.as_cuda_array(cuda_gradt)
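
    # Illustrative sketch, not part of the original suite (an assumption about
    # intended usage, not something the tests assert): detaching first yields
    # a view that shares storage but does not require grad, which numba can
    # consume. The method name is a hypothetical addition.
    def _example_detach_for_numba(self, cuda_gradt):
        return numba.cuda.as_cuda_array(cuda_gradt.detach())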
    @unittest.skipIf(not TEST_CUDA, "No cuda")
    @unittest.skipIf(not TEST_NUMBA_CUDA, "No numba.cuda")
    @unittest.skipIf(not TEST_MULTIGPU, "No multigpu")
    def test_active_device(self):
        """'as_cuda_array' tensor device must match active numba context."""

        # Both torch and numba default to device 0 and can interop freely.
        cudat = torch.arange(10, device="cuda")
        self.assertEqual(cudat.device.index, 0)
        self.assertIsInstance(
            numba.cuda.as_cuda_array(cudat), numba.cuda.devicearray.DeviceNDArray
        )

        # Tensors on a non-default device raise an api error if converted
        # while another device's context is active...
        cudat = torch.arange(10, device=torch.device("cuda", 1))

        with self.assertRaises(numba.cuda.driver.CudaAPIError):
            numba.cuda.as_cuda_array(cudat)

        # ...but convert cleanly once the matching device context is entered.
        with numba.cuda.devices.gpus[cudat.device.index]:
            self.assertIsInstance(
                numba.cuda.as_cuda_array(cudat), numba.cuda.devicearray.DeviceNDArray
            )
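
    # Illustrative sketch, not part of the original suite: a hypothetical
    # helper that enters the tensor's own device context before converting,
    # mirroring the pattern the test above exercises.
    def _example_as_cuda_array_any_device(self, t):
        with numba.cuda.devices.gpus[t.device.index]:
            return numba.cuda.as_cuda_array(t)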

if __name__ == "__main__":
    common.run_tests()