#include <gtest/gtest.h>

#include <ATen/ATen.h>

// Asserts that `tensor` has the expected device, device index, dtype, and layout.
#define REQUIRE_TENSOR_OPTIONS(device_, index_, type_, layout_)            \
  ASSERT_TRUE(                                                             \
      tensor.device().type() == at::Device((device_), (index_)).type());   \
  ASSERT_TRUE(                                                             \
      tensor.device().index() == at::Device((device_), (index_)).index()); \
  ASSERT_EQ(tensor.dtype(), (type_));                                      \
  ASSERT_TRUE(tensor.layout() == (layout_))

// at::tensor should respect an explicit CUDA device index at construction time.
TEST(TensorTest, AllocatesTensorOnTheCorrectDevice_MultiCUDA) {
  auto tensor = at::tensor({1, 2, 3}, at::device({at::kCUDA, 1}));
  ASSERT_EQ(tensor.device().type(), at::Device::Type::CUDA);
  ASSERT_EQ(tensor.device().index(), 1);
}
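
// Tensor::to should move a tensor between the CPU and specific CUDA devices,
// leaving dtype and layout unchanged unless they are changed explicitly.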
TEST(TensorTest, ToDevice_MultiCUDA) {
  auto tensor = at::empty({3, 4});
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);

  tensor = tensor.to({at::kCUDA, 1});
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kFloat, at::kStrided);

  tensor = tensor.to({at::kCUDA, 0});
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kFloat, at::kStrided);

  tensor = tensor.to({at::kCUDA, 1});
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kFloat, at::kStrided);

  tensor = tensor.to(at::Device(at::kCPU));
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);

  tensor = tensor.to(at::kCUDA);
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kFloat, at::kStrided);

  tensor = tensor.to(at::device({at::kCUDA, 1}));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kFloat, at::kStrided);

  tensor = tensor.to(at::device({at::kCUDA, 0}));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kFloat, at::kStrided);

  tensor = tensor.to(at::dtype(at::kDouble));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kDouble, at::kStrided);

  tensor = tensor.to(at::device({at::kCUDA, 1}));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kDouble, at::kStrided);

  tensor = tensor.to(at::dtype(at::kInt));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kInt, at::kStrided);

  tensor = tensor.to(at::device(at::kCPU));
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kInt, at::kStrided);

  tensor = tensor.to(at::device(at::kCUDA));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kInt, at::kStrided);
}
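
// Tensor::to can also take another tensor's dtype, device, or full options.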
TEST(TensorTest, ToTensorAndTensorAttributes_MultiCUDA) {
  auto tensor = at::empty({3, 4});
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);

  auto other = at::empty({3, 4}, at::kFloat);
  tensor = tensor.to(other);
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);

  other = at::empty({3, 4}, at::device(at::kCUDA).dtype(at::kDouble));
  tensor = tensor.to(other.dtype());
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kDouble, at::kStrided);
  tensor = tensor.to(other.device());
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kDouble, at::kStrided);

  other = at::empty({3, 4}, at::device({at::kCUDA, 1}).dtype(at::kLong));
  tensor = tensor.to(other.device(), other.dtype());
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kLong, at::kStrided);

  other = at::empty({3, 4}, at::kFloat);
  tensor = tensor.to(other.options());
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);
}
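
// When the target options already match the source tensor, Tensor::to should
// return a tensor sharing the same storage rather than making a copy.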
TEST(TensorTest, ToDoesNotCopyWhenOptionsAreAllTheSame_CUDA) {
  auto tensor = at::empty({3, 4}, at::device(at::kCUDA).dtype(at::kFloat));
  auto hopefully_not_copy = tensor.to(tensor.options());
  ASSERT_EQ(hopefully_not_copy.data<float>(), tensor.data<float>());
  hopefully_not_copy = tensor.to(at::kFloat);
  ASSERT_EQ(hopefully_not_copy.data<float>(), tensor.data<float>());
  hopefully_not_copy = tensor.to("cuda");
  ASSERT_EQ(hopefully_not_copy.data<float>(), tensor.data<float>());
  hopefully_not_copy = tensor.to(at::TensorOptions("cuda"));
  ASSERT_EQ(hopefully_not_copy.data<float>(), tensor.data<float>());
  hopefully_not_copy = tensor.to(caffe2::TypeMeta::Make<float>());
  ASSERT_EQ(hopefully_not_copy.data<float>(), tensor.data<float>());
}
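
// Device and dtype can be changed in a single Tensor::to call.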
TEST(TensorTest, ToDeviceAndDtype_MultiCUDA) {
  auto tensor = at::empty({3, 4});
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);

  tensor = tensor.to({at::kCUDA, 1}, at::kInt);
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kInt, at::kStrided);

  tensor = tensor.to(at::device({at::kCUDA, 0}).dtype(at::kLong));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 0, at::kLong, at::kStrided);

  tensor = tensor.to(at::device({at::kCUDA, 1}).dtype(at::kDouble));
  REQUIRE_TENSOR_OPTIONS(at::kCUDA, 1, at::kDouble, at::kStrided);

  tensor = tensor.to(at::kCPU, at::kInt);
  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kInt, at::kStrided);
}