// tensor.cpp — unit tests for the Caffe2 / PyTorch C++ API tensor
// functionality (a deep learning, cross-platform ML framework).
1 #include <gtest/gtest.h>
2 
3 #include <torch/types.h>
4 
5 #include <ATen/ATen.h>
6 
7 #include <cmath>
8 #include <cstddef>
9 #include <vector>
10 
11 template <typename T>
12 bool exactly_equal(at::Tensor left, T right) {
13  return left.item<T>() == right;
14 }
15 
16 template <typename T>
17 bool almost_equal(at::Tensor left, T right, T tolerance = 1e-4) {
18  return std::abs(left.item<T>() - right) < tolerance;
19 }
20 
// Asserts that the device type, device index, dtype, and layout of a tensor
// match the expected values.
// NOTE: this macro is deliberately unhygienic — it references a local
// variable named `tensor` from the enclosing test scope, so it can only be
// used inside a test body that declares `tensor`.
#define REQUIRE_TENSOR_OPTIONS(device_, index_, type_, layout_) \
 ASSERT_TRUE( \
 tensor.device().type() == at::Device((device_), (index_)).type()); \
 ASSERT_TRUE( \
 tensor.device().index() == at::Device((device_), (index_)).index()); \
 ASSERT_EQ(tensor.dtype(), (type_)); \
 ASSERT_TRUE(tensor.layout() == (layout_))
28 
29 TEST(TensorTest, ToDtype) {
30  auto tensor = at::empty({3, 4});
31  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);
32 
33  tensor = tensor.to(at::kInt);
34  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kInt, at::kStrided);
35 
36  tensor = tensor.to(at::kChar);
37  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kChar, at::kStrided);
38 
39  tensor = tensor.to(at::kDouble);
40  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kDouble, at::kStrided);
41 
42  tensor = tensor.to(at::TensorOptions(at::kInt));
43  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kInt, at::kStrided);
44 
45  tensor = tensor.to(at::TensorOptions(at::kChar));
46  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kChar, at::kStrided);
47 
48  tensor = tensor.to(at::TensorOptions(at::kDouble));
49  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kDouble, at::kStrided);
50 }
51 
52 TEST(TensorTest, ToTensorAndTensorAttributes) {
53  auto tensor = at::empty({3, 4});
54  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);
55 
56  auto other = at::empty({3, 4}, at::kInt);
57  tensor = tensor.to(other);
58  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kInt, at::kStrided);
59 
60  other = at::empty({3, 4}, at::kDouble);
61  tensor = tensor.to(other.dtype());
62  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kDouble, at::kStrided);
63  tensor = tensor.to(other.device());
64  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kDouble, at::kStrided);
65 
66  other = at::empty({3, 4}, at::kLong);
67  tensor = tensor.to(other.device(), other.dtype());
68  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kLong, at::kStrided);
69 
70  other = at::empty({3, 4}, at::kInt);
71  tensor = tensor.to(other.options());
72  REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kInt, at::kStrided);
73 }
74 
75 // Not currently supported.
76 // TEST(TensorTest, ToLayout) {
77 // auto tensor = at::empty({3, 4});
78 // REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);
79 //
80 // tensor = tensor.to(at::kSparse);
81 // REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kSparse);
82 //
83 // tensor = tensor.to(at::kStrided);
84 // REQUIRE_TENSOR_OPTIONS(at::kCPU, -1, at::kFloat, at::kStrided);
85 // }
86 
87 TEST(TensorTest, ToOptionsWithRequiresGrad) {
88  {
89  // Respects requires_grad
90  auto tensor = torch::empty({3, 4}, at::requires_grad());
91  ASSERT_TRUE(tensor.requires_grad());
92 
93  tensor = tensor.to(at::kDouble);
94  ASSERT_TRUE(tensor.requires_grad());
95 
96  // Throws if requires_grad is set in TensorOptions
97  ASSERT_THROW(
98  tensor.to(at::TensorOptions().requires_grad(true)), c10::Error);
99  ASSERT_THROW(
100  tensor.to(at::TensorOptions().requires_grad(false)), c10::Error);
101  }
102  {
103  auto tensor = torch::empty({3, 4});
104  ASSERT_FALSE(tensor.requires_grad());
105 
106  // Respects requires_grad
107  tensor = tensor.to(at::kDouble);
108  ASSERT_FALSE(tensor.requires_grad());
109 
110  // Throws if requires_grad is set in TensorOptions
111  ASSERT_THROW(
112  tensor.to(at::TensorOptions().requires_grad(true)), c10::Error);
113  ASSERT_THROW(
114  tensor.to(at::TensorOptions().requires_grad(false)), c10::Error);
115  }
116 }
117 
118 TEST(TensorTest, ToDoesNotCopyWhenOptionsAreAllTheSame) {
119  {
120  auto tensor = at::empty({3, 4}, at::kFloat);
121  auto hopefully_not_copy = tensor.to(at::kFloat);
122  ASSERT_EQ(hopefully_not_copy.data<float>(), tensor.data<float>());
123  }
124  {
125  auto tensor = at::empty({3, 4}, at::kFloat);
126  auto hopefully_not_copy = tensor.to(tensor.options());
127  ASSERT_EQ(hopefully_not_copy.data<float>(), tensor.data<float>());
128  }
129  {
130  auto tensor = at::empty({3, 4}, at::kFloat);
131  auto hopefully_not_copy = tensor.to(tensor.dtype());
132  ASSERT_EQ(hopefully_not_copy.data<float>(), tensor.data<float>());
133  }
134  {
135  auto tensor = at::empty({3, 4}, at::kFloat);
136  auto hopefully_not_copy = tensor.to(tensor.device());
137  ASSERT_EQ(hopefully_not_copy.data<float>(), tensor.data<float>());
138  }
139  {
140  auto tensor = at::empty({3, 4}, at::kFloat);
141  auto hopefully_not_copy = tensor.to(tensor);
142  ASSERT_EQ(hopefully_not_copy.data<float>(), tensor.data<float>());
143  }
144 }
145 
146 TEST(TensorTest, ContainsCorrectValueForSingleValue) {
147  auto tensor = at::tensor(123);
148  ASSERT_EQ(tensor.numel(), 1);
149  ASSERT_EQ(tensor.dtype(), at::kInt);
150  ASSERT_EQ(tensor[0].item<int32_t>(), 123);
151 
152  tensor = at::tensor(123.456f);
153  ASSERT_EQ(tensor.numel(), 1);
154  ASSERT_EQ(tensor.dtype(), at::kFloat);
155  ASSERT_TRUE(almost_equal(tensor[0], 123.456f));
156 
157  tensor = at::tensor(123.456);
158  ASSERT_EQ(tensor.numel(), 1);
159  ASSERT_EQ(tensor.dtype(), at::kDouble);
160  ASSERT_TRUE(almost_equal(tensor[0], 123.456));
161 }
162 
163 TEST(TensorTest, ContainsCorrectValuesForManyValues) {
164  auto tensor = at::tensor({1, 2, 3});
165  ASSERT_EQ(tensor.numel(), 3);
166  ASSERT_EQ(tensor.dtype(), at::kInt);
167  ASSERT_TRUE(exactly_equal(tensor[0], 1));
168  ASSERT_TRUE(exactly_equal(tensor[1], 2));
169  ASSERT_TRUE(exactly_equal(tensor[2], 3));
170 
171  tensor = at::tensor({1.5, 2.25, 3.125});
172  ASSERT_EQ(tensor.numel(), 3);
173  ASSERT_EQ(tensor.dtype(), at::kDouble);
174  ASSERT_TRUE(almost_equal(tensor[0], 1.5));
175  ASSERT_TRUE(almost_equal(tensor[1], 2.25));
176  ASSERT_TRUE(almost_equal(tensor[2], 3.125));
177 }
178 
179 TEST(TensorTest, ContainsCorrectValuesForManyValuesVariable) {
180  auto tensor = torch::tensor({1, 2, 3});
181  ASSERT_TRUE(tensor.is_variable());
182  ASSERT_EQ(tensor.numel(), 3);
183  ASSERT_EQ(tensor.dtype(), at::kInt);
184  ASSERT_TRUE(exactly_equal(tensor[0], 1));
185  ASSERT_TRUE(exactly_equal(tensor[1], 2));
186  ASSERT_TRUE(exactly_equal(tensor[2], 3));
187 
188  tensor = torch::tensor({1.5, 2.25, 3.125});
189  ASSERT_TRUE(tensor.is_variable());
190  ASSERT_EQ(tensor.numel(), 3);
191  ASSERT_EQ(tensor.dtype(), at::kDouble);
192  ASSERT_TRUE(almost_equal(tensor[0], 1.5));
193  ASSERT_TRUE(almost_equal(tensor[1], 2.25));
194  ASSERT_TRUE(almost_equal(tensor[2], 3.125));
195 }
196 
197 TEST(TensorTest, ContainsCorrectValuesWhenConstructedFromVector) {
198  std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
199  auto tensor = at::tensor(v);
200  ASSERT_EQ(tensor.numel(), v.size());
201  ASSERT_EQ(tensor.dtype(), at::kInt);
202  for (size_t i = 0; i < v.size(); ++i) {
203  ASSERT_TRUE(exactly_equal(tensor[i], v.at(i)));
204  }
205 
206  std::vector<double> w = {1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.0};
207  tensor = at::tensor(w);
208  ASSERT_EQ(tensor.numel(), w.size());
209  ASSERT_EQ(tensor.dtype(), at::kDouble);
210  for (size_t i = 0; i < w.size(); ++i) {
211  ASSERT_TRUE(almost_equal(tensor[i], w.at(i)));
212  }
213 }
214 
215 TEST(TensorTest, UsesOptionsThatAreSupplied) {
216  auto tensor = at::tensor(123, at::dtype(at::kFloat)) + 0.5;
217  ASSERT_EQ(tensor.numel(), 1);
218  ASSERT_EQ(tensor.dtype(), at::kFloat);
219  ASSERT_TRUE(almost_equal(tensor[0], 123.5));
220 
221  tensor = at::tensor({1.1, 2.2, 3.3}, at::dtype(at::kInt));
222  ASSERT_EQ(tensor.numel(), 3);
223  ASSERT_EQ(tensor.dtype(), at::kInt);
224  ASSERT_EQ(tensor.layout(), at::kStrided);
225  ASSERT_TRUE(exactly_equal(tensor[0], 1));
226  ASSERT_TRUE(exactly_equal(tensor[1], 2));
227  ASSERT_TRUE(exactly_equal(tensor[2], 3));
228 }
229 
230 TEST(TensorTest, FromBlob) {
231  std::vector<double> v = {1.0, 2.0, 3.0};
232  auto tensor = torch::from_blob(
233  v.data(), v.size(), torch::dtype(torch::kFloat64).requires_grad(true));
234  ASSERT_TRUE(tensor.is_variable());
235  ASSERT_TRUE(tensor.requires_grad());
236  ASSERT_EQ(tensor.dtype(), torch::kFloat64);
237  ASSERT_EQ(tensor.numel(), 3);
238  ASSERT_EQ(tensor[0].item<double>(), 1);
239  ASSERT_EQ(tensor[1].item<double>(), 2);
240  ASSERT_EQ(tensor[2].item<double>(), 3);
241 }
242 
243 TEST(TensorTest, FromBlobUsesDeleter) {
244  bool called = false;
245  {
246  std::vector<int32_t> v = {1, 2, 3};
247  auto tensor = torch::from_blob(
248  v.data(),
249  v.size(),
250  /*deleter=*/[&called](void* data) { called = true; },
251  torch::kInt32);
252  }
253  ASSERT_TRUE(called);
254 }
255 
256 TEST(TensorTest, FromBlobWithStrides) {
257  // clang-format off
258  std::vector<int32_t> v = {
259  1, 2, 3,
260  4, 5, 6,
261  7, 8, 9
262  };
263  // clang-format on
264  auto tensor = torch::from_blob(
265  v.data(),
266  /*sizes=*/{3, 3},
267  /*strides=*/{1, 3},
268  torch::kInt32);
269  ASSERT_TRUE(tensor.is_variable());
270  ASSERT_EQ(tensor.dtype(), torch::kInt32);
271  ASSERT_EQ(tensor.numel(), 9);
272  const std::vector<int64_t> expected_strides = {1, 3};
273  ASSERT_EQ(tensor.strides(), expected_strides);
274  for (int64_t i = 0; i < tensor.size(0); ++i) {
275  for (int64_t j = 0; j < tensor.size(1); ++j) {
276  // NOTE: This is column major because the strides are swapped.
277  EXPECT_EQ(tensor[i][j].item<int32_t>(), 1 + (j * tensor.size(1)) + i);
278  }
279  }
280 }
281 
282 TEST(TensorTest, Item) {
283  {
284  torch::Tensor tensor = torch::tensor(3.14);
285  torch::Scalar scalar = tensor.item();
286  ASSERT_NEAR(scalar.to<float>(), 3.14, 1e-5);
287  }
288  {
289  torch::Tensor tensor = torch::tensor(123);
290  torch::Scalar scalar = tensor.item();
291  ASSERT_EQ(scalar.to<int>(), 123);
292  }
293 }
294 
295 TEST(TensorTest, Item_CUDA) {
296  {
297  torch::Tensor tensor = torch::tensor(3.14, torch::kCUDA);
298  torch::Scalar scalar = tensor.item();
299  ASSERT_NEAR(scalar.to<float>(), 3.14, 1e-5);
300  }
301  {
302  torch::Tensor tensor = torch::tensor(123, torch::kCUDA);
303  torch::Scalar scalar = tensor.item();
304  ASSERT_EQ(scalar.to<int>(), 123);
305  }
306 }
// Reference notes (from generated API docs):
// - at::Scalar represents a 0-dimensional tensor which contains a single
//   element (declared in Scalar.h).
// - c10::Error is the primary ATen error class (declared in Exception.h).
// - TensorOptions::requires_grad(c10::optional<bool>) sets the
//   requires_grad property of the TensorOptions.