Caffe2 - C++ API
A deep learning, cross platform ML framework
basic.cpp
1 #include <gtest/gtest.h>
2 
3 #include <ATen/ATen.h>
4 #include <ATen/core/Reduction.h>
5 
6 // for TH compat test only...
7 struct THFloatTensor;
8 extern "C" THFloatTensor * THFloatTensor_newWithSize2d(size_t a, size_t b);
9 extern "C" void THFloatTensor_fill(THFloatTensor *, float v);
10 
11 #include <iostream>
12 #include <chrono>
13 #include <string.h>
14 #include <sstream>
15 
// Compares X and Y with operator== resolved at the expansion site, then
// funnels the result through ASSERT_TRUE.
// do/while(0) makes the macro a single statement that is safe inside an
// unbraced if/else; the added parentheses around X and Y prevent
// operator-precedence surprises when callers pass compound expressions.
#define ASSERT_EQ_RESOLVED(X, Y) \
  do {                           \
    bool isEQ = (X) == (Y);      \
    ASSERT_TRUE(isEQ);           \
  } while (0)
21 
22 using namespace at;
23 
24 void TestResize(Type& type) {
25  auto a = at::empty({0}, type.options());
26  a.resize_({3, 4});
27  ASSERT_EQ_RESOLVED(a.numel(), 12);
28  a.resize_({5, 7});
29  ASSERT_EQ_RESOLVED(a.numel(), 35);
30 }
31 
32 void TestOnesAndDot(Type& type) {
33  Tensor b0 = ones({1, 1}, type);
34  ASSERT_EQ_RESOLVED((b0 + b0).sum().item<double>(), 2);
35 
36  Tensor b1 = ones({1, 2}, type);
37  ASSERT_EQ_RESOLVED((b1 + b1).sum().item<double>(), 4);
38 
39  Tensor b = ones({3, 4}, type);
40  ASSERT_EQ_RESOLVED((b + b).sum().item<double>(), 24);
41  ASSERT_EQ_RESOLVED(b.numel(), 12);
42  ASSERT_EQ_RESOLVED(b.view(-1).dot(b.view(-1)).item<double>(), 12);
43 }
44 
45 void TestSort(Type& type) {
46  Tensor b = rand({3, 4}, type);
47 
48  auto z = b.sort(1);
49  auto z_sorted = std::get<0>(z);
50 
51  bool isLT = z_sorted[0][0].item<float>() < z_sorted[0][1].item<float>();
52  ASSERT_TRUE(isLT);
53 }
54 
55 void TestRandperm(Type& type) {
56  if (type.backend() != Backend::CUDA) {
57  Tensor b = randperm(15, type);
58  Tensor rv, ri;
59  std::tie(rv, ri) = sort(b, 0);
60  bool isLE = (rv[0].item<float>() <= rv[1].item<float>());
61  ASSERT_TRUE(isLE);
62  }
63 }
64 
65 void SendContext() {
66  std::stringstream ss;
67  ss << "context: " << std::hex << (int64_t)&globalContext() << std::endl;
68 }
69 
70 void TestAdd(Type& type) {
71  Tensor a = rand({3, 4}, type);
72  Tensor b = rand({3, 4}, type);
73  Tensor c = add(a, add(a, b));
74  // TODO:0-dim Tensor d(3.f);
75  Scalar d = 3.f;
76  ASSERT_TRUE(add(c, d).allclose(a + a + b + d));
77 }
78 
79 void TestLoadsOfAdds(Type& type) {
80  auto begin = std::chrono::high_resolution_clock::now();
81  Tensor d = ones({3, 4}, type);
82  Tensor r = zeros({3, 4}, type);
83  for (auto i = 0; i < 100000; i++) {
84  add_out(r, r, d);
85  }
86  auto end = std::chrono::high_resolution_clock::now();
87  // TODO TEST PERF?
88  std::cout << std::dec << " "
89  << std::chrono::duration_cast<std::chrono::milliseconds>(
90  end - begin)
91  .count()
92  << " ms" << std::endl;
93  ASSERT_EQ_RESOLVED(norm(100000 * d).item<double>(), norm(r).item<double>());
94 }
95 
96 void TestLoadOfAddsWithCopy(Type& type) {
97  auto begin = std::chrono::high_resolution_clock::now();
98  Tensor d = ones({3, 4}, type);
99  Tensor r = zeros({3, 4}, type);
100  for (auto i = 0; i < 100000; i++) {
101  r = add(r, d);
102  }
103  auto end = std::chrono::high_resolution_clock::now();
104  // TODO TEST PERF?
105  std::cout << std::dec << " "
106  << std::chrono::duration_cast<std::chrono::milliseconds>(
107  end - begin)
108  .count()
109  << " ms" << std::endl;
110  ASSERT_EQ_RESOLVED(norm(100000 * d).item<double>(), norm(r).item<double>());
111 }
112 
113 void TestIsContiguous(Type& type) {
114  Tensor a = rand({3, 4}, type);
115  ASSERT_TRUE(a.is_contiguous());
116  a = a.transpose(0, 1);
117  ASSERT_FALSE(a.is_contiguous());
118 }
119 
120 void TestPermute(Type& type) {
121  Tensor a = rand({3, 4, 5}, type);
122  Tensor b = a.permute({1, 2, 0});
123  ASSERT_TRUE(b.sizes().equals({4, 5, 3}));
124  ASSERT_TRUE(b.strides().equals({5, 1, 20}));
125 }
126 
127 void TestMm(Type& type) {
128  Tensor a = rand({3, 4}, type);
129  Tensor b = rand({4}, type);
130  Tensor c = mv(a, b);
131  ASSERT_TRUE(c.equal(addmv(zeros({3}, type), a, b, 0, 1)));
132 }
133 
134 void TestSqueeze(Type& type) {
135  Tensor a = rand({2, 1}, type);
136  Tensor b = squeeze(a);
137  ASSERT_EQ_RESOLVED(b.dim(), 1);
138  a = rand({1}, type);
139  b = squeeze(a);
140  // TODO 0-dim squeeze
141  ASSERT_TRUE(a[0].equal(b));
142 }
143 
144 void TestCopy(Type& type) {
145  Tensor a = zeros({4, 3}, type);
146  Tensor e = rand({4, 3}, type);
147  a.copy_(e);
148  ASSERT_TRUE(a.equal(e));
149 }
150 
151 void TestCopyBroadcasting(Type& type) {
152  Tensor a = zeros({4, 3}, type);
153  Tensor e = rand({3}, type);
154  a.copy_(e);
155  for (int i = 0; i < 4; ++i) {
156  ASSERT_TRUE(a[i].equal(e));
157  }
158 }
159 void TestAbsValue(Type& type) {
160  Tensor r = at::abs(at::scalar_tensor(-3, type.options()));
161  ASSERT_EQ_RESOLVED(r.item<int32_t>(), 3);
162 }
163 /*
164  TODO(zach): operator overloads
165 #if 0
166 {
167 std::cout << "eq (value):" << std::endl;
168 Tensor a = Tensor(10.f);
169 std::cout << (a == 11_i64) << " -- should be 0" << std::endl;
170 std::cout << (a == 10_i64) << " -- should be 1" << std::endl;
171 std::cout << (a == 10.) << " -- should be 1" << std::endl;
172 }
173 #endif
174 */
175 
176 void TestAddingAValueWithScalar(Type& type) {
177  Tensor a = rand({4, 3}, type);
178  ASSERT_TRUE((ones({4, 3}, type) + a).equal(add(a, 1)));
179 }
180 
181 void TestSelect(Type& type) {
182  Tensor a = rand({3, 7}, type);
183  auto a_13 = select(a, 1, 3);
184  auto a_13_02 = select(select(a, 1, 3), 0, 2);
185  ASSERT_TRUE(a[0][3].equal(a_13[0]));
186  ASSERT_TRUE(a[2][3].equal(a_13_02));
187 }
188 
189 void TestZeroDim(Type& type) {
190  Tensor a = at::scalar_tensor(4, type.options()); // rand(type, {1});
191 
192  Tensor b = rand({3, 4}, type);
193  ASSERT_EQ_RESOLVED((a + a).dim(), 0);
194  ASSERT_EQ_RESOLVED((1 + a).dim(), 0);
195  ASSERT_EQ_RESOLVED((b + a).dim(), 2);
196  ASSERT_EQ_RESOLVED((a + b).dim(), 2);
197  auto c = rand({3, 4}, type);
198  ASSERT_EQ_RESOLVED(c[1][2].dim(), 0);
199 
200  auto f = rand({3, 4}, type);
201  f[2] = zeros({4}, type);
202  f[1][0] = -1;
203  ASSERT_EQ_RESOLVED(f[2][0].item<double>(), 0);
204 }
205 
206 void TestTensorFromTH() {
207  int a = 4;
208  THFloatTensor* t = THFloatTensor_newWithSize2d(a, a);
209  THFloatTensor_fill(t, a);
210  ASSERT_NO_THROW(CPU(kFloat).unsafeTensorFromTH(t, false));
211 }
212 
213 void TestToCFloat() {
214  Tensor a = zeros({3, 4});
215  Tensor b = ones({3, 7});
216  Tensor c = cat({a, b}, 1);
217  ASSERT_EQ_RESOLVED(c.size(1), 11);
218 
219  Tensor e = rand({});
220  ASSERT_EQ_RESOLVED(*e.data<float>(), e.sum().item<float>());
221 }
222 void TestToString() {
223  Tensor b = ones({3, 7}) * .0000001f;
224  std::stringstream s;
225  s << b << "\n";
226  std::string expect = "1e-07 *";
227  ASSERT_EQ_RESOLVED(s.str().substr(0, expect.size()), expect);
228 }
229 
230 void TestIndexingByScalar() {
231  Tensor tensor = arange(0, 10, kInt);
232  Tensor one = ones({}, kInt);
233  for (int64_t i = 0; i < tensor.numel(); ++i) {
234  ASSERT_TRUE(tensor[i].equal(one * i));
235  }
236  for (size_t i = 0; i < static_cast<uint64_t>(tensor.numel()); ++i) {
237  ASSERT_TRUE(tensor[i].equal(one * static_cast<int64_t>(i)));
238  }
239  for (int i = 0; i < tensor.numel(); ++i) {
240  ASSERT_TRUE(tensor[i].equal(one * i));
241  }
242  for (int16_t i = 0; i < tensor.numel(); ++i) {
243  ASSERT_TRUE(tensor[i].equal(one * i));
244  }
245  for (int8_t i = 0; i < tensor.numel(); ++i) {
246  ASSERT_TRUE(tensor[i].equal(one * i));
247  }
248  // Throw StartsWith("Can only index tensors with integral scalars")
249  ASSERT_ANY_THROW(tensor[Scalar(3.14)].equal(one));
250 }
251 
252 void TestIndexingByZerodimTensor() {
253  Tensor tensor = arange(0, 10, kInt);
254  Tensor one = ones({}, kInt);
255  for (int i = 0; i < tensor.numel(); ++i) {
256  ASSERT_TRUE(tensor[one * i].equal(one * i));
257  }
258  // Throw StartsWith(
259  // "Can only index tensors with integral scalars")
260  ASSERT_ANY_THROW(tensor[ones({}) * 3.14].equal(one));
261  // Throw StartsWith("Can only index with tensors that are defined")
262  ASSERT_ANY_THROW(tensor[Tensor()].equal(one));
263  // Throw StartsWith("Can only index with tensors that are scalars (zero-dim)")
264  ASSERT_ANY_THROW(tensor[ones({2, 3, 4}, kInt)].equal(one));
265 }
266 void TestIndexingMixedDevice(Type& type) {
267  Tensor tensor = randn({20, 20}, type);
268  Tensor index = arange(10, kLong).cpu();
269  Tensor result = tensor.index({index});
270  ASSERT_TRUE(result[0].equal(tensor[0]));
271 }
272 void TestDispatch() {
273  Tensor tensor = randn({20, 20});
274  Tensor other = randn({20, 20});
275  auto result = tensor.m(relu).m(mse_loss, other, Reduction::Mean);
276  ASSERT_TRUE(result.allclose(mse_loss(relu(tensor), other)));
277 }
278 
279 void TestNegativeDim(Type& type) {
280  ASSERT_ANY_THROW(empty({5, -5, 5}, type.options()));
281  ASSERT_ANY_THROW(empty({5, -5, -5}, type.options()));
282  Tensor tensor = empty({5, 5}, type.options());
283  ASSERT_ANY_THROW(tensor.reshape({-5, -5}));
284 }
285 
// Runs the full basic suite against the given type (CPU or CUDA float).
// Call order is kept stable: the suite runs under a fixed seed, so each
// test's random draws depend on everything that ran before it.
void test(Type& type) {
  TestResize(type);
  TestOnesAndDot(type);

  TestSort(type);
  TestRandperm(type);
  TestAdd(type);
  TestLoadsOfAdds(type);
  TestLoadOfAddsWithCopy(type);
  TestIsContiguous(type);
  TestPermute(type);
  TestMm(type);
  TestSqueeze(type);
  TestCopy(type);
  TestCopyBroadcasting(type);
  TestAbsValue(type);
  TestAddingAValueWithScalar(type);
  TestSelect(type);
  TestZeroDim(type);
  // The following tests take no Type argument and run on CPU only.
  TestTensorFromTH();
  TestToCFloat();
  TestToString();
  TestIndexingByScalar();
  TestIndexingByZerodimTensor();
  TestIndexingMixedDevice(type);
  TestDispatch();
  TestNegativeDim(type);
}
314 
// Runs the basic suite on CPU float tensors; the fixed seed makes the
// random inputs deterministic across runs.
TEST(BasicTest, BasicTestCPU) {
  manual_seed(123);

  test(CPU(kFloat));
}
320 
// Runs the same suite on CUDA float tensors, but only when a CUDA device
// is available; otherwise the test body is a deliberate no-op.
TEST(BasicTest, BasicTestCUDA) {
  manual_seed(123);

  if (at::hasCUDA()) {
    test(CUDA(kFloat));
  }
}
Scalar represents a 0-dimensional tensor which contains a single element.
Definition: Scalar.h:22
Definition: module.cpp:17
Definition: Type.h:107
TensorOptions options(int16_t device_index=-1) const
Constructs the TensorOptions from a type and a device_index.
Definition: Type.h:185
Flush-To-Zero and Denormals-Are-Zero mode.