Caffe2 — C++ API
A deep-learning, cross-platform ML framework
native_test.cpp
1 #include <gtest/gtest.h>
2 
3 #include <ATen/ATen.h>
4 
5 using namespace at;
6 
// Assert exact element-wise equality of two tensors.
// Wrapped in do/while(0) so each macro expands to a single statement and is
// safe inside an unbraced if/else (the originals expanded to two statements,
// or to a statement plus a stray ';').
#define ASSERT_EQUAL(t1, t2)     \
  do {                           \
    ASSERT_TRUE(t1.equal(t2));   \
  } while (0)

// Assert same shape and approximate (default-tolerance) equality.
#define ASSERT_ALLCLOSE(t1, t2)       \
  do {                                \
    ASSERT_TRUE(t1.is_same_size(t2)); \
    ASSERT_TRUE(t1.allclose(t2));     \
  } while (0)

// As ASSERT_ALLCLOSE, but forwarding explicit tolerances to allclose().
// NOTE(review): the parameters are named (atol, rtol) but are passed
// positionally to allclose(), whose signature is (other, rtol, atol) —
// confirm the intended order at the call sites before renaming anything.
#define ASSERT_ALLCLOSE_TOLERANCES(t1, t2, atol, rtol) \
  do {                                                 \
    ASSERT_TRUE(t1.is_same_size(t2));                  \
    ASSERT_TRUE(t1.allclose(t2, atol, rtol));          \
  } while (0)
16 
17 void requireEqualTensorList(TensorList t1, TensorList t2) {
18  ASSERT_EQ(t1.size(), t2.size());
19  for (size_t i = 0; i < t1.size(); ++i) {
20  ASSERT_EQUAL(t1[i], t2[i]);
21  }
22 }
23 
24 // split: test method, type, namespace give same result
25 void TestSplit(Type& T, Tensor& t) {
26  auto splitMethod = t.split(1, 0);
27  auto splitType = T.split(t, 1, 0);
28  auto splitNs = at::split(t, 1, 0);
29  requireEqualTensorList(splitMethod, splitType);
30  requireEqualTensorList(splitMethod, splitNs);
31 
32  // test rebuilding with cat
33  ASSERT_EQUAL(at::cat(splitMethod, 0), t);
34 }
35 
36 // chunk: test method, type, namespace give same result
37 void TestChunk(Type& T, Tensor& t) {
38  // test method, type, namespace give same result
39  auto chunkMethod = t.chunk(3, 0);
40  auto chunkType = T.chunk(t, 3, 0);
41  auto chunkNs = at::chunk(t, 3, 0);
42  requireEqualTensorList(chunkMethod, chunkType);
43  requireEqualTensorList(chunkMethod, chunkNs);
44 
45  // test rebuilding with cat
46  ASSERT_EQUAL(at::cat(chunkMethod, 0), t);
47 }
48 
49 void TestStack(Type& T, Tensor& t) {
50  auto x = rand({2, 3, 4});
51  auto y = rand({2, 3, 4});
52  auto z = rand({2, 3, 4});
53  for (int64_t dim = 0; dim < 4; ++dim) {
54  auto res = at::stack({x, y, z}, dim);
55  auto res_neg = at::stack({x, y, z}, dim - 4);
56  std::vector<int64_t> expected_size;
57  expected_size.insert(
58  expected_size.end(), x.sizes().begin(), x.sizes().begin() + dim);
59  expected_size.insert(expected_size.end(), 3);
60  expected_size.insert(
61  expected_size.end(), x.sizes().begin() + dim, x.sizes().end());
62 
63  ASSERT_EQUAL(res, res_neg);
64  ASSERT_TRUE(res.sizes().equals(expected_size));
65  ASSERT_EQUAL(res.select(dim, 0), x);
66  ASSERT_EQUAL(res.select(dim, 1), y);
67  ASSERT_EQUAL(res.select(dim, 2), z);
68  }
69 }
70 
71 // size / stride
72 void TestSize(Type& T, Tensor& t) {
73  auto scalar = randn({}, T);
74  // Throw StartsWith("dimension specified as 0 but tensor has no dimensions")
75  ASSERT_ANY_THROW(scalar.size(0));
76  // Throw StartsWith("dimension specified as -1 but tensor has no dimensions")
77  ASSERT_ANY_THROW(scalar.size(-1));
78  // Throw StartsWith("dimension specified as 0 but tensor has no dimensions")
79  ASSERT_ANY_THROW(scalar.stride(0));
80  // Throw StartsWith("dimension specified as -1 but tensor has no dimensions")
81  ASSERT_ANY_THROW(scalar.stride(-1));
82 
83  auto empty = randn({0}, T);
84  ASSERT_EQ(empty.size(0), 0);
85  ASSERT_EQ(empty.size(-1), 0);
86  ASSERT_EQ(empty.stride(0), 1);
87  ASSERT_EQ(empty.stride(-1), 1);
88 }
89 
// matmul: check each dimensionality pairing against its explicit reference
// formulation — 0-d inputs are rejected; 1-d uses dot/mv/mm; 2-d uses mm;
// batched (>2-d) cases use bmm on reshaped/expanded views. T is the type
// under test; AccT is a higher-precision type used where matmul's folded
// path is compared against a reference computation.
// Note: d2/d2o are reassigned partway through, and consecutive randn calls
// consume shared RNG state, so statement order here is significant.
void TestMatmul(Type& T, Tensor& t, Type& AccT) {
  auto scalar = randn({}, T);
  auto d1 = randn({3}, T);
  auto d2 = randn({2, 3}, T);

  // 0-d
  // Throw StartsWith("both arguments to matmul need to be at least 1D")
  ASSERT_ANY_THROW(scalar.matmul(d2));
  // Throw StartsWith("both arguments to matmul need to be at least 1D")
  ASSERT_ANY_THROW(d2.matmul(scalar));

  // 1-d: vector-vector is dot; matrix-vector is mv; vector-matrix is a
  // row-vector mm with the added dim squeezed back out.
  ASSERT_ALLCLOSE(d1.matmul(d1), d1.dot(d1));
  ASSERT_ALLCLOSE(d2.matmul(d1), d2.mv(d1));
  auto d1o = randn({2}, T);
  ASSERT_ALLCLOSE(d1o.matmul(d2), d1o.unsqueeze(0).mm(d2).squeeze(0));

  // 2-d
  auto d2o = randn({3, 5}, T);
  ASSERT_ALLCLOSE(d2.matmul(d2o), d2.mm(d2o));

  // > 2-d, 1-d: batched matmul equals bmm with the vector expanded across
  // the (flattened) batch dimension.
  auto d3 = randn({5, 2, 3}, T);
  ASSERT_ALLCLOSE(
      d3.matmul(d1), d3.bmm(d1.view({1, 3, 1}).expand({5, 3, 1})).view({5, 2}));
  ASSERT_ALLCLOSE(d1o.matmul(d3), d1o.expand({5, 1, 2}).bmm(d3).view({5, 3}));

  // 5-d batch: batch dims {3,2,4} flatten to 24 for the bmm reference.
  auto d5 = randn({3, 2, 4, 2, 3}, T);
  ASSERT_ALLCLOSE(
      d5.matmul(d1),
      d5.view({24, 2, 3})
          .bmm(d1.view({1, 3, 1}).expand({24, 3, 1}))
          .view({3, 2, 4, 2}));
  ASSERT_ALLCLOSE(
      d1o.matmul(d5),
      d1o.expand({24, 1, 2}).bmm(d5.view({24, 2, 3})).view({3, 2, 4, 3}));

  // > 2-d, 2-d
  // we use a "folding" algorithm in this case of matmul, so the direct
  // comparison to bmm doesn't work; instead, compare to the higher precision
  // computation (technically, we should always do this). Tolerances are
  // selected empirically.
  // NOTE(review): these are forwarded positionally through
  // ASSERT_ALLCLOSE_TOLERANCES into allclose(t2, atol, rtol); allclose's
  // signature is (other, rtol, atol), so the effective tolerances may be
  // swapped relative to these names — confirm intended.
  double atol = 1e-04;
  double rtol = 1e-06;
  d2 = randn({3, 4}, T);
  d2o = randn({4, 2}, T);
  auto result = d5.matmul(d2).toType(AccT);

  auto d5Acc = d5.toType(AccT);
  auto d2Acc = d2.toType(AccT);
  auto acc_result = d5Acc.view({24, 2, 3})
                        .bmm(d2Acc.expand({24, 3, 4}))
                        .view({3, 2, 4, 2, 4});
  ASSERT_ALLCLOSE_TOLERANCES(result, acc_result, atol, rtol);
  ASSERT_ALLCLOSE(
      d2o.matmul(d5),
      d2o.expand({24, 4, 2}).bmm(d5.view({24, 2, 3})).view({3, 2, 4, 4, 3}));

  // > 2-d, > 2-d: batch dims broadcast against each other before the bmm.
  auto d5o = randn({2, 1, 2, 4, 3, 2}, T);
  auto d5_bmm_view =
      d5.expand({2, 3, 2, 4, 2, 3}).contiguous().view({48, 2, 3});
  auto d5o_bmm_view =
      d5o.expand({2, 3, 2, 4, 3, 2}).contiguous().view({48, 3, 2});
  ASSERT_ALLCLOSE(
      d5.matmul(d5o), d5_bmm_view.bmm(d5o_bmm_view).view({2, 3, 2, 4, 2, 2}));

  // non-expandable case: incompatible batch dims must be rejected.
  auto d5wrong = randn({2, 4, 2, 4, 3, 2}, T);
  // Throw Contains("must match the size")
  ASSERT_ANY_THROW(d5.matmul(d5wrong));
}
162 
163 void TestStandardGammaGrad(Type& T, Tensor& t) {
164  // check empty
165  auto empty = ones({0}, T);
166  ASSERT_EQUAL(empty, at::_standard_gamma_grad(empty, empty));
167 
168  // check scalar equals one element
169  auto one_scalar = ones({}, T).mul(5);
170  auto one_with_dim = ones({1}, T).mul(5);
171  ASSERT_ALLCLOSE(
172  at::_standard_gamma_grad(one_scalar, one_scalar),
173  at::_standard_gamma_grad(one_with_dim, one_with_dim).sum());
174 
175  // check mixing types
176  auto t1 = randn({3, 4}, T);
177  auto t2 = randn({3, 4}, T).toType(kDouble);
178  // Throw StartsWith("expected scalar type")
179  ASSERT_ANY_THROW(at::_standard_gamma_grad(t1, t2));
180 }
181 
182 void TestWhere(Type& T, Tensor& t) {
183  // empty
184  auto empty = ones({0}, T);
185  auto& bT = T.toScalarType(ScalarType::Byte);
186  auto empty_byte = ones({0}, bT);
187  ASSERT_EQUAL(empty, at::where(empty_byte, empty, empty));
188 
189  // check scalar equals one element
190  auto x_scalar = ones({}, T).mul(5);
191  auto y_scalar = ones({}, T).mul(7);
192  auto cond_scalar = zeros({}, bT);
193  auto x_1d = x_scalar.unsqueeze(0);
194  auto y_1d = y_scalar.unsqueeze(0);
195  auto cond_1d = cond_scalar.unsqueeze(0);
196  ASSERT_ALLCLOSE(
197  at::where(cond_scalar, x_scalar, y_scalar).unsqueeze(0),
198  at::where(cond_1d, x_1d, y_1d));
199 }
200 
201 void test(Type& T, Type& AccT) {
202  auto t = randn({3, 3}, T);
203  TestSplit(T, t);
204  TestChunk(T, t);
205  TestStack(T, t);
206  TestSize(T, t);
207  TestMatmul(T, t, AccT);
208  TestStandardGammaGrad(T, t);
209  TestWhere(T, t);
210 }
211 
// Run the native-function tests on the CPU backend, with float as the type
// under test and double as the higher-precision comparison type.
TEST(TestNative, NativeTestCPU) {
  // Fixed seed so the random inputs are reproducible across runs.
  manual_seed(123);

  test(CPU(kFloat), CPU(kDouble));
}
217 
// Run the same native-function tests on the CUDA backend when a GPU is
// available; the test is a silent no-op on CUDA-less builds/machines.
TEST(TestNative, NativeTestGPU) {
  // Fixed seed so the random inputs are reproducible across runs.
  manual_seed(123);

  if (at::hasCUDA()) {
    test(CUDA(kFloat), CUDA(kDouble));
  }
}
Definition: module.cpp:17
constexpr size_t size() const
size - Get the array size.
Definition: ArrayRef.h:138
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: ArrayRef.h:41
Flush-To-Zero and Denormals-Are-Zero mode.