// NOTE(review): this chunk is extraction-garbled — stale original line numbers
// (1, 4, 8, ...) are fused into the text and intervening lines are dropped.
// Forward declarations of the legacy TH C API used below by TestTensorFromTH.
// Allocates an uninitialized a-by-b float tensor via the TH allocator.
1 #include <gtest/gtest.h> 4 #include <ATen/core/Reduction.h> 8 extern "C" THFloatTensor * THFloatTensor_newWithSize2d(
size_t a,
size_t b);
// Fills every element of the given TH tensor with the scalar value v.
9 extern "C" void THFloatTensor_fill(THFloatTensor *,
float v);
// ASSERT_EQ_RESOLVED: assertion wrapper macro — its body is lost in this
// chunk (original numbering jumps 16 -> 24).
// TestResize: resizes an empty tensor and checks numel() after each resize.
// NOTE(review): the resize_ calls between the asserts (orig. lines 26/28) are
// missing here; presumably {3, 4} then {5, 7} given numel 12/35 — TODO confirm
// against the upstream file.
16 #define ASSERT_EQ_RESOLVED(X, Y) \ 24 void TestResize(
Type& type) {
25 auto a = at::empty({0}, type.
options());
27 ASSERT_EQ_RESOLVED(a.numel(), 12);
29 ASSERT_EQ_RESOLVED(a.numel(), 35);
32 void TestOnesAndDot(
Type& type) {
33 Tensor b0 = ones({1, 1}, type);
34 ASSERT_EQ_RESOLVED((b0 + b0).sum().item<double>(), 2);
36 Tensor b1 = ones({1, 2}, type);
37 ASSERT_EQ_RESOLVED((b1 + b1).sum().item<double>(), 4);
39 Tensor b = ones({3, 4}, type);
40 ASSERT_EQ_RESOLVED((b + b).sum().item<double>(), 24);
41 ASSERT_EQ_RESOLVED(b.numel(), 12);
42 ASSERT_EQ_RESOLVED(b.view(-1).dot(b.view(-1)).item<double>(), 12);
// TestSort: sorts a random 3x4 tensor and checks ascending order within a row.
// NOTE(review): the sort call that defines `z` (orig. lines 47-48, presumably
// sort along dim 1) and the assertion consuming isLT are missing from this
// chunk — TODO restore from upstream.
45 void TestSort(
Type& type) {
46 Tensor b = rand({3, 4}, type);
// z is a (values, indices) tuple; take the sorted values.
49 auto z_sorted = std::get<0>(z);
// First element of row 0 should compare less than the second after sorting.
51 bool isLT = z_sorted[0][0].item<
float>() < z_sorted[0][1].item<float>();
// TestRandperm: randperm + sort should yield non-decreasing values.
// Skipped on CUDA (randperm was CPU-only here).
// NOTE(review): the declarations of rv/ri (orig. line 58), the isLE assertion,
// and the end of this function are missing; lines 67+ below belong to a
// DIFFERENT test that streams the global context address — the function
// boundary between them was lost in extraction.
55 void TestRandperm(
Type& type) {
56 if (type.backend() != Backend::CUDA) {
57 Tensor b = randperm(15, type);
59 std::tie(rv, ri) = sort(b, 0);
60 bool isLE = (rv[0].item<
float>() <= rv[1].item<float>());
// Fragment of a context-printing test: hex-prints &globalContext().
67 ss <<
"context: " << std::hex << (int64_t)&globalContext() << std::endl;
// TestAdd: nested add() calls must match the equivalent operator+ chain.
// NOTE(review): the definition of `d` (orig. lines 74-75) is missing from
// this chunk — presumably another rand tensor or scalar; TODO confirm.
70 void TestAdd(
Type& type) {
71 Tensor a = rand({3, 4}, type);
72 Tensor b = rand({3, 4}, type);
// c = a + (a + b), built via the free function rather than operator+.
73 Tensor c = add(a, add(a, b));
76 ASSERT_TRUE(add(c, d).allclose(a + a + b + d));
// TestLoadsOfAdds: accumulates d into r 100000 times, reports wall-clock time,
// and checks the result equals 100000 * d by comparing norms.
// NOTE(review): the loop body (orig. lines 84-85, the add that updates r) and
// parts of the timing output expression (orig. lines 90-91) are missing from
// this chunk — TODO restore from upstream.
79 void TestLoadsOfAdds(
Type& type) {
80 auto begin = std::chrono::high_resolution_clock::now();
81 Tensor d = ones({3, 4}, type);
82 Tensor r = zeros({3, 4}, type);
83 for (
auto i = 0; i < 100000; i++) {
86 auto end = std::chrono::high_resolution_clock::now();
// Prints the elapsed duration in milliseconds.
88 std::cout << std::dec <<
" " 89 << std::chrono::duration_cast<std::chrono::milliseconds>(
92 <<
" ms" << std::endl;
// 100000 additions of ones must equal scaling d by 100000.
93 ASSERT_EQ_RESOLVED(norm(100000 * d).item<double>(), norm(r).item<double>());
// TestLoadOfAddsWithCopy: same accumulation benchmark as TestLoadsOfAdds but
// the loop variant copies the result each iteration (per the name).
// NOTE(review): the loop body (orig. lines 101-102) and parts of the timing
// output (orig. lines 107-108) are missing from this chunk — TODO restore.
96 void TestLoadOfAddsWithCopy(
Type& type) {
97 auto begin = std::chrono::high_resolution_clock::now();
98 Tensor d = ones({3, 4}, type);
99 Tensor r = zeros({3, 4}, type);
100 for (
auto i = 0; i < 100000; i++) {
103 auto end = std::chrono::high_resolution_clock::now();
// Prints the elapsed duration in milliseconds.
105 std::cout << std::dec <<
" " 106 << std::chrono::duration_cast<std::chrono::milliseconds>(
109 <<
" ms" << std::endl;
// 100000 additions of ones must equal scaling d by 100000.
110 ASSERT_EQ_RESOLVED(norm(100000 * d).item<double>(), norm(r).item<double>());
113 void TestIsContiguous(
Type& type) {
114 Tensor a = rand({3, 4}, type);
115 ASSERT_TRUE(a.is_contiguous());
116 a = a.transpose(0, 1);
117 ASSERT_FALSE(a.is_contiguous());
120 void TestPermute(
Type& type) {
121 Tensor a = rand({3, 4, 5}, type);
122 Tensor b = a.permute({1, 2, 0});
123 ASSERT_TRUE(b.sizes().equals({4, 5, 3}));
124 ASSERT_TRUE(b.strides().equals({5, 1, 20}));
// TestMm: matrix-vector product must match addmv with beta=0, alpha=1.
// NOTE(review): the definition of `c` (orig. line 130, presumably
// c = mv(a, b)) is missing from this chunk — TODO confirm.
127 void TestMm(
Type& type) {
128 Tensor a = rand({3, 4}, type);
129 Tensor b = rand({4}, type);
131 ASSERT_TRUE(c.equal(addmv(zeros({3}, type), a, b, 0, 1)));
// TestSqueeze: squeezing removes size-1 dimensions.
// NOTE(review): the definition of `b` (orig. line 136, presumably
// b = squeeze(a)) and lines 138-140 (a second squeeze case) are missing from
// this chunk — TODO restore from upstream.
134 void TestSqueeze(
Type& type) {
135 Tensor a = rand({2, 1}, type);
// {2, 1} squeezed -> 1-D tensor of length 2.
137 ASSERT_EQ_RESOLVED(b.dim(), 1);
141 ASSERT_TRUE(a[0].equal(b));
// TestCopy: after copying e into a, the two must compare equal.
// NOTE(review): the copy itself (orig. line 147, presumably a.copy_(e)) is
// missing from this chunk — TODO confirm.
144 void TestCopy(
Type& type) {
145 Tensor a = zeros({4, 3}, type);
146 Tensor e = rand({4, 3}, type);
148 ASSERT_TRUE(a.equal(e));
// TestCopyBroadcasting: copying a 1-D tensor into a 2-D tensor broadcasts it
// across rows, so every row of a must equal e.
// NOTE(review): the broadcasting copy itself (orig. line 154, presumably
// a.copy_(e)) is missing from this chunk — TODO confirm.
151 void TestCopyBroadcasting(
Type& type) {
152 Tensor a = zeros({4, 3}, type);
153 Tensor e = rand({3}, type);
155 for (
int i = 0; i < 4; ++i) {
156 ASSERT_TRUE(a[i].equal(e));
// TestAbsValue: absolute value of a scalar tensor.
// NOTE(review): the definition of `r` (orig. line 160) is missing from this
// chunk — given the assert, presumably abs of a tensor holding -3; TODO
// confirm against upstream.
159 void TestAbsValue(
Type& type) {
161 ASSERT_EQ_RESOLVED(r.item<int32_t>(), 3);
176 void TestAddingAValueWithScalar(
Type& type) {
177 Tensor a = rand({4, 3}, type);
178 ASSERT_TRUE((ones({4, 3}, type) + a).equal(add(a, 1)));
181 void TestSelect(
Type& type) {
182 Tensor a = rand({3, 7}, type);
183 auto a_13 = select(a, 1, 3);
184 auto a_13_02 = select(select(a, 1, 3), 0, 2);
185 ASSERT_TRUE(a[0][3].equal(a_13[0]));
186 ASSERT_TRUE(a[2][3].equal(a_13_02));
// TestZeroDim: arithmetic mixing 0-dim and 2-dim tensors — 0-dim operands
// broadcast, 2-dim operands dominate the result rank; indexing a 2-D tensor
// twice yields a 0-dim tensor.
// NOTE(review): the definition of `a` (orig. lines 190-191, a 0-dim tensor)
// and lines 199/202 are missing from this chunk — TODO restore.
189 void TestZeroDim(
Type& type) {
192 Tensor b = rand({3, 4}, type);
193 ASSERT_EQ_RESOLVED((a + a).dim(), 0);
194 ASSERT_EQ_RESOLVED((1 + a).dim(), 0);
195 ASSERT_EQ_RESOLVED((b + a).dim(), 2);
196 ASSERT_EQ_RESOLVED((a + b).dim(), 2);
197 auto c = rand({3, 4}, type);
198 ASSERT_EQ_RESOLVED(c[1][2].dim(), 0);
// Assigning a whole row, then reading one of its elements back.
200 auto f = rand({3, 4}, type);
201 f[2] = zeros({4}, type);
203 ASSERT_EQ_RESOLVED(f[2][0].item<double>(), 0);
// TestTensorFromTH: wrapping a raw TH tensor (without retaining it) must not
// throw.
// NOTE(review): the definition of `a` (orig. line 207, the size/fill value)
// is missing from this chunk — TODO confirm against upstream.
206 void TestTensorFromTH() {
208 THFloatTensor* t = THFloatTensor_newWithSize2d(a, a);
209 THFloatTensor_fill(t, a);
// false: do not retain — ATen takes over the existing reference.
210 ASSERT_NO_THROW(CPU(kFloat).unsafeTensorFromTH(t,
false));
// TestToCFloat: cat along dim 1 produces the summed width; for a 0-dim
// tensor `e`, the first value at its data pointer equals its sum.
// NOTE(review): the definitions of `a`, `b` (orig. lines 214-215) and `e`
// (orig. line 219) are missing from this chunk — TODO restore from upstream.
213 void TestToCFloat() {
216 Tensor c = cat({a, b}, 1);
217 ASSERT_EQ_RESOLVED(c.size(1), 11);
220 ASSERT_EQ_RESOLVED(*e.data<
float>(), e.sum().item<
float>());
// TestToString: printing a tensor of tiny values factors out a common scale,
// so the printed form must start with "1e-07 *".
// NOTE(review): the stream `s` (orig. lines 224-225, presumably an
// ostringstream that b was streamed into) is missing from this chunk — TODO
// confirm.
222 void TestToString() {
223 Tensor b = ones({3, 7}) * .0000001f;
226 std::string expect =
"1e-07 *";
227 ASSERT_EQ_RESOLVED(s.str().substr(0, expect.size()), expect);
// TestIndexingByScalar: tensor[i] must work for every integral index type
// (int64_t, size_t, int, int16_t, int8_t) and equal one * i; indexing with a
// floating-point Scalar must throw.
// NOTE(review): the loop closing braces (orig. lines 235/238/241/244) and
// lines 247-248 are missing from this chunk.
230 void TestIndexingByScalar() {
231 Tensor tensor = arange(0, 10, kInt);
232 Tensor one = ones({}, kInt);
233 for (int64_t i = 0; i < tensor.numel(); ++i) {
234 ASSERT_TRUE(tensor[i].equal(one * i));
236 for (
size_t i = 0; i < static_cast<uint64_t>(tensor.numel()); ++i) {
237 ASSERT_TRUE(tensor[i].equal(one * static_cast<int64_t>(i)));
239 for (
int i = 0; i < tensor.numel(); ++i) {
240 ASSERT_TRUE(tensor[i].equal(one * i));
242 for (int16_t i = 0; i < tensor.numel(); ++i) {
243 ASSERT_TRUE(tensor[i].equal(one * i));
245 for (int8_t i = 0; i < tensor.numel(); ++i) {
246 ASSERT_TRUE(tensor[i].equal(one * i));
// A non-integral Scalar index is rejected.
249 ASSERT_ANY_THROW(tensor[
Scalar(3.14)].equal(one));
// TestIndexingByZerodimTensor: indexing with a 0-dim integral tensor selects
// that element; float-valued, undefined, and multi-dim index tensors throw.
// NOTE(review): the loop closing brace and some interleaved comment lines
// (orig. lines 257-259, 261, 263, 265) are missing from this chunk.
252 void TestIndexingByZerodimTensor() {
253 Tensor tensor = arange(0, 10, kInt);
254 Tensor one = ones({}, kInt);
255 for (
int i = 0; i < tensor.numel(); ++i) {
256 ASSERT_TRUE(tensor[one * i].equal(one * i));
// A 0-dim float index is rejected.
260 ASSERT_ANY_THROW(tensor[ones({}) * 3.14].equal(one));
// An undefined tensor index is rejected.
262 ASSERT_ANY_THROW(tensor[
Tensor()].equal(one));
// A multi-dimensional index tensor is rejected.
264 ASSERT_ANY_THROW(tensor[ones({2, 3, 4}, kInt)].equal(one));
266 void TestIndexingMixedDevice(
Type& type) {
267 Tensor tensor = randn({20, 20}, type);
268 Tensor index = arange(10, kLong).cpu();
269 Tensor result = tensor.index({index});
270 ASSERT_TRUE(result[0].equal(tensor[0]));
272 void TestDispatch() {
273 Tensor tensor = randn({20, 20});
274 Tensor other = randn({20, 20});
275 auto result = tensor.m(relu).m(mse_loss, other, Reduction::Mean);
276 ASSERT_TRUE(result.allclose(mse_loss(relu(tensor), other)));
// TestNegativeDim: negative sizes must throw both at construction and on
// reshape.
// NOTE(review): the definition of `tensor` (orig. line 282) is missing from
// this chunk — TODO confirm against upstream.
279 void TestNegativeDim(
Type& type) {
280 ASSERT_ANY_THROW(empty({5, -5, 5}, type.
options()));
281 ASSERT_ANY_THROW(empty({5, -5, -5}, type.
options()));
283 ASSERT_ANY_THROW(tensor.reshape({-5, -5}));
// Interior of the per-type test driver that runs each TestXxx with the given
// type.
// NOTE(review): the driver's signature (orig. ~line 286) and several calls
// (orig. lines 287, 289-292, 296-299, 301, 303-307, 311, 313-314) are missing
// from this chunk.
288 TestOnesAndDot(type);
293 TestLoadsOfAdds(type);
294 TestLoadOfAddsWithCopy(type);
295 TestIsContiguous(type);
300 TestCopyBroadcasting(type);
302 TestAddingAValueWithScalar(type);
308 TestIndexingByScalar();
309 TestIndexingByZerodimTensor();
310 TestIndexingMixedDevice(type);
312 TestNegativeDim(type);
// gtest entry points that run the driver against the CPU and CUDA backends.
// NOTE(review): both test bodies (orig. lines 316-320 and 322+) are missing
// from this chunk — TODO restore from upstream.
315 TEST(BasicTest, BasicTestCPU) {
321 TEST(BasicTest, BasicTestCUDA) {
Scalar represents a 0-dimensional tensor that contains a single element.
TensorOptions options(int16_t device_index=-1) const
Constructs a TensorOptions object from this type and the given device_index.
Flush-To-Zero and Denormals-Are-Zero modes.