#include <gtest/gtest.h>

#include <ATen/ATen.h>

using namespace at;

#define ASSERT_EQUAL(t1, t2) ASSERT_TRUE(t1.equal(t2));

#define ASSERT_ALLCLOSE(t1, t2)     \
  ASSERT_TRUE(t1.is_same_size(t2)); \
  ASSERT_TRUE(t1.allclose(t2));

#define ASSERT_ALLCLOSE_TOLERANCES(t1, t2, atol, rtol) \
  ASSERT_TRUE(t1.is_same_size(t2));                    \
  ASSERT_TRUE(t1.allclose(t2, atol, rtol));

// Verify that two lists of tensors are elementwise equal.
void requireEqualTensorList(TensorList t1, TensorList t2) {
  ASSERT_EQ(t1.size(), t2.size());
  for (size_t i = 0; i < t1.size(); ++i) {
    ASSERT_EQUAL(t1[i], t2[i]);
  }
}
// split: the method, Type, and namespace variants should agree.
void TestSplit(Type& T, Tensor& t) {
  auto splitMethod = t.split(1, 0);
  auto splitType = T.split(t, 1, 0);
  auto splitNs = at::split(t, 1, 0);
  requireEqualTensorList(splitMethod, splitType);
  requireEqualTensorList(splitMethod, splitNs);

  // Concatenating the pieces along the split dimension rebuilds the tensor.
  ASSERT_EQUAL(at::cat(splitMethod, 0), t);
}
// chunk: the method, Type, and namespace variants should agree.
void TestChunk(Type& T, Tensor& t) {
  auto chunkMethod = t.chunk(3, 0);
  auto chunkType = T.chunk(t, 3, 0);
  auto chunkNs = at::chunk(t, 3, 0);
  requireEqualTensorList(chunkMethod, chunkType);
  requireEqualTensorList(chunkMethod, chunkNs);

  // Concatenating the chunks along the chunk dimension rebuilds the tensor.
  ASSERT_EQUAL(at::cat(chunkMethod, 0), t);
}
// stack: stack along every dimension (positive and negative indexing) and
// check the result shape and slices.
void TestStack(Type& T, Tensor& t) {
  auto x = rand({2, 3, 4});
  auto y = rand({2, 3, 4});
  auto z = rand({2, 3, 4});
  for (int64_t dim = 0; dim < 4; ++dim) {
    auto res = at::stack({x, y, z}, dim);
    auto res_neg = at::stack({x, y, z}, dim - 4);
    std::vector<int64_t> expected_size;
    expected_size.insert(
        expected_size.end(), x.sizes().begin(), x.sizes().begin() + dim);
    expected_size.insert(expected_size.end(), 3);
    expected_size.insert(
        expected_size.end(), x.sizes().begin() + dim, x.sizes().end());

    ASSERT_EQUAL(res, res_neg);
    ASSERT_TRUE(res.sizes().equals(expected_size));
    ASSERT_EQUAL(res.select(dim, 0), x);
    ASSERT_EQUAL(res.select(dim, 1), y);
    ASSERT_EQUAL(res.select(dim, 2), z);
  }
}
// size()/stride() should throw on a zero-dim tensor and behave normally on
// an empty (zero-element) one-dim tensor.
void TestSize(Type& T, Tensor& t) {
  auto scalar = randn({}, T);
  // A zero-dim tensor has no dimension 0 or -1 to query.
  ASSERT_ANY_THROW(scalar.size(0));
  ASSERT_ANY_THROW(scalar.size(-1));
  ASSERT_ANY_THROW(scalar.stride(0));
  ASSERT_ANY_THROW(scalar.stride(-1));

  auto empty = randn({0}, T);
  ASSERT_EQ(empty.size(0), 0);
  ASSERT_EQ(empty.size(-1), 0);
  ASSERT_EQ(empty.stride(0), 1);
  ASSERT_EQ(empty.stride(-1), 1);
}
// matmul: compare against equivalent dot/mv/mm/bmm formulations for the
// various combinations of input dimensionalities.
void TestMatmul(Type& T, Tensor& t, Type& AccT) {
  auto scalar = randn({}, T);
  auto d1 = randn({3}, T);
  auto d2 = randn({2, 3}, T);

  // 0-d arguments are rejected.
  ASSERT_ANY_THROW(scalar.matmul(d2));
  ASSERT_ANY_THROW(d2.matmul(scalar));

  // 1-d
  ASSERT_ALLCLOSE(d1.matmul(d1), d1.dot(d1));
  ASSERT_ALLCLOSE(d2.matmul(d1), d2.mv(d1));
  auto d1o = randn({2}, T);
  ASSERT_ALLCLOSE(d1o.matmul(d2), d1o.unsqueeze(0).mm(d2).squeeze(0));

  // 2-d
  auto d2o = randn({3, 5}, T);
  ASSERT_ALLCLOSE(d2.matmul(d2o), d2.mm(d2o));

  // > 2-d with 1-d
  auto d3 = randn({5, 2, 3}, T);
  ASSERT_ALLCLOSE(
      d3.matmul(d1), d3.bmm(d1.view({1, 3, 1}).expand({5, 3, 1})).view({5, 2}));
  ASSERT_ALLCLOSE(d1o.matmul(d3), d1o.expand({5, 1, 2}).bmm(d3).view({5, 3}));

  auto d5 = randn({3, 2, 4, 2, 3}, T);
  ASSERT_ALLCLOSE(
      d5.matmul(d1),
      d5.view({24, 2, 3})
          .bmm(d1.view({1, 3, 1}).expand({24, 3, 1}))
          .view({3, 2, 4, 2}));
  ASSERT_ALLCLOSE(
      d1o.matmul(d5),
      d1o.expand({24, 1, 2}).bmm(d5.view({24, 2, 3})).view({3, 2, 4, 3}));

  // > 2-d with 2-d: compare against a higher-precision computation in AccT,
  // with empirically chosen tolerances.
  double atol = 1e-04;
  double rtol = 1e-06;
  d2 = randn({3, 4}, T);
  d2o = randn({4, 2}, T);
  auto result = d5.matmul(d2).toType(AccT);

  auto d5Acc = d5.toType(AccT);
  auto d2Acc = d2.toType(AccT);
  auto acc_result = d5Acc.view({24, 2, 3})
                        .bmm(d2Acc.expand({24, 3, 4}))
                        .view({3, 2, 4, 2, 4});
  ASSERT_ALLCLOSE_TOLERANCES(result, acc_result, atol, rtol);
  ASSERT_ALLCLOSE(
      d2o.matmul(d5),
      d2o.expand({24, 4, 2}).bmm(d5.view({24, 2, 3})).view({3, 2, 4, 4, 3}));

  // > 2-d with > 2-d
  auto d5o = randn({2, 1, 2, 4, 3, 2}, T);
  auto d5_bmm_view =
      d5.expand({2, 3, 2, 4, 2, 3}).contiguous().view({48, 2, 3});
  auto d5o_bmm_view =
      d5o.expand({2, 3, 2, 4, 3, 2}).contiguous().view({48, 3, 2});
  ASSERT_ALLCLOSE(
      d5.matmul(d5o), d5_bmm_view.bmm(d5o_bmm_view).view({2, 3, 2, 4, 2, 2}));

  // Non-broadcastable batch dimensions should throw.
  auto d5wrong = randn({2, 4, 2, 4, 3, 2}, T);
  ASSERT_ANY_THROW(d5.matmul(d5wrong));
}
void TestStandardGammaGrad(Type& T, Tensor& t) {
  // An empty input yields an empty gradient.
  auto empty = ones({0}, T);
  ASSERT_EQUAL(empty, at::_standard_gamma_grad(empty, empty));

  // A zero-dim input agrees with the equivalent one-element 1-d input.
  auto one_scalar = ones({}, T).mul(5);
  auto one_with_dim = ones({1}, T).mul(5);
  ASSERT_ALLCLOSE(
      at::_standard_gamma_grad(one_scalar, one_scalar),
      at::_standard_gamma_grad(one_with_dim, one_with_dim).sum());

  // Mixing scalar types is rejected.
  auto t1 = randn({3, 4}, T);
  auto t2 = randn({3, 4}, T).toType(kDouble);
  ASSERT_ANY_THROW(at::_standard_gamma_grad(t1, t2));
}
void TestWhere(Type& T, Tensor& t) {
  // Empty inputs yield an empty result.
  auto empty = ones({0}, T);
  auto& bT = T.toScalarType(ScalarType::Byte);
  auto empty_byte = ones({0}, bT);
  ASSERT_EQUAL(empty, at::where(empty_byte, empty, empty));

  // A zero-dim condition agrees with the equivalent one-element 1-d case.
  auto x_scalar = ones({}, T).mul(5);
  auto y_scalar = ones({}, T).mul(7);
  auto cond_scalar = zeros({}, bT);
  auto x_1d = x_scalar.unsqueeze(0);
  auto y_1d = y_scalar.unsqueeze(0);
  auto cond_1d = cond_scalar.unsqueeze(0);
  ASSERT_ALLCLOSE(
      at::where(cond_scalar, x_scalar, y_scalar).unsqueeze(0),
      at::where(cond_1d, x_1d, y_1d));
}
void test(Type& T, Type& AccT) {
  auto t = randn({3, 3}, T);
  TestSplit(T, t);
  TestChunk(T, t);
  TestStack(T, t);
  TestSize(T, t);
  TestMatmul(T, t, AccT);
  TestStandardGammaGrad(T, t);
  TestWhere(T, t);
}
TEST(TestNative, NativeTestCPU) {
  test(CPU(kFloat), CPU(kDouble));
}

TEST(TestNative, NativeTestGPU) {
  if (at::hasCUDA()) {
    test(CUDA(kFloat), CUDA(kDouble));
  }
}