Caffe2 - C++ API
A deep learning, cross-platform ML framework
register_aten_ops.cpp
1 #include "torch/csrc/jit/operator.h"
2 #include "torch/csrc/jit/custom_operator.h"
3 
4 #include "torch/csrc/autograd/profiler.h"
5 #include "torch/csrc/autograd/generated/variable_factories.h"
6 
7 #include <ATen/ATen.h>
8 #include <ATen/core/functional.h>
9 #include <ATen/core/interned_strings.h>
10 
11 #include <algorithm>
12 #include <array>
13 #include <cstddef>
14 #include <cstring>
15 #include <sstream>
16 #include <stdexcept>
17 #include <tuple>
18 #include <unordered_map>
19 #include <unordered_set>
20 #include <utility>
21 #include <vector>
22 
23 // ${generated_comment}
24 
25 // NOTE [Sharded File]: This file is generated in a sharded fashion to speed up
26 // incremental rebuilds. See the comment at the top of
27 // templates/VariableType.cpp for an analogous, in-depth discussion.
28 //
29 // Note that unlike VariableType.cpp, when sharding this file we take
30 // care to generate all overloads of a particular name in a single
31 // file and in a particular order. See gen_jit_dispatch.py for
32 // details.
33 
34 namespace torch { namespace jit {
35 
36 using autograd::Variable;
37 using autograd::variable_list;
38 using at::Scalar;
39 using at::ScalarType;
40 using at::Tensor;
41 using at::TensorOptions;
42 using at::DeviceGuard;
43 
44 using ::c10::fmap;
45 using ::c10::filter;
46 
47 namespace {
48 
49 // TODO: remove the toOptionalTensor and toListOfOptionalTensor
50 // when we remove the undefined tensor semantic from TH
51 
52 // XXX: This function is to specialize IValue for tensor type in
53 // interpreter, it should only be used in this file
54 at::Tensor toOptionalTensor(const IValue& v) {
55  if (v.isNone()) {
56  return at::Tensor();
57  }
58  return v.toTensor();
59 }
60 
61 // XXX: This function is to specialize IValue for list of optional
62 // tensor type in interpreter, it should only be used in this file
63 std::vector<Tensor> toListOfOptionalTensor(const IValue& v) {
64  // v is a list of optional tensor, loop over as generic list
65  auto vlist = v.toGenericListRef();
66  std::vector<Tensor> res;
67 
68  for (const IValue &v: vlist) {
69  res.emplace_back(toOptionalTensor(v));
70  }
71  return res;
72 }
73 
74 template<size_t N>
75 std::array<bool, N> as_bool_array(const std::vector<bool>& vec) {
76  std::array<bool, N> res;
77  AT_ASSERT(vec.size() == N);
78  std::copy(vec.begin(), vec.end(), res.begin());
79  return res;
80 }
81 
// Statically registers the hand-written ATen JIT operators below plus the
// generated operators spliced in at ${constructors}. Each Operator pairs a
// schema string with a Stack-based implementation: the lambda peeks its
// argument(s) off the interpreter stack, invokes the ATen call, drops the
// consumed inputs, packs the result back onto the stack, and returns 0.
82 RegisterOperators reg({
 83  Operator(
 84  "aten::get_device(Tensor self) -> int",
 85  [](Stack & stack) {
// Record this op for the autograd profiler under its ATen name.
 86  autograd::profiler::RecordFunction record("get_device");
// peek(stack, 0, 1): read argument 0 of 1 without popping yet.
 87  auto result = at::get_device(
 88  (std::move(peek(stack, 0, 1))).toTensor()
 89  );
// Pop the single consumed input, then push the result.
 90  drop(stack, 1);
 91  pack(stack, std::move(result));
 92  return 0;
 93  }
 94  ),
 95  Operator(
 96  "aten::storage_offset(Tensor self) -> int",
 97  [](Stack & stack) {
 98  autograd::profiler::RecordFunction record("storage_offset");
 99  auto result = ((std::move(peek(stack, 0, 1))).toTensor()).storage_offset();
 100  drop(stack, 1);
 101  pack(stack, std::move(result));
 102  return 0;
 103  }
 104  ),
 105 
// ${constructors} is expanded by gen_jit_dispatch.py with this shard's
// generated Operator entries (see NOTE [Sharded File] above).
 106  // Generated operators
 107  ${constructors}
 108 });
109 
110 } // anon namespace
111 
112 
113 }} // namespace torch::jit
Scalar represents a 0-dimensional tensor which contains a single element.
Definition: Scalar.h:22
Definition: jit_type.h:17
RAII guard that sets a certain default device in its constructor, and changes it back to the device that was originally active upon destruction.
Definition: DeviceGuard.h:19