Caffe2 - C++ API
A deep learning, cross platform ML framework
TensorMethods.h
1 #pragma once
2 
3 #include <ATen/core/Tensor.h>
4 #include <c10/core/Scalar.h>
5 #include <c10/macros/Macros.h>
6 #include <ATen/core/SparseTensorRef.h>
7 #include <ATen/core/Type.h>
8 #include <c10/core/TensorOptions.h>
9 
10 namespace at {
11 
12 inline Tensor Tensor::toType(const Type & t, bool non_blocking) const {
13  if(type() == t)
14  return *this;
15  return t.copy(*this, non_blocking);
16 }
17 
18 inline Tensor Tensor::cpu() const {
19  return toType(type().cpu());
20 }
21 
22 inline Tensor Tensor::cuda() const {
23  return toType(type().cuda());
24 }
25 
26 inline Tensor Tensor::hip() const {
27  return toType(type().hip());
28 }
29 
// In-place copy of src's values into *this, dispatched through this
// tensor's Type. non_blocking is forwarded to the backend copy (may allow
// the transfer to be asynchronous w.r.t. the host — semantics live in the
// Type implementation). Returns whatever Type::copy_ returns (the
// destination tensor reference).
inline Tensor & Tensor::copy_(const Tensor & src, bool non_blocking) {
 return type().copy_(*this, src, non_blocking);
}
33 
34 inline Tensor Tensor::toType(ScalarType t) const {
35  return toType(type().toScalarType(t));
36 }
37 
38 inline Tensor Tensor::toBackend(Backend b) const {
39  return toType(type().toBackend(b));
40 }
41 
42 inline TensorOptions Tensor::options() const {
43  return TensorOptions().dtype(dtype())
44  .device(device())
45  .layout(layout())
46  .is_variable(is_variable());
47 }
48 
// Computes the gradient of this tensor w.r.t. graph leaves, dispatched
// through this tensor's Type to the autograd engine.
//   gradient:     gradient of some quantity w.r.t. *this; may be nullopt
//                 (the handling of the empty case lives in the Type
//                 implementation — presumably valid only for scalar
//                 tensors, confirm against the autograd engine).
//   keep_graph:   if true, the graph used to compute gradients is retained
//                 so backward can be invoked again.
//   create_graph: if true, the backward pass itself is recorded, enabling
//                 higher-order derivatives.
inline void Tensor::backward(
 c10::optional<Tensor> gradient,
 bool keep_graph,
 bool create_graph) {
 type().backward(*this, std::move(gradient), keep_graph, create_graph);
}
55 
// Replaces this tensor's underlying data with that of new_data,
// dispatched through this tensor's Type. (Mutation semantics — what
// happens to existing views/metadata — are defined by the Type
// implementation, not visible here.)
inline void Tensor::set_data(Tensor new_data) {
 type().set_data(*this, new_data);
}
59 
60 // all static inline to allow for inlining of the non-dynamic part of dispatch
61 ${tensor_method_definitions}
62 
// True if this Tensor is actually a torch::autograd::Variable;
// delegates to the TensorImpl without dispatch.
inline bool Tensor::is_variable() const noexcept {
 return impl_->is_variable();
}
66 
// Returns this tensor's data type descriptor (caffe2::TypeMeta);
// delegates to the TensorImpl without dispatch.
inline caffe2::TypeMeta Tensor::dtype() const noexcept {
 return impl_->dtype();
}
70 
// Returns this tensor's layout (e.g. strided vs. sparse);
// delegates to the TensorImpl without dispatch.
inline Layout Tensor::layout() const noexcept {
 return impl_->layout();
}
74 
// Returns the Device this tensor lives on; delegates to the TensorImpl.
// Not noexcept: impl_->device() may throw (unlike the accessors above).
inline Device Tensor::device() const {
 return impl_->device();
}
78 
// Returns this tensor's device index (e.g. which CUDA device it is on).
inline int64_t Tensor::get_device() const {
 // NB: this is not a native function to avoid dispatching overhead.
 return impl_->get_device();
}
83 
84 inline int64_t get_device(Tensor self) {
85  return self.get_device();
86 }
87 
// True if this tensor has the CUDA backend.
inline bool Tensor::is_cuda() const {
 // NB: this is not a native function to avoid dispatching overhead.
 return impl_->is_cuda();
}
92 
93 inline bool is_cuda(Tensor self) {
94  return self.is_cuda();
95 }
96 
// True if this tensor has the HIP (ROCm) backend.
inline bool Tensor::is_hip() const {
 // NB: this is not a native function to avoid dispatching overhead.
 return impl_->is_hip();
}
101 
102 inline bool is_hip(Tensor self) {
103  return self.is_hip();
104 }
105 
// True if this tensor has a sparse backend.
inline bool Tensor::is_sparse() const {
 // NB: this is not a native function to avoid dispatching overhead.
 return impl_->is_sparse();
}
110 
111 inline bool is_sparse(Tensor self) {
112  return self.is_sparse();
113 }
114 
// Generates the explicit specializations of Tensor::data<T>() for every
// scalar type: each one checks at runtime (AT_CHECK) that the tensor's
// scalar_type() matches the requested T, then casts the untyped
// data_ptr() to T*. The caller owns nothing — the pointer aliases the
// tensor's storage. (Comments cannot appear inside the '\'-continued
// macro body, so the documentation lives here.)
#define DEFINE_CAST(T, name, _) \
 template <> \
 inline T* Tensor::data() const { \
 AT_CHECK( \
 scalar_type() == ScalarType::name, \
 "expected scalar type ", \
 #name, \
 " but found ", \
 c10::toString(scalar_type())); \
 return static_cast<T*>(this->data_ptr()); \
 }

// Instantiate for all scalar types except complex-half, then retire the
// helper macro so it cannot leak to includers.
AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_EXCEPT_COMPLEX_HALF(DEFINE_CAST)
#undef DEFINE_CAST
129 
// Generates the explicit specializations of Tensor::item<T>(): each calls
// the non-template item() overload (which yields a Scalar) and then
// converts via the pasted Scalar::to##name() accessor — e.g. item<float>()
// becomes item().toFloat(). Not recursive: the unqualified item() inside
// the body resolves to the non-template overload.
#define DEFINE_ITEM(T, name, _) \
 template <> \
 inline T Tensor::item() const { \
 return item().to##name(); \
 }

// Instantiate for all scalar types except complex-half, then retire the
// helper macro so it cannot leak to includers.
AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_EXCEPT_COMPLEX_HALF(DEFINE_ITEM)
#undef DEFINE_ITEM
138 
139 } //namespace at
void backward(c10::optional< Tensor > gradient=c10::nullopt, bool keep_graph=false, bool create_graph=false)
Computes the gradient of current tensor w.r.t. graph leaves.
Definition: TensorMethods.h:49
bool is_variable() const
True if a tensor is a variable.
Definition: TensorImpl.h:809
bool is_hip() const
Returns if a Tensor has HIP backend.
TensorOptions options() const
Returns the TensorOptions corresponding to this Tensor.
Definition: TensorMethods.h:42
const caffe2::TypeMeta & dtype() const
Returns the TypeMeta of a tensor, which describes what data type it is (e.g., int, float, ...)
Definition: TensorImpl.h:629
int64_t get_device() const
Returns a Tensor's device index.
Layout layout() const noexcept
Returns a Tensor's layout. Defined in Type.h.
caffe2::TypeMeta dtype() const noexcept
Returns a Tensor's dtype (TypeMeta). Defined in TensorMethods.h.
bool is_variable() const noexcept
Returns true if the Tensor is actually a torch::autograd::Variable.
bool is_cuda() const
Returns if a Tensor has CUDA backend.
Device device() const
Returns a Tensor&#39;s device.
TensorOptions(T &&device)
A class to encapsulate construction axes of a Tensor.
Definition: TensorOptions.h:80
bool is_sparse() const
Returns if a Tensor has sparse backend.
Flush-To-Zero and Denormals-Are-Zero mode.
TypeMeta is a thin class that allows us to store the type of a container such as a blob...
Definition: typeid.h:324