Caffe2 - C++ API
A deep learning, cross-platform ML framework
Tensor.h
1 #pragma once
2 
3 #include <c10/core/Device.h>
4 #include <c10/core/Layout.h>
5 #include <c10/core/Scalar.h>
6 #include <c10/core/ScalarType.h>
7 #include <ATen/core/SparseTensorRef.h>
8 #include <c10/core/Storage.h>
9 #include <ATen/core/TensorAccessor.h>
10 #include <c10/core/TensorImpl.h>
11 #include <c10/core/UndefinedTensorImpl.h>
12 #include <c10/util/Exception.h>
13 #include <c10/util/Optional.h>
14 #include <c10/core/Tensor.h>
15 #include <ATen/core/LegacyTypeDispatch.h>
16 
17 namespace c10{
18 struct TensorOptions;
19 }
20 namespace at {
21 struct Generator;
22 struct Type;
23 class Tensor;
24 } // namespace at
25 
26 namespace at {
27 
28 class Tensor;
29 using TensorList = ArrayRef<Tensor>;
30 
31 // Tensor is a "generic" object holding a pointer to the underlying TensorImpl object, which
32 // has an embedded reference count. In this way, Tensor is similar to boost::intrusive_ptr.
33 //
34 // For example:
35 //
36 // void func(Tensor a) {
37 // Tensor b = a;
38 // ...
39 // }
40 //
41 // In this example, when we say Tensor b = a, we are creating a new object that points to the
42 // same underlying TensorImpl, and bumps its reference count. When b goes out of scope, the
43 // destructor decrements the reference count by calling release() on the TensorImpl it points to.
44 // The existing constructors, operator overloads, etc. take care to implement the correct semantics.
45 //
46 // Note that Tensor can also be NULL, i.e. it is not associated with any underlying TensorImpl, and
47 // special care must be taken to handle this.
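//
// A minimal illustrative sketch of these semantics (the function and the
// caller-supplied Tensor are hypothetical, not part of this header):
//
// void observe(Tensor a) {
//   size_t n = a.use_count();   // refcount of the shared TensorImpl
//   Tensor b = a;               // bumps the refcount: use_count() == n + 1
// }                             // b and a release their references here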
48 class CAFFE2_API Tensor {
49  public:
50  Tensor(){};
51  // This constructor should not be used by end users and is an implementation
52  // detail invoked by autogenerated code.
53  explicit Tensor(
54  c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl)
55  : impl_(std::move(tensor_impl)) {
56  if (impl_.get() == nullptr) {
57  throw std::runtime_error("TensorImpl with nullptr is not supported");
58  }
59  }
60  Tensor(const Tensor&) = default;
61  Tensor(Tensor&&) = default;
62 
63 
64  public:
65  // Creates a new wrapper from TensorImpl. Intentionally a free method because
66  // it should be used with care. Checks necessary invariants
67  static Tensor wrap_tensor_impl(
68  c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl) {
69  Tensor r(std::move(tensor_impl));
70  r.enforce_invariants();
71  return r;
72  }
73 
74  explicit Tensor(C10Tensor tensor) : impl_(std::move(tensor).impl()) {
75  enforce_invariants();
76  }
77 
78  explicit operator C10Tensor() const & {
79  return C10Tensor(impl_);
80  }
81 
82  explicit operator C10Tensor() && {
83  return C10Tensor(std::move(impl_));
84  }
85 
86  int64_t dim() const {
87  return impl_->dim();
88  }
89  int64_t storage_offset() const {
90  return impl_->storage_offset();
91  }
92 
93  TensorImpl * unsafeGetTensorImpl() const {
94  return impl_.get();
95  }
96  TensorImpl * unsafeReleaseTensorImpl() {
97  return impl_.release();
98  }
99  const c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>& getIntrusivePtr() const {
100  return impl_;
101  }
102 
103  bool defined() const {
104  return impl_;
105  }
106 
107  void reset() {
108  impl_.reset();
109  }
110 
111  // The following overloads are very intriguing. Consider the following
112  // program:
113  //
114  // x[1] = 3;
115  //
116  // We would expect that the first entry of x is written to 3. But how can we
117  // actually achieve this? x[1] evaluates to a tensor...
118  //
119  // The answer is, using a ref-qualifier. x[1] is an rvalue, which cannot be
120  // (profitably) assigned to in the traditional sense, so we overload
121  // assignment to mean, "Actually, copy 3 into the tensor data." This is done
122  // with an rvalue-reference ref-qualified overload (the methods with && at the
123  // end of their type.)
124  //
125  // There's one more fly in the ointment: We also want
126  //
127  // Tensor x = y;
128  //
129  // to work, and we want it NOT to copy. So we need a traditional operator=
130  // overload. But we MUST specify a mutable lvalue ref-qualifier, to
131  // disambiguate the traditional overload from the rvalue-reference
132  // ref-qualified overload. Otherwise, it will be ambiguous, because
133  // a non ref-qualified method is eligible for all situations.
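//
// Illustrative sketch of both assignment flavors (y is a hypothetical
// defined Tensor):
//
// Tensor x;
// x = y;      // lvalue-qualified operator=: x now shares y's TensorImpl, no copy
// x[1] = 3;   // rvalue-qualified operator=(Scalar) &&: writes 3 into the data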
134 
135  // Unfortunately, we have to write these constructors out manually
136  // to work around an MSVC bug:
137  // error C2580: 'at::Tensor &at::Tensor::operator =(const at::Tensor &) &':
138  // multiple versions of a defaulted special member functions are not allowed
139  // Tensor& operator=(const Tensor&) & = default;
140  // Tensor& operator=(Tensor&&) & = default;
141  Tensor& operator=(const Tensor& x) & {
142  impl_ = x.impl_;
143  return *this;
144  }
145  Tensor& operator=(Tensor&& x) & {
146  impl_ = std::move(x.impl_);
147  return *this;
148  }
149 
150  Tensor& operator=(Scalar v) &&;
151  Tensor& operator=(const Tensor&) &&;
152  Tensor& operator=(Tensor&&) &&;
153 
154  bool is_same(const Tensor& other) const noexcept {
155  return impl_ == other.impl_;
156  }
157  size_t use_count() const noexcept {
158  return impl_.use_count();
159  }
160  size_t weak_use_count() const noexcept {
161  return impl_.weak_use_count();
162  }
163 
164  const char * toString() const;
165 
166  IntArrayRef sizes() const {
167  return impl_->sizes();
168  }
169  IntArrayRef strides() const {
170  return impl_->strides();
171  }
172  int64_t ndimension() const {
173  return dim();
174  }
175  bool is_contiguous() const {
176  return impl_->is_contiguous();
177  }
178 
179  // Total bytes consumed by the "view" of elements of the array. Does not
180  // include size of metadata. The number reported here does not necessarily
181  // correspond to the true physical memory consumed by a tensor; instead,
182  // it reports the memory the tensor would take *if* it were contiguous.
183  // Defined to be numel() * itemsize()
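// For example (hypothetical): a float tensor with sizes {2, 3} reports
// nbytes() == 6 * sizeof(float) == 24, regardless of its strides.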
184  size_t nbytes() const {
185  return impl_->numel() * impl_->itemsize();
186  }
187 
188  // Length of one array element in bytes. This is the traditional
189  // Numpy naming.
190  size_t itemsize() const {
191  return impl_->itemsize();
192  }
193 
194  // Same as itemsize(). This is the PyTorch naming.
195  size_t element_size() const {
196  return impl_->itemsize();
197  }
198 
199  Type & type() const {
200  return legacyTensorType(*impl_);
201  }
202  TensorTypeId type_id() const {
203  return impl_->type_id();
204  }
205  ScalarType scalar_type() const {
206  return typeMetaToScalarType(impl_->dtype());
207  }
208  bool has_storage() const {
209  return defined() && impl_->has_storage();
210  }
211  const Storage& storage() const {
212  return impl_->storage();
213  }
214  bool is_alias_of(const at::Tensor& other) const{
215  return impl_->storage().is_alias_of(other.storage());
216  }
217  Tensor toType(const Type & t, bool non_blocking=false) const;
218  Tensor & copy_(const Tensor & src, bool non_blocking=false);
219  Tensor toType(ScalarType t) const;
220  Tensor toBackend(Backend b) const;
221 
224  bool is_variable() const noexcept;
225 
227  Layout layout() const noexcept;
228 
230  caffe2::TypeMeta dtype() const noexcept;
231 
233  Device device() const;
234 
236  int64_t get_device() const;
237 
239  bool is_cuda() const;
240 
242  bool is_hip() const;
243 
245  bool is_sparse() const;
246 
249  TensorOptions options() const;
250 
251  template<typename T>
252  T * data() const;
253 
254  template <typename T>
255  T item() const;
256 
257  // Purposely not defined here to avoid inlining
258  void print() const;
259 
260  // Return a `TensorAccessor` for CPU `Tensor`s. You have to specify scalar type and
261  // dimension.
262  template<typename T, size_t N>
263  TensorAccessor<T,N> accessor() const& {
264  static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data<T>()");
265  AT_CHECK(dim() == N, "expected ", N, " dims but tensor has ", dim());
266  return TensorAccessor<T,N>(data<T>(),sizes().data(),strides().data());
267  }
268  template<typename T, size_t N>
269  TensorAccessor<T,N> accessor() && = delete;
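// A hypothetical usage sketch for accessor() (t is assumed to be a 2-D
// float CPU tensor):
//
// auto a = t.accessor<float, 2>();   // checks that dim() == 2
// for (int64_t i = 0; i < t.size(0); i++)
//   for (int64_t j = 0; j < t.size(1); j++)
//     a[i][j] = 0.f;                 // direct element access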
270 
271  // Return a `PackedTensorAccessor` for CUDA `Tensor`s. You have to specify scalar type and
272  // dimension. You can optionally specify RestrictPtrTraits as a template parameter to
273  // cast the data pointer to a __restrict__ pointer.
274  // In order to use this, your CUDA kernel has to take a corresponding PackedTensorAccessor
275  // as an argument.
276  template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
277  PackedTensorAccessor<T,N,PtrTraits,index_t> packed_accessor() const& {
278  static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data<T>()");
279  AT_CHECK(dim() == N, "expected ", N, " dims but tensor has ", dim());
280  return PackedTensorAccessor<T,N,PtrTraits,index_t>(static_cast<typename PtrTraits<T>::PtrType>(data<T>()),sizes().data(),strides().data());
281  }
282  template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
283  PackedTensorAccessor<T,N> packed_accessor() && = delete;
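// A hypothetical CUDA usage sketch (kernel name and launch configuration
// are illustrative only):
//
// __global__ void fill_ones(PackedTensorAccessor<float, 1> a) {
//   int i = blockIdx.x * blockDim.x + threadIdx.x;
//   if (i < a.size(0)) a[i] = 1.0f;
// }
// // launched from host code as, e.g.:
// // fill_ones<<<blocks, threads>>>(t.packed_accessor<float, 1>());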
284 
285  Tensor operator-() const;
286  Tensor& operator+=(const Tensor & other);
287  Tensor& operator+=(Scalar other);
288  Tensor& operator-=(const Tensor & other);
289  Tensor& operator-=(Scalar other);
290  Tensor& operator*=(const Tensor & other);
291  Tensor& operator*=(Scalar other);
292  Tensor& operator/=(const Tensor & other);
293  Tensor& operator/=(Scalar other);
294  Tensor operator[](Scalar index) const;
295  Tensor operator[](Tensor index) const;
296  Tensor operator[](int64_t index) const;
297 
298  Tensor cpu() const;
299  Tensor cuda() const;
300  Tensor hip() const;
301 
302  // ~~~~~ Autograd API ~~~~~
303 
304  Tensor& set_requires_grad(bool requires_grad) {
305  impl_->set_requires_grad(requires_grad);
306  return *this;
307  }
308  bool requires_grad() const {
309  return impl_->requires_grad();
310  }
311 
312  Tensor& grad() {
313  return impl_->grad();
314  }
315  const Tensor& grad() const {
316  return impl_->grad();
317  }
318 
319  void set_data(Tensor new_data);
320 
322  void backward(
323  c10::optional<Tensor> gradient = c10::nullopt,
324  bool keep_graph = false,
325  bool create_graph = false);
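// Illustrative sketch of the autograd API above (x is a hypothetical
// floating-point Tensor; requires autograd/Variable support):
//
// Tensor x = ...;
// x.set_requires_grad(true);
// Tensor loss = x.mul(x).sum();
// loss.backward();               // populates x.grad()
// Tensor g = x.grad();           // gradient of loss w.r.t. x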
326 
327  // STOP. Thinking of adding a method here, which only makes use
328  // of other ATen methods? Define it in native_functions.yaml.
329 
330  //example
331  //Tensor * add(Tensor & b);
332  Tensor abs() const;
333  Tensor & abs_();
334  Tensor acos() const;
335  Tensor & acos_();
336  Tensor add(const Tensor & other, Scalar alpha=1) const;
337  Tensor & add_(const Tensor & other, Scalar alpha=1);
338  Tensor add(Scalar other, Scalar alpha=1) const;
339  Tensor & add_(Scalar other, Scalar alpha=1);
340  Tensor addmv(const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1) const;
341  Tensor & addmv_(const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1);
342  Tensor addr(const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1) const;
343  Tensor & addr_(const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1);
344  Tensor all(int64_t dim, bool keepdim=false) const;
345  bool allclose(const Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) const;
346  Tensor any(int64_t dim, bool keepdim=false) const;
347  Tensor argmax(c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) const;
348  Tensor argmin(c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) const;
349  Tensor as_strided(IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) const;
350  Tensor & as_strided_(IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt);
351  Tensor asin() const;
352  Tensor & asin_();
353  Tensor atan() const;
354  Tensor & atan_();
355  Tensor baddbmm(const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1) const;
356  Tensor & baddbmm_(const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1);
357  Tensor bernoulli(Generator * generator=nullptr) const;
358  Tensor & bernoulli_(const Tensor & p, Generator * generator=nullptr);
359  Tensor & bernoulli_(double p=0.5, Generator * generator=nullptr);
360  Tensor bernoulli(double p, Generator * generator=nullptr) const;
361  Tensor bincount(const Tensor & weights={}, int64_t minlength=0) const;
362  Tensor bmm(const Tensor & mat2) const;
363  Tensor ceil() const;
364  Tensor & ceil_();
365  std::vector<Tensor> chunk(int64_t chunks, int64_t dim=0) const;
366  Tensor clamp(c10::optional<Scalar> min=c10::nullopt, c10::optional<Scalar> max=c10::nullopt) const;
367  Tensor & clamp_(c10::optional<Scalar> min=c10::nullopt, c10::optional<Scalar> max=c10::nullopt);
368  Tensor clamp_max(Scalar max) const;
369  Tensor & clamp_max_(Scalar max);
370  Tensor clamp_min(Scalar min) const;
371  Tensor & clamp_min_(Scalar min);
372  Tensor contiguous() const;
373  Tensor cos() const;
374  Tensor & cos_();
375  Tensor cosh() const;
376  Tensor & cosh_();
377  Tensor cumsum(int64_t dim, ScalarType dtype) const;
378  Tensor cumsum(int64_t dim) const;
379  Tensor cumprod(int64_t dim, ScalarType dtype) const;
380  Tensor cumprod(int64_t dim) const;
381  Tensor det() const;
382  Tensor diag_embed(int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) const;
383  Tensor diagflat(int64_t offset=0) const;
384  Tensor diagonal(int64_t offset=0, int64_t dim1=0, int64_t dim2=1) const;
385  Tensor div(const Tensor & other) const;
386  Tensor & div_(const Tensor & other);
387  Tensor div(Scalar other) const;
388  Tensor & div_(Scalar other);
389  Tensor dot(const Tensor & tensor) const;
390  Tensor & resize_(IntArrayRef size);
391  Tensor erf() const;
392  Tensor & erf_();
393  Tensor erfc() const;
394  Tensor & erfc_();
395  Tensor exp() const;
396  Tensor & exp_();
397  Tensor expm1() const;
398  Tensor & expm1_();
399  Tensor expand(IntArrayRef size, bool implicit=false) const;
400  Tensor expand_as(const Tensor & other) const;
401  Tensor flatten(int64_t start_dim=0, int64_t end_dim=-1) const;
402  Tensor & fill_(Scalar value);
403  Tensor & fill_(const Tensor & value);
404  Tensor floor() const;
405  Tensor & floor_();
406  Tensor ger(const Tensor & vec2) const;
407  Tensor fft(int64_t signal_ndim, bool normalized=false) const;
408  Tensor ifft(int64_t signal_ndim, bool normalized=false) const;
409  Tensor rfft(int64_t signal_ndim, bool normalized=false, bool onesided=true) const;
410  Tensor irfft(int64_t signal_ndim, bool normalized=false, bool onesided=true, IntArrayRef signal_sizes={}) const;
411  Tensor index(TensorList indices) const;
412  Tensor & index_copy_(int64_t dim, const Tensor & index, const Tensor & source);
413  Tensor index_copy(int64_t dim, const Tensor & index, const Tensor & source) const;
414  Tensor & index_put_(TensorList indices, const Tensor & values, bool accumulate=false);
415  Tensor index_put(TensorList indices, const Tensor & values, bool accumulate=false) const;
416  Tensor inverse() const;
417  Tensor isclose(const Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) const;
418  bool is_distributed() const;
419  bool is_floating_point() const;
420  bool is_complex() const;
421  bool is_nonzero() const;
422  bool is_same_size(const Tensor & other) const;
423  bool is_signed() const;
424  std::tuple<Tensor,Tensor> kthvalue(int64_t k, int64_t dim=-1, bool keepdim=false) const;
425  Tensor log() const;
426  Tensor & log_();
427  Tensor log10() const;
428  Tensor & log10_();
429  Tensor log1p() const;
430  Tensor & log1p_();
431  Tensor log2() const;
432  Tensor & log2_();
433  Tensor logdet() const;
434  Tensor log_softmax(int64_t dim, ScalarType dtype) const;
435  Tensor log_softmax(int64_t dim) const;
436  Tensor logsumexp(IntArrayRef dim, bool keepdim=false) const;
437  Tensor matmul(const Tensor & other) const;
438  Tensor matrix_power(int64_t n) const;
439  std::tuple<Tensor,Tensor> max(int64_t dim, bool keepdim=false) const;
440  Tensor max_values(IntArrayRef dim, bool keepdim=false) const;
441  Tensor mean(ScalarType dtype) const;
442  Tensor mean() const;
443  Tensor mean(IntArrayRef dim, bool keepdim, ScalarType dtype) const;
444  Tensor mean(IntArrayRef dim, bool keepdim=false) const;
445  Tensor mean(IntArrayRef dim, ScalarType dtype) const;
446  std::tuple<Tensor,Tensor> median(int64_t dim, bool keepdim=false) const;
447  std::tuple<Tensor,Tensor> min(int64_t dim, bool keepdim=false) const;
448  Tensor min_values(IntArrayRef dim, bool keepdim=false) const;
449  Tensor mm(const Tensor & mat2) const;
450  std::tuple<Tensor,Tensor> mode(int64_t dim=-1, bool keepdim=false) const;
451  Tensor mul(const Tensor & other) const;
452  Tensor & mul_(const Tensor & other);
453  Tensor mul(Scalar other) const;
454  Tensor & mul_(Scalar other);
455  Tensor mv(const Tensor & vec) const;
456  Tensor mvlgamma(int64_t p) const;
457  Tensor & mvlgamma_(int64_t p);
458  Tensor narrow_copy(int64_t dim, int64_t start, int64_t length) const;
459  Tensor narrow(int64_t dim, int64_t start, int64_t length) const;
460  Tensor permute(IntArrayRef dims) const;
461  Tensor pin_memory() const;
462  Tensor pinverse(double rcond=1e-15) const;
463  Tensor repeat(IntArrayRef repeats) const;
464  Tensor reshape(IntArrayRef shape) const;
465  Tensor reshape_as(const Tensor & other) const;
466  Tensor round() const;
467  Tensor & round_();
468  Tensor relu() const;
469  Tensor & relu_();
470  Tensor prelu(const Tensor & weight) const;
471  std::tuple<Tensor,Tensor> prelu_backward(const Tensor & grad_output, const Tensor & weight) const;
472  Tensor hardshrink(Scalar lambd=0.5) const;
473  Tensor hardshrink_backward(const Tensor & grad_out, Scalar lambd) const;
474  Tensor rsqrt() const;
475  Tensor & rsqrt_();
476  Tensor select(int64_t dim, int64_t index) const;
477  Tensor sigmoid() const;
478  Tensor & sigmoid_();
479  Tensor sin() const;
480  Tensor & sin_();
481  Tensor sinh() const;
482  Tensor & sinh_();
483  Tensor detach() const;
484  Tensor & detach_();
485  int64_t size(int64_t dim) const;
486  Tensor slice(int64_t dim=0, int64_t start=0, int64_t end=9223372036854775807, int64_t step=1) const;
487  std::tuple<Tensor,Tensor> slogdet() const;
488  Tensor smm(const Tensor & mat2) const;
489  Tensor softmax(int64_t dim, ScalarType dtype) const;
490  Tensor softmax(int64_t dim) const;
491  std::vector<Tensor> split(int64_t split_size, int64_t dim=0) const;
492  std::vector<Tensor> split_with_sizes(IntArrayRef split_sizes, int64_t dim=0) const;
493  Tensor squeeze() const;
494  Tensor squeeze(int64_t dim) const;
495  Tensor & squeeze_();
496  Tensor & squeeze_(int64_t dim);
497  Tensor sspaddmm(const Tensor & mat1, const Tensor & mat2, Scalar beta=1, Scalar alpha=1) const;
498  Tensor stft(int64_t n_fft, c10::optional<int64_t> hop_length=c10::nullopt, c10::optional<int64_t> win_length=c10::nullopt, const Tensor & window={}, bool normalized=false, bool onesided=true) const;
499  int64_t stride(int64_t dim) const;
500  Tensor sum(ScalarType dtype) const;
501  Tensor sum() const;
502  Tensor sum(IntArrayRef dim, bool keepdim, ScalarType dtype) const;
503  Tensor sum(IntArrayRef dim, bool keepdim=false) const;
504  Tensor sum(IntArrayRef dim, ScalarType dtype) const;
505  Tensor sum_to_size(IntArrayRef size) const;
506  Tensor sqrt() const;
507  Tensor & sqrt_();
508  Tensor std(bool unbiased=true) const;
509  Tensor std(IntArrayRef dim, bool unbiased=true, bool keepdim=false) const;
510  Tensor prod(ScalarType dtype) const;
511  Tensor prod() const;
512  Tensor prod(int64_t dim, bool keepdim, ScalarType dtype) const;
513  Tensor prod(int64_t dim, bool keepdim=false) const;
514  Tensor prod(int64_t dim, ScalarType dtype) const;
515  Tensor t() const;
516  Tensor & t_();
517  Tensor tan() const;
518  Tensor & tan_();
519  Tensor tanh() const;
520  Tensor & tanh_();
521  Tensor transpose(int64_t dim0, int64_t dim1) const;
522  Tensor & transpose_(int64_t dim0, int64_t dim1);
523  Tensor flip(IntArrayRef dims) const;
524  Tensor roll(IntArrayRef shifts, IntArrayRef dims={}) const;
525  Tensor rot90(int64_t k=1, IntArrayRef dims={0,1}) const;
526  Tensor trunc() const;
527  Tensor & trunc_();
528  Tensor type_as(const Tensor & other) const;
529  Tensor unsqueeze(int64_t dim) const;
530  Tensor & unsqueeze_(int64_t dim);
531  Tensor var(bool unbiased=true) const;
532  Tensor var(IntArrayRef dim, bool unbiased=true, bool keepdim=false) const;
533  Tensor view_as(const Tensor & other) const;
534  Tensor where(const Tensor & condition, const Tensor & other) const;
535  Tensor norm(c10::optional<Scalar> p, ScalarType dtype) const;
536  Tensor norm(Scalar p=2) const;
537  Tensor norm(c10::optional<Scalar> p, IntArrayRef dim, bool keepdim, ScalarType dtype) const;
538  Tensor norm(c10::optional<Scalar> p, IntArrayRef dim, bool keepdim=false) const;
539  Tensor clone() const;
540  Tensor & resize_as_(const Tensor & the_template);
541  Tensor pow(Scalar exponent) const;
542  Tensor & zero_();
543  Tensor sub(const Tensor & other, Scalar alpha=1) const;
544  Tensor & sub_(const Tensor & other, Scalar alpha=1);
545  Tensor sub(Scalar other, Scalar alpha=1) const;
546  Tensor & sub_(Scalar other, Scalar alpha=1);
547  Tensor addmm(const Tensor & mat1, const Tensor & mat2, Scalar beta=1, Scalar alpha=1) const;
548  Tensor & addmm_(const Tensor & mat1, const Tensor & mat2, Scalar beta=1, Scalar alpha=1);
549  Tensor & sparse_resize_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim);
550  Tensor & sparse_resize_and_clear_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim);
551  Tensor sparse_mask(SparseTensorRef mask) const;
552  Tensor to_dense() const;
553  int64_t sparse_dim() const;
554  int64_t _dimI() const;
555  int64_t dense_dim() const;
556  int64_t _dimV() const;
557  int64_t _nnz() const;
558  Tensor coalesce() const;
559  bool is_coalesced() const;
560  Tensor _indices() const;
561  Tensor _values() const;
562  Tensor & _coalesced_(bool coalesced);
563  Tensor indices() const;
564  Tensor values() const;
565  int64_t numel() const;
566  std::vector<Tensor> unbind(int64_t dim=0) const;
567  Tensor to_sparse(int64_t sparse_dim) const;
568  Tensor to_sparse() const;
569  Tensor to(const TensorOptions & options, bool non_blocking=false, bool copy=false) const;
570  Tensor to(Device device, ScalarType dtype, bool non_blocking=false, bool copy=false) const;
571  Tensor to(ScalarType dtype, bool non_blocking=false, bool copy=false) const;
572  Tensor to(const Tensor & other, bool non_blocking=false, bool copy=false) const;
573  Scalar item() const;
574  void* data_ptr() const;
575  Tensor & set_(Storage source);
576  Tensor & set_(Storage source, int64_t storage_offset, IntArrayRef size, IntArrayRef stride={});
577  Tensor & set_(const Tensor & source);
578  Tensor & set_();
579  bool is_set_to(const Tensor & tensor) const;
580  Tensor & masked_fill_(const Tensor & mask, Scalar value);
581  Tensor masked_fill(const Tensor & mask, Scalar value) const;
582  Tensor & masked_fill_(const Tensor & mask, const Tensor & value);
583  Tensor masked_fill(const Tensor & mask, const Tensor & value) const;
584  Tensor & masked_scatter_(const Tensor & mask, const Tensor & source);
585  Tensor masked_scatter(const Tensor & mask, const Tensor & source) const;
586  Tensor view(IntArrayRef size) const;
587  Tensor & put_(const Tensor & index, const Tensor & source, bool accumulate=false);
588  Tensor & index_add_(int64_t dim, const Tensor & index, const Tensor & source);
589  Tensor index_add(int64_t dim, const Tensor & index, const Tensor & source) const;
590  Tensor & index_fill_(int64_t dim, const Tensor & index, Scalar value);
591  Tensor index_fill(int64_t dim, const Tensor & index, Scalar value) const;
592  Tensor & index_fill_(int64_t dim, const Tensor & index, const Tensor & value);
593  Tensor index_fill(int64_t dim, const Tensor & index, const Tensor & value) const;
594  Tensor & scatter_(int64_t dim, const Tensor & index, const Tensor & src);
595  Tensor scatter(int64_t dim, const Tensor & index, const Tensor & src) const;
596  Tensor & scatter_(int64_t dim, const Tensor & index, Scalar value);
597  Tensor scatter(int64_t dim, const Tensor & index, Scalar value) const;
598  Tensor & scatter_add_(int64_t dim, const Tensor & index, const Tensor & src);
599  Tensor scatter_add(int64_t dim, const Tensor & index, const Tensor & src) const;
600  Tensor & lt_(Scalar other);
601  Tensor & lt_(const Tensor & other);
602  Tensor & gt_(Scalar other);
603  Tensor & gt_(const Tensor & other);
604  Tensor & le_(Scalar other);
605  Tensor & le_(const Tensor & other);
606  Tensor & ge_(Scalar other);
607  Tensor & ge_(const Tensor & other);
608  Tensor & eq_(Scalar other);
609  Tensor & eq_(const Tensor & other);
610  Tensor & ne_(Scalar other);
611  Tensor & ne_(const Tensor & other);
612  Tensor __and__(Scalar other) const;
613  Tensor __and__(const Tensor & other) const;
614  Tensor & __iand__(Scalar other);
615  Tensor & __iand__(const Tensor & other);
616  Tensor __or__(Scalar other) const;
617  Tensor __or__(const Tensor & other) const;
618  Tensor & __ior__(Scalar other);
619  Tensor & __ior__(const Tensor & other);
620  Tensor __xor__(Scalar other) const;
621  Tensor __xor__(const Tensor & other) const;
622  Tensor & __ixor__(Scalar other);
623  Tensor & __ixor__(const Tensor & other);
624  Tensor __lshift__(Scalar other) const;
625  Tensor __lshift__(const Tensor & other) const;
626  Tensor & __ilshift__(Scalar other);
627  Tensor & __ilshift__(const Tensor & other);
628  Tensor __rshift__(Scalar other) const;
629  Tensor __rshift__(const Tensor & other) const;
630  Tensor & __irshift__(Scalar other);
631  Tensor & __irshift__(const Tensor & other);
632  Tensor & lgamma_();
633  Tensor & atan2_(const Tensor & other);
634  Tensor & tril_(int64_t diagonal=0);
635  Tensor & triu_(int64_t diagonal=0);
636  Tensor & digamma_();
637  Tensor & polygamma_(int64_t n);
638  Tensor & erfinv_();
639  Tensor & frac_();
640  Tensor & renorm_(Scalar p, int64_t dim, Scalar maxnorm);
641  Tensor & reciprocal_();
642  Tensor & neg_();
643  Tensor & pow_(Scalar exponent);
644  Tensor & pow_(const Tensor & exponent);
645  Tensor & lerp_(const Tensor & end, Scalar weight);
646  Tensor & lerp_(const Tensor & end, const Tensor & weight);
647  Tensor & sign_();
648  Tensor & fmod_(Scalar other);
649  Tensor & fmod_(const Tensor & other);
650  Tensor & remainder_(Scalar other);
651  Tensor & remainder_(const Tensor & other);
652  Tensor & addbmm_(const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1);
653  Tensor addbmm(const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1) const;
654  Tensor & addcmul_(const Tensor & tensor1, const Tensor & tensor2, Scalar value=1);
655  Tensor & addcdiv_(const Tensor & tensor1, const Tensor & tensor2, Scalar value=1);
656  Tensor & random_(int64_t from, int64_t to, Generator * generator=nullptr);
657  Tensor & random_(int64_t to, Generator * generator=nullptr);
658  Tensor & random_(Generator * generator=nullptr);
659  Tensor & uniform_(double from=0, double to=1, Generator * generator=nullptr);
660  Tensor & normal_(double mean=0, double std=1, Generator * generator=nullptr);
661  Tensor & cauchy_(double median=0, double sigma=1, Generator * generator=nullptr);
662  Tensor & log_normal_(double mean=1, double std=2, Generator * generator=nullptr);
663  Tensor & exponential_(double lambd=1, Generator * generator=nullptr);
664  Tensor & geometric_(double p, Generator * generator=nullptr);
665  Tensor diag(int64_t diagonal=0) const;
666  Tensor cross(const Tensor & other, int64_t dim=-1) const;
667  Tensor triu(int64_t diagonal=0) const;
668  Tensor tril(int64_t diagonal=0) const;
669  Tensor trace() const;
670  Tensor ne(Scalar other) const;
671  Tensor ne(const Tensor & other) const;
672  Tensor eq(Scalar other) const;
673  Tensor eq(const Tensor & other) const;
674  Tensor ge(Scalar other) const;
675  Tensor ge(const Tensor & other) const;
676  Tensor le(Scalar other) const;
677  Tensor le(const Tensor & other) const;
678  Tensor gt(Scalar other) const;
679  Tensor gt(const Tensor & other) const;
680  Tensor lt(Scalar other) const;
681  Tensor lt(const Tensor & other) const;
682  Tensor take(const Tensor & index) const;
683  Tensor index_select(int64_t dim, const Tensor & index) const;
684  Tensor masked_select(const Tensor & mask) const;
685  Tensor nonzero() const;
686  Tensor gather(int64_t dim, const Tensor & index, bool sparse_grad=false) const;
687  Tensor addcmul(const Tensor & tensor1, const Tensor & tensor2, Scalar value=1) const;
688  Tensor addcdiv(const Tensor & tensor1, const Tensor & tensor2, Scalar value=1) const;
689  std::tuple<Tensor,Tensor> gels(const Tensor & A) const;
690  std::tuple<Tensor,Tensor> trtrs(const Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false) const;
691  std::tuple<Tensor,Tensor> symeig(bool eigenvectors=false, bool upper=true) const;
692  std::tuple<Tensor,Tensor> eig(bool eigenvectors=false) const;
693  std::tuple<Tensor,Tensor,Tensor> svd(bool some=true, bool compute_uv=true) const;
694  Tensor cholesky(bool upper=false) const;
695  Tensor cholesky_solve(const Tensor & input2, bool upper=false) const;
696  std::tuple<Tensor,Tensor> solve(const Tensor & A) const;
697  Tensor potri(bool upper=true) const;
698  std::tuple<Tensor,Tensor> pstrf(bool upper=true, Scalar tol=-1) const;
699  std::tuple<Tensor,Tensor> qr() const;
700  std::tuple<Tensor,Tensor> geqrf() const;
701  Tensor orgqr(const Tensor & input2) const;
702  Tensor ormqr(const Tensor & input2, const Tensor & input3, bool left=true, bool transpose=false) const;
703  std::tuple<Tensor,Tensor> btrifact(bool pivot=true) const;
704  std::tuple<Tensor,Tensor,Tensor> btrifact_with_info(bool pivot=true) const;
705  Tensor btrisolve(const Tensor & LU_data, const Tensor & LU_pivots) const;
706  Tensor multinomial(int64_t num_samples, bool replacement=false, Generator * generator=nullptr) const;
707  Tensor lgamma() const;
708  Tensor digamma() const;
709  Tensor polygamma(int64_t n) const;
710  Tensor erfinv() const;
711  Tensor frac() const;
712  Tensor dist(const Tensor & other, Scalar p=2) const;
713  Tensor reciprocal() const;
714  Tensor neg() const;
715  Tensor atan2(const Tensor & other) const;
716  Tensor lerp(const Tensor & end, Scalar weight) const;
717  Tensor lerp(const Tensor & end, const Tensor & weight) const;
718  Tensor histc(int64_t bins=100, Scalar min=0, Scalar max=0) const;
719  Tensor sign() const;
720  Tensor fmod(Scalar other) const;
721  Tensor fmod(const Tensor & other) const;
722  Tensor remainder(Scalar other) const;
723  Tensor remainder(const Tensor & other) const;
724  Tensor min(const Tensor & other) const;
725  Tensor min() const;
726  Tensor max(const Tensor & other) const;
727  Tensor max() const;
728  Tensor median() const;
729  std::tuple<Tensor,Tensor> sort(int64_t dim=-1, bool descending=false) const;
730  Tensor argsort(int64_t dim=-1, bool descending=false) const;
731  std::tuple<Tensor,Tensor> topk(int64_t k, int64_t dim=-1, bool largest=true, bool sorted=true) const;
732  Tensor all() const;
733  Tensor any() const;
734  Tensor renorm(Scalar p, int64_t dim, Scalar maxnorm) const;
735  Tensor unfold(int64_t dimension, int64_t size, int64_t step) const;
736  bool equal(const Tensor & other) const;
737  Tensor pow(const Tensor & exponent) const;
738  Tensor alias() const;
739 
740  // We changed .dtype() to return a TypeMeta in #12766. Ideally, we want the
741  // at::kDouble and its friends to be TypeMeta's, but that hasn't happened yet.
742  // Before that change, we make this method to maintain BC for C++ usage like
743  // `x.to(y.dtype)`.
744  // TODO: remove following two after at::kDouble and its friends are TypeMeta's.
745  inline Tensor to(caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const {
746  return this->to(/*scalar_type=*/typeMetaToScalarType(type_meta), non_blocking, copy);
747  }
748  inline Tensor to(Device device, caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const {
749  return this->to(device, /*scalar_type=*/typeMetaToScalarType(type_meta), non_blocking, copy);
750  }
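// For example (x and y are hypothetical Tensors), x.to(y.dtype()) forwards
// to x.to(typeMetaToScalarType(y.dtype()), /*non_blocking=*/false, /*copy=*/false).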
751 
752  template <typename F, typename... Args>
753  auto m(F func, Args&&... params) const -> decltype(func(*this, std::forward<Args>(params)...)) {
754  return func(*this, std::forward<Args>(params)...);
755  }
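// Hypothetical example for m(): given a free function
//   Tensor scale(const Tensor& self, double factor);
// the call t.m(scale, 2.0) is equivalent to scale(t, 2.0).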
756 
757  friend struct WeakTensor;
758 
759 protected:
760  void enforce_invariants();
761  c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> impl_;
762 };
763 
764 struct CAFFE2_API WeakTensor {
765  WeakTensor(const Tensor& t) : weak_impl_(t.impl_) {}
766 
767  // XXX: this can return undefined tensors
768  // Ideally it would be c10::optional<Tensor>, but MSVC is too cool for that
769  Tensor lock() const {
770  return Tensor(weak_impl_.lock());
771  }
772 
773  bool is_same(const WeakTensor& other) const noexcept {
774  return weak_impl_ == other.weak_impl_;
775  }
776 
777  size_t use_count() const noexcept {
778  return weak_impl_.use_count();
779  }
780  size_t weak_use_count() const noexcept {
781  return weak_impl_.weak_use_count();
782  }
783 
784  TensorImpl* unsafeGetTensorImpl() const {
785  return weak_impl_._unsafe_get_target();
786  }
787 
788 private:
789  c10::weak_intrusive_ptr<TensorImpl, UndefinedTensorImpl> weak_impl_;
790 };
791 
792 namespace detail {
793 // Helper creator for the Tensor class which doesn't require the user to pass
794 // in an intrusive_ptr; instead, it converts the arguments passed to the
795 // requested intrusive_ptr type.
796 template <typename T, typename... Args>
797 Tensor make_tensor(Args&&... args) {
798  return Tensor(c10::make_intrusive<T>(std::forward<Args>(args)...));
799 }
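// For example (MyTensorImpl and its constructor arguments are hypothetical):
//   at::Tensor t = at::detail::make_tensor<MyTensorImpl>(arg1, arg2);
// which is shorthand for Tensor(c10::make_intrusive<MyTensorImpl>(arg1, arg2)).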
800 } // namespace detail
801 
802 } // namespace at
803 
804 #include <ATen/core/TensorMethods.h>