// Caffe2 - C++ API
// A deep learning, cross platform ML framework
// tensor.h
1 #ifndef CAFFE2_CORE_TENSOR_H_
2 #define CAFFE2_CORE_TENSOR_H_
3 
4 #include "caffe2/core/storage.h"
5 #include "caffe2/core/tensor_impl.h"
6 
7 #include <ATen/core/UndefinedTensorImpl.h>
8 #include <c10/util/intrusive_ptr.h>
9 #include "ATen/core/Tensor.h"
10 #include <c10/core/TensorOptions.h>
11 #include <c10/core/Tensor.h>
12 
13 namespace caffe2 {
14 
16 
25 class CAFFE2_API Tensor final {
26  private:
27  enum Unsafe { IDoWantAliasing };
28  Tensor(const Tensor& other, Unsafe _) : impl_(other.getIntrusivePtr()) {}
29 
30  protected:
32  TensorImplPtr impl_;
33 
34  void enforce_invariants();
35 
36  public:
37  Tensor() : impl_() {}
38 
39  // caffe2::Tensor is explicitly marked as moveable-only because before
40  // the refactoring the class used to be a value type and a lot of user code
41  // is written this way. With PyTorch unification, caffe2::Tensor actually
42  // has semantics of a shared_ptr now (via intrusive_ptr). However, to prevent
43  // accidental mistakes when changing legacy code we keep caffe2::Tensor
44  // to have movable semantics.
45  //
46  // If you need to get a pointer to the same Tensor instance (not to be
47  // confused with shared storage), `UnsafeSharedInstance` can be used. It has
48  // the same behavior as `at::Tensor a = b`.
49  Tensor(const Tensor&) = delete;
50  Tensor& operator=(const Tensor&) = delete;
51  Tensor(Tensor&&) = default;
52  Tensor& operator=(Tensor&&) = default;
53 
54  operator bool() const {
55  return impl_.defined();
56  }
57 
58  TensorImpl* unsafeGetTensorImpl() const {
59  return impl_.get();
60  }
61 
62  Tensor UnsafeSharedInstance() const {
63  return Tensor(*this, IDoWantAliasing);
64  }
65 
72  explicit Tensor(at::Device device)
73  : impl_(c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(
74  Storage::create_legacy(device, TypeMeta()),
75  c10::computeTensorTypeId(at::device(device).layout(at::kStrided)),
76  /*is_variable=*/ false
77  )) {
78  }
79 
86  explicit Tensor(at::IntArrayRef dims, DeviceType type) : Tensor(type) {
87  // TODO: here, we create a Storage
88  // and immediately discard it in Resize() since
89  // reset_tensor will be true and FreeMemory will be called,
90  // we might want to avoid creating Storage twice?
91  Resize(dims);
92  }
93 
94  // we want to preserve index information
95  explicit Tensor(at::IntArrayRef dims, at::Device device): Tensor(device) {
96  Resize(dims);
97  }
98 
99  // TODO: remove?
100  explicit Tensor(const vector<int>& dims, DeviceType type)
101  : Tensor(type) {
102  Resize(dims);
103  }
104 
109  Tensor(const Tensor& src, DeviceType type)
110  : Tensor(type) {
111  CopyFrom(src);
112  }
113 
120  explicit Tensor(const at::Tensor& tensor)
121  : impl_(std::move(tensor.getIntrusivePtr())) {
122  enforce_invariants();
123  }
124 
125  explicit operator at::Tensor() const& {
126  return at::Tensor::wrap_tensor_impl(impl_);
127  }
128 
129  explicit operator at::Tensor() && {
130  return at::Tensor::wrap_tensor_impl(std::move(impl_));
131  }
132 
139  explicit Tensor(C10Tensor tensor) : impl_(std::move(tensor).impl()) {
140  enforce_invariants();
141  }
142 
143  explicit operator C10Tensor() const & {
144  return C10Tensor(impl_);
145  }
146 
147  explicit operator C10Tensor() && {
148  return C10Tensor(std::move(impl_));
149  }
150 
151  bool is_same(const Tensor& other) const noexcept {
152  return impl_ == other.impl_;
153  }
154 
155  Tensor Clone() const {
156  Tensor x(GetDevice());
157  x.CopyFrom(*this);
158  return x;
159  }
160 
174  Tensor Alias() const {
175  Tensor x(sizes(), GetDevice());
176  if (!dtype_initialized()) {
177  C10_LOG_EVERY_MS(WARNING, 1000) <<
178  "Cloning a tensor that don't have a data type (did you call mutable_data<T> on the tensor?)";
179  }
180  AT_ASSERTM(
181  storage_initialized(),
182  "Cloning a tensor that has no content and has size > 0");
183  // set_storage already sets data_type_ of TensorImpl
184  x.impl_->set_storage(storage());
185  x.impl_->set_storage_offset(impl_->storage_offset());
186  x.impl_->set_sizes_and_strides(sizes(), strides());
187  return x;
188  }
189 
190  DeviceType GetDeviceType() const {
191  return impl_->device_type();
192  }
193 
194  at::Device GetDevice() const {
195  return impl_.get()->GetDevice();
196  }
197 
211  void CopyFrom(const Tensor& src, bool async = false) {
212  AT_ASSERT(!impl_->is_variable()); // TODO: remove this when Variable and Tensor are merged
213  AT_ASSERTM(
214  src.impl_->is_contiguous(),
215  "Right now only copy of contiguous source Tensor is supported.");
216  AT_ASSERTM(
217  src.impl_->storage_initialized(),
218  "Cannot copy from an uninitialized Tensor");
219 
220  if (src.impl_.get() == impl_.get()) {
221  return;
222  }
223 
224  // Test if we need to allocate a new storage
225  // Uninitialized storages are guaranteed to be uniquely owned,
226  // so we don't need to swap in dst case.
227  // If the dtype changed, we need to reallocate storage.
228  if (impl_->dtype() != src.impl_->dtype()) {
229  // NB: copy preserves device_type
230  // This storage will get initialized by the mutable_data call below.
231  impl_->set_storage(at::Storage::create_legacy(impl_->device_type(), src.impl_->dtype()));
232  }
233  impl_->Resize(src.impl_->sizes());
234 
235  if (impl_->numel() > 0) {
236  if (impl_->dtype().copy()) {
237  AT_ASSERTM(
238  impl_->device_type() == ::at::DeviceType::CPU,
239  "In CopyFrom source and dest tensors must both be CPU for "
240  "non-POD copy, but dest tensor was ",
241  impl_->device_type());
242  AT_ASSERTM(
243  src.impl_->device_type() == ::at::DeviceType::CPU,
244  "In CopyFrom source and dest tensors must both be CPU for "
245  "non-POD copy, but src tensor was ",
246  src.impl_->device_type());
247  impl_->dtype().copy()(src.impl_->data(), impl_->raw_mutable_data(impl_->dtype()), impl_->numel());
248  } else {
249  // The following copy uses the current (thread local) stream for copying
250  // and also takes the GPU id from the device() field passed in.
251  //
252  // TODO: Potentially more enforcements are necessary to avoid accidental
253  // switch to sync copy if the currently set device is wrong.
254  //
255  // Specifically, we might need to switch to a different context device
256  // here explicitly to avoid relying on user synchronizing things
257  // properly.
258  //
259  // note: raw_mutable_data initializes device here
260  void* new_data = impl_->raw_mutable_data(impl_->dtype());
261  at::CopyBytes(
262  impl_->numel() * impl_->itemsize(),
263  src.impl_->data(),
264  src.impl_->device(),
265  new_data,
266  impl_->device(),
267  async);
268  }
269  }
270  }
271 
276  void ExtendTo(int64_t num, float growthPct) const {
277  CAFFE_ENFORCE_GE_WITH_CALLER(impl_->dim(), 1);
278  CAFFE_ENFORCE_GE_WITH_CALLER(growthPct, 0);
279  Extend(num - impl_->size(0), growthPct);
280  }
281 
282  void Extend(int64_t num, float growthPct) const {
283  impl_.get()->Extend(num, growthPct);
284  }
285 
293  void ShrinkTo(int64_t outer_dim) const {
294  CAFFE_ENFORCE_WITH_CALLER(
295  impl_->is_contiguous(),
296  "Right now ShrinkTo is only supported on contiguous Tensor.");
297  CAFFE_ENFORCE_WITH_CALLER(impl_->dim() >= 1, "Tensor must be at least 1D");
298  CAFFE_ENFORCE_WITH_CALLER(
299  outer_dim <= impl_->size(0),
300  "New outer dimension must be smaller than current.");
301  CAFFE_ENFORCE(
302  impl_->storage().unique(),
303  "Can't call ShrinkTo on shared storage, please call Resize instead.");
304  impl_.get()->set_size(0, outer_dim);
305  }
306 
307  template <class T>
308  void ReserveSpace(const T& outer_dim) const {
309  impl_.get()->ReserveSpace(outer_dim);
310  }
311 
312  template <typename... Ts>
313  void Resize(Ts... dim_source) const {
314  impl_.get()->Resize(dim_source...);
315  }
316 
322  inline void ResizeLike(const Tensor& src_tensor) const {
323  CAFFE_ENFORCE_WITH_CALLER(
324  src_tensor.is_contiguous(),
325  "Right now ResizeLike is only supported for contiguous Tensor.");
326  if (impl_ != src_tensor.impl_) {
327  impl_.get()->Resize(src_tensor.sizes());
328  }
329  }
330 
331  inline void Reshape(const vector<int64_t>& dims) const {
332  impl_.get()->Reshape(dims);
333  }
334 
335  inline void Reshape(const vector<int>& dims) const {
336  impl_.get()->Reshape(ToVectorint64_t(dims));
337  }
338 
339  inline void FreeMemory() const {
340  impl_.get()->FreeMemory();
341  }
342 
348  string DebugString() const {
349  std::stringstream ss;
350  ss << "A Tensor of item size " << impl_->storage().itemsize() << " and type "
351  << impl_->dtype().name() << " and dimension (";
352  for (int d : impl_->sizes()) {
353  ss << d << ",";
354  }
355  ss << ").";
356  return ss.str();
357  }
358 
359  // To be deprecated
360  void ShareData(const Tensor& src) const {
361  impl_.get()->ShareData(*src.impl_.get());
362  }
363 
373  template <typename T>
375  T* src,
376  size_t capacity = 0,
377  MemoryDeleter d = nullptr) const {
378  ShareExternalPointer((void*)src, caffe2::TypeMeta::Make<T>(), capacity, d);
379  }
380 
381  template <typename T>
382  void ShareExternalPointer(at::DataPtr&& data_ptr, size_t capacity = 0) const {
383  ShareExternalPointer(std::move(data_ptr), caffe2::TypeMeta::Make<T>(), capacity);
384  }
385 
386  void ShareExternalPointer(
387  void* src,
388  const TypeMeta& data_type,
389  size_t capacity = 0,
390  MemoryDeleter d = nullptr) const {
391  CAFFE_ENFORCE_WITH_CALLER(
392  impl_->is_contiguous(),
393  "Right now ShareExternalPointer is only supported for contiguous Tensor.");
394  CAFFE_ENFORCE_WITH_CALLER(
395  data_type.id() != caffe2::TypeIdentifier::uninitialized(),
396  "To share with a raw external pointer you need to pass in an "
397  "initialized data_type(TypeMeta).");
398  impl_.get()->ShareExternalPointer(
399  at::DataPtr(src, src, d, impl_->device_type()), data_type, capacity);
400  }
401 
402  void ShareExternalPointer(
403  at::DataPtr&& data_ptr,
404  const TypeMeta& data_type,
405  size_t capacity) {
406  impl_.get()->ShareExternalPointer(std::move(data_ptr), data_type, capacity);
407  }
408 
410  const {
411  return impl_;
412  }
413 
414  bool defined() const {
415  return impl_;
416  }
417 
422  inline void* raw_data() const {
423  return impl_->data();
424  }
425 
426  template <typename T>
427  inline T* data() const {
428  return impl_.get()->data<T>();
429  }
430 
431  inline void* raw_mutable_data(const TypeMeta& meta) const {
432  return impl_.get()->raw_mutable_data(meta);
433  }
434 
444  inline void* raw_mutable_data() const {
445  const auto& data_type = impl_->dtype();
446  CAFFE_ENFORCE_WITH_CALLER(
447  data_type.id() != caffe2::TypeIdentifier::uninitialized(),
448  "Calling raw_mutable_data() without meta, but the current meta is "
449  "of unknown type.");
450  return raw_mutable_data(data_type);
451  }
452 
453  template <typename T>
454  inline T* mutable_data() const {
455  return impl_.get()->mutable_data<T>();
456  }
457 
461  inline int dim() const {
462  return impl_->dim();
463  }
464 
468  inline int ndim() const {
469  return impl_->dim();
470  }
471 
476  inline int64_t size() const {
477  return impl_->numel();
478  }
479 
483  inline int64_t numel() const {
484  return impl_->numel();
485  }
486 
490  inline size_t itemsize() const {
491  return impl_->storage().itemsize();
492  }
493 
499  inline size_t nbytes() const {
500  return impl_->numel() * itemsize();
501  }
502 
503  inline at::IntArrayRef sizes() const {
504  return impl_.get()->sizes();
505  }
506 
507  inline int64_t size_from_dim(int k) const {
508  return size_from_dim_(k, impl_->sizes());
509  }
510 
511  inline int64_t size_to_dim(int k) const {
512  return size_to_dim_(k, impl_->sizes());
513  }
514 
515  inline int64_t size_between_dim(int k, int l) const {
516  return size_between_dim_(k, l, impl_->sizes());
517  }
518 
530  inline int canonical_axis_index(int axis_index) const {
531  return canonical_axis_index_(axis_index, impl_->dim());
532  }
533 
534  inline int64_t stride(int64_t dim) const {
535  return impl_.get()->stride(dim);
536  }
537 
538  inline at::IntArrayRef strides() const {
539  return impl_.get()->strides();
540  }
541 
542  inline bool is_contiguous() const {
543  return impl_.get()->is_contiguous();
544  }
545 
549  template <typename T>
550  inline bool IsType() const {
551  return impl_->storage().IsType<T>();
552  }
553 
557  inline const TypeMeta& dtype() const {
558  return impl_->dtype();
559  }
560 
565  inline const TypeMeta& meta() const {
566  return impl_->dtype();
567  }
568 
576  inline int dim32(const int i) const {
577 #ifndef NDEBUG
578  CAFFE_ENFORCE_LT_WITH_CALLER(i, static_cast<int>(impl_->dim()), "Exceeding ndim limit");
579  CAFFE_ENFORCE_GE_WITH_CALLER(i, 0, "Cannot have negative dimension index");
580 #endif
581  auto s = impl_->size(i);
582  CAFFE_ENFORCE_LT_WITH_CALLER(s, std::numeric_limits<int>::max());
583  return static_cast<int>(s);
584  }
585 
586  inline int64_t size(const int i) const {
587  return impl_->size(i);
588  }
589 
590  // To be deprecated
591  inline int64_t dim(const int i) const {
592  return impl_->size(i);
593  }
594 
595  const Storage& storage() {
596  return impl_->storage();
597  }
598 
599  const Storage& storage() const {
600  return impl_->storage();
601  }
602 
603  bool storage_initialized() const {
604  return impl_->storage_initialized();
605  }
606 
607  bool dtype_initialized() const {
608  return impl_->dtype_initialized();
609  }
610 };
611 
// Reinitialize a Tensor to the given dims and options if necessary; does
// nothing when the tensor already matches the requested state (tensor.cc).
CAFFE2_API void ReinitializeTensor(Tensor* t, at::IntArrayRef dims, at::TensorOptions options);

// Reinitialize `t` per `options`, then copy `src` into it (optionally async).
CAFFE2_API void ReinitializeAndCopyFrom(
    Tensor* t,
    at::TensorOptions options,
    const Tensor& src,
    bool async = false);

CAFFE_DECLARE_PREALLOCATED_KNOWN_TYPE(12, Tensor)

using TensorCPU = Tensor;

// Default element-count limit used by TensorPrinter.
constexpr int k_limit_default_ = 1000;

// TODO: the following logic can be merged into regular Tensor class methods
// after MKLMemory starts to implement Tensor interface

// Type call registry
typedef TypeMeta (*TypeCall)(const void*);
TypeCall GetTypeCallFunction(TypeIdentifier id);
void RegisterTypeCallFunction(TypeIdentifier id, TypeCall c);

// Shape call registry
typedef vector<int64_t> (*TensorInfoCall)(
    const void*,
    size_t* capacity,
    DeviceOption* device);
TensorInfoCall GetTensorInfoFunction(TypeIdentifier id);
void RegisterTensorInfoFunction(TypeIdentifier id, TensorInfoCall c);

// resize helper function
void TensorVectorResize(
    std::vector<Tensor>& tensors,
    int size,
    DeviceType type);

// Tensor factory function
CAFFE2_API Tensor empty(at::IntArrayRef dims, at::TensorOptions options);
655 
660 // TODO: can be unified with at::from_blob when Tensor is merged and string
661 // types are supported
662 template <typename T>
664  Tensor r = empty(dims, at::device(CPU).dtype<T>());
665  CAFFE_ENFORCE_EQ(values.size(), r.numel());
666  CPUContext context;
667  context.CopyItemsFromCPU(
668  r.dtype(), values.size(), values.data(), r.mutable_data<T>());
669  return r;
670 }
671 
// Pretty-printer for Tensor contents, intended for debugging/inspection.
// Output goes either to a log file or to LOG(INFO); presumably to_file_ is
// set when file_name is non-empty — confirm against tensor.cc.
class CAFFE2_API TensorPrinter {
 public:
  explicit TensorPrinter(
      const std::string& tensor_name = "",
      const std::string& file_name = "",
      int limit = k_limit_default_);
  ~TensorPrinter();

  // Prints up to limit_ comma-separated values of `tensor` after MetaStr().
  template <class T>
  void Print(const Tensor& tensor);

  void PrintMeta(const Tensor& tensor);

  // One-line meta description of `tensor` (prefix for Print/PrintMeta).
  string MetaStr(const Tensor& tensor);

 private:
  bool to_file_;                            // write to log_file_ instead of LOG(INFO)
  int limit_;                               // max number of elements printed
  std::unique_ptr<std::ofstream> log_file_;
  std::string tensor_name_;
};
693 
694 template <class T>
695 void TensorPrinter::Print(const Tensor& tensor) {
696  std::stringstream values_stream;
697  // One most likely doesn't want to print int64-number of items for visual
698  // inspection, so we cast down to int here.
699  int total_count = static_cast<int>(std::min(tensor.numel(), int64_t(limit_)));
700 
701  const T* tensor_data = tensor.template data<T>();
702  for (int i = 0; i < total_count - 1; ++i) {
703  values_stream << tensor_data[i] << ",";
704  }
705  if (total_count) {
706  // We do not add a comma after the last item.
707  values_stream << tensor_data[total_count - 1];
708  }
709 
710  if (to_file_) {
711  (*log_file_) << MetaStr(tensor) << values_stream.str() << std::endl;
712  } else {
713  // Log to console.
714  LOG(INFO) << MetaStr(tensor) << values_stream.str();
715  }
716 }
717 
718 } // namespace caffe2
719 #endif // CAFFE2_CORE_TENSOR_H_
virtual int64_t dim() const
Return the number of dimensions of this tensor.
Definition: TensorImpl.cpp:91
T * data() const
Return a typed data pointer to the actual data which this tensor refers to.
Definition: TensorImpl.h:564
This is a minimal Tensor class for use in c10 code.
Definition: Tensor.h:18
int64_t size() const
(To be deprecated) Returns the size (i.e.
Definition: tensor.h:476
bool is_variable() const
True if a tensor is a variable.
Definition: TensorImpl.h:809
virtual void set_size(int64_t dim, int64_t new_size)
Change the size at some dimension.
Definition: TensorImpl.h:688
virtual int64_t size(int64_t d) const
Return the size of a tensor at some dimension.
Definition: TensorImpl.cpp:95
size_t itemsize() const
Return the number of bytes each item takes in the tensor.
Definition: tensor.h:490
void * raw_mutable_data(const caffe2::TypeMeta &meta)
Returns a mutable raw pointer of the underlying storage.
Definition: TensorImpl.h:1160
void * raw_mutable_data() const
Returns a mutable raw pointer of the underlying storage.
Definition: tensor.h:444
void ReinitializeTensor(Tensor *tensor, at::IntArrayRef dims, at::TensorOptions options)
Reinitialize a Tensor to given dims and options if necessary, note that this will not do anything if ...
Definition: tensor.cc:127
Tensor Alias() const
Clone self as a Tensor that share the same Storage, that is, both Tensors are views on the same Stora...
Definition: tensor.h:174
const caffe2::TypeMeta & dtype() const
Returns the TypeMeta of a tensor, which describes what data type it is (e.g., int, float, ...)
Definition: TensorImpl.h:629
virtual int64_t numel() const
The number of elements in a tensor.
Definition: TensorImpl.h:317
virtual bool is_contiguous() const
Whether or not a tensor is laid out in contiguous memory.
Definition: TensorImpl.h:331
void Resize(Ts...dim_source)
Resizes a tensor.
Definition: TensorImpl.h:1014
void ShrinkTo(int64_t outer_dim) const
Shrinks the outer-most dimension to given size, keeping the data.
Definition: tensor.h:293
virtual IntArrayRef sizes() const
Return a reference to the sizes of this tensor.
Definition: TensorImpl.cpp:59
int64_t numel() const
Returns the number of items of the tensor.
Definition: tensor.h:483
std::vector< int64_t > ToVectorint64_t(ArrayRef< int > src)
A utility function to convert vector<int> to vector<int64_t>.
Definition: TensorImpl.h:46
void Extend(int64_t num, float growthPct)
Extends the outer-most dimension of this tensor by num elements, preserving the existing data...
Definition: TensorImpl.h:904
A type id is a unique id for a given C++ type.
Definition: typeid.h:60
void ShareData(const TensorImpl &src)
Shares the data with another tensor.
Definition: TensorImpl.h:1090
void ShareExternalPointer(T *src, size_t capacity=0, MemoryDeleter d=nullptr) const
Shares the data with an externally managed pointer.
Definition: tensor.h:374
The CPU Context, representing the bare minimum of what a Context class in Caffe2 should implement...
Definition: context.h:40
const TypeMeta & dtype() const
Returns the TypeMeta object associated with the current data type.
Definition: tensor.h:557
int64_t size_from_dim_(int k, IntArrayRef dims)
Return product of all dimensions starting from k.
Definition: TensorImpl.h:53
T * mutable_data()
Returns a typed pointer of the underlying storage.
Definition: TensorImpl.h:1221
The low-level representation of a tensor, which contains a pointer to a storage (which contains the a...
Definition: TensorImpl.h:211
bool IsType() const
Checks if the tensor content is of the given data type.
Definition: tensor.h:550
size_t itemsize() const
Return the size of a single element of this tensor in bytes.
Definition: TensorImpl.h:636
Tensor(at::IntArrayRef dims, DeviceType type)
Creates a tensor of the given dimension.
Definition: tensor.h:86
void CopyFrom(const Tensor &src, bool async=false)
Copies the data from a source tensor, with a contex provided to carry out the underlying memcpy opera...
Definition: tensor.h:211
size_t nbytes() const
Returns the total number of bytes of the storage.
Definition: tensor.h:499
Represents a a compute device on which a tensor is located.
Definition: Device.h:30
void FreeMemory()
Release whatever memory the tensor was holding but keep size and type information.
Definition: TensorImpl.h:1071
bool dtype_initialized() const noexcept
True if a tensor is dtype initialized.
Definition: TensorImpl.h:1246
constexpr const char * name() const noexcept
Returns a printable name for the type.
Definition: typeid.h:395
constexpr size_t size() const
size - Get the array size.
Definition: ArrayRef.h:138
bool storage_initialized() const noexcept
True if a tensor is storage initialized.
Definition: TensorImpl.h:1237
Tensor(const at::Tensor &tensor)
Mutual conversion with at::Tensor.
Definition: tensor.h:120
Tensor(at::Device device)
Creates a tensor of the given device type.
Definition: tensor.h:72
A global dictionary that holds information about what Caffe2 modules have been loaded in the current ...
Definition: blob.h:13
const TypeMeta & meta() const
(To be deprecated) Returns the TypeMeta object associated with the current data type.
Definition: tensor.h:565
void ExtendTo(int64_t num, float growthPct) const
Extend the outer-most dimension of this tensor to dimension of num.
Definition: tensor.h:276
Tensor TensorCPUFromValues(at::IntArrayRef dims, at::ArrayRef< T > values)
Creates a CPU tensor, and fills its contents with the given values.
Definition: tensor.h:663
constexpr TypeIdentifier id() const noexcept
Returns the type id.
Definition: typeid.h:359
constexpr Copy * copy() const noexcept
Returns the typed copy function pointer for individual iterms.
Definition: typeid.h:380
int ndim() const
(To be deprecated) Returns the number of dimensions of the data.
Definition: tensor.h:468
void ReserveSpace(const T &outer_dim)
Reserve space for the underlying tensor.
Definition: TensorImpl.h:969
virtual int64_t storage_offset() const
Return the offset in number of elements into the storage that this tensor points to.
Definition: TensorImpl.h:650
int dim() const
Returns the number of dimensions of the data.
Definition: tensor.h:461
string DebugString() const
A utility function to print the debug string for the tensor.
Definition: tensor.h:348
int canonical_axis_index(int axis_index) const
Returns the &#39;canonical&#39; version of a (usually) user-specified axis, allowing for negative indexing (e...
Definition: tensor.h:530
To register your own kernel for an operator, do in one (!) cpp file: C10_REGISTER_KERNEL(OperatorHand...
Definition: alias_info.h:7
Flush-To-Zero and Denormals-Are-Zero mode.
void Reshape(const std::vector< int64_t > &dims)
Resizes the tensor without touching underlying storage.
Definition: TensorImpl.h:1043
TypeMeta is a thin class that allows us to store the type of a container such as a blob...
Definition: typeid.h:324
Tensor(C10Tensor tensor)
Mutual conversion with C10Tensor.
Definition: tensor.h:139
void ResizeLike(const Tensor &src_tensor) const
Resize the tensor like the source tensor.
Definition: tensor.h:322
int dim32(const int i) const
Returns the i-th dimension of the tensor in int.
Definition: tensor.h:576
DeviceType device_type() const
The device type of a Tensor, e.g., DeviceType::CPU or DeviceType::CUDA.
Definition: TensorImpl.h:880
void * raw_data() const
Returns a raw void* pointer of the underlying storage.
Definition: tensor.h:422
Tensor(const Tensor &src, DeviceType type)
: Create a Tensor of at::DeviceType type and initialize it with src Tensor
Definition: tensor.h:109
virtual int64_t stride(int64_t d) const
Return the stride of a tensor at some dimension.
Definition: TensorImpl.cpp:100
Device GetDevice() const
The device of a Tensor; e.g., Device(kCUDA, 1) (the 1-index CUDA device).
Definition: TensorImpl.h:889
virtual const Storage & storage() const
Return the underlying storage of a Tensor.
Definition: TensorImpl.cpp:117
virtual IntArrayRef strides() const
Return a reference to the strides of this tensor.
Definition: TensorImpl.cpp:63