Caffe2 - C++ API
A deep learning, cross platform ML framework
tensor.h
17 #ifndef CAFFE2_CORE_TENSOR_H_
18 #define CAFFE2_CORE_TENSOR_H_
19 
20 #include <cstddef>
21 #include <cstdint>
22 #include <fstream>
23 #include <sstream>
24 #include <type_traits>
25 #include <typeinfo>
26 #include <vector>
27 
28 #include "caffe2/core/common.h"
29 #include "caffe2/core/flags.h"
30 #include "caffe2/core/context.h"
31 #include "caffe2/core/typeid.h"
32 #include "caffe2/core/logging.h"
33 
34 // A global boolean variable that controls whether we free memory when a Tensor
35 // is shrunk to a smaller size. When the flag is set, a Tensor always keeps the
36 // memory allocated for the largest capacity it has been resized to so far.
37 CAFFE2_DECLARE_bool(caffe2_keep_on_shrink);
38 
39 // Since we can have high variance in blob memory allocated across different
40 // inputs in the same run, we will shrink the blob only if the memory gain
41 // is larger than this flag in bytes.
42 CAFFE2_DECLARE_int64(caffe2_max_keep_on_shrink_memory);
43 
44 namespace caffe2 {
45 
49 inline vector<TIndex> ToVectorTIndex(const std::vector<int>& src) {
50  return vector<TIndex>(src.begin(), src.end());
51 }
52 
56 inline TIndex size_from_dim_(int k, vector<TIndex> dims) {
57  TIndex r = 1;
58  for (int i = k; i < dims.size(); ++i) {
59  r *= dims[i];
60  }
61  return r;
62 }
63 
64 // Product of all dims up to k (not including dims[k])
65 inline TIndex size_to_dim_(int k, vector<TIndex> dims) {
66  CAFFE_ENFORCE(k <= dims.size());
67  TIndex r = 1;
68  for (int i = 0; i < k; ++i) {
69  r *= dims[i];
70  }
71  return r;
72 }
73 
74 // Product of all dims between k and l (not including dims[k] and dims[l])
75 inline TIndex size_between_dim_(int k, int l, vector<TIndex> dims) {
76  CAFFE_ENFORCE(l < dims.size());
77  TIndex r = 1;
78  if (k < l) {
79  for (int i = k + 1; i < l; ++i) {
80  r *= dims[i];
81  }
82  } else {
83  for (int i = l + 1; i < k; ++i) {
84  r *= dims[i];
85  }
86  }
87  return r;
88 }
89 
90 inline int canonical_axis_index_(int axis_index, int ndims) {
91  CAFFE_ENFORCE_GE(axis_index, -ndims);
92  CAFFE_ENFORCE_LT(axis_index, ndims);
93  if (axis_index < 0) {
94  return axis_index + ndims;
95  }
96  return axis_index;
97 }
98 
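For illustration only (not part of the header), here is what these helpers compute for a sample shape; TIndex is Caffe2's 64-bit integer index typedef from common.h:

  std::vector<TIndex> dims = {2, 3, 4, 5};
  size_from_dim_(2, dims);         // 4 * 5 = 20, product of dims[2] and dims[3]
  size_to_dim_(2, dims);           // 2 * 3 = 6, product of dims[0] and dims[1]
  size_between_dim_(1, 3, dims);   // 4, product of the dims strictly between indices 1 and 3
  canonical_axis_index_(-1, 4);    // 3, negative axes count from the end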
108 template <class Context>
109 class Tensor {
110  public:
114  Tensor() {}
115 
122  explicit Tensor(const vector<TIndex>& dims) { Resize(dims); }
123  explicit Tensor(const vector<int>& dims) { Resize(dims); }
124 
135  template <class SrcContext, class ContextForCopy>
136  Tensor(const Tensor<SrcContext>& src, ContextForCopy* context) {
137  CopyFrom(src, context);
138  }
139 
151  template <class SrcContext>
152  explicit Tensor(const Tensor<SrcContext>& src) {
153  CopyFrom(src);
154  }
155 
159  template <typename T>
160  Tensor(const vector<TIndex>& dims, const vector<T>& values, Context* context)
161  : meta_(TypeMeta::Make<T>()) {
162  Resize(dims);
163  CAFFE_ENFORCE_EQ_WITH_CALLER(values.size(), size_);
164  context->template Copy<T, CPUContext, Context>(size_, values.data(), mutable_data<T>());
165  }
166 
170  template <typename T,
171  typename = typename std::enable_if<std::is_scalar<T>::value>::type>
172  Tensor(const T& value, Context* context) {
173  Resize(vector<TIndex>{});
174  context->template Copy<T, CPUContext, Context>(size_, &value, mutable_data<T>());
175  }
176 
181  template <class SrcContext, class ContextForCopy>
182  void CopyFrom(const Tensor<SrcContext>& src, ContextForCopy* context) {
183  if ((void*)&src == (void*)this) {
184  return;
185  }
186  meta_ = src.meta();
187  Resize(src.dims());
188  if (size() > 0) {
189  if (meta_.copy()) {
190  meta_.copy()(src.raw_data(), raw_mutable_data(), size());
191  } else {
192  context->template CopyBytes<SrcContext, Context>(
193  nbytes(), src.raw_data(), raw_mutable_data());
194  }
195  }
196  }
197 
205  template <class SrcContext>
206  inline void CopyFrom(const Tensor<SrcContext>& src) {
207  SrcContext tmp_context;
208  CopyFrom(src, &tmp_context);
209  }
210 
211  virtual ~Tensor() noexcept {}
212 
222  template <class ContextForCopy>
223  void Extend(TIndex num, float growthPct, ContextForCopy* context) {
224  CAFFE_ENFORCE_GE_WITH_CALLER(dims_.size(), 1);
225  auto newDims = dims_;
226  newDims[0] += num;
227  if (!data_) {
228  Resize(newDims);
229  return;
230  }
231  auto newSize = std::accumulate(
232  newDims.begin(),
233  newDims.end(),
234  static_cast<TIndex>(1),
235  std::multiplies<TIndex>());
236  if (newSize * meta_.itemsize() <= capacity_) {
237  dims_ = newDims;
238  size_ = newSize;
239  return;
240  }
241  auto newCapacity = dims_;
242  newCapacity[0] = std::max<size_t>(
243  newDims[0], std::ceil(dims_[0] * (growthPct + 100) / 100));
244  Reserve(newCapacity, context);
245  dims_ = newDims;
246  size_ = newSize;
247  }
248 
249  template <class T, class ContextForCopy>
250  void Reserve(const std::vector<T>& newCapacity, ContextForCopy* context) {
251  auto newSize = std::accumulate(
252  newCapacity.begin(),
253  newCapacity.end(),
254  static_cast<TIndex>(1),
255  std::multiplies<TIndex>());
256  if (newSize * meta_.itemsize() <= capacity_) {
257  return;
258  }
259  auto oldData = std::move(data_);
260  auto oldSize = size_;
261  auto oldDims = dims_;
262  Resize(newCapacity);
263  auto* newData = raw_mutable_data(meta_);
264  context->template CopyItems<ContextForCopy, ContextForCopy>(
265  meta_, oldSize, oldData.get(), newData);
266  dims_ = oldDims;
267  size_ = oldSize;
268  reserved_ = true;
269  }
270 
277  void Shrink(TIndex outer_dim) {
278  CAFFE_ENFORCE_WITH_CALLER(dims_.size() >= 1, "Tensor must be at least 1D");
279  CAFFE_ENFORCE_WITH_CALLER(
280  outer_dim <= dims_[0],
281  "New outer dimension must be smaller than current.");
282  dims_[0] = outer_dim;
283  size_ = std::accumulate(
284  dims_.begin(),
285  dims_.end(),
286  static_cast<TIndex>(1),
287  std::multiplies<TIndex>());
288  }
289 
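A minimal CPU-only sketch (illustrative, not from the header) of how Extend, Reserve and Shrink interact with the allocated capacity:

  Tensor<CPUContext> batch(std::vector<TIndex>{8, 16});
  batch.mutable_data<float>();    // allocates 8 x 16 floats
  CPUContext ctx;
  batch.Extend(8, 50.f, &ctx);    // dims become {16, 16}; grows to at least
                                  // max(16, ceil(8 * 1.5)) rows, copying the old rows
  batch.Shrink(12);               // dims become {12, 16}; data and capacity are untouched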
303  template <typename... Ts>
304  void Resize(Ts... dim_source) {
305  bool size_changed = SetDims(dim_source...);
306  if (size_changed) {
307  // If needed, we will free the data. The next mutable_data() call
308  // will create the data storage.
309  int64_t new_size = size_ * meta_.itemsize();
310  bool reset_tensor = false;
311  if (reserved_) {
312  // If the tensor is reserved, do not free its memory unless capacity_
313  // is smaller than the new size
314  reset_tensor = capacity_ < new_size;
315  } else {
316  reset_tensor = capacity_ < new_size || !FLAGS_caffe2_keep_on_shrink ||
317  capacity_ - new_size > FLAGS_caffe2_max_keep_on_shrink_memory;
318  }
319 
320  if (reset_tensor) {
321  FreeMemory();
322  }
323  }
324  }
325 
330  template <class OtherContext>
331  inline void ResizeLike(const Tensor<OtherContext>& src_tensor) {
332  // Note: need casting for different context types.
333  if (static_cast<void*>(this) != static_cast<const void*>(&src_tensor)) {
334  Resize(src_tensor.dims());
335  }
336  }
337 
342  inline void Reshape(const vector<TIndex>& dims) {
343  TIndex new_size = 1;
344  for (auto d : dims) {
345  CAFFE_ENFORCE_GE_WITH_CALLER(d, 0);
346  new_size *= d;
347  }
348  CAFFE_ENFORCE_WITH_CALLER(
349  new_size == size_,
350  "New size and old size are not equal. You cannot use Reshape, "
351  "but should use Resize."
352  // TODO(jiayq): remove the following warning after pending diffs
353  // stabilize.
354  " The old caffe2 mixes Reshape and Resize but this behavior has "
355  "been changed. If you find this error, most likely you will need "
356  "to change corresponding code from Reshape to Resize.");
357  dims_ = dims;
358  }
359 
360  inline void Reshape(const vector<int>& dims) {
361  Reshape(ToVectorTIndex(dims));
362  }
363 
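To make the Resize/Reshape distinction concrete, a small sketch (illustrative):

  Tensor<CPUContext> t(std::vector<TIndex>{2, 6});
  t.mutable_data<float>();               // lazily allocates 12 floats
  t.Reshape(std::vector<TIndex>{3, 4});  // OK: same element count, storage untouched
  t.Resize(3, 5);                        // OK: element count changes; storage is freed here and
                                         // reallocated by the next mutable_data<float>() call
  // t.Reshape(std::vector<TIndex>{3, 5});  // would throw: Reshape cannot change the total size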
369  inline void FreeMemory() {
370  data_.reset();
371  capacity_ = 0;
372  // If reserved_ is true and we have just released the tensor's memory, it is
373  // fine to switch it to false. When Resize() is called from Reserve() and it
374  // triggers FreeMemory(), reserved_ is set back to true at the end of Reserve().
375  reserved_ = false;
376  }
377 
383  string DebugString() const {
384  std::stringstream ss;
385  ss << "A Tensor of item size " << itemsize() << " and type "
386  << meta_.name() << " and dimension (";
387  for (int d : dims_) {
388  ss << d << ",";
389  }
390  ss << ").";
391  return ss.str();
392  }
393 
394  void swap(Tensor<Context>& other) {
395  std::swap(dims_, other.dims_);
396  std::swap(size_, other.size_);
397  std::swap(meta_, other.meta_);
398  std::swap(data_, other.data_);
399  std::swap(shares_data_, other.shares_data_);
400  std::swap(capacity_, other.capacity_);
401  std::swap(reserved_, other.reserved_);
402  }
403 
416  void ShareData(const Tensor& src) {
417  meta_ = src.meta();
418  CAFFE_ENFORCE_EQ_WITH_CALLER(
419  src.size_,
420  size_,
421  "Size mismatch - did you call reshape before sharing the data?");
422  // It is possible that the source tensor hasn't called mutable_data() yet,
423  // in which case ShareData() doesn't make much sense since we don't really
424  // know what to share yet.
425  CAFFE_ENFORCE_WITH_CALLER(
426  src.data_.get() || src.size_ == 0,
427  "Source tensor has no content and has size > 0");
428  // Finally, do sharing.
429  data_ = src.data_;
430  capacity_ = src.capacity_;
431  shares_data_ = true;
432  }
433 
443  template <typename T, typename Deleter = MemoryDeleter>
444  void ShareExternalPointer(T* src, size_t capacity = 0, Deleter d = nullptr) {
445  ShareExternalPointer(src, TypeMeta::Make<T>(), capacity, d);
446  }
447 
448  template <typename Deleter = MemoryDeleter>
449  void ShareExternalPointer(
450  void* src,
451  const TypeMeta& meta,
452  size_t capacity = 0,
453  Deleter d = nullptr) {
454  meta_ = meta;
455  CAFFE_ENFORCE_WITH_CALLER(
456  meta_.id(),
457  "To share with a raw external pointer you need to have meta "
458  "already set.");
459  CAFFE_ENFORCE_WITH_CALLER(
460  size_ >= 0,
461  "To share data with a raw pointer, you need to set shape first.");
462  // Check if the deleter is a MemoryDeleter and is a simple nullptr.
463  if (std::is_same<MemoryDeleter, Deleter>::value &&
464  reinterpret_cast<MemoryDeleter*>(&d)[0] == nullptr) {
465  // Use aliasing constructor trick to avoid calling the destructor.
466  data_ = std::shared_ptr<void>(std::shared_ptr<void>(), src);
467  } else {
468  data_.reset(src, d);
469  }
470  // Sets capacity. If not specified, we will implicitly assume that
471  // the capacity is the current size.
472  if (capacity) {
473  capacity_ = capacity;
474  } else {
475  capacity_ = nbytes();
476  }
477  shares_data_ = true;
478  }
479 
480  bool shares_data() const {
481  return shares_data_;
482  }
483 
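A sketch of sharing externally managed memory (the buffer here is illustrative):

  std::vector<float> buffer(6, 0.f);       // owned by the caller
  Tensor<CPUContext> t(std::vector<TIndex>{2, 3});
  t.ShareExternalPointer(buffer.data());   // no copy is made; `t` must not outlive `buffer`
  CAFFE_ENFORCE(t.shares_data());          // capacity_ defaults to nbytes() = 6 * sizeof(float)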
488  inline const void* raw_data() const {
489  CAFFE_ENFORCE_WITH_CALLER(data_.get() || size_ == 0);
490  return data_.get();
491  }
492 
499  template <typename T>
500  inline const T* data() const {
501  CAFFE_ENFORCE_WITH_CALLER(
502  data_.get() || size_ == 0,
503  "The tensor is of non-zero shape, but its data is not allocated yet. "
504  "Caffe2 uses a lazy allocation, so you will need to call "
505  "mutable_data() or raw_mutable_data() to actually allocate memory.");
506  CAFFE_ENFORCE_WITH_CALLER(
507  IsType<T>(),
508  "Tensor type mismatch, caller expects elements to be ",
509  TypeMeta::TypeName<T>(),
510  " while tensor contains ",
511  meta_.name());
512  return static_cast<T*>(data_.get());
513  }
514 
526  inline void* raw_mutable_data(const TypeMeta& meta) {
527  // For 0-size tensors it's fine to return any pointer (including nullptr)
528  if (meta_ == meta && (data_.get() || size_ == 0)) {
529  return data_.get();
530  } else {
531  bool had_special_dtor = meta_.dtor() != nullptr;
532  meta_ = meta;
533  CAFFE_ENFORCE_WITH_CALLER(
534  size_ >= 0,
535  "Tensor is not initialized. You probably need to call Resize() "
536  "before calling mutable_data()");
537 
538  // We can reuse the existing buffer if the current data does not have
539  // a special destructor and the new data doesn't have a special
540  // constructor.
541  if (size_ == 0 ||
542  (meta.ctor() == nullptr && !had_special_dtor &&
543  capacity_ >= size_ * meta_.itemsize())) {
544  return data_.get();
545  }
546  if (meta.ctor()) {
547  // For types that need placement new, we will call it, as well as
548  // making sure that when the data is freed, it calls the right
549  // destruction procedure.
550  auto size = size_;
551  auto dtor = meta_.dtor();
552  auto ptr_and_deleter = Context::New(size_ * meta_.itemsize());
553  auto deleter = ptr_and_deleter.second;
554  data_.reset(
555  ptr_and_deleter.first, [size, dtor, deleter](void* ptr) -> void {
556  dtor(ptr, size);
557  deleter(ptr);
558  });
559  meta_.ctor()(data_.get(), size_);
560  } else {
561  // For fundamental type, new and delete is easier.
562  auto ptr_and_deleter = Context::New(size_ * meta_.itemsize());
563  data_.reset(ptr_and_deleter.first, ptr_and_deleter.second);
564  }
565  capacity_ = size_ * meta_.itemsize();
566  return data_.get();
567  }
568  }
569 
579  inline void* raw_mutable_data() {
580  CAFFE_ENFORCE_WITH_CALLER(
581  meta_.id() != 0,
582  "Calling raw_mutable_data() without meta, but the current meta is "
583  "of unknown type.");
584  return raw_mutable_data(meta_);
585  }
586 
593  template <typename T>
594  inline T* mutable_data() {
595  if ((size_ == 0 || data_.get()) && IsType<T>()) {
596  return static_cast<T*>(data_.get());
597  }
598  return static_cast<T*>(raw_mutable_data(TypeMeta::Make<T>()));
599  }
600 
601 
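These accessors follow Caffe2's lazy-allocation contract; a short illustrative sketch:

  Tensor<CPUContext> t(std::vector<TIndex>{4});
  // t.data<float>() here would throw: the shape is set, but nothing is allocated yet.
  float* p = t.mutable_data<float>();  // first call fixes the element type and allocates
  p[0] = 1.f;
  const float* q = t.data<float>();    // valid now; t.data<int>() would throw a type mismatch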
605  inline int ndim() const { return dims_.size(); }
609  inline TIndex size() const { return size_; }
613  inline size_t itemsize() const { return meta_.itemsize(); }
619  inline size_t nbytes() const { return size_ * meta_.itemsize(); }
620 
621  inline size_t capacity_nbytes() const {
622  return capacity_;
623  }
627  inline const vector<TIndex>& dims() const { return dims_; }
628 
629  inline TIndex size_from_dim(int k) const {
630  return size_from_dim_(k, dims_);
631  }
632 
633  inline TIndex size_to_dim(int k) const {
634  return size_to_dim_(k, dims_);
635  }
636 
637  inline TIndex size_between_dim(int k, int l) const {
638  return size_between_dim_(k, l, dims_);
639  }
640 
652  inline int canonical_axis_index(int axis_index) const {
653  return canonical_axis_index_(axis_index, ndim());
654  }
655 
659  template <typename T>
660  inline bool IsType() const { return meta_.Match<T>(); }
664  inline const TypeMeta& meta() const { return meta_; }
665 
673  inline int dim32(const int i) const {
674  #ifndef NDEBUG
675  CAFFE_ENFORCE_LT_WITH_CALLER(i, dims_.size(), "Exceeding ndim limit");
676  CAFFE_ENFORCE_GE_WITH_CALLER(i, 0, "Cannot have negative dimension index");
677  #endif
678  CAFFE_ENFORCE_LT_WITH_CALLER(dims_[i], std::numeric_limits<int>::max());
679  return static_cast<int>(dims_[i]);
680  }
681 
687  inline TIndex dim(const int i) const {
688  #ifndef NDEBUG
689  CAFFE_ENFORCE_LT_WITH_CALLER(i, dims_.size(), "Exceeding ndim limit");
690  CAFFE_ENFORCE_GE_WITH_CALLER(i, 0, "Cannot have negative dimension index");
691  #endif
692  return dims_[i];
693  }
694 
695  protected:
696  vector<TIndex> dims_;
697  TIndex size_ = -1;
698  TypeMeta meta_;
699  std::shared_ptr<void> data_;
700  bool shares_data_ = false;
701  size_t capacity_ = 0;
702  bool reserved_ = false;
703  // In case of chunk load we store how much data was already loaded
704 
705  private:
706  template <
707  typename T,
708  typename = typename std::enable_if<std::is_integral<T>::value>::type>
709  bool SetDims(const vector<T>& src) {
710  auto old_size = size_;
711  dims_.resize(src.size());
712  TIndex new_size = 1;
713  for (unsigned int i = 0; i < src.size(); ++i) {
714  new_size *= src[i];
715  dims_[i] = src[i];
716  }
717  size_ = new_size;
718  return size_ != old_size;
719  }
720 
721  bool SetDims() {
722  auto old_size = size_;
723  dims_.resize(0);
724  size_ = 1;
725  return size_ != old_size;
726  }
727 
728  // TODO(jiayq): maybe rewrite the following functions with an initializer list.
729  // NVCC did not play well with initializer lists last time we tried, but it is
730  // worth another shot.
731  bool SetDims(const TIndex d0) {
732  auto old_size = size_;
733  dims_.resize(1);
734  dims_[0] = d0;
735  size_ = d0;
736  return size_ != old_size;
737  }
738 
739  bool SetDims(const TIndex d0, const TIndex d1) {
740  auto old_size = size_;
741  dims_.resize(2);
742  dims_[0] = d0;
743  dims_[1] = d1;
744  size_ = d0 * d1;
745  return size_ != old_size;
746  }
747 
748  bool SetDims(const TIndex d0, const TIndex d1, const TIndex d2) {
749  auto old_size = size_;
750  dims_.resize(3);
751  dims_[0] = d0;
752  dims_[1] = d1;
753  dims_[2] = d2;
754  size_ = d0 * d1 * d2;
755  return size_ != old_size;
756  }
757 
758  bool
759  SetDims(const TIndex d0, const TIndex d1, const TIndex d2, const TIndex d3) {
760  auto old_size = size_;
761  dims_.resize(4);
762  dims_[0] = d0;
763  dims_[1] = d1;
764  dims_[2] = d2;
765  dims_[3] = d3;
766  size_ = d0 * d1 * d2 * d3;
767  return size_ != old_size;
768  }
769 
770  // Note(jiayq): possibly a rule-of-three violation, but we explicitly
771  // discourage the use of = for Tensors.
772  Tensor& operator=(const Tensor& src) = delete;
773 };
774 
775 // For simplicity, we will typedef Tensor<CPUContext> to TensorCPU.
776 typedef Tensor<CPUContext> TensorCPU;
777 
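A basic end-to-end sketch using the TensorCPU alias (illustrative values):

  std::vector<TIndex> shape{2, 3};
  std::vector<float> values{1, 2, 3, 4, 5, 6};
  CPUContext ctx;
  TensorCPU a(shape, values, &ctx);  // fills the tensor from `values`
  TensorCPU b;
  b.CopyFrom(a);                     // same shape, type and contents as `a`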
778 constexpr int k_limit_default_ = 1000;
779 
780 // Type call registry
781 typedef TypeMeta (*TypeCall)(const void*);
782 TypeCall GetTypeCallFunction(CaffeTypeId id);
783 void RegisterTypeCallFunction(CaffeTypeId id, TypeCall c);
784 
785 template <class Context>
786 TypeMeta GetTensorType(const void* c) {
787  const Tensor<Context>* tc = static_cast<const Tensor<Context>*>(c);
788  return tc->meta();
789 }
790 
791 // Shape call registry
792 typedef vector<TIndex> (*TensorInfoCall)(
793  const void*,
794  bool* shares_data,
795  size_t* capacity,
796  DeviceOption* device);
797 TensorInfoCall GetTensorInfoFunction(CaffeTypeId id);
798 void RegisterTensorInfoFunction(CaffeTypeId id, TensorInfoCall c);
799 
800 template <class Context>
801 vector<TIndex> GetTensorInfo(
802  const void* c,
803  bool* shares_data,
804  size_t* capacity,
805  DeviceOption* device) {
806  const Tensor<Context>* tc = static_cast<const Tensor<Context>*>(c);
807  *shares_data = tc->shares_data();
808  *capacity = tc->capacity_nbytes();
809  device->set_device_type(CPU);
810  device->set_cuda_gpu_id(0);
811  return tc->dims();
812 }
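These registries are typically populated once per context type (e.g. in tensor.cc); a sketch of what such a registration might look like, assuming TypeMeta::Id from typeid.h:

  RegisterTypeCallFunction(TypeMeta::Id<Tensor<CPUContext>>(), GetTensorType<CPUContext>);
  RegisterTensorInfoFunction(TypeMeta::Id<Tensor<CPUContext>>(), GetTensorInfo<CPUContext>);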
813 
814 class TensorPrinter {
815  public:
816  explicit TensorPrinter(
817  const std::string& tensor_name = "",
818  const std::string& file_name = "",
819  int limit = k_limit_default_);
820  ~TensorPrinter();
821 
822  template <class T>
823  void Print(const Tensor<CPUContext>& tensor);
824 
825  template <class Context>
826  void PrintMeta(const Tensor<Context>& tensor);
827 
828  template <class Context>
829  string MetaStr(const Tensor<Context>& tensor);
830 
831  private:
832  bool to_file_;
833  int limit_;
834  std::unique_ptr<std::ofstream> log_file_;
835  std::string tensor_name_;
836 };
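A small usage sketch for TensorPrinter (the exact output string in the comment is approximate):

  CPUContext ctx;
  TensorCPU t(std::vector<TIndex>{3}, std::vector<float>{1.f, 2.f, 3.f}, &ctx);
  TensorPrinter printer("my_tensor");  // empty file name, so output goes to LOG(INFO)
  printer.Print<float>(t);             // e.g. "Tensor my_tensor of type float. Dims: (3,): 1,2,3"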
837 
838 template <class T>
839 void TensorPrinter::Print(const Tensor<CPUContext>& tensor) {
840  std::stringstream values_stream;
841  // One most likely doesn't want to print int64-number of items for visual
842  // inspection, so we cast down to int here.
843  int total_count = static_cast<int>(
844  std::min(tensor.size(), TIndex(limit_)));
845  const T* tensor_data = tensor.template data<T>();
846  for (int i = 0; i < total_count - 1; ++i) {
847  values_stream << tensor_data[i] << ",";
848  }
849  // We do not add a comma after the last item (and we guard against empty tensors).
850  if (total_count > 0) { values_stream << tensor_data[total_count - 1]; }
851  if (to_file_) {
852  (*log_file_) << MetaStr(tensor) << values_stream.str() << std::endl;
853  } else {
854  // Log to console.
855  LOG(INFO) << MetaStr(tensor) << values_stream.str();
856  }
857 }
858 
859 template <class Context>
860 void TensorPrinter::PrintMeta(const Tensor<Context>& tensor) {
861  if (to_file_) {
862  (*log_file_) << MetaStr(tensor) << std::endl;
863  } else {
864  LOG(INFO) << MetaStr(tensor);
865  }
866 }
867 
868 template <class Context>
869 std::string TensorPrinter::MetaStr(const Tensor<Context>& tensor) {
870  std::stringstream meta_stream;
871  meta_stream << "Tensor " << tensor_name_ << " of type "
872  << tensor.meta().name() << ". Dims: (";
873  for (const auto dim : tensor.dims()) {
874  meta_stream << dim << ",";
875  }
876  meta_stream << "): ";
877  return meta_stream.str();
878 }
879 
880 } // namespace caffe2
881 #endif // CAFFE2_CORE_TENSOR_H_
Referenced symbols:

vector<TIndex> ToVectorTIndex(const std::vector<int>& src) -- A utility function to convert vector<int> to vector<TIndex>. (Definition: tensor.h:49)
TIndex size_from_dim_(int k, vector<TIndex> dims) -- Return product of all dimensions starting from k. (Definition: tensor.h:56)
class Tensor -- Tensor is the basic class in Caffe2 that stores a contiguous block of memory with its shape information... (Definition: tensor.h:109)
Tensor() -- Initializes an empty tensor. (Definition: tensor.h:114)
Tensor(const vector<TIndex>& dims) -- Creates a tensor of the given dimension. (Definition: tensor.h:122)
Tensor(const Tensor<SrcContext>& src, ContextForCopy* context) -- Creates a tensor from a source tensor, copying over the content. (Definition: tensor.h:136)
Tensor(const Tensor<SrcContext>& src) -- Creates a tensor from a source tensor, copying over the content. (Definition: tensor.h:152)
Tensor(const vector<TIndex>& dims, const vector<T>& values, Context* context) -- Creates a tensor and fills its contents with the given values. (Definition: tensor.h:160)
Tensor(const T& value, Context* context) -- Creates a scalar tensor and fills its content with the given value. (Definition: tensor.h:172)
void CopyFrom(const Tensor<SrcContext>& src, ContextForCopy* context) -- Copies the data from a source tensor, with a context provided to carry out the underlying memcpy operation. (Definition: tensor.h:182)
void CopyFrom(const Tensor<SrcContext>& src) -- Copies the data from a source tensor. (Definition: tensor.h:206)
void Extend(TIndex num, float growthPct, ContextForCopy* context) -- Extends the outer-most dimension of this tensor by num elements, preserving the existing data... (Definition: tensor.h:223)
void Shrink(TIndex outer_dim) -- Shrinks the outer-most dimension to the given size, keeping the data. (Definition: tensor.h:277)
void Resize(Ts... dim_source) -- Resizes a tensor. (Definition: tensor.h:304)
void ResizeLike(const Tensor<OtherContext>& src_tensor) -- Resizes the tensor like the source tensor. (Definition: tensor.h:331)
void Reshape(const vector<TIndex>& dims) -- Resizes the tensor without touching the underlying storage. (Definition: tensor.h:342)
void FreeMemory() -- Releases whatever memory the tensor was holding but keeps size and type information. (Definition: tensor.h:369)
string DebugString() const -- A utility function to print the debug string for the tensor. (Definition: tensor.h:383)
void ShareData(const Tensor& src) -- Shares the data with another tensor. (Definition: tensor.h:416)
void ShareExternalPointer(T* src, size_t capacity = 0, Deleter d = nullptr) -- Shares the data with an externally managed pointer. (Definition: tensor.h:444)
const void* raw_data() const -- Returns a const raw void* pointer of the underlying storage. (Definition: tensor.h:488)
const T* data() const -- Returns a typed pointer of the underlying storage. (Definition: tensor.h:500)
void* raw_mutable_data(const TypeMeta& meta) -- Returns a mutable raw pointer of the underlying storage. (Definition: tensor.h:526)
void* raw_mutable_data() -- Returns a mutable raw pointer of the underlying storage. (Definition: tensor.h:579)
T* mutable_data() -- Returns a typed pointer of the underlying storage. (Definition: tensor.h:594)
int ndim() const -- Returns the number of dimensions of the data. (Definition: tensor.h:605)
TIndex size() const -- Returns the size (i.e. the number of items) of the tensor. (Definition: tensor.h:609)
size_t itemsize() const -- Returns the number of bytes each item takes in the tensor. (Definition: tensor.h:613)
size_t nbytes() const -- Returns the total number of bytes of the storage. (Definition: tensor.h:619)
const vector<TIndex>& dims() const -- Returns the dimensions of the tensor as a vector. (Definition: tensor.h:627)
int canonical_axis_index(int axis_index) const -- Returns the 'canonical' version of a (usually) user-specified axis, allowing for negative indexing (e.g. -1 for the last axis). (Definition: tensor.h:652)
bool IsType() const -- Checks if the tensor content is of the given data type. (Definition: tensor.h:660)
const TypeMeta& meta() const -- Returns the TypeMeta object associated with the current data type. (Definition: tensor.h:664)
int dim32(const int i) const -- Returns the i-th dimension of the tensor in int. (Definition: tensor.h:673)
TIndex dim(const int i) const -- Returns the i-th dimension of the tensor. (Definition: tensor.h:687)
class TypeMeta -- TypeMeta is a thin class that allows us to store the type of a container such as a blob... (Definition: typeid.h:104)
const CaffeTypeId& id() const -- Returns the type id. (Definition: typeid.h:153)
const size_t& itemsize() const -- Returns the size of the item. (Definition: typeid.h:159)
PlacementNew ctor() const -- Returns the placement new function pointer for individual items. (Definition: typeid.h:165)
TypedCopy copy() const -- Returns the typed copy function pointer for individual items. (Definition: typeid.h:171)
TypedDestructor dtor() const -- Returns the destructor function pointer for individual items. (Definition: typeid.h:177)
const char* name() const -- Returns a printable name for the type. (Definition: typeid.h:183)