Caffe2 - C++ API
A deep learning, cross platform ML framework
tensor.cc
1 #include "caffe2/core/tensor.h"
2 
3 #include "caffe2/core/blob_stats.h"
4 
5 namespace caffe2 {
6 
// Registers caffe2::Tensor under preallocated type-id slot 12 so that
// TypeMeta::Id<Tensor>() resolves without dynamic registration.
CAFFE_DEFINE_PREALLOCATED_KNOWN_TYPE(12, Tensor);
8 
9 TensorPrinter::TensorPrinter(
10  const std::string& tensor_name,
11  const std::string& file_name,
12  int limit)
13  : to_file_(!file_name.empty()),
14  limit_(limit ? limit : k_limit_default_),
15  tensor_name_(tensor_name) {
16  if (to_file_) {
17  // We will output to file instead of printing on screen.
18  // We will write each individual tensor to its individual file.
19  log_file_.reset(new std::ofstream(
20  file_name, std::ofstream::out | std::ofstream::trunc));
21  CAFFE_ENFORCE(
22  log_file_->good(),
23  "Failed to open TensorPrinter file ",
24  file_name,
25  ". rdstate() = ",
26  log_file_->rdstate());
27  }
28 }
29 
30 TensorPrinter::~TensorPrinter() {
31  if (log_file_.get()) {
32  log_file_->close();
33  }
34 }
35 
36 void TensorPrinter::PrintMeta(const Tensor& tensor) {
37  if (to_file_) {
38  (*log_file_) << MetaStr(tensor) << std::endl;
39  } else {
40  LOG(INFO) << MetaStr(tensor);
41  }
42 }
43 
44 std::string TensorPrinter::MetaStr(const Tensor& tensor) {
45  std::stringstream meta_stream;
46  meta_stream << "Tensor " << tensor_name_ << " of type "
47  << tensor.dtype().name() << ". Dims: (";
48  for (const auto dim : tensor.sizes()) {
49  meta_stream << dim << ",";
50  }
51  meta_stream << "): ";
52  return meta_stream.str();
53 }
54 
55 TypeMeta GetTensorType(const void* c) {
56  const Tensor* tc = static_cast<const Tensor*>(c);
57  return tc->dtype();
58 }
59 
// TODO(jerryzh): Remove
// Maps a blob's TypeIdentifier to the function that extracts its TypeMeta;
// pre-seeded with the Tensor entry defined above.
static CaffeMap<TypeIdentifier, TypeCall> type_call_registry_{
    {TypeMeta::Id<Tensor>(), GetTensorType}};
63 
64 TypeCall GetTypeCallFunction(TypeIdentifier id) {
65  auto f = type_call_registry_.find(id);
66  if (f == type_call_registry_.end()) {
67  return nullptr;
68  }
69  return f->second;
70 }
71 
// Registers (or overwrites) the TypeCall handler for `id`.
void RegisterTypeCallFunction(TypeIdentifier id, TypeCall c) {
  type_call_registry_[id] = c;
}
75 
// Forward declaration; the definition lives in GPU code (common_gpu.cc).
int GetGPUIDForPointer(const void* ptr);
77 
78 vector<int64_t> GetTensorInfo(
79  const void* c,
80  size_t* capacity,
81  DeviceOption* device) {
82  CHECK(capacity);
83  const Tensor* tc = static_cast<const Tensor*>(c);
84  CHECK(tc);
85  CHECK(tc->unsafeGetTensorImpl());
86  CHECK(tc->unsafeGetTensorImpl()->storage().unsafeGetStorageImpl());
87  *capacity = tc->storage().capacity();
88  ExtractDeviceOption(device, tc->GetDevice());
89  return tc->sizes().vec();
90 }
91 
// since we only have one tensor, probably need to remove this at some point?
// Maps a blob's TypeIdentifier to its tensor-info extractor; pre-seeded
// with the Tensor entry defined above.
static CaffeMap<TypeIdentifier, TensorInfoCall> tensor_info_call_registry_{
    {TypeMeta::Id<Tensor>(), GetTensorInfo}};
95 
96 // TODO: Remove this code in a separate diff, since we only have one
97 // GetTensorInfo function now
98 TensorInfoCall GetTensorInfoFunction(TypeIdentifier id) {
99  auto f = tensor_info_call_registry_.find(id);
100  if (f == tensor_info_call_registry_.end()) {
101  return nullptr;
102  }
103  return f->second;
104 }
105 
// Registers (or overwrites) the TensorInfoCall handler for `id`.
void RegisterTensorInfoFunction(TypeIdentifier id, TensorInfoCall c) {
  tensor_info_call_registry_[id] = c;
}
109 
110 void TensorVectorResize(
111  std::vector<Tensor>& tensors,
112  int size,
113  DeviceType type) {
114  tensors.reserve(size);
115  for (auto i = 0; i < size; ++i) {
116  tensors.emplace_back(type);
117  }
118 }
119 
120 Tensor empty(at::IntArrayRef dims, at::TensorOptions options) {
121  // TODO: merge this with at::empty after Tensor is merged
122  auto tensor = Tensor(dims, options.device());
123  tensor.raw_mutable_data(options.dtype());
124  return tensor;
125 }
126 
128  Tensor* tensor,
129  at::IntArrayRef dims,
130  at::TensorOptions options) {
131  CAFFE_ENFORCE(options.device_opt() != c10::nullopt);
132  if (*tensor) {
133  // Note: we don't compare device_id here because of the purpose of
134  // ReinitializeTensor: https://github.com/pytorch/pytorch/pull/13147
135  // In the original code, we don't have device_id defined, therefore, we should not
136  // include device_id in the comparison
137  if (tensor->GetDeviceType() == options.device().type()) {
138  if (tensor->sizes() != dims) {
139  // Resize when the dims doesn't match
140  tensor->Resize(dims);
141  }
142  if (tensor->dtype() == options.dtype()) {
143  tensor->raw_mutable_data();
144  } else {
145  C10_LOG_EVERY_MS(WARNING, 1000)
146  << "Changing the data type of Tensor is discouraged."
147  << " Attempt to change data type from: " << tensor->dtype()
148  << " to: " << options.dtype();
149  // create a new Tensor when the data_type doesn't match
150  *tensor = caffe2::empty(dims, options);
151  }
152  return;
153  }
154  // create a new Tensor when device doesn't match
155  }
156 
157  VLOG(1) << "Create new mutable object " << TypeMeta::TypeName<Tensor>()
158  << " dims: " << dims;
159  *tensor = caffe2::empty(dims, options);
160 }
161 
162 void ReinitializeAndCopyFrom(
163  Tensor* t,
164  at::TensorOptions options,
165  const Tensor& src,
166  bool async) {
167  auto device_type = options.device().type();
168  CAFFE_ENFORCE(t != nullptr, "Target tensor ptr is null.");
169  if (!*t || device_type != t->GetDeviceType()) {
170  *t = Tensor(device_type);
171  }
172  CAFFE_ENFORCE(
173  !t->dtype_initialized() || t->dtype() == src.dtype(),
174  "We don't allow a change of data type in ReinitializeAndCopyFrom. Attempt to "
175  " change from: ",
176  t->dtype(),
177  " to: ",
178  src.dtype());
179  t->CopyFrom(src, async);
180 }
181 
// Validates that the wrapped TensorImpl satisfies what the caffe2 Tensor
// wrapper supports: non-null, not an autograd variable, strided layout,
// and contiguous memory. Throws / CAFFE_ENFORCE-fails otherwise.
void Tensor::enforce_invariants() {
  if (impl_.get() == nullptr) {
    throw std::runtime_error("TensorImpl with nullptr is not supported");
  }
  CAFFE_ENFORCE(
      !impl_->is_variable(),
      "Caffe2 tensor wrapper doesn't support autograd variables");
  CAFFE_ENFORCE_EQ(
      impl_->layout(),
      at::kStrided,
      "Caffe2 tensor wrapper supports only regular non-sparse tensors");
  CAFFE_ENFORCE(
      impl_->is_contiguous(),
      "Caffe2 tensor wrapper supports only contiguous tensors");
}
197 
198 namespace {
199 
200 struct TensorStatGetter : BlobStatGetter {
201  size_t sizeBytes(const Blob& blob) const override {
202  const auto& tensor = blob.Get<Tensor>();
203  auto nbytes = tensor.nbytes();
204  if (nbytes > 0 && tensor.IsType<std::string>()) {
205  const auto* data = tensor.data<std::string>();
206  for (int i = 0; i < tensor.numel(); ++i) {
207  nbytes += data[i].size();
208  }
209  }
210  return nbytes;
211  }
212 };
213 REGISTER_BLOB_STAT_GETTER(Tensor, TensorStatGetter);
214 }
215 
216 } // namespace caffe2
Blob is a general container that hosts a typed pointer.
Definition: blob.h:24
C10_NODISCARD TensorOptions device(c10::optional< Device > device) const noexcept
Return a copy of TensorOptions with device set to the given one, or cleared if device is nullopt...
void ReinitializeTensor(Tensor *tensor, at::IntArrayRef dims, at::TensorOptions options)
Reinitialize a Tensor to given dims and options if necessary, note that this will not do anything if ...
Definition: tensor.cc:127
c10::optional< Device > device_opt() const noexcept
Returns the device of the TensorOptions, or c10::nullopt if device is not specified.
A global dictionary that holds information about what Caffe2 modules have been loaded in the current ...
Definition: blob.h:13
int GetGPUIDForPointer(const void *ptr)
Gets the GPU id that the current pointer is located at.
Definition: common_gpu.cc:106
C10_NODISCARD TensorOptions dtype(c10::optional< caffe2::TypeMeta > dtype) const noexcept
Return a copy of TensorOptions with dtype set to the given one.
const T & Get() const
Gets the const reference of the stored object.
Definition: blob.h:71