Caffe2 - C++ API
A deep learning, cross platform ML framework
THTensor.cpp
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "TH/generic/THTensor.cpp"
#else

#include <ATen/InferSize.h>
#include <new>

/**** access methods ****/
THStorage *THTensor_(storage)(const THTensor *self)
{
  return THTensor_getStoragePtr(self);
}

ptrdiff_t THTensor_(storageOffset)(const THTensor *self)
{
  return self->storage_offset();
}

int THTensor_(nDimension)(const THTensor *self)
{
  return THTensor_nDimension(self);
}

int THTensor_(nDimensionLegacyNoScalars)(const THTensor *self)
{
  return THTensor_nDimensionLegacyNoScalars(self);
}

int THTensor_(nDimensionLegacyAll)(const THTensor *self)
{
  return THTensor_nDimensionLegacyAll(self);
}

int64_t THTensor_(size)(const THTensor *self, int dim)
{
  THArgCheck((dim >= 0) && (dim < self->dim()), 2, "dimension %d out of range of %dD tensor",
      dim, THTensor_(nDimensionLegacyNoScalars)(self));
  return self->size(dim);
}

int64_t THTensor_(stride)(const THTensor *self, int dim)
{
  THArgCheck((dim >= 0) && (dim < self->dim()), 2, "dimension %d out of range of %dD tensor",
      dim, THTensor_(nDimensionLegacyNoScalars)(self));
  return self->stride(dim);
}

scalar_t *THTensor_(data)(const THTensor *self) {
  return self->data<scalar_t>();
}
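
/* Usage sketch for the accessors above (assuming the float instantiation of
   this generic file, where TH expands THTensor_(NAME) to THFloatTensor_NAME
   and scalar_t to float):

     THFloatTensor *t = THFloatTensor_newWithSize2d(2, 3);
     int     nd = THFloatTensor_nDimension(t);  // 2
     int64_t s0 = THFloatTensor_size(t, 0);     // 2
     int64_t k0 = THFloatTensor_stride(t, 0);   // 3: fresh tensors are contiguous
     float  *p  = THFloatTensor_data(t);        // storage base + storage offset
     THFloatTensor_free(t);
*/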

/**** creation methods ****/

/* Empty init */
THTensor *THTensor_(new)(void)
{
  return c10::make_intrusive<at::TensorImpl, at::UndefinedTensorImpl>(
    c10::intrusive_ptr<at::StorageImpl>::reclaim(THStorage_(new)()),
    at::CPUTensorId(),
    false
  ).release();
}

/* Pointer-copy init */
THTensor *THTensor_(newWithTensor)(THTensor *tensor)
{
  THTensor *self = c10::make_intrusive<at::TensorImpl, at::UndefinedTensorImpl>(
    c10::intrusive_ptr<at::StorageImpl>::reclaim(THStorage_(new)()),
    at::CPUTensorId(),
    false
  ).release();
  THTensor_(setStorageNd)(self,
                          THTensor_getStoragePtr(tensor),
                          tensor->storage_offset(),
                          tensor->dim(),
                          THTensor_getSizePtr(tensor),
                          THTensor_getStridePtr(tensor));
  return self;
}

/* Storage init */
THTensor *THTensor_(newWithStorage)(THStorage *storage, ptrdiff_t storageOffset, at::IntArrayRef sizes, at::IntArrayRef strides) {
  if (strides.data()) {
    AT_CHECK(sizes.size() == strides.size(), "number of sizes and strides must match");
  }
  THTensor *self = c10::make_intrusive<at::TensorImpl, at::UndefinedTensorImpl>(
    c10::intrusive_ptr<at::StorageImpl>::reclaim(THStorage_(new)()),
    at::CPUTensorId(),
    false
  ).release();
  THTensor_(setStorageNd)(self, storage, storageOffset, sizes.size(),
                          const_cast<int64_t*>(sizes.data()), const_cast<int64_t*>(strides.data()));

  return self;
}

THTensor *THTensor_(newWithStorage1d)(THStorage *storage, ptrdiff_t storageOffset,
                                      int64_t size0, int64_t stride0)
{
  return THTensor_(newWithStorage)(storage, storageOffset, {size0}, {stride0});
}

THTensor *THTensor_(newWithStorage2d)(THStorage *storage, ptrdiff_t storageOffset,
                                      int64_t size0, int64_t stride0,
                                      int64_t size1, int64_t stride1)
{
  return THTensor_(newWithStorage)(storage, storageOffset, {size0, size1}, {stride0, stride1});
}

THTensor *THTensor_(newWithStorage3d)(THStorage *storage, ptrdiff_t storageOffset,
                                      int64_t size0, int64_t stride0,
                                      int64_t size1, int64_t stride1,
                                      int64_t size2, int64_t stride2)
{
  return THTensor_(newWithStorage)(storage, storageOffset, {size0, size1, size2}, {stride0, stride1, stride2});
}

THTensor *THTensor_(newWithStorage4d)(THStorage *storage, ptrdiff_t storageOffset,
                                      int64_t size0, int64_t stride0,
                                      int64_t size1, int64_t stride1,
                                      int64_t size2, int64_t stride2,
                                      int64_t size3, int64_t stride3)
{
  return THTensor_(newWithStorage)(storage, storageOffset,
                                   {size0, size1, size2, size3},
                                   {stride0, stride1, stride2, stride3});
}

THTensor *THTensor_(newWithSize)(at::IntArrayRef size, at::IntArrayRef stride)
{
  return THTensor_(newWithStorage)(NULL, 0, size, stride);
}

THTensor *THTensor_(newWithSize1d)(int64_t size0)
{
  return THTensor_(newWithSize)({size0}, {});
}

THTensor *THTensor_(newWithSize2d)(int64_t size0, int64_t size1)
{
  return THTensor_(newWithSize)({size0, size1}, {});
}

THTensor *THTensor_(newWithSize3d)(int64_t size0, int64_t size1, int64_t size2)
{
  return THTensor_(newWithSize)({size0, size1, size2}, {});
}

THTensor *THTensor_(newWithSize4d)(int64_t size0, int64_t size1, int64_t size2, int64_t size3)
{
  return THTensor_(newWithSize)({size0, size1, size2, size3}, {});
}
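
/* Usage sketch (float instantiation THFloatTensor_* assumed): the newWithSize*
   constructors allocate fresh storage, while newWithStorage* merely alias an
   existing storage at a given offset:

     THFloatTensor *a = THFloatTensor_newWithSize2d(4, 5);           // owns new storage
     THFloatStorage *s = THFloatTensor_storage(a);
     THFloatTensor *b = THFloatTensor_newWithStorage1d(s, 0, 10, 1); // view of a's data
     THFloatTensor_free(b);
     THFloatTensor_free(a);  // the storage is freed with its last referent
*/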

THTensor *THTensor_(newClone)(THTensor *self)
{
  THTensor *tensor = THTensor_(new)();
  THTensor_(resizeAs)(tensor, self);
  at::Tensor tensor_wrap = THTensor_wrap(tensor);
  at::Tensor self_wrap = THTensor_wrap(self);
  at::_copy_same_type_(tensor_wrap, self_wrap);
  return tensor;
}

THTensor *THTensor_(newContiguous)(THTensor *self)
{
  if(!THTensor_(isContiguous)(self))
    return THTensor_(newClone)(self);
  else
  {
    THTensor_(retain)(self);
    return self;
  }
}
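
/* Usage sketch (float instantiation assumed): newContiguous either deep-copies
   or retains, so given some non-contiguous view `src` the caller can free the
   result unconditionally:

     THFloatTensor *c = THFloatTensor_newContiguous(src); // compact copy or retained alias
     float *dense = THFloatTensor_data(c);                // safe to treat as row-major
     THFloatTensor_free(c);                               // correct in either case
*/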

THTensor *THTensor_(newSelect)(THTensor *tensor, int dimension_, int64_t sliceIndex_)
{
  THTensor *self = THTensor_(newWithTensor)(tensor);
  THTensor_(select)(self, NULL, dimension_, sliceIndex_);
  return self;
}

THTensor *THTensor_(newNarrow)(THTensor *tensor, int dimension_, int64_t firstIndex_, int64_t size_)
{
  THTensor *self = THTensor_(newWithTensor)(tensor);
  THTensor_(narrow)(self, NULL, dimension_, firstIndex_, size_);
  return self;
}

THTensor *THTensor_(newTranspose)(THTensor *tensor, int dimension1_, int dimension2_)
{
  THTensor *self = THTensor_(newWithTensor)(tensor);
  THTensor_(transpose)(self, NULL, dimension1_, dimension2_);
  return self;
}

THTensor *THTensor_(newUnfold)(THTensor *tensor, int dimension_, int64_t size_, int64_t step_)
{
  THTensor *self = THTensor_(newWithTensor)(tensor);
  THTensor_(unfold)(self, NULL, dimension_, size_, step_);
  return self;
}

THTensor *THTensor_(newView)(THTensor *tensor, at::IntArrayRef size)
{
  ptrdiff_t numel = THTensor_(nElement)(tensor);
  THTensor *self = THTensor_(new)();
  auto inferred_size = at::infer_size(size, numel);
  auto stride = THTensor_compute_stride(tensor->sizes(),
                                        tensor->strides(),
                                        inferred_size);
  THArgCheck(stride.has_value(), 2, "view size is "
      "not compatible with input tensor's size and stride (at least one dimension spans "
      "across two contiguous subspaces). Call .contiguous() before .view().");
  auto stride_value = *stride;
  THTensor_setStorage(self, THTensor_getStoragePtr(tensor), tensor->storage_offset(), inferred_size, stride_value);
  return self;
}
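
/* Usage sketch (float instantiation assumed): newView reinterprets the same
   storage under a new shape, and a -1 entry is inferred from the element count:

     THFloatTensor *m = THFloatTensor_newWithSize2d(2, 3);
     THFloatTensor *v = THFloatTensor_newView(m, {6});      // 2x3 -> 6, no copy
     THFloatTensor *w = THFloatTensor_newView(m, {3, -1});  // -1 inferred as 2
     // Viewing a transposed tensor would trip the THArgCheck above;
     // call newContiguous first in that case.
     THFloatTensor_free(w);
     THFloatTensor_free(v);
     THFloatTensor_free(m);
*/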

/* Resize */
void THTensor_(resize)(THTensor *self, at::IntArrayRef size, at::IntArrayRef stride)
{
  return THTensor_resize(self, size, stride);
}

void THTensor_(resizeAs)(THTensor *self, THTensor *src)
{
  if(!THTensor_(isSameSizeAs)(self, src))
    THTensor_(resizeNd)(self, src->dim(), THTensor_getSizePtr(src), NULL);
}

void THTensor_(resize0d)(THTensor *tensor)
{
  THTensor_(resizeNd)(tensor, 0, {}, nullptr);
}

void THTensor_(resize1d)(THTensor *tensor, int64_t size0)
{
  int64_t size[1] = {size0};
  THTensor_(resizeNd)(tensor, 1, size, nullptr);
}

void THTensor_(resize2d)(THTensor *tensor, int64_t size0, int64_t size1)
{
  int64_t size[2] = {size0, size1};
  THTensor_(resizeNd)(tensor, 2, size, nullptr);
}

void THTensor_(resize3d)(THTensor *tensor, int64_t size0, int64_t size1, int64_t size2)
{
  int64_t size[3] = {size0, size1, size2};
  THTensor_(resizeNd)(tensor, 3, size, nullptr);
}

void THTensor_(resize4d)(THTensor *self, int64_t size0, int64_t size1, int64_t size2, int64_t size3)
{
  int64_t size[4] = {size0, size1, size2, size3};
  THTensor_(resizeNd)(self, 4, size, nullptr);
}

void THTensor_(resize5d)(THTensor *self, int64_t size0, int64_t size1, int64_t size2, int64_t size3, int64_t size4)
{
  int64_t size[5] = {size0, size1, size2, size3, size4};
  THTensor_(resizeNd)(self, 5, size, nullptr);
}
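
/* Usage sketch (float instantiation assumed): resizeNd keeps the existing
   storage whenever it is already large enough, so shrinking is metadata-only:

     THFloatTensor *t = THFloatTensor_new();
     THFloatTensor_resize2d(t, 3, 4);  // allocates room for 12 elements
     THFloatTensor_resize1d(t, 5);     // 5 <= 12: the storage is reused
     THFloatTensor_free(t);
*/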

void THTensor_(set)(THTensor *self, THTensor *src)
{
  if(self != src)
    THTensor_(setStorageNd)(self,
                            THTensor_getStoragePtr(src),
                            src->storage_offset(),
                            src->dim(),
                            THTensor_getSizePtr(src),
                            THTensor_getStridePtr(src));
}

void THTensor_(setStorage)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, at::IntArrayRef size_, at::IntArrayRef stride_)
{
  THTensor_setStorage(self, storage_, storageOffset_, size_, stride_);
}

void THTensor_(setStorage1d)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_,
                             int64_t size0_, int64_t stride0_)
{
  THTensor_(setStorage)(self, storage_, storageOffset_,
                        {size0_}, {stride0_});
}

void THTensor_(setStorage2d)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_,
                             int64_t size0_, int64_t stride0_,
                             int64_t size1_, int64_t stride1_)
{
  THTensor_(setStorage)(self, storage_, storageOffset_,
                        {size0_, size1_},
                        {stride0_, stride1_});
}

void THTensor_(setStorage3d)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_,
                             int64_t size0_, int64_t stride0_,
                             int64_t size1_, int64_t stride1_,
                             int64_t size2_, int64_t stride2_)
{
  THTensor_(setStorage)(self, storage_, storageOffset_,
                        {size0_, size1_, size2_},
                        {stride0_, stride1_, stride2_});
}

void THTensor_(setStorage4d)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_,
                             int64_t size0_, int64_t stride0_,
                             int64_t size1_, int64_t stride1_,
                             int64_t size2_, int64_t stride2_,
                             int64_t size3_, int64_t stride3_)
{

  int64_t size[4] = {size0_, size1_, size2_, size3_};
  int64_t stride[4] = {stride0_, stride1_, stride2_, stride3_};

  THTensor_(setStorage)(self, storage_, storageOffset_, size, stride);
}


void THTensor_(narrow)(THTensor *self, THTensor *src, int dimension, int64_t firstIndex, int64_t size)
{
  if(!src)
    src = self;

  THArgCheck( (dimension >= 0) && (dimension < src->dim()), 2, "out of range");
  THArgCheck( firstIndex >= 0, 3, "out of range");
  THArgCheck( size >= 0, 4, "out of range");
  THArgCheck(firstIndex <= src->size(dimension) - size, 4, "out of range");

  THTensor_(set)(self, src);

  if (firstIndex > 0) {
    self->set_storage_offset(self->storage_offset() + firstIndex*self->stride(dimension));
  }

  self->set_size(dimension, size);
}
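
/* Worked example (float instantiation assumed): narrow is pure metadata; it
   advances the storage offset by firstIndex*stride(dimension) and shrinks
   size(dimension):

     THFloatTensor *t = THFloatTensor_newWithSize2d(4, 5);   // offset 0, strides {5,1}
     THFloatTensor *n = THFloatTensor_newNarrow(t, 0, 1, 2); // rows 1..2
     // n: sizes {2, 5}, strides {5, 1}, storage offset 1*5 = 5
     THFloatTensor_free(n);
     THFloatTensor_free(t);
*/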

void THTensor_(select)(THTensor *self, THTensor *src, int dimension, int64_t sliceIndex)
{
  int d;

  if(!src)
    src = self;

  THArgCheck(src->dim() > 0, 1, "cannot select on a 0-dim tensor");
  THArgCheck((dimension >= 0) && (dimension < src->dim()), 2, "out of range");
  THArgCheck((sliceIndex >= 0) && (sliceIndex < src->size(dimension)), 3, "out of range");

  THTensor_(set)(self, src);
  THTensor_(narrow)(self, NULL, dimension, sliceIndex, 1);

  std::vector<int64_t> newSize(self->dim()-1);
  std::vector<int64_t> newStride(self->dim()-1);
  for (d = 0; d < dimension; d++)
  {
    newSize[d] = self->size(d);
    newStride[d] = self->stride(d);
  }

  for(d = dimension; d < self->dim()-1; d++)
  {
    newSize[d] = self->size(d+1);
    newStride[d] = self->stride(d+1);
  }
  self->set_sizes_and_strides(newSize, newStride);
}

void THTensor_(transpose)(THTensor *self, THTensor *src, int dimension1, int dimension2)
{
  int64_t z;

  if(!src)
    src = self;

  THArgCheck( (dimension1 >= 0) && (dimension1 < THTensor_nDimensionLegacyNoScalars(src)), 1, "out of range");
  THArgCheck( (dimension2 >= 0) && (dimension2 < THTensor_nDimensionLegacyNoScalars(src)), 2, "out of range");

  THTensor_(set)(self, src);

  if(dimension1 == dimension2)
    return;

  z = self->stride(dimension1);
  self->set_stride(dimension1, self->stride(dimension2));
  self->set_stride(dimension2, z);
  z = self->size(dimension1);
  self->set_size(dimension1, self->size(dimension2));
  self->set_size(dimension2, z);
}
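
/* Worked example (float instantiation assumed): transpose swaps the two
   dimensions' sizes and strides on the view; no element moves:

     THFloatTensor *t  = THFloatTensor_newWithSize2d(2, 3);  // sizes {2,3}, strides {3,1}
     THFloatTensor *tt = THFloatTensor_newTranspose(t, 0, 1);
     // tt: sizes {3,2}, strides {1,3}; THFloatTensor_isContiguous(tt) == 0
     THFloatTensor_free(tt);
     THFloatTensor_free(t);
*/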

void THTensor_(unfold)(THTensor *self, THTensor *src, int dimension, int64_t size, int64_t step)
{
  int d;

  if(!src)
    src = self;

  THArgCheck((dimension >= 0) && (dimension < THTensor_nDimensionLegacyNoScalars(src)), 2, "out of range");
  THArgCheck(size <= THTensor_sizeLegacyNoScalars(src, dimension), 3, "out of range");
  THArgCheck(step > 0, 4, "invalid step");

  THTensor_(set)(self, src);

  std::vector<int64_t> newSize(/* size */ self->dim()+1);
  std::vector<int64_t> newStride(/* size */ self->dim()+1);

  newSize[self->dim()] = size;
  newStride[self->dim()] = THTensor_strideLegacyNoScalars(self, dimension);
  for(d = 0; d < self->dim(); d++)
  {
    auto self_size = THTensor_sizeLegacyNoScalars(self, d);
    auto self_stride = THTensor_strideLegacyNoScalars(self, d);
    if(d == dimension)
    {
      newSize[d] = (self_size - size) / step + 1;
      newStride[d] = step*self_stride;
    }
    else
    {
      newSize[d] = self_size;
      newStride[d] = self_stride;
    }
  }
  self->set_sizes_and_strides(newSize, newStride);
}
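
/* Worked example (float instantiation assumed): unfolding a size-7 vector with
   size=2, step=2 gives (7 - 2) / 2 + 1 = 3 windows plus a trailing window dim:

     THFloatTensor *t = THFloatTensor_newWithSize1d(7);      // stride {1}
     THFloatTensor *u = THFloatTensor_newUnfold(t, 0, 2, 2);
     // u: sizes {3, 2}, strides {2, 1} -- windows [0,1], [2,3], [4,5];
     // element 6 is dropped because a full window no longer fits.
     THFloatTensor_free(u);
     THFloatTensor_free(t);
*/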

/* we have to handle the case where the result is a number */
void THTensor_(squeeze)(THTensor *self, THTensor *src)
{
  if(!src)
    src = self;

  THTensor_(set)(self, src);

  std::vector<int64_t> newSize;
  std::vector<int64_t> newStride;
  for(int d = 0; d < src->dim(); ++d)
  {
    if(src->size(d) != 1)
    {
      newSize.push_back(src->size(d));
      newStride.push_back(src->stride(d));
    }
  }

  self->set_sizes_and_strides(newSize, newStride);
}

void THTensor_(squeeze1d)(THTensor *self, THTensor *src, int dimension)
{
  int d;

  if(!src)
    src = self;

  THArgCheck((dimension >= 0) && (dimension < src->dim()), 2, "dimension out of range");

  THTensor_(set)(self, src);

  if(src->size(dimension) == 1)
  {
    std::vector<int64_t> newSize(self->dim() - 1);
    std::vector<int64_t> newStride(self->dim() - 1);
    for (d = 0; d < dimension; d++)
    {
      newSize[d] = self->size(d);
      newStride[d] = self->stride(d);
    }

    for(d = dimension; d < self->dim()-1; d++)
    {
      newSize[d] = self->size(d+1);
      newStride[d] = self->stride(d+1);
    }
    self->set_sizes_and_strides(newSize, newStride);
  }
}

void THTensor_(unsqueeze1d)(THTensor *self, THTensor *src, int dimension)
{
  int d;

  if(!src)
    src = self;

  THArgCheck((dimension >= 0) && (dimension <= src->dim()), 2, "dimension out of range");

  THTensor_(set)(self, src);

  std::vector<int64_t> newSize(/* size */ self->dim()+1);
  std::vector<int64_t> newStride(/* size */ self->dim()+1);

  for(d = self->dim(); d > dimension; d--)
  {
    newSize[d] = self->size(d-1);
    newStride[d] = self->stride(d-1);
  }
  if (dimension < self->dim())
  {
    newStride[dimension] = self->size(dimension) * self->stride(dimension);
  }
  else
  {
    newStride[dimension] = 1;
  }
  newSize[dimension] = 1;
  for(d = dimension - 1; d >= 0; d--)
  {
    newSize[d] = self->size(d);
    newStride[d] = self->stride(d);
  }
  self->set_sizes_and_strides(newSize, newStride);
}
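
/* Usage sketch (float instantiation assumed): squeeze1d drops one size-1
   dimension in place; unsqueeze1d inserts one:

     THFloatTensor *t = THFloatTensor_newWithSize3d(2, 1, 3);
     THFloatTensor_squeeze1d(t, NULL, 1);    // sizes become {2, 3}
     THFloatTensor_unsqueeze1d(t, NULL, 0);  // sizes become {1, 2, 3}
     THFloatTensor_free(t);
*/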

int THTensor_(isTransposed)(const THTensor *self)
{
  if (THTensor_(isContiguous)(self)) {
    return 0;
  }
  int64_t max_stride = 1;
  int64_t size_max_stride = 1;
  int64_t z = 1;
  int d;
  for (d = 0; d < self->dim(); ++d) {
    if (self->stride(d) == 0 && self->size(d) != 1)
      return 0;
    if (self->stride(d) > max_stride) {
      max_stride = self->stride(d);
      size_max_stride = self->size(d);
    }
    z *= self->size(d);
  }
  if (z == max_stride * size_max_stride) {
    return 1;
  }
  return 0;
}

int THTensor_(isContiguous)(const THTensor *self)
{
  return self->is_contiguous();
}

int THTensor_(isSameSizeAs)(const THTensor *self, const THTensor* src)
{
  int d;
  if (self->dim() != src->dim())
    return 0;
  for(d = 0; d < self->dim(); ++d)
  {
    if(self->size(d) != src->size(d))
      return 0;
  }
  return 1;
}

int THTensor_(isSetTo)(const THTensor *self, const THTensor* src)
{
  if (!THTensor_getStoragePtr(self))
    return 0;
  if (THTensor_getStoragePtr(self) == THTensor_getStoragePtr(src) &&
      self->storage_offset() == src->storage_offset() &&
      THTensor_nDimensionLegacyAll(self) == THTensor_nDimensionLegacyAll(src))
  {
    int d;
    for (d = 0; d < THTensor_nDimensionLegacyAll(self); ++d)
    {
      if (self->size(d) != src->size(d) || self->stride(d) != src->stride(d))
        return 0;
    }
    return 1;
  }
  return 0;
}

ptrdiff_t THTensor_(nElement)(const THTensor *self)
{
  if(THTensor_nDimensionLegacyAll(self) == 0)
    return 0;
  else
  {
    ptrdiff_t nElement = 1;
    int d;
    for(d = 0; d < THTensor_nDimension(self); d++)
      nElement *= self->size(d);
    return nElement;
  }
}

// NB: It is INVALID to call this on an UndefinedTensorImpl
void THTensor_(retain)(THTensor *self)
{
  c10::raw::intrusive_ptr::incref(self);
}

void THTensor_(free)(THTensor *self)
{
  THTensor_free(self);
}

void THTensor_(freeCopyTo)(THTensor *self, THTensor *dst)
{
  if(self != dst) {
    at::Tensor dst_wrap = THTensor_wrap(dst);
    at::Tensor self_wrap = THTensor_wrap(self);
    at::_copy_same_type_(dst_wrap, self_wrap);
  }

  THTensor_(free)(self);
}

/*******************************************************************************/

void THTensor_(setStorageNd)(THTensor *self, THStorage *storage, ptrdiff_t storageOffset, int nDimension, const int64_t *size, const int64_t *stride)
{
  return THTensor_setStorageNd(self, storage, storageOffset, nDimension, size, stride);
}

void THTensor_(resizeNd)(THTensor *self, int nDimension, const int64_t *size, const int64_t *stride)
{
  return THTensor_resizeNd(self, nDimension, size, stride);
}

void THTensor_(set0d)(THTensor *tensor, scalar_t value)
{
  THArgCheck(THTensor_nDimension(tensor) == 0, 1, "tensor must have no dimensions");
  THStorage_(set)(THTensor_getStoragePtr(tensor), tensor->storage_offset(), value);
}

scalar_t THTensor_(get0d)(const THTensor *tensor)
{
  THArgCheck(THTensor_nDimension(tensor) == 0, 1, "tensor must have no dimensions");
  return THStorage_(get)(THTensor_getStoragePtr(tensor), tensor->storage_offset());
}

void THTensor_(set1d)(THTensor *tensor, int64_t x0, scalar_t value)
{
  THArgCheck(THTensor_nDimensionLegacyNoScalars(tensor) == 1, 1, "tensor must have one dimension");
  THArgCheck( (x0 >= 0) && (x0 < THTensor_sizeLegacyNoScalars(tensor, 0)), 2, "out of range");
  THStorage_(set)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*THTensor_strideLegacyNoScalars(tensor, 0), value);
}

scalar_t THTensor_(get1d)(const THTensor *tensor, int64_t x0)
{
  THArgCheck(THTensor_nDimensionLegacyNoScalars(tensor) == 1, 1, "tensor must have one dimension");
  THArgCheck( (x0 >= 0) && (x0 < THTensor_sizeLegacyNoScalars(tensor, 0)), 2, "out of range");
  return THStorage_(get)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*THTensor_strideLegacyNoScalars(tensor, 0));
}

void THTensor_(set2d)(THTensor *tensor, int64_t x0, int64_t x1, scalar_t value)
{
  THArgCheck(THTensor_nDimensionLegacyAll(tensor) == 2, 1, "tensor must have two dimensions");
  THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)), 2, "out of range");
  THStorage_(set)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1), value);
}

scalar_t THTensor_(get2d)(const THTensor *tensor, int64_t x0, int64_t x1)
{
  THArgCheck(THTensor_nDimensionLegacyAll(tensor) == 2, 1, "tensor must have two dimensions");
  THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)), 2, "out of range");
  return THStorage_(get)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1));
}

void THTensor_(set3d)(THTensor *tensor, int64_t x0, int64_t x1, int64_t x2, scalar_t value)
{
  THArgCheck(THTensor_nDimensionLegacyAll(tensor) == 3, 1, "tensor must have three dimensions");
  THArgCheck( (x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)), 2, "out of range");
  THStorage_(set)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)+x2*tensor->stride(2), value);
}

scalar_t THTensor_(get3d)(const THTensor *tensor, int64_t x0, int64_t x1, int64_t x2)
{
  THArgCheck(THTensor_nDimensionLegacyAll(tensor) == 3, 1, "tensor must have three dimensions");
  THArgCheck( (x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)), 2, "out of range");
  return THStorage_(get)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)+x2*tensor->stride(2));
}

void THTensor_(set4d)(THTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3, scalar_t value)
{
  THArgCheck(THTensor_nDimensionLegacyAll(tensor) == 4, 1, "tensor must have four dimensions");
  THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)) && (x3 >= 0) && (x3 < tensor->size(3)), 2, "out of range");
  THStorage_(set)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)+x2*tensor->stride(2)+x3*tensor->stride(3), value);
}

scalar_t THTensor_(get4d)(const THTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3)
{
  THArgCheck(THTensor_nDimensionLegacyAll(tensor) == 4, 1, "tensor must have four dimensions");
  THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)) && (x3 >= 0) && (x3 < tensor->size(3)), 2, "out of range");
  return THStorage_(get)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)+x2*tensor->stride(2)+x3*tensor->stride(3));
}
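
/* Worked example (float instantiation assumed): the setNd/getNd accessors
   address elements through offset + x0*stride(0) + x1*stride(1) + ...:

     THFloatTensor *t = THFloatTensor_newWithSize2d(2, 3);  // strides {3, 1}
     THFloatTensor_set2d(t, 1, 2, 7.0f);      // linear index 1*3 + 2*1 = 5
     float v = THFloatTensor_get2d(t, 1, 2);  // 7.0f
     THFloatTensor_free(t);
*/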


/* Shape manipulation methods */
void THTensor_(cat)(THTensor *r_, THTensor *ta, THTensor *tb, int dimension)
{
  THTensor* inputs[2];
  inputs[0] = ta;
  inputs[1] = tb;
  THTensor_(catArray)(r_, inputs, 2, dimension);
}

void THTensor_(check_shape_except_dim)(THTensor *first, THTensor *second, int dimension);
inline void THTensor_(check_shape_except_dim)(THTensor *first, THTensor *second, int dimension)
{
  int first_dims = first->dim();
  int second_dims = second->dim();
  THArgCheck(first_dims == second_dims, 0,
      "Tensors must have same number of dimensions: got %d and %d",
      first_dims, second_dims);
  for (int dim = 0; dim < first_dims; dim++) {
    if (dim == dimension) {
      continue;
    }
    int64_t first_dim_size = first->size(dim);
    int64_t second_dim_size = second->size(dim);
    THArgCheck(first_dim_size == second_dim_size, 0,
        "Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d",
        dimension, (long long)first_dim_size, (long long)second_dim_size, dim);
  }
}

void THTensor_(catArray)(THTensor *result, THTensor **inputs, int numInputs, int dimension)
{
  // previously, size [0] tensors were the only possible empty tensors; thus, it wasn't possible
  // to cat empty tensors unless all the other tensors were 1-dimensional, so we allowed these tensors
  // to be "skipped". We maintain this behavior for backwards compatibility, but only for this specific
  // size (i.e. other empty sizes are not skipped).
  // FIXME: warn if this is the case
  bool allSkipped = true;
  int64_t nDims = 0;
  THTensor *notSkippedTensor;  // non-owning reference
  auto should_skip = [](THTensor *t) { return t->is_empty() && t->dim() == 1; };
  for (int i = 0; i < numInputs; i++) {
    if (should_skip(inputs[i])) {
      continue;
    }
    // We've found a non-empty tensor
    allSkipped = false;
    notSkippedTensor = inputs[i];
    nDims = notSkippedTensor->dim();
    break;
  }
  if (allSkipped) {
    return;
  }

  // Compute cat_dimension based on the non-empty tensor
  THArgCheck(dimension < nDims, 4, "invalid dimension %d", dimension);
  THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);

  // Compute size of the result in the cat dimension
  int64_t cat_dim_size = 0;
  for (int i = 0; i < numInputs; i++) {
    THTensor *tensor = inputs[i];
    if (should_skip(tensor)) {
      continue;
    }
    THTensor_(check_shape_except_dim)(notSkippedTensor, tensor, dimension);
    cat_dim_size += tensor->size(dimension);
  }

  // Compute the size of the result
  std::vector<int64_t> size(nDims);
  for (int dim = 0; dim < nDims; dim++) {
    int64_t result_dim_size = notSkippedTensor->size(dim);
    if (dim == dimension) {
      result_dim_size = cat_dim_size;
    }
    size[dim] = result_dim_size;
  }
  THTensor_(resize)(result, size, {});

  // Check contiguity of all inputs and result
  bool allContiguous = true;
  for (int i = 0; i < numInputs; i++) {
    if(!should_skip(inputs[i])) {
      allContiguous = allContiguous && THTensor_(isContiguous)(inputs[i]);
    }
  }
  allContiguous = allContiguous && THTensor_(isContiguous)(result);

  // First path is for contiguous inputs
  // Second path for non-contiguous
  int64_t offset;
  if (allContiguous) {
    int64_t outer = 1, inner = 1;

    // Outer is the product of dimensions from the left up to (and not
    // including) the concatenation dimension. This becomes the number of times
    // we have to replicate the memcpy call.
    for (int i = 0; i < dimension; ++i) {
      outer *= size[i];
    }

    // The product of dimensions to the right of the concatenation dimension.
    // We go on to multiply this by the size of the concat dimension for
    // each input tensor.
    for (int i = dimension + 1; i < (int)size.size(); ++i) {
      inner *= size[i];
    }

    scalar_t* result_data = THStorage_(data)(THTensor_getStoragePtr(result)) + result->storage_offset();
    offset = 0;
    for (int o = 0; o < outer; ++o) {
      for (int j = 0; j < numInputs; ++j) {
        if (!should_skip(inputs[j])) {
          THTensor* input0 = inputs[j];
          scalar_t* input0_data = THStorage_(data)(THTensor_getStoragePtr(input0)) + input0->storage_offset();
          // use a 64-bit count so large slices don't overflow the memcpy size
          int64_t local_inner = inner * input0->size(dimension);
          if (local_inner != 0) {
            memcpy(result_data + offset, input0_data + o*local_inner, local_inner*sizeof(scalar_t));
          } // local_inner != 0
          offset += local_inner;
        } // should_skip
      } // for j
    } // for o
  } else {
    offset = 0;
    for (int j = 0; j < numInputs; j++) {
      if (!should_skip(inputs[j])) {
        int64_t dimSize = inputs[j]->size(dimension);
        THTensor *nt = THTensor_(newWithTensor)(result);
        THTensor_(narrow)(nt, NULL, dimension, offset, dimSize);
        at::Tensor nt__wrap = THTensor_wrap(nt);
        at::Tensor inputs_wrap = THTensor_wrap(inputs[j]);
        at::_copy_same_type_(nt__wrap, inputs_wrap);
        c10::raw::intrusive_ptr::decref(nt);
        offset += dimSize;
      }
    }
  }
}
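
/* Worked example (float instantiation assumed) for the contiguous fast path
   above: concatenating a 2x3 and a 2x5 along dim 1 gives outer = 2 and
   inner = 1, so each of the 2 outer rows memcpys 1*3 = 3 then 1*5 = 5 elements:

     THFloatTensor *a = THFloatTensor_newWithSize2d(2, 3);
     THFloatTensor *b = THFloatTensor_newWithSize2d(2, 5);
     THFloatTensor *r = THFloatTensor_new();
     THFloatTensor_cat(r, a, b, 1);   // r: sizes {2, 8}
     THFloatTensor_free(r);
     THFloatTensor_free(b);
     THFloatTensor_free(a);
*/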

THDescBuff THTensor_(desc)(const THTensor *tensor) {
  const int L = TH_DESC_BUFF_LEN;
  THDescBuff buf;
  char *str = buf.str;
  int n = 0;
// Two-level macro so the Real type macro (e.g. Float) is expanded before being
// stringified; a single-level #x would print the literal argument instead.
#define _str(x) #x
#define _stringify(x) _str(x)
  n += snprintf(str, L-n, "torch." _stringify(Real) "Tensor of size ");
#undef _stringify
#undef _str
  int i;
  for(i = 0; i < THTensor_nDimension(tensor); i++) {
    if(n >= L) break;
    n += snprintf(str+n, L-n, "%" PRId64, tensor->size(i));
    if(i < THTensor_nDimension(tensor)-1) {
      n += snprintf(str+n, L-n, "x");
    }
  }
  if(n >= L) {
    snprintf(str+L-4, 4, "...");
  }
  return buf;
}

THDescBuff THTensor_(sizeDesc)(const THTensor *tensor) {
  THDescBuff buf = _THSizeDesc(tensor->sizes().data(), tensor->sizes().size());
  return buf;
}

#endif