#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "TH/generic/THTensor.cpp"
#else

#include <ATen/InferSize.h>

/**** access methods ****/
THStorage *THTensor_(storage)(const THTensor *self)
{
  return THTensor_getStoragePtr(self);
}

ptrdiff_t THTensor_(storageOffset)(const THTensor *self)
{
  return self->storage_offset();
}

int THTensor_(nDimension)(const THTensor *self)
{
  return THTensor_nDimension(self);
}

int THTensor_(nDimensionLegacyNoScalars)(const THTensor *self)
{
  return THTensor_nDimensionLegacyNoScalars(self);
}

int THTensor_(nDimensionLegacyAll)(const THTensor *self)
{
  return THTensor_nDimensionLegacyAll(self);
}

int64_t THTensor_(size)(const THTensor *self, int dim)
{
  THArgCheck((dim >= 0) && (dim < self->dim()), 2, "dimension %d out of range of %dD tensor",
      dim, THTensor_(nDimensionLegacyNoScalars)(self));
  return self->size(dim);
}

int64_t THTensor_(stride)(const THTensor *self, int dim)
{
  THArgCheck((dim >= 0) && (dim < self->dim()), 2, "dimension %d out of range of %dD tensor",
      dim, THTensor_(nDimensionLegacyNoScalars)(self));
  return self->stride(dim);
}

scalar_t *THTensor_(data)(const THTensor *self) {
  return self->data<scalar_t>();
}
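/* Usage sketch (editor's illustration, not part of the original file): reading
 * element (i, j) of a 2-D tensor through the raw data pointer. The pointer
 * returned by THTensor_(data) already includes the storage offset, so only the
 * per-dimension strides are applied; THTensor_(demoGet2d) is a hypothetical
 * helper name.
 *
 *   scalar_t THTensor_(demoGet2d)(THTensor *t, int64_t i, int64_t j) {
 *     scalar_t *p = THTensor_(data)(t);
 *     return p[i * t->stride(0) + j * t->stride(1)];
 *   }
 */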
/**** creation methods ****/

/* Empty init */
THTensor *THTensor_(new)(void)
{
  return c10::make_intrusive<at::TensorImpl, at::UndefinedTensorImpl>(
    c10::intrusive_ptr<at::StorageImpl>::reclaim(THStorage_(new)()),
    at::CPUTensorId(),
    false
  ).release();
}
/* Pointer-copy init */
THTensor *THTensor_(newWithTensor)(THTensor *tensor)
{
  THTensor *self = c10::make_intrusive<at::TensorImpl, at::UndefinedTensorImpl>(
    c10::intrusive_ptr<at::StorageImpl>::reclaim(THStorage_(new)()),
    at::CPUTensorId(),
    false
  ).release();
  THTensor_(setStorageNd)(self,
                          THTensor_getStoragePtr(tensor),
                          tensor->storage_offset(),
                          tensor->dim(),
                          THTensor_getSizePtr(tensor),
                          THTensor_getStridePtr(tensor));
  return self;
}
/* Storage init */
THTensor *THTensor_(newWithStorage)(THStorage *storage, ptrdiff_t storageOffset, at::IntList sizes, at::IntList strides)
{
  if (strides.data()) {
    AT_CHECK(sizes.size() == strides.size(), "number of sizes and strides must match");
  }
  THTensor *self = c10::make_intrusive<at::TensorImpl, at::UndefinedTensorImpl>(
    c10::intrusive_ptr<at::StorageImpl>::reclaim(THStorage_(new)()),
    at::CPUTensorId(),
    false
  ).release();
  THTensor_(setStorageNd)(self, storage, storageOffset, sizes.size(),
                          const_cast<int64_t*>(sizes.data()), const_cast<int64_t*>(strides.data()));

  return self;
}
THTensor *THTensor_(newWithStorage1d)(THStorage *storage, ptrdiff_t storageOffset,
                                      int64_t size0, int64_t stride0)
{
  return THTensor_(newWithStorage)(storage, storageOffset, {size0}, {stride0});
}

THTensor *THTensor_(newWithStorage2d)(THStorage *storage, ptrdiff_t storageOffset,
                                      int64_t size0, int64_t stride0,
                                      int64_t size1, int64_t stride1)
{
  return THTensor_(newWithStorage)(storage, storageOffset, {size0, size1}, {stride0, stride1});
}

THTensor *THTensor_(newWithStorage3d)(THStorage *storage, ptrdiff_t storageOffset,
                                      int64_t size0, int64_t stride0,
                                      int64_t size1, int64_t stride1,
                                      int64_t size2, int64_t stride2)
{
  return THTensor_(newWithStorage)(storage, storageOffset, {size0, size1, size2}, {stride0, stride1, stride2});
}

THTensor *THTensor_(newWithStorage4d)(THStorage *storage, ptrdiff_t storageOffset,
                                      int64_t size0, int64_t stride0,
                                      int64_t size1, int64_t stride1,
                                      int64_t size2, int64_t stride2,
                                      int64_t size3, int64_t stride3)
{
  return THTensor_(newWithStorage)(storage, storageOffset,
                                   {size0, size1, size2, size3},
                                   {stride0, stride1, stride2, stride3});
}
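/* Usage sketch (editor's illustration): viewing one 6-element storage as a
 * row-major 2x3 matrix and, without copying, as its 3x2 transpose by swapping
 * the (size, stride) pairs. Both tensors alias the same storage.
 *
 *   THStorage *s = THStorage_(newWithSize)(6);
 *   THTensor *mat  = THTensor_(newWithStorage2d)(s, 0, 2, 3, 3, 1);  // sizes {2,3}, strides {3,1}
 *   THTensor *matT = THTensor_(newWithStorage2d)(s, 0, 3, 1, 2, 3);  // sizes {3,2}, strides {1,3}
 */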
THTensor *THTensor_(newWithSize)(at::IntList size, at::IntList stride)
{
  return THTensor_(newWithStorage)(NULL, 0, size, stride);
}

THTensor *THTensor_(newWithSize1d)(int64_t size0)
{
  return THTensor_(newWithSize)({size0}, {});
}

THTensor *THTensor_(newWithSize2d)(int64_t size0, int64_t size1)
{
  return THTensor_(newWithSize)({size0, size1}, {});
}

THTensor *THTensor_(newWithSize3d)(int64_t size0, int64_t size1, int64_t size2)
{
  return THTensor_(newWithSize)({size0, size1, size2}, {});
}

THTensor *THTensor_(newWithSize4d)(int64_t size0, int64_t size1, int64_t size2, int64_t size3)
{
  return THTensor_(newWithSize)({size0, size1, size2, size3}, {});
}
THTensor *THTensor_(newClone)(THTensor *self)
{
  THTensor *tensor = THTensor_(new)();
  THTensor_(resizeAs)(tensor, self);
  at::Tensor tensor_wrap = THTensor_wrap(tensor);
  at::Tensor self_wrap = THTensor_wrap(self);
  at::_copy_same_type_(tensor_wrap, self_wrap);
  return tensor;
}

THTensor *THTensor_(newContiguous)(THTensor *self)
{
  if(!THTensor_(isContiguous)(self))
    return THTensor_(newClone)(self);
  else
  {
    THTensor_(retain)(self);
    return self;
  }
}
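/* Usage sketch (editor's illustration): newContiguous either returns a fresh
 * dense copy or the same tensor with its refcount bumped, so the caller can
 * free the result unconditionally.
 *
 *   THTensor *c = THTensor_(newContiguous)(t);
 *   // ... use THTensor_(data)(c) as a dense buffer ...
 *   THTensor_(free)(c);  // safe whether or not c == t
 */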
THTensor *THTensor_(newSelect)(THTensor *tensor, int dimension_, int64_t sliceIndex_)
{
  THTensor *self = THTensor_(newWithTensor)(tensor);
  THTensor_(select)(self, NULL, dimension_, sliceIndex_);
  return self;
}

THTensor *THTensor_(newNarrow)(THTensor *tensor, int dimension_, int64_t firstIndex_, int64_t size_)
{
  THTensor *self = THTensor_(newWithTensor)(tensor);
  THTensor_(narrow)(self, NULL, dimension_, firstIndex_, size_);
  return self;
}

THTensor *THTensor_(newTranspose)(THTensor *tensor, int dimension1_, int dimension2_)
{
  THTensor *self = THTensor_(newWithTensor)(tensor);
  THTensor_(transpose)(self, NULL, dimension1_, dimension2_);
  return self;
}

THTensor *THTensor_(newUnfold)(THTensor *tensor, int dimension_, int64_t size_, int64_t step_)
{
  THTensor *self = THTensor_(newWithTensor)(tensor);
  THTensor_(unfold)(self, NULL, dimension_, size_, step_);
  return self;
}
THTensor *THTensor_(newView)(THTensor *tensor, at::IntList size)
{
  ptrdiff_t numel = THTensor_(nElement)(tensor);
  THTensor *self = THTensor_(new)();
  auto inferred_size = at::infer_size(size, numel);
  auto stride = THTensor_compute_stride(tensor->sizes(),
                                        tensor->strides(),
                                        inferred_size);
  THArgCheck(stride.has_value(), 2, "view size is "
    "not compatible with input tensor's size and stride (at least one dimension spans "
    "across two contiguous subspaces). Call .contiguous() before .view().");
  auto stride_value = *stride;
  THTensor_setStorage(self, THTensor_getStoragePtr(tensor), tensor->storage_offset(), inferred_size, stride_value);
  return self;
}

/* Resize */
void THTensor_(resize)(THTensor *self, at::IntList size, at::IntList stride)
{
  return THTensor_resize(self, size, stride);
}
void THTensor_(resizeAs)(THTensor *self, THTensor *src)
{
  if(!THTensor_(isSameSizeAs)(self, src))
    THTensor_(resizeNd)(self, src->dim(), THTensor_getSizePtr(src), NULL);
}

void THTensor_(resize0d)(THTensor *tensor)
{
  THTensor_(resizeNd)(tensor, 0, {}, nullptr);
}

void THTensor_(resize1d)(THTensor *tensor, int64_t size0)
{
  int64_t size[1] = {size0};
  THTensor_(resizeNd)(tensor, 1, size, nullptr);
}

void THTensor_(resize2d)(THTensor *tensor, int64_t size0, int64_t size1)
{
  int64_t size[2] = {size0, size1};
  THTensor_(resizeNd)(tensor, 2, size, nullptr);
}

void THTensor_(resize3d)(THTensor *tensor, int64_t size0, int64_t size1, int64_t size2)
{
  int64_t size[3] = {size0, size1, size2};
  THTensor_(resizeNd)(tensor, 3, size, nullptr);
}

void THTensor_(resize4d)(THTensor *self, int64_t size0, int64_t size1, int64_t size2, int64_t size3)
{
  int64_t size[4] = {size0, size1, size2, size3};
  THTensor_(resizeNd)(self, 4, size, nullptr);
}

void THTensor_(resize5d)(THTensor *self, int64_t size0, int64_t size1, int64_t size2, int64_t size3, int64_t size4)
{
  int64_t size[5] = {size0, size1, size2, size3, size4};
  THTensor_(resizeNd)(self, 5, size, nullptr);
}
void THTensor_(set)(THTensor *self, THTensor *src)
{
  if(self != src)
    THTensor_(setStorageNd)(self,
                            THTensor_getStoragePtr(src),
                            src->storage_offset(),
                            src->dim(),
                            THTensor_getSizePtr(src),
                            THTensor_getStridePtr(src));
}

void THTensor_(setStorage)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, at::IntList size_, at::IntList stride_)
{
  THTensor_setStorage(self, storage_, storageOffset_, size_, stride_);
}

void THTensor_(setStorage1d)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_,
                             int64_t size0_, int64_t stride0_)
{
  THTensor_(setStorage)(self, storage_, storageOffset_,
                        {size0_}, {stride0_});
}

void THTensor_(setStorage2d)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_,
                             int64_t size0_, int64_t stride0_,
                             int64_t size1_, int64_t stride1_)
{
  THTensor_(setStorage)(self, storage_, storageOffset_,
                        {size0_, size1_},
                        {stride0_, stride1_});
}

void THTensor_(setStorage3d)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_,
                             int64_t size0_, int64_t stride0_,
                             int64_t size1_, int64_t stride1_,
                             int64_t size2_, int64_t stride2_)
{
  THTensor_(setStorage)(self, storage_, storageOffset_,
                        {size0_, size1_, size2_},
                        {stride0_, stride1_, stride2_});
}

void THTensor_(setStorage4d)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_,
                             int64_t size0_, int64_t stride0_,
                             int64_t size1_, int64_t stride1_,
                             int64_t size2_, int64_t stride2_,
                             int64_t size3_, int64_t stride3_)
{
  int64_t size[4] = {size0_, size1_, size2_, size3_};
  int64_t stride[4] = {stride0_, stride1_, stride2_, stride3_};

  THTensor_(setStorage)(self, storage_, storageOffset_, size, stride);
}
void THTensor_(narrow)(THTensor *self, THTensor *src, int dimension, int64_t firstIndex, int64_t size)
{
  if(!src)
    src = self;

  THArgCheck( (dimension >= 0) && (dimension < src->dim()), 2, "out of range");
  THArgCheck( firstIndex >= 0, 3, "out of range");
  THArgCheck( size >= 0, 4, "out of range");
  THArgCheck(firstIndex <= src->size(dimension) - size, 4, "out of range");

  THTensor_(set)(self, src);

  if (firstIndex > 0) {
    self->set_storage_offset(self->storage_offset() + firstIndex*self->stride(dimension));
  }

  self->set_size(dimension, size);
}
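/* Usage sketch (editor's illustration): narrowing rows [2, 5) of a 6x4 tensor.
 * Only the storage offset and the size along dim 0 change; the data is shared.
 *
 *   THTensor *t = THTensor_(newWithSize2d)(6, 4);
 *   THTensor *rows = THTensor_(newNarrow)(t, 0, 2, 3);
 *   // rows->storage_offset() == t->storage_offset() + 2 * t->stride(0)
 */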
void THTensor_(select)(THTensor *self, THTensor *src, int dimension, int64_t sliceIndex)
{
  int d;

  if(!src)
    src = self;

  THArgCheck(src->dim() > 0, 1, "cannot select on a 0-dim tensor");
  THArgCheck((dimension >= 0) && (dimension < src->dim()), 2, "out of range");
  THArgCheck((sliceIndex >= 0) && (sliceIndex < src->size(dimension)), 3, "out of range");

  THTensor_(set)(self, src);
  THTensor_(narrow)(self, NULL, dimension, sliceIndex, 1);

  std::vector<int64_t> newSize(self->dim()-1);
  std::vector<int64_t> newStride(self->dim()-1);
  for (d = 0; d < dimension; d++)
  {
    newSize[d] = self->size(d);
    newStride[d] = self->stride(d);
  }

  for(d = dimension; d < self->dim()-1; d++)
  {
    newSize[d] = self->size(d+1);
    newStride[d] = self->stride(d+1);
  }
  self->set_sizes_and_strides(newSize, newStride);
}
void THTensor_(transpose)(THTensor *self, THTensor *src, int dimension1, int dimension2)
{
  int64_t z;

  if(!src)
    src = self;

  THArgCheck( (dimension1 >= 0) && (dimension1 < THTensor_nDimensionLegacyNoScalars(src)), 1, "out of range");
  THArgCheck( (dimension2 >= 0) && (dimension2 < THTensor_nDimensionLegacyNoScalars(src)), 2, "out of range");

  THTensor_(set)(self, src);

  if(dimension1 == dimension2)
    return;

  z = self->stride(dimension1);
  self->set_stride(dimension1, self->stride(dimension2));
  self->set_stride(dimension2, z);
  z = self->size(dimension1);
  self->set_size(dimension1, self->size(dimension2));
  self->set_size(dimension2, z);
}
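/* Usage sketch (editor's illustration): transpose is metadata-only; it swaps
 * the (size, stride) pairs of the two dimensions and never touches the storage.
 *
 *   THTensor *t  = THTensor_(newWithSize2d)(2, 3);    // sizes {2,3}, strides {3,1}
 *   THTensor *tt = THTensor_(newTranspose)(t, 0, 1);  // sizes {3,2}, strides {1,3}
 */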
void THTensor_(unfold)(THTensor *self, THTensor *src, int dimension, int64_t size, int64_t step)
{
  int d;

  if(!src)
    src = self;

  THArgCheck((dimension >= 0) && (dimension < THTensor_nDimensionLegacyNoScalars(src)), 2, "out of range");
  THArgCheck(size <= THTensor_sizeLegacyNoScalars(src, dimension), 3, "out of range");
  THArgCheck(step > 0, 4, "invalid step");

  THTensor_(set)(self, src);

  std::vector<int64_t> newSize(self->dim()+1);
  std::vector<int64_t> newStride(self->dim()+1);

  newSize[self->dim()] = size;
  newStride[self->dim()] = THTensor_strideLegacyNoScalars(self, dimension);
  for(d = 0; d < self->dim(); d++)
  {
    auto self_size = THTensor_sizeLegacyNoScalars(self, d);
    auto self_stride = THTensor_strideLegacyNoScalars(self, d);
    if(d == dimension)
    {
      newSize[d] = (self_size - size) / step + 1;
      newStride[d] = step*self_stride;
    }
    else
    {
      newSize[d] = self_size;
      newStride[d] = self_stride;
    }
  }

  self->set_sizes_and_strides(newSize, newStride);
}
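/* Usage sketch (editor's illustration): unfolding a length-5 vector with
 * size=3, step=1 yields a (5-3)/1+1 = 3 x 3 view whose rows are overlapping
 * windows; the appended dimension reuses the original stride.
 *
 *   THTensor *v = THTensor_(newWithSize1d)(5);
 *   THTensor *w = THTensor_(newUnfold)(v, 0, 3, 1);  // sizes {3,3}, strides {1,1}
 */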
void THTensor_(squeeze)(THTensor *self, THTensor *src)
{
  if(!src)
    src = self;

  THTensor_(set)(self, src);

  std::vector<int64_t> newSize;
  std::vector<int64_t> newStride;
  for(int d = 0; d < src->dim(); ++d)
  {
    if(src->size(d) != 1)
    {
      newSize.push_back(src->size(d));
      newStride.push_back(src->stride(d));
    }
  }

  self->set_sizes_and_strides(newSize, newStride);
}
void THTensor_(squeeze1d)(THTensor *self, THTensor *src, int dimension)
{
  int d;

  if(!src)
    src = self;

  THArgCheck((dimension >= 0) && (dimension < src->dim()), 2, "dimension out of range");

  THTensor_(set)(self, src);

  if(src->size(dimension) == 1)
  {
    std::vector<int64_t> newSize(self->dim() - 1);
    std::vector<int64_t> newStride(self->dim() - 1);
    for (d = 0; d < dimension; d++)
    {
      newSize[d] = self->size(d);
      newStride[d] = self->stride(d);
    }

    for(d = dimension; d < self->dim()-1; d++)
    {
      newSize[d] = self->size(d+1);
      newStride[d] = self->stride(d+1);
    }
    self->set_sizes_and_strides(newSize, newStride);
  }
}
void THTensor_(unsqueeze1d)(THTensor *self, THTensor *src, int dimension)
{
  int d;

  if(!src)
    src = self;

  THArgCheck((dimension >= 0) && (dimension <= src->dim()), 2, "dimension out of range");

  THTensor_(set)(self, src);

  std::vector<int64_t> newSize(self->dim()+1);
  std::vector<int64_t> newStride(self->dim()+1);

  for(d = self->dim(); d > dimension; d--)
  {
    newSize[d] = self->size(d-1);
    newStride[d] = self->stride(d-1);
  }
  if (dimension < self->dim())
  {
    newStride[dimension] = self->size(dimension) * self->stride(dimension);
  }
  else
  {
    newStride[dimension] = 1;
  }
  newSize[dimension] = 1;
  for(d = dimension - 1; d >= 0; d--)
  {
    newSize[d] = self->size(d);
    newStride[d] = self->stride(d);
  }
  self->set_sizes_and_strides(newSize, newStride);
}
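/* Usage sketch (editor's illustration): unsqueeze1d inserts a size-1 dimension
 * and squeeze1d removes it again, round-tripping the shape in place.
 *
 *   THTensor *t = THTensor_(newWithSize2d)(2, 3);
 *   THTensor_(unsqueeze1d)(t, NULL, 1);  // shape becomes 2x1x3
 *   THTensor_(squeeze1d)(t, NULL, 1);    // back to 2x3
 */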
int THTensor_(isTransposed)(const THTensor *self)
{
  if (THTensor_(isContiguous)(self)) {
    return 0;
  }
  int64_t max_stride = 1;
  int64_t size_max_stride = 1;
  int64_t z = 1;
  int d;
  for (d = 0; d < self->dim(); ++d) {
    if (self->stride(d) == 0 && self->size(d) != 1)
      return 0;
    if (self->stride(d) > max_stride) {
      max_stride = self->stride(d);
      size_max_stride = self->size(d);
    }
    z *= self->size(d);
  }
  if (z == max_stride * size_max_stride) {
    return 1;
  }
  return 0;
}
int THTensor_(isContiguous)(const THTensor *self)
{
  return self->is_contiguous();
}
int THTensor_(isSameSizeAs)(const THTensor *self, const THTensor* src)
{
  int d;
  if (self->dim() != src->dim())
    return 0;
  for(d = 0; d < self->dim(); ++d)
  {
    if(self->size(d) != src->size(d))
      return 0;
  }
  return 1;
}
int THTensor_(isSetTo)(const THTensor *self, const THTensor* src)
{
  if (!THTensor_getStoragePtr(self))
    return 0;
  if (THTensor_getStoragePtr(self) == THTensor_getStoragePtr(src) &&
      self->storage_offset() == src->storage_offset() &&
      THTensor_nDimensionLegacyAll(self) == THTensor_nDimensionLegacyAll(src))
  {
    int d;
    for (d = 0; d < THTensor_nDimensionLegacyAll(self); ++d)
    {
      if (self->size(d) != src->size(d) || self->stride(d) != src->stride(d))
        return 0;
    }
    return 1;
  }
  return 0;
}
ptrdiff_t THTensor_(nElement)(const THTensor *self)
{
  if(THTensor_nDimensionLegacyAll(self) == 0)
    return 0;
  else
  {
    ptrdiff_t nElement = 1;
    int d;
    for(d = 0; d < THTensor_nDimension(self); d++)
      nElement *= self->size(d);
    return nElement;
  }
}
void THTensor_(retain)(THTensor *self)
{
  c10::raw::intrusive_ptr::incref(self);
}

void THTensor_(free)(THTensor *self)
{
  THTensor_free(self);
}

void THTensor_(freeCopyTo)(THTensor *self, THTensor *dst)
{
  if(self != dst) {
    at::Tensor dst_wrap = THTensor_wrap(dst);
    at::Tensor self_wrap = THTensor_wrap(self);
    at::_copy_same_type_(dst_wrap, self_wrap);
  }

  THTensor_(free)(self);
}
void THTensor_(setStorageNd)(THTensor *self, THStorage *storage, ptrdiff_t storageOffset, int nDimension, const int64_t *size, const int64_t *stride)
{
  return THTensor_setStorageNd(self, storage, storageOffset, nDimension, size, stride);
}

void THTensor_(resizeNd)(THTensor *self, int nDimension, const int64_t *size, const int64_t *stride)
{
  return THTensor_resizeNd(self, nDimension, size, stride);
}
void THTensor_(set0d)(THTensor *tensor, scalar_t value)
{
  THArgCheck(THTensor_nDimension(tensor) == 0, 1, "tensor must have no dimensions");
  THStorage_(set)(THTensor_getStoragePtr(tensor), tensor->storage_offset(), value);
}

scalar_t THTensor_(get0d)(const THTensor *tensor)
{
  THArgCheck(THTensor_nDimension(tensor) == 0, 1, "tensor must have no dimensions");
  return THStorage_(get)(THTensor_getStoragePtr(tensor), tensor->storage_offset());
}

void THTensor_(set1d)(THTensor *tensor, int64_t x0, scalar_t value)
{
  THArgCheck(THTensor_nDimensionLegacyNoScalars(tensor) == 1, 1, "tensor must have one dimension");
  THArgCheck( (x0 >= 0) && (x0 < THTensor_sizeLegacyNoScalars(tensor, 0)), 2, "out of range");
  THStorage_(set)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*THTensor_strideLegacyNoScalars(tensor, 0), value);
}

scalar_t THTensor_(get1d)(const THTensor *tensor, int64_t x0)
{
  THArgCheck(THTensor_nDimensionLegacyNoScalars(tensor) == 1, 1, "tensor must have one dimension");
  THArgCheck( (x0 >= 0) && (x0 < THTensor_sizeLegacyNoScalars(tensor, 0)), 2, "out of range");
  return THStorage_(get)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*THTensor_strideLegacyNoScalars(tensor, 0));
}

void THTensor_(set2d)(THTensor *tensor, int64_t x0, int64_t x1, scalar_t value)
{
  THArgCheck(THTensor_nDimensionLegacyAll(tensor) == 2, 1, "tensor must have two dimensions");
  THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)), 2, "out of range");
  THStorage_(set)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1), value);
}

scalar_t THTensor_(get2d)(const THTensor *tensor, int64_t x0, int64_t x1)
{
  THArgCheck(THTensor_nDimensionLegacyAll(tensor) == 2, 1, "tensor must have two dimensions");
  THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)), 2, "out of range");
  return THStorage_(get)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1));
}

void THTensor_(set3d)(THTensor *tensor, int64_t x0, int64_t x1, int64_t x2, scalar_t value)
{
  THArgCheck(THTensor_nDimensionLegacyAll(tensor) == 3, 1, "tensor must have three dimensions");
  THArgCheck( (x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)), 2, "out of range");
  THStorage_(set)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)+x2*tensor->stride(2), value);
}

scalar_t THTensor_(get3d)(const THTensor *tensor, int64_t x0, int64_t x1, int64_t x2)
{
  THArgCheck(THTensor_nDimensionLegacyAll(tensor) == 3, 1, "tensor must have three dimensions");
  THArgCheck( (x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)), 2, "out of range");
  return THStorage_(get)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)+x2*tensor->stride(2));
}

void THTensor_(set4d)(THTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3, scalar_t value)
{
  THArgCheck(THTensor_nDimensionLegacyAll(tensor) == 4, 1, "tensor must have four dimensions");
  THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)) && (x3 >= 0) && (x3 < tensor->size(3)), 2, "out of range");
  THStorage_(set)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)+x2*tensor->stride(2)+x3*tensor->stride(3), value);
}

scalar_t THTensor_(get4d)(const THTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3)
{
  THArgCheck(THTensor_nDimensionLegacyAll(tensor) == 4, 1, "tensor must have four dimensions");
  THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)) && (x3 >= 0) && (x3 < tensor->size(3)), 2, "out of range");
  return THStorage_(get)(THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)+x2*tensor->stride(2)+x3*tensor->stride(3));
}
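/* Usage sketch (editor's illustration): every setNd/getNd accessor reduces to
 * the same storage address, storage_offset + sum_i x_i * stride(i).
 *
 *   THTensor *m = THTensor_(newWithSize2d)(3, 4);
 *   THTensor_(set2d)(m, 1, 2, (scalar_t)7);
 *   scalar_t v = THTensor_(get2d)(m, 1, 2);  // reads back the same element
 */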
void THTensor_(cat)(THTensor *r_, THTensor *ta, THTensor *tb, int dimension)
{
  THTensor* inputs[2];
  inputs[0] = ta;
  inputs[1] = tb;
  THTensor_(catArray)(r_, inputs, 2, dimension);
}
void THTensor_(check_shape_except_dim)(THTensor *first, THTensor *second, int dimension);
inline void THTensor_(check_shape_except_dim)(THTensor *first, THTensor *second, int dimension)
{
  int first_dims = first->dim();
  int second_dims = second->dim();
  THArgCheck(first_dims == second_dims, 0,
      "Tensors must have same number of dimensions: got %d and %d",
      first_dims, second_dims);
  for (int dim = 0; dim < first_dims; dim++) {
    if (dim == dimension) {
      continue;
    }
    int64_t first_dim_size = first->size(dim);
    int64_t second_dim_size = second->size(dim);
    THArgCheck(first_dim_size == second_dim_size, 0,
        "Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d",
        dimension, (long long)first_dim_size, (long long)second_dim_size, dim);
  }
}
void THTensor_(catArray)(THTensor *result, THTensor **inputs, int numInputs, int dimension)
{
  // Previously, size [0] tensors were the only possible empty tensors, so it wasn't
  // possible to cat empty tensors unless all the other tensors were 1-dimensional;
  // such tensors were allowed to be "skipped". We keep this behavior for backwards
  // compatibility, but only for this specific size (other empty sizes are not skipped).
  bool allSkipped = true;
  int64_t nDims = 0;
  THTensor *notSkippedTensor;  // non-owning reference
  auto should_skip = [](THTensor *t) { return t->is_empty() && t->dim() == 1; };
  for (int i = 0; i < numInputs; i++) {
    if (should_skip(inputs[i])) {
      continue;
    }
    // We've found a non-empty tensor
    allSkipped = false;
    notSkippedTensor = inputs[i];
    nDims = notSkippedTensor->dim();
    break;
  }
  if (allSkipped) {
    return;
  }

  // Compute cat_dimension based on the non-skipped tensors
  THArgCheck(dimension < nDims, 4, "invalid dimension %d", dimension);
  THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);

  // Compute the size of the result in the cat dimension
  int64_t cat_dim_size = 0;
  for (int i = 0; i < numInputs; i++) {
    THTensor *tensor = inputs[i];
    if (should_skip(tensor)) {
      continue;
    }
    THTensor_(check_shape_except_dim)(notSkippedTensor, tensor, dimension);
    cat_dim_size += tensor->size(dimension);
  }

  // Compute the size of the result
  std::vector<int64_t> size(nDims);
  for (int dim = 0; dim < nDims; dim++) {
    int64_t result_dim_size = notSkippedTensor->size(dim);
    if (dim == dimension) {
      result_dim_size = cat_dim_size;
    }
    size[dim] = result_dim_size;
  }
  THTensor_(resize)(result, size, {});

  // Check contiguity of all inputs and the result
  bool allContiguous = true;
  for (int i = 0; i < numInputs; i++) {
    if(!should_skip(inputs[i])) {
      allContiguous = allContiguous && THTensor_(isContiguous)(inputs[i]);
    }
  }
  allContiguous = allContiguous && THTensor_(isContiguous)(result);

  // Fast path: all tensors contiguous, copy with raw memcpy.
  // Slow path: copy each input into a narrowed view of the result.
  int64_t offset = 0;
  if (allContiguous) {
    int64_t outer = 1, inner = 1;

    // outer is the product of the dimensions to the left of the concatenation
    // dimension: the number of times the memcpy loop has to be replicated.
    for (int i = 0; i < dimension; ++i) {
      outer *= size[i];
    }

    // inner is the product of the dimensions to the right of the concatenation
    // dimension; each input contributes inner * its cat-dim size per outer step.
    for (int i = dimension + 1; i < (int)size.size(); ++i) {
      inner *= size[i];
    }

    scalar_t* result_data = THStorage_(data)(THTensor_getStoragePtr(result)) + result->storage_offset();
    for (int o = 0; o < outer; ++o) {
      for (int j = 0; j < numInputs; ++j) {
        if (!should_skip(inputs[j])) {
          THTensor* input0 = inputs[j];
          scalar_t* input0_data = THStorage_(data)(THTensor_getStoragePtr(input0)) + input0->storage_offset();
          int64_t local_inner = inner * input0->size(dimension);
          if (local_inner != 0) {
            memcpy(result_data + offset, input0_data + o*local_inner, local_inner*sizeof(scalar_t));
          }
          offset += local_inner;
        }
      }
    }
  }
  else {
    for (int j = 0; j < numInputs; j++) {
      if (!should_skip(inputs[j])) {
        int64_t dimSize = inputs[j]->size(dimension);
        THTensor *nt = THTensor_(newWithTensor)(result);
        THTensor_(narrow)(nt, NULL, dimension, offset, dimSize);
        at::Tensor nt__wrap = THTensor_wrap(nt);
        at::Tensor inputs_wrap = THTensor_wrap(inputs[j]);
        at::_copy_same_type_(nt__wrap, inputs_wrap);
        c10::raw::intrusive_ptr::decref(nt);
        offset += dimSize;
      }
    }
  }
}
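/* Usage sketch (editor's illustration): concatenating a 2x3 and a 4x3 tensor
 * along dim 0 produces a 6x3 result; shapes must agree in every other dimension,
 * and catArray resizes the result itself.
 *
 *   THTensor *a = THTensor_(newWithSize2d)(2, 3);
 *   THTensor *b = THTensor_(newWithSize2d)(4, 3);
 *   THTensor *r = THTensor_(new)();
 *   THTensor_(cat)(r, a, b, 0);  // r is 6x3
 */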
THDescBuff THTensor_(desc)(const THTensor *tensor) {
  const int L = TH_DESC_BUFF_LEN;
  THDescBuff buf;
  char *str = buf.str;
  int n = 0;
#define _stringify(x) #x
  n += snprintf(str, L-n, "torch." _stringify(x) "Tensor of size ");
#undef _stringify
  int i;
  for(i = 0; i < THTensor_nDimension(tensor); i++) {
    if(n >= L) break;
    n += snprintf(str+n, L-n, "%" PRId64, tensor->size(i));
    if(i < THTensor_nDimension(tensor)-1) {
      n += snprintf(str+n, L-n, "x");
    }
  }
  if(n >= L) {
    snprintf(str+L-4, 4, "...");
  }
  return buf;
}
THDescBuff THTensor_(sizeDesc)(const THTensor *tensor) {
  THDescBuff buf = _THSizeDesc(tensor->sizes().data(), tensor->sizes().size());
  return buf;
}

#endif