// Caffe2 - C++ API
// A deep learning, cross platform ML framework
// THC/generic/THCTensor.cpp
1 #ifndef THC_GENERIC_FILE
2 #define THC_GENERIC_FILE "THC/generic/THCTensor.cpp"
3 #else
4 
5 #include <ATen/InferSize.h>
6 
7 /**** access methods ****/
// Returns the (non-owning, possibly shared) storage backing `self`.
THCStorage *THCTensor_(storage)(THCState *state, const THCTensor *self)
{
  return THTensor_getStoragePtr(self);
}

// Offset, in elements, of this tensor's first element within its storage.
ptrdiff_t THCTensor_(storageOffset)(THCState *state, const THCTensor *self)
{
  return self->storage_offset();
}

// Number of dimensions under the current (scalar-aware) convention.
int THCTensor_(nDimension)(THCState *state, const THCTensor *self)
{
  return THCTensor_nDimension(state, self);
}

// Legacy dimension count; exact semantics are defined by the
// THCTensor_nDimensionLegacyNoScalars helper (not visible in this file).
int THCTensor_(nDimensionLegacyNoScalars)(THCState *state, const THCTensor *self)
{
  return THCTensor_nDimensionLegacyNoScalars(state, self);
}

// Legacy dimension count (pre scalar/empty-tensor semantics); defers to helper.
int THCTensor_(nDimensionLegacyAll)(THCState *state, const THCTensor *self)
{
  return THCTensor_nDimensionLegacyAll(state, self);
}

// Size of dimension `dim` (bounds checking, if any, is in the helper).
int64_t THCTensor_(size)(THCState *state, const THCTensor *self, int dim)
{
  return THCTensor_size(state, self, dim);
}

// Size of dimension `dim` under legacy no-scalars dimension numbering.
int64_t THCTensor_(sizeLegacyNoScalars)(THCState *state, const THCTensor *self, int dim)
{
  return THTensor_sizeLegacyNoScalars(self, dim);
}

// Stride (in elements) of dimension `dim`.
int64_t THCTensor_(stride)(THCState *state, const THCTensor *self, int dim)
{
  return THCTensor_stride(state, self, dim);
}

// Stride of dimension `dim` under legacy no-scalars dimension numbering.
int64_t THCTensor_(strideLegacyNoScalars)(THCState *state, const THCTensor *self, int dim)
{
  return THTensor_strideLegacyNoScalars(self, dim);
}
52 
53 scalar_t *THCTensor_(data)(THCState *state, const THCTensor *self)
54 {
55  if(THTensor_getStoragePtr(self))
56  return (THCStorage_(data)(state, THTensor_getStoragePtr(self))+self->storage_offset());
57  else
58  return NULL;
59 }
60 
61 /**** creation methods ****/
62 
63 /* Empty init */
// Allocates a fresh empty CUDA tensor backed by a brand-new (empty) storage.
// reclaim() adopts the raw owning THCStorage* without bumping its refcount;
// release() hands the TensorImpl's single reference to the caller, who is
// responsible for eventually calling THCTensor_(free).
THCTensor *THCTensor_(new)(THCState *state)
{
  return c10::make_intrusive<at::TensorImpl, at::UndefinedTensorImpl>(
    c10::intrusive_ptr<at::StorageImpl>::reclaim(THCStorage_(new)(state)),
    at::CUDATensorId(),
    false
  ).release();
}
72 
73 /* Pointer-copy init */
// Creates a new tensor that aliases `tensor`: same storage, storage offset,
// sizes and strides. No element data is copied. The temporary empty storage
// created below is immediately replaced by setStorageNd.
THCTensor *THCTensor_(newWithTensor)(THCState *state, THCTensor *tensor)
{
  THCTensor *self = c10::make_intrusive<at::TensorImpl, at::UndefinedTensorImpl>(
    c10::intrusive_ptr<at::StorageImpl>::reclaim(THCStorage_(new)(state)),
    at::CUDATensorId(),
    false
  ).release();
  THCTensor_(setStorageNd)(state,
                           self,
                           THTensor_getStoragePtr(tensor),
                           tensor->storage_offset(),
                           tensor->dim(),
                           THTensor_getSizePtr(tensor),
                           THTensor_getStridePtr(tensor));
  return self;
}
90 
91 /* Storage init */
// Creates a new tensor viewing `storage` at `storageOffset` with the given
// sizes/strides. `strides` may be empty (strides.data() == NULL), in which
// case the stride layout is left to setStorageNd to determine.
// NOTE(review): the const_casts assume setStorageNd does not write through
// the size/stride pointers — confirm against its implementation.
THCTensor *THCTensor_(newWithStorage)(THCState *state, THCStorage *storage, ptrdiff_t storageOffset, at::IntArrayRef sizes, at::IntArrayRef strides) {
  if (strides.data()) {
    AT_CHECK(sizes.size() == strides.size(), "number of sizes and strides must match");
  }
  THCTensor *self = c10::make_intrusive<at::TensorImpl, at::UndefinedTensorImpl>(
    c10::intrusive_ptr<at::StorageImpl>::reclaim(THCStorage_(new)(state)),
    at::CUDATensorId(),
    false
  ).release();
  THCTensor_(setStorageNd)(state, self, storage, storageOffset, sizes.size(),
                           const_cast<int64_t*>(sizes.data()), const_cast<int64_t*>(strides.data()));

  return self;
}
106 
// 1-d convenience wrapper over newWithStorage.
THCTensor *THCTensor_(newWithStorage1d)(THCState *state, THCStorage *storage, ptrdiff_t storageOffset,
                                        int64_t size0, int64_t stride0)
{
  return THCTensor_(newWithStorage)(state, storage, storageOffset, {size0}, {stride0});
}

// 2-d convenience wrapper over newWithStorage.
THCTensor *THCTensor_(newWithStorage2d)(THCState *state, THCStorage *storage, ptrdiff_t storageOffset,
                                        int64_t size0, int64_t stride0,
                                        int64_t size1, int64_t stride1)
{
  return THCTensor_(newWithStorage)(state, storage, storageOffset, {size0, size1}, {stride0, stride1});
}

// 3-d convenience wrapper over newWithStorage.
THCTensor *THCTensor_(newWithStorage3d)(THCState *state, THCStorage *storage, ptrdiff_t storageOffset,
                                        int64_t size0, int64_t stride0,
                                        int64_t size1, int64_t stride1,
                                        int64_t size2, int64_t stride2)
{
  return THCTensor_(newWithStorage)(state, storage, storageOffset, {size0, size1, size2}, {stride0, stride1, stride2});
}

// 4-d convenience wrapper over newWithStorage.
THCTensor *THCTensor_(newWithStorage4d)(THCState *state, THCStorage *storage, ptrdiff_t storageOffset,
                                        int64_t size0, int64_t stride0,
                                        int64_t size1, int64_t stride1,
                                        int64_t size2, int64_t stride2,
                                        int64_t size3, int64_t stride3)
{
  return THCTensor_(newWithStorage)(state, storage, storageOffset,
                                    {size0, size1, size2, size3},
                                    {stride0, stride1, stride2, stride3});
}
138 
// Creates a new tensor with the given sizes (and optional strides), backed by
// a freshly allocated storage (NULL storage -> newWithStorage allocates).
THCTensor *THCTensor_(newWithSize)(THCState *state, at::IntArrayRef size, at::IntArrayRef stride)
{
  return THCTensor_(newWithStorage)(state, NULL, 0, size, stride);
}

// 1-d convenience wrapper; empty stride list lets the layout be derived.
THCTensor *THCTensor_(newWithSize1d)(THCState *state, int64_t size0)
{
  return THCTensor_(newWithSize)(state, {size0}, {});
}

// 2-d convenience wrapper.
THCTensor *THCTensor_(newWithSize2d)(THCState *state, int64_t size0, int64_t size1)
{
  return THCTensor_(newWithSize)(state, {size0, size1}, {});
}

// 3-d convenience wrapper.
THCTensor *THCTensor_(newWithSize3d)(THCState *state, int64_t size0, int64_t size1, int64_t size2)
{
  return THCTensor_(newWithSize)(state, {size0, size1, size2}, {});
}

// 4-d convenience wrapper.
THCTensor *THCTensor_(newWithSize4d)(THCState *state, int64_t size0, int64_t size1, int64_t size2, int64_t size3)
{
  return THCTensor_(newWithSize)(state, {size0, size1, size2, size3}, {});
}
163 
// Deep copy: allocates a new tensor with the same shape as `self` and copies
// the element data (device-side copy via THCTensor_(copy)).
THCTensor *THCTensor_(newClone)(THCState *state, THCTensor *self)
{
  THCTensor *tensor = THCTensor_(new)(state);
  THCTensor_(resizeAs)(state, tensor, self);
  THCTensor_(copy)(state, tensor, self);
  return tensor;
}
171 
172 THCTensor *THCTensor_(newContiguous)(THCState *state, THCTensor *self)
173 {
174  if(!THCTensor_(isContiguous)(state, self)) {
175  return THCTensor_(newClone)(state, self);
176  } else {
177  THCTensor_(retain)(state, self);
178  return self;
179  }
180 }
181 
// New aliasing tensor equal to `tensor` with dimension `dimension_` selected
// at index `sliceIndex_` (that dimension is removed). No data copy.
THCTensor *THCTensor_(newSelect)(THCState *state, THCTensor *tensor, int dimension_, int64_t sliceIndex_)
{
  THCTensor *self = THCTensor_(newWithTensor)(state, tensor);
  THCTensor_(select)(state, self, NULL, dimension_, sliceIndex_);
  return self;
}

// New aliasing tensor narrowed to [firstIndex_, firstIndex_+size_) along
// `dimension_`. No data copy.
THCTensor *THCTensor_(newNarrow)(THCState *state, THCTensor *tensor, int dimension_, int64_t firstIndex_, int64_t size_)
{
  THCTensor *self = THCTensor_(newWithTensor)(state, tensor);
  THCTensor_(narrow)(state, self, NULL, dimension_, firstIndex_, size_);
  return self;
}

// New aliasing tensor with dimensions dimension1_ and dimension2_ swapped.
THCTensor *THCTensor_(newTranspose)(THCState *state, THCTensor *tensor, int dimension1_, int dimension2_)
{
  THCTensor *self = THCTensor_(newWithTensor)(state, tensor);
  THCTensor_(transpose)(state, self, NULL, dimension1_, dimension2_);
  return self;
}

// New aliasing tensor with an unfolded view along `dimension_` (see unfold).
THCTensor *THCTensor_(newUnfold)(THCState *state, THCTensor *tensor, int dimension_, int64_t size_, int64_t step_)
{
  THCTensor *self = THCTensor_(newWithTensor)(state, tensor);
  THCTensor_(unfold)(state, self, NULL, dimension_, size_, step_);
  return self;
}
209 
// Creates a new tensor viewing the same data as `tensor` with shape `size`.
// at::infer_size resolves a single -1 entry against the element count;
// THTensor_compute_stride returns an empty optional when the requested view
// cannot be expressed over the source's sizes/strides without copying, in
// which case we raise the standard "call .contiguous()" error.
THCTensor *THCTensor_(newView)(THCState *state, THCTensor *tensor, at::IntArrayRef size)
{
  ptrdiff_t numel = THCTensor_(nElement)(state, tensor);
  THCTensor *self = THCTensor_(new)(state);
  auto inferred_size = at::infer_size(size, numel);
  auto stride = THTensor_compute_stride(tensor->sizes(),
                                        tensor->strides(),
                                        inferred_size);
  THArgCheck(stride.has_value(), 2, "view size is "
    "not compatible with input tensor's size and stride (at least one dimension spans "
    "across two contiguous subspaces). Call .contiguous() before .view().");
  auto stride_value = *stride;
  THCTensor_setStorage(state, self, THTensor_getStoragePtr(tensor), tensor->storage_offset(), inferred_size, stride_value);
  return self;
}
225 
226 // Collapses the first two dimensions of a tensor.
227 // Assumes the input tensor is contiguous.
228 THCTensor *THCTensor_(newFoldBatchDim)(THCState *state, THCTensor *input) {
229  int in_dims = THCTensor_(nDimensionLegacyAll)(state, input);
230  THArgCheck(in_dims >= 2, 1, "Tensor needs to have at least two dimensions");
231  THArgCheck(THCTensor_(isContiguous)(state, input), 1,
232  "Tensor must be contiguous");
233  std::vector<int64_t> new_size(in_dims - 1);
234  new_size[0] = THCTensor_(size)(state, input, 0) * THCTensor_(size)(state, input, 1);
235  for (int i = 2; i < in_dims; i++) {
236  new_size[i - 1] = THCTensor_(size)(state, input, i);
237  }
238  THCTensor *output = THCTensor_(newView)(state, input, new_size);
239  return output;
240 }
241 
242 /* Resize */
// Resizes `self` to the given sizes (and optional strides); may reallocate
// the underlying storage. Defers to the non-generic THCTensor_resize.
void THCTensor_(resize)(THCState *state, THCTensor *self, at::IntArrayRef size, at::IntArrayRef stride)
{
  THCTensor_resize(state, self, size, stride);
}

// Resizes `self` to match the shape of `src`.
void THCTensor_(resizeAs)(THCState *state, THCTensor *self, THCTensor *src)
{
  THCTensor_resizeAs(state, self, src);
}

// Resizes to a 0-dim (scalar) tensor.
void THCTensor_(resize0d)(THCState *state, THCTensor *tensor)
{
  THCTensor_resizeNd(state, tensor, 0, {}, nullptr);
}

// Resizes to a 1-d tensor; nullptr stride means contiguous layout is chosen.
void THCTensor_(resize1d)(THCState *state, THCTensor *tensor, int64_t size0)
{
  int64_t size[1] = {size0};
  THCTensor_resizeNd(state, tensor, 1, size, nullptr);
}

// Resizes to a 2-d tensor.
void THCTensor_(resize2d)(THCState *state, THCTensor *tensor, int64_t size0, int64_t size1)
{
  int64_t size[2] = {size0, size1};
  THCTensor_resizeNd(state, tensor, 2, size, nullptr);
}

// Resizes to a 3-d tensor.
void THCTensor_(resize3d)(THCState *state, THCTensor *tensor, int64_t size0, int64_t size1, int64_t size2)
{
  int64_t size[3] = {size0, size1, size2};
  THCTensor_resizeNd(state, tensor, 3, size, nullptr);
}

// Resizes to a 4-d tensor.
void THCTensor_(resize4d)(THCState *state, THCTensor *self, int64_t size0, int64_t size1, int64_t size2, int64_t size3)
{
  int64_t size[4] = {size0, size1, size2, size3};
  THCTensor_resizeNd(state, self, 4, size, nullptr);
}

// Resizes to a 5-d tensor.
void THCTensor_(resize5d)(THCState *state, THCTensor *self, int64_t size0, int64_t size1, int64_t size2, int64_t size3, int64_t size4)
{
  int64_t size[5] = {size0, size1, size2, size3, size4};
  THCTensor_resizeNd(state, self, 5, size, nullptr);
}
287 
// Makes `self` alias `src`: same storage, offset, sizes and strides.
void THCTensor_(set)(THCState *state, THCTensor *self, THCTensor *src)
{
  THCTensor_set(state, self, src);
}

// Points `self` at `storage_` with the given offset, sizes and strides.
void THCTensor_(setStorage)(THCState *state, THCTensor *self, THCStorage *storage_, ptrdiff_t storageOffset_, at::IntArrayRef size_, at::IntArrayRef stride_) {
  THCTensor_setStorage(state, self, storage_, storageOffset_, size_, stride_);
}
296 
// 1-d convenience wrapper over setStorage.
void THCTensor_(setStorage1d)(THCState *state, THCTensor *self, THCStorage *storage_, ptrdiff_t storageOffset_,
                              int64_t size0_, int64_t stride0_)
{
  THCTensor_(setStorage)(state, self, storage_, storageOffset_,
                         {size0_}, {stride0_});
}

// 2-d convenience wrapper over setStorage.
void THCTensor_(setStorage2d)(THCState *state, THCTensor *self, THCStorage *storage_, ptrdiff_t storageOffset_,
                              int64_t size0_, int64_t stride0_,
                              int64_t size1_, int64_t stride1_)
{
  THCTensor_(setStorage)(state, self, storage_, storageOffset_,
                         {size0_, size1_},
                         {stride0_, stride1_});
}

// 3-d convenience wrapper over setStorage.
void THCTensor_(setStorage3d)(THCState *state, THCTensor *self, THCStorage *storage_, ptrdiff_t storageOffset_,
                              int64_t size0_, int64_t stride0_,
                              int64_t size1_, int64_t stride1_,
                              int64_t size2_, int64_t stride2_)
{
  THCTensor_(setStorage)(state, self, storage_, storageOffset_,
                         {size0_, size1_, size2_},
                         {stride0_, stride1_, stride2_});
}
322 
323 void THCTensor_(setStorage4d)(THCState *state, THCTensor *self, THCStorage *storage_, ptrdiff_t storageOffset_,
324  int64_t size0_, int64_t stride0_,
325  int64_t size1_, int64_t stride1_,
326  int64_t size2_, int64_t stride2_,
327  int64_t size3_, int64_t stride3_)
328 {
329 
330  int64_t size[4] = {size0_, size1_, size2_, size3_};
331  int64_t stride[4] = {stride0_, stride1_, stride2_, stride3_};
332 
333  THCTensor_(setStorage)(state, self, storage_, storageOffset_, size, stride);
334 }
335 
336 
// Narrows `self` to [firstIndex, firstIndex+size) along `dimension`, viewing
// `src` (or `self` itself when src is NULL). Aliases data: only the storage
// offset and the size of `dimension` change; no copy is performed.
void THCTensor_(narrow)(THCState *state, THCTensor *self, THCTensor *src, int dimension, int64_t firstIndex, int64_t size)
{
  if(!src)
    src = self;

  THArgCheck( (dimension >= 0) && (dimension < src->dim()), 3, "out of range");
  THArgCheck( firstIndex >= 0, 4, "out of range");
  THArgCheck( size >= 0, 5, "out of range");
  THArgCheck(firstIndex+size <= src->size(dimension), 5, "out of range");

  // Alias src first, then adjust the view in place.
  THCTensor_(set)(state, self, src);

  if (firstIndex > 0) {
    self->set_storage_offset(self->storage_offset() + firstIndex*self->stride(dimension));
  }

  self->set_size(dimension, size);
}
355 
// Selects index `sliceIndex` along `dimension`, removing that dimension from
// `self` (which becomes a view of `src`, or of itself when src is NULL).
// Implemented as narrow-to-1 followed by dropping the now-size-1 dimension.
void THCTensor_(select)(THCState *state, THCTensor *self, THCTensor *src, int dimension, int64_t sliceIndex)
{
  int d;

  if(!src)
    src = self;

  THArgCheck(src->dim() > 0, 1, "cannot select on a 0-dim tensor");
  THArgCheck((dimension >= 0) && (dimension < src->dim()), 3, "out of range");
  THArgCheck((sliceIndex >= 0) && (sliceIndex < src->size(dimension)), 4, "out of range");

  THCTensor_(set)(state, self, src);
  // Narrow handles the storage-offset shift; what remains is removing the
  // (now length-1) selected dimension from sizes/strides.
  THCTensor_(narrow)(state, self, NULL, dimension, sliceIndex, 1);

  std::vector<int64_t> newSize(self->dim()-1);
  std::vector<int64_t> newStride(self->dim()-1);

  // Dimensions before the selected one are unchanged.
  for (d = 0; d < dimension; d++)
  {
    newSize[d] = self->size(d);
    newStride[d] = self->stride(d);
  }

  // Dimensions after it shift down by one.
  for(d = dimension; d < self->dim()-1; d++)
  {
    newSize[d] = self->size(d+1);
    newStride[d] = self->stride(d+1);
  }
  self->set_sizes_and_strides(newSize, newStride);
}
386 
// Swaps dimensions `dimension1` and `dimension2` of `self` (viewing `src`,
// or itself when src is NULL) by exchanging their sizes and strides in place.
// No data movement occurs.
void THCTensor_(transpose)(THCState *state, THCTensor *self, THCTensor *src, int dimension1, int dimension2)
{
  int64_t z;

  if(!src)
    src = self;

  THArgCheck( (dimension1 >= 0) && (dimension1 < THTensor_nDimensionLegacyNoScalars(src)), 1, "out of range");
  THArgCheck( (dimension2 >= 0) && (dimension2 < THTensor_nDimensionLegacyNoScalars(src)), 2, "out of range");

  THCTensor_(set)(state, self, src);

  // Transposing a dimension with itself is a no-op.
  if(dimension1 == dimension2)
    return;

  z = self->stride(dimension1);
  self->set_stride(dimension1, self->stride(dimension2));
  self->set_stride(dimension2, z);
  z = self->size(dimension1);
  self->set_size(dimension1, self->size(dimension2));
  self->set_size(dimension2, z);
}
409 
// Unfolds dimension `dimension` into windows of length `size` taken every
// `step` elements: a new trailing dimension of length `size` is appended, and
// the unfolded dimension shrinks to (orig_size - size)/step + 1 windows.
// Pure view manipulation — no data is copied.
void THCTensor_(unfold)(THCState *state, THCTensor *self, THCTensor *src, int dimension, int64_t size, int64_t step)
{
  int d;

  if(!src)
    src = self;

  THArgCheck(dimension < THTensor_nDimensionLegacyNoScalars(src), 2, "out of range");
  THArgCheck(size <= THTensor_sizeLegacyNoScalars(src, dimension), 3, "out of range");
  THArgCheck(step > 0, 4, "invalid step");

  THCTensor_(set)(state, self, src);

  std::vector<int64_t> newSize(self->dim() + 1);
  std::vector<int64_t> newStride(self->dim() + 1);

  // The appended last dimension walks elements within a window, so it reuses
  // the original stride of the unfolded dimension.
  newSize[self->dim()] = size;
  newStride[self->dim()] = THTensor_strideLegacyNoScalars(self, dimension);
  for(d = 0; d < self->dim(); d++)
  {
    auto self_size = THTensor_sizeLegacyNoScalars(self, d);
    auto self_stride = THTensor_strideLegacyNoScalars(self, d);
    if(d == dimension)
    {
      // Number of windows; stride jumps a whole step between windows.
      newSize[d] = (self_size - size) / step + 1;
      newStride[d] = step*self_stride;
    }
    else
    {
      newSize[d] = self_size;
      newStride[d] = self_stride;
    }
  }

  self->set_sizes_and_strides(newSize, newStride);
}
446 
447 /* we have to handle the case where the result is a number */
448 void THCTensor_(squeeze)(THCState *state, THCTensor *self, THCTensor *src)
449 {
450  if(!src)
451  src = self;
452 
453  THCTensor_(set)(state, self, src);
454 
455  std::vector<int64_t> newSize;
456  std::vector<int64_t> newStride;
457  for(int d = 0; d < src->dim(); ++d)
458  {
459  if(src->size(d) != 1)
460  {
461  newSize.push_back(src->size(d));
462  newStride.push_back(src->stride(d));
463  }
464  }
465 
466  self->set_sizes_and_strides(newSize, newStride);
467 }
468 
// Removes dimension `dimension` from the view if its size is 1; defers to
// the non-generic helper.
void THCTensor_(squeeze1d)(THCState *state, THCTensor *self, THCTensor *src, int dimension)
{
  THCTensor_squeeze1d(state, self, src, dimension);
}

// Inserts a new size-1 dimension at position `dimension`; defers to helper.
void THCTensor_(unsqueeze1d)(THCState *state, THCTensor *self, THCTensor *src, int dimension)
{
  THCTensor_unsqueeze1d(state, self, src, dimension);
}

// 1 iff the tensor's elements are laid out contiguously in storage.
int THCTensor_(isContiguous)(THCState *state, const THCTensor *self)
{
  return self->is_contiguous();
}
483 
484 int THCTensor_(isSetTo)(THCState *state, const THCTensor *self, const THCTensor *src)
485 {
486  if (THTensor_getStoragePtr(self) == THTensor_getStoragePtr(src) &&
487  self->storage_offset() == src->storage_offset() &&
488  self->dim() == src->dim())
489  {
490  int d;
491  for (d = 0; d < self->dim(); ++d)
492  {
493  if (self->size(d) != src->size(d) || self->stride(d) != src->stride(d))
494  return 0;
495  }
496  return 1;
497  }
498  return 0;
499 }
500 
501 int THCTensor_(isSameSizeAs)(THCState *state, const THCTensor *self, const THCTensor* src)
502 {
503  int d;
504  if (self->dim() != src->dim())
505  return 0;
506  for(d = 0; d < self->dim(); ++d)
507  {
508  if(self->size(d) != src->size(d))
509  return 0;
510  }
511  return 1;
512 }
513 
// Total number of elements in the tensor.
ptrdiff_t THCTensor_(nElement)(THCState *state, const THCTensor *self)
{
  return THCTensor_nElement(state, self);
}

// Increments the tensor's reference count.
void THCTensor_(retain)(THCState *state, THCTensor *self)
{
  THCTensor_retain(state, self);
}

// Releases one reference; the tensor is destroyed when the count hits zero.
void THCTensor_(free)(THCState *state, THCTensor *self)
{
  THCTensor_free(state, self);
}

// Copies `self` into `dst` (unless they are the same object), then releases
// the caller's reference to `self`.
void THCTensor_(freeCopyTo)(THCState *state, THCTensor *self, THCTensor *dst)
{
  if(self != dst)
    THCTensor_(copy)(state, dst, self);

  THCTensor_(free)(state, self);
}
536 
537 /*******************************************************************************/
538 
// N-d raw-pointer variant of setStorage; defers to the non-generic helper.
void THCTensor_(setStorageNd)(THCState *state, THCTensor *self, THCStorage *storage, ptrdiff_t storageOffset, int nDimension, const int64_t *size, const int64_t *stride)
{
  THCTensor_setStorageNd(state, self, storage, storageOffset, nDimension, size, stride);
}

// N-d raw-pointer variant of resize; stride may be nullptr for contiguous.
void THCTensor_(resizeNd)(THCState *state, THCTensor *self, int nDimension, const int64_t *size, const int64_t *stride)
{
  THCTensor_resizeNd(state, self, nDimension, size, stride);
}
548 
// Writes `value` into a 0-dim (scalar) tensor. Errors if the tensor has any
// dimensions.
void THCTensor_(set0d)(THCState *state, THCTensor *tensor, scalar_t value)
{
  THArgCheck(THTensor_nDimension(tensor) == 0, 1, "tensor must have no dimensions");
  THCStorage_(set)(state, THTensor_getStoragePtr(tensor), tensor->storage_offset(), value);
}
554 
555 
556 scalar_t THCTensor_(get0d)(THCState *state, const THCTensor *tensor)
557 {
558  THArgCheck(THTensor_nDimension(tensor) == 0, 1, "tensor must have no dimensions dimension");
559  return THCStorage_(get)(state, THTensor_getStoragePtr(tensor), tensor->storage_offset());
560 }
561 
// Element write for a 1-d tensor at index x0 (bounds-checked). Indexing is
// storage_offset + x0 * stride(0); the write goes through THCStorage_(set).
void THCTensor_(set1d)(THCState *state, THCTensor *tensor, int64_t x0, scalar_t value)
{
  THArgCheck(THTensor_nDimensionLegacyNoScalars(tensor) == 1, 1, "tensor must have one dimension");
  THArgCheck( (x0 >= 0) && (x0 < THTensor_sizeLegacyNoScalars(tensor, 0)), 2, "out of range");
  THCStorage_(set)(state, THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*THTensor_strideLegacyNoScalars(tensor, 0), value);
}

// Element read for a 1-d tensor at index x0 (bounds-checked).
scalar_t THCTensor_(get1d)(THCState *state, const THCTensor *tensor, int64_t x0)
{
  THArgCheck(THTensor_nDimensionLegacyNoScalars(tensor) == 1, 1, "tensor must have one dimension");
  THArgCheck( (x0 >= 0) && (x0 < THTensor_sizeLegacyNoScalars(tensor, 0)), 2, "out of range");
  return THCStorage_(get)(state, THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*THTensor_strideLegacyNoScalars(tensor, 0));
}

// Element write for a 2-d tensor at (x0, x1) (bounds-checked).
void THCTensor_(set2d)(THCState *state, THCTensor *tensor, int64_t x0, int64_t x1, scalar_t value)
{
  THArgCheck(tensor->dim() == 2, 1, "tensor must have two dimensions");
  THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)), 2, "out of range");
  THCStorage_(set)(state, THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1), value);
}

// Element read for a 2-d tensor at (x0, x1) (bounds-checked).
scalar_t THCTensor_(get2d)(THCState *state, const THCTensor *tensor, int64_t x0, int64_t x1)
{
  THArgCheck(tensor->dim() == 2, 1, "tensor must have two dimensions");
  THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)), 2, "out of range");
  return THCStorage_(get)(state, THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1));
}

// Element write for a 3-d tensor at (x0, x1, x2) (bounds-checked).
void THCTensor_(set3d)(THCState *state, THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2, scalar_t value)
{
  THArgCheck(tensor->dim() == 3, 1, "tensor must have three dimensions");
  THArgCheck( (x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)), 2, "out of range");
  THCStorage_(set)(state, THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)+x2*tensor->stride(2), value);
}

// Element read for a 3-d tensor at (x0, x1, x2) (bounds-checked).
scalar_t THCTensor_(get3d)(THCState *state, const THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2)
{
  THArgCheck(tensor->dim() == 3, 1, "tensor must have three dimensions");
  THArgCheck( (x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)), 2, "out of range");
  return THCStorage_(get)(state, THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)+x2*tensor->stride(2));
}

// Element write for a 4-d tensor at (x0, x1, x2, x3) (bounds-checked).
void THCTensor_(set4d)(THCState *state, THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3, scalar_t value)
{
  THArgCheck(tensor->dim() == 4, 1, "tensor must have four dimensions");
  THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)) && (x3 >= 0) && (x3 < tensor->size(3)), 2, "out of range");
  THCStorage_(set)(state, THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)+x2*tensor->stride(2)+x3*tensor->stride(3), value);
}

// Element read for a 4-d tensor at (x0, x1, x2, x3) (bounds-checked).
scalar_t THCTensor_(get4d)(THCState *state, const THCTensor *tensor, int64_t x0, int64_t x1, int64_t x2, int64_t x3)
{
  THArgCheck(tensor->dim() == 4, 1, "tensor must have four dimensions");
  THArgCheck((x0 >= 0) && (x0 < tensor->size(0)) && (x1 >= 0) && (x1 < tensor->size(1)) && (x2 >= 0) && (x2 < tensor->size(2)) && (x3 >= 0) && (x3 < tensor->size(3)), 2, "out of range");
  return THCStorage_(get)(state, THTensor_getStoragePtr(tensor), tensor->storage_offset()+x0*tensor->stride(0)+x1*tensor->stride(1)+x2*tensor->stride(2)+x3*tensor->stride(3));
}
617 
// Variadic check that all `nTensors` THCTensor* arguments live on the
// currently active CUDA device. NULL tensors and CPU-resident tensors
// (device == -1) are skipped. Returns 1 if every remaining tensor matches
// the current device, 0 otherwise. va_end is reached on all paths (the loop
// breaks rather than returns early).
int THCTensor_(checkGPU)(THCState *state, unsigned int nTensors, ...)
{
  int curDev = -1;
  THCudaCheck(cudaGetDevice(&curDev));
  va_list args;
  va_start(args, nTensors);
  int valid = 1;
  for (unsigned int i = 0; i < nTensors; i++) {
    THCTensor* tensor = va_arg(args, THCTensor*);
    if (tensor == NULL) {
      continue;
    }

    const int tensorDev = THCTensor_(getDevice)(state, tensor);

    // Skips CPU tensors
    if (tensorDev == -1) { continue; }

    // Checks all tensors are on the same device
    if (tensorDev != curDev) {
      valid = 0;
      break;
    }
  }

  va_end(args);
  return valid;
}
646 
// Formats the tensor's shape as a human-readable string like "[3 x 4 x 5]"
// into a fixed-size THCDescBuff; overly long shapes are truncated to "...]".
// NOTE(review): snprintf returns the would-be length, so `n` can exceed L
// after a truncated write; the `n >= L` check at the top of the loop guards
// the next iteration, but an `L - n` underflow between the two in-loop
// snprintf calls looks theoretically possible for pathological dim counts —
// verify against the matching TH (CPU) implementation before changing.
THCDescBuff THCTensor_(sizeDesc)(THCState *state, const THCTensor *tensor) {
  const int L = THC_DESC_BUFF_LEN;
  THCDescBuff buf;
  char *str = buf.str;
  int n = 0;
  n += snprintf(str, L-n, "[");
  int i;
  for(i = 0; i < tensor->dim(); i++) {
    if(n >= L) break;
    n += snprintf(str+n, L-n, "%" PRId64, tensor->size(i));
    if(i < tensor->dim()-1) {
      n += snprintf(str+n, L-n, " x ");
    }
  }
  if(n < L - 2) {
    snprintf(str+n, L-n, "]");
  } else {
    // Not enough room: overwrite the tail with an ellipsis marker.
    snprintf(str+L-5, 5, "...]");
  }
  return buf;
}
668 
669 #endif
// Reference (c10::intrusive_ptr API): static intrusive_ptr reclaim(TTarget *owning_ptr)
// takes an owning pointer to TTarget* and creates an intrusive_ptr that takes
// over ownership (no refcount increment) — this is how the raw THCStorage*
// results above are adopted before release() hands them to callers.