Caffe2 - C++ API
A deep learning, cross-platform ML framework
python_variable_methods.cpp
// ${generated_comment}

#include <Python.h>

#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/Size.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/python_error_messages.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/autograd/utils/python_arg_parsing.h"
#include "torch/csrc/jit/tracer.h"
#ifdef USE_CUDA
#include "torch/csrc/cuda/Stream.h"
#include "torch/csrc/cuda/Event.h"
#endif
#include "torch/csrc/utils/cuda_lazy_init.h"
#include "torch/csrc/utils/object_ptr.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/python_numbers.h"
#include "torch/csrc/utils/python_strings.h"
#include "torch/csrc/utils/python_tuples.h"
#include "torch/csrc/utils/tensor_apply.h"
#include "torch/csrc/utils/tensor_list.h"
#include "torch/csrc/utils/tensor_new.h"
#include "torch/csrc/utils/tensor_numpy.h"
#include "torch/csrc/utils/tensor_types.h"
#include "torch/csrc/utils/structseq.h"

#include <ATen/ATen.h>
#include "c10/util/Optional.h"

#include "python_variable_methods_dispatch.h"

#include <stdexcept>

using at::DeviceGuard;
using at::device_of;
using at::OptionalDeviceGuard;
using at::Backend;
using at::Scalar;
using at::ScalarType;
using at::Tensor;
using namespace torch::autograd::utils;

namespace torch { namespace autograd {

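// Each binding below follows the same pattern: unwrap the Python object into
// the underlying Variable via THPVariable::cdata, perform the operation inside
// HANDLE_TH_ERRORS / END_HANDLE_TH_ERRORS (so C++ exceptions surface as Python
// exceptions), and re-wrap the result with THPVariable_Wrap() or one of the
// wrap() helpers.
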
static PyObject * THPVariable__is_view(PyObject *self, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (self_.is_view()) {
    Py_RETURN_TRUE;
  } else {
    Py_RETURN_FALSE;
  }
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_apply_(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (self_.requires_grad()) {
    throw std::runtime_error(
        "Can't call apply_() on Variable that requires grad. Use "
        "var.detach().apply_() instead.");
  }
  return THPVariable_Wrap(torch::utils::apply_(self_, arg));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_size(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "size(int64_t dim)",
    "size()",
  });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  if (r.idx == 0) {
    if (jit::tracer::isTracing()) {
      return wrap(jit::tracer::getSizeOf(self_, r.toInt64(0)));
    } else {
      return wrap(self_.size(r.toInt64(0)));
    }
  } else if (r.idx == 1) {
    // we can't do the normal wrapping here because IntArrayRef maps to both
    // torch.Size and tuple in python.
    return THPSize_New(self_);
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
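
// PythonArgParser matches the incoming (args, kwargs) against the signature
// strings in declaration order and reports the match in r.idx, so e.g. a
// Python call of `t.size(0)` selects the "size(int64_t dim)" overload
// (r.idx == 0) while `t.size()` selects the no-argument overload (r.idx == 1).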

static PyObject * THPVariable_stride(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "stride(int64_t dim)",
    "stride()",
  });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  if (r.idx == 0) {
    return wrap(self_.stride(r.toInt64(0)));
  } else if (r.idx == 1) {
    // yes, this is called strides in ATen.
    IntArrayRef strides = self_.strides();
    // we can't do the normal wrapping here because IntArrayRef maps to both
    // torch.Size and tuple in python
    return THPUtils_packInt64Array(strides.size(), strides.data());
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_get_device(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
  return wrap(self.get_device());
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_storage_offset(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
  return wrap(self.storage_offset());
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_dim(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return THPUtils_packInt64(self_.dim());
  END_HANDLE_TH_ERRORS
}

static Tensor dispatch_contiguous(const Tensor & self) {
  AutoNoGIL no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.contiguous();
}
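
// The dispatch_* helpers below share a common prologue: AutoNoGIL releases the
// Python GIL for the duration of the ATen call, and OptionalDeviceGuard
// switches to the tensor's device (if it has one) and restores the previous
// device when the helper returns.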

static PyObject * THPVariable_contiguous(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  // avoids touching the GIL or current device if self is already contiguous
  if (self_.is_contiguous()) {
    // NOTE: this logic is duplicated from VariableType.cpp. Since we need to
    // record this call to contiguous() in the trace regardless of whether
    // we actually call contiguous here, we need to record this information
    // manually.
    if (jit::tracer::isTracing()) {
      auto tracer_state = jit::tracer::getTracingState();
      auto node = tracer_state->graph->create(jit::aten::contiguous, /*num_outputs=*/0);
      jit::tracer::recordSourceLocation(node);
      jit::tracer::addInputs(node, "self", self_);
      tracer_state->graph->insertNode(node);
      jit::tracer::addOutput(node, self_);
    }
    Py_INCREF(self);
    return self;
  }
  return THPVariable_Wrap(dispatch_contiguous(self_));
  END_HANDLE_TH_ERRORS
}

static Tensor dispatch_copy_(Tensor & self, const Tensor & other, bool non_blocking) {
  AutoNoGIL no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.copy_(other, non_blocking);
}

static PyObject * THPVariable_copy_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "copy_(Tensor other, bool non_blocking=False)",
    "copy_(Tensor other, bool async=False)|deprecated"
  });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  return THPVariable_Wrap(dispatch_copy_(self_, r.tensor(0), r.toBool(1)));
  END_HANDLE_TH_ERRORS
}

static double dispatch_to_CDouble(const Tensor & self) {
  AutoNoGIL no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  if (self.numel() != 1) {
    throw ValueError("only one element tensors can be converted to Python scalars");
  }
  return self.item<double>();
}

static std::complex<double> dispatch_to_CComplexDouble(const Tensor & self) {
  AutoNoGIL no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  if (self.numel() != 1) {
    throw ValueError("only one element tensors can be converted to Python scalars");
  }
  return self.item<std::complex<double>>();
}

static int64_t dispatch_to_CLong(const Tensor & self) {
  AutoNoGIL no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  if (self.numel() != 1) {
    throw ValueError("only one element tensors can be converted to Python scalars");
  }
  return self.item<int64_t>();
}
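
// These scalar-conversion helpers back __float__, __int__, __index__ and
// item(): they refuse anything other than a single-element tensor and then
// read the value out with Tensor::item<T>().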

static PyObject * THPVariable_float_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  jit::tracer::warn("Converting a tensor to a Python float", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return wrap(dispatch_to_CDouble(self_));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_integral_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  jit::tracer::warn("Converting a tensor to a Python integer", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (isFloatingType(self_.scalar_type())) {
    // we can't dispatch to item<int64_t> here because we want to avoid ATen overflow checks;
    // the python integral type (long in python2) can't overflow.
    return THPUtils_packDoubleAsInt(dispatch_to_CDouble(self_));
  } else {
    return wrap(dispatch_to_CLong(self_));
  }
  END_HANDLE_TH_ERRORS
}

// This is the __index__ function in Python which is similar to __int__, but
// called when used as a slice.
static PyObject * THPVariable_index_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  jit::tracer::warn("Converting a tensor to a Python index", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  // TODO: change the condition to `self_.dim() != 0` once we expose scalars
  // in PyTorch.
  if (!isIntegralType(self_.scalar_type()) || self_.numel() != 1) {
    throw TypeError("only integer tensors of a single element can be converted to an index");
  }
  return wrap(dispatch_to_CLong(self_));
  END_HANDLE_TH_ERRORS
}

static Tensor dispatch_invert(const Tensor & self) {
  AutoNoGIL no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return 1 - self;
}

static PyObject * THPVariable_invert(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (self_.scalar_type() != at::kByte) {
    throw TypeError("~ (operator.invert) is only implemented on byte tensors");
  }
  return THPVariable_Wrap(dispatch_invert(self_));
  END_HANDLE_TH_ERRORS
}

static Tensor dispatch_to(const Tensor & self, Device device, bool non_blocking, bool copy) {
  AutoNoGIL no_gil;
  // NOTE: this is where we record aten::to in the graph during tracing. However, the behavior of aten::to
  // is different with respect to TensorOptions fields that are not present: aten::to inherits fields that
  // are missing from the self argument while the tracer assumes that they should be populated with the
  // default values (eg. float for scalar type). By explicitly copying over the tensor options here we fully
  // specify all tensor options and thus record the proper trace
  return self.to(self.options().device(device), non_blocking, copy);
}

static Tensor dispatch_to(const Tensor & self, ScalarType dtype, bool non_blocking, bool copy) {
  AutoNoGIL no_gil;
  return self.to(dtype, non_blocking, copy);
}

static Tensor dispatch_to(const Tensor & self, Device device, ScalarType dtype, bool non_blocking, bool copy) {
  AutoNoGIL no_gil;
  return self.to(device, dtype, non_blocking, copy);
}
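
// The three dispatch_to overloads cover the cases THPVariable_to and the
// dtype/device conversion methods need: device only, dtype only, and
// device plus dtype.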

static PyObject * THPVariable_cpu(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return THPVariable_Wrap(dispatch_to(self_, at::Device(at::DeviceType::CPU), false, false));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_cuda(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "cuda(Device? device=None, bool non_blocking=False)",
    "cuda(Device? device=None, bool async=False)|deprecated"
  });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  auto device = r.isNone(0) ? at::Device(at::DeviceType::CUDA) : r.device(0);
  AT_CHECK(device.is_cuda(), "Invalid device, must be cuda device");
  torch::utils::cuda_lazy_init();
  return THPVariable_Wrap(dispatch_to(self_, device, r.toBool(1), false));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_to_type(PyObject* self, ScalarType scalarType) {
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return THPVariable_Wrap(dispatch_to(self_, scalarType, false, false));
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_byte(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Byte);
}

static PyObject * THPVariable_char(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Char);
}

static PyObject * THPVariable_double(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Double);
}

static PyObject * THPVariable_float(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Float);
}

static PyObject * THPVariable_half(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Half);
}

static PyObject * THPVariable_int(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Int);
}

static PyObject * THPVariable_long(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Long);
}

static PyObject * THPVariable_short(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Short);
}
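
// byte()/char()/double()/float()/half()/int()/long()/short() are thin wrappers
// that forward to THPVariable_to_type with the corresponding ScalarType; they
// call dispatch_to with non_blocking=false and copy=false.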

static PyObject * THPVariable_element_size(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return THPUtils_packInt64(self_.element_size());
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_numpy(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("Converting a tensor to a NumPy array", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (self_.requires_grad()) {
    throw std::runtime_error(
        "Can't call numpy() on Variable that requires grad. "
        "Use var.detach().numpy() instead.");
  }
  return torch::utils::tensor_to_numpy(self_.data());
  END_HANDLE_TH_ERRORS
}

// TODO: move this to ATen. We would need to expose Stream objects in ATen.
static PyObject * THPVariable_record_stream(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
#ifdef USE_CUDA
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (!THCPStream_Check(arg)) {
    return PyErr_Format(PyExc_TypeError, "expected Stream object");
  }
  void* data = self_.data_ptr();
  c10::cuda::CUDACachingAllocator::recordStream(data, at::cuda::CUDAStream::unpack(((THCPStream*)arg)->cdata));
  Py_RETURN_NONE;
#else
  throw std::runtime_error("PyTorch compiled without CUDA support");
#endif
  END_HANDLE_TH_ERRORS
}
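
// record_stream() is only meaningful for CUDA tensors: it tells the caching
// allocator that the tensor's memory is in use on the given stream, so the
// allocator does not hand the block back out until that stream's pending work
// has finished. Builds without USE_CUDA raise a runtime error instead.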

static PyObject * THPVariable_requires_grad_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "requires_grad_(bool requires_grad=True)",
  });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  auto requires_grad = r.toBool(0);
  // should we throw if requires_grad is true? var.requires_grad = True throws here
  // but it's nice to let this be a no-op.
  if (!self_.is_leaf() && !requires_grad) {
    throw std::runtime_error(autograd::utils::requires_grad_leaf_error(requires_grad));
  }
  if (requires_grad && !self_.is_floating_point()) {
    throw std::runtime_error("only Tensors of floating point dtype can require gradients");
  }
  self_.set_requires_grad(requires_grad);
  return THPVariable_Wrap(self_);
  END_HANDLE_TH_ERRORS
}

inline bool dispatch_is_contiguous(Tensor & self) {
  return self.is_contiguous();
}

static PyObject * THPVariable_is_contiguous(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
  return wrap(dispatch_is_contiguous(self));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_item(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("Converting a tensor to a Python number", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (self_.is_floating_point()) {
    return wrap(dispatch_to_CDouble(self_));
  } else if (self_.is_complex()) {
    return wrap(dispatch_to_CComplexDouble(self_));
  } else {
    return wrap(dispatch_to_CLong(self_));
  }
  END_HANDLE_TH_ERRORS
}
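
// item() picks the Python-visible type from the tensor's dtype: floating-point
// tensors become a Python float (via double), complex tensors a Python
// complex, and all remaining dtypes a Python int (via int64_t).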

static PyObject * THPVariable_map_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({ "map_(Tensor other, PyObject* callable)" });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  Variable other = r.tensor(0);
  if (self_.requires_grad() || other.requires_grad()) {
    throw std::runtime_error(
        "Can't call map_() on Variable that requires grad. Use "
        "var.detach().map_() instead.");
  }
  return THPVariable_Wrap(torch::utils::map_(self_, other, r.pyobject(1)));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_map2_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({ "map2_(Tensor x, Tensor y, PyObject* callable)" });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  Variable x = r.tensor(0);
  Variable y = r.tensor(1);
  if (self_.requires_grad() || x.requires_grad() || y.requires_grad()) {
    throw std::runtime_error(
        "Can't call map2_() on Variable that requires grad. Use "
        "var.detach().map2_() instead.");
  }
  return THPVariable_Wrap(torch::utils::map2_(self_, x, y, r.pyobject(2)));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  OptionalDeviceGuard device_guard(device_of(self_));
  return THPVariable_Wrap(torch::utils::legacy_tensor_new(self_.type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new_empty(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("new_empty", jit::tracer::LEGACY_CONSTRUCTOR);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  OptionalDeviceGuard device_guard(device_of(self_));
  return THPVariable_Wrap(torch::utils::new_empty(self_.type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new_full(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("new_full", jit::tracer::LEGACY_CONSTRUCTOR);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  OptionalDeviceGuard device_guard(device_of(self_));
  return THPVariable_Wrap(torch::utils::new_full(self_.type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new_ones(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("new_ones", jit::tracer::LEGACY_CONSTRUCTOR);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  OptionalDeviceGuard device_guard(device_of(self_));
  return THPVariable_Wrap(torch::utils::new_ones(self_.type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new_tensor(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("new_tensor", jit::tracer::LEGACY_CONSTRUCTOR);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  OptionalDeviceGuard device_guard(device_of(self_));
  return THPVariable_Wrap(torch::utils::new_tensor(self_.type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new_zeros(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("new_zeros", jit::tracer::LEGACY_CONSTRUCTOR);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  OptionalDeviceGuard device_guard(device_of(self_));
  return THPVariable_Wrap(torch::utils::new_zeros(self_.type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}
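
// The new_* constructors build a tensor with the same type as self and run
// under a device guard set to self's device, forwarding the remaining Python
// arguments to the torch::utils helpers; each call also emits a
// LEGACY_CONSTRUCTOR tracer warning.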

static PyObject * THPVariable_storage(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return createPyObject(self_.storage());
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_storage_type(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  auto storage = THPObjectPtr(createPyObject(self_.storage()));
  auto storage_type = (PyObject*)Py_TYPE(storage);
  Py_INCREF(storage_type);
  return storage_type;
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_to(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  auto parsed = parse_to_conversion(args, kwargs, /*allow_copy*/ true);
  auto& device = std::get<0>(parsed);
  auto& scalarType = std::get<1>(parsed);
  auto non_blocking = std::get<2>(parsed);
  auto copy = std::get<3>(parsed);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (device && device->is_cuda()) {
    torch::utils::cuda_lazy_init();
  }
  if (!device && !scalarType && !copy) {
    Py_INCREF(self);
    return self;
  } else if (!device) {
    return THPVariable_Wrap(dispatch_to(self_, *scalarType, non_blocking, copy));
  } else if (!scalarType) {
    return THPVariable_Wrap(dispatch_to(self_, *device, non_blocking, copy));
  } else {
    return THPVariable_Wrap(dispatch_to(self_, *device, *scalarType, non_blocking, copy));
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}
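
// to() parses its arguments into an (optional device, optional dtype,
// non_blocking, copy) tuple. If neither a device nor a dtype was given and
// copy is false, the call is a no-op that returns self; otherwise it picks the
// matching dispatch_to overload. A Python call like `t.to('cuda', torch.float)`
// therefore lands in the device-plus-dtype overload after cuda_lazy_init().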

static PyObject * THPVariable_tolist(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("Converting a tensor to a Python list", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return torch::utils::tensor_to_list(self_.data());
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_type(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "type(PyObject* dtype=None, bool non_blocking=False)",
    "type(PyObject* dtype=None, bool async=False)|deprecated"
  });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  if (r.isNone(0)) {
    return THPUtils_packString(torch::utils::type_to_string(self_.type()));
  }
  auto obj = r.pyobject(0);
  std::string type_name;
  bool is_dtype = false;
  if (PyType_Check(obj)) {
    if (obj == THPVariableClass) {
      type_name = "torch.Tensor";
    } else {
      type_name = ((PyTypeObject*)obj)->tp_name;
    }
  } else if (THPUtils_checkString(obj)) {
    type_name = THPUtils_unpackString(obj);
  } else if (THPDtype_Check(obj)) {
    is_dtype = true;
  } else {
    throw TypeError("dtype must be a type, str, or dtype object");
  }
  ScalarType scalar_type;
  Device device = self_.device();
  if (is_dtype) {
    scalar_type = r.scalartype(0);
  } else {
    auto& type = torch::utils::type_from_string(type_name);
    scalar_type = type.scalarType();
    auto device_type = backendToDeviceType(type.backend());
    if (device_type != device.type()) {
      device = at::Device(device_type);
    }
  }
  if (device.is_cuda()) {
    torch::utils::cuda_lazy_init();
  }
  return THPVariable_Wrap(dispatch_to(self_, device, scalar_type, /*non_blocking=*/ r.toBool(1), /*copy=*/ false));
  END_HANDLE_TH_ERRORS
}
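
// type() has two behaviours: with no argument it returns the tensor's type as
// a string (e.g. "torch.FloatTensor"); given a type object, a type string, or
// a torch.dtype it converts, deriving the target device from the type's
// backend when one is encoded in the name (e.g. "torch.cuda.FloatTensor").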

// generated methods start here

${py_methods}

static PyObject * THPVariable_bool(PyObject* self, PyObject* args) {
  jit::tracer::warn("Converting a tensor to a Python boolean", jit::tracer::WARN_PYTHON_DATAFLOW);
  return THPVariable_is_nonzero(self, args);
}

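// ${py_methods} above expands to the code-generated bindings (add, mul, sub,
// div, matmul, numel, is_nonzero, ...) that the method table below refers to.
// Each table entry maps a Python method name to its C implementation and
// calling convention (METH_NOARGS, METH_O, or METH_VARARGS | METH_KEYWORDS).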
PyMethodDef variable_methods[] = {
  {"__add__", (PyCFunction)THPVariable_add, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__radd__", (PyCFunction)THPVariable_add, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__iadd__", (PyCFunction)THPVariable_add_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__rmul__", (PyCFunction)THPVariable_mul, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__mul__", (PyCFunction)THPVariable_mul, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__imul__", (PyCFunction)THPVariable_mul_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__sub__", (PyCFunction)THPVariable_sub, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__isub__", (PyCFunction)THPVariable_sub_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__div__", (PyCFunction)THPVariable_div, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__truediv__", (PyCFunction)THPVariable_div, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__idiv__", (PyCFunction)THPVariable_div_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__mod__", (PyCFunction)THPVariable_remainder, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__bool__", (PyCFunction)THPVariable_bool, METH_NOARGS, NULL},
  {"__float__", (PyCFunction)THPVariable_float_scalar, METH_NOARGS, NULL},
  {"__int__", (PyCFunction)THPVariable_integral_scalar, METH_NOARGS, NULL},
  {"__long__", (PyCFunction)THPVariable_integral_scalar, METH_NOARGS, NULL},
  {"__index__", (PyCFunction)THPVariable_index_scalar, METH_NOARGS, NULL},
  {"__nonzero__", (PyCFunction)THPVariable_bool, METH_NOARGS, NULL},
  {"__invert__", (PyCFunction)THPVariable_invert, METH_NOARGS, NULL},
  {"__matmul__", (PyCFunction)THPVariable_matmul, METH_VARARGS | METH_KEYWORDS, NULL},
  {"_is_view", (PyCFunction)THPVariable__is_view, METH_NOARGS, NULL},
  {"apply_", (PyCFunction)THPVariable_apply_, METH_O, NULL},
  {"byte", (PyCFunction)THPVariable_byte, METH_NOARGS, NULL},
  {"char", (PyCFunction)THPVariable_char, METH_NOARGS, NULL},
  {"contiguous", (PyCFunction)THPVariable_contiguous, METH_NOARGS, NULL},
  {"copy_", (PyCFunction)THPVariable_copy_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"cpu", (PyCFunction)THPVariable_cpu, METH_NOARGS, NULL},
  {"cuda", (PyCFunction)THPVariable_cuda, METH_VARARGS | METH_KEYWORDS, NULL},
  {"dim", (PyCFunction)THPVariable_dim, METH_NOARGS, NULL},
  {"double", (PyCFunction)THPVariable_double, METH_NOARGS, NULL},
  {"element_size", (PyCFunction)THPVariable_element_size, METH_NOARGS, NULL},
  {"float", (PyCFunction)THPVariable_float, METH_NOARGS, NULL},
  {"get_device", (PyCFunction)THPVariable_get_device, METH_NOARGS, NULL},
  {"half", (PyCFunction)THPVariable_half, METH_NOARGS, NULL},
  {"int", (PyCFunction)THPVariable_int, METH_NOARGS, NULL},
  {"is_contiguous", (PyCFunction)THPVariable_is_contiguous, METH_NOARGS, NULL},
  {"item", (PyCFunction)THPVariable_item, METH_NOARGS, NULL},
  {"long", (PyCFunction)THPVariable_long, METH_NOARGS, NULL},
  {"map_", (PyCFunction)THPVariable_map_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"map2_", (PyCFunction)THPVariable_map2_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"ndimension", (PyCFunction)THPVariable_dim, METH_NOARGS, NULL},
  {"nelement", (PyCFunction)THPVariable_numel, METH_NOARGS, NULL},
  {"new", (PyCFunction)THPVariable_new, METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_empty", (PyCFunction)THPVariable_new_empty, METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_full", (PyCFunction)THPVariable_new_full, METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_ones", (PyCFunction)THPVariable_new_ones, METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_tensor", (PyCFunction)THPVariable_new_tensor, METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_zeros", (PyCFunction)THPVariable_new_zeros, METH_VARARGS | METH_KEYWORDS, NULL},
  {"numpy", (PyCFunction)THPVariable_numpy, METH_NOARGS, NULL},
  {"record_stream", (PyCFunction)THPVariable_record_stream, METH_O, NULL},
  {"requires_grad_", (PyCFunction)THPVariable_requires_grad_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"short", (PyCFunction)THPVariable_short, METH_NOARGS, NULL},
  {"size", (PyCFunction)THPVariable_size, METH_VARARGS | METH_KEYWORDS, NULL},
  {"storage", (PyCFunction)THPVariable_storage, METH_NOARGS, NULL},
  {"storage_offset", (PyCFunction)THPVariable_storage_offset, METH_NOARGS, NULL},
  {"storage_type", (PyCFunction)THPVariable_storage_type, METH_NOARGS, NULL},
  {"stride", (PyCFunction)THPVariable_stride, METH_VARARGS | METH_KEYWORDS, NULL},
  {"to", (PyCFunction)THPVariable_to, METH_VARARGS | METH_KEYWORDS, NULL},
  {"tolist", (PyCFunction)THPVariable_tolist, METH_NOARGS, NULL},
  {"type", (PyCFunction)THPVariable_type, METH_VARARGS | METH_KEYWORDS, NULL},
  ${py_method_defs}
  {NULL}
};

}} // namespace torch::autograd