5 #include "torch/csrc/DynamicTypes.h" 6 #include "torch/csrc/Exceptions.h" 7 #include "torch/csrc/Size.h" 8 #include "torch/csrc/autograd/python_variable.h" 9 #include "torch/csrc/autograd/utils/python_error_messages.h" 10 #include "torch/csrc/autograd/utils/wrap_outputs.h" 11 #include "torch/csrc/autograd/utils/python_arg_parsing.h" 12 #include "torch/csrc/jit/tracer.h" 14 #include "torch/csrc/cuda/Stream.h" 15 #include "torch/csrc/cuda/Event.h" 17 #include "torch/csrc/utils/cuda_lazy_init.h" 18 #include "torch/csrc/utils/object_ptr.h" 19 #include "torch/csrc/utils/python_arg_parser.h" 20 #include "torch/csrc/utils/python_numbers.h" 21 #include "torch/csrc/utils/python_strings.h" 22 #include "torch/csrc/utils/python_tuples.h" 23 #include "torch/csrc/utils/tensor_apply.h" 24 #include "torch/csrc/utils/tensor_list.h" 25 #include "torch/csrc/utils/tensor_new.h" 26 #include "torch/csrc/utils/tensor_numpy.h" 27 #include "torch/csrc/utils/tensor_types.h" 28 #include "torch/csrc/utils/structseq.h" 30 #include <ATen/ATen.h> 31 #include "c10/util/Optional.h" 33 #include "python_variable_methods_dispatch.h" 46 namespace torch {
namespace autograd {
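// Note: each Python-facing THPVariable_* binding below follows the same pattern:
// unwrap the THPVariable into the underlying Variable (->cdata), run the operation
// on the C++ tensor, and re-wrap any tensor result with THPVariable_Wrap. Bodies
// are bracketed by HANDLE_TH_ERRORS / END_HANDLE_TH_ERRORS so that C++ exceptions
// surface as Python exceptions.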
static PyObject * THPVariable__is_view(PyObject *self, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (self_.is_view()) {
    Py_RETURN_TRUE;
  } else {
    Py_RETURN_FALSE;
  }
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_apply_(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (self_.requires_grad()) {
    throw std::runtime_error(
        "Can't call apply_() on Variable that requires grad. Use "
        "var.detach().apply_() instead.");
  }
  return THPVariable_Wrap(torch::utils::apply_(self_, arg));
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_size(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "size(int64_t dim)",
    "size()",
  });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  if (r.idx == 0) {
    if (jit::tracer::isTracing()) {
      return wrap(jit::tracer::getSizeOf(self_, r.toInt64(0)));
    } else {
      return wrap(self_.size(r.toInt64(0)));
    }
  } else if (r.idx == 1) {
    // IntArrayRef maps to both torch.Size and tuple in Python, so wrap manually.
    return THPSize_New(self_);
  }
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_stride(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "stride(int64_t dim)",
    "stride()",
  });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  if (r.idx == 0) {
    return wrap(self_.stride(r.toInt64(0)));
  } else if (r.idx == 1) {
    IntArrayRef strides = self_.strides();
    // IntArrayRef maps to both torch.Size and tuple in Python, so wrap manually.
    return THPUtils_packInt64Array(strides.size(), strides.data());
  }
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_get_device(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
  return wrap(self.get_device());
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_storage_offset(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
  return wrap(self.storage_offset());
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_dim(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return THPUtils_packInt64(self_.dim());
  END_HANDLE_TH_ERRORS
}
static Tensor dispatch_contiguous(const Tensor & self) {
  AutoNoGIL no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.contiguous();
}

static PyObject * THPVariable_contiguous(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  // avoid touching the GIL or the current device if self is already contiguous
  if (self_.is_contiguous()) {
    // still record the call to contiguous() in the trace even though it is a no-op
    if (jit::tracer::isTracing()) {
      auto tracer_state = jit::tracer::getTracingState();
      auto node = tracer_state->graph->create(jit::aten::contiguous, /*num_outputs=*/0);
      jit::tracer::recordSourceLocation(node);
      jit::tracer::addInputs(node, "self", self_);
      tracer_state->graph->insertNode(node);
      jit::tracer::addOutput(node, self_);
    }
    Py_INCREF(self);
    return self;
  }
  return THPVariable_Wrap(dispatch_contiguous(self_));
  END_HANDLE_TH_ERRORS
}
static Tensor dispatch_copy_(Tensor & self, const Tensor & other, bool non_blocking) {
  AutoNoGIL no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return self.copy_(other, non_blocking);
}

static PyObject * THPVariable_copy_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "copy_(Tensor other, bool non_blocking=False)",
    "copy_(Tensor other, bool async=False)|deprecated"
  });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  return THPVariable_Wrap(dispatch_copy_(self_, r.tensor(0), r.toBool(1)));
  END_HANDLE_TH_ERRORS
}
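// Note: copy_ above accepts both the current keyword (non_blocking) and the deprecated
// spelling (async) via the second, |deprecated parser signature; both parse into slot 1.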
static double dispatch_to_CDouble(const Tensor & self) {
  AutoNoGIL no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  if (self.numel() != 1) {
    throw ValueError("only one element tensors can be converted to Python scalars");
  }
  return self.item<double>();
}

static std::complex<double> dispatch_to_CComplexDouble(const Tensor & self) {
  AutoNoGIL no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  if (self.numel() != 1) {
    throw ValueError("only one element tensors can be converted to Python scalars");
  }
  return self.item<std::complex<double>>();
}

static int64_t dispatch_to_CLong(const Tensor & self) {
  AutoNoGIL no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  if (self.numel() != 1) {
    throw ValueError("only one element tensors can be converted to Python scalars");
  }
  return self.item<int64_t>();
}
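// These dispatch_to_C* helpers back item() and the __float__/__int__/__index__
// conversions below: they guard the tensor's device, require exactly one element,
// and extract the value via Tensor::item<T>().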
static PyObject * THPVariable_float_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  jit::tracer::warn("Converting a tensor to a Python float", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return wrap(dispatch_to_CDouble(self_));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_integral_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  jit::tracer::warn("Converting a tensor to a Python integer", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (isFloatingType(self_.scalar_type())) {
    return THPUtils_packDoubleAsInt(dispatch_to_CDouble(self_));
  } else {
    return wrap(dispatch_to_CLong(self_));
  }
  END_HANDLE_TH_ERRORS
}

// __index__ is similar to __int__, but is called when the tensor is used as a slice index.
static PyObject * THPVariable_index_scalar(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  jit::tracer::warn("Converting a tensor to a Python index", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (!isIntegralType(self_.scalar_type()) || self_.numel() != 1) {
    throw TypeError("only integer tensors of a single element can be converted to an index");
  }
  return wrap(dispatch_to_CLong(self_));
  END_HANDLE_TH_ERRORS
}
static Tensor dispatch_invert(const Tensor & self) {
  AutoNoGIL no_gil;
  OptionalDeviceGuard device_guard(device_of(self));
  return 1 - self;
}

static PyObject * THPVariable_invert(PyObject* self, PyObject* args) {
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (self_.scalar_type() != at::kByte) {
    throw TypeError("~ (operator.invert) is only implemented on byte tensors");
  }
  return THPVariable_Wrap(dispatch_invert(self_));
  END_HANDLE_TH_ERRORS
}
static Tensor dispatch_to(const Tensor & self, Device device, bool non_blocking, bool copy) {
  AutoNoGIL no_gil;
  // Explicitly copy over the current tensor options so the device change is fully
  // specified and the trace records a complete aten::to call.
  return self.to(self.options().device(device), non_blocking, copy);
}

static Tensor dispatch_to(const Tensor & self, ScalarType dtype, bool non_blocking, bool copy) {
  AutoNoGIL no_gil;
  return self.to(dtype, non_blocking, copy);
}

static Tensor dispatch_to(const Tensor & self, Device device, ScalarType dtype, bool non_blocking, bool copy) {
  AutoNoGIL no_gil;
  return self.to(device, dtype, non_blocking, copy);
}
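// The three dispatch_to overloads cover device-only, dtype-only, and device+dtype
// conversions; THPVariable_to below picks one based on which arguments were given.
// Illustrative mapping (assumed typical Python-side calls, not part of this file):
//   t.to(torch.float64)        -> dispatch_to(self, dtype, ...)
//   t.to('cuda:0')             -> dispatch_to(self, device, ...)
//   t.to('cuda:0', torch.half) -> dispatch_to(self, device, dtype, ...)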
static PyObject * THPVariable_cpu(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return THPVariable_Wrap(dispatch_to(self_, at::Device(at::DeviceType::CPU), false, false));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_cuda(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "cuda(Device? device=None, bool non_blocking=False)",
    "cuda(Device? device=None, bool async=False)|deprecated"
  });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  auto device = r.isNone(0) ? at::Device(at::DeviceType::CUDA) : r.device(0);
  AT_CHECK(device.is_cuda(), "Invalid device, must be cuda device");
  torch::utils::cuda_lazy_init();
  return THPVariable_Wrap(dispatch_to(self_, device, r.toBool(1), false));
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_to_type(PyObject* self, ScalarType scalarType) {
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return THPVariable_Wrap(dispatch_to(self_, scalarType, false, false));
  END_HANDLE_TH_ERRORS
}
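// The fixed-dtype conversions below (byte, char, double, float, half, int, long,
// short) are thin wrappers that forward to THPVariable_to_type with the matching
// ScalarType.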
static PyObject * THPVariable_byte(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Byte);
}

static PyObject * THPVariable_char(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Char);
}

static PyObject * THPVariable_double(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Double);
}

static PyObject * THPVariable_float(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Float);
}

static PyObject * THPVariable_half(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Half);
}

static PyObject * THPVariable_int(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Int);
}

static PyObject * THPVariable_long(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Long);
}

static PyObject * THPVariable_short(PyObject* self, PyObject* args) {
  return THPVariable_to_type(self, ScalarType::Short);
}
static PyObject * THPVariable_element_size(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return THPUtils_packInt64(self_.element_size());
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_numpy(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("Converting a tensor to a NumPy array", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (self_.requires_grad()) {
    throw std::runtime_error(
        "Can't call numpy() on Variable that requires grad. "
        "Use var.detach().numpy() instead.");
  }
  return torch::utils::tensor_to_numpy(self_.data());
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_record_stream(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
#ifdef USE_CUDA
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (!THCPStream_Check(arg)) {
    return PyErr_Format(PyExc_TypeError, "expected Stream object");
  }
  void* data = self_.data_ptr();
  c10::cuda::CUDACachingAllocator::recordStream(data, at::cuda::CUDAStream::unpack(((THCPStream*)arg)->cdata));
  Py_RETURN_NONE;
#else
  throw std::runtime_error("PyTorch compiled without CUDA support");
#endif
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_requires_grad_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "requires_grad_(bool requires_grad=True)",
  });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  auto requires_grad = r.toBool(0);
  if (!self_.is_leaf() && !requires_grad) {
    throw std::runtime_error(autograd::utils::requires_grad_leaf_error(requires_grad));
  }
  if (requires_grad && !self_.is_floating_point()) {
    throw std::runtime_error("only Tensors of floating point dtype can require gradients");
  }
  self_.set_requires_grad(requires_grad);
  return THPVariable_Wrap(self_);
  END_HANDLE_TH_ERRORS
}
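// Note: requires_grad_ above mutates the flag in place and returns self; it rejects
// disabling gradients on non-leaf tensors and enabling them on non-floating-point dtypes.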
inline bool dispatch_is_contiguous(Tensor & self) {
  return self.is_contiguous();
}

static PyObject * THPVariable_is_contiguous(PyObject* self_, PyObject* args)
{
  HANDLE_TH_ERRORS
  auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
  return wrap(dispatch_is_contiguous(self));
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_item(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("Converting a tensor to a Python number", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (self_.is_floating_point()) {
    return wrap(dispatch_to_CDouble(self_));
  } else if (self_.is_complex()) {
    return wrap(dispatch_to_CComplexDouble(self_));
  } else {
    return wrap(dispatch_to_CLong(self_));
  }
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_map_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({ "map_(Tensor other, PyObject* callable)" });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  Variable other = r.tensor(0);
  if (self_.requires_grad() || other.requires_grad()) {
    throw std::runtime_error(
        "Can't call map_() on Variable that requires grad. Use "
        "var.detach().map_() instead.");
  }
  return THPVariable_Wrap(torch::utils::map_(self_, other, r.pyobject(1)));
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_map2_(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({ "map2_(Tensor x, Tensor y, PyObject* callable)" });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  Variable x = r.tensor(0);
  Variable y = r.tensor(1);
  if (self_.requires_grad() || x.requires_grad() || y.requires_grad()) {
    throw std::runtime_error(
        "Can't call map2_() on Variable that requires grad. Use "
        "var.detach().map2_() instead.");
  }
  return THPVariable_Wrap(torch::utils::map2_(self_, x, y, r.pyobject(2)));
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_new(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  OptionalDeviceGuard device_guard(device_of(self_));
  return THPVariable_Wrap(torch::utils::legacy_tensor_new(self_.type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_new_empty(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("new_empty", jit::tracer::LEGACY_CONSTRUCTOR);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  OptionalDeviceGuard device_guard(device_of(self_));
  return THPVariable_Wrap(torch::utils::new_empty(self_.type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new_full(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("new_full", jit::tracer::LEGACY_CONSTRUCTOR);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  OptionalDeviceGuard device_guard(device_of(self_));
  return THPVariable_Wrap(torch::utils::new_full(self_.type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new_ones(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("new_ones", jit::tracer::LEGACY_CONSTRUCTOR);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  OptionalDeviceGuard device_guard(device_of(self_));
  return THPVariable_Wrap(torch::utils::new_ones(self_.type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new_tensor(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("new_tensor", jit::tracer::LEGACY_CONSTRUCTOR);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  OptionalDeviceGuard device_guard(device_of(self_));
  return THPVariable_Wrap(torch::utils::new_tensor(self_.type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_new_zeros(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("new_zeros", jit::tracer::LEGACY_CONSTRUCTOR);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  OptionalDeviceGuard device_guard(device_of(self_));
  return THPVariable_Wrap(torch::utils::new_zeros(self_.type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_storage(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return createPyObject(self_.storage());
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_storage_type(PyObject* self, PyObject* arg)
{
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  auto storage = THPObjectPtr(createPyObject(self_.storage()));
  auto storage_type = (PyObject*)Py_TYPE(storage);
  Py_INCREF(storage_type);
  return storage_type;
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_to(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  auto parsed = parse_to_conversion(args, kwargs, /*allow_copy*/ true);
  auto& device = std::get<0>(parsed);
  auto& scalarType = std::get<1>(parsed);
  auto non_blocking = std::get<2>(parsed);
  auto copy = std::get<3>(parsed);
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (device && device->is_cuda()) {
    torch::utils::cuda_lazy_init();
  }
  if (!device && !scalarType && !copy) {
    Py_INCREF(self);
    return self;
  } else if (!device) {
    return THPVariable_Wrap(dispatch_to(self_, *scalarType, non_blocking, copy));
  } else if (!scalarType) {
    return THPVariable_Wrap(dispatch_to(self_, *device, non_blocking, copy));
  } else {
    return THPVariable_Wrap(dispatch_to(self_, *device, *scalarType, non_blocking, copy));
  }
  END_HANDLE_TH_ERRORS
}
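// Note: to() above dispatches as follows: a pure no-op (nothing requested and
// copy=False) returns self unchanged; otherwise the dtype-only, device-only, or
// device+dtype overload of dispatch_to is selected. CUDA is lazily initialized
// first whenever a CUDA device is requested.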
static PyObject * THPVariable_tolist(PyObject* self, PyObject* args)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("Converting a tensor to a Python list", jit::tracer::WARN_PYTHON_DATAFLOW);
  auto self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  return torch::utils::tensor_to_list(self_.data());
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_type(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "type(PyObject* dtype=None, bool non_blocking=False)",
    "type(PyObject* dtype=None, bool async=False)|deprecated"
  });
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  if (r.isNone(0)) {
    return THPUtils_packString(torch::utils::type_to_string(self_.type()));
  }
  auto obj = r.pyobject(0);
  std::string type_name;
  bool is_dtype = false;
  if (PyType_Check(obj)) {
    if (obj == THPVariableClass) {
      type_name = "torch.Tensor";
    } else {
      type_name = ((PyTypeObject*)obj)->tp_name;
    }
  } else if (THPUtils_checkString(obj)) {
    type_name = THPUtils_unpackString(obj);
  } else if (THPDtype_Check(obj)) {
    is_dtype = true;
  } else {
    throw TypeError("dtype must be a type, str, or dtype object");
  }
  ScalarType scalar_type;
  Device device = self_.device();
  if (is_dtype) {
    scalar_type = r.scalartype(0);
  } else {
    auto& type = torch::utils::type_from_string(type_name);
    scalar_type = type.scalarType();
    auto device_type = backendToDeviceType(type.backend());
    if (device_type != device.type()) {
      device = at::Device(device_type);
    }
  }
  if (device.is_cuda()) {
    torch::utils::cuda_lazy_init();
  }
  return THPVariable_Wrap(dispatch_to(self_, device, scalar_type, r.toBool(1), false));
  END_HANDLE_TH_ERRORS
}
static PyObject * THPVariable_bool(PyObject* self, PyObject* args) {
  jit::tracer::warn("Converting a tensor to a Python boolean", jit::tracer::WARN_PYTHON_DATAFLOW);
  return THPVariable_is_nonzero(self, args);
}
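// Method table binding the functions above, together with arithmetic wrappers such
// as THPVariable_add that are presumably produced by code generation elsewhere in
// the build, to their Python method names.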
PyMethodDef variable_methods[] = {
  {"__add__", (PyCFunction)THPVariable_add, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__radd__", (PyCFunction)THPVariable_add, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__iadd__", (PyCFunction)THPVariable_add_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__rmul__", (PyCFunction)THPVariable_mul, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__mul__", (PyCFunction)THPVariable_mul, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__imul__", (PyCFunction)THPVariable_mul_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__sub__", (PyCFunction)THPVariable_sub, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__isub__", (PyCFunction)THPVariable_sub_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__div__", (PyCFunction)THPVariable_div, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__truediv__", (PyCFunction)THPVariable_div, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__idiv__", (PyCFunction)THPVariable_div_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__mod__", (PyCFunction)THPVariable_remainder, METH_VARARGS | METH_KEYWORDS, NULL},
  {"__bool__", (PyCFunction)THPVariable_bool, METH_NOARGS, NULL},
  {"__float__", (PyCFunction)THPVariable_float_scalar, METH_NOARGS, NULL},
  {"__int__", (PyCFunction)THPVariable_integral_scalar, METH_NOARGS, NULL},
  {"__long__", (PyCFunction)THPVariable_integral_scalar, METH_NOARGS, NULL},
  {"__index__", (PyCFunction)THPVariable_index_scalar, METH_NOARGS, NULL},
  {"__nonzero__", (PyCFunction)THPVariable_bool, METH_NOARGS, NULL},
  {"__invert__", (PyCFunction)THPVariable_invert, METH_NOARGS, NULL},
  {"__matmul__", (PyCFunction)THPVariable_matmul, METH_VARARGS | METH_KEYWORDS, NULL},
  {"_is_view", (PyCFunction)THPVariable__is_view, METH_NOARGS, NULL},
  {"apply_", (PyCFunction)THPVariable_apply_, METH_O, NULL},
  {"byte", (PyCFunction)THPVariable_byte, METH_NOARGS, NULL},
  {"char", (PyCFunction)THPVariable_char, METH_NOARGS, NULL},
  {"contiguous", (PyCFunction)THPVariable_contiguous, METH_NOARGS, NULL},
  {"copy_", (PyCFunction)THPVariable_copy_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"cpu", (PyCFunction)THPVariable_cpu, METH_NOARGS, NULL},
  {"cuda", (PyCFunction)THPVariable_cuda, METH_VARARGS | METH_KEYWORDS, NULL},
  {"dim", (PyCFunction)THPVariable_dim, METH_NOARGS, NULL},
  {"double", (PyCFunction)THPVariable_double, METH_NOARGS, NULL},
  {"element_size", (PyCFunction)THPVariable_element_size, METH_NOARGS, NULL},
  {"float", (PyCFunction)THPVariable_float, METH_NOARGS, NULL},
  {"get_device", (PyCFunction)THPVariable_get_device, METH_NOARGS, NULL},
  {"half", (PyCFunction)THPVariable_half, METH_NOARGS, NULL},
  {"int", (PyCFunction)THPVariable_int, METH_NOARGS, NULL},
  {"is_contiguous", (PyCFunction)THPVariable_is_contiguous, METH_NOARGS, NULL},
  {"item", (PyCFunction)THPVariable_item, METH_NOARGS, NULL},
  {"long", (PyCFunction)THPVariable_long, METH_NOARGS, NULL},
  {"map_", (PyCFunction)THPVariable_map_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"map2_", (PyCFunction)THPVariable_map2_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"ndimension", (PyCFunction)THPVariable_dim, METH_NOARGS, NULL},
  {"nelement", (PyCFunction)THPVariable_numel, METH_NOARGS, NULL},
  {"new", (PyCFunction)THPVariable_new, METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_empty", (PyCFunction)THPVariable_new_empty, METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_full", (PyCFunction)THPVariable_new_full, METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_ones", (PyCFunction)THPVariable_new_ones, METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_tensor", (PyCFunction)THPVariable_new_tensor, METH_VARARGS | METH_KEYWORDS, NULL},
  {"new_zeros", (PyCFunction)THPVariable_new_zeros, METH_VARARGS | METH_KEYWORDS, NULL},
  {"numpy", (PyCFunction)THPVariable_numpy, METH_NOARGS, NULL},
  {"record_stream", (PyCFunction)THPVariable_record_stream, METH_O, NULL},
  {"requires_grad_", (PyCFunction)THPVariable_requires_grad_, METH_VARARGS | METH_KEYWORDS, NULL},
  {"short", (PyCFunction)THPVariable_short, METH_NOARGS, NULL},
  {"size", (PyCFunction)THPVariable_size, METH_VARARGS | METH_KEYWORDS, NULL},
  {"storage", (PyCFunction)THPVariable_storage, METH_NOARGS, NULL},
  {"storage_offset", (PyCFunction)THPVariable_storage_offset, METH_NOARGS, NULL},
  {"storage_type", (PyCFunction)THPVariable_storage_type, METH_NOARGS, NULL},
  {"stride", (PyCFunction)THPVariable_stride, METH_VARARGS | METH_KEYWORDS, NULL},
  {"to", (PyCFunction)THPVariable_to, METH_VARARGS | METH_KEYWORDS, NULL},
  {"tolist", (PyCFunction)THPVariable_tolist, METH_NOARGS, NULL},
  {"type", (PyCFunction)THPVariable_type, METH_VARARGS | METH_KEYWORDS, NULL},