11 #include "python_torch_functions_dispatch.h"    13 #include "torch/csrc/autograd/python_variable.h"    14 #include "torch/csrc/autograd/utils/wrap_outputs.h"    15 #include "torch/csrc/Dtype.h"    16 #include "torch/csrc/DynamicTypes.h"    17 #include "torch/csrc/Exceptions.h"    18 #include "torch/csrc/utils/python_arg_parser.h"    19 #include "torch/csrc/utils/tensor_layouts.h"    20 #include "torch/csrc/utils/tensor_new.h"    21 #include "torch/csrc/utils/tensor_numpy.h"    22 #include "torch/csrc/jit/tracer.h"    23 #include "torch/csrc/autograd/generated/variable_factories.h"    24 #include "torch/csrc/utils/structseq.h"    26 #include <ATen/ATen.h>    29 #include <initializer_list>    44 namespace torch { 
namespace autograd {
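// Validates an explicit `out=` tensor against any dtype/layout/device the caller
// also passed: if the variable type implied by those arguments differs from the
// type of the out tensor, an error is raised instead of silently reallocating it.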
static void check_out_type_matches(Tensor result,
                                   ScalarType scalarType, bool scalarType_is_none,
                                   const THPLayout& layout, bool layout_is_none,
                                   const Device& device, bool device_is_none) {
  if (scalarType_is_none && layout_is_none && device_is_none) {  // common case
    return;
  }
  auto scalarType_arg = scalarType_is_none ? result.scalar_type() : scalarType;
  auto layout_arg = layout_is_none ? *torch::getLayout(result.type().backend()) : layout;
  auto device_type_arg = device_is_none ? torch::getDeviceType(result.type()) : device.type();
  const auto& type = torch::getVariableType(scalarType_arg, layout_arg, device_type_arg);
  if (result.type() != type) {
    AT_ERROR(
        "type corresponding to ", type.toString(),
        " does not match type of out parameter (", result.type().toString(), ")");
  }
}
inline Tensor dispatch_arange(Scalar end, Tensor result) {
  return at::arange_out(result, end);
}

inline Tensor dispatch_arange(Scalar end, const TensorOptions& options) {
  maybe_initialize_cuda(options);
  return torch::arange(end, options);
}

inline Tensor dispatch_arange(Scalar start, Scalar end, Scalar step, Tensor result) {
  return at::arange_out(result, start, end, step);
}

inline Tensor dispatch_arange(Scalar start, Scalar end, Scalar step, const TensorOptions& options) {
  maybe_initialize_cuda(options);
  return torch::arange(start, end, step, options);
}
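// Returns true when every Scalar in the list is integral. arange uses this to
// infer torch.int64 as the result dtype when no dtype was given and all of
// start/end/step are integers.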
static inline bool allIntegral(std::initializer_list<std::reference_wrapper<Scalar>> l) {
  for (Scalar& s : l) {
    if (!s.isIntegral()) {
      return false;
    }
  }
  return true;
}
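// Python binding for torch.arange. PythonArgParser matches the call against the
// two signatures below and reports which one matched in r.idx. Each signature
// has an allocating path (build a TensorOptions, call the factory) and an out=
// path (validate the out tensor's type, then call the *_out dispatch helper).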
static PyObject * THPVariable_arange(PyObject* self, PyObject* args, PyObject* kwargs)
{
  static PythonArgParser parser({
    "arange(Scalar end, *, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
    "arange(Scalar start, Scalar end, Scalar step=1, *, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
  });

  ParsedArgs<8> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);

  if (r.idx == 0) {
    if (r.isNone(1)) {
      auto end = r.scalar(0);
      auto scalarType = r.isNone(2) && allIntegral({end}) ? at::ScalarType::Long : r.scalartype(2);
      const auto options = TensorOptions()
          .dtype(scalarType)
          .device(r.device(4))
          .layout(r.layout(3).layout)
          .requires_grad(r.toBool(5));
      return wrap(dispatch_arange(end, options));
    } else {
      check_out_type_matches(r.tensor(1), r.scalartype(2), r.isNone(2), r.layout(3), r.isNone(3),
                             r.device(4), r.isNone(4));
      return wrap(dispatch_arange(r.scalar(0), r.tensor(1)).set_requires_grad(r.toBool(5)));
    }
  } else if (r.idx == 1) {
    if (r.isNone(3)) {
      auto start = r.scalar(0);
      auto end = r.scalar(1);
      auto step = r.scalar(2);
      auto scalarType = r.isNone(4) && allIntegral({start, end, step}) ? at::ScalarType::Long : r.scalartype(4);
      const auto options = TensorOptions()
          .dtype(scalarType)
          .device(r.device(6))
          .layout(r.layout(5).layout)
          .requires_grad(r.toBool(7));
      return wrap(dispatch_arange(start, end, step, options));
    } else {
      check_out_type_matches(r.tensor(3), r.scalartype(4), r.isNone(4), r.layout(5), r.isNone(5),
                             r.device(6), r.isNone(6));
      return wrap(dispatch_arange(r.scalar(0), r.scalar(1), r.scalar(2), r.tensor(3)).set_requires_grad(r.toBool(7)));
    }
  }
  Py_RETURN_NONE;
}
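// torch.range helpers, kept only for backwards compatibility (see the
// deprecation warning in THPVariable_range below): unlike arange, range is
// inclusive of `end`.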
inline Tensor dispatch_range(Scalar start, Scalar end, Scalar step, Tensor result) {
  OptionalDeviceGuard device_guard(device_of(result));
  return at::range_out(result, start, end, step);
}

inline Tensor dispatch_range(Scalar start, Scalar end, Scalar step, const TensorOptions& options) {
  maybe_initialize_cuda(options);
  DeviceGuard device_guard(options.device());
  return torch::range(start, end, step, options);
}
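// Python binding for the deprecated torch.range. It parses a single signature,
// emits a UserWarning pointing callers at torch.arange, and then follows the
// same options/out= split as the other factories.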
static PyObject * THPVariable_range(PyObject* self, PyObject* args, PyObject* kwargs)
{
  static PythonArgParser parser({
    "range(Scalar start, Scalar end, Scalar step=1, *, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
  });

  ParsedArgs<8> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  if (r.idx == 0) {
    PyErr_WarnEx(PyExc_UserWarning, "torch.range is deprecated in favor of torch.arange "
        "and will be removed in 0.5. Note that arange generates values in [start; end), "
        "not [start; end].", 1);
    if (r.isNone(3)) {
      const auto options = TensorOptions()
          .dtype(r.scalartype(4))
          .device(r.device(6))
          .layout(r.layout(5).layout)
          .requires_grad(r.toBool(7));
      return wrap(dispatch_range(r.scalar(0), r.scalar(1), r.scalar(2), options));
    } else {
      check_out_type_matches(r.tensor(3), r.scalartype(4), r.isNone(4),
                             r.layout(5), r.isNone(5),
                             r.device(6), r.isNone(6));
      return wrap(dispatch_range(r.scalar(0), r.scalar(1), r.scalar(2), r.tensor(3)).set_requires_grad(r.toBool(7)));
    }
  }
  Py_RETURN_NONE;
}
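// torch.randint has four call forms (with/without `low`, with/without an
// explicit Generator), each again in an out= flavour and a TensorOptions flavour.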
inline Tensor dispatch_randint(int64_t high, IntArrayRef size, Generator * generator, Tensor result) {
  return at::randint_out(result, high, size, generator);
}

inline Tensor dispatch_randint(int64_t high, IntArrayRef size, Generator * generator, const TensorOptions & options) {
  maybe_initialize_cuda(options);
  return torch::randint(high, size, generator, options);
}

inline Tensor dispatch_randint(int64_t high, IntArrayRef size, Tensor result) {
  return at::randint_out(result, high, size);
}

inline Tensor dispatch_randint(int64_t high, IntArrayRef size, const TensorOptions & options) {
  maybe_initialize_cuda(options);
  return torch::randint(high, size, options);
}

inline Tensor dispatch_randint(int64_t low, int64_t high, IntArrayRef size, Generator * generator, Tensor result) {
  return at::randint_out(result, low, high, size, generator);
}

inline Tensor dispatch_randint(int64_t low, int64_t high, IntArrayRef size, Generator * generator, const TensorOptions & options) {
  maybe_initialize_cuda(options);
  return torch::randint(low, high, size, generator, options);
}

inline Tensor dispatch_randint(int64_t low, int64_t high, IntArrayRef size, Tensor result) {
  return at::randint_out(result, low, high, size);
}

inline Tensor dispatch_randint(int64_t low, int64_t high, IntArrayRef size, const TensorOptions & options) {
  maybe_initialize_cuda(options);
  return torch::randint(low, high, size, options);
}
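// Python binding for torch.randint. Note that, unlike most other factories,
// the result dtype defaults to torch.int64 (ScalarType::Long) when the caller
// does not pass dtype=.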
static PyObject * THPVariable_randint(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  static PythonArgParser parser({
    "randint(int64_t high, IntArrayRef size, *, Generator generator, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
    "randint(int64_t high, IntArrayRef size, *, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
    "randint(int64_t low, int64_t high, IntArrayRef size, *, Generator generator, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
    "randint(int64_t low, int64_t high, IntArrayRef size, *, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
  });

  ParsedArgs<9> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);

  if (r.idx == 0) {
    if (r.isNone(3)) {
      auto high = r.toInt64(0);
      auto size = r.intlist(1);
      auto generator = r.generator(2);
      auto dtype = r.scalartypeWithDefault(4, at::ScalarType::Long);
      auto device = r.device(6);
      const auto options = TensorOptions()
          .dtype(dtype)
          .device(device)
          .layout(r.layout(5).layout)
          .requires_grad(r.toBool(7));
      return wrap(dispatch_randint(high, size, generator, options));
    } else {
      check_out_type_matches(r.tensor(3), r.scalartype(4), r.isNone(4),
                             r.layout(5), r.isNone(5),
                             r.device(6), r.isNone(6));
      return wrap(dispatch_randint(r.toInt64(0), r.intlist(1), r.generator(2), r.tensor(3)).set_requires_grad(r.toBool(7)));
    }
  } else if (r.idx == 1) {
    if (r.isNone(2)) {
      auto high = r.toInt64(0);
      auto size = r.intlist(1);
      auto dtype = r.scalartypeWithDefault(3, at::ScalarType::Long);
      auto device = r.device(5);
      const auto options = TensorOptions()
          .dtype(dtype)
          .device(device)
          .layout(r.layout(4).layout)
          .requires_grad(r.toBool(6));
      return wrap(dispatch_randint(high, size, options));
    } else {
      check_out_type_matches(r.tensor(2), r.scalartype(3), r.isNone(3),
                             r.layout(4), r.isNone(4),
                             r.device(5), r.isNone(5));
      return wrap(dispatch_randint(r.toInt64(0), r.intlist(1), r.tensor(2)).set_requires_grad(r.toBool(6)));
    }
  } else if (r.idx == 2) {
    if (r.isNone(4)) {
      auto low = r.toInt64(0);
      auto high = r.toInt64(1);
      auto size = r.intlist(2);
      auto generator = r.generator(3);
      auto dtype = r.scalartypeWithDefault(5, at::ScalarType::Long);
      auto device = r.device(7);
      const auto options = TensorOptions()
          .dtype(dtype)
          .device(device)
          .layout(r.layout(6).layout)
          .requires_grad(r.toBool(8));
      return wrap(dispatch_randint(low, high, size, generator, options));
    } else {
      check_out_type_matches(r.tensor(4), r.scalartype(5), r.isNone(5),
                             r.layout(6), r.isNone(6),
                             r.device(7), r.isNone(7));
      return wrap(dispatch_randint(r.toInt64(0), r.toInt64(1), r.intlist(2), r.generator(3), r.tensor(4)).set_requires_grad(r.toBool(8)));
    }
  } else if (r.idx == 3) {
    if (r.isNone(3)) {
      auto low = r.toInt64(0);
      auto high = r.toInt64(1);
      auto size = r.intlist(2);
      auto dtype = r.scalartypeWithDefault(4, at::ScalarType::Long);
      auto device = r.device(6);
      const auto options = TensorOptions()
          .dtype(dtype)
          .device(device)
          .layout(r.layout(5).layout)
          .requires_grad(r.toBool(7));
      return wrap(dispatch_randint(low, high, size, options));
    } else {
      check_out_type_matches(r.tensor(3), r.scalartype(4), r.isNone(4),
                             r.layout(5), r.isNone(5),
                             r.device(6), r.isNone(6));
      return wrap(dispatch_randint(r.toInt64(0), r.toInt64(1), r.intlist(2), r.tensor(3)).set_requires_grad(r.toBool(7)));
    }
  }
  Py_RETURN_NONE;
}
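// Tensor-constructing entry points such as torch.as_tensor, torch.from_numpy,
// torch.sparse_coo_tensor and torch.tensor emit a tracer warning
// (WARN_CONSTRUCTOR) when tracing is active, then defer to the shared helpers
// in torch/csrc/utils/tensor_new.h and tensor_numpy.h.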
static PyObject * THPVariable_as_tensor(PyObject* self, PyObject* args, PyObject* kwargs)
{
  jit::tracer::warn("torch.as_tensor", jit::tracer::WARN_CONSTRUCTOR);
  return THPVariable_Wrap(torch::utils::as_tensor(default_type(), args, kwargs));
}
static PyObject * THPVariable_from_numpy(PyObject* module, PyObject* arg)
{
  jit::tracer::warn("torch.from_numpy", jit::tracer::WARN_CONSTRUCTOR);
  auto data = torch::utils::tensor_from_numpy(arg);
  return THPVariable_Wrap(make_variable(std::move(data), /*requires_grad=*/false));
}
static PyObject * THPVariable__promote_types(PyObject* self, PyObject* args, PyObject* kwargs)
{
  static PythonArgParser parser({
    "_promote_types(ScalarType type1, ScalarType type2)",
  });
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  if (r.idx == 0) {
    ScalarType promoted = at::promoteTypes(r.scalartype(0), r.scalartype(1));
    return torch::autograd::utils::wrap(torch::getDtype(promoted));
  }
  Py_RETURN_NONE;
}
static PyObject * THPVariable_sparse_coo_tensor(PyObject* self, PyObject* args, PyObject* kwargs)
{
  jit::tracer::warn("torch.sparse_coo_tensor", jit::tracer::WARN_CONSTRUCTOR);
  return THPVariable_Wrap(torch::utils::sparse_coo_tensor_ctor(default_type(), args, kwargs));
}
static PyObject * THPVariable_tensor(PyObject* self, PyObject* args, PyObject* kwargs)
{
  jit::tracer::warn("torch.tensor", jit::tracer::WARN_CONSTRUCTOR);
  return THPVariable_Wrap(torch::utils::tensor_ctor(default_type(), args, kwargs));
}
static PyObject * THPVariable_get_device(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  static PythonArgParser parser({
    "get_device(Tensor input)",
  });
  ParsedArgs<1> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  if (r.idx == 0) {
    return wrap(r.tensor(0).get_device());
  }
  Py_RETURN_NONE;
}
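// Method table for torch._C._VariableFunctions. Every entry is registered as a
// static method; dsmm/hsmm/saddmm/spmm are legacy aliases that reuse the
// generated bindings for mm, hspmm and sspaddmm.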
static PyMethodDef torch_functions[] = {
  {"arange", (PyCFunction)THPVariable_arange, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"as_tensor", (PyCFunction)THPVariable_as_tensor, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"dsmm", (PyCFunction)THPVariable_mm, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"from_numpy", (PyCFunction)THPVariable_from_numpy, METH_STATIC | METH_O, NULL},
  {"hsmm", (PyCFunction)THPVariable_hspmm, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"_promote_types", (PyCFunction)THPVariable__promote_types, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"randint", (PyCFunction)THPVariable_randint, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"range", (PyCFunction)THPVariable_range, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"saddmm", (PyCFunction)THPVariable_sspaddmm, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"sparse_coo_tensor", (PyCFunction)THPVariable_sparse_coo_tensor, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"spmm", (PyCFunction)THPVariable_mm, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"tensor", (PyCFunction)THPVariable_tensor, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"get_device", (PyCFunction)THPVariable_get_device, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  // ... additional generated operator entries are spliced in here ...
  {NULL}
};
static PyTypeObject THPVariableFunctions = {
  PyVarObject_HEAD_INIT(NULL, 0)
  "torch._C._VariableFunctions",         /* tp_name */
  // ... remaining PyTypeObject slots omitted here ...
};
void initTorchFunctions(PyObject* module) {
  if (PyType_Ready(&THPVariableFunctions) < 0) {
    throw python_error();
  }
  Py_INCREF(&THPVariableFunctions);
  if (PyModule_AddObject(module, "_VariableFunctions", (PyObject*)&THPVariableFunctions) < 0) {
    throw python_error();
  }
}

}} // namespace torch::autograd
 