python_torch_functions.cpp
// ${generated_comment}

// Python bindings for torch.* functions implemented through ATen.
//
// The functions are bound as static methods on a class
// torch._C._VariableFunctions which is also aliased as Variable._torch
// and also copied into 'torch' module.
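// (For example, torch.arange(...) ends up calling THPVariable_arange below,
// which is registered in the torch_functions method table at the bottom of
// this file.)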

#include <Python.h>

#include "python_torch_functions_dispatch.h"

#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/Dtype.h"
#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/tensor_layouts.h"
#include "torch/csrc/utils/tensor_new.h"
#include "torch/csrc/utils/tensor_numpy.h"
#include "torch/csrc/jit/tracer.h"
#include "torch/csrc/autograd/generated/variable_factories.h"
#include "torch/csrc/utils/structseq.h"

#include <ATen/ATen.h>

#include <functional>
#include <initializer_list>
#include <stdexcept>
#include <utility>

using at::Tensor;
using at::Device;
using at::Scalar;
using at::ScalarType;
using at::Backend;
using at::OptionalDeviceGuard;
using at::DeviceGuard;
using at::TensorOptions;

using namespace torch::autograd::utils;

namespace torch { namespace autograd {

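// Checks that an explicit `out=` tensor is consistent with any dtype, layout,
// or device the caller also passed explicitly. Arguments flagged *_is_none
// were omitted and default to the corresponding property of `result`.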
static void check_out_type_matches(Tensor result,
    ScalarType scalarType, bool scalarType_is_none,
    const THPLayout& layout, bool layout_is_none,
    const Device& device, bool device_is_none) {
  if (scalarType_is_none && layout_is_none && device_is_none) {  // common case
    return;
  }
  auto scalarType_arg = scalarType_is_none ? result.scalar_type() : scalarType;
  auto layout_arg = layout_is_none ? *torch::getLayout(result.type().backend()) : layout;
  auto device_type_arg = device_is_none ? torch::getDeviceType(result.type()) : device.type();
  const auto& type = torch::getVariableType(scalarType_arg, layout_arg, device_type_arg);
  if (result.type() != type) {
    AT_ERROR(
        "type corresponding to ", type.toString(),
        " does not match type of out parameter (", result.type().toString(), ")");
  }
}

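// The hand-written bindings below follow a common pattern: an out-variant
// dispatch helper writes into a caller-supplied tensor via at::*_out, while a
// factory variant builds a new tensor from TensorOptions. Both release the
// GIL around the native call, and the factory path first makes sure CUDA is
// initialized when the options request a CUDA device.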
inline Tensor dispatch_arange(Scalar end, Tensor result) {
  AutoNoGIL no_gil;
  return at::arange_out(result, end);
}

inline Tensor dispatch_arange(Scalar end, const TensorOptions& options) {
  maybe_initialize_cuda(options);
  AutoNoGIL no_gil;
  return torch::arange(end, options);
}

inline Tensor dispatch_arange(Scalar start, Scalar end, Scalar step, Tensor result) {
  AutoNoGIL no_gil;
  return at::arange_out(result, start, end, step);
}

inline Tensor dispatch_arange(Scalar start, Scalar end, Scalar step, const TensorOptions& options) {
  maybe_initialize_cuda(options);
  AutoNoGIL no_gil;
  return torch::arange(start, end, step, options);
}

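// arange defaults to an int64 result when no dtype is given and start, end,
// and step are all integral; otherwise it falls back to the default dtype.
// allIntegral implements that check.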
static inline bool allIntegral(std::initializer_list<std::reference_wrapper<Scalar>> l) {
  for (Scalar& s : l) {
    if (!s.isIntegral()) {
      return false;
    }
  }
  return true;
}

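// torch.arange: two signatures (end-only and start/end/step). PythonArgParser
// selects the matching one (r.idx), and r.isNone() on the `out` argument
// decides between the factory path and the out= path.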
static PyObject * THPVariable_arange(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "arange(Scalar end, *, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
    "arange(Scalar start, Scalar end, Scalar step=1, *, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
  });

  ParsedArgs<8> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);

  if (r.idx == 0) {
    if (r.isNone(1)) {
      auto end = r.scalar(0);
      // NOTE: r.scalartype(X) gives the default dtype if r.isNone(X)
      auto scalarType = r.isNone(2) && allIntegral({end}) ? at::ScalarType::Long : r.scalartype(2);
      const auto options = TensorOptions()
          .dtype(scalarType)
          .device(r.device(4))
          .layout(r.layout(3).layout)
          .requires_grad(r.toBool(5));
      return wrap(dispatch_arange(end, options));
    } else {
      check_out_type_matches(r.tensor(1), r.scalartype(2), r.isNone(2), r.layout(3), r.isNone(3),
                             r.device(4), r.isNone(4));
      return wrap(dispatch_arange(r.scalar(0), r.tensor(1)).set_requires_grad(r.toBool(5)));
    }
  } else if (r.idx == 1) {
    if (r.isNone(3)) {
      auto start = r.scalar(0);
      auto end = r.scalar(1);
      auto step = r.scalar(2);
      // NOTE: r.scalartype(X) gives the default dtype if r.isNone(X)
      auto scalarType = r.isNone(4) && allIntegral({start, end, step}) ? at::ScalarType::Long : r.scalartype(4);
      const auto options = TensorOptions()
          .dtype(scalarType)
          .device(r.device(6))
          .layout(r.layout(5).layout)
          .requires_grad(r.toBool(7));
      return wrap(dispatch_arange(start, end, step, options));
    } else {
      check_out_type_matches(r.tensor(3), r.scalartype(4), r.isNone(4), r.layout(5), r.isNone(5),
                             r.device(6), r.isNone(6));
      return wrap(dispatch_arange(r.scalar(0), r.scalar(1), r.scalar(2), r.tensor(3)).set_requires_grad(r.toBool(7)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

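// torch.range: deprecated in favor of torch.arange. Unlike arange, range
// includes the end point, which is why a UserWarning is emitted on every call.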
inline Tensor dispatch_range(Scalar start, Scalar end, Scalar step, Tensor result) {
  AutoNoGIL no_gil;
  OptionalDeviceGuard device_guard(device_of(result));
  return at::range_out(result, start, end, step);
}

inline Tensor dispatch_range(Scalar start, Scalar end, Scalar step, const TensorOptions& options) {
  maybe_initialize_cuda(options);
  AutoNoGIL no_gil;
  DeviceGuard device_guard(options.device());
  return torch::range(start, end, step, options);
}

static PyObject * THPVariable_range(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "range(Scalar start, Scalar end, Scalar step=1, *, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
  });

  ParsedArgs<8> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  if (r.idx == 0) {
    PyErr_WarnEx(PyExc_UserWarning, "torch.range is deprecated in favor of torch.arange "
        "and will be removed in 0.5. Note that arange generates values in [start; end), "
        "not [start; end].", 1);
    if (r.isNone(3)) {
      const auto options = TensorOptions()
          .dtype(r.scalartype(4))
          .device(r.device(6))
          .layout(r.layout(5).layout)
          .requires_grad(r.toBool(7));
      return wrap(dispatch_range(r.scalar(0), r.scalar(1), r.scalar(2), options));
    } else {
      check_out_type_matches(r.tensor(3), r.scalartype(4), r.isNone(4),
                             r.layout(5), r.isNone(5),
                             r.device(6), r.isNone(6));
      return wrap(dispatch_range(r.scalar(0), r.scalar(1), r.scalar(2), r.tensor(3)).set_requires_grad(r.toBool(7)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

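// randint dispatch helpers: eight overloads covering every combination of
// {high-only, low-and-high} x {with, without an explicit Generator} x
// {out= tensor, TensorOptions factory}.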
inline Tensor dispatch_randint(int64_t high, IntArrayRef size, Generator * generator, Tensor result) {
  AutoNoGIL no_gil;
  return at::randint_out(result, high, size, generator);
}
inline Tensor dispatch_randint(int64_t high, IntArrayRef size, Generator * generator, const TensorOptions & options) {
  maybe_initialize_cuda(options);
  AutoNoGIL no_gil;
  return torch::randint(high, size, generator, options);
}
inline Tensor dispatch_randint(int64_t high, IntArrayRef size, Tensor result) {
  AutoNoGIL no_gil;
  return at::randint_out(result, high, size);
}
inline Tensor dispatch_randint(int64_t high, IntArrayRef size, const TensorOptions & options) {
  maybe_initialize_cuda(options);
  AutoNoGIL no_gil;
  return torch::randint(high, size, options);
}
inline Tensor dispatch_randint(int64_t low, int64_t high, IntArrayRef size, Generator * generator, Tensor result) {
  AutoNoGIL no_gil;
  return at::randint_out(result, low, high, size, generator);
}
inline Tensor dispatch_randint(int64_t low, int64_t high, IntArrayRef size, Generator * generator, const TensorOptions & options) {
  maybe_initialize_cuda(options);
  AutoNoGIL no_gil;
  return torch::randint(low, high, size, generator, options);
}
inline Tensor dispatch_randint(int64_t low, int64_t high, IntArrayRef size, Tensor result) {
  AutoNoGIL no_gil;
  return at::randint_out(result, low, high, size);
}
inline Tensor dispatch_randint(int64_t low, int64_t high, IntArrayRef size, const TensorOptions & options) {
  maybe_initialize_cuda(options);
  AutoNoGIL no_gil;
  return torch::randint(low, high, size, options);
}

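// torch.randint: four Python signatures (with/without low, with/without an
// explicit generator). When no dtype is given the result defaults to int64
// via scalartypeWithDefault.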
static PyObject * THPVariable_randint(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "randint(int64_t high, IntArrayRef size, *, Generator generator, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
    "randint(int64_t high, IntArrayRef size, *, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
    "randint(int64_t low, int64_t high, IntArrayRef size, *, Generator generator, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
    "randint(int64_t low, int64_t high, IntArrayRef size, *, Tensor out=None, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool requires_grad=False)",
  }, /*traceable=*/false);

  ParsedArgs<9> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  if (r.idx == 0) {
    if (r.isNone(3)) {
      auto high = r.toInt64(0);
      auto size = r.intlist(1);
      auto generator = r.generator(2);
      // NOTE: r.scalartype(X) gives the default dtype if r.isNone(X)
      auto dtype = r.scalartypeWithDefault(4, at::ScalarType::Long);
      auto device = r.device(6);
      const auto options = TensorOptions()
          .dtype(dtype)
          .device(device)
          .layout(r.layout(5).layout)
          .requires_grad(r.toBool(7));
      return wrap(dispatch_randint(high, size, generator, options));
    } else {
      check_out_type_matches(r.tensor(3), r.scalartype(4), r.isNone(4),
                             r.layout(5), r.isNone(5),
                             r.device(6), r.isNone(6));
      return wrap(dispatch_randint(r.toInt64(0), r.intlist(1), r.generator(2), r.tensor(3)).set_requires_grad(r.toBool(7)));
    }
  } else if (r.idx == 1) {
    if (r.isNone(2)) {
      auto high = r.toInt64(0);
      auto size = r.intlist(1);
      // NOTE: r.scalartype(X) gives the default dtype if r.isNone(X)
      auto dtype = r.scalartypeWithDefault(3, at::ScalarType::Long);
      auto device = r.device(5);
      const auto options = TensorOptions()
          .dtype(dtype)
          .device(device)
          .layout(r.layout(4).layout)
          .requires_grad(r.toBool(6));
      return wrap(dispatch_randint(high, size, options));
    } else {
      check_out_type_matches(r.tensor(2), r.scalartype(3), r.isNone(3),
                             r.layout(4), r.isNone(4),
                             r.device(5), r.isNone(5));
      return wrap(dispatch_randint(r.toInt64(0), r.intlist(1), r.tensor(2)).set_requires_grad(r.toBool(6)));
    }
  } else if (r.idx == 2) {
    if (r.isNone(4)) {
      auto low = r.toInt64(0);
      auto high = r.toInt64(1);
      auto size = r.intlist(2);
      auto generator = r.generator(3);
      // NOTE: r.scalartype(X) gives the default dtype if r.isNone(X)
      auto dtype = r.scalartypeWithDefault(5, at::ScalarType::Long);
      auto device = r.device(7);
      const auto options = TensorOptions()
          .dtype(dtype)
          .device(device)
          .layout(r.layout(6).layout)
          .requires_grad(r.toBool(8));
      return wrap(dispatch_randint(low, high, size, generator, options));
    } else {
      check_out_type_matches(r.tensor(4), r.scalartype(5), r.isNone(5),
                             r.layout(6), r.isNone(6),
                             r.device(7), r.isNone(7));
      return wrap(dispatch_randint(r.toInt64(0), r.toInt64(1), r.intlist(2), r.generator(3), r.tensor(4)).set_requires_grad(r.toBool(8)));
    }
  } else if (r.idx == 3) {
    if (r.isNone(3)) {
      auto low = r.toInt64(0);
      auto high = r.toInt64(1);
      auto size = r.intlist(2);
      // NOTE: r.scalartype(X) gives the default dtype if r.isNone(X)
      auto dtype = r.scalartypeWithDefault(4, at::ScalarType::Long);
      auto device = r.device(6);
      const auto options = TensorOptions()
          .dtype(dtype)
          .device(device)
          .layout(r.layout(5).layout)
          .requires_grad(r.toBool(7));
      return wrap(dispatch_randint(low, high, size, options));
    } else {
      check_out_type_matches(r.tensor(3), r.scalartype(4), r.isNone(4),
                             r.layout(5), r.isNone(5),
                             r.device(6), r.isNone(6));
      return wrap(dispatch_randint(r.toInt64(0), r.toInt64(1), r.intlist(2), r.tensor(3)).set_requires_grad(r.toBool(7)));
    }
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

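// The constructor-style entry points below (as_tensor, from_numpy,
// sparse_coo_tensor, tensor) notify the JIT tracer (WARN_CONSTRUCTOR) because
// their results are recorded as constants in a trace.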
static PyObject * THPVariable_as_tensor(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("torch.as_tensor", jit::tracer::WARN_CONSTRUCTOR);
  return THPVariable_Wrap(torch::utils::as_tensor(default_type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

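// from_numpy shares the NumPy array's memory rather than copying it; the
// resulting Variable is created with requires_grad=false.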
static PyObject * THPVariable_from_numpy(PyObject* module, PyObject* arg)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("torch.from_numpy", jit::tracer::WARN_CONSTRUCTOR);
  auto data = torch::utils::tensor_from_numpy(arg);
  return THPVariable_Wrap(make_variable(std::move(data), /*requires_grad=*/false));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable__promote_types(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "_promote_types(ScalarType type1, ScalarType type2)",
  });
  ParsedArgs<2> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  if (r.idx == 0) {
    ScalarType promoted = at::promoteTypes(r.scalartype(0), r.scalartype(1));
    return torch::autograd::utils::wrap(torch::getDtype(promoted));
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_sparse_coo_tensor(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("torch.sparse_coo_tensor", jit::tracer::WARN_CONSTRUCTOR);
  return THPVariable_Wrap(torch::utils::sparse_coo_tensor_ctor(default_type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_tensor(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  jit::tracer::warn("torch.tensor", jit::tracer::WARN_CONSTRUCTOR);
  return THPVariable_Wrap(torch::utils::tensor_ctor(default_type(), args, kwargs));
  END_HANDLE_TH_ERRORS
}

static PyObject * THPVariable_get_device(PyObject* self_, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "get_device(Tensor input)",
  }, /*traceable=*/false);

  ParsedArgs<1> parsed_args;
  auto r = parser.parse(args, kwargs, parsed_args);
  if (r.idx == 0) {
    return wrap(r.tensor(0).get_device());
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}

// generated methods start here

${py_methods}

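// Method table for torch._C._VariableFunctions. Entries like dsmm, hsmm,
// saddmm and spmm are legacy sparse aliases that reuse the generated mm,
// hspmm, sspaddmm and mm bindings; ${py_method_defs} expands to the generated
// entries, and the {NULL} sentinel terminates the table.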
static PyMethodDef torch_functions[] = {
  {"arange", (PyCFunction)THPVariable_arange, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"as_tensor", (PyCFunction)THPVariable_as_tensor, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"dsmm", (PyCFunction)THPVariable_mm, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"from_numpy", (PyCFunction)THPVariable_from_numpy, METH_STATIC | METH_O, NULL},
  {"hsmm", (PyCFunction)THPVariable_hspmm, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"_promote_types", (PyCFunction)THPVariable__promote_types, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"randint", (PyCFunction)THPVariable_randint, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"range", (PyCFunction)THPVariable_range, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"saddmm", (PyCFunction)THPVariable_sspaddmm, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"sparse_coo_tensor", (PyCFunction)THPVariable_sparse_coo_tensor, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"spmm", (PyCFunction)THPVariable_mm, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"tensor", (PyCFunction)THPVariable_tensor, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  {"get_device", (PyCFunction)THPVariable_get_device, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
  ${py_method_defs}
  {NULL}
};

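// Static type object backing torch._C._VariableFunctions. Only tp_methods is
// populated; the class is never instantiated and simply serves as a namespace
// for the static methods above.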
static PyTypeObject THPVariableFunctions = {
  PyVarObject_HEAD_INIT(NULL, 0)
  "torch._C._VariableFunctions",         /* tp_name */
  0,                                     /* tp_basicsize */
  0,                                     /* tp_itemsize */
  0,                                     /* tp_dealloc */
  0,                                     /* tp_print */
  0,                                     /* tp_getattr */
  0,                                     /* tp_setattr */
  0,                                     /* tp_reserved */
  0,                                     /* tp_repr */
  0,                                     /* tp_as_number */
  0,                                     /* tp_as_sequence */
  0,                                     /* tp_as_mapping */
  0,                                     /* tp_hash */
  0,                                     /* tp_call */
  0,                                     /* tp_str */
  0,                                     /* tp_getattro */
  0,                                     /* tp_setattro */
  0,                                     /* tp_as_buffer */
  Py_TPFLAGS_DEFAULT,                    /* tp_flags */
  NULL,                                  /* tp_doc */
  0,                                     /* tp_traverse */
  0,                                     /* tp_clear */
  0,                                     /* tp_richcompare */
  0,                                     /* tp_weaklistoffset */
  0,                                     /* tp_iter */
  0,                                     /* tp_iternext */
  torch_functions,                       /* tp_methods */
  0,                                     /* tp_members */
  0,                                     /* tp_getset */
  0,                                     /* tp_base */
  0,                                     /* tp_dict */
  0,                                     /* tp_descr_get */
  0,                                     /* tp_descr_set */
  0,                                     /* tp_dictoffset */
  0,                                     /* tp_init */
  0,                                     /* tp_alloc */
  0                                      /* tp_new */
};

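// Called during extension-module initialization: finalizes the type and
// exposes it as torch._C._VariableFunctions. PyModule_AddObject steals the
// reference it is given, hence the Py_INCREF beforehand.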
void initTorchFunctions(PyObject* module) {
  if (PyType_Ready(&THPVariableFunctions) < 0) {
    throw python_error();
  }
  Py_INCREF(&THPVariableFunctions);
  if (PyModule_AddObject(module, "_VariableFunctions", (PyObject*)&THPVariableFunctions) < 0) {
    throw python_error();
  }
}

}} // namespace torch::autograd