#include <torch/csrc/python_headers.h>

#include <torch/csrc/Device.h>
#include <torch/csrc/Dtype.h>
#include <torch/csrc/DynamicTypes.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/Generator.h>
#include <torch/csrc/autograd/generated/VariableType.h>
#include <torch/csrc/autograd/python_variable.h>
#include <torch/csrc/jit/tracer.h>
#include <torch/csrc/tensor/python_tensor.h>
#include <torch/csrc/utils/numpy_stub.h>
#include <torch/csrc/utils/object_ptr.h>
#include <torch/csrc/utils/python_numbers.h>
#include <torch/csrc/utils/python_strings.h>
#include <torch/csrc/utils/six.h>
#include <torch/csrc/autograd/variable.h>

#include <ATen/ATen.h>

enum class ParameterType {
  TENSOR, SCALAR, INT64, DOUBLE, TENSOR_LIST, INT_LIST, GENERATOR,
  BOOL, STORAGE, PYOBJECT, SCALARTYPE, LAYOUT, DEVICE, STRING
};

struct FunctionParameter;
struct FunctionSignature;
// struct PythonArgParser (declaration excerpt):
  explicit PythonArgParser(std::vector<std::string> fmts, bool traceable = false);

  void print_error(PyObject* args, PyObject* kwargs, PyObject* parsed_args[]);
  PythonArgs raw_parse(PyObject* args, PyObject* kwargs, PyObject* parsed_args[]);

  std::vector<FunctionSignature> signatures_;
  std::string function_name;
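// Illustrative usage sketch (not part of this header): a generated binding
// typically declares a static parser with one or more signature strings,
// parses (args, kwargs) into a fixed-size ParsedArgs buffer, and then reads
// typed values back by positional index. The "clamp" signature and the call
// below are only examples, not the actual generated code.
//
//   static PythonArgParser parser({
//     "clamp(Tensor input, Scalar min, Scalar max)",
//   });
//   ParsedArgs<3> parsed_args;
//   auto r = parser.parse(args, kwargs, parsed_args);
//   auto result = at::clamp(r.tensor(0), r.scalar(1), r.scalar(2));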
// struct PythonArgs (excerpt): the result of a successful parse. The
// constructor initializer list below stores the matched signature and the
// traceable flag; the inline accessors convert argument slot i to a typed value.
    , traceable(traceable)
    , signature(signature)

  inline std::vector<at::Tensor> tensorlist(int i);
  template<int N>
  inline std::array<at::Tensor, N> tensorlist_n(int i);
  inline std::vector<int64_t> intlist(int i);
  inline std::vector<int64_t> intlistWithDefault(int i, std::vector<int64_t> default_intlist);
  inline at::ScalarType scalartype(int i);
  inline at::ScalarType scalartypeWithDefault(int i, at::ScalarType default_scalartype);
  inline std::string string(int i);
  inline PyObject* pyobject(int i);
  inline int64_t toInt64(int i);
  inline int64_t toInt64WithDefault(int i, int64_t default_int);
  inline double toDouble(int i);
  inline double toDoubleWithDefault(int i, double default_double);
  inline bool toBool(int i);
  inline bool toBoolWithDefault(int i, bool default_bool);
  inline bool isNone(int i);
// struct FunctionSignature (excerpt):
  bool parse(PyObject* args, PyObject* kwargs, PyObject* dst[], bool raise_exception);
  std::string toString() const;

  std::vector<FunctionParameter> params;
  ssize_t max_pos_args;
// struct FunctionParameter (excerpt):
  bool check(PyObject* obj);
  void set_default_str(const std::string& str);
  std::string type_name() const;

  bool allow_numbers_as_tensors = false;
  PyObject* python_name;
  std::vector<int64_t> default_intlist;
  double default_double;
  at::ScalarType default_scalartype;
// PythonArgParser::parse<N> (excerpt): checks that the caller's ParsedArgs<N>
// buffer is large enough before dispatching to raw_parse.
  throw ValueError("PythonArgParser: dst ParsedArgs buffer does not have enough capacity, expected %d (got %d)",
      (int)max_args, (int)N);
  return raw_parse(args, kwargs, dst.args);
// Converting a Tensor argument (excerpt): a plain Python number is accepted
// and wrapped into a 0-dim tensor flagged as a "wrapped number".
  PyObject* obj = args[i];
  if (!THPVariable_Check(obj)) {
    at::Scalar scalar;
    if (THPUtils_checkLong(obj)) {
      scalar = at::Scalar(THPUtils_unpackLong(obj));
    } else if (THPUtils_checkDouble(obj)) {
      scalar = at::Scalar(THPUtils_unpackDouble(obj));
    } else {
      throw TypeError("expected Tensor as argument %d, but got %s", i,
          Py_TYPE(obj)->tp_name);
    }
    auto tensor = scalar_to_tensor(scalar);
    tensor.unsafeGetTensorImpl()->set_wrapped_number(true);
    return autograd::make_variable(tensor);
  }
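// For example (illustrative, not from this file): a Python call such as
// torch.mul(t, 2) can reach this path with the int 2, which then arrives at
// the C++ kernel as a 0-dim tensor whose wrapped-number bit lets later
// type-promotion logic treat it like a Python scalar rather than an ordinary
// tensor operand.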
// PythonArgs::scalar / scalarWithDefault / scalarOptional (excerpt): a missing
// argument falls back to the signature's default; otherwise the Python value
// is unpacked as an integer, complex, or floating-point Scalar.
  return scalarWithDefault(i, signature.params[i].default_scalar);

  if (!args[i]) return default_scalar;
  if (THPVariable_Check(args[i])) {
    // ... (tensor argument handling elided in this excerpt)
  }
  if (THPUtils_checkLong(args[i])) {
    return at::Scalar(static_cast<int64_t>(THPUtils_unpackLong(args[i])));
  }
  if (PyComplex_Check(args[i])) {
    return at::Scalar(THPUtils_unpackComplexDouble(args[i]));
  }
  return at::Scalar(THPUtils_unpackDouble(args[i]));

  if (!args[i]) return c10::nullopt;
inline std::vector<at::Tensor> PythonArgs::tensorlist(int i) {
  if (!args[i]) return std::vector<at::Tensor>();
  auto tuple = six::isTuple(args[i]);
  THPObjectPtr arg = six::maybeAsTuple(args[i]);
  auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get());
  std::vector<at::Tensor> res(size);
  for (int idx = 0; idx < size; idx++) {
    PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx) : PyList_GET_ITEM(arg.get(), idx);
    if (!THPVariable_Check(obj)) {
      throw TypeError("expected Tensor as element %d in argument %d, but got %s",
          idx, i, Py_TYPE(obj)->tp_name);
    }
    res[idx] = reinterpret_cast<THPVariable*>(obj)->cdata;
  }
  return res;
}
template<int N>
inline std::array<at::Tensor, N> PythonArgs::tensorlist_n(int i) {
  auto res = std::array<at::Tensor, N>();
  if (!args[i]) return res;
  auto tuple = six::isTuple(args[i]);
  THPObjectPtr arg = six::maybeAsTuple(args[i]);
  auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get());
  if (size != N) {
    throw TypeError("expected tuple of %d elements but got %d", N, (int)size);
  }
  for (int idx = 0; idx < size; idx++) {
    PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx) : PyList_GET_ITEM(arg.get(), idx);
    if (!THPVariable_Check(obj)) {
      throw TypeError("expected Tensor as element %d in argument %d, but got %s",
          idx, i, Py_TYPE(obj)->tp_name);
    }
    res[idx] = reinterpret_cast<THPVariable*>(obj)->cdata;
  }
  return res;
}
inline std::vector<int64_t> PythonArgs::intlist(int i) {
  return intlistWithDefault(i, signature.params[i].default_intlist);
}

inline std::vector<int64_t> PythonArgs::intlistWithDefault(int i, std::vector<int64_t> default_intlist) {
  if (!args[i]) return default_intlist;
  PyObject* arg = args[i];
  auto size = signature.params[i].size;
  if (size > 0 && THPUtils_checkLong(arg)) {
    return std::vector<int64_t>(size, THPUtils_unpackIndex(arg));
  }
  auto tuple = PyTuple_Check(arg);
  size = tuple ? PyTuple_GET_SIZE(arg) : PyList_GET_SIZE(arg);
  std::vector<int64_t> res(size);
  for (int idx = 0; idx < size; idx++) {
    PyObject* obj = tuple ? PyTuple_GET_ITEM(arg, idx) : PyList_GET_ITEM(arg, idx);
    try {
      // While tracing, an element may itself be a 0-dim tensor; stash it so the
      // tracer can record where this int-list element came from.
      if (traceable && jit::tracer::isTracing() && THPVariable_Check(obj)) {
        auto& var = THPVariable_Unpack(obj);
        jit::tracer::ArgumentStash::stashIntArrayRefElem(
            signature.params[i].name, size, idx, var);
        res[idx] = var.item<int64_t>();
      } else {
        res[idx] = THPUtils_unpackIndex(obj);
      }
    } catch (const std::exception& e) {
      throw TypeError("%s(): argument '%s' must be %s, but found element of type %s at pos %d",
          signature.name.c_str(), signature.params[i].name.c_str(),
          signature.params[i].type_name().c_str(), Py_TYPE(obj)->tp_name, idx + 1);
    }
  }
  return res;
}
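// Illustrative examples for intlistWithDefault above: for a formal whose
// declared size is 2, a bare Python int 3 expands to {3, 3}; a sequence such
// as (3, 5) or [3, 5] is unpacked element by element, and a TypeError names
// the offending element if one of them is not an integer.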
inline at::ScalarType PythonArgs::scalartypeWithDefault(int i, at::ScalarType default_scalartype) {
  if (!args[i]) return default_scalartype;
  return scalartype(i);
}

inline at::ScalarType PythonArgs::scalartype(int i) {
  if (!args[i]) {
    auto scalartype = signature.params[i].default_scalartype;
    return (scalartype == at::ScalarType::Undefined) ?
        torch::tensors::get_default_tensor_type().scalarType() : scalartype;
  }
  return reinterpret_cast<THPDtype*>(args[i])->scalar_type;
}
// PythonArgs::scalartypeOptional (excerpt):
  return scalartype(i);
inline const THPLayout& PythonArgs::layout(int i) {
  if (!args[i]) return *signature.params[i].default_layout;
  return *reinterpret_cast<THPLayout*>(args[i]);
}

inline const THPLayout& PythonArgs::layoutWithDefault(int i, const THPLayout& default_layout) {
  if (!args[i]) return default_layout;
  return layout(i);
}
static std::string cuda_str = "cuda";
static std::string cpu_str = "cpu";
static std::string cuda_prefix = "cuda:";
static std::string cpu_prefix = "cpu:";

inline at::Device PythonArgs::device(int i) {
  // No argument given: fall back to the default tensor type's device.
  if (!args[i]) {
    const auto& default_tensor_type = torch::tensors::get_default_tensor_type();
    return at::Device(default_tensor_type.device_type());
  }
  // torch.device object.
  if (THPDevice_Check(args[i])) {
    const auto device = reinterpret_cast<THPDevice*>(args[i]);
    return device->device;
  }
  // Bare integer: interpreted as a CUDA device index.
  if (THPUtils_checkLong(args[i])) {
    const auto device_index = THPUtils_unpackLong(args[i]);
    AT_CHECK(device_index >= 0, "Device index must not be negative");
    return at::Device(at::DeviceType::CUDA, device_index);
  }
  // Otherwise a device string such as "cuda:0" or "cpu".
  const std::string& device_str = THPUtils_unpackString(args[i]);
  return at::Device(device_str);
}
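// Illustrative Python-side forms that end up in this accessor (not an
// exhaustive list): device=torch.device("cuda:1"), device="cpu",
// device=1 (a bare index, interpreted as a CUDA device), or omitting the
// device argument entirely, which falls back to the default tensor type's device.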
// PythonArgs::deviceWithDefault (excerpt):
  if (!args[i]) return default_device;
inline std::string PythonArgs::string(int i) {
  if (!args[i]) return "";
  return THPUtils_unpackString(args[i]);
}

inline int64_t PythonArgs::toInt64(int i) {
  if (!args[i]) return signature.params[i].default_int;
  if (traceable && jit::tracer::isTracing() && THPVariable_Check(args[i])) {
    auto& var = THPVariable_Unpack(args[i]);
    jit::tracer::ArgumentStash::stashValue(
        signature.params[i].name, idx, var, jit::IntType::get());
  }
  return THPUtils_unpackLong(args[i]);
}
inline int64_t PythonArgs::toInt64WithDefault(int i, int64_t default_int) {
  if (!args[i]) return default_int;
  return toInt64(i);
}

inline double PythonArgs::toDouble(int i) {
  if (!args[i]) return signature.params[i].default_double;
  return THPUtils_unpackDouble(args[i]);
}

inline double PythonArgs::toDoubleWithDefault(int i, double default_double) {
  if (!args[i]) return default_double;
  return toDouble(i);
}

inline bool PythonArgs::toBool(int i) {
  if (!args[i]) return signature.params[i].default_bool;
  return args[i] == Py_True;
}

inline bool PythonArgs::toBoolWithDefault(int i, bool default_bool) {
  if (!args[i]) return default_bool;
  return toBool(i);
}

inline bool PythonArgs::isNone(int i) {
  return args[i] == nullptr;
}
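// Note on the convention above: isNone() reports an empty (null) argument
// slot, and each accessor checks the same condition first, falling back to the
// default recorded in the matched signature when the slot is empty.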
// PythonArgs::generator / storage (excerpt):
  if (!args[i]) return nullptr;

  return createStorage(args[i]);
inline PyObject* PythonArgs::pyobject(int i) {
  if (!args[i]) return Py_None;
  return args[i];
}
// Scalar represents a 0-dimensional tensor which contains a single element.
// Device represents a compute device on which a tensor is located.