Caffe2 - C++ API
A deep learning, cross platform ML framework
pybind_utils.h
1 #pragma once
2 
3 #include <ATen/core/ivalue.h>
4 #include <ATen/core/jit_type.h>
5 #include <ATen/core/stack.h>
6 #include <torch/csrc/Device.h>
7 #include <torch/csrc/jit/operator.h>
8 #include <torch/csrc/jit/script/module.h>
9 #include <torch/csrc/utils/auto_gil.h>
10 #include <torch/csrc/utils/pybind.h>
11 #include <torch/csrc/utils/six.h>
12 
13 #include <ATen/core/function_schema.h>
14 #include <c10/util/Exception.h>
15 
16 #include <algorithm>
17 #include <cstddef>
18 #include <string>
19 #include <utility>
20 #include <vector>
21 
22 // The visibility attribute is to avoid a warning about storing a field in the
23 // struct that has a different visibility (from pybind) than the struct.
24 #ifdef _WIN32
25 #define VISIBILITY_HIDDEN
26 #else
27 #define VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
28 #endif
29 
30 namespace torch {
31 namespace jit {
32 namespace detail {
33 
34 using ::c10::Argument;
35 using ::c10::FunctionSchema;
36 
37 // error reporting: when reporting user-caused errors, these functions should
38 // not use AT_ERROR macros, since these macros add stack trace information
39 // that is confusing to display to the end user since it always reports
40 // locations in libtorch code rather than user code.
41 
42 inline void findErrorInKwargs(const FunctionSchema& schema, py::kwargs kwargs) {
43  const auto& arguments = schema.arguments();
44  // First check if any of the kwargs are unknown, i.e. don't match the name of
45  // any argument in the schema.
46  for (const auto& kwarg : kwargs) {
47  const auto key = py::cast<std::string>(kwarg.first);
48  if (!std::count_if(
49  arguments.begin(),
50  arguments.end(),
51  [&key](const Argument& argument) {
52  return argument.name() == key;
53  })) {
54  throw std::runtime_error(c10::str(
55  "Unknown keyword argument '",
56  key,
57  "' for operator '",
58  schema.name(),
59  "'. Schema: ",
60  schema));
61  }
62  }
63  // If there are unconsumed kwargs but none of them were unknown, the first
64  // positional argument present in the kwargs is duplicated.
65  for (const auto& argument : arguments) {
66  if (kwargs.contains(argument.name().c_str())) {
67  AT_ASSERT(!argument.default_value());
68  throw std::runtime_error(c10::str(
69  "Argument '",
70  argument.name(),
71  "' specified both as positional and ",
72  "keyword argument. Schema: ",
73  schema));
74  }
75  }
76 }
77 } // namespace detail
78 
79 inline IValue toIValue(py::handle input) {
80  if (THPVariable_Check(input.ptr())) {
81  auto ten = py::cast<at::Tensor>(input);
82  if (ten.is_sparse()) {
83  AT_ERROR("sparse tensors not supported");
84  }
85  return ten;
86  } else if (six::isTuple(input)) {
87  py::tuple input_tuple = py::cast<py::tuple>(input);
88  Stack s;
89  s.reserve(input_tuple.size());
90  for (py::handle elem : input_tuple) {
91  s.push_back(toIValue(elem));
92  }
93  return Tuple::create(s);
94  } else {
95  AT_ERROR(
96  "Only tensors and (possibly nested) tuples of tensors are supported "
97  "as inputs or outputs of traced functions");
98  }
99 }
100 
101 inline Stack toStack(const py::tuple& inputs) {
102  return toIValue(inputs).toTuple()->elements();
103 }
104 
// Forward declaration: converts `obj` to an IValue of the given `type`.
// When `N` is set, a single int/float may be broadcast into a fixed-size
// list of N copies (see the ListType case in the definition below).
inline IValue toIValue(
    py::handle obj,
    const TypePtr& type,
    c10::optional<int32_t> N = c10::nullopt);
109 
110 inline IValue createGenericList(py::handle obj, const TypePtr& elem_type) {
111  std::vector<IValue> elems;
112  for (auto elem : obj) {
113  elems.push_back(toIValue(elem, elem_type));
114  }
115  return List<IValue>::create(std::move(elems));
116 }
117 
118 inline IValue createGenericDict(
119  py::handle obj,
120  const TypePtr& key_type,
121  const TypePtr& value_type) {
122  at::ivalue::UnorderedMap elems;
123  elems.reserve(py::len(obj));
124  for (auto key : obj) {
125  elems.insert(std::make_pair(
126  toIValue(key, key_type), toIValue(obj[key], value_type)));
127  }
128  return at::ivalue::GenericDict::create(std::move(elems));
129 }
130 
131 inline IValue toIValue(
132  py::handle obj,
133  const TypePtr& type,
135  switch (type->kind()) {
136  case TypeKind::TensorType:
137  case TypeKind::AutogradZeroTensorType:
138  case TypeKind::DimensionedTensorType:
139  case TypeKind::CompleteTensorType: {
140  auto var = py::cast<autograd::Variable>(obj);
141  if (var.is_sparse()) {
142  AT_ERROR("sparse tensors not supported");
143  }
144  return var;
145  }
146  case TypeKind::FloatType:
147  return py::cast<double>(obj);
148  case TypeKind::IntType:
149  return py::cast<int64_t>(obj);
150  case TypeKind::NoneType:
151  if (obj != Py_None)
152  throw py::cast_error();
153 
154  return {};
155  case TypeKind::BoolType:
156  return py::cast<bool>(obj);
157  case TypeKind::TupleType: {
158  if (!PyTuple_Check(obj.ptr()))
159  throw py::cast_error(); // note: the py::cast does not throw cast_error
160  // because it attempts to iterate a non-tuple
161  py::tuple tuple = py::cast<py::tuple>(obj);
162  size_t tuple_size = tuple.size();
163  const auto& elem_types = type->cast<TupleType>()->elements();
164  if (elem_types.size() != tuple_size) {
165  throw py::cast_error();
166  }
167  std::vector<IValue> values;
168  values.reserve(tuple_size);
169  for (size_t i = 0; i < tuple_size; ++i) {
170  values.push_back(toIValue(tuple[i], elem_types[i]));
171  }
172  return Tuple::create(std::move(values));
173  }
174  case TypeKind::StringType:
175  return ConstantString::create(py::cast<std::string>(obj));
176  case TypeKind::DeviceObjType: {
177  auto device = reinterpret_cast<THPDevice*>(obj.ptr());
178  return device->device;
179  }
180  case TypeKind::ListType: {
181  const auto& elem_type = type->expect<ListType>()->getElementType();
182  switch (elem_type->kind()) {
183  // allows single int/float to be broadcasted to a fixed size list
184  case TypeKind::IntType:
185  if (!N || !py::isinstance<py::int_>(obj)) {
186  return py::cast<std::vector<int64_t>>(obj);
187  } else {
188  double value = py::cast<int64_t>(obj);
189  std::vector<double> repeated(*N, value);
190  return repeated;
191  }
192  case TypeKind::FloatType:
193  if (!N || !py::isinstance<py::float_>(obj)) {
194  return py::cast<std::vector<double>>(obj);
195  } else {
196  double value = py::cast<double>(obj);
197  std::vector<double> repeated(*N, value);
198  return repeated;
199  }
200  case TypeKind::DimensionedTensorType:
201  case TypeKind::TensorType:
202  return py::cast<std::vector<at::Tensor>>(obj);
203  default:
204  return createGenericList(obj, elem_type);
205  }
206  }
207  case TypeKind::DictType: {
208  const auto& dict_type = type->expect<DictType>();
209  return createGenericDict(
210  obj, dict_type->getKeyType(), dict_type->getValueType());
211  }
212  case TypeKind::OptionalType: {
213  // check if it's a none obj since optional accepts NoneType
214  if (obj == Py_None) {
215  // check if it's a none obj since optional accepts NoneType
216  // return an IValue() to denote a NoneType
217  return {};
218  }
219  return toIValue(obj, type->expect<OptionalType>()->getElementType());
220  }
221  case TypeKind::NumberType:
222  case TypeKind::GeneratorType:
223  case TypeKind::VarType:
224  case TypeKind::FutureType:
225  case TypeKind::ClassType:
226  break;
227  }
228  AT_ERROR(
229  "Missing cases in toIValue for type: ",
230  type->str(),
231  "! File a bug report.");
232 }
233 
234 inline IValue argumentToIValue(
235  const FunctionSchema& schema,
236  size_t argumentPosition,
237  py::handle object) {
238  const auto& argument = schema.arguments().at(argumentPosition);
239  try {
240  return toIValue(object, argument.type(), argument.N());
241  } catch (const py::cast_error& error) {
242  throw std::runtime_error(c10::str(
243  schema.name(),
244  "() expected value of type ",
245  argument.type()->str(),
246  " for argument '",
247  argument.name(),
248  "' in position ",
249  argumentPosition,
250  ", but instead got value of type ",
251  py::str(object.get_type().attr("__name__")),
252  ".",
253  "\nValue: ",
254  py::repr(object),
255  "\nDeclaration: ",
256  schema));
257  }
258 }
259 
260 inline IValue returnToIValue(const TypePtr& type, py::handle object) {
261  try {
262  return toIValue(object, type);
263  } catch (const py::cast_error& error) {
264  throw std::runtime_error(c10::str(
265  " expected value of type ",
266  type->str(),
267  " for return value but instead got value of type ",
268  py::str(object.get_type().attr("__name__")),
269  ".",
270  "\nValue: ",
271  py::repr(object)));
272  }
273 }
274 
// Converts an IValue into the corresponding Python object. Takes the IValue
// by rvalue reference so tensor payloads can be moved out rather than copied.
// Throws for IValue kinds with no conversion listed below.
inline py::object toPyObject(IValue&& ivalue) {
  if (ivalue.isNone()) {
    return py::none();
  } else if (ivalue.isTensor()) {
    // Move the tensor out of the IValue to avoid a refcount bump.
    auto tensor = std::move(ivalue).toTensor();
    if (tensor.is_sparse()) {
      AT_ERROR("sparse tensors not supported");
    }
    // Wrap in autograd::Variable so Python receives a torch.Tensor.
    return py::cast(autograd::Variable(std::move(tensor)));
  } else if (ivalue.isDouble()) {
    return py::cast(ivalue.toDouble());
  } else if (ivalue.isInt()) {
    return py::cast(ivalue.toInt());
  } else if (ivalue.isBool()) {
    return py::cast(ivalue.toBool());
  } else if (ivalue.isString()) {
    return py::cast(ivalue.toStringRef());
  } else if (ivalue.isIntList()) {
    return py::cast(ivalue.toIntListRef());
  } else if (ivalue.isDoubleList()) {
    return py::cast(ivalue.toDoubleListRef());
  } else if (ivalue.isBoolList()) {
    return py::cast(ivalue.toBoolListRef());
  } else if (ivalue.isTensorList()) {
    return py::cast(ivalue.toTensorListRef());
  } else if (ivalue.isGenericList()) {
    // Recursively convert each element into a Python list.
    auto list = ivalue.toGenericList();
    const auto& elements = list->elements();
    py::list t{elements.size()};
    for (size_t i = 0; i < elements.size(); ++i) {
      t[i] = toPyObject(IValue{elements[i]});
    }
    return std::move(t);
  } else if (ivalue.isTuple()) {
    // Recursively convert each element into a Python tuple.
    auto tuple = ivalue.toTuple();
    const auto& elements = tuple->elements();
    py::tuple t{elements.size()};
    for (size_t i = 0; i < elements.size(); ++i) {
      t[i] = toPyObject(IValue{elements[i]});
    }
    return std::move(t);
  } else if (ivalue.isDevice()) {
    return py::cast<py::object>(THPDevice_New(ivalue.toDevice()));
  } else if (ivalue.isGenericDict()) {
    // Recursively convert keys and values into a Python dict.
    auto dict = ivalue.toGenericDict();
    const auto& elements = dict->elements();
    py::dict py_dict;
    for (auto pair : elements) {
      py_dict[toPyObject(IValue{pair.first})] = toPyObject(IValue{pair.second});
    }
    return std::move(py_dict);
  } else {
    AT_ERROR("Missing cases in 'toPyObject'! File a bug report.");
  }
}
330 
331 struct VISIBILITY_HIDDEN tuple_slice {
332  /*implicit*/ tuple_slice(py::tuple tup_)
333  : tup(std::move(tup_)), b(0), e(tup.size()) {}
334  tuple_slice(py::tuple tup_, int64_t b_)
335  : tup(std::move(tup_)), b(b_), e(tup.size()) {}
336  tuple_slice(py::tuple tup_, int64_t b_, int64_t e_)
337  : tup(std::move(tup_)), b(b_), e(e_) {}
338  py::detail::tuple_iterator begin() const {
339  return {tup, static_cast<pybind11::ssize_t>(b)};
340  }
341  py::detail::tuple_iterator end() const {
342  return {tup, static_cast<pybind11::ssize_t>(e)};
343  }
344  size_t size() const {
345  return e - b;
346  }
347  py::detail::tuple_accessor operator[](size_t index) const {
348  return {tup, static_cast<size_t>(b + index)};
349  }
350 
351  private:
352  py::tuple tup;
353  int64_t b;
354  int64_t e;
355 };
356 
// Builds an interpreter Stack for calling `schema` with the given positional
// `args` and keyword `kwargs`. Positional args are pushed first, in order;
// each remaining schema argument is filled from kwargs or from its default
// value. Throws std::runtime_error for too many arguments, a missing
// argument, an unknown kwarg, or a kwarg duplicating a positional arg.
inline Stack createStackForSchema(
    const FunctionSchema& schema,
    const tuple_slice& args,
    const py::kwargs& kwargs = py::kwargs()) {
  if (args.size() + kwargs.size() > schema.arguments().size()) {
    throw std::runtime_error(c10::str(
        schema.name(),
        "() expected at most ",
        schema.arguments().size(),
        " argument(s) but received ",
        args.size() + kwargs.size(),
        " argument(s). Declaration: ",
        schema));
  }
  Stack stack;
  stack.reserve(schema.arguments().size());

  // First push all positional args.
  for (size_t i = 0; i < args.size(); ++i) {
    // Use the type information from the schema to convert the PyObject.
    push(stack, argumentToIValue(schema, i, args[i]));
  }

  // Now for every remaining non-positional argument in the schema, look for it
  // in the kwargs dict and push it if found, or use its default value if it
  // has one.
  size_t consumed_kwargs = 0;
  for (size_t i = args.size(); i < schema.arguments().size(); ++i) {
    const auto& arg = schema.arguments()[i];
    if (kwargs.contains(arg.name().c_str())) {
      push(stack, argumentToIValue(schema, i, kwargs[arg.name().c_str()]));
      consumed_kwargs += 1;
    } else if (arg.default_value()) {
      push(stack, *arg.default_value());
    } else {
      throw std::runtime_error(c10::str(
          schema.name(),
          "() is missing value for argument '",
          arg.name(),
          "'. Declaration: ",
          schema));
    }
  }

  // Leftover kwargs mean either an unknown name or a duplicate of a
  // positional argument; findErrorInKwargs throws the precise error.
  if (consumed_kwargs != kwargs.size()) {
    detail::findErrorInKwargs(schema, kwargs);
  }

  return stack;
}
407 
408 inline py::object createPyObjectForStack(Stack&& stack) {
409  if (stack.empty()) {
410  return py::none();
411  }
412 
413  // Return a simple value and not a single-element tuple if there is only one
414  // return value.
415  if (stack.size() == 1) {
416  return toPyObject(std::move(stack[0]));
417  }
418 
419  // If there is more than one return value, pop them into a py::tuple.
420  py::tuple return_values(stack.size());
421  for (size_t ret = 0; ret < return_values.size(); ++ret) {
422  return_values[ret] = toPyObject(std::move(stack[ret]));
423  }
424 
425  return std::move(return_values);
426 }
427 
428 // TODO: Remove once we clean up the GraphExecutor usage.
429 inline Stack evilDeprecatedBadCreateStackDoNotUse(
430  const py::tuple& tuple,
431  at::ArrayRef<Value*> inputs,
432  size_t reserve_extra_space = 0) {
433  if (tuple.size() != inputs.size()) {
434  AT_ERROR(
435  "expected " + std::to_string(inputs.size()) + " inputs, but got " +
436  std::to_string(tuple.size()));
437  }
438  Stack result;
439  result.reserve(tuple.size() + reserve_extra_space);
440  for (size_t i = 0; i < inputs.size(); ++i) {
441  result.push_back(toIValue(std::move(tuple[i]), inputs[i]->type()));
442  }
443  return result;
444 }
445 
// Calls a script::Method with Python args/kwargs: builds the argument stack
// from the method's schema, runs the method, and converts the last value on
// the stack back into a Python object.
// NOTE(review): assumes the method leaves at least one value on the stack
// (stack.back() below) — confirm behavior for zero-return methods.
inline py::object invokeScriptMethodFromPython(
    script::Method& method,
    tuple_slice args,
    py::kwargs kwargs) {
  auto stack = createStackForSchema(
      method.getSchema(), std::move(args), std::move(kwargs));
  {
    // Run without holding the GIL; no Python objects are touched in run().
    AutoNoGIL no_gil_guard;
    method.run(stack);
  }
  return toPyObject(std::move(stack.back()));
}
458 
459 inline py::object invokeOperatorFromPython(
460  const Operator& op,
461  py::args args,
462  py::kwargs kwargs) {
463  // Create a stack full of the arguments and keyword arguments.
464  auto stack =
465  createStackForSchema(op.schema(), std::move(args), std::move(kwargs));
466 
467  // Invoke the operation, which puts the return values onto the stack.
468  op.getOperation()(stack);
469 
470  return createPyObjectForStack(std::move(stack));
471 }
472 } // namespace jit
473 } // namespace torch
constexpr size_t size() const
size - Get the array size.
Definition: ArrayRef.h:138
Definition: jit_type.h:17
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: ArrayRef.h:41