Caffe2 - C++ API
A deep learning, cross-platform ML framework
python_ir.cpp
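Python bindings for the JIT IR: this file exposes Graph, Block, Node, Value, and the c10::Type hierarchy to Python via pybind11, and provides the ConcretePythonOp node used to embed arbitrary Python callables in a graph.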
1 #include <torch/csrc/python_headers.h>
2 
3 #include <torch/csrc/jit/argument_spec.h>
4 #include <torch/csrc/jit/export.h>
5 #include <torch/csrc/jit/ir.h>
6 #include <torch/csrc/jit/passes/alias_analysis.h>
7 #include <torch/csrc/jit/passes/python_print.h>
8 #include <torch/csrc/jit/passes/shape_analysis.h>
9 #include <torch/csrc/jit/pybind.h>
10 #include <torch/csrc/jit/python_tracer.h>
11 #include <torch/csrc/utils/auto_gil.h>
12 #include <torch/csrc/utils/pybind.h>
13 #include <torch/csrc/utils/python_strings.h>
14 
15 #include <iostream>
16 #include <sstream>
17 
18 namespace torch {
19 namespace jit {
20 
21 using c10::Type;
22 
23 std::string getPythonName(const PyObject* obj_) {
24  AutoGIL gil;
25  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
26  PyObject* obj = const_cast<PyObject*>(obj_);
27  auto v = py::getattr(obj, "__name__", py::str("<python_value>"));
28  // if this was an autograd.Function, recover the name of the class
29  return py::str(v);
30 }
31 
32 std::ostream& printPyObject(std::ostream& out, const THPObjectPtr& obj) {
33  AutoGIL gil;
34  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
35  auto pyobj = py::handle(const_cast<PyObject*>(obj.get()));
36  if (py::isinstance<py::tuple>(pyobj)) {
37  // This special-case for printing tuples handles a problem where
38  // str((2L, 3L)) outputs "(2L, 3L)" in Python 2 but "(2, 3)"
39  // in Python 3. In order to suppress the L-suffix, we must
40  // manually print the string ourselves, calling str() on the
41  // sub-elements.
42  //
43  // This is a fairly fragile fix (What if you have nested tuples
44  // in tuples? What if you have dictionaries?) but it seems to hit
45  // the cases that are triggered in practice in onnx-pytorch. Revisit
46  // this code if this is not the case.
47  //
48  // By the way, one non-solution for this problem is to monkeypatch
49  // tuple.__str__; this doesn't work because Python doesn't allow
50  // monkeypatching methods of built-in types.
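 //
 // For example, the tuple (2L, 3L) should print here as "(2, 3)", and the
 // one-element tuple (2L,) should keep its trailing comma and print as "(2,)".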
51  auto pytuple = pyobj.cast<py::tuple>();
52  out << "(";
53  size_t i = 0;
54  for (const auto& o : pytuple) {
55  if (i > 0) {
56  out << ", ";
57  }
58  THPObjectPtr str(py::str(o).release().ptr());
59  out << THPUtils_unpackString(str.get());
60  i++;
61  }
62  if (i == 1) {
63  out << ",";
64  }
65  out << ")";
66  return out;
67  } else {
68  return out << THPUtils_unpackString(py::str(pyobj).ptr());
69  }
70 }
71 
72 std::vector<Node*> findAllNodes(
73  at::ArrayRef<Block*> blocks,
74  Symbol kind,
75  bool recurse = true) {
76  std::vector<Node*> ret;
77  for (Block* block : blocks) {
78  for (Node* n : block->nodes()) {
79  if (n->kind() == kind) {
80  ret.push_back(n);
81  }
82  if (recurse) {
83  auto nodes = findAllNodes(n->blocks(), kind, recurse);
84  ret.insert(ret.end(), nodes.begin(), nodes.end());
85  }
86  }
87  }
88  return ret;
89 }
90 
91 std::vector<Node*> findAllNodes(
92  Block* block,
93  Symbol kind,
94  bool recurse = true) {
95  std::vector<Block*> blocks = {block};
96  return findAllNodes(blocks, kind, recurse);
97 }
98 
99 Node* findNode(
100  at::ArrayRef<Block*> blocks,
101  Symbol kind,
102  bool recurse = true) {
103  for (Block* block : blocks) {
104  for (Node* n : block->nodes()) {
105  if (n->kind() == kind) {
106  return n;
107  }
108  if (recurse) {
109  auto node = findNode(n->blocks(), kind, recurse);
110  if (node != nullptr) {
111  return node;
112  }
113  }
114  }
115  }
116  return nullptr;
117 }
118 
119 Node* findNode(Block* block, Symbol kind, bool recurse = true) {
120  std::vector<Block*> blocks = {block};
121  return findNode(blocks, kind, recurse);
122 }
123 
124 // execute a Python function, used for Ops we can't optimize but that we want to
125 // optimize around
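 // A PythonOp carries the Python callable (pyobj), a calling-convention
 // string (cconv), and any captured non-tensor arguments (scalar_args);
 // ConcretePythonOp supplies the pieces that need the Python C API.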
126 struct ConcretePythonOp : public PythonOp {
127  ConcretePythonOp(Graph* graph) : PythonOp(graph) {}
128  std::string name() const override {
129  AutoGIL gil;
130  if (auto autograd = autogradFunction()) {
131  return getPythonName(autograd->get());
132  } else {
133  return getPythonName(pyobj.get());
134  }
135  }
136  void cloneFrom(Node* other_) override {
137  Node::cloneFrom(other_);
138  auto other = other_->cast<PythonOp>();
139  this->cconv = other->cconv;
140  Py_INCREF(other->pyobj.get());
141  this->pyobj = THPObjectPtr(other->pyobj.get());
142  for (auto& sa : other->scalar_args) {
143  Py_INCREF(sa.get());
144  this->scalar_args.emplace_back(sa.get());
145  }
146  }
147  Node* allocNewInstance(Graph* g) override {
148  return new ConcretePythonOp(g);
149  }
150  // recover the autograd.Function instance, if this PythonOp's function
151  // was originally SomeFunction.apply
152  // used in ONNX for discovering symbolics
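 // Concretely: if pyobj has a __self__ whose "apply" attribute is pyobj itself
 // (as for SomeFunction.apply), return that __self__; otherwise return nullopt.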
153  c10::optional<THPObjectPtr> autogradFunction() const override {
154  AutoGIL gil;
155  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
156  py::handle obj = const_cast<PyObject*>(pyobj.get());
157 
158  auto r = py::getattr(obj, "__self__", py::none());
159  if (r.is_none())
160  return c10::nullopt;
161 
162  auto apply = py::getattr(r, "apply", py::none());
163  if (apply.is_none())
164  return c10::nullopt;
165 
166  auto c = PyObject_RichCompareBool(apply.ptr(), obj.ptr(), Py_NE);
167  if (PyErr_Occurred())
168  throw py::error_already_set();
169  if (c)
170  return c10::nullopt;
171 
172  return THPObjectPtr(r.release().ptr());
173  }
174 
175  void writeScalars(std::ostream& out) const override {
176  out << "(";
177  int i = 0;
178  for (auto& scalar : scalar_args) {
179  if (i++ > 0)
180  out << ", ";
181  printPyObject(out, scalar);
182  }
183  out << ")";
184  }
185 };
186 
187 PythonOp* pythonAllocPythonOp(Graph* g) {
188  return new ConcretePythonOp(g);
189 }
190 
191 void initPythonIRBindings(PyObject* module_) {
192  setAllocPythonOp(pythonAllocPythonOp);
193 
194  auto m = py::handle(module_).cast<py::module>();
195 #define GS(name) def(#name, &Graph ::name)
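 // GS(name) binds Graph::name directly, so .GS(lint) below is shorthand for
 // .def("lint", &Graph::lint).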
196  py::class_<Graph, std::shared_ptr<Graph>>(m, "Graph")
197  .def(py::init<>())
198  .def(
199  "__repr__",
200  [](Graph& g) {
201  std::stringstream ss;
202  ss << g;
203  return ss.str();
204  })
205  .def(
206  "dump_alias_db",
207  [](std::shared_ptr<Graph> g) {
208  AliasDb db(g);
209  db.dump();
210  })
211  .def(
212  "propagate_shapes",
213  [](std::shared_ptr<Graph> g,
214  std::vector<at::Tensor> inputs,
215  bool with_grad) {
216  setInputTypes(
217  *g,
218  ArgumentSpec(with_grad, fmap<IValue>(inputs), inputs.size()));
219  PropagateInputShapes(g);
220  })
221  .def(
222  "_export_onnx",
223  [](const std::shared_ptr<Graph> g,
224  const std::vector<at::Tensor>& initializers,
225  int64_t onnx_opset_version,
226  bool defer_weight_export,
227  ::torch::onnx::OperatorExportTypes operator_export_type) {
228  std::string graph;
229  RawDataExportMap export_map;
230  std::tie(graph, export_map) = export_onnx(
231  g,
232  initializers,
233  onnx_opset_version,
234  defer_weight_export,
235  operator_export_type);
236  std::unordered_map<std::string, py::bytes>
237  python_serialized_export_map;
238  for (auto& kv : export_map) {
239  auto t = kv.second;
240  size_t copy_bytes = t.element_size() * t.numel();
241  // TODO: this is an unnecessary copy. In theory we can directly
242  // return the map from identifier to Tensor, but we need some API
243  // in Python to get raw `bytes` containing the raw tensor data.
244  python_serialized_export_map[kv.first] =
245  py::bytes(static_cast<const char*>(t.data_ptr()), copy_bytes);
246  }
247  return std::make_tuple(
248  py::bytes(graph), python_serialized_export_map);
249  },
250  py::arg("initializers"),
251  py::arg("onnx_opset_version") = 0,
252  py::arg("defer_weight_export") = false,
253  py::arg("operator_export_type") =
254  ::torch::onnx::OperatorExportTypes::ONNX)
255  .def(
256  "_pretty_print_onnx",
257  [](const std::shared_ptr<Graph> g,
258  const std::vector<at::Tensor>& initializers,
259  int64_t onnx_opset_version,
260  bool defer_weight_export,
261  ::torch::onnx::OperatorExportTypes operator_export_type,
262  bool google_printer) {
263  return pretty_print_onnx(
264  g,
265  initializers,
266  onnx_opset_version,
267  defer_weight_export,
268  operator_export_type,
269  google_printer);
270  },
271  py::arg("initializers"),
272  py::arg("onnx_opset_version") = 0,
273  py::arg("defer_weight_export") = false,
274  py::arg("operator_export_type") =
275  ::torch::onnx::OperatorExportTypes::ONNX,
276  py::arg("google_printer") = false)
277  .def(
278  "inputs",
279  [](Graph& g) {
280  return py::make_iterator(g.inputs().begin(), g.inputs().end());
281  })
282  .def(
283  "outputs",
284  [](Graph& g) {
285  return py::make_iterator(g.outputs().begin(), g.outputs().end());
286  })
287  // TODO: Iterator invalidation might make this hazardous
288  .def(
289  "nodes",
290  [](Graph& g) {
291  return py::make_iterator(g.nodes().begin(), g.nodes().end());
292  })
293  .def(
294  "findNode",
295  [](Graph& g, const std::string& kind, bool recurse) {
296  return findNode(g.block(), Symbol::fromQualString(kind), recurse);
297  },
298  "Find Node",
299  py::arg("kind"),
300  py::arg("recurse") = true)
301  .def(
302  "findAllNodes",
303  [](Graph& g, const std::string& kind, bool recurse) {
304  return findAllNodes(
305  g.block(), Symbol::fromQualString(kind), recurse);
306  },
307  "Find all nodes",
308  py::arg("kind"),
309  py::arg("recurse") = true)
310  .def("addInput", [](Graph& g) { return g.addInput(); })
311  .def("copy", [](Graph& g) { return g.copy(); })
312  .GS(eraseInput)
313  .GS(registerOutput)
314  .def(
315  "create",
316  [](Graph& g, const char* str) {
317  return g.create(Symbol::fromQualString(str));
318  })
319  .def(
320  "create",
321  [](Graph& g, const char* str, size_t noutputs) {
322  return g.create(Symbol::fromQualString(str), noutputs);
323  })
324  .def(
325  "create",
326  [](Graph& g, const char* str, const std::vector<Value*>& inputs) {
327  return g.create(Symbol::fromQualString(str), inputs);
328  })
329  .def(
330  "create",
331  [](Graph& g,
332  const char* str,
333  const std::vector<Value*>& inputs,
334  size_t noutputs) {
335  return g.create(Symbol::fromQualString(str), inputs, noutputs);
336  })
337  .def("param_node", [](Graph& g) { return g.block()->param_node(); })
338  .def("return_node", [](Graph& g) { return g.block()->return_node(); })
339  .def(
340  "pretty_print",
341  [](Graph& g) {
342  std::ostringstream oss;
343  g.prettyPrint(oss);
344  return oss.str();
345  })
346  .GS(createFusionGroup)
347  .def(
348  "createClone",
349  [](Graph& g, Node* n, py::object fn) {
350  return g.createClone(
351  n, [&](Value* e) { return fn(e).cast<Value*>(); });
352  })
353  .GS(appendNode)
354  .GS(prependNode)
355  .GS(lint)
356  .GS(insertNode);
357 #undef GS
358 
359 #define VS(name) def(#name, &Value ::name)
360  py::class_<Value, std::unique_ptr<Value, py::nodelete>>(m, "Value")
361  .def(
362  "__repr__",
363  [](Value& n) {
364  std::stringstream ss;
365  ss << n.uniqueName() << " defined in (" << *n.node() << ")";
366  return ss.str();
367  })
368  .VS(type)
369  .VS(setType)
370  .VS(inferTypeFrom)
371  // skip owningGraph because it returns a raw pointer to an otherwise
372  // std::shared_ptr-stored graph object, and would cause a double free
373  .VS(unique)
374  .VS(uniqueName)
375  .VS(setUniqueName)
376  .VS(offset)
377  .VS(uses)
378  .VS(replaceAllUsesWith)
379  .def("node", [](Value& v) { return v.node(); })
380  .def(
381  "setTypeAs",
382  [](Value* node, Value* other) {
383  node->setType(other->type());
384  return node;
385  })
386  .VS(copyMetadata)
387  .VS(isTensor)
388  .def("toIValue", [](Value& n) { return toIValue(&n); })
389  .def("type", [](Value& v) { return v.type(); });
390 #undef VS
391 
392  py::class_<Block, std::unique_ptr<Block, py::nodelete>>(m, "Block")
393  .def(
394  "nodes",
395  [](Block& b) {
396  return py::make_iterator(b.nodes().begin(), b.nodes().end());
397  })
398  .def(
399  "findNode",
400  [](Block& b, const std::string& kind, bool recurse) {
401  return findNode(&b, Symbol::fromQualString(kind), recurse);
402  },
403  "Find Node",
404  py::arg("kind"),
405  py::arg("recurse") = true)
406  .def(
407  "findAllNodes",
408  [](Block& b, const std::string& kind, bool recurse) {
409  return findAllNodes(&b, Symbol::fromQualString(kind), recurse);
410  },
411  "Find all nodes",
412  py::arg("kind"),
413  py::arg("recurse") = true)
414  .def(
415  "inputs",
416  [](Block& b) {
417  return py::make_iterator(b.inputs().begin(), b.inputs().end());
418  })
419  .def(
420  "outputs",
421  [](Block& b) {
422  return py::make_iterator(b.outputs().begin(), b.outputs().end());
423  })
424  .def(
425  "returnNode",
426  [](Block& b) {
427  return b.return_node();
428  })
429  .def(
430  "paramNode",
431  [](Block& b) {
432  return b.param_node();
433  });
434 
435 #define NS(name) def(#name, &Node ::name)
436  py::class_<Node, std::unique_ptr<Node, py::nodelete>>(m, "Node")
437  .def(
438  "__repr__",
439  [](Node& n) {
440  std::stringstream ss;
441  ss << n;
442  return ss.str();
443  })
444  .def(
445  "getSourceLocation",
446  [](Node& n) -> py::object {
447  std::stringstream ss;
448  if (auto sl = n.getSourceLocation()) {
449  sl->highlight(ss);
450  return py::str(ss.str());
451  } else {
452  return py::none();
453  }
454  })
455  .def("hasMultipleOutputs", [](Node& n) { return n.outputs().size() > 1; })
456  .def("outputsSize", [](Node& n) { return n.outputs().size(); })
457  .NS(kind)
458  .def("inputsAt", [](Node& n, size_t i) { return n.inputs().at(i); })
459  .def(
460  "inputs",
461  [](Node& n) {
462  return py::make_iterator(n.inputs().begin(), n.inputs().end());
463  })
464  .def(
465  "outputs",
466  [](Node& n) {
467  return py::make_iterator(n.outputs().begin(), n.outputs().end());
468  })
469  .def("outputsAt", [](Node& n, size_t i) { return n.outputs().at(i); })
470  .def(
471  "findNode",
472  [](Node& n, const std::string& kind, bool recurse) {
473  return findNode(n.blocks(), Symbol::fromQualString(kind), recurse);
474  },
475  "Find Node",
476  py::arg("kind"),
477  py::arg("recurse") = true)
478  .def(
479  "findAllNodes",
480  [](Node& n, const std::string& kind, bool recurse) {
481  return findAllNodes(
482  n.blocks(), Symbol::fromQualString(kind), recurse);
483  },
484  "Find all nodes",
485  py::arg("kind"),
486  py::arg("recurse") = true)
487  .def("input", [](Node& n) { return n.input(); })
488  .def("output", [](Node& n) { return n.output(); })
489  .NS(addInput)
490  .NS(replaceInput)
491  .NS(replaceInputWith)
492  .NS(replaceAllUsesWith)
493  .NS(insertBefore)
494  .NS(insertAfter)
495  .NS(moveAfter)
496  .NS(moveBefore)
497  .NS(removeInput)
498  .NS(removeAllInputs)
499  .NS(destroy)
500  .NS(hasUses)
501  .NS(eraseOutput)
502  .NS(addOutput)
503  .NS(scopeName)
504  .NS(isNondeterministic)
505  .def(
506  "blocks",
507  [](Node& n) {
508  return py::make_iterator(n.blocks().begin(), n.blocks().end());
509  })
510  .NS(addBlock)
511  .NS(mustBeNone)
512 
513 #define AS(name) def(#name, &Node::name)
514  // methods from Attributes
515  .AS(copyAttributes)
516  .AS(hasAttributes)
517 #undef AS
518 #define AS(name) def(#name, &Node::name##S)
519  // The default method names take Symbol, but the string conversion for
520  // Symbol requires you to qualify with attr::. This is not very user friendly
521  // for attributes, so expose the string variants instead.
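 // e.g. the Python call n.hasAttribute("value") dispatches to
 // Node::hasAttributeS, not to Node::hasAttribute(attr::value).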
522  .AS(hasAttribute)
523  .AS(kindOf)
524  .AS(removeAttribute)
525  .AS(attributeNames)
526 #undef AS
527 #define CREATE_ACCESSOR(Kind, method) \
528  def(#method "_", \
529  [](Node& n, const char* name, Kind##Attr::ValueType v) { \
530  return n.method##_(Symbol::attr(name), std::move(v)); \
531  }) \
532  .def(#method, [](Node& n, const char* name) { \
533  return n.method(Symbol::attr(name)); \
534  })
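 // e.g. CREATE_ACCESSOR(Float, f) exposes the setter "f_" (which returns the
 // node so calls can be chained) and the getter "f", both keyed by the
 // attribute's string name.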
535  .CREATE_ACCESSOR(Float, f)
536  .CREATE_ACCESSOR(Floats, fs)
537  .CREATE_ACCESSOR(String, s)
538  .CREATE_ACCESSOR(Strings, ss)
539  .CREATE_ACCESSOR(Int, i)
540  .CREATE_ACCESSOR(Ints, is)
541  .CREATE_ACCESSOR(Graph, g)
542  .CREATE_ACCESSOR(Graphs, gs)
543 #undef CREATE_ACCESSOR
544  // Tensor (t_) -- manually written to unwrap the variable into a tensor.
545  .def(
546  "t_",
547  [](Node& n, const char* name, torch::autograd::Variable v) {
548  AT_ASSERT(!v.requires_grad());
549  return n.t_(Symbol::attr(name), v);
550  })
551  .def(
552  "t",
553  [](Node& n, const char* name) { return n.t(Symbol::attr(name)); })
554  // Tensors (ts_) -- manually written to unwrap variables into tensors.
555  .def(
556  "ts_",
557  [](Node& n,
558  const char* name,
559  std::vector<torch::autograd::Variable> vs) {
560  std::vector<at::Tensor> tensors;
561  tensors.reserve(vs.size());
562  for (auto& variable : vs) {
563  AT_ASSERT(!variable.requires_grad());
564  tensors.push_back(variable);
565  }
566  return n.ts_(Symbol::attr(name), std::move(tensors));
567  })
568  .def(
569  "ts",
570  [](Node& n, const char* name) {
571  auto tensors = n.ts(Symbol::attr(name));
572  std::vector<torch::autograd::Variable> variables;
573  variables.reserve(tensors.size());
574  for (auto& tensor : tensors) {
575  variables.emplace_back(std::move(tensor));
576  }
577  return variables;
578  })
579  .def(
580  "z_",
581  [](Node& n, const char* name, at::Tensor v) {
582  return n.t_(
583  Symbol::attr(name),
584  autograd::Variable(v.view({})).set_requires_grad(false));
585  })
586  .def(
587  "z",
588  [](Node& n, const char* name) { return n.t(Symbol::attr(name)); })
589  .def(
590  "zs_",
591  [](Node& n, const char* name, TensorsAttr::ValueType v) {
592  for (auto& i : v) {
593  i = autograd::Variable(i.view({})).set_requires_grad(false);
594  }
595  return n.ts_(Symbol::attr(name), std::move(v));
596  })
597  .def(
598  "zs",
599  [](Node& n, const char* name) { return n.ts(Symbol::attr(name)); })
600  .def(
601  "pyobj",
602  [](Node& n) {
603  return py::handle(n.expect<PythonOp>()->pyobj.get())
604  .cast<py::object>();
605  })
606  .def("cconv", [](Node& n) { return n.expect<PythonOp>()->cconv; })
607  .def("pyname", [](Node& n) { return n.expect<PythonOp>()->name(); })
608  .def("scalar_args", [](Node& n) {
609  auto op = n.expect<PythonOp>();
610  auto scalars = py::list();
611  auto append = scalars.attr("append");
612  for (auto& arg : op->scalar_args) {
613  append(py::handle(arg.get()));
614  }
615  return scalars;
616  });
617 
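 // Expose the c10::Type hierarchy so Python code can inspect and construct IR
 // value types, e.g. ListType.ofInts() or TupleType([IntType.get(), FloatType.get()]).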
618  using ::c10::Type;
619  py::class_<Type, std::shared_ptr<Type>>(m, "Type")
620  .def("__repr__", [](Type& t) { return t.python_str(); })
621  .def(
622  "str",
623  [](Type& t) {
624  std::ostringstream s;
625  s << t;
626  return s.str();
627  })
628  .def("kind", [](const Type& t) { return typeKindToString(t.kind()); })
629  .def(
630  "dim",
631  [](const Type& t) {
632  return t.expect<DimensionedTensorType>()->dim();
633  })
634  .def(
635  "sizes",
636  [](Type& t) { return t.expect<CompleteTensorType>()->sizes(); })
637  .def(
638  "strides",
639  [](Type& t) { return t.expect<CompleteTensorType>()->strides(); })
640  .def(
641  "contiguous",
642  [](Type& t) {
643  return std::static_pointer_cast<Type>(
644  t.expect<CompleteTensorType>()->contiguous());
645  })
646  .def(
647  "scalarType",
648  [](Type& t) {
649  return toString(t.expect<DimensionedTensorType>()->scalarType());
650  })
651  .def(
652  "__eq__",
653  [](std::shared_ptr<Type>& self, std::shared_ptr<Type>& other) {
654  return *self == *other;
655  })
656  .def(
657  "isSubtypeOf",
658  [](std::shared_ptr<Type>& self, std::shared_ptr<Type> other) {
659  return self->isSubtypeOf(other);
660  });
661 
662  py::class_<NumberType, Type, std::shared_ptr<NumberType>>(m, "NumberType")
663  .def_static("get", &NumberType::get);
664  py::class_<IntType, Type, std::shared_ptr<IntType>>(m, "IntType")
665  .def_static("get", &IntType::get);
666  py::class_<FloatType, Type, std::shared_ptr<FloatType>>(m, "FloatType")
667  .def_static("get", &FloatType::get);
668  py::class_<TensorType, Type, std::shared_ptr<TensorType>>(m, "TensorType")
669  .def_static("get", &TensorType::get);
670  py::class_<BoolType, Type, std::shared_ptr<BoolType>>(m, "BoolType")
671  .def_static("get", &BoolType::get);
672  py::class_<StringType, Type, std::shared_ptr<StringType>>(m, "StringType")
673  .def_static("get", &StringType::get);
674 
675  py::class_<TupleType, Type, std::shared_ptr<TupleType>>(m, "TupleType")
676  .def(
677  py::init([](std::vector<TypePtr> a) { return TupleType::create(a); }))
678  .def("elements", [](TupleType& self) {
679  std::vector<TypePtr> types;
680  for (const auto& type : self.elements()) {
681  types.push_back(type);
682  }
683  return types;
684  });
685  py::class_<ListType, Type, std::shared_ptr<ListType>>(m, "ListType")
686  .def(py::init([](TypePtr a) { return ListType::create(a); }))
687  .def_static("ofInts", &ListType::ofInts)
688  .def_static("ofTensors", &ListType::ofTensors)
689  .def("getElementType", &ListType::getElementType);
690  py::class_<DictType, Type, std::shared_ptr<DictType>>(m, "DictType")
691  .def(py::init([](TypePtr key, TypePtr value) {
692  return DictType::create(key, value);
693  }));
694  py::class_<OptionalType, Type, std::shared_ptr<OptionalType>>(
695  m, "OptionalType")
696  .def(py::init([](TypePtr a) { return OptionalType::create(a); }))
697  .def_static("ofTensor", &OptionalType::ofTensor)
698  .def("getElementType", &OptionalType::getElementType);
699 
700  py::class_<Use>(m, "Use")
701  .def_readonly("user", &Use::user)
702  .def_readonly("offset", &Use::offset);
703 }
704 } // namespace jit
705 } // namespace torch