#include <c10/util/Exception.h>
#include <torch/csrc/jit/attributes.h>
#include <torch/csrc/jit/export.h>
#include <torch/csrc/jit/ir.h>
#include <torch/csrc/jit/ir_views.h>
#include <torch/csrc/jit/passes/python_print.h>
#include <torch/csrc/jit/resource_guard.h>
#include <torch/csrc/jit/script/error_report.h>
#include <torch/csrc/jit/script/module.h>

// is this a printable ASCII character (i.e. one that does not need escaping)?
static bool isPrint(char s) {
  return s > 0x1f && s < 0x7f;
}

// print `str` as a quoted Python string literal, escaping as needed
void printQuotedString(std::ostream& stmt, const std::string& str) {
  // ...
}
static bool isValidIdentifierChar(char c, size_t pos) {
  return islower(c) || isupper(c) || c == '_' || (pos > 0 && isdigit(c));
}

static bool isValidIdentifier(const std::string& name) {
  // ...
  for (size_t i = 0; i < name.size(); ++i) {
    if (!isValidIdentifierChar(name[i], i))
      return false;
  }
  return true;
}
// QualifiedName: a refcounted chain of name components (prefix_ + name_),
// used to print dotted parameter paths such as self.submodule.weight.
struct QualifiedName : c10::intrusive_ptr_target {
  QualifiedName(QualifiedNamePtr prefix, std::string name)
      : prefix_(std::move(prefix)), name_(std::move(name)) {}

  static QualifiedNamePtr create(QualifiedNamePtr prefix, std::string name) {
    return c10::make_intrusive<QualifiedName>(
        std::move(prefix), std::move(name));
  }
  static QualifiedNamePtr create(std::string name) {
    return c10::make_intrusive<QualifiedName>(
        QualifiedNamePtr(), std::move(name));
  }

  std::string str() const {
    std::stringstream ss;
    emit(ss);
    return ss.str();
  }

  void emit(std::ostream& out) const {
    if (isValidIdentifier(name_)) {
      // ... (print the prefix, a ".", then the name)
    } else {
      // ... (names that are not valid identifiers fall back to a quoted form)
      printQuotedString(out, name_);
      // ...
    }
  }

  QualifiedNamePtr prefix_;
  std::string name_;
};
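// Note (illustrative): str() on a chain built as
// QualifiedName::create(QualifiedName::create("self"), "weight") renders the
// dotted path "self.weight"; components that are not valid Python identifiers
// are emitted via the quoted-string form printed by printQuotedString above.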
// Build a map from each parameter/attribute slot of a module (and, recursively,
// of its submodules) to the qualified name it should be printed as.
void createTensorToParameterNameMap(
    const script::Module& module,
    const QualifiedNamePtr& prefix,
    std::unordered_map<IValue*, QualifiedNamePtr>& result) {
  for (const auto& elem : module.get_parameters()) {
    const auto& param = elem.value();
    result[param.slot()] = QualifiedName::create(prefix, param.name_);
  }
  for (const auto& elem : module.get_attributes()) {
    const auto& param = elem.value();
    result[param.slot()] = QualifiedName::create(prefix, param.name_);
  }
  for (const auto& elem : module.get_modules()) {
    createTensorToParameterNameMap(
        *elem->module, QualifiedName::create(prefix, elem.key()), result);
  }
}
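// For example (illustrative): called with prefix "self" on a module that owns
// a parameter "weight" and a submodule "conv" with its own "bias", the map ends
// up associating those slots with the names "self.weight" and "self.conv.bias".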
// Names that are valid identifiers but off limits for generated variables
// because they are keywords or otherwise reserved in the printed output.
const static std::unordered_set<std::string> reserved_names = {
    // ...
};

struct PythonPrintPass {
  std::ostream& out;

  // Tensor constants are appended to this table and printed as CONSTANTS.cN,
  // where N is the index into the table.
  std::vector<at::Tensor>& tensor_table_;

  // Any class types used by the printed code are recorded here so they can be
  // written out as dependencies.
  std::vector<ClassTypePtr>& class_table_;

  void addToClassTable(const ClassTypePtr& classType) {
    if (std::find(class_table_.cbegin(), class_table_.cend(), classType) ==
        class_table_.cend()) {
      class_table_.push_back(classType);
    }
  }

  // When printing a node, can its output be emitted inline (without first
  // assigning it to a named temporary)?
  std::unordered_set<Node*> output_inline_;

  // Should we error out if the printed code would not be importable?
  bool enforce_importable_;

  // All variable names used so far in the current function.
  std::unordered_set<std::string> used_names_;

  // All method names used so far in the module.
  std::unordered_set<std::string> used_method_names_;

  // Helper functions (forked subgraphs, lambdas) queued to be printed after
  // the current function.
  std::vector<std::function<void(void)>> worklist;
  // Can value `v` be printed inline at its single point of use, rather than
  // being assigned to a named temporary first?
  bool canInline(Value* v) {
    Node* n = v->node();
    // there must be exactly one output, otherwise we need an assignment to
    // handle the multiple output values
    if (n->outputs().size() != 1)
      return false;
    // if it is used more than once, we need a variable
    if (v->uses().size() != 1)
      return false;
    auto use = v->uses().at(0);
    // if it has a name set, it was written as a variable, so preserve that,
    // unless it is only being returned from the block
    if (v->hasUniqueName() && use.user->kind() != prim::Return)
      return false;
    // don't try to inline control blocks
    if (n->blocks().size() != 0)
      return false;
    // if it is a loop-carried input, we need a variable, otherwise the
    // condition or trip count may be emitted in the wrong order relative to it
    if (use.user->kind() == prim::Loop && use.offset >= 2)
      return false;
    return true;
  }

  // block_point is the current node in the backwards scan of the block;
  // v is an input value we are considering inlining into its user.
  Node* scanValue(Node* block_point, Value* v) {
    Node* n = v->node();
    AT_ASSERT(n->kind() == prim::Constant || output_inline_.count(n) == 0);

    if (n == block_point &&
        canInline(v)) {
      // the defining node sits at the expected point of the traversal, so it
      // can be inlined; recursively scan its own inputs
      block_point = scanNode(block_point);
      output_inline_.insert(n);
    } else if (n->kind() == prim::Constant) {
      // constant nodes can always be inlined; they are de-duplicated on import
      // and hoisted to the top of the function regardless
      output_inline_.insert(n);
    }
    return block_point;
  }
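  // In short (illustrative): a value is inlined only when its node has a single
  // output with a single use, carries no control blocks, and either has no
  // user-visible name or is used only by the block's return; everything else
  // gets a named temporary.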
  // Step back over any constant nodes to find the previous "real" node.
  Node* previousNonConstant(Node* n) {
    do {
      n = n->prev();
    } while (n->kind() == prim::Constant);
    return n;
  }

  Node* scanNode(Node* n) {
    // don't bother to scan nodes we have already decided to inline
    if (output_inline_.count(n)) {
      return n;
    }
    for (auto b : n->blocks()) {
      scanBlock(b);
    }
    Node* block_point = previousNonConstant(n);
    for (auto it = n->inputs().rbegin(), end = n->inputs().rend(); it != end;
         ++it) {
      block_point = scanValue(block_point, *it);
    }
    return block_point;
  }

  void scanBlock(Block* b) {
    scanNode(b->return_node());
    for (auto node : b->nodes().reverse()) {
      scanNode(node);
    }
  }
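  // The scan runs backwards over each block (return node first, then nodes in
  // reverse), so every use is visited before the node that defines it; this is
  // what lets scanValue decide inlining by looking only at the previous node.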
  // Look up (or append) a tensor constant in tensor_table_ and return its
  // index, i.e. the N in the printed name CONSTANTS.cN.
  size_t getOrAddTensorConstant(at::Tensor t) {
    for (size_t i = 0; i < tensor_table_.size(); ++i) {
      if (t.type() == tensor_table_[i].type() && t.equal(tensor_table_[i])) {
        return i;
      }
    }
    tensor_table_.emplace_back(std::move(t));
    return tensor_table_.size() - 1;
  }

  std::unordered_set<Node*> seen_constants;
  void buildConstantList(Node* n, std::vector<Node*>& constants) {
    for (auto input : n->inputs()) {
      if (input->node()->kind() == prim::Constant &&
          seen_constants.count(input->node()) == 0) {
        constants.push_back(input->node());
        seen_constants.insert(input->node());
      }
    }
    for (auto b : n->blocks()) {
      buildConstantList(b, constants);
    }
  }
  void buildConstantList(Block* b, std::vector<Node*>& constants) {
    for (auto n : b->nodes())
      buildConstantList(n, constants);
    buildConstantList(b->return_node(), constants);
  }
  // A pool of unique names: maps a candidate base name to the next numeric
  // suffix to try for it.
  std::unordered_map<std::string, size_t> next_id;

  std::string genNameImpl(
      const std::string& candidate,
      std::unordered_set<std::string>& used) {
    std::string name = candidate;
    while (used.count(name) || reserved_names.count(name)) {
      name = candidate + std::to_string(next_id[name]++);
    }
    used.insert(name);
    return name;
  }
  std::string genName(const std::string& candidate) {
    return genNameImpl(candidate, used_names_);
  }

  // Methods (self.foo) live in a different namespace than local identifiers,
  // so they draw from a separate used-name set.
  std::string genMethodName(const std::string& candidate) {
    return genNameImpl(candidate, used_method_names_);
  }
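  // For example (illustrative): asking genName for "x" three times in a row
  // yields "x", then "x0", then "x1", since each clash appends the next counter
  // for that candidate; entries in reserved_names are skipped the same way.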
  // Unique names from the graph are not always valid Python identifiers,
  // so rewrite them into ones that are.
  static std::string makeValidIdentifier(const std::string& candidate) {
    std::stringstream ss;
    if (candidate.size() == 0 || isdigit(candidate[0]))
      ss << "_";
    for (char c : candidate) {
      if (isupper(c) || islower(c) || isdigit(c) || c == '_')
        ss << c;
      else
        ss << '_';
    }
    return ss.str();
  }

  // If we have to assign `v` a name, what should it be?
  // Use its uniqueName base if it has one, otherwise generate a fresh "_" name.
  // (Illustrative: a unique name base of "1.bias" becomes "_1_bias", which is
  // then uniquified by genName above.)
  std::string genUniqueNameFor(Value* v) {
    return genName(
        v->hasUniqueName() ? makeValidIdentifier(v->uniqueNameBase()) : "_");
  }
  // Map from Value* to the text printed at each of its uses
  // (either a variable name or an inlined expression).
  std::unordered_map<Value*, std::string> value_names_;

  std::string useOf(Value* v) const {
    return value_names_.at(v);
  }
  void assignValue(Value* v, const std::string& s) {
    value_names_[v] = s;
  }
  void assignValue(Value* v, Value* w) {
    assignValue(v, useOf(w));
  }
  void assignValuesToTheirUniqueNames(at::ArrayRef<Value*> values) {
    for (auto v : values) {
      assignValue(v, genUniqueNameFor(v));
    }
  }

  size_t level = 0;
  // indent to the current indent level
  std::ostream& indent() {
    for (size_t i = 0; i < level; ++i) {
      out << "  ";
    }
    return out;
  }
  // Apply `action` pairwise to two lists, which must have equal length.
  template <class T0, class T1, class F>
  void zipWith(at::ArrayRef<T0> list_a, at::ArrayRef<T1> list_b, F action)
      const {
    auto it_a = list_a.begin();
    auto it_b = list_b.begin();

    if (list_a.size() != list_b.size()) {
      AT_ERROR("Python printer expected 2 lists of same size");
    }

    for (; it_a != list_a.end(); ++it_a, ++it_b) {
      action(*it_a, *it_b);
    }
  }

  void printValueList(
      std::ostream& stmt,
      at::ArrayRef<Value*> list,
      const char* begin = "",
      const char* end = "") {
    stmt << begin;
    auto delimiter = "";
    for (auto* value : list) {
      stmt << delimiter;
      stmt << useOf(value);
      delimiter = ", ";
    }
    stmt << end;
  }
  void printDict(
      std::ostream& stmt,
      at::ArrayRef<Value*> key_value_pairs,
      const char* begin = "{",
      const char* end = "}") {
    stmt << begin;
    auto delimiter = "";
    for (size_t i = 0; i < key_value_pairs.size(); i += 2) {
      stmt << delimiter;
      auto key = key_value_pairs[i];
      auto value = key_value_pairs[i + 1];
      stmt << useOf(key) << ": " << useOf(value);
      delimiter = ", ";
    }
    stmt << end;
  }

  void printAssignment(at::ArrayRef<Value*> lhs, at::ArrayRef<Value*> rhs) {
    if (lhs.size() > 0) {
      indent();
      printValueList(out, lhs);
      out << " = ";
      printValueList(out, rhs);
      out << "\n";
    }
  }
  void printIf(IfView stmt) {
    assignValuesToTheirUniqueNames(stmt.outputs());
    indent() << "if " << useOf(stmt.cond()) << ":\n";
    {
      auto guard = WithIndented();
      printBlock(stmt.thenBlock(), stmt.outputs().size() > 0);
      printAssignment(stmt.outputs(), stmt.thenOutputs());
    }
    indent() << "else:\n";
    {
      auto guard = WithIndented();
      printBlock(stmt.elseBlock(), stmt.outputs().size() > 0);
      printAssignment(stmt.outputs(), stmt.elseOutputs());
    }
  }
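  // Illustratively, an If node whose outputs were renamed to y prints as
  //   if cond:
  //     ...then block...
  //     y = <then output>
  //   else:
  //     ...else block...
  //     y = <else output>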
  // Our encoding of loops makes it hard to tell whether a loop started life as
  // a Python `for` or `while`, so inspect the condition and trip-count inputs
  // to decide which form to print.
  static bool shouldEmitAsForLoop(LoopView stmt) {
    auto trip_count = toIValue(stmt.maxTripCount());
    auto cond_input = toIValue(stmt.inputCond());
    auto cond_next = toIValue(stmt.nextCond());

    bool condition_is_always_true =
        cond_input && cond_input->toBool() && cond_next && cond_next->toBool();
    bool trip_count_is_specified = !trip_count || // trip count is dynamic
        trip_count->toInt() !=
            std::numeric_limits<int64_t>::max() || // not the default sentinel
        stmt.currentTripCount()->uses().size() >
            0; // the trip count is actually used in the body

    if (condition_is_always_true) {
      // if the trip count was not specified, this was a user-written `while True:`
      return trip_count_is_specified;
    } else {
      // this must be a while loop, but check that there isn't _also_ a
      // specified trip count
      if (trip_count_is_specified) {
        throw script::ErrorReport(stmt.node()->getSourceLocation())
            << "loop cannot be printed as python "
            << "because it has gone through an optimization "
            << "that combined while and for loops. File a bug.";
      }
      return false;
    }
  }
  void printLoop(LoopView stmt) {
    // Loop-carried dependencies are handled by assigning their initial values
    // to the node outputs before the loop, and re-assigning the outputs at the
    // end of each iteration.
    bool emit_as_for_loop = shouldEmitAsForLoop(stmt);

    assignValuesToTheirUniqueNames(stmt.carriedOutputs());
    // add aliases for the loop-carried dependencies
    zipWith(
        stmt.bodyCarriedInputs(), // skips the trip count / condition inputs
        stmt.carriedOutputs(),
        [&](Value* block_input, Value* node_output) {
          assignValue(block_input, node_output);
        });

    // print the initial assignments: loop outputs = loop inputs
    printAssignment(stmt.carriedOutputs(), stmt.carriedInputs());

    assignValuesToTheirUniqueNames(stmt.currentTripCount());
    // loop header
    if (emit_as_for_loop) {
      indent();
      out << "for " << useOf(stmt.currentTripCount()) << " in range("
          << useOf(stmt.maxTripCount()) << "):\n";
    } else {
      // for while loops, the trip-count Value is reused as the name of the
      // condition variable
      printAssignment(stmt.currentTripCount(), stmt.inputCond());
      indent();
      out << "while " << useOf(stmt.currentTripCount()) << ":\n";
    }
    // loop body
    {
      auto guard = WithIndented();
      // update the block outputs to the block inputs for the next iteration;
      // in for loops, skip the assignment to the condition, which is always True
      size_t offset = emit_as_for_loop ? 1 : 0;
      auto body_block = stmt.bodyBlock();
      auto loop_carried_block_inputs = body_block->inputs().slice(offset);
      printBlock(body_block, loop_carried_block_inputs.size() > 0);
      printAssignment(
          loop_carried_block_inputs, body_block->outputs().slice(offset));
    }
  }
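  // Illustratively, the two loop headers produced above look like
  //   for i in range(max_trip_count):
  // and
  //   cond = <initial condition>
  //   while cond:
  // with the loop-carried values re-assigned at the bottom of the body.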
  bool isLongLine(const std::string& str) {
    return str.size() + level * 2 >= 40;
  }

  bool isLongInline(Node* node) {
    return output_inline_.count(node) && isLongLine(useOf(node->output()));
  }

  bool isNonConstantInline(Value* input) {
    return input->node()->kind() != prim::Constant &&
        output_inline_.count(input->node());
  }

  // [reordering of inlines] If an inlined input would make the line too long,
  // stop inlining it, and also un-inline all the inputs preceding the long
  // input, emitting them as separate assignments so evaluation order is kept.
  void splitLongInlines(at::ArrayRef<Value*> inputs) {
    size_t long_inline_slice = 0;
    // find the last input that is too long
    for (size_t i = 0; i < inputs.size(); ++i) {
      if (isLongInline(inputs[i]->node())) {
        long_inline_slice = i + 1;
      }
    }
    // un-inline everything up to and including that input
    for (size_t i = 0; i < long_inline_slice; ++i) {
      if (isNonConstantInline(inputs[i])) {
        printOutputDefinition(inputs[i]->node(), useOf(inputs[i]));
      }
    }
  }
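  // For example (illustrative): if an expression inlined into an argument would
  // push the line past the ~40 character budget used by isLongLine, that
  // argument and every non-constant inlined argument before it are first
  // emitted as their own assignments, so the evaluation order stays the same.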
  void printOutputDefinition(Node* node, const std::string& str) {
    assignValuesToTheirUniqueNames(node->outputs());
    indent();
    // print the left-hand side of the assignment, if there are any outputs
    if (node->outputs().size() > 0) {
      printValueList(out, node->outputs());
      out << " = ";
    }
    out << str << "\n";
  }

  // Recursively check contained types for any class dependencies, so the
  // classes this graph uses get recorded in class_table_.
  void registerClassDependencies(const TypePtr& type) {
    if (const auto classType = type->cast<ClassType>()) {
      addToClassTable(classType);
    }
    for (const auto& containedType : type->containedTypes()) {
      registerClassDependencies(containedType);
    }
  }
  void printNode(Node* node, bool print_const) {
    // if this node inputs or outputs a class type, register it as a dependency
    for (const auto input : node->inputs()) {
      registerClassDependencies(input->type());
    }
    for (const auto output : node->outputs()) {
      registerClassDependencies(output->type());
    }

    if (!print_const && node->kind() == prim::Constant)
      return;
    if (node->kind() == prim::PythonOp) {
      auto value = static_cast<const PythonOp*>(node);
      if (enforce_importable_ && value->ignore_on_export) {
        // this op was marked as ignored for export, so print a stub in its place
        indent();
        out << "ops.prim.IgnoredPythonOp()\n";
        return;
      }
    }
    splitLongInlines(node->inputs());
    switch (node->kind()) {
      case prim::Return:
        if (enforce_importable_ && node->inputs().size() != 1) {
          throw script::ErrorReport(node->getSourceLocation())
              << "Exportable methods must have a single return value. "
              << "Normal use of ScriptMethods should enforce this.";
        }
        if (node->inputs().size() > 0) {
          indent();
          out << "return ";
          printValueList(out, node->inputs());
          out << "\n";
        }
        break;
      // ... (prim::If and prim::Loop dispatch to printIf / printLoop above)
      case prim::TupleUnpack:
      case prim::ListUnpack:
        assignValuesToTheirUniqueNames(node->outputs());
        indent();
        // the printed targets end with ", = " so the line parses back in as an
        // unpack, e.g. (illustrative)  a, b, = t  or, for a single output,
        // a, = t  where the trailing comma is what forces the unpack
        if (node->outputs().size() > 0) {
          printValueList(out, node->outputs(), "", ", = ");
        }
        out << useOf(node->input()) << "\n";
        break;
      case prim::SetAttr: {
        const auto obj = node->inputs().at(0);
        const auto newVal = node->inputs().at(1);
        const auto type = obj->type()->expect<ClassType>();
        const auto& attrname = node->s(attr::name);
        indent();
        out << useOf(obj) << "." << attrname << " = " << useOf(newVal) << "\n";
      } break;
      default: {
        std::stringstream ss;
        printRHS(ss, node);

        // long constants are prevented from inlining here; it is not safe to
        // do the same for non-constants because of [reordering of inlines]
        if (output_inline_.count(node) == 0 ||
            (node->kind() == prim::Constant && isLongLine(ss.str()))) {
          printOutputDefinition(node, ss.str());
        } else {
          // this node is safe to inline, so use the expression text directly
          // at each use of its output
          assignValue(node->output(), ss.str());
        }
      } break;
    }
  }
  void printMaybeAnnotatedConstantList(
      std::ostream& stmt,
      const char* the_type,
      size_t list_size,
      const IValue& the_list) {
    // an empty list needs an annotate() so its element type survives re-parsing
    if (list_size == 0) {
      stmt << "annotate(List[" << the_type << "], [])";
    } else {
      stmt << the_list;
    }
  }
  // Print a constant IValue. Illustratively, constants render as e.g.
  // CONSTANTS.c0 for a tensor, "hello" for a string, torch.device("cuda:0")
  // for a device, and annotate(List[int], []) for an empty int list.
  void printConstant(std::ostream& stmt, const IValue& v) {
    if (v.isTensor()) {
      stmt << "CONSTANTS.c" << getOrAddTensorConstant(v.toTensor());
    } else if (v.isString()) {
      printQuotedString(stmt, v.toStringRef());
    } else if (v.isDevice()) {
      std::stringstream ss;
      ss << v.toDevice();
      stmt << "torch.device(";
      printQuotedString(stmt, ss.str());
      stmt << ")";
    } else if (v.isTensorList()) {
      stmt << "[";
      const char* delim = "";
      for (const auto& t : v.toTensorListRef()) {
        stmt << delim << "CONSTANTS.c" << getOrAddTensorConstant(t);
        delim = ", ";
      }
      stmt << "]";
    } else if (v.isBoolList()) {
      printMaybeAnnotatedConstantList(
          stmt, "bool", v.toBoolListRef().size(), v);
    } else if (v.isIntList()) {
      printMaybeAnnotatedConstantList(stmt, "int", v.toIntListRef().size(), v);
    } else if (v.isDoubleList()) {
      printMaybeAnnotatedConstantList(
          stmt, "float", v.toDoubleListRef().size(), v);
    } else {
      stmt << v;
    }
  }
  void printNone(std::ostream& stmt, const Node* node) {
    if (node->output()->type()->isSubtypeOf(NoneType::get())) {
      stmt << "None";
      return;
    }
    // When None has an Optional[T] type, the type can only be recovered on
    // re-parsing if every use has a schema whose argument type has no free
    // variables; otherwise the None must be annotated with its optional type.
    const auto& uses = node->output()->uses();
    bool all_usable_schema =
        std::all_of(uses.begin(), uses.end(), [](const Use& u) {
          if (auto schema = u.user->maybeSchema()) {
            if (u.offset >= schema->arguments().size()) {
              return false;
            }
            return !schema->arguments().at(u.offset).type()->hasFreeVariables();
          }
          return false;
        });

    if (all_usable_schema) {
      stmt << "None";
    } else {
      stmt << "annotate(" << node->output()->type()->python_str() << ", None)";
    }
  }
  // Print the expression for the right-hand side of a node, e.g. torch.add(a, b).
  void printRHS(std::ostream& stmt, Node* node) {
    switch (node->kind()) {
      case PythonOp::Kind: {
        auto value = static_cast<const PythonOp*>(node);
        if (enforce_importable_) {
          throw script::ErrorReport(node->getSourceLocation())
              << "could not export python function call " << value->name()
              << ". Remove calls to Python functions before export. "
              << "Did you forget to add @script annotation? "
              << "If this is a modulelist, add it to __constants__.";
        }

        stmt << "^" << value->name();
        value->writeScalars(stmt);
        printValueList(stmt, node->inputs(), "(", ")");
      } break;
      case prim::Constant: {
        if (node->kind() == prim::Constant && !node->mustBeNone()) {
          IValue v = toIValue(node->output()).value();
          printConstant(stmt, v);
        } else {
          printNone(stmt, node);
        }
      } break;
      case prim::ImplicitTensorToNum: {
        stmt << "annotate(" << node->output()->type()->python_str() << ", "
             << useOf(node->input()) << ")";
      } break;
      case prim::Int:
        printValueList(stmt, node->inputs(), "int(", ")");
        break;
      case prim::Float:
        printValueList(stmt, node->inputs(), "float(", ")");
        break;
      case prim::Bool:
        printValueList(stmt, node->inputs(), "bool(", ")");
        break;
      case prim::Print:
        printValueList(stmt, node->inputs(), "print(", ")");
        break;
      case prim::TupleConstruct: {
        // a single-element tuple needs a trailing comma, matching Python's
        // tuple syntax: (illustrative)  (a,)  versus  (a, b)
        printValueList(
            stmt, node->inputs(), "(", node->inputs().size() == 1 ? ",)" : ")");
      } break;
      case prim::TupleIndex: {
        stmt << "(" << useOf(node->input()) << ")[" << node->i(attr::index)
             << "]";
      } break;
      case prim::TupleSlice: {
        stmt << "(" << useOf(node->input()) << ")[" << node->i(attr::beg) << ":"
             << node->i(attr::end) << "]";
      } break;
      case prim::ListConstruct: {
        // an empty list that is not a list of tensors needs an annotate() so
        // its element type can be inferred on import
        if (node->inputs().size() == 0 &&
            !node->output()->type()->isSubtypeOf(TensorType::get())) {
          stmt << "annotate(" << node->output()->type()->python_str()
               << ", [])";
        } else {
          printValueList(stmt, node->inputs(), "[", "]");
        }
      } break;
      case prim::DictConstruct: {
        auto dict_type = node->output()->type()->expect<DictType>();
        // the default dict type is Dict[str, Tensor], which needs no annotation
        bool is_default_type =
            dict_type->getKeyType()->isSubtypeOf(StringType::get()) &&
            dict_type->getValueType()->isSubtypeOf(TensorType::get());
        if (node->inputs().size() == 0 && !is_default_type) {
          stmt << "annotate(" << node->output()->type()->python_str()
               << ", {})";
        } else {
          printDict(stmt, node->inputs());
        }
      } break;
      case prim::DictIndex: {
        stmt << "(" << useOf(node->inputs().at(0)) << ")["
             << useOf(node->inputs().at(1)) << "]";
      } break;
      case prim::fork: {
        // the forked subgraph gets emitted as another function on self, and a
        // fork() call to it is printed in its place
        auto name = genMethodName("__forked_function");
        std::shared_ptr<Graph> graph = node->g(attr::Subgraph);
        worklist.emplace_back(
            [graph, name, this] { printFunctionDefinition(*graph, name); });
        stmt << "fork(self." << name;
        for (Value* v : node->inputs()) {
          stmt << ", " << useOf(v);
        }
        stmt << ")";
      } break;
      case prim::Function: {
        if (enforce_importable_) {
          throw script::ErrorReport(node->getSourceLocation())
              << "closures are not exportable";
        }
        auto name = genMethodName("__lambda");
        std::shared_ptr<Graph> graph = node->g(attr::Subgraph);
        worklist.emplace_back(
            [graph, name, this] { printFunctionDefinition(*graph, name); });
        stmt << "self." << name;
      } break;
      case prim::CreateObject: {
        const auto classType = node->output()->type()->expect<ClassType>();
        stmt << classType->name() << ".__new__(" << classType->name() << ")";
      } break;
      case prim::GetAttr: {
        const auto obj = node->inputs().at(0);
        const auto classType = obj->type()->expect<ClassType>();
        const auto& field = node->s(attr::name);
        stmt << useOf(obj) << "." << field;
      } break;
      default: {
        Symbol kind = node->kind();
        if (kind.is_aten()) {
          // special-case the aten namespace: print it as `torch.` so the
          // output reads like ordinary torch calls
          stmt << "torch." << kind.toUnqualString() << "(";
        } else {
          stmt << "ops." << kind.ns().toUnqualString() << "."
               << kind.toUnqualString() << "(";
        }
        const FunctionSchema& schema = node->schema();
        for (size_t i = 0; i < node->inputs().size(); ++i) {
          if (i > 0) {
            stmt << ", ";
          }
          auto v = useOf(node->inputs().at(i));
          // print the argument name if it is kwarg-only
          if (i < schema.arguments().size()) {
            auto arg = schema.arguments().at(i);
            if (arg.kwarg_only()) {
              stmt << arg.name() << "=";
            }
          } else {
            // vararg functions can have more inputs than schema arguments
            AT_ASSERT(schema.is_vararg());
          }
          stmt << v;
        }
        stmt << ")";
      } break;
    }
  }
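  // Summarizing the default case above (illustrative): aten ops print as
  // torch.<name>(x, y, ...), with any kwarg-only arguments rendered as
  // name=value, while ops from other namespaces print as
  // ops.<namespace>.<name>(...).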
  std::ostream& printBlock(Block* root, bool block_has_other_statements) {
    // Python's 'pass' syntax creates a degenerate case: if the block would
    // otherwise be empty, we must emit a 'pass'
    if (!block_has_other_statements &&
        root->nodes().begin() == root->nodes().end()) {
      indent();
      out << "pass\n";
    }
    for (auto* node : root->nodes()) {
      printNode(node, /*print_const=*/false);
    }
    return out;
  }

  void printDefaultValue(
      const TypePtr& typ,
      std::ostream& stmt,
      const IValue& value) {
    // default values for broadcasting lists are sometimes stored with a scalar
    // type rather than the declared list type; those cannot be serialized
    // faithfully, so they are simply dropped here
    if (typ->kind() == ListType::Kind &&
        (value.isInt() || value.isDouble() || value.isBool())) {
      return;
    }
    stmt << "=";
    printConstant(stmt, value);
  }
  void printFunctionDefinition(
      Graph& graph,
      const std::string& name,
      bool is_class = false,
      const std::vector<c10::optional<IValue>>& defaults = {},
      const std::vector<std::string>& param_names = {}) {
    used_names_.clear(); // each graph can reuse local names

    // constants are printed at the top of the function, in the order in which
    // they are used
    std::vector<Node*> constants;
    buildConstantList(graph.block(), constants);

    // decide which values can be inlined in this graph
    scanBlock(graph.block());

    // the last param_names.size() inputs to the graph are parameters rather
    // than real inputs; they are printed as e.g. self.foo.bar, while the
    // remaining true_inputs become the def's parameter list
    auto true_inputs =
        graph.inputs().slice(0, graph.inputs().size() - param_names.size());
    auto param_names_it = param_names.begin();
    for (auto param : graph.inputs().slice(true_inputs.size())) {
      assignValue(param, *param_names_it++);
    }
    assignValuesToTheirUniqueNames(true_inputs);
    auto defaults_offset = defaults.begin();

    indent();
    out << "def " << name << "(";

    auto input_iter = true_inputs.begin();
    // the first argument (`self` for methods and class methods) is printed
    // without a type annotation and cannot carry a default value
    // ...
    AT_ASSERT(true_inputs.size() > 0);
    out << useOf(*input_iter);
    ++input_iter;
    // ...
    AT_ASSERT(!defaults_offset->has_value());
    ++defaults_offset;
    // ...

    // print the remaining arguments with their types and defaults
    for (; input_iter != true_inputs.end(); ++input_iter) {
      auto input = *input_iter;
      out << ",\n    " << useOf(input) << ": " << input->type()->python_str();
      if (defaults_offset != defaults.end()) {
        const c10::optional<IValue>& def = *defaults_offset++;
        if (def) {
          printDefaultValue(input->type(), out, *def);
        }
      }
    }
    // all provided defaults should have been consumed
    AT_ASSERT(defaults_offset == defaults.end());

    out << ") -> " << resultType(graph)->python_str() << ":\n";
    {
      auto guard = WithIndented();
      // print the initial constant table (most constants are inlined at their
      // uses, but some, like long strings, are emitted here)
      for (Node* n : constants) {
        printNode(n, /*print_const=*/true);
      }
      // print the body, then the return statement
      printBlock(
          graph.block(), graph.block()->return_node()->inputs().size() > 0);
      printNode(graph.block()->return_node(), /*print_const=*/false);
    }
  }
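  // Illustratively, the header emitted above looks like
  //   def forward(self,
  //       x: Tensor,
  //       n: int=1) -> Tensor:
  // while module parameters passed in via param_names are referenced inside the
  // body through their qualified names (e.g. self.weight) rather than appearing
  // as arguments.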
  PythonPrintPass(
      std::ostream& out_,
      std::vector<at::Tensor>& tensor_table,
      std::vector<ClassTypePtr>& class_table,
      bool enforce_importable)
      : out(out_),
        tensor_table_(tensor_table),
        class_table_(class_table),
        enforce_importable_(enforce_importable) {}

  // TODO: we should consider forcing functions to return a single value
  // instead of handling this tuple logic both in the compiler and the printer
  TypePtr resultType(const Graph& graph) {
    if (graph.outputs().size() == 1) {
      return graph.outputs().at(0)->type();
    } else {
      return TupleType::create(
          fmap(graph.outputs(), [&](const Value* v) { return v->type(); }));
    }
  }

  void printFunction(
      Graph& graph,
      const std::string& name,
      bool is_class = false,
      const std::vector<c10::optional<IValue>>& defaults = {},
      const std::vector<std::string>& param_names = {}) {
    printFunctionDefinition(graph, name, is_class, defaults, param_names);
    while (!worklist.empty()) {
      out << "\n\n";
      // running a work item may add more items to the worklist
      auto work = worklist.back();
      worklist.pop_back();
      work();
    }
  }
  void printMethod(script::Method& method) {
    std::unordered_map<IValue*, QualifiedNamePtr> extra_ivalue_names;
    createTensorToParameterNameMap(
        method.owner(), QualifiedName::create("self"), extra_ivalue_names);
    printMethod(method, /*is_class=*/false, extra_ivalue_names);
  }

  void printMethod(
      script::Method& method,
      bool is_class,
      const std::unordered_map<IValue*, QualifiedNamePtr>& extra_ivalue_names) {
    std::vector<std::string> ivalue_names = fmap(
        method.initial_ivalues(),
        [&](IValue* slot) { return extra_ivalue_names.at(slot)->str(); });
    const std::string& name = method.name();
    Graph& graph = *method.graph();
    auto defaults = fmap(
        method.getSchema().arguments(),
        [](const Argument& arg) { return arg.default_value(); });
    printFunction(graph, name, is_class, defaults, ivalue_names);
  }
  void printModule(script::Module& module) {
    std::unordered_map<IValue*, QualifiedNamePtr> extra_ivalue_names;
    createTensorToParameterNameMap(
        module, QualifiedName::create("self"), extra_ivalue_names);
    for (auto& method : module.get_methods()) {
      const std::string& name = method.value()->name();
      // __forked_function helpers are printed as part of the method that forks
      // them (via the worklist above), so skip them here to avoid emitting
      // them twice
      if (name.find("__forked_function") == 0) {
        continue;
      }
      printMethod(*method.value(), /*is_class=*/false, extra_ivalue_names);
    }
  }

  void printClass(const ClassTypePtr& classType) {
    out << "class " << classType->name() << ":\n";
    {
      const auto guard = WithIndented();
      std::unordered_map<IValue*, QualifiedNamePtr> extra_ivalue_names;
      for (auto& method : classType->methods()) {
        printMethod(*method, /*is_class=*/true, extra_ivalue_names);
      }
    }
  }
};
TORCH_API void PythonPrint(
    std::ostream& out,
    const Graph& graph,
    std::vector<at::Tensor>& tensor_table,
    std::vector<ClassTypePtr>& class_table,
    bool enforce_importable) {
  PythonPrintPass pp(out, tensor_table, class_table, enforce_importable);
  pp.printFunction(const_cast<Graph&>(graph), "graph", /*is_class=*/false);
}

TORCH_API void PythonPrint(
    std::ostream& out,
    const script::Method& method,
    std::vector<at::Tensor>& tensor_table,
    std::vector<ClassTypePtr>& class_table,
    bool enforce_importable) {
  PythonPrintPass pp(out, tensor_table, class_table, enforce_importable);
  pp.printMethod(const_cast<script::Method&>(method));
}

TORCH_API void PythonPrint(
    std::ostream& out,
    const script::Module& module,
    std::vector<at::Tensor>& tensor_table,
    std::vector<ClassTypePtr>& class_table,
    bool enforce_importable) {
  PythonPrintPass pp(out, tensor_table, class_table, enforce_importable);
  pp.printModule(const_cast<script::Module&>(module));
}

TORCH_API void PythonPrint(
    std::ostream& out,
    const ClassTypePtr& classType,
    std::vector<at::Tensor>& tensor_table,
    std::vector<ClassTypePtr>& class_table,
    bool enforce_importable) {
  PythonPrintPass pp(out, tensor_table, class_table, enforce_importable);
  pp.printClass(classType);
}
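// Usage sketch (illustrative): serializing a graph to TorchScript source text.
//
//   std::ostringstream src;
//   std::vector<at::Tensor> tensor_table;
//   std::vector<ClassTypePtr> class_table;
//   PythonPrint(src, graph, tensor_table, class_table, /*enforce_importable=*/true);
//
// src.str() then holds the printed "def graph(...)" text, tensor_table holds
// the tensor constants it refers to as CONSTANTS.cN, and class_table lists any
// class types the graph depends on.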
// Returns true if the printer handles this symbol without relying on a schema:
// either printRHS above has an explicit case for it, or the symbol is only
// ever introduced by optimization passes and therefore never needs to be
// printed for export.
TORCH_API bool printerHasSpecialCaseFor(Symbol sym) {
  const static std::unordered_set<Symbol> handled = {
      // ...
      prim::ListConstruct,
      prim::DictConstruct,
      // ...
      prim::TupleConstruct,
      // ...
  };

  const static std::unordered_set<Symbol> unneeded = {
      // ...
      prim::AutogradAnyNonZero,
      // ...
      prim::ConstantChunk,
      prim::DifferentiableGraph,
      prim::BroadcastSizes,
      // ...
  };

  return handled.count(sym) || unneeded.count(sym);
}
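// Note (illustrative): an operator without a schema is only printable if it
// appears in one of the two sets above; anything else should be registered
// with a schema so the generic torch./ops. path in printRHS can handle it.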