#include <aten/src/ATen/Context.h>
#include <torch/csrc/autograd/edge.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/generated/variable_factories.h>
#include <torch/csrc/autograd/profiler.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/jit/custom_operator.h>
#include <torch/csrc/jit/fuser/interface.h>
#include <torch/csrc/jit/graph_executor.h>
#include <torch/csrc/jit/ir.h>
#include <torch/csrc/jit/operator.h>
#include <torch/csrc/jit/script/jit_exception.h>

#include <ATen/ExpandUtils.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/core/ivalue.h>
#include <c10/core/thread_pool.h>
#include <c10/util/SmallVector.h>

#include <unordered_map>
#include <unordered_set>

Operation noop(const Node* n) {
  return [](Stack& stack) { return 0; };
}
void checkImplicitTensorToNum(at::Tensor t, bool toInt) {
  if (autograd::as_variable_ref(t).requires_grad()) {
    throw std::runtime_error(
        "Cannot input a tensor that requires grad as a scalar argument");
  }
  if (t.sizes().size() != 0) {
    throw std::runtime_error(
        "Cannot input a tensor of dimension other than 0 as a scalar argument");
  }
  if (toInt &&
      !isIntegralType(autograd::as_variable_ref(t).data().scalar_type())) {
    std::stringstream ss;
    ss << "Cannot input a tensor of type " << t.scalar_type()
       << " as an integral argument";
    throw std::runtime_error(ss.str());
  }
}
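// Illustrative sketch (comment only, hypothetical values): what the guard
// above accepts and rejects. A zero-dimensional tensor without grad history
// passes; anything else throws.
//   checkImplicitTensorToNum(torch::tensor(3), /*toInt=*/true);  // ok
//   checkImplicitTensorToNum(torch::ones({2}), /*toInt=*/true);  // throws:
//       "Cannot input a tensor of dimension other than 0 ..."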
template <typename dtype> // int64_t, double, or bool
Operation listConstruct(int64_t num_inputs) {
  return [=](Stack& stack) {
    auto inputs = peekSlice(stack, 0, num_inputs, num_inputs);
    std::vector<dtype> vals =
        fmap(inputs, [](const IValue& v) { return v.to<dtype>(); });
    drop(stack, num_inputs);
    push(stack, std::move(vals));
    return 0;
  };
}
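// Usage sketch (comment only): listConstruct<int64_t>(3) yields an Operation
// that pops the top three ints and pushes a single int[] in their place, e.g.
//   stack: [..., 1, 2, 3]  --op-->  stack: [..., [1, 2, 3]]
// prim::ListConstruct below dispatches to exactly these instantiations.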
static int64_t floordiv(int64_t a, int64_t b) {
  if (b == 0) {
    throw std::runtime_error("division by 0");
  }
  if ((a > 0) == (b > 0)) {
    // simple case: both operands have the same sign
    return a / b;
  } else {
    // Python floor division rounds down; C++ truncates toward zero,
    // so correct the quotient when there is a nonzero remainder.
    auto r = lldiv(a, b);
    return (r.rem) ? r.quot - 1 : r.quot;
  }
}
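// Worked example (comment only):
//   floordiv(7, 2)   -> signs match, plain 7 / 2 == 3
//   floordiv(-7, 2)  -> lldiv gives quot == -3, rem == -1, so -3 - 1 == -4
// which matches Python's -7 // 2 == -4, whereas C++ -7 / 2 truncates to -3.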
at::Tensor to_dispatch(
    at::Tensor self,
    c10::optional<at::Device> device,
    c10::optional<at::ScalarType> scalarType,
    bool non_blocking,
    bool copy) {
  if (device && device->is_cuda()) {
    at::globalContext().lazyInitCUDA();
  }
  if (!device && !scalarType && !copy) {
    return self;
  } else if (!device) {
    return self.to(*scalarType, non_blocking, copy);
  } else if (!scalarType) {
    return self.to(*device, non_blocking, copy);
  } else {
    return self.to(*device, *scalarType, non_blocking, copy);
  }
}
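// Dispatch summary (comment only, illustrative argument names):
//   to_dispatch(t, nullopt, nullopt, nb, /*copy=*/false) -> t (no-op)
//   to_dispatch(t, nullopt, kFloat,  nb, copy)           -> t.to(kFloat, ...)
//   to_dispatch(t, cuda0,   nullopt, nb, copy)           -> t.to(cuda0, ...)
//   to_dispatch(t, cuda0,   kFloat,  nb, copy)           -> t.to(cuda0, kFloat, ...)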
RegisterOperators reg({
    Operator(
        prim::FusionGroup,
        [](const Node* node) {
          const auto key = registerFusion(node);
          return [key](Stack& stack) {
            autograd::profiler::RecordFunction record("FusionGroup");
            runFusion(key, stack);
            return 0;
          };
        }),
    Operator(
        "prim::rangelist(int n) -> int[]",
        [](Stack& stack) {
          int64_t n;
          pop(stack, n);
          std::vector<int64_t> elems(n);
          for (int i = 0; i < n; i++) {
            elems[i] = i;
          }
          push(stack, jit::IntList::create(elems));
          return 0;
        }),
136 "prim::Bool(Tensor a) -> bool",
140 push(stack, a.item<int64_t>() != 0);
144 "prim::Bool(int a) -> bool",
148 push(stack, (
bool)i);
152 "prim::Bool(float a) -> bool",
156 push(stack, (
bool)d);
160 "prim::Int(Tensor a) -> int",
164 push(stack, a.item<int64_t>());
168 "prim::Float(Tensor a) -> float",
172 push(stack, a.item<
double>());
176 "prim::ImplicitTensorToNum(Tensor a) -> Scalar",
177 [](
const Node* node) -> Operation {
178 if (node->output()->type() == IntType::get()) {
179 return [](Stack& stack) {
182 checkImplicitTensorToNum(a,
true);
183 push(stack, a.item<int64_t>());
187 return [](Stack& stack) {
190 checkImplicitTensorToNum(a,
false);
191 push(stack, a.item<
double>());
197 "prim::NumToTensor(Scalar a) -> Tensor",
201 push(stack, autograd::make_variable(at::scalar_to_tensor(s)));
207 "prim::NumToTensor(bool a) -> Tensor",
211 push(stack, autograd::make_variable(at::scalar_to_tensor(b)));
215 "prim::Float(Scalar a) -> float",
219 if (scalar.isDouble()) {
222 push(stack, static_cast<double>(scalar.toInt()));
227 "prim::Float(int a) -> float",
231 push(stack, (
float)i);
235 "prim::Int(float a) -> int",
239 push(stack, (int64_t)d);
243 "prim::Float(bool a) -> float",
247 push(stack, (
float)b);
251 "prim::Int(bool a) -> int",
259 "prim::Int(Scalar a) -> float",
263 if (scalar.isInt()) {
266 push(stack, static_cast<int64_t>(scalar.toDouble()));
271 "prim::Float(str a) -> float",
273 auto s = pop(stack).toString();
274 if (s->string() ==
"inf")
275 push(stack, std::numeric_limits<double>::infinity());
276 else if (s->string() ==
"-inf")
277 push(stack, -std::numeric_limits<double>::infinity());
280 "Only 'inf' or '-inf' can be cast to a float, but got '",
286 "aten::device(str a) -> Device",
288 push(stack,
c10::Device(pop(stack).toStringRef()));
293 "aten::to(Tensor(a) self, Device? device, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)",
297 pop(stack, non_blocking, copy);
299 pop(stack).toOptional<at::ScalarType>();
305 to_dispatch(
self, device, scalarType, non_blocking, copy));
309 "aten::to(Tensor(a) self, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)",
313 pop(stack, non_blocking, copy);
315 pop(stack).toOptional<at::ScalarType>();
317 at::Tensor
self = pop(stack).toTensor();
320 to_dispatch(
self, device, scalarType, non_blocking, copy));
324 "aten::to(Tensor(a) self, bool non_blocking=False, bool copy=False) -> Tensor(a|b)",
329 pop(stack,
self, non_blocking, copy);
334 to_dispatch(
self, device, scalarType, non_blocking, copy));
338 "aten::eq(Device a, Device b) -> bool",
340 auto a = pop(stack).toDevice();
341 auto b = pop(stack).toDevice();
346 "prim::device(Tensor a) -> Device",
348 push(stack, pop(stack).toTensor().device());
352 "prim::dtype(Tensor a) -> int",
356 push(stack, static_cast<int64_t>(a.scalar_type()));
360 "prim::requires_grad(Tensor a) -> bool",
364 push(stack, a.requires_grad());
368 "prim::shape(Tensor a) -> int[]",
372 push(stack, a.sizes());
376 "prim::is_cuda(Tensor a) -> bool",
384 "aten::cpu(Tensor(a) self) -> Tensor(a|b)",
388 push(stack, a.cpu());
392 "aten::cuda(Tensor(a) self) -> Tensor(a|b)",
396 push(stack, a.cuda());
400 "prim::AutogradZero() -> Tensor",
401 [](
const Node* node) {
402 return [](Stack& stack) {
403 stack.emplace_back(at::Tensor());
409 [](
const Node* node) {
410 size_t num_inputs = node->inputs().size();
411 return [num_inputs](Stack& stack) {
413 for (
const IValue& i : last(stack, num_inputs)) {
419 drop(stack, num_inputs);
420 std::cout << std::endl;
    Operator(
        prim::BroadcastSizes,
        [](const Node* node) -> Operation {
          size_t num_inputs = node->inputs().size();
          return [num_inputs](Stack& stack) {
            std::vector<int64_t> size;
            size.reserve(8);
            for (size_t i = 0; i < num_inputs; ++i) {
              size = at::infer_size(
                  size, peek(stack, i, num_inputs).toIntList()->elements());
            }
            drop(stack, num_inputs);
            push(stack, std::move(size));
            return 0;
          };
        }),
    Operator(
        prim::ChunkSizes,
        [](const Node* node) -> Operation {
          int64_t raw_dim = node->i(attr::dim);
          int64_t chunks = node->i(attr::chunks);
          return [raw_dim, chunks](Stack& stack) {
            Shared<IntList> sizes_l;
            pop(stack, sizes_l);
            const auto& shape = sizes_l->elements();
            std::vector<int64_t> regular_shape = shape;
            std::vector<int64_t> last_shape = shape;
            int64_t dim = at::maybe_wrap_dim(raw_dim, shape.size());
            AT_CHECK(
                dim < (int64_t)regular_shape.size(),
                "Dimension out of range for chunk");
            int64_t split_size = (regular_shape[dim] + chunks - 1) / chunks;
            regular_shape[dim] = split_size;
            if (shape[dim] % chunks == 0) {
              last_shape[dim] = split_size;
            } else {
              int64_t num_splits = std::max<int64_t>(
                  (shape[dim] + split_size - 1) / split_size, 1);
              last_shape[dim] =
                  split_size - (split_size * num_splits - shape[dim]);
              AT_ASSERT(last_shape[dim] >= 0);
            }
            push(stack, std::move(regular_shape));
            push(stack, std::move(last_shape));
            return 0;
          };
        }),
    Operator(
        FunctionSchema(
            "aten::warn",
            {Argument("message", StringType::get()),
             Argument("stacklevel", IntType::get(), c10::nullopt, 2, true)},
            {}),
        [](const Node* node) {
          return [](Stack& stack) {
            drop(stack, 1); // ignore the stacklevel argument
            AT_WARN(pop(stack).toStringRef());
            return 0;
          };
        }),
    Operator(
        "prim::RaiseException(str msg) -> ()",
        [](Stack& stack) {
          throw JITException(pop(stack).toStringRef());
          return 0;
        }),
    Operator(
        "prim::IgnoredPythonOp(...) -> ()",
        [](Stack& stack) -> int {
          throw JITException(
              "This Python function is annotated to be ignored"
              " and cannot be and has not been included in the exported"
              " binary, meaning that it cannot be executed now."
              " Make sure that ignored operations are never executed after"
              " import");
          return 0;
        }),

    // Load x, y: loads values from registers onto the stack.
    Operator(prim::Load, noop),
    // x, y = Store: stores values from the stack into registers.
    Operator(prim::Store, noop),
    Operator(
        prim::Drop,
        [](const Node* node) {
          auto N = node->inputs().size();
          return [=](Stack& stack) {
            drop(stack, N);
            return 0;
          };
        }),
    Operator(
        c10::onnx::Reshape,
        [](const Node* node) {
          return [=](Stack& stack) {
            at::Tensor input, shape;
            pop(stack, input, shape);
            shape = shape.contiguous();
            AT_ASSERT(shape.ndimension() == 1);
            at::IntList shape_list(shape.data<int64_t>(), shape.size(0));
            push(stack, input.reshape(shape_list));
            return 0;
          };
        }),
    Operator(
        c10::onnx::Shape,
        [](const Node* node) {
          return [=](Stack& stack) {
            auto t = pop(stack).toTensor();
            at::IntList sizes = t.sizes();
            auto sizes_tensor = torch::empty(
                {static_cast<int64_t>(sizes.size())}, at::dtype(at::kLong));
            auto accessor = sizes_tensor.accessor<int64_t, 1>();
            for (size_t i = 0; i < sizes.size(); ++i) {
              accessor[i] = sizes[i];
            }
            stack.emplace_back(sizes_tensor);
            return 0;
          };
        }),
    Operator(
        prim::AutogradAnyNonZero,
        [](const Node* node) {
          size_t num_inputs = node->inputs().size();
          return [=](Stack& stack) {
            bool result = false;
            for (const IValue& t : last(stack, num_inputs)) {
              if (t.toTensor().defined()) {
                result = true;
                break;
              }
            }
            drop(stack, num_inputs);
            stack.emplace_back(result);
            return 0;
          };
        }),
    Operator(
        prim::AutogradAdd,
        [](const Node* node) {
          return [=](Stack& stack) {
            at::Tensor a, b;
            pop(stack, a, b);
            if (!a.defined())
              stack.emplace_back(b);
            else if (!b.defined())
              stack.emplace_back(a);
            else
              stack.emplace_back(a + b);
            return 0;
          };
        }),
    Operator(
        "aten::_grad_sum_to_size(Tensor(a) self, int[] size) -> Tensor(a)",
        [](Stack& stack) {
          at::Tensor self;
          Shared<IntList> desired_sizes;
          pop(stack, self, desired_sizes);
          push(stack, at::sum_to(std::move(self), desired_sizes->elements()));
          return 0;
        }),
    Operator(
        prim::TupleUnpack,
        [](const Node* node) {
          size_t num_elems = node->outputs().size();
          return [=](Stack& stack) {
            auto t = pop(stack).toTuple();
            const auto& elems = t->elements();
            if (elems.size() != num_elems) {
              AT_ERROR(
                  "Expected a tuple of ",
                  num_elems,
                  " elements, but got ",
                  elems.size());
            }
            stack.insert(stack.end(), elems.begin(), elems.end());
            return 0;
          };
        }),
    Operator(
        prim::TupleSlice,
        [](const Node* node) {
          int64_t beg_ind = node->i(attr::beg);
          int64_t end_ind = node->i(attr::end);
          return [=](Stack& stack) {
            auto t = pop(stack).toTuple();
            const auto& elems = t->elements();
            std::vector<IValue> output_elems;
            for (int64_t i = beg_ind; i < end_ind; ++i) {
              output_elems.emplace_back(elems.at(i));
            }
            push(stack, Tuple::create(std::move(output_elems)));
            return 0;
          };
        }),
    Operator(
        prim::TupleIndex,
        [](const Node* node) {
          auto index = node->i(attr::index);
          return [=](Stack& stack) {
            auto tup = pop(stack).toTuple();
            const auto& elems = tup->elements();
            // index is normalized to be positive at compile time
            stack.emplace_back(elems.at(index));
            return 0;
          };
        }),
    Operator(
        prim::TupleConstruct,
        [](const Node* node) {
          size_t num_inputs = node->inputs().size();
          return [=](Stack& stack) {
            std::vector<IValue> elems{
                std::make_move_iterator(stack.end() - num_inputs),
                std::make_move_iterator(stack.end())};
            drop(stack, num_inputs);
            push(stack, Tuple::create(std::move(elems)));
            return 0;
          };
        }),
    Operator(
        prim::ConstantChunk,
        [](const Node* node) {
          int64_t chunks = node->i(attr::chunks);
          int64_t dim = node->i(attr::dim);
          auto outputs_used = fmap(node->outputs(), [](const Value* v) {
            return v->uses().size() > 0;
          });
          return [=](Stack& stack) {
            autograd::profiler::RecordFunction record("chunk");
            at::Tensor t;
            pop(stack, t);
            auto result = at::chunk(t, chunks, dim);
            stack.insert(
                stack.end(),
                std::make_move_iterator(result.begin()),
                std::make_move_iterator(result.end()));
            // chunk can return fewer outputs than requested when the tensor
            // is too small along dim; pad with undefined tensors if so.
            int64_t num_results = result.size();
            if (num_results != chunks) {
              if (num_results > chunks) {
                AT_CHECK(
                    num_results == chunks,
                    "Expected chunk to return ",
                    chunks,
                    " outputs, but got ",
                    num_results);
              }
              for (int64_t i = num_results; i < chunks; ++i) {
                AT_CHECK(
                    !outputs_used[i],
                    "Expected chunk to return at least ",
                    chunks,
                    " outputs, but got only ",
                    num_results);
                // This output is unused, so any placeholder value will do.
                stack.emplace_back();
              }
            }
            return 0;
          };
        }),
    Operator(
        prim::ListUnpack,
        [](const Node* node) -> Operation {
          const auto num_outputs = node->outputs().size();
          ListTypePtr lt = node->input()->type()->expect<ListType>();
          if (lt->getElementType() == IntType::get()) {
            return [=](Stack& stack) {
              auto ilist = pop(stack);
              const auto& list = ilist.toIntList()->elements();
              AT_CHECK(
                  list.size() == num_outputs,
                  "Expected ",
                  num_outputs,
                  " elements in a list but found ",
                  list.size());
              stack.insert(stack.end(), list.begin(), list.end());
              return 0;
            };
          } else if (lt->getElementType() == FloatType::get()) {
            return [=](Stack& stack) {
              auto ilist = pop(stack);
              const auto& list = ilist.toDoubleList()->elements();
              AT_CHECK(
                  list.size() == num_outputs,
                  "Expected ",
                  num_outputs,
                  " elements in a list but found ",
                  list.size());
              stack.insert(stack.end(), list.begin(), list.end());
              return 0;
            };
          } else if (lt->getElementType() == TensorType::get()) {
            return [=](Stack& stack) {
              auto ilist = pop(stack);
              const auto& list = ilist.toTensorList()->elements();
              AT_CHECK(
                  list.size() == num_outputs,
                  "Expected ",
                  num_outputs,
                  " elements in a list but found ",
                  list.size());
              stack.insert(stack.end(), list.begin(), list.end());
              return 0;
            };
          } else {
            return [=](Stack& stack) {
              auto glist = pop(stack);
              const auto& list = glist.toGenericList()->elements();
              AT_CHECK(
                  list.size() == num_outputs,
                  "Expected ",
                  num_outputs,
                  " elements in a list but found ",
                  list.size());
              stack.insert(stack.end(), list.begin(), list.end());
              return 0;
            };
          }
        }),
    Operator(
        prim::ListConstruct,
        [](const Node* node) -> Operation {
          const auto num_inputs = node->inputs().size();
          ListTypePtr lt = node->output()->type()->expect<ListType>();
          if (IntType::get() == lt->getElementType()) {
            return listConstruct<int64_t>(num_inputs);
          } else if (FloatType::get() == lt->getElementType()) {
            return listConstruct<double>(num_inputs);
          } else if (lt->getElementType() == BoolType::get()) {
            return listConstruct<bool>(num_inputs);
          } else if (lt->getElementType()->isSubtypeOf(TensorType::get())) {
            return [=](Stack& stack) {
              const size_t stack_size = stack.size();
              std::vector<at::Tensor> vals;
              vals.reserve(num_inputs);
              for (size_t i = stack_size - num_inputs; i < stack_size; ++i) {
                vals.emplace_back(std::move(stack[i]).toTensor());
              }
              drop(stack, num_inputs);
              push(stack, std::move(vals));
              return 0;
            };
          } else {
            return [=](Stack& stack) {
              const size_t stack_size = stack.size();
              std::vector<IValue> vals;
              vals.reserve(num_inputs);
              for (size_t i = stack_size - num_inputs; i < stack_size; ++i) {
                vals.emplace_back(std::move(stack[i]));
              }
              drop(stack, num_inputs);
              push(stack, std::move(vals));
              return 0;
            };
          }
        }),
    Operator(
        prim::DictConstruct,
        [](const Node* node) -> Operation {
          const auto num_inputs = node->inputs().size();
          if (num_inputs % 2 != 0) {
            throw std::runtime_error(
                "DictConstruct must have an even number of inputs");
          }
          return [=](Stack& stack) {
            c10::ivalue::UnorderedMap vals;
            for (size_t i = 0; i < num_inputs; i += 2) {
              auto val = pop(stack);
              auto key = pop(stack);
              vals[key] = val;
            }
            push(stack, std::move(vals));
            return 0;
          };
        }),
    Operator(
        "aten::_unwrap_optional(t(a)? optional) -> t(a)",
        [](Stack& stack) {
          auto val = pop(stack);
          AT_CHECK(!val.isNone(), "Unwrapping null optional");
          push(stack, val);
          return 0;
        }),
    // This op could be removed in preprocessing, but it must stay registered
    // so that constant propagation can run over it.
    Operator("prim::unchecked_unwrap_optional(t(a)? optional) -> t(a)", noop),
    Operator(
        prim::fork,
        [](const Node* node) {
          Code code(node->g(attr::Subgraph));
          int n_inputs = node->inputs().size();
          AT_ASSERT(node->blocks().size() == 0);
          AT_ASSERT(node->hasAttribute(attr::Subgraph));
          return [=](Stack& stack) {
            // Move the inputs to a separate stack owned by the continuation.
            InterpreterState forked_interpreter(code);
            InterpreterContinuation continuation(
                forked_interpreter,
                Stack(stack.end() - n_inputs, stack.end()),
                autograd::GradMode::is_enabled());
            drop(stack, n_inputs);
            push(stack, forked_interpreter.getFuture());
            c10::global_work_queue().run(std::move(continuation));
            return 0;
          };
        }),
    Operator(
        "aten::wait(Future(t) self) -> t",
        [](Stack& stack) {
          auto future = pop(stack).toFuture();
          if (future->completed()) {
            push(stack, future->value());
          } else {
            throw Suspend(future);
          }
          return 0;
        }),
    Operator(
        prim::CreateObject,
        [](const Node* node) {
          const auto type = node->output()->type()->expect<ClassType>();
          const auto name = Symbol::user(type->name());
          const size_t numAttrs = type->numAttributes();
          return [name, numAttrs](Stack& stack) {
            auto userObj = c10::ivalue::Object::create(name, numAttrs);
            push(stack, std::move(userObj));
            return 0;
          };
        }),
    Operator(
        prim::GetAttr,
        [](const Node* node) {
          const auto type = node->input()->type()->expect<ClassType>();
          const auto& field = node->s(attr::name);
          const auto slot = type->getAttributeSlot(field);
          return [slot](Stack& stack) {
            auto userObj = pop(stack).toObject();
            auto value = userObj->getSlot(slot);
            push(stack, std::move(value));
            return 0;
          };
        }),
    Operator(prim::SetAttr, [](const Node* node) {
      const auto type = node->inputs().at(0)->type()->expect<ClassType>();
      const auto& field = node->s(attr::name);
      const auto slot = type->getAttributeSlot(field);
      return [slot](Stack& stack) {
        auto v = pop(stack);
        auto userObj = pop(stack).toObject();
        userObj->setSlot(slot, std::move(v));
        return 0;
      };
    }),
});
#define DEFINE_GENERIC_OP(aten_op, int_op, float_op, int_result, float_result) \
  Operator( \
      #aten_op "(int a, int b) -> " #int_result, \
      [](Stack& stack) { \
        int64_t a, b; \
        pop(stack, a, b); \
        push(stack, int_op); \
        return 0; \
      }), \
      Operator( \
          #aten_op "(float a, float b) -> " #float_result, [](Stack& stack) { \
            double a, b; \
            pop(stack, a, b); \
            push(stack, float_op); \
            return 0; \
          })

#define DEFINE_INT_FLOAT_OP(aten_op, op, result) \
  Operator( \
      #aten_op "(int a, float b) -> " #result, \
      [](Stack& stack) { \
        int64_t a; \
        double b; \
        pop(stack, a, b); \
        push(stack, op); \
        return 0; \
      }), \
      Operator(#aten_op "(float a, int b) -> " #result, [](Stack& stack) { \
        double a; \
        int64_t b; \
        pop(stack, a, b); \
        push(stack, op); \
        return 0; \
      })

#define DEFINE_INT_OP(aten_op, op) \
  Operator(#aten_op "(int a, int b) -> int", [](Stack& stack) { \
    int64_t a, b; \
    pop(stack, a, b); \
    push(stack, op); \
    return 0; \
  })

#define DEFINE_BINARY_OP(aten_op, op) \
  DEFINE_GENERIC_OP(aten_op, op, op, int, float), \
      DEFINE_INT_FLOAT_OP(aten_op, op, float)
#define DEFINE_COMPARISON_OP(aten_op, op) \
  DEFINE_GENERIC_OP(aten_op, op, op, bool, bool), \
      DEFINE_INT_FLOAT_OP(aten_op, op, bool)
#define DEFINE_BOOL_OP(aten_op, op) \
  Operator(#aten_op "(bool a, bool b) -> bool", [](Stack& stack) { \
    bool a, b; \
    pop(stack, a, b); \
    push(stack, op); \
    return 0; \
  })

// Convert a Python-style index (which may be negative) into one usable with a
// C++ container.
int64_t normalizeIndex(int64_t idx, int64_t list_size) {
  if (idx < 0) {
    // Handle negative indexing.
    idx = list_size + idx;
  }
  return idx;
}
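// Expansion sketch (comment only): DEFINE_INT_OP(aten::__and__, a & b)
// produces, after preprocessing, roughly
//   Operator("aten::__and__(int a, int b) -> int", [](Stack& stack) {
//     int64_t a, b;
//     pop(stack, a, b);
//     push(stack, a & b);
//     return 0;
//   })
// and DEFINE_BINARY_OP / DEFINE_COMPARISON_OP stamp out the int, float, and
// mixed int/float overloads the same way.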
template <typename TList>
typename TList::element_type::ElemType& getItem(TList& list, int64_t idx) {
  const int64_t list_size = list->elements().size();
  const int64_t normalized_idx = normalizeIndex(idx, list_size);
  if (normalized_idx < 0 || normalized_idx >= list_size) {
    throw std::out_of_range("list index out of range");
  }
  return list->elements()[normalized_idx];
}
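// Worked example (comment only): for a 4-element list,
//   normalizeIndex(-1, 4) == 3, so getItem(list, -1) is the last element;
//   normalizeIndex(4, 4) == 4, which fails the bounds check and throws
//   std::out_of_range, matching Python's IndexError behavior.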
bool getBoolItem(const std::vector<bool>& list, int64_t idx) {
  const int64_t list_size = list.size();
  const int64_t normalized_idx = normalizeIndex(idx, list_size);
  if (normalized_idx < 0 || normalized_idx >= list_size) {
    throw std::out_of_range("list index out of range");
  }
  return list[normalized_idx];
}

template <typename TList, typename TElement>
int listAppend(Stack& stack) {
  TList a;
  TElement el;
  pop(stack, a, el);

  a->elements().push_back(el);
  push(stack, a);
  return 0;
}
template <typename TList>
int listReverse(Stack& stack) {
  TList a;
  pop(stack, a);

  auto& elements = a->elements();
  std::reverse(elements.begin(), elements.end());
  return 0;
}

template <typename TList>
int listPop(Stack& stack) {
  TList list;
  int64_t idx;
  pop(stack, list, idx);

  auto& elements = list->elements();
  const int64_t list_size = elements.size();
  const int64_t normalized_idx = normalizeIndex(idx, list_size);

  if (list_size == 0) {
    AT_ERROR("pop from empty list");
  }
  push(stack, std::move(getItem(list, idx)));
  elements.erase(elements.begin() + normalized_idx);
  return 0;
}

template <>
int listPop<Shared<BoolList>>(Stack& stack) {
  Shared<BoolList> list;
  int64_t idx;
  pop(stack, list, idx);

  auto& elements = list->elements();
  const int64_t list_size = elements.size();
  const int64_t normalized_idx = normalizeIndex(idx, list_size);

  if (list_size == 0) {
    AT_ERROR("pop from empty list");
  }
  push(stack, getBoolItem(elements, idx));
  elements.erase(elements.begin() + normalized_idx);
  return 0;
}
template <typename TList>
int listClear(Stack& stack) {
  TList a;
  pop(stack, a);

  a->elements().clear();
  return 0;
}

template <typename TList, typename TElement>
int listInsert(Stack& stack) {
  TList list;
  int64_t idx;
  TElement elem;
  pop(stack, list, idx, elem);

  auto& elements = list->elements();
  const int64_t list_size = elements.size();
  const int64_t normalized_idx = normalizeIndex(idx, list_size);

  if (normalized_idx < 0 || normalized_idx >= list_size) {
    if (normalized_idx < 0) {
      elements.insert(elements.begin(), elem);
    } else {
      elements.push_back(elem);
    }
  } else {
    elements.insert(elements.begin() + normalized_idx, elem);
  }
  return 0;
}

template <typename TList, typename TElement>
int listRemove(Stack& stack) {
  TList list;
  TElement elem;
  pop(stack, list, elem);

  auto& elements = list->elements();
  auto pos = std::find(elements.begin(), elements.end(), elem);

  if (pos != elements.end()) {
    elements.erase(pos);
  } else {
    AT_ERROR("list.remove(x): x not in list");
  }
  return 0;
}
template <>
int listRemove<Shared<TensorList>, at::Tensor>(Stack& stack) {
  Shared<TensorList> list;
  at::Tensor elem;
  pop(stack, list, elem);

  auto& elements = list->elements();
  auto pos = std::find_if(
      elements.begin(), elements.end(), [elem](const at::Tensor& b) {
        const auto cmp_result = elem.eq(b);
        return cmp_result.is_nonzero();
      });

  if (pos != elements.end()) {
    elements.erase(pos);
  } else {
    AT_ERROR("list.remove(x): x not in list");
  }
  return 0;
}

template <typename TList, typename TElement>
int listIndex(Stack& stack) {
  TList list;
  TElement elem;
  pop(stack, list, elem);

  auto& elements = list->elements();
  auto pos = std::find(elements.begin(), elements.end(), elem);

  if (pos != elements.end()) {
    push(stack, static_cast<int64_t>(std::distance(elements.begin(), pos)));
  } else {
    AT_ERROR("'", elem, "' is not in list");
  }
  return 0;
}
template <>
int listIndex<Shared<TensorList>, at::Tensor>(Stack& stack) {
  Shared<TensorList> list;
  at::Tensor elem;
  pop(stack, list, elem);

  auto& elements = list->elements();
  auto pos = std::find_if(
      elements.begin(), elements.end(), [elem](const at::Tensor& b) {
        const auto cmp_result = elem.eq(b);
        return cmp_result.is_nonzero();
      });

  if (pos != elements.end()) {
    push(stack, static_cast<int64_t>(std::distance(elements.begin(), pos)));
  } else {
    AT_ERROR("'", elem, "' is not in list");
  }
  return 0;
}

template <typename TList, typename TElement>
int listCount(Stack& stack) {
  TList list;
  TElement elem;
  pop(stack, list, elem);

  auto& elements = list->elements();
  const int64_t count = std::count(elements.begin(), elements.end(), elem);
  push(stack, count);
  return 0;
}

template <>
int listCount<Shared<TensorList>, at::Tensor>(Stack& stack) {
  Shared<TensorList> list;
  at::Tensor elem;
  pop(stack, list, elem);

  auto& elements = list->elements();
  const int64_t count = std::count_if(
      elements.begin(), elements.end(), [elem](const at::Tensor& b) {
        const auto cmp_result = elem.eq(b);
        return cmp_result.is_nonzero();
      });
  push(stack, count);
  return 0;
}
template <typename TList>
Operation listExtend(const Node* node) {
  return [](Stack& stack) {
    TList a;
    TList b;
    pop(stack, a, b);

    auto& vec_a = a->elements();
    const auto& vec_b = b->elements();
    vec_a.insert(vec_a.end(), vec_b.cbegin(), vec_b.cend());
    return 0;
  };
}

template <typename TList>
Operation listCopy(const Node* node) {
  return [](Stack& stack) {
    TList list;
    pop(stack, list);

    const auto& vec = list->elements();
    auto out = vec;
    push(stack, out);
    return 0;
  };
}

template <typename T>
int listSelect(Stack& stack) {
  T list;
  int64_t idx;
  pop(stack, list, idx);

  auto element = getItem(list, idx);
  push(stack, std::move(element));
  return 0;
}

template <>
int listSelect<Shared<BoolList>>(Stack& stack) {
  Shared<BoolList> list;
  int64_t idx;
  pop(stack, list, idx);

  auto element = getBoolItem(list->elements(), idx);
  push(stack, element);
  return 0;
}

template <typename T>
int listLen(Stack& stack) {
  T a;
  pop(stack, a);

  const int64_t size = a->elements().size();
  push(stack, size);
  return 0;
}
template <typename T>
int listEq(Stack& stack) {
  T a;
  T b;
  pop(stack, a, b);
  push(stack, a->elements() == b->elements());
  return 0;
}

template <typename T>
int listNe(Stack& stack) {
  T a;
  T b;
  pop(stack, a, b);
  push(stack, !(a->elements() == b->elements()));
  return 0;
}
inline bool tensor_list_equal(Shared<TensorList> a, Shared<TensorList> b) {
  if (a->elements().size() != b->elements().size()) {
    return false;
  }
  for (size_t i = 0; i < a->elements().size(); ++i) {
    const auto& a_element = a->elements()[i];
    const auto& b_element = b->elements()[i];
    // Preserve Python semantics: compare elements with eq(), then pass the
    // result through bool().
    const auto cmp_result = a_element.eq(b_element);
    if (!cmp_result.is_nonzero()) {
      return false;
    }
  }
  return true;
}
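// Note (comment only): elementwise eq() returns a tensor, so the bool()
// conversion comes from is_nonzero(). is_nonzero() itself throws for results
// with more than one element, matching Python's "truth value of a Tensor is
// ambiguous" error for multi-element comparisons.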
template <>
int listEq<Shared<TensorList>>(Stack& stack) {
  Shared<TensorList> a;
  Shared<TensorList> b;
  pop(stack, a, b);
  push(stack, tensor_list_equal(a, b));
  return 0;
}

template <>
int listNe<Shared<TensorList>>(Stack& stack) {
  Shared<TensorList> a;
  Shared<TensorList> b;
  pop(stack, a, b);
  push(stack, !tensor_list_equal(a, b));
  return 0;
}

Operation listList(const Node* node) {
  return [=](Stack& stack) {
    // Intentional no-op: the list on top of the stack is already the result.
    return 0;
  };
}
template <class TList, class TElement>
int listAdd(Stack& stack) {
  TList a;
  TList b;
  pop(stack, a, b);

  std::vector<TElement> ret;
  const auto total_size = a->elements().size() + b->elements().size();
  ret.reserve(total_size);
  for (const auto& a_element : a->elements()) {
    ret.push_back(a_element);
  }
  for (const auto& b_element : b->elements()) {
    ret.push_back(b_element);
  }
  push(stack, ret);
  return 0;
}

template <class TList, class TElement>
int listMulIntLeft(Stack& stack) {
  TList list;
  int64_t n;
  pop(stack, list, n);

  std::vector<TElement> ret;
  const auto size = list->elements().size() * n;
  ret.reserve(size);

  for (auto i = 0; i < n; i++) {
    for (const auto& e : list->elements()) {
      ret.push_back(e);
    }
  }
  push(stack, ret);
  return 0;
}

template <class TList, class TElement>
int listMulIntRight(Stack& stack) {
  TList list;
  int64_t n;
  pop(stack, n, list);

  std::vector<TElement> ret;
  const auto size = list->elements().size() * n;
  ret.reserve(size);

  for (auto i = 0; i < n; i++) {
    for (const auto& e : list->elements()) {
      ret.push_back(e);
    }
  }
  push(stack, ret);
  return 0;
}
template <typename TList, typename TElement>
int listSlice(Stack& stack) {
  TList list;
  int64_t start;
  int64_t end;
  int64_t step;
  pop(stack, list, start, end, step);
  const int64_t list_size = list->elements().size();

  // Clamp start and end to the bounds of the list.
  const auto normalized_start =
      std::max((int64_t)0, normalizeIndex(start, list_size));
  const auto normalized_end =
      std::min(list_size, normalizeIndex(end, list_size));

  std::vector<TElement> sliced_list;
  if (normalized_end <= normalized_start) {
    // Early exit if the slice is trivially empty.
    push(stack, sliced_list);
    return 0;
  }

  sliced_list.reserve(normalized_end - normalized_start);

  for (auto i = normalized_start; i < normalized_end;) {
    sliced_list.push_back(list->elements()[i]);
    i += step;
  }

  push(stack, sliced_list);
  return 0;
}
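// Worked example (comment only), mirroring Python slicing:
//   l = [10, 20, 30, 40], start = -3, end = 4, step = 2
//   normalized_start == max(0, 4 + (-3)) == 1, normalized_end == min(4, 4)
//   visited indices: 1, 3  ->  result: [20, 40]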
template <typename TList, typename TElement>
int listSetItem(Stack& stack) {
  TList list;
  int64_t idx;
  TElement value;
  pop(stack, list, idx, value);

  getItem(list, idx) = value;
  push(stack, list);
  return 0;
}

template <>
int listSetItem<Shared<BoolList>, bool>(Stack& stack) {
  Shared<BoolList> list;
  int64_t idx;
  bool value;
  pop(stack, list, idx, value);

  int64_t list_size = list->elements().size();
  auto normalized_idx = normalizeIndex(idx, list_size);
  if (normalized_idx < 0 || normalized_idx >= list_size) {
    throw std::out_of_range("list index out of range");
  }
  list->elements()[normalized_idx] = value;
  push(stack, list);
  return 0;
}

int dictSetItem(Stack& stack) {
  auto value = pop(stack);
  auto idx = pop(stack);
  auto& dict = pop(stack).toGenericDict()->elements();
  dict[idx] = value;
  return 0;
}
int dictLen(Stack& stack) {
  auto dict = pop(stack).toGenericDictRef();
  push(stack, int64_t(dict.size()));
  return 0;
}

int dictKeys(Stack& stack) {
  auto dict = pop(stack).toGenericDictRef();
  std::vector<IValue> keys;
  keys.reserve(dict.size());
  for (auto item : dict) {
    keys.push_back(item.first);
  }
  push(stack, IValue(keys));
  return 0;
}

int dictValues(Stack& stack) {
  auto dict = pop(stack).toGenericDictRef();
  std::vector<IValue> values;
  values.reserve(dict.size());
  for (auto item : dict) {
    values.push_back(item.second);
  }
  push(stack, IValue(values));
  return 0;
}

int dictIndex(Stack& stack) {
  auto index = pop(stack);
  auto dict = pop(stack).toGenericDict();
  const auto& elems = dict->elements();
  auto value = elems.find(index);
  if (value == elems.end()) {
    AT_ERROR("KeyError: '", index, "'");
  }
  push(stack, value->second);
  return 0;
}

int dictGet(Stack& stack) {
  auto index = pop(stack);
  auto dict = pop(stack).toGenericDict();
  const auto& elems = dict->elements();
  auto value = elems.find(index);
  if (value == elems.end()) {
    push(stack, IValue());
  } else {
    push(stack, value->second);
  }
  return 0;
}
int dictGetDefault(Stack& stack) {
  auto default_value = pop(stack);
  auto index = pop(stack);
  auto dict = pop(stack).toGenericDict();
  const auto& elems = dict->elements();
  auto value = elems.find(index);
  if (value == elems.end()) {
    push(stack, default_value);
  } else {
    push(stack, value->second);
  }
  return 0;
}
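// Behavior sketch (comment only) for the three lookup flavors registered
// below via CREATE_DICT_OPS:
//   d[k]        -> dictIndex:      throws KeyError if k is absent
//   d.get(k)    -> dictGet:        pushes None if k is absent
//   d.get(k, v) -> dictGetDefault: pushes v if k is absent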
RegisterOperators reg2({

#define DEFINE_STRING_OP(op_name, string_op, result) \
  Operator(#op_name "(str a, str b) ->" #result, [](Stack& stack) { \
    auto b = pop(stack).toStringRef(); \
    auto a = pop(stack).toStringRef(); \
    push(stack, string_op); \
    return 0; \
  })

    DEFINE_STRING_OP(aten::eq, a == b, bool),
    DEFINE_STRING_OP(aten::ne, a != b, bool),
    DEFINE_STRING_OP(aten::add, a + b, str),
#undef DEFINE_STRING_OP

    // Tensor length op (size of the first dimension).
    Operator(
        "aten::len(Tensor t) -> int",
        [](Stack& stack) {
          at::Tensor t = pop(stack).toTensor();
          if (t.dim() == 0) {
            AT_ERROR("len() of a 0-d tensor");
          }
          push(stack, t.sizes()[0]);
          return 0;
        }),
// Mutable ops for lists containing mutable types: elements alias the list.
#define CREATE_MUTABLE_LIST_OPS(decl_type, c_type) \
  Operator( \
      "aten::select(" decl_type "[](a) list, int idx) -> " decl_type "(*)", \
      listSelect<Shared<c_type>>), \
      Operator( \
          "aten::append(" decl_type "[](a!) self, " decl_type \
          "(c) el) -> " decl_type "[](a!)", \
          listAppend<Shared<c_type>, c_type::ElemType>), \
      Operator( \
          "aten::reverse(" decl_type "[](a!) self) -> ()", \
          listReverse<Shared<c_type>>), \
      Operator( \
          "aten::extend(" decl_type "[](a!) self, " decl_type \
          "[] other) -> ()", \
          listExtend<Shared<c_type>>), \
      Operator( \
          "aten::copy(" decl_type "[](a) self)" \
          " -> " decl_type "[]", \
          listCopy<Shared<c_type>>), \
      Operator( \
          "aten::_set_item(" decl_type "[](a!) l, int idx, " decl_type \
          " el) -> " decl_type "[](a!)", \
          listSetItem<Shared<c_type>, c_type::ElemType>), \
      Operator( \
          "aten::clear(" decl_type "[](a!) self) -> ()", \
          listClear<Shared<c_type>>), \
      Operator( \
          "aten::insert(" decl_type "[](a!) self, int idx, " decl_type \
          " el) -> ()", \
          listInsert<Shared<c_type>, c_type::ElemType>), \
      Operator( \
          "aten::pop(" decl_type "[](a!) self, int idx=-1) -> " decl_type \
          "(*)", \
          listPop<Shared<c_type>>)

    CREATE_MUTABLE_LIST_OPS("Tensor", TensorList),

    Operator(
        "aten::remove(Tensor[](a!) self, Tensor el) -> ()",
        listRemove<Shared<TensorList>, at::Tensor>),
    Operator(
        "aten::index(Tensor[] self, Tensor el) -> int",
        listIndex<Shared<TensorList>, at::Tensor>),
    Operator(
        "aten::count(Tensor[] self, Tensor el) -> int",
        listCount<Shared<TensorList>, at::Tensor>),
// Immutable ops for lists of value types (int, float, bool): elements are
// copied, not aliased.
#define CREATE_IMMUTABLE_LIST_OPS(decl_type, c_type) \
  Operator( \
      "aten::select(" decl_type "[] a, int b) -> " decl_type, \
      listSelect<Shared<c_type>>), \
      Operator( \
          "aten::append(" decl_type "[](a!) self, " decl_type \
          " el) -> " decl_type "[](a!)", \
          listAppend<Shared<c_type>, c_type::ElemType>), \
      Operator( \
          "aten::reverse(" decl_type "[](a!) self) -> ()", \
          listReverse<Shared<c_type>>), \
      Operator( \
          "aten::extend(" decl_type "[](a!) self, " decl_type \
          "[] other) -> ()", \
          listExtend<Shared<c_type>>), \
      Operator( \
          "aten::copy(" decl_type "[](a) self)" \
          " -> " decl_type "[]", \
          listCopy<Shared<c_type>>), \
      Operator( \
          "aten::_set_item(" decl_type "[](a!) l, int idx, " decl_type \
          " el) -> " decl_type "[](a!)", \
          listSetItem<Shared<c_type>, c_type::ElemType>), \
      Operator( \
          "aten::clear(" decl_type "[](a!) self) -> ()", \
          listClear<Shared<c_type>>), \
      Operator( \
          "aten::insert(" decl_type "[](a!) self, int idx, " decl_type \
          " el) -> ()", \
          listInsert<Shared<c_type>, c_type::ElemType>), \
      Operator( \
          "aten::remove(" decl_type "[](a!) self, " decl_type \
          " el) -> ()", \
          listRemove<Shared<c_type>, c_type::ElemType>), \
      Operator( \
          "aten::index(" decl_type "[] self, " decl_type \
          " el) -> int", \
          listIndex<Shared<c_type>, c_type::ElemType>), \
      Operator( \
          "aten::count(" decl_type "[] self, " decl_type \
          " el) -> int", \
          listCount<Shared<c_type>, c_type::ElemType>), \
      Operator( \
          "aten::pop(" decl_type "[](a!) self, int idx=-1) -> " decl_type, \
          listPop<Shared<c_type>>)

    CREATE_IMMUTABLE_LIST_OPS("int", IntList),
    CREATE_IMMUTABLE_LIST_OPS("float", DoubleList),
    CREATE_IMMUTABLE_LIST_OPS("bool", BoolList),

    // GenericList reuses the mutable set so element aliasing is tracked for
    // arbitrary element types.
    CREATE_MUTABLE_LIST_OPS("t", GenericList),
#undef CREATE_IMMUTABLE_LIST_OPS
#undef CREATE_MUTABLE_LIST_OPS

#define CREATE_LIST_OPS(decl_type, c_type) \
  Operator("aten::len(" decl_type "[] a) -> int", listLen<Shared<c_type>>), \
      Operator( \
          "aten::add(" decl_type "[] a, " decl_type "[] b) -> " decl_type \
          "[]", \
          listAdd<Shared<c_type>, c_type::ElemType>), \
      Operator( \
          "aten::slice(" decl_type \
          "[] l, int start, int end=9223372036854775807, int step=1) -> " decl_type \
          "[]", \
          listSlice<Shared<c_type>, c_type::ElemType>), \
      Operator("aten::list(" decl_type "[] l) -> " decl_type "[]", listList), \
      Operator( \
          "aten::mul(" decl_type "[] l, int n) -> " decl_type "[]", \
          listMulIntLeft<Shared<c_type>, c_type::ElemType>), \
      Operator( \
          "aten::mul(int n, " decl_type "[] l) -> " decl_type "[]", \
          listMulIntRight<Shared<c_type>, c_type::ElemType>)

    CREATE_LIST_OPS("int", IntList),
    CREATE_LIST_OPS("float", DoubleList),
    CREATE_LIST_OPS("bool", BoolList),
    CREATE_LIST_OPS("Tensor", TensorList),
    CREATE_LIST_OPS("t", GenericList),
#undef CREATE_LIST_OPS

    Operator("aten::eq(int[] a, int[] b) -> bool", listEq<Shared<IntList>>),
    Operator(
        "aten::eq(float[] a, float[] b) -> bool",
        listEq<Shared<DoubleList>>),
    Operator(
        "aten::eq(Tensor[] a, Tensor[] b) -> bool",
        listEq<Shared<TensorList>>),
    Operator("aten::eq(bool[] a, bool[] b) -> bool", listEq<Shared<BoolList>>),
    Operator("aten::ne(int[] a, int[] b) -> bool", listNe<Shared<IntList>>),
    Operator(
        "aten::ne(float[] a, float[] b) -> bool",
        listNe<Shared<DoubleList>>),
    Operator(
        "aten::ne(Tensor[] a, Tensor[] b) -> bool",
        listNe<Shared<TensorList>>),
    Operator("aten::ne(bool[] a, bool[] b) -> bool", listNe<Shared<BoolList>>),
#define CREATE_COPY_OP(other_type, c_type) \
  Operator( \
      "aten::copy_(Tensor(a!) self, " #other_type " other) -> Tensor(a!)", \
      [](Stack& stack) { \
        at::Tensor t; \
        c_type other; \
        pop(stack, t, other); \
        std::move(t) = other; \
        push(stack, std::move(t)); \
        return 0; \
      })

    CREATE_COPY_OP(Tensor, at::Tensor),
    CREATE_COPY_OP(int, int64_t),
    CREATE_COPY_OP(float, double),
#undef CREATE_COPY_OP

    DEFINE_BINARY_OP(aten::add, a + b),
    DEFINE_BINARY_OP(aten::sub, a - b),
    DEFINE_BINARY_OP(aten::mul, a * b),
    DEFINE_BINARY_OP(aten::pow, static_cast<decltype(a)>(pow(a, b))),

    // min and max live in prim:: because the Python builtins differ from
    // torch.min / torch.max.
    DEFINE_BINARY_OP(prim::min, a < b ? a : b),
    DEFINE_BINARY_OP(prim::max, a > b ? a : b),

    // Python-style modulus: the result takes the sign of the divisor, unlike
    // C++ %, so both the int and float versions re-bias the remainder.
    DEFINE_GENERIC_OP(
        aten::remainder,
        (b + (a % b)) % b,
        fmod((b + fmod(a, b)), b),
        int,
        float),
    DEFINE_INT_FLOAT_OP(aten::remainder, fmod((b + fmod(a, b)), b), float),

    DEFINE_GENERIC_OP(
        aten::floordiv,
        floordiv(a, b),
        std::floor(a / b),
        int,
        float),
    DEFINE_INT_FLOAT_OP(aten::floordiv, std::floor(a / b), float),

    // Only used internally by loop unrolling; truncates like C++ division.
    DEFINE_INT_OP(aten::__round_to_zero_floordiv, a / b),

    DEFINE_INT_OP(aten::__and__, a & b),
    DEFINE_INT_OP(aten::__or__, a | b),
    DEFINE_INT_OP(aten::__xor__, a ^ b),

    Operator(
        "aten::div(int a, int b) -> float",
        [](Stack& stack) {
          int64_t a, b;
          pop(stack, a, b);
          push(stack, static_cast<double>(a) / static_cast<double>(b));
          return 0;
        }),
    Operator(
        "aten::div(float a, float b) -> float",
        [](Stack& stack) {
          double a, b;
          pop(stack, a, b);
          push(stack, a / b);
          return 0;
        }),

    Operator(
        "aten::floor(float a) -> int",
        [](Stack& stack) {
          double a;
          pop(stack, a);
          push(stack, static_cast<int64_t>(std::floor(a)));
          return 0;
        }),
    DEFINE_COMPARISON_OP(aten::ne, a != b),
    DEFINE_COMPARISON_OP(aten::eq, a == b),
    DEFINE_COMPARISON_OP(aten::lt, a < b),
    DEFINE_COMPARISON_OP(aten::gt, a > b),
    DEFINE_COMPARISON_OP(aten::le, a <= b),
    DEFINE_COMPARISON_OP(aten::ge, a >= b),

    DEFINE_BOOL_OP(aten::__and__, a && b),
    DEFINE_BOOL_OP(aten::__or__, a || b),
    DEFINE_BOOL_OP(aten::__xor__, a != b),

    Operator(
        "aten::neg(int self) -> int",
        [](Stack& stack) {
          push(stack, -pop(stack).toInt());
          return 0;
        }),
    Operator(
        "aten::neg(float self) -> float",
        [](Stack& stack) {
          push(stack, -pop(stack).toDouble());
          return 0;
        }),
    Operator(
        "aten::__not__(bool self) -> bool",
        [](Stack& stack) {
          push(stack, !pop(stack).toBool());
          return 0;
        }),
1808 "aten::__is__(t1 self, t2 obj) -> bool",
1811 pop(stack,
self, obj);
1812 push(stack,
self.isSameIdentity(obj));
1816 "aten::__isnot__(t1 self, t2 obj) -> bool",
1819 pop(stack,
self, obj);
1820 push(stack, !
self.isSameIdentity(obj));
1824 "aten::_tensor_to_list(Tensor self) -> int[]",
1828 std::vector<int64_t> elems;
1829 elems.reserve(t.size(0));
1830 for (
int i = 0; i < t.size(0); i++) {
1831 elems.push_back(*t[i].data<int32_t>());
1833 push(stack, jit::IntList::create(elems));
1837 "aten::_list_to_tensor(int[] self) -> Tensor",
1839 std::vector<int64_t> l;
1841 auto t = torch::empty(
1842 {
static_cast<int64_t
>(l.size())}, at::dtype(at::kInt));
1843 for (
size_t i = 0; i < l.size(); i++) {
#define CREATE_DICT_OPS(key_type) \
  Operator("aten::len(Dict(" key_type ", t) self) -> int", dictLen), \
      Operator( \
          "aten::keys(Dict(" key_type ", t) self) -> " key_type "[](*)", \
          dictKeys), \
      Operator( \
          "aten::values(Dict(" key_type ", t) self) -> t[](*)", dictValues), \
      Operator( \
          "prim::DictIndex(Dict(" key_type ", t) self, " key_type \
          " key) -> t(*)", \
          dictIndex), \
      Operator( \
          "aten::get(Dict(" key_type ", t) self, " key_type \
          " key) -> t(*)?", \
          dictGet), \
      Operator( \
          "aten::get(Dict(" key_type ", t) self, " key_type \
          " key, t default_value) -> t(*)", \
          dictGetDefault), \
      Operator( \
          "aten::_set_item(Dict(" key_type ", t)(a!) l, " key_type \
          " idx, t v) -> ()", \
          dictSetItem)

    CREATE_DICT_OPS("str"),
    CREATE_DICT_OPS("int"),
    CREATE_DICT_OPS("float"),
#undef CREATE_DICT_OPS
});

// reference: _output_size in torch/nn/functional.py
// size can be None, int, or int list
// scale_factors can be None, float, or float list
std::vector<int64_t> _output_size(
    const at::Tensor& input,
    size_t dim,
    const IValue& size,
    const IValue& scale_factors) {
  if (!size.isNone()) {
    if (size.isInt()) {
      std::vector<int64_t> repeated(dim, size.toInt());
      return repeated;
    }
    return size.toIntListRef();
  }
  std::vector<double> scale_repeated;
  if (scale_factors.isDouble()) {
    scale_repeated = std::vector<double>(dim, scale_factors.toDouble());
  } else {
    scale_repeated = scale_factors.toDoubleListRef();
  }
  std::vector<int64_t> ret;
  for (size_t i = 0; i < dim; ++i) {
    ret.push_back(std::floor(input.size(i + 2) * scale_repeated[i]));
  }
  return ret;
}
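// Worked example (comment only): for an NCHW input of size [1, 3, 8, 8],
// dim == 2 and scale_factors == 2.0,
//   scale_repeated == {2.0, 2.0}
//   ret == {floor(8 * 2.0), floor(8 * 2.0)} == {16, 16}
// i.e. spatial sizes start at input.size(2), skipping batch and channel dims.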
// reference: interpolate in torch/nn/functional.py
at::Tensor interpolate(
    const at::Tensor& input,
    const IValue& size,
    const IValue& scale_factors,
    const std::string& mode,
    c10::optional<bool> align_corners) {
  if ((mode == "nearest" || mode == "area")) {
    if (align_corners != c10::nullopt) {
      throw std::runtime_error(
          "align_corners option can only be set with the "
          "interpolating modes: linear | bilinear | bicubic | trilinear");
    }
  } else {
    if (align_corners == c10::nullopt) {
      AT_WARN(
          "Default upsampling behavior when mode=",
          mode,
          " is changed "
          "to align_corners=False since 0.4.0. Please specify align_corners=True "
          "if the old behavior is desired. See the documentation of nn.Upsample for details");
      align_corners = false;
    }
  }

  auto input_dim = input.dim();
  if (input_dim == 3 && mode == "nearest")
    return at::upsample_nearest1d(
        input, _output_size(input, 1, size, scale_factors));
  if (input_dim == 4 && mode == "nearest")
    return at::upsample_nearest2d(
        input, _output_size(input, 2, size, scale_factors));
  if (input_dim == 5 && mode == "nearest")
    return at::upsample_nearest3d(
        input, _output_size(input, 3, size, scale_factors));
  if (input_dim == 3 && mode == "area")
    return at::adaptive_avg_pool1d(
        input, _output_size(input, 1, size, scale_factors));
  if (input_dim == 4 && mode == "area")
    return at::adaptive_avg_pool2d(
        input, _output_size(input, 2, size, scale_factors));
  if (input_dim == 5 && mode == "area")
    return at::adaptive_avg_pool3d(
        input, _output_size(input, 3, size, scale_factors));
  if (input_dim == 3 && mode == "linear")
    return at::upsample_linear1d(
        input, _output_size(input, 1, size, scale_factors), *align_corners);
  if (input_dim == 3 && mode == "bilinear")
    throw std::runtime_error("Got 3D input, but bilinear mode needs 4D input");
  if (input_dim == 3 && mode == "bicubic")
    throw std::runtime_error("Got 3D input, but bicubic mode needs 4D input");
  if (input_dim == 3 && mode == "trilinear")
    throw std::runtime_error("Got 3D input, but trilinear mode needs 5D input");
  if (input_dim == 4 && mode == "linear")
    throw std::runtime_error("Got 4D input, but linear mode needs 3D input");
  if (input_dim == 4 && mode == "bilinear")
    return at::upsample_bilinear2d(
        input, _output_size(input, 2, size, scale_factors), *align_corners);
  if (input_dim == 4 && mode == "bicubic")
    return at::upsample_bicubic2d(
        input, _output_size(input, 2, size, scale_factors), *align_corners);
  if (input_dim == 4 && mode == "trilinear")
    throw std::runtime_error("Got 4D input, but trilinear mode needs 5D input");
  if (input_dim == 5 && mode == "linear")
    throw std::runtime_error("Got 5D input, but linear mode needs 3D input");
  if (input_dim == 5 && mode == "bilinear")
    throw std::runtime_error("Got 5D input, but bilinear mode needs 4D input");
  if (input_dim == 5 && mode == "bicubic")
    throw std::runtime_error("Got 5D input, but bicubic mode needs 4D input");
  if (input_dim == 5 && mode == "trilinear")
    return at::upsample_trilinear3d(
        input, _output_size(input, 3, size, scale_factors), *align_corners);

  AT_ERROR(
      "Input Error: Only 3D, 4D and 5D input Tensors supported",
      " (got ",
      input_dim,
      "D) for the modes: nearest | linear | bilinear | trilinear",
      " (got ",
      mode,
      ")");
}
Operation interpolate_op(const Node* n) {
  return [](Stack& stack) {
    at::Tensor input;
    IValue size;
    IValue scale_factors;
    std::string mode;
    IValue align_corners;
    pop(stack, input, size, scale_factors, mode, align_corners);
    at::Tensor res = interpolate(
        input, size, scale_factors, mode, align_corners.toOptional<bool>());
    push(stack, std::move(res));
    return 0;
  };
}
// interpolate takes float & float[] scale factors, while upsample takes
// int & int[], so convert the ints to doubles before dispatching.
IValue convert_scale_factor_to_double(const IValue& int_ivalue) {
  IValue scale_factor_double;
  if (int_ivalue.isInt()) {
    scale_factor_double = static_cast<double>(int_ivalue.toInt());
  } else if (int_ivalue.isIntList()) {
    auto int_list = int_ivalue.toIntListRef();
    std::vector<double> double_vec(int_list.begin(), int_list.end());
    scale_factor_double = double_vec;
  } else if (int_ivalue.isNone()) {
    return IValue();
  } else {
    std::stringstream ss;
    ss << "Expecting optional int or int list arg for scale factor, got "
       << int_ivalue;
    throw std::runtime_error(ss.str());
  }
  return scale_factor_double;
}
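// Examples (comment only):
//   convert_scale_factor_to_double(IValue(2))              -> 2.0
//   convert_scale_factor_to_double(int[] IValue {2, 3})    -> {2.0, 3.0}
//   convert_scale_factor_to_double(IValue())               -> None, passed through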
Operation upsample_nearest_op(const Node* n) {
  return [](Stack& stack) {
    at::Tensor input;
    IValue size;
    IValue scale_factor_int;
    pop(stack, input, size, scale_factor_int);
    IValue scale_factor_double =
        convert_scale_factor_to_double(scale_factor_int);
    at::Tensor res =
        interpolate(input, size, scale_factor_double, "nearest", c10::nullopt);
    push(stack, std::move(res));
    return 0;
  };
}

Operation upsample_op(const Node* n) {
  return [](Stack& stack) {
    at::Tensor input;
    IValue size;
    IValue scale_factor_int;
    std::string mode;
    IValue align_corners;
    pop(stack, input, size, scale_factor_int, mode, align_corners);
    IValue scale_factor_double =
        convert_scale_factor_to_double(scale_factor_int);
    at::Tensor res = interpolate(
        input,
        size,
        scale_factor_double,
        mode,
        align_corners.toOptional<bool>());
    push(stack, std::move(res));
    return 0;
  };
}

Operation upsample_bilinear_op(const Node* n) {
  return [](Stack& stack) {
    at::Tensor input;
    IValue size;
    IValue scale_factor_int;
    pop(stack, input, size, scale_factor_int);
    IValue scale_factor_double =
        convert_scale_factor_to_double(scale_factor_int);
    at::Tensor res =
        interpolate(input, size, scale_factor_double, "bilinear", true);
    push(stack, std::move(res));
    return 0;
  };
}
RegisterOperators reg3({
    Operator(
        "aten::__interpolate(Tensor input, int? size = None, float[]? scale_factor = None, str mode = 'nearest', bool? align_corners = None) -> Tensor",
        interpolate_op),
    Operator(
        "aten::__interpolate(Tensor input, int[]? size = None, float[]? scale_factor = None, str mode = 'nearest', bool? align_corners = None) -> Tensor",
        interpolate_op),
    Operator(
        "aten::__interpolate(Tensor input, int? size = None, float? scale_factor = None, str mode = 'nearest', bool? align_corners = None) -> Tensor",
        interpolate_op),
    Operator(
        "aten::__interpolate(Tensor input, int[]? size = None, float? scale_factor = None, str mode = 'nearest', bool? align_corners = None) -> Tensor",
        interpolate_op),

    Operator(
        "aten::__upsample_nearest(Tensor input, int? size = None, int? scale_factor = None) -> Tensor",
        upsample_nearest_op),
    Operator(
        "aten::__upsample_nearest(Tensor input, int[]? size = None, int? scale_factor = None) -> Tensor",
        upsample_nearest_op),

    Operator(
        "aten::__upsample(Tensor input, int? size = None, int? scale_factor = None, str mode = 'nearest', bool? align_corners = None) -> Tensor",
        upsample_op),
    Operator(
        "aten::__upsample(Tensor input, int[]? size = None, int? scale_factor = None, str mode = 'nearest', bool? align_corners = None) -> Tensor",
        upsample_op),

    Operator(
        "aten::__upsample_bilinear(Tensor input, int? size = None, int? scale_factor = None) -> Tensor",
        upsample_bilinear_op),
    Operator(
        "aten::__upsample_bilinear(Tensor input, int[]? size = None, int? scale_factor = None) -> Tensor",
        upsample_bilinear_op),
    Operator(
        "aten::__upsample_bilinear(Tensor input, int? size = None, int[]? scale_factor = None) -> Tensor",
        upsample_bilinear_op),
    Operator(
        "aten::__upsample_bilinear(Tensor input, int[]? size = None, int[]? scale_factor = None) -> Tensor",
        upsample_bilinear_op),
});
at::Tensor leaky_relu(const at::Tensor& tensor, double scalar) {
  return at::leaky_relu(tensor, scalar);
}
at::Tensor cat(const std::vector<at::Tensor>& tensors) {
  return at::cat(tensors);
}

std::string get_first(const std::vector<std::vector<std::string>>& strings) {
  return strings[0][0];
}

// Note: the registration handle name (reg4) below is illustrative; the
// builder-style .op() chain itself is as registered.
static auto reg4 =
    torch::jit::RegisterOperators()
        .op("_test::leaky_relu(Tensor self, float v=0.01) -> Tensor",
            &leaky_relu)
        .op("_test::cat(Tensor[] inputs) -> Tensor", &cat)
        .op("_test::get_first", &get_first);