#include <torch/csrc/autograd/profiler.h>
#include <torch/csrc/jit/custom_operator.h>
#include <torch/csrc/jit/operator.h>

#include <torch/csrc/api/include/torch/utils.h>
#include <aten/src/ATen/ExpandUtils.h>
#include <c10/core/ScalarType.h>
#include <aten/src/ATen/InitialTensorOptions.h>
#include <torch/csrc/jit/script/error_report.h>

#include <regex>
#include <sstream>

namespace torch {
namespace jit {

namespace {

// Reject torch.tensor(...) list inputs whose element type cannot be stored
// in a tensor, with a targeted hint for the empty-list case.
void checkListInputType(const c10::TypePtr& elem_type, const Node* node) {
  if (!elem_type->isSubtypeOf(NumberType::get()) &&
      elem_type != BoolType::get()) {
    auto error = script::ErrorReport(node->getSourceLocation());
    error << "Input list to torch.tensor must be of ints, floats, or bools, "
          << "got " << elem_type->str();
    // Special case: empty lists are constructed as Tensor[].
    if (elem_type->isSubtypeOf(TensorType::get())) {
      auto input = node->inputs().at(0);
      if (input->node()->kind() == prim::ListConstruct &&
          input->node()->inputs().size() == 0) {
        error << "\n(Note: empty lists are constructed as Tensor[]; \n"
              << "if you want an empty list of a different type, \n"
              << "use `torch.jit.annotate(List[T], [])`, \n"
              << "where `T` is the type of elements in the list)";
      }
    }
    throw error;
  }
}
// Map a JIT scalar type to the ATen scalar type used to store it.
at::ScalarType scalarTypeFromJitType(const c10::TypePtr& type) {
  if (type == FloatType::get()) {
    return at::ScalarType::Double;
  } else if (type == IntType::get()) {
    return at::ScalarType::Long;
  } else if (type == BoolType::get()) {
    return at::ScalarType::Byte;
  }
  AT_ASSERTM(
      0,
      "Add new condition, expected Float, Int, or Bool but got",
      type->str());
  // Unreachable; silence the missing-return warning.
  return at::ScalarType::Undefined;
}
// Return the length of any specialized or generic JIT list.
int64_t list_size(const IValue& list) {
  if (list.isGenericList()) {
    return list.toGenericListRef().size();
  } else if (list.isIntList()) {
    return list.toIntListRef().size();
  } else if (list.isDoubleList()) {
    return list.toDoubleListRef().size();
  } else if (list.isBoolList()) {
    return list.toBoolListRef().size();
  }
  AT_ASSERTM(0, "Unexpected list type", list);
}
// Walk the nesting of a (possibly nested) list and collect the length of
// each level; only the first element of each level is inspected, so the
// input is assumed to be rectangular.
std::vector<int64_t> compute_sizes(const IValue& seq) {
  std::vector<int64_t> sizes;
  // Because bool, int, and float lists are specialized, the innermost list
  // will not be a generic list.
  auto seq_recur = seq;
  while (seq_recur.isGenericList()) {
    auto seq_list = seq_recur.toGenericListRef();
    auto length = seq_list.size();
    AT_ASSERT(length != 0);
    sizes.push_back(length);
    seq_recur = seq_list[0];
  }
  sizes.push_back(list_size(seq_recur));
  return sizes;
}
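// For example (a sketch): compute_sizes on the IValue for [[1, 2, 3],
// [4, 5, 6]] yields {2, 3}; a flat [1.0, 2.0] yields {2}.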
void checkSequenceSize(int64_t n, int64_t dim, int64_t seq_size) {
  if (seq_size != n) {
    AT_ERROR(
        "Expected sequence of length ", n,
        " at dim ", dim,
        " (got ", seq_size,
        ")");
  }
}
// Copy a flat std::vector<DTYPE> into the innermost dimension of the tensor
// buffer, honoring the stride of that dimension.
template <typename DTYPE>
void storeLastDimension(
    char* data,
    const std::vector<int64_t>& sizes,
    const c10::ArrayRef<int64_t>& strides,
    int64_t dim,
    int elementSize,
    const std::vector<DTYPE>& obj) {
  auto n = sizes[dim];
  auto seq_size = obj.size();
  checkSequenceSize(n, dim, seq_size);
  for (int64_t i = 0; i < n; i++) {
    *(DTYPE*)data = obj[i];
    data += strides[dim] * elementSize;
  }
}
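// Note: the tensors filled here are freshly allocated and contiguous, so
// strides[dim] * elementSize is the byte distance between consecutive
// elements along dim; e.g. for a 2x3 double tensor, dim 1 advances 8 bytes.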
// std::vector<bool> is a bitfield, so its elements cannot be written through
// a DTYPE*; cast each element to uint8_t instead.
template <>
void storeLastDimension<bool>(
    char* data,
    const std::vector<int64_t>& sizes,
    const c10::ArrayRef<int64_t>& strides,
    int64_t dim,
    int elementSize,
    const std::vector<bool>& obj) {
  auto n = sizes[dim];
  auto seq_size = obj.size();
  checkSequenceSize(n, dim, seq_size);
  for (int64_t i = 0; i < n; i++) {
    *(uint8_t*)data = static_cast<uint8_t>(obj[i]);
    data += strides[dim] * elementSize;
  }
}
// Recursively copy a nested JIT list into a contiguous tensor buffer, one
// dimension per recursion level (reference: the Python implementation
// recursive_store in torch/csrc/utils/tensor_new.cpp).
void recursiveStore(
    char* data,
    const std::vector<int64_t>& sizes,
    const c10::ArrayRef<int64_t>& strides,
    int64_t dim,
    int elementSize,
    const IValue& obj) {
  auto ndim = sizes.size();
  auto n = sizes[dim];
  auto seq_size = list_size(obj);
  checkSequenceSize(n, dim, seq_size);
  if (dim + 1 < static_cast<long>(ndim)) {
    auto items = obj.toGenericListRef();
    for (int64_t i = 0; i < n; i++) {
      recursiveStore(data, sizes, strides, dim + 1, elementSize, items[i]);
      data += strides[dim] * elementSize;
    }
  } else {
    AT_ASSERT(obj.isIntList() || obj.isDoubleList() || obj.isBoolList());
    if (obj.isIntList()) {
      storeLastDimension<int64_t>(
          data, sizes, strides, dim, elementSize, obj.toIntListRef());
    } else if (obj.isDoubleList()) {
      storeLastDimension<double>(
          data, sizes, strides, dim, elementSize, obj.toDoubleListRef());
    } else {
      storeLastDimension<bool>(
          data, sizes, strides, dim, elementSize, obj.toBoolListRef());
    }
  }
}
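// Worked example (a sketch): storing [[1, 2], [3, 4]] into an empty 2x2
// int64 tensor gives sizes = {2, 2}, strides = {2, 1}, elementSize = 8.
// dim 0 recurses twice, advancing data by 16 bytes per row; dim 1 hits the
// int-list case, writing 1,2 then 3,4 at 8-byte steps.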
RegisterOperators reg({
    Operator(
        "aten::split(Tensor self, int[] split_sizes, int dim=0) -> Tensor[]",
        [](Stack& stack) {
          autograd::profiler::RecordFunction record("split_with_sizes");
          auto result = at::split_with_sizes(
              (std::move(peek(stack, 0, 3))).toTensor(),
              (std::move(peek(stack, 1, 3))).toIntList()->elements(),
              (std::move(peek(stack, 2, 3))).toInt());
          drop(stack, 3);
          pack(stack, std::move(result));
          return 0;
        }),
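    // Example (a sketch): for a tensor of shape [5], split_sizes=[2, 3]
    // yields two views with shapes [2] and [3]; the sizes must sum to 5.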
153 "aten::Size(int[] sizes) -> int[]",
154 [](Stack& stack) {
return 0; }),
156 "aten::size(Tensor self) -> int[]",
158 autograd::profiler::RecordFunction record(
"sizes");
159 auto t = std::move(pop(stack)).toTensor();
160 pack(stack, t.sizes().vec());
164 "aten::list_with_default(int[] list, int[] defaults) -> int[]",
166 autograd::profiler::RecordFunction record(
"sizes");
167 auto list = peek(stack, 0, 2).toIntListRef();
168 auto defaults = peek(stack, 1, 2).toIntListRef();
171 AT_ASSERT(defaults.size() > list.size());
180 "aten::_infer_size(int[] a, int[] b) -> int[]",
181 [](
const Node* node) {
182 return [](Stack& stack) {
183 auto a = pop(stack).toIntList()->elements();
184 auto b = pop(stack).toIntList()->elements();
185 push(stack, at::infer_size(a, b));
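    // at::infer_size applies the usual broadcasting rules; e.g. (a sketch)
    // _infer_size([2, 1], [1, 3]) -> [2, 3], while mismatched non-1 sizes
    // throw.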
190 "aten::_no_grad_embedding_renorm_(Tensor weight, Tensor input, float max_norm, float norm_type) -> Tensor",
191 [](
const Node* node) {
192 return [](Stack& stack) {
197 pop(stack, weight, input, max_norm, norm_type);
202 at::Tensor result = at::embedding_renorm_(weight, input, max_norm, norm_type);
209 "aten::format(str self, ...) -> str",
210 [](
const Node* node) {
211 size_t num_inputs = node->inputs().size();
212 std::regex unsupported_options(
"\\{(.*)\\}");
213 return [num_inputs, unsupported_options](Stack& stack) {
214 auto format = peek(stack, 0, num_inputs).toStringRef();
216 if (std::regex_search(format, unsupported_options)) {
217 AT_WARN(
"Format options are not supported.");
220 auto args = last(stack, num_inputs - 1);
221 std::stringstream ss;
222 for (
size_t begin = 0, used_args = 0;
true; ++used_args) {
223 size_t loc = format.find(
"{}", begin);
224 if (loc == std::string::npos) {
225 ss << format.substr(begin);
228 ss << format.substr(begin, loc - begin);
229 if (used_args >= args.size()) {
230 AT_ERROR(
"Too few arguments for format string: ", format);
232 ss << args[used_args];
236 drop(stack, num_inputs);
237 push(stack, ss.str());
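    // Example behavior (a sketch): in TorchScript, "{} + {}".format(1, 2)
    // evaluates to "1 + 2". Text between braces is never interpreted as a
    // Python format spec; the regex above only gates the warning.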
// Define an aten::tensor overload for a single scalar of the given JIT type.
#define DEFINE_TORCH_TENSOR_OP(operator_type, c_type, tensor_creation_op)     \
    Operator(                                                                  \
        "aten::tensor(" #operator_type                                         \
        " t, *, ScalarType? dtype=None, Device? device=None"                   \
        ") -> Tensor",                                                         \
        [](const Node* node) {                                                 \
          auto initial_scalar_type =                                           \
              scalarTypeFromJitType(node->inputs().at(0)->type());             \
          return [initial_scalar_type](Stack& stack) {                         \
            c_type scalar_val;                                                 \
            IValue dtype;                                                      \
            IValue device;                                                     \
            pop(stack, scalar_val, dtype, device);                             \
            auto tensor = autograd::make_variable(tensor_creation_op);         \
            at::ScalarType scalar_type =                                       \
                dtype.isNone() ? tensor.scalar_type() : dtype.toScalarType();  \
            c10::Device dev =                                                  \
                device.isNone() ? tensor.device() : device.toDevice();         \
            if (scalar_type != initial_scalar_type ||                          \
                dev != tensor.device()) {                                      \
              tensor = tensor.to(dev, scalar_type);                            \
            }                                                                  \
            push(stack, tensor);                                               \
            return 0;                                                          \
          };                                                                   \
        }),
    DEFINE_TORCH_TENSOR_OP(float, double, at::scalar_to_tensor(scalar_val))
    DEFINE_TORCH_TENSOR_OP(int, int64_t, at::scalar_to_tensor(scalar_val))
    DEFINE_TORCH_TENSOR_OP(
        bool,
        bool,
        at::empty({}, at::CPU(at::kByte).options()).fill_(scalar_val))
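    // Each invocation above expands to one Operator in the list; e.g. the
    // first registers "aten::tensor(float t, ...) -> Tensor" backed by a
    // double scalar (TorchScript float is 64-bit), while bool uses a Byte
    // tensor filled in place.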
272 "aten::_infer_size(int[] a, int[] b) -> int[]",
273 [](
const Node* node) {
274 return [](Stack& stack) {
275 auto a = pop(stack).toIntList()->elements();
276 auto b = pop(stack).toIntList()->elements();
277 push(stack, at::infer_size(a, b));
282 "aten::_no_grad_embedding_renorm_(Tensor weight, Tensor input, float max_norm, float norm_type) -> Tensor",
283 [](
const Node* node) {
284 return [](Stack& stack) {
289 pop(stack, weight, input, max_norm, norm_type);
295 at::embedding_renorm_(weight, input, max_norm, norm_type);
302 "aten::tensor(t[] data, *, ScalarType? dtype=None, Device? device=None) -> Tensor",
303 [](
const Node* node) {
304 auto input = node->inputs().at(0);
305 auto elem_type = input->type();
306 while (
auto list_type = elem_type->cast<ListType>()) {
307 elem_type = list_type->getElementType();
309 checkListInputType(elem_type, node);
310 at::ScalarType initial_scalar_type = scalarTypeFromJitType(elem_type);
311 return [initial_scalar_type, elem_type](Stack& stack) {
315 pop(stack, data, dtype, device);
316 auto sizes = compute_sizes(data);
317 auto tensor = autograd::make_variable(
318 at::empty(sizes, at::initialTensorOptions().dtype(initial_scalar_type)));
320 recursiveStore((
char*)tensor.data_ptr(), sizes, tensor.strides(), 0,
321 tensor.element_size(), data);
323 at::ScalarType scalar_type = dtype.isNone() ? tensor.scalar_type() : dtype.toScalarType();
324 c10::Device dev = device.isNone() ? tensor.device() : device.toDevice();
325 if (scalar_type != initial_scalar_type || dev != tensor.device()) {
326 tensor = tensor.to(dev, scalar_type);
329 auto default_type = at::typeMetaToScalarType(at::get_default_dtype());
331 if (dtype.isNone() && tensor.scalar_type() != default_type &&
332 tensor.numel() == 0) {
333 AT_WARN(
"Creating a tensor from an empty ", elem_type->str(),
334 "list will create a tensor of default floating point type (currently ", default_type,
335 ") in python but a tensor of type ", elem_type->str(),
" in torchscript.\n",
336 "Pass in a dtype argument to ensure consistent behavior");
344 "aten::_assert_int_or_pair(int[] vals, str name, str message) -> Tensor",
345 [](
const Node* node) {
346 return [](Stack& stack) {
354 "aten::_pack_sequence(Tensor output, Tensor batch_sizes, Tensor? sorted_indices, " 355 "Tensor? unsorted_indices) -> (Tensor, Tensor, Tensor?, Tensor?)",