#include <torch/csrc/autograd/python_variable_indexing.h>

#include <torch/csrc/DynamicTypes.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/THP_export.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/python_variable.h>
#include <torch/csrc/autograd/utils/wrap_outputs.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/utils/python_compat.h>
#include <torch/csrc/utils/python_numbers.h>
#include <torch/csrc/utils/tensor_new.h>
#include <torch/csrc/jit/tracer.h>

#include <ATen/DeviceGuard.h>
#include <ATen/ExpandUtils.h>
#include <c10/core/TensorOptions.h>

#include <vector>

using namespace at;
using namespace torch::autograd::utils;

namespace torch { namespace autograd {
Py_ssize_t THPVariable_length(PyObject* self) {
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  if (self_.dim() == 0) {
    return 0;
  }
  return (Py_ssize_t)self_.size(0);
  END_HANDLE_TH_ERRORS_RET(-1)
}
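// Illustrative note (not in the original file): THPVariable_length backs
// Python's len(). Under the semantics above, a 0-dim tensor reports length 0
// rather than raising, e.g.:
//
//   len(torch.tensor([4, 5, 6]))  # -> 3, via self_.size(0)
//   len(torch.tensor(7))          # -> 0, the dim() == 0 branch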
// Count the number of dimensions "used up" by the index tuple. Everything
// except None, Ellipsis, and bools consumes a dimension; a byte (mask)
// tensor consumes as many dimensions as it has.
static int64_t count_specified_dimensions(PyObject* index) {
  int64_t count = 0;
  auto size = PyTuple_GET_SIZE(index);
  for (Py_ssize_t i = 0; i < size; i++) {
    PyObject* obj = PyTuple_GET_ITEM(index, i);
    if (THPVariable_Check(obj)) {
      auto& var = reinterpret_cast<THPVariable*>(obj)->cdata;
      if (var.scalar_type() == kByte) {
        count += var.dim();
      } else {
        count++;
      }
    } else if (obj != Py_None && obj != Py_Ellipsis && obj != Py_True && obj != Py_False) {
      count++;
    }
  }
  return count;
}
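// Example (illustrative): for a 4-d tensor x, the Python index
// x[0, :, None, mask] with a 2-d byte mask counts 1 (int) + 1 (slice)
// + 0 (None) + 2 (mask.dim()) = 4 specified dimensions.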
static void invalid_index(PyObject* obj) {
  throw IndexError(
    "only integers, slices (`:`), ellipsis (`...`), None and long or byte "
    "Variables are valid indices (got %s)", Py_TYPE(obj)->tp_name);
}
static Variable applySlice(const Variable& self, int64_t dim, PyObject* slice, bool ensure_view=false) {
  Py_ssize_t start, stop, step;
  auto length = self.size(dim);
  if (!THPUtils_unpackSlice(slice, &start, &stop, &step)) {
    throw python_error();
  }
  if (step == 0) {
    throw ValueError("step cannot be zero");
  }
  if (step < 0) {
    // TODO: implement negative step
    throw ValueError("negative step not yet supported");
  }
  // Skip the slice entirely when it is a no-op, unless a view is required
  // or the JIT tracer is active (the trace must still record the slice).
  if (!ensure_view && start == 0 && stop == length && step == 1 && !jit::tracer::isTracing()) {
    return self;
  }
  return self.slice(dim, start, stop, step);
}
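// Example (illustrative): the Python expression x[1:7:2] reaches this
// function with start=1, stop=7, step=2 and lowers to x.slice(0, 1, 7, 2),
// while x[:] on a length-5 tensor unpacks to start=0, stop=5, step=1 and is
// returned unchanged unless ensure_view is set or the tracer is recording.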
static Variable applySelect(const Variable& self, int64_t dim, int64_t index, int64_t real_dim=0) {
  if (index == 0 && dim == 0 && self.dim() == 0) {
    throw IndexError(
        "invalid index of a 0-dim tensor. "
        "Use tensor.item() to convert a 0-dim tensor to a Python number");
  }
  int64_t size = self.size(dim);
  if (index < -size || index >= size) {
    throw IndexError("index %lld is out of bounds for dimension %lld with size %lld",
      index, real_dim, size);
  }
  // Negative indices are passed through as-is: aten::select accepts them,
  // and normalizing here would bake the current size into a trace.
  return self.select(dim, index);
}
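// Example (illustrative): x[2] on a 1-d tensor calls applySelect(x, 0, 2),
// and x[-1] selects index size-1 of dimension 0; both remove the indexed
// dimension, so a (5,)-shaped tensor yields a 0-dim result.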
static Variable sequenceToVariable(const at::Type& type, PyObject* seq) {
  auto& idx_type = type.toScalarType(kLong);
  return torch::utils::indexing_tensor_from_data(idx_type, c10::nullopt, seq);
}
static Variable valueToTensor(const at::Type& type, PyObject* value) {
  if (THPVariable_Check(value)) {
    return reinterpret_cast<THPVariable*>(value)->cdata;
  }
  if (THPUtils_checkLong(value)) {
    return at::scalar_tensor(Scalar(THPUtils_unpackLong(value)), type.options());
  }
  if (PyFloat_Check(value)) {
    return at::scalar_tensor(Scalar(THPUtils_unpackDouble(value)), type.options());
  }
  throw TypeError("can't assign a %s to a %s", Py_TYPE(value)->tp_name, type.toString());
}
static Variable boolToIndexingTensor(const Variable& self, bool value) {
  // A boolean index adds a dimension of size 1: `true` keeps it (index 0),
  // `false` selects nothing (an empty index tensor).
  if (value) {
    return at::zeros({1}, self.options().dtype(kLong));
  } else {
    return at::empty({0}, self.options().dtype(kLong));
  }
}
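// Example (illustrative): x[True] behaves like x.unsqueeze(0) indexed with
// tensor([0]), producing shape (1, *x.shape); x[False] uses an empty index
// tensor and produces shape (0, *x.shape).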
static Variable applySlicing(const Variable& self, PyObject* index, variable_list& outIndices) {
  int64_t size = PyTuple_GET_SIZE(index);
  int64_t dim = 0;
  int64_t specified_dims = count_specified_dimensions(index);

  // Record a tensor index for the current dimension and advance past it.
  auto handle_var = [&](const Variable& var) {
    outIndices.resize(dim + 1);
    outIndices[dim] = var;
    dim++;
  };

  if (specified_dims > self.dim()) {
    throw IndexError("too many indices for tensor of dimension %d", (int)self.dim());
  }

  Variable result = self;
  for (int64_t i = 0; i < size; i++) {
    PyObject* obj = PyTuple_GET_ITEM(index, i);
    if (THPUtils_checkLong(obj)) {
      result = applySelect(result, dim, THPUtils_unpackLong(obj), i);
    } else if (PySlice_Check(obj)) {
      result = applySlice(result, dim, obj);
      dim++;
    } else if (obj == Py_Ellipsis) {
      // Ellipsis skips over the dimensions not otherwise specified.
      dim += self.dim() - specified_dims;
    } else if (obj == Py_None) {
      result = result.unsqueeze(dim);
      dim++;
    } else if (PyBool_Check(obj)) {
      result = result.unsqueeze(dim);
      handle_var(boolToIndexingTensor(result, obj == Py_True));
    } else if (THPVariable_Check(obj)) {
      auto& var = THPVariable_Unpack(obj);
      auto scalar_type = var.scalar_type();
      if (var.dim() == 0 && at::isIntegralType(scalar_type)) {
        if (scalar_type != at::kByte) {
          // A 0-dim integer tensor indexes like a plain Python integer.
          result = applySelect(result, dim, THPUtils_unpackLong(obj), i);
        } else {
          // A 0-dim byte tensor indexes like a Python bool.
          result = result.unsqueeze(dim);
          handle_var(boolToIndexingTensor(result, var.item<uint8_t>() != 0));
        }
      } else {
        handle_var(var);
      }
    } else if (PySequence_Check(obj)) {
      handle_var(sequenceToVariable(self.type(), obj));
    } else {
      // Fall back to the object's __index__; anything else is invalid.
      auto index = THPObjectPtr(PyNumber_Index(obj));
      if (!index) {
        PyErr_Clear();
        invalid_index(obj);
      }
      result = applySelect(result, dim, THPUtils_unpackLong(index), i);
    }
  }
  return result;
}
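// Example (illustrative): for a 3-d tensor x, x[0, ..., None, idx] with a
// 1-d long tensor idx is processed left to right: the 0 selects from dim 0,
// the ellipsis skips the remaining unspecified dimension, None unsqueezes,
// and idx is collected into outIndices for the later dispatch_index call.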
static std::vector<Tensor> typeConvertIndices(const Variable& self, const variable_list& indices) {
  std::vector<Tensor> converted_inds(indices.size());
  for (size_t i = 0; i < indices.size(); ++i) {
    const auto& ind = indices[i];
    if (ind.defined()) {
      converted_inds[i] = ind.to(ind.options().device(self.device()));
    } else {
      converted_inds[i] = indices[i];
    }
  }
  return converted_inds;
}
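// Example (illustrative): indexing a CUDA tensor with a CPU index tensor,
// e.g. x_cuda[torch.tensor([0, 2])], moves each defined index onto
// x_cuda's device here; undefined entries (dimensions covered only by
// slices) are copied through untouched.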
static Variable dispatch_index(const Variable& self, const variable_list& indices) {
  // Release the GIL and pin the device for the duration of the ATen call.
  AutoNoGIL no_gil;
  std::vector<Tensor> converted_indices = typeConvertIndices(self, indices);
  OptionalDeviceGuard device_guard(device_of(self));
  return self.index(converted_indices);
}
static Variable dispatch_index_put_(Variable& self, const variable_list& indices, const Variable& value) {
  AutoNoGIL no_gil;
  std::vector<Tensor> converted_indices = typeConvertIndices(self, indices);
  OptionalDeviceGuard device_guard(device_of(self));
  return self.index_put_(converted_indices, value);
}
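// Example (illustrative): once applySlicing has collected tensor indices,
// x[idx] lowers to dispatch_index(sliced, {idx}) -> at::Tensor::index, and
// x[idx] = v lowers to dispatch_index_put_(sliced, {idx}, v) ->
// at::Tensor::index_put_, with the GIL released around the kernel.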
// NumPy-compatible heuristic: a short (< 32 element) sequence is treated as
// a tuple of indices if any element is a tensor, sequence, slice, Ellipsis,
// or None; otherwise it is treated as a single index.
static bool treatSequenceAsTuple(PyObject* index) {
  if (PyTuple_Check(index)) {
    return true;
  }
  if (!PySequence_Check(index)) {
    return false;
  }
  auto n = PySequence_Size(index);
  if (n < 0) {
    PyErr_Clear();  // PySequence_Size failed; treat as a single index
    return false;
  }
  if (n >= 32) {
    return false;
  }
  for (Py_ssize_t i = 0; i < n; i++) {
    auto obj = THPObjectPtr{PySequence_GetItem(index, i)};
    if (!obj.get()) {
      PyErr_Clear();
      return false;
    }
    if (THPVariable_Check(obj.get()) || PySequence_Check(obj.get()) || PySlice_Check(obj.get())) {
      return true;
    }
    if (obj.get() == Py_Ellipsis || obj.get() == Py_None) {
      return true;
    }
  }
  return false;
}

static THPObjectPtr wrapTuple(PyObject* index) {
  THPObjectPtr res;
  if (treatSequenceAsTuple(index)) {
    res = PySequence_Tuple(index);
  } else {
    res = PyTuple_Pack(1, index);
  }
  if (!res) throw python_error();
  return res;
}
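// Example (illustrative): x[[0, 2]] is a list of plain ints, so it is
// packed as a 1-tuple and later converted into a single long index tensor
// (advanced indexing). By contrast x[[0, slice(None)]] contains a slice,
// so the list is treated as if it were the tuple in x[0, :].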
PyObject* THPVariable_getitem(PyObject* self, PyObject* index) {
  HANDLE_TH_ERRORS
  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  OptionalDeviceGuard device_guard(device_of(self_));

  // handle simple types: integers, slices, ellipsis
  if (index == Py_None) {
    return wrap(self_.unsqueeze(0));
  } else if (index == Py_Ellipsis) {
    return wrap(at::alias(self_));
  } else if (THPUtils_checkLong(index)) {
    return wrap(applySelect(self_, 0, THPUtils_unpackLong(index)));
  } else if (PySlice_Check(index)) {
    return wrap(applySlice(self_, 0, index, true));
  }

  // wrap the index in a tuple if it's not already one
  THPObjectPtr holder = wrapTuple(index);

  variable_list variableIndices;
  Variable sliced = applySlicing(self_, holder.get(), variableIndices);
  if (variableIndices.empty()) {
    if (sliced.is_same(self_)) {
      // ensure we return a shallow copy for things like x[...]
      sliced = at::alias(sliced);
    }
    return wrap(sliced);
  }

  // indexing by tensors ("advanced" indexing)
  return wrap(dispatch_index(sliced, variableIndices));
  END_HANDLE_TH_ERRORS
}
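// Example (illustrative): the fast paths above mean x[None] is just an
// unsqueeze(0), x[...] returns a fresh view aliasing the same storage,
// x[3] is a select, and x[1:4] is a slice; only compound or tensor indices
// take the tuple + applySlicing route.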
// To match NumPy semantics, strip unit dimensions from the front of `src`
// before broadcasting it into the assignment target.
static IntArrayRef slicePrefix1sSize(IntArrayRef sizes) {
  size_t first_non1_src = sizes.size();
  for (size_t i = 0; i < sizes.size(); ++i) {
    if (sizes[i] != 1) {
      first_non1_src = i;
      break;
    }
  }
  return sizes.slice(first_non1_src);
}
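// Example (illustrative): slicePrefix1sSize({1, 1, 3, 4}) yields {3, 4},
// while {3, 1, 4} is returned unchanged because its first entry is not 1.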
static void copy_to(Variable dst, const Variable& src) {
  Tensor b_src;
  IntArrayRef sliced_src_sizes = slicePrefix1sSize(src.sizes());
  std::tie(b_src) = expand_inplace(dst, src.view(sliced_src_sizes), "setitem");
  dst.copy_(b_src);
}
int THPVariable_setitem(PyObject* self, PyObject* index, PyObject* py_value) {
  HANDLE_TH_ERRORS
  if (py_value == nullptr) {
    throw TypeError("Tensor does not support deleting items");
  }

  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
  OptionalDeviceGuard device_guard(device_of(self_));
  auto value = valueToTensor(self_.type(), py_value);

  // handle simple types: integers, slices, ellipsis, bool
  if (index == Py_False) {
    // do nothing; a false index selects no elements
    return 0;
  } else if (index == Py_Ellipsis) {
    copy_to(self_, value);
    return 0;
  } else if (index == Py_None || index == Py_True) {
    copy_to(self_.unsqueeze(0), value);
    return 0;
  } else if (THPUtils_checkLong(index)) {
    copy_to(applySelect(self_, 0, THPUtils_unpackLong(index)), value);
    return 0;
  } else if (PySlice_Check(index)) {
    copy_to(applySlice(self_, 0, index), value);
    return 0;
  }

  // wrap the index in a tuple if it's not already one
  THPObjectPtr holder = wrapTuple(index);

  variable_list variableIndices;
  Variable sliced = applySlicing(self_, holder.get(), variableIndices);
  if (variableIndices.empty()) {
    copy_to(sliced, value);
    return 0;
  }

  IntArrayRef slicedValueSizes = slicePrefix1sSize(value.sizes());
  Variable valuesSliced;
  if (!value.sizes().equals(slicedValueSizes)) {
    valuesSliced = value.view(slicedValueSizes);
  } else {
    valuesSliced = value;
  }
  dispatch_index_put_(sliced, variableIndices, valuesSliced);
  return 0;
  END_HANDLE_TH_ERRORS_RET(-1)
}

}} // namespace torch::autograd