#pragma once

#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/jit/ir.h>
#include <ATen/core/stack.h>
#include <ATen/core/jit_type.h>
#include <torch/csrc/jit/variable_tensor_list.h>
#include <torch/csrc/utils/hash.h>

#include <cstring>
#include <iostream>
#include <vector>

namespace torch {
namespace jit {

// GraphExecutor creates specializations of Graphs for different
// dimensionalities and types of inputs.

// Device indices are encoded as a signed integer: -1 means CPU,
// a non-negative value is a CUDA device index.
inline static at::Device ConvertIntToCPUOrCUDA(int device) {
  return device < 0 ? at::kCPU : at::Device(at::DeviceType::CUDA, device);
}
// Compact summary of a single input argument: whether it is a (defined)
// tensor, its scalar type, dimensionality, device, and whether it requires
// grad. Packed into 32 bits so the enclosing ArgumentSpec can be hashed and
// compared as raw memory.
struct ArgumentInfo {
  friend struct ArgumentSpec;
  using plain_data_type = uint32_t;

  bool isTensor() const {
    return is_tensor_;
  }
  bool defined() const {
    return defined_;
  }
  int device() const {
    return device_;
  }
  // Guaranteed to return false when called on non-tensor arguments.
  bool requires_grad() const {
    return requires_grad_;
  }
  int dim() const {
    return dim_;
  }
  at::ScalarType type() const {
    return at::ScalarType(type_);
  }
  operator TypePtr() const {
    if (!defined())
      return TensorType::get();
    return DimensionedTensorType::create(
        type(), ConvertIntToCPUOrCUDA(device()), dim());
  }

 private:
  unsigned is_tensor_ : 1;
  unsigned defined_ : 1;
  unsigned requires_grad_ : 1;
  unsigned : 5; // padding so the fields below are byte-aligned
  unsigned dim_ : 8;
  int device_ : 8;
  unsigned type_ : 8;
};
static_assert(
    std::is_pod<ArgumentInfo>::value,
    "ArgumentInfo is expected to be a POD struct");
static_assert(
    sizeof(ArgumentInfo) == sizeof(ArgumentInfo::plain_data_type),
    "ArgumentInfo is expected to be a 32-bit struct");
// ArgumentSpec is a fixed-size fingerprint of a list of inputs, used by the
// GraphExecutor as a cache key to look up code specialized for inputs of
// these particular types and dimensionalities.
struct ArgumentSpec {
  ArgumentSpec(
      bool with_grad,
      at::ArrayRef<IValue> inputs,
      size_t num_flat_inputs) {
    hash_code = num_flat_inputs;
    args.resize(num_flat_inputs);
    size_t offset = 0;
    for (const auto& i : inputs) {
      addInput(i, offset, with_grad);
    }
    AT_ASSERT(offset <= num_flat_inputs);
  }
  void addInput(const IValue& input, size_t& offset, bool with_grad) {
    auto& arg = args.at(offset);
    // Zero-initialize all fields (including padding bits) so the raw
    // 32-bit representation is deterministic and safe to hash/compare.
    std::memset(&arg, 0, sizeof(ArgumentInfo));

    if (input.isTensor()) {
      at::Tensor t = input.toTensor();
      if ((arg.defined_ = t.defined())) {
        arg.requires_grad_ =
            with_grad && autograd::as_variable_ref(t).requires_grad();
        arg.dim_ = t.dim();
        arg.device_ = t.is_cuda() ? t.get_device() : -1;
        arg.type_ = static_cast<unsigned>(t.scalar_type());
      }
      arg.is_tensor_ = true;
      combineHash(arg);
      offset++;
    } else if (input.isTuple()) {
      // Tuples are flattened: each element gets its own ArgumentInfo slot.
      for (const IValue& elem : input.toTuple()->elements()) {
        addInput(elem, offset, with_grad);
      }
    } else {
      // Non-tensor, non-tuple inputs contribute an all-zero ArgumentInfo
      // (is_tensor_ stays false thanks to the memset above).
      combineHash(arg);
      offset++;
    }
  }

  void combineHash(const ArgumentInfo& arg) {
    ArgumentInfo::plain_data_type arg_data;
    std::memcpy(&arg_data, &arg, sizeof(ArgumentInfo));
    hash_code = hash_combine(hash_code, arg_data);
  }
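
// Note on flattening (illustrative): tuple inputs are expanded recursively,
// so an input list such as (tensor, (tensor, tensor)) consumes three
// ArgumentInfo slots. This is why callers pass num_flat_inputs to the
// constructor above rather than inputs.size().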
  // Equality is fast: check the number of args, then compare the raw array
  // data; there are no size/stride indirections to chase.
  bool operator==(const ArgumentSpec& spec) const {
    if (args.size() != spec.args.size())
      return false;
    // Break out early when there are no elements, because passing a null
    // pointer to memcmp is undefined behavior.
    if (args.size() == 0)
      return true;
    return std::memcmp(
               args.data(),
               spec.args.data(),
               args.size() * sizeof(ArgumentInfo)) == 0;
  }
  bool operator!=(const ArgumentSpec& spec) const {
    return !(*this == spec);
  }
  size_t size() const {
    return args.size();
  }
  const ArgumentInfo& at(size_t i) const {
    return args[i];
  }
  size_t hashCode() const {
    return hash_code;
  }
  // For every input of the given graph, return the most detailed type that
  // can be inferred for it from this ArgumentSpec.
  std::vector<TypePtr> getTypes(Graph& graph) const {
    size_t offset = 0;
    return fmap(
        graph.inputs(), [&](Value* v) { return fillType(v->type(), offset); });
  }
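
  // Example of the refinement performed by fillType() below (illustrative):
  // a graph input of static type Tensor, matched against a defined 2-D float
  // argument on CUDA device 0, refines to
  // DimensionedTensorType(Float, cuda:0, dim=2); an undefined tensor
  // argument refines to AutogradZeroTensorType instead.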
 private:
  TypePtr fillType(TypePtr original, size_t& offset) const {
    if (original->isSubtypeOf(TensorType::get())) {
      auto& arg = args.at(offset++);
      if (!arg.defined())
        return AutogradZeroTensorType::get();
      return DimensionedTensorType::create(
          arg.type(),
          ConvertIntToCPUOrCUDA(arg.device()),
          arg.dim(),
          arg.requires_grad());
    } else if (auto tuple_type = original->cast<TupleType>()) {
      return TupleType::create(fmap(
          tuple_type->elements(),
          [&](const TypePtr& subtype) { return fillType(subtype, offset); }));
    } else {
      offset++;
      return original;
    }
  }
  size_t hash_code; // precomputed on construction
  std::vector<ArgumentInfo> args;
};
// CompleteArgumentSpec represents one particular specialization, including
// full size/stride information. It is designed to be created, hashed, and
// compared quickly, since it sits on the hot path of the JIT's cache lookup.

// CompleteArgumentInfoPOD is only used internally in CompleteArgumentSpec;
// API users should use CompleteArgumentInfo.
struct CompleteArgumentInfoPOD {
  // total size is 64 bits
  unsigned is_tensor : 8; // all other fields are invalid if this is false
  unsigned type : 8; // scalar type
  unsigned defined : 1;
  unsigned requires_grad : 1;
  signed device : 14;
  uint32_t total_dims; // running total of dimensions across all previous
                       // entries of tensor_info(), including this one;
                       // 2 * total_dims is the offset into sizes_strides()
                       // for the *next* tensor (tensor 0 starts at offset 0)
};

static_assert(
    sizeof(CompleteArgumentInfoPOD) == sizeof(int64_t),
    "CompleteArgumentInfoPOD must be 64-bit struct for CompleteArgumentSpec encoding to work");

struct CompleteArgumentInfo;
struct CompleteArgumentSpec {
  CompleteArgumentSpec(bool with_grad, at::ArrayRef<IValue> inputs)
      : hash_code(0), ninputs(inputs.size()) {
    int32_t all_dims = 0;
    const int32_t num_inputs = inputs.size();
    for (int32_t i = 0; i < num_inputs; i++) {
      if (!inputs[i].isTensor())
        continue;
      auto tensor = inputs[i].toTensor();
      all_dims += tensor.defined() ? tensor.ndimension() : 0;
    }
    // allocate enough room for all PODs and dimensions
    data.resize(ninputs + all_dims * 2);

    // and reinterpret our data array as these structs
    auto* pods = reinterpret_cast<CompleteArgumentInfoPOD*>(data.data());
    int64_t* next_dim = sizes_strides();
    int32_t total_dims = 0;
    for (int32_t i = 0; i < num_inputs; i++) {
      auto& pod = pods[i];
      pod.is_tensor = static_cast<uint32_t>(inputs[i].isTensor());
      if (pod.is_tensor) {
        at::Tensor t = inputs[i].toTensor();
        pod.defined = t.defined();
        if (pod.defined) {
          pod.type = static_cast<int>(t.scalar_type());
          pod.device = t.is_cuda() ? t.get_device() : -1;
          pod.requires_grad =
              with_grad && autograd::as_variable_ref(t).requires_grad();
          total_dims += t.ndimension();
          auto sizes = t.sizes();
          std::copy(sizes.begin(), sizes.end(), next_dim);
          next_dim += sizes.size();
          auto strides = t.strides();
          std::copy(strides.begin(), strides.end(), next_dim);
          next_dim += strides.size();
        }
      }
      // each POD carries a running tally of all dimensions, including its own
      pod.total_dims = total_dims;
    }
    // precompute the hash_code to minimize time spent inside hash-table
    // operations, where a compiler cache lock may be held
    hash_code = hash_combine(0, ninputs);
    for (auto d : data) {
      hash_code = hash_combine(hash_code, d);
    }
  }
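
  // Sketch of the intended hot-path use (CachedCode is a hypothetical
  // stand-in for whatever artifact the caller caches; the std::hash
  // specialization at the bottom of this file makes the spec usable as a
  // key):
  //
  //   std::unordered_map<CompleteArgumentSpec, CachedCode> cache;
  //   CompleteArgumentSpec spec(/*with_grad=*/true, stack_inputs);
  //   auto it = cache.find(spec); // precomputed hash + flat vector compare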
  // equality is fast: check ninputs, then compare the flat data array
  bool operator==(const CompleteArgumentSpec& spec) const {
    return ninputs == spec.ninputs && data == spec.data;
  }
  bool operator!=(const CompleteArgumentSpec& spec) const {
    return !(*this == spec);
  }
  friend struct CompleteArgumentInfo;

  CompleteArgumentInfo at(size_t i) const;

  size_t size() const {
    return ninputs;
  }
  size_t hashCode() const {
    return hash_code;
  }
 private:
  at::ArrayRef<CompleteArgumentInfoPOD> tensor_info() const {
    return at::ArrayRef<CompleteArgumentInfoPOD>(
        reinterpret_cast<const CompleteArgumentInfoPOD*>(data.data()),
        ninputs);
  }
  // start of the size/stride information, which follows the POD list
  const int64_t* sizes_strides() const {
    return data.data() + ninputs;
  }
  int64_t* sizes_strides() {
    return data.data() + ninputs;
  }
  size_t hash_code; // precomputed on construction
  int32_t ninputs;
  // layout: ninputs PODs (each 64 bits), followed by each tensor's sizes and
  // strides, e.g. for 3 tensors:
  //   [t0 POD][t1 POD][t2 POD]
  //   [t0 sizes][t0 strides][t1 sizes][t1 strides][t2 sizes][t2 strides]
  std::vector<int64_t> data;
};
// public view of a compressed CompleteArgumentInfoPOD
struct CompleteArgumentInfo {
  CompleteArgumentInfo(const CompleteArgumentSpec& spec, const int i)
      : spec(spec), i(i) {}
  bool isTensor() const {
    return pod(i).is_tensor;
  }
  at::ScalarType type() const {
    return at::ScalarType(pod(i).type);
  }
  bool defined() const {
    return pod(i).defined;
  }
  bool requires_grad() const {
    return pod(i).requires_grad;
  }
  int device() const {
    return pod(i).device;
  }
  int ndimension() const {
    // See [valid range]: it is always valid to ask for the offset of (i + 1).
    return (sizes_strides_offset(i + 1) - sizes_strides_offset(i)) / 2;
  }
  at::IntArrayRef sizes() const {
    return at::IntArrayRef(
        spec.sizes_strides() + sizes_strides_offset(i), ndimension());
  }
  at::IntArrayRef strides() const {
    int ndim = ndimension();
    return at::IntArrayRef(
        spec.sizes_strides() + sizes_strides_offset(i) + ndim, ndim);
  }
  operator TypePtr() const {
    if (!defined())
      return TensorType::get();
    return CompleteTensorType::create(
        type(), ConvertIntToCPUOrCUDA(device()), sizes(), strides());
  }
 private:
  // Offset into the sizes_strides() array where the sizes of tensor j begin.
  // [valid range] j may be in [0, ninputs]: asking for the offset at ninputs
  // yields the offset the next tensor would have if it existed.
  int sizes_strides_offset(int j) const {
    if (j == 0)
      return 0;
    return 2 * pod(j - 1).total_dims;
  }
  const CompleteArgumentInfoPOD& pod(int j) const {
    return spec.tensor_info().at(j);
  }
  const CompleteArgumentSpec& spec;
  const int i;
};
inline std::ostream& operator<<(std::ostream& out, const ArgumentInfo& info) {
  if (!info.defined()) {
    return out << "<undefined>";
  }
  out << "Tensor(device=" << info.device()
      << ", type=" << toString(info.type())
      << ", requires_grad=" << info.requires_grad()
      << ", dims=" << info.dim() << ")";
  return out;
}
inline std::ostream& operator<<(std::ostream& out, const ArgumentSpec& spec) {
  out << "{";
  for (size_t i = 0; i < spec.size(); ++i) {
    if (i > 0)
      out << ", ";
    out << spec.at(i);
  }
  out << "}";
  return out;
}
inline std::ostream& operator<<(
    std::ostream& out,
    const CompleteArgumentInfo& info) {
  if (!info.defined()) {
    return out << "<undefined>";
  }
  out << "Tensor(device=" << info.device()
      << ", type=" << toString(info.type())
      << ", requires_grad=" << info.requires_grad()
      << ", sizes=" << info.sizes() << ", strides=" << info.strides() << ")";
  return out;
}
inline std::ostream& operator<<(
    std::ostream& out,
    const CompleteArgumentSpec& spec) {
  out << "{";
  for (size_t i = 0; i < spec.size(); ++i) {
    if (i > 0)
      out << ", ";
    out << spec.at(i);
  }
  out << "}";
  return out;
}

inline CompleteArgumentInfo CompleteArgumentSpec::at(size_t i) const {
  return CompleteArgumentInfo(*this, i);
}
inline void setInputTypes(Graph& g, const ArgumentSpec& spec) {
  auto input_types = spec.getTypes(g);
  auto inputs = g.inputs();
  for (size_t i = 0; i < inputs.size(); ++i) {
    inputs[i]->setType(input_types[i]);
  }
}
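
// Typical use (illustrative; graph, last_inputs, and num_flat_inputs are
// assumed caller state): pin down a graph's input types from a spec before
// running type-dependent passes such as shape propagation:
//
//   ArgumentSpec spec(/*with_grad=*/true, last_inputs, num_flat_inputs);
//   setInputTypes(*graph, spec);

} // namespace jit
} // namespace torch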
namespace std {
template <>
struct hash<torch::jit::ArgumentSpec> {
  size_t operator()(const torch::jit::ArgumentSpec& spec) const {
    return spec.hashCode();
  }
};
template <>
struct hash<torch::jit::CompleteArgumentSpec> {
  size_t operator()(const torch::jit::CompleteArgumentSpec& spec) const {
    return spec.hashCode();
  }
};
} // namespace std