Caffe2 - C++ API
A deep learning, cross-platform ML framework
check_alias_annotation.cpp
#include <torch/csrc/jit/passes/utils/check_alias_annotation.h>

namespace torch {
namespace jit {
namespace {

IValue deepCopy(const IValue& self) {
  // primitive types can be copied directly
  if (!self.isPtrType()) {
    return self;
  }

  // Tensors need special handling, since copy assignment creates an alias
  if (self.isTensor()) {
    return IValue(self.toTensor().clone());
  }
  if (self.isTensorList()) {
    std::vector<at::Tensor> newList;
    for (const auto& oldTensor : self.toTensorListRef()) {
      newList.push_back(oldTensor.clone());
    }
    return newList;
  }

  // Lists of ivalues should recursively deep copy their contents
  if (self.isGenericList()) {
    std::vector<IValue> newList;
    for (const auto& value : self.toGenericListRef()) {
      newList.push_back(deepCopy(value));
    }
    return newList;
  }

  // Regular lists can copy assign
  if (self.isIntList()) {
    return IValue(self.toIntListRef());
  } else if (self.isDoubleList()) {
    return IValue(self.toDoubleListRef());
  } else if (self.isBoolList()) {
    return IValue(self.toBoolListRef());
  } else if (self.isString()) {
    return IValue(self.toStringRef());
  }

  // If in the future we add more reference types that are used in aten ops,
  // we'll have to add them as cases here.
  AT_ASSERT(false);
}

Stack deepCopy(const Stack& stack) {
  Stack ret;
  ret.reserve(stack.size());
  for (const auto& v : stack) {
    ret.push_back(deepCopy(v));
  }
  return ret;
}

bool deepEquals(const IValue& lhs, const IValue& rhs) {
  if (lhs.isInt() && rhs.isInt()) {
    return lhs.toInt() == rhs.toInt();
  } else if (lhs.isDouble() && rhs.isDouble()) {
    return lhs.toDouble() == rhs.toDouble();
  } else if (lhs.isNone() && rhs.isNone()) {
    return true;
  } else if (lhs.isIntList() && rhs.isIntList()) {
    return lhs.toIntList()->elements() == rhs.toIntList()->elements();
  } else if (lhs.isTensor() && rhs.isTensor()) {
    return lhs.toTensor().equal(rhs.toTensor());
  }

  throw std::runtime_error("Deep equals not implemented for type");
}

struct AliasAndIValue {
  AliasAndIValue(c10::optional<at::AliasInfo> aliasInfo, IValue iValue)
      : aliasInfo(std::move(aliasInfo)), iValue(std::move(iValue)) {}

  const c10::optional<at::AliasInfo> aliasInfo;
  const IValue iValue;
};

// No inputs should alias each other
void checkInputPreconditions(const Stack& inputs) {
  for (size_t i = 0; i < inputs.size(); i++) {
    for (size_t j = 0; j < inputs.size(); j++) {
      if (i == j) {
        continue;
      }
      const auto& lhs = inputs.at(i);
      const auto& rhs = inputs.at(j);
      AT_ASSERT(!lhs.isAliasOf(rhs));
    }
  }
}

// If two ivalues alias, they must share an alias set
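// For example, an in-place op is declared with a schema along the lines of
//   aten::add_(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
// where `self` and the returned tensor both carry the alias set "a" (and the
// "!" marks a write), so the before-set intersection checked below is
// non-empty.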
void checkAliases(
    const std::vector<AliasAndIValue>& inputs,
    const std::vector<AliasAndIValue>& outputs) {
  for (const auto& output : outputs) {
    // if this output aliases any input, make sure that they share an alias set
    for (const auto& input : inputs) {
      if (output.iValue.isAliasOf(input.iValue)) {
        const auto inputSet = input.aliasInfo;
        const auto outputSet = output.aliasInfo;
        AT_ASSERT(inputSet && outputSet);
        bool found = false;
        for (const auto& set : inputSet->beforeSets()) {
          if (outputSet->beforeSets().count(set)) {
            found = true;
            break;
          }
        }
        AT_ASSERT(found);
      }
    }
  }
}

// If we didn't specify that we write to an input value, it must not have
// changed
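// (For instance, an op whose schema has no "(a!)"-style write marker on an
// argument is expected to leave that argument deepEquals-equal to the deep
// copy taken before the op ran.)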
void checkWrites(
    const std::vector<AliasAndIValue>& inputs,
    const std::vector<IValue>& deepCopiedInputs) {
  AT_ASSERT(inputs.size() == deepCopiedInputs.size());
  for (size_t i = 0; i < inputs.size(); i++) {
    const auto& input = inputs[i];
    const auto& deepCopiedInput = deepCopiedInputs[i];
    if (!input.aliasInfo || !input.aliasInfo->isWrite()) {
      AT_ASSERT(deepEquals(input.iValue, deepCopiedInput));
    }
  }
}

const Node* findNodeForOp(
    const Graph& g,
    const std::string& unqualifiedOpName) {
  const auto opName = Symbol::fromQualString("aten::" + unqualifiedOpName);
  for (const auto node : g.nodes()) {
    if (node->kind() == opName) {
      return node;
    }
  }
  AT_ASSERT(false);
}

// Handle a few special cases where we need to propagate constants
// manually
// TODO(suo): we should be able to move this stuff to constant prop
c10::optional<IValue> toIValueProp(const Value* v) {
  if (v->node()->kind() == prim::ListConstruct) {
    std::vector<IValue> genericList;
    for (auto input : v->node()->inputs()) {
      if (auto elem = toIValue(input)) {
        genericList.push_back(*elem);
      } else {
        // One of the list elements isn't constant.
        return c10::nullopt;
      }
    }

    // Specialize the list based on ListConstruct's return type
    auto listType = v->node()->output()->type();
    auto containedType = listType->containedTypes().at(0);
    if (containedType == IntType::get()) {
      return fmap(genericList, [](const IValue& v) { return v.toInt(); });
    } else if (containedType == FloatType::get()) {
      return fmap(genericList, [](const IValue& v) { return v.toDouble(); });
    } else if (containedType->isSubtypeOf(TensorType::get())) {
      return fmap(genericList, [](const IValue& v) { return v.toTensor(); });
    } else {
      return c10::nullopt;
    }
  }

  if (v->node()->kind() == prim::Float) {
    auto op = getOperation(v->node());
    if (auto input = toIValue(v->node()->input())) {
      Stack stack;
      push(stack, *input);
      op(stack);
      return stack.back();
    } else {
      return c10::nullopt;
    }
  }
  return c10::nullopt;
}
} // namespace

void checkAliasAnnotation(
    const std::shared_ptr<Graph>& graph,
    std::vector<IValue> pythonInputs,
    const std::string& unqualifiedOpName) {
  // Find the node that corresponds to our op name
  const auto node = findNodeForOp(*graph, unqualifiedOpName);

  // Build the stack to use as input to the op
  Stack stack;
  for (const auto input : node->inputs()) {
    if (input->node() == graph->param_node()) {
      // This value was passed as an input in python
      push(stack, pythonInputs.at(input->offset()));
    } else {
      // This is a generated constant, which we need to evaluate
      auto inputValue = toIValue(input);
      if (!inputValue) {
        inputValue = toIValueProp(input);
      }

      if (inputValue) {
        push(stack, *inputValue);
      } else {
        AT_ASSERT(input->type()->kind() == TypeKind::OptionalType);
        push(stack, IValue());
      }
    }
  }

  // Precondition: no inputs should alias each other. So if we find an alias,
  // it was created by the op.
  checkInputPreconditions(stack);

  const auto schema = node->schema();

  std::vector<AliasAndIValue> inputsToCheck;
  for (size_t i = 0; i < schema.arguments().size(); i++) {
    inputsToCheck.emplace_back(
        schema.arguments().at(i).alias_info(), stack.at(i));
  }

  // Save a copy of the inputs so we can check whether the original inputs were
  // written to.
  const auto inputsDeepCopy = deepCopy(stack);

  // Run the op
  getOperation(node)(stack);

  const auto outputs = std::move(stack);

  std::vector<AliasAndIValue> outputsToCheck;
  for (size_t i = 0; i < schema.returns().size(); i++) {
    outputsToCheck.emplace_back(
        schema.returns().at(i).alias_info(), outputs.at(i));
  }

  // Check that if any alias was created, we annotated it properly.
  checkAliases(inputsToCheck, outputsToCheck);

  // Check that nothing was accidentally written to.
  checkWrites(inputsToCheck, inputsDeepCopy);
}

} // namespace jit
} // namespace torch
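
Below is a minimal usage sketch of how checkAliasAnnotation might be driven directly from C++; in practice it is typically exercised from the Python test suite through a binding on this function. The graph construction, the choice of aten::relu, the include paths, and the exampleCheck wrapper are illustrative assumptions rather than part of this file.

#include <ATen/ATen.h>
#include <torch/csrc/jit/ir.h> // path may differ across versions
#include <torch/csrc/jit/passes/utils/check_alias_annotation.h>

void exampleCheck() {
  using namespace torch::jit;

  // Build a one-op graph: %1 = aten::relu(%0)
  auto graph = std::make_shared<Graph>();
  Value* in = graph->addInput();
  Value* out = graph->insert(Symbol::fromQualString("aten::relu"), {in});
  graph->registerOutput(out);

  // The inputs that would normally come from Python, matched to the graph
  // inputs by offset.
  std::vector<IValue> inputs = {at::rand({2, 2})};

  // Runs aten::relu on the stack and asserts that the schema's alias/write
  // annotations match the observed aliasing and mutation behavior.
  checkAliasAnnotation(graph, std::move(inputs), "relu");
}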