// Caffe2 - C++ API
// A deep learning, cross platform ML framework
// tensor_interop_test.cpp
#include "gtest/gtest.h"

#include "ATen/ATen.h"
#include <caffe2/core/init.h>
#include <caffe2/core/operator.h>
7 TEST(Caffe2ToPytorch, SimpleLegacy) {
8  caffe2::Tensor c2_tensor(caffe2::CPU);
9  c2_tensor.Resize(4, 4);
10  auto data = c2_tensor.mutable_data<int64_t>();
11  for (int64_t i = 0; i < 16; i++) {
12  data[i] = i;
13  }
14  at::Tensor at_tensor(c2_tensor);
15  ASSERT_TRUE(&at_tensor.type() != nullptr);
16 
17  auto it = at_tensor.data<int64_t>();
18  for (int64_t i = 0; i < 16; i++) {
19  ASSERT_EQ(it[i], i);
20  }
21 }
22 
23 TEST(Caffe2ToPytorch, Simple) {
24  caffe2::Tensor c2_tensor = caffe2::empty({4, 4}, at::kLong);
25  auto data = c2_tensor.mutable_data<int64_t>();
26  for (int64_t i = 0; i < 16; i++) {
27  data[i] = i;
28  }
29  at::Tensor at_tensor(c2_tensor);
30  ASSERT_TRUE(&at_tensor.type() != nullptr);
31 
32  auto it = at_tensor.data<int64_t>();
33  for (int64_t i = 0; i < 16; i++) {
34  ASSERT_EQ(it[i], i);
35  }
36 }
37 
38 TEST(Caffe2ToPytorch, ExternalData) {
39  caffe2::Tensor c2_tensor = caffe2::empty({4, 4}, at::kLong);
40  int64_t buf[16];
41  for (int64_t i = 0; i < 16; i++) {
42  buf[i] = i;
43  }
44  c2_tensor.ShareExternalPointer(buf, 16);
45 
46  // If the buffer is allocated externally, we can still pass tensor around,
47  // but we can't resize its storage using PT APIs
48  at::Tensor at_tensor(c2_tensor);
49  auto it = at_tensor.data<int64_t>();
50  for (int64_t i = 0; i < 16; i++) {
51  ASSERT_EQ(it[i], i);
52  }
53  ASSERT_FALSE(at_tensor.storage().resizable());
54  ASSERT_ANY_THROW(at_tensor.resize_({7,7}));
55 }
56 
57 TEST(Caffe2ToPytorch, Op) {
58  caffe2::Tensor c2_tensor(caffe2::CPU);
59  c2_tensor.Resize(3, 3);
60  auto data = c2_tensor.mutable_data<int64_t>();
61  for (int64_t i = 0; i < 9; i++) {
62  data[i] = i;
63  }
64  at::Tensor at_tensor(c2_tensor);
65 
66  ASSERT_EQ(at::sum(at_tensor).item<int64_t>(), 36);
67 }
68 
// Caffe2 doesn't actually have another always-on backend that is not CPU or GPU
// TEST(Caffe2ToPytorch, UnsupportedDevice) {
//   caffe2::Tensor c2_tensor(caffe2::OPENGL);
//   c2_tensor.Resize(4, 4);
//   c2_tensor.mutable_data<float>();
//   at::Tensor at_tensor(c2_tensor);
//   ASSERT_ANY_THROW(at::sum(at_tensor));
// }
77 
78 TEST(Caffe2ToPytorch, PartiallyInitialized) {
79  // These APIs for partially initialized tensors should go away soon, in the
80  // meantime ensure they are caught
81  {
82  // no dtype, no storage
83  caffe2::Tensor c2_tensor(caffe2::CPU);
84  ASSERT_ANY_THROW(at::Tensor at_tensor(c2_tensor));
85  }
86  {
87  // storage, no dtype
88  caffe2::Tensor c2_tensor(caffe2::CPU);
89  c2_tensor.Resize(4,4);
90  ASSERT_ANY_THROW(at::Tensor at_tensor(c2_tensor));
91  }
92  {
93  // dtype, no storage
94  caffe2::Tensor c2_tensor(caffe2::CPU);
95  c2_tensor.Resize(4,4);
96  c2_tensor.mutable_data<float>();
97  c2_tensor.FreeMemory();
98  ASSERT_ANY_THROW(at::Tensor at_tensor(c2_tensor));
99  }
100 }
101 
102 TEST(Caffe2ToPytorch, MutualResizes) {
103  caffe2::Tensor c2_tensor = caffe2::empty({5, 5}, at::kFloat);
104  auto data = c2_tensor.mutable_data<float>();
105  for (int64_t i = 0; i < 25; i++) {
106  data[i] = 0;
107  }
108 
109  at::Tensor at_tensor(c2_tensor);
110 
111  // change is visible
112  at_tensor[0][0] = 123;
113  ASSERT_EQ(c2_tensor.mutable_data<float>()[0], 123);
114 
115  // resize PT tensor in smaller direction - storage is preserved
116  at_tensor.resize_({4, 4});
117  c2_tensor.mutable_data<float>()[1] = 234;
118  ASSERT_EQ(at_tensor[0][1].item().to<float>(), 234);
119 
120  // resize PT tensor in larger direction - storage is preserved
121  at_tensor.resize_({6, 6});
122  c2_tensor.mutable_data<float>()[2] = 345;
123  ASSERT_EQ(at_tensor[0][2].item().to<float>(), 345);
124  ASSERT_EQ(c2_tensor.sizes()[0], 6);
125  ASSERT_EQ(c2_tensor.sizes()[1], 6);
126 
127  // resize Caffe2 tensor - semantics are to NOT preserve the data, but the
128  // TensorImpl is still shared
129  c2_tensor.Resize(7, 7);
130  c2_tensor.mutable_data<float>()[3] = 456;
131  ASSERT_EQ(at_tensor[0][3].item().to<float>(), 456);
132  ASSERT_EQ(at_tensor.sizes()[0], 7);
133  ASSERT_EQ(at_tensor.sizes()[1], 7);
134 }
135 
136 TEST(PytorchToCaffe2, Op) {
137  caffe2::Workspace workspace;
138  caffe2::NetDef net;
139 
140  auto at_tensor_a = at::ones({5, 5}, at::dtype(at::kFloat));
141  auto at_tensor_b = at::ones({5, 5}, at::dtype(at::kFloat));
142  auto at_tensor_c = at::ones({5, 5}, at::dtype(at::kFloat));
143 
144  auto* c2_tensor_a = BlobSetTensor(workspace.CreateBlob("a"), caffe2::Tensor(at_tensor_a));
145  auto* c2_tensor_b = BlobSetTensor(workspace.CreateBlob("b"), caffe2::Tensor(at_tensor_b));
146 
147  // Test Alias
148  {
149  caffe2::Tensor c2_tensor_from_aten(at_tensor_c);
150  BlobSetTensor(workspace.CreateBlob("c"), c2_tensor_from_aten.Alias());
151  }
152 
153  {
154  auto op = net.add_op();
155  op->set_type("Sum");
156  op->add_input("a");
157  op->add_input("b");
158  op->add_input("c");
159  op->add_output("d");
160  }
161 
162  workspace.RunNetOnce(net);
163 
164  auto result = XBlobGetMutableTensor(workspace.CreateBlob("d"), {5, 5}, at::kCPU);
165 
166  auto it = result.data<float>();
167  for (int64_t i = 0; i < 25; i++) {
168  ASSERT_EQ(it[i], 3.0);
169  }
170  at::Tensor at_result(result);
171  ASSERT_EQ(at::sum(at_result).item<float>(), 75);
172 }
173 
174 TEST(PytorchToCaffe2, SharedStorageRead) {
175  caffe2::Workspace workspace;
176  caffe2::NetDef net;
177 
178  auto at_tensor_a = at::ones({5, 5}, at::dtype(at::kFloat));
179  auto at_tensor_b = at_tensor_a.view({5, 5});
180 
181  auto* c2_tensor_a = BlobSetTensor(workspace.CreateBlob("a"), caffe2::Tensor(at_tensor_a));
182  auto* c2_tensor_b = BlobSetTensor(workspace.CreateBlob("b"), caffe2::Tensor(at_tensor_b));
183 
184  {
185  auto op = net.add_op();
186  op->set_type("Add");
187  op->add_input("a");
188  op->add_input("b");
189  op->add_output("c");
190  }
191 
192  workspace.RunNetOnce(net);
193 
194  auto result = XBlobGetMutableTensor(workspace.CreateBlob("c"), {5, 5}, at::kCPU);
195  auto it = result.data<float>();
196  for (int64_t i = 0; i < 25; i++) {
197  ASSERT_EQ(it[i], 2.0);
198  }
199  at::Tensor at_result(result);
200  ASSERT_EQ(at::sum(at_result).item<float>(), 50);
201 }
202 
203 TEST(PytorchToCaffe2, SharedStorageWrite) {
204  auto at_tensor_a = at::ones({5, 5}, at::dtype(at::kFloat));
205  auto at_tensor_b = at_tensor_a.view({25});
206 
207  caffe2::Tensor c2_tensor_a(at_tensor_a);
208  caffe2::Tensor c2_tensor_b(at_tensor_b);
209 
210  // change is visible everywhere
211  c2_tensor_a.mutable_data<float>()[1] = 123;
212  ASSERT_EQ(c2_tensor_b.mutable_data<float>()[1], 123);
213  ASSERT_EQ(at_tensor_a[0][1].item().to<float>(), 123);
214  ASSERT_EQ(at_tensor_b[1].item().to<float>(), 123);
215 }
216 
217 TEST(PytorchToCaffe2, MutualResizes) {
218  auto at_tensor = at::ones({5, 5}, at::dtype(at::kFloat));
219 
220  caffe2::Tensor c2_tensor(at_tensor);
221 
222  // change is visible
223  c2_tensor.mutable_data<float>()[0] = 123;
224  ASSERT_EQ(at_tensor[0][0].item().to<float>(), 123);
225 
226  // resize PT tensor in smaller direction - storage is preserved
227  at_tensor.resize_({4, 4});
228  c2_tensor.mutable_data<float>()[1] = 234;
229  ASSERT_EQ(at_tensor[0][1].item().to<float>(), 234);
230 
231  // resize PT tensor in larger direction - storage is preserved
232  at_tensor.resize_({6, 6});
233  c2_tensor.mutable_data<float>()[2] = 345;
234  ASSERT_EQ(at_tensor[0][2].item().to<float>(), 345);
235  ASSERT_EQ(c2_tensor.sizes()[0], 6);
236  ASSERT_EQ(c2_tensor.sizes()[1], 6);
237 
238  // resize Caffe2 tensor - semantics are to NOT preserve the data, but the
239  // TensorImpl is still shared
240  c2_tensor.Resize(7, 7);
241  c2_tensor.mutable_data<float>()[3] = 456;
242  ASSERT_EQ(at_tensor[0][3].item().to<float>(), 456);
243  ASSERT_EQ(at_tensor.sizes()[0], 7);
244  ASSERT_EQ(at_tensor.sizes()[1], 7);
245 }
246 
247 TEST(PytorchToCaffe2, Strided) {
248  auto at_tensor = at::ones({5, 5}, at::dtype(at::kFloat)).t();
249  ASSERT_ANY_THROW(caffe2::Tensor c2_tensor(at_tensor));
250  // but calling contiguous is fine
251  caffe2::Tensor c2_tensor(at_tensor.contiguous());
252  for (int64_t i = 0; i < 25; i++) {
253  ASSERT_EQ(c2_tensor.data<float>()[i], 1.0);
254  }
255 }
256 
257 TEST(PytorchToCaffe2, InplaceStrided) {
258  auto at_tensor = at::zeros({2, 5}, at::dtype(at::kFloat));
259  caffe2::Tensor c2_tensor(at_tensor);
260  ASSERT_EQ(c2_tensor.sizes()[0], 2);
261  ASSERT_EQ(c2_tensor.sizes()[1], 5);
262 
263  c2_tensor.mutable_data<float>()[1] = 234;
264  ASSERT_EQ(at_tensor[0][1].item().to<float>(), 234);
265 
266  at_tensor.t_();
267  ASSERT_EQ(c2_tensor.sizes()[0], 5);
268  ASSERT_EQ(c2_tensor.sizes()[1], 2);
269  // This is BROKEN situation, however checking is_contiguous on every data
270  // access is expensive. We rely on user to not do crazy stuff.
271  ASSERT_EQ(at_tensor[1][0].item().to<float>(), 234);
272  ASSERT_EQ(c2_tensor.data<float>()[1], 234);
273 }
274 
275 TEST(PytorchToCaffe2, NonRegularTensor) {
276  at::Tensor at_tensor =
277  at::empty({2, 3}, at::dtype<float>().layout(at::kSparse));
278  ASSERT_TRUE(at_tensor.is_sparse());
279  ASSERT_ANY_THROW(caffe2::Tensor c2_tensor(at_tensor));
280 }
281 
// With current build system it's too bothersome to set it up, but the test
// passes
// TEST(PytorchToCaffe2, Variable) {
//   at::Tensor var =
//       torch::autograd::make_variable(at::empty({2, 3}, at::dtype<float>()));
//   ASSERT_TRUE(var.is_variable());
//   ASSERT_ANY_THROW(caffe2::Tensor c2_tensor(var));
// }
290 
291 TEST(Caffe2ToPytorch, NonPOD) {
292  caffe2::Tensor c2_tensor = caffe2::empty({1}, at::dtype<std::string>());
293  auto data = c2_tensor.mutable_data<std::string>();
294  *data = "test";
295  ASSERT_ANY_THROW(at::Tensor at_tensor(c2_tensor));
296 }
297 
298 TEST(Caffe2ToPytorch, Nullptr) {
299  caffe2::Tensor c2_tensor;
300  ASSERT_FALSE(c2_tensor.defined());
301  at::Tensor at_tensor(c2_tensor);
302  ASSERT_FALSE(at_tensor.defined());
303 }
304 
305 TEST(PytorchToCaffe2, Nullptr) {
306  at::Tensor at_tensor;
307  ASSERT_FALSE(at_tensor.defined());
308  caffe2::Tensor c2_tensor(at_tensor);
309  ASSERT_FALSE(c2_tensor.defined());
310 }
// Doxygen cross-reference residue (retained as comments):
//   Blob* Workspace::CreateBlob(const string& name) — creates a blob of the
//   given name. Definition: workspace.cc:100
//   Workspace is a class that holds all the related objects created during
//   runtime: (1) all blobs... Definition: workspace.h:47
//   bool Tensor::is_sparse() const — returns if a Tensor has sparse backend.