Caffe2 - Python API
A deep learning, cross-platform ML framework
helper.py
## @package onnx
# Module caffe2.python.onnx.helper
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from caffe2.proto import caffe2_pb2
from onnx.backend.base import namedtupledict

from caffe2.python.onnx.workspace import Workspace

import io
import logging
import time


log = logging.getLogger(__name__)

def c2_native_run_op(op_def, inputs):
    '''
    Run a single Caffe2 operator in a fresh workspace. `inputs` is either a
    dict of blob name -> value or a list ordered like op_def.input.
    Return the workspace and the outputs as a namedtuple of output names.
    '''
    ws = Workspace()
    if isinstance(inputs, dict):
        for key, value in inputs.items():
            ws.FeedBlob(key, value, op_def.device_option)
    else:
        assert(len(op_def.input) == len(inputs))
        for key, value in zip(op_def.input, inputs):
            ws.FeedBlob(key, value, op_def.device_option)

    ws.RunOperatorOnce(op_def)

    output_names = op_def.output
    output_values = [ws.FetchBlob(name) for name in output_names]
    return ws, namedtupledict('Outputs', output_names)(*output_values)

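A minimal usage sketch (not part of helper.py), assuming caffe2.python.core and numpy are importable: run a single Relu operator on a NumPy input.

import numpy as np
from caffe2.python import core

relu_op = core.CreateOperator("Relu", ["X"], ["Y"])
x = np.random.randn(2, 3).astype(np.float32)
ws, outputs = c2_native_run_op(relu_op, {"X": x})
print(outputs.Y)  # output blobs are accessible by name or by index
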
def c2_native_run_net(init_net, predict_net, inputs):
    '''
    Run a Caffe2 net in a fresh workspace, running init_net first if given.
    `inputs` is either a dict of blob name -> value or a list that is matched
    against the uninitialized (or first) external inputs of predict_net.
    Return the workspace and the external outputs as a namedtuple.
    '''
    ws = Workspace()
    if init_net:
        ws.RunNetOnce(init_net)

    if isinstance(inputs, dict):
        for key, value in inputs.items():
            ws.FeedBlob(key, value, predict_net.device_option)
    else:
        uninitialized = [input_name
                         for input_name in predict_net.external_input
                         if not ws.HasBlob(input_name)]
        if len(uninitialized) == len(inputs):
            for key, value in zip(uninitialized, inputs):
                ws.FeedBlob(key, value, predict_net.device_option)
        else:
            # If everything is already initialized,
            # just feed the first len(inputs) external inputs.
            assert(len(inputs) <= len(predict_net.external_input))
            for i in range(len(inputs)):
                ws.FeedBlob(predict_net.external_input[i], inputs[i],
                            predict_net.device_option)

    ws.RunNetOnce(predict_net)

    output_names = predict_net.external_output
    output_values = [ws.FetchBlob(name) for name in output_names]
    return ws, namedtupledict('Outputs', output_names)(*output_values)

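A small usage sketch (not from the module), assuming caffe2.python.core and numpy: build a tiny two-input Add net in memory and run it without an init net, feeding the inputs as a dict.

import numpy as np
from caffe2.python import core

net = core.Net("add_net")
c = net.Add(["A", "B"], "C")
net.AddExternalOutput(c)  # make "C" appear in external_output so it is fetched

a = np.ones((2, 2), dtype=np.float32)
b = np.full((2, 2), 2.0, dtype=np.float32)
ws, outputs = c2_native_run_net(None, net.Proto(), {"A": a, "B": b})
print(outputs.C)  # element-wise sum, all 3.0
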
def load_caffe2_net(file):
    net = caffe2_pb2.NetDef()
    with open(file, "rb") as f:
        net.ParseFromString(f.read())
    return net


def save_caffe2_net(net, file, output_txt=False):
    with open(file, "wb") as f:
        f.write(net.SerializeToString())
    if output_txt:
        # Also dump a human-readable copy, e.g. "net.pb" -> "net.pbtxt".
        with open(file + "txt", "w") as f:
            f.write(str(net))

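A usage sketch tying the helpers together; the file paths, input shape, and the existence of an exported init/predict net pair are assumptions, not part of the source.

import numpy as np

# Placeholder paths for a previously exported model.
init_net = load_caffe2_net("init_net.pb")
predict_net = load_caffe2_net("predict_net.pb")

x = np.random.randn(1, 3, 224, 224).astype(np.float32)  # shape is illustrative
ws, outputs = c2_native_run_net(init_net, predict_net, [x])

# Round-trip the predict net back to disk, with a text dump alongside.
save_caffe2_net(predict_net, "predict_net_copy.pb", output_txt=True)
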
def benchmark_caffe2_model(init_net, predict_net, warmup_iters=3, main_iters=10, layer_details=True):
    '''
    Run the benchmark net on the target model.
    Return the execution time per iteration in milliseconds.
    '''
    ws = Workspace()
    if init_net:
        ws.RunNetOnce(init_net)
    ws.CreateNet(predict_net)
    results = ws.BenchmarkNet(predict_net.name, warmup_iters, main_iters, layer_details)
    del ws
    return results[0]

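A hedged usage sketch: benchmark the Caffe2 model loaded in the previous sketch. It assumes init_net creates every blob the predict net reads, since this helper does not feed inputs itself.

ms_per_iter = benchmark_caffe2_model(init_net, predict_net,
                                     warmup_iters=3, main_iters=10,
                                     layer_details=True)
print("Caffe2: {:.3f} ms / iter".format(ms_per_iter))
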
def benchmark_pytorch_model(model, inputs, training=False, warmup_iters=3,
                            main_iters=10, verbose=False):
    '''
    Run the model several times and measure the execution time.
    Return the execution time per iteration in milliseconds.
    '''
    for _i in range(warmup_iters):
        model(*inputs)
    total_pytorch_time = 0.0
    for _i in range(main_iters):
        ts = time.time()
        model(*inputs)
        te = time.time()
        total_pytorch_time += te - ts
    log.info("The PyTorch model execution time per iter is {} milliseconds, "
             "{} iters per second.".format(total_pytorch_time / main_iters * 1000,
                                           main_iters / total_pytorch_time))
    return total_pytorch_time * 1000 / main_iters
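A minimal usage sketch, assuming PyTorch is installed; the Linear layer and tensor shapes are illustrative only.

import torch

model = torch.nn.Linear(128, 64)
example_inputs = (torch.randn(32, 128),)
ms_per_iter = benchmark_pytorch_model(model, example_inputs,
                                      warmup_iters=3, main_iters=10)
print("PyTorch: {:.3f} ms / iter".format(ms_per_iter))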