// Caffe2 - C++ API
// A deep learning, cross platform ML framework
// export.h
1 #pragma once
2 
3 #include <torch/csrc/jit/ir.h>
4 #include <torch/csrc/jit/script/module.h>
5 #include <torch/csrc/onnx/onnx.h>
6 
7 #include <ostream>
8 
9 namespace torch {
10 namespace jit {
11 
12 // This map is used to keep track of parameters that should be exported
13 // externally. When `defer_weight_export` is true, the returned map contains
14 // kv pairs that map {external reference name} -> {at::Tensor to be exported}.
15 // It is the responsibility of the caller to export these appropriately.
16 //
17 // For example, when exporting to a zip archive, the caller may write out files
18 // for each entry in the export map, with the filename being the key and the
19 // file contents being the raw tensor data.
20 using RawDataExportMap = std::unordered_map<std::string, at::Tensor>;
21 
// Serializes `graph` into an ONNX model and returns it as a string, together
// with a RawDataExportMap of externally-exported tensors (see the comment on
// RawDataExportMap above). The map is only populated when
// `defer_weight_export` is true; otherwise tensor data is embedded inline
// (presumably in the serialized proto — confirm against the .cpp).
//
// `initializers` are the parameter tensors paired with the graph's inputs,
// and `onnx_opset_version` selects the ONNX operator-set version to target.
// `operator_export_type` controls handling of operators that have no direct
// ONNX equivalent (see ::torch::onnx::OperatorExportTypes).
22 TORCH_API std::tuple<std::string, RawDataExportMap> export_onnx(
23  const std::shared_ptr<Graph>& graph,
24  const std::vector<at::Tensor>& initializers,
25  int64_t onnx_opset_version,
26  bool defer_weight_export = false,
27  ::torch::onnx::OperatorExportTypes operator_export_type =
28  ::torch::onnx::OperatorExportTypes::ONNX);
29 
30 // For testing purposes
// Like export_onnx, but returns a human-readable text rendering of the
// exported model instead of the serialized bytes. Parameters mirror
// export_onnx; note `defer_weight_export` here has no default, unlike
// export_onnx. When `google_printer` is true the output presumably uses the
// protobuf library's own text printer rather than a custom format — TODO
// confirm against the implementation.
31 TORCH_API std::string pretty_print_onnx(
32  const std::shared_ptr<Graph>& graph,
33  const std::vector<at::Tensor>& initializers,
34  int64_t onnx_opset_version,
35  bool defer_weight_export,
36  ::torch::onnx::OperatorExportTypes operator_export_type =
37  ::torch::onnx::OperatorExportTypes::ONNX,
38  bool google_printer = false);
39 
// Serializes a script::Module and writes the result to `out`.
// `metadata` supplies additional named entries to include alongside the
// module (presumably written as extra files in the output archive — confirm
// against the implementation). Defaults to no extra entries.
40 TORCH_API void ExportModule(
41  const script::Module& module,
42  std::ostream& out,
43  const script::ExtraFilesMap& metadata = script::ExtraFilesMap());
44 
// Convenience overload: serializes a script::Module to the file at
// `filename` instead of an ostream. `metadata` supplies additional named
// entries to include alongside the module (presumably extra files in the
// output archive — confirm against the implementation).
45 TORCH_API void ExportModule(
46  const script::Module& module,
47  const std::string& filename,
48  const script::ExtraFilesMap& metadata = script::ExtraFilesMap());
49 
50 } // namespace jit
51 } // namespace torch
// Definition: jit_type.h:17