2 #include <torch/csrc/autograd/variable.h>     3 #include <torch/csrc/autograd/generated/variable_factories.h>     4 #include <torch/csrc/jit/argument_spec.h>     5 #include <c10/util/Exception.h>     6 #include <torch/csrc/jit/graph_executor.h>     7 #include <torch/csrc/jit/ir.h>     8 #include <torch/csrc/jit/named_value.h>     9 #include <torch/csrc/jit/passes/shape_analysis.h>    10 #include <torch/csrc/jit/source_range.h>    12 #include <torch/csrc/WindowsTorchApiMacro.h>    13 #include <torch/csrc/api/include/torch/ordered_dict.h>    14 #include <torch/csrc/utils/memory.h>    16 #include <ATen/core/function_schema.h>    17 #include <c10/util/ArrayRef.h>    18 #include <c10/util/Optional.h>    25 #include <unordered_map>    36 using ::c10::Argument;
    // Type aliases used throughout the script Module/Method API below.
    37 using ::c10::FunctionSchema;
    // Maps extra-file names to their contents when saving/loading a module.
    39 using ExtraFilesMap = std::unordered_map<std::string, std::string>;
    // Callback resolving a submodule path (names from the root module) to a
    // Module instance — used by copy_into() during module export/copy.
    52 using ModuleLookup = std::function<std::shared_ptr<Module>(
    53     const std::vector<std::string>&)>;
    // Method constructor (fragment — several original lines are missing from
    // this scrape). Takes ownership of the graph and of the list of IValue*
    // slots ("initial members") that back the trailing graph inputs.
    60       std::shared_ptr<Graph> graph,
    61       std::vector<IValue*> initial_members,
    62       std::function<
void(
Method&)> method_creator)
    64         name_(std::move(name)),
    65         graph_(std::move(graph)),
    67         initial_ivalues_(std::move(initial_members)),
    68         method_creator(std::move(method_creator)) {
    // Every initial ivalue must correspond to one trailing graph input.
    69     AT_ASSERT(graph_->inputs().size() >= initial_ivalues_.size());
    // Index of the first graph input that is backed by an initial ivalue.
    70     int i = graph_->inputs().size() - initial_ivalues_.size();
    71     for (
auto member : initial_ivalues_) {
    72       initial_ivalue_index[member] = i++;
    // run(): pushes the initial ivalues onto the stack (loop body elided in
    // this scrape) and then executes via the GraphExecutor.
    76   void run(Stack& stack) {
    77     for (
auto input : initial_ivalues_) {
    80     get_executor().run(stack);
    // rvalue overload — presumably forwards to run(stack); body not visible.
    83   void run(Stack&& stack) {
    // Call operator: validates actual arguments against the schema first.
    87   IValue operator()(std::vector<IValue> stack) {
    88     checkInputsAgainstSchema(stack);
    // Returns the (specialized) graph the executor would use for these
    // inputs. Initial ivalues are appended first, mirroring run().
    93   std::shared_ptr<Graph> graph_for(Stack inputs) {
    94     for (
auto tp : initial_ivalues_) {
    95       inputs.emplace_back(*tp);
    97     return get_executor().graphFor(inputs);
    // Basic accessors (bodies partially elided in this scrape).
    99   TORCH_API std::shared_ptr<Graph> graph()
 const {
   103   TORCH_API 
const std::string& name()
 const {
   118   TORCH_API 
void ensure_defined();
   // Number of user-visible inputs: total graph inputs minus the trailing
   // inputs that are backed by initial ivalues.
   120   size_t num_inputs()
 const {
   121     return graph()->inputs().size() - initial_ivalues_.size();
   // Convenience wrapper over get_or_add_attribute: parameters are tensors.
   123   TORCH_API 
Value* get_or_add_parameter(
IValue* slot) {
   124     AT_ASSERT(slot->isTensor());
   125     return get_or_add_attribute(TensorType::get(), slot);
   // Returns the graph input Value backed by `slot`, adding a new graph
   // input (and recording its index) the first time a slot is seen.
   128   TORCH_API 
Value* get_or_add_attribute(TypePtr type, 
IValue* slot) {
   129     auto it = initial_ivalue_index.find(slot);
   130     if (it != initial_ivalue_index.end()) {
   131       return graph()->inputs().at(it->second);
   133     initial_ivalues_.push_back(slot);
   // Recorded before addInput(), so the index equals the new input's slot.
   134     initial_ivalue_index[slot] = graph()->inputs().size();
   135     return graph()->addInput()->setType(type);
   // Runs shape propagation over a copy of the graph, seeding the input
   // types from the given example tensors plus the initial ivalues.
   // NOTE(review): stack declaration and loop headers are elided in this
   // scrape — several original lines are missing here.
   138   std::shared_ptr<Graph> propagate_shapes(
   139       std::vector<at::Tensor> inputs,
   140       bool with_grad = 
false) {
   141     auto retval = graph_->copy();
   143     stack.reserve(inputs.size() + initial_ivalues_.size());
   145       stack.emplace_back(std::move(i));
   147     for (
IValue* inp : initial_ivalues_) {
   148       stack.push_back(*inp);
   150     const auto size = stack.size();
   151     setInputTypes(*retval, 
ArgumentSpec(with_grad, stack, size));
   152     PropagateInputShapes(retval);
   // Like propagate_shapes, but additionally pins the input/output Values of
   // the copied graph to CompleteTensorType built from the example tensors.
   // NOTE(review): large parts of this body are missing from this scrape.
   156   std::shared_ptr<Graph> propagate_and_assign_input_and_output_shapes(
   157       std::vector<at::Tensor> inputs,
   158       std::vector<at::Tensor> outputs,
   159       bool with_grad = 
false,
   160       bool propagate = 
true) {
   161     auto retval = graph_->copy();
   // Tensor-typed initial ivalues participate as extra example inputs.
   162     for (
auto inp : initial_ivalues_) {
   163       if (inp->isTensor()) {
   164         inputs.push_back(inp->toTensor());
   170           ArgumentSpec(with_grad, fmap<IValue>(inputs), inputs.size()));
   171       PropagateInputShapes(retval);
   173     AT_ASSERT(retval->inputs().size() == inputs.size());
   174     for (
size_t i = 0; i < retval->inputs().size(); ++i) {
   175       auto scalar_type = inputs[i].scalar_type();
   176       auto sizes = inputs[i].sizes();
   // NOTE(review): device is hard-coded to at::kCPU — confirm intended.
   178           torch::jit::CompleteTensorType::create(scalar_type, at::kCPU, sizes);
   179       retval->inputs()[i]->setType(type);
   // A single tuple output is unpacked into its TupleConstruct operands so
   // each element can be typed from the matching example output tensor.
   183     if (output_values.
at(0)->type()->kind() == TupleType::Kind) {
   184       AT_ASSERT(output_values.
at(0)->node()->kind() == prim::TupleConstruct);
   185       output_values = output_values.
at(0)->node()->inputs();
   187     AT_ASSERT(output_values.
size() == outputs.size());
   188     for (
size_t i = 0; i < retval->outputs().size(); ++i) {
   189       auto scalar_type = outputs[i].scalar_type();
   190       auto sizes = outputs[i].sizes();
   192           torch::jit::CompleteTensorType::create(scalar_type, at::kCPU, sizes);
   193       output_values[i]->setType(type);
   // Slots (IValue*) appended to the stack on every invocation.
   198   const std::vector<IValue*>& initial_ivalues()
 const {
   199     return initial_ivalues_;
   // Installs an explicit schema, replacing any lazily-derived one.
   202   Method& setSchema(FunctionSchema schema_) {
   203     schema = make_unique<FunctionSchema>(std::move(schema_));
   // Lazily derives a default schema from the graph on first access;
   // `schema` is a mutable member, so this works on a const Method.
   207   TORCH_API 
const FunctionSchema& getSchema()
 const {
   208     if (schema == 
nullptr) {
   209       schema = make_unique<FunctionSchema>(defaultSchemaFor(*
this));
   214   std::string pretty_print_schema()
 const {
   216     std::stringstream ss;
   // getDebugState fragment: forwards to the underlying GraphExecutor.
   222     return get_executor().getDebugState();
   // Debug hook forwarding to the underlying GraphExecutor.
   225   void debugDisableAutodiffSubgraphInlining() {
   226     return get_executor().debugDisableAutodiffSubgraphInlining();
   229   bool is_optimized()
 const {
   // Methods must produce exactly one output (use None/Tuple otherwise).
   238   void check_single_output() {
   240         graph()->outputs().size() == 1,
   241         "Method (but not graphs in general) require a single output. Use None/Tuple for 0 or 2+ outputs");
   // Builds a schema from the graph alone: one argument per user-visible
   // input (named after the Value when it has a unique name, otherwise
   // "argument_<i>"), one unnamed return per graph output. Shape info is
   // stripped via unshapedType().
   245   static FunctionSchema defaultSchemaFor(
const Method& method) {
   246     std::vector<Argument> args;
   247     std::vector<Argument> returns;
   248     Graph& g = *method.graph();
   249     size_t num_inputs = method.num_inputs();
   250     for (
size_t i = 0; i < num_inputs; ++i) {
   251       const Value* v = g.inputs().at(i);
   252       std::string name = v->hasUniqueName() ? v->uniqueNameBase()
   253                                             : (
"argument_" + std::to_string(i));
   254       args.emplace_back(std::move(name), unshapedType(g.inputs()[i]->type()));
   256     for (
size_t i = 0; i < g.outputs().size(); ++i) {
   257       returns.emplace_back(
"", unshapedType(g.outputs()[i]->type()));
   259     return {method.name(), 
"", std::move(args), std::move(returns)};
   // Executor creation fragment: initialized exactly once (executor_init).
   263     std::call_once(executor_init, [&] {
   264       check_single_output();
   // Validates the actual arguments against this method's schema before
   // execution, filling in declared defaults for missing trailing args.
   // NOTE(review): the AT_CHECK/error-message scaffolding lines are elided.
   270   void checkInputsAgainstSchema(std::vector<IValue>& inputs) {
   271     const auto& schema = getSchema();
   274         inputs.size() <= schema.arguments().size(),
   276         schema.arguments().size(),
   277         " argument(s) for operator '",
   281         " argument(s). Declaration: ",
   284     for (
size_t pos = 0; pos < schema.arguments().size(); ++pos) {
   285       const auto& argument = schema.arguments()[pos];
   286       if (pos < inputs.size()) {
   287         if (!isSubvalueOf(inputs[pos], argument.type())) {
   289             "Expected value of type ",
   295             ", but instead got value of type ",
   296             attemptToRecoverType(inputs[pos])->str(),
   // Missing trailing arguments are filled from schema defaults.
   300       } 
else if (argument.default_value()) {
   301         inputs.push_back(*argument.default_value());
   305             "() is missing value for argument '",
   // --- Method state (declarations; upstream explanatory comments elided).
   318   std::shared_ptr<Graph> graph_; 
   327   std::vector<IValue*> initial_ivalues_;
   // Maps a slot to the index of the graph input it backs.
   331   std::unordered_map<IValue*, size_t> initial_ivalue_index;
   341   std::once_flag executor_init;
   // Optional callback that lazily defines this method (see ensure_defined).
   346   std::function<void(Method&)> method_creator;
   // mutable: getSchema() fills this in lazily on a const Method.
   351   mutable std::unique_ptr<FunctionSchema> schema;
   // --- NamedModule / NamedIValue fragments (ctor bodies mostly elided).
   358   std::shared_ptr<Module> module;
   365         ivalue(torch::make_unique<IValue>(std::move(ivalue))) {}
   370   const std::string name_;
   372   std::unique_ptr<IValue> ivalue;
   // Module is non-copyable: Methods hold raw IValue* slots into it.
   376   TH_DISALLOW_COPY_AND_ASSIGN(
Module);
   // Constructor fragment: the ordered dicts carry labels (presumably used
   // in error messages — verify against OrderedDict).
   379         parameters(
"Parameter"),
   380         attributes(
"Attributes"),
   386   void set_optimized(
bool o) {
   390   bool is_optimized()
 const {
   // forward(...) simply dispatches to the method named "forward".
   394   IValue forward(std::vector<IValue> inputs) {
   395     return get_method(
"forward")(std::move(inputs));
   // register_buffer fragment: buffers live in `attributes` and must be
   // tensor-typed; re-registering an existing buffer asserts the type.
   399     if (
auto b = attributes.find(name)) {
   400       AT_ASSERT(b->type->isSubtypeOf(TensorType::get()));
   404     attributes.insert(name, 
NamedIValue(name, TensorType::get(), std::move(v)));
   // register_parameter fragment: one path routes to register_buffer (line
   // 411); the condition is elided in this scrape.
   406   void register_parameter(
   407       const std::string& name,
   411       register_buffer(name, std::move(v));
   414     if (
auto p = parameters.find(name)) {
   418     parameters.insert(name, 
NamedIValue(name, TensorType::get(), std::move(v)));
   // register_attribute: arbitrary typed attribute (not tensor-restricted).
   420   void register_attribute(
   421       const std::string& name,
   424     attributes.insert(name, 
NamedIValue(name, type, ivalue));
   // Inserts a named submodule.
   426   void register_module(
   427       const std::string& name,
   428       std::shared_ptr<Module> module) {
   429     modules.insert(name, {name, std::move(module)});
   // create_method (graph form): wraps an existing graph plus its member
   // input slots in a new Method owned by this module.
   433       const std::string& name,
   434       std::shared_ptr<Graph> graph,
   435       std::vector<IValue*> member_inputs) {
   437     std::unique_ptr<Method> method(
new Method(
   442         std::move(member_inputs),
   444     return *methods.insert(name, std::move(method));
   // create_method (creator form): starts from an empty graph; `creator`
   // runs later to lazily define the body (see Method::method_creator).
   448       const std::string& name,
   449       std::function<
void(
Method&)> creator) {
   450     std::unique_ptr<Method> method(
new Method(
   454         std::make_shared<Graph>(),
   456         std::move(creator)));
   457     return *methods.insert(name, std::move(method));
   // Raw slot for a named parameter; writing through it mutates the module.
   460   IValue* parameter_slot(
const std::string& name)
 const {
   461     return parameters[name].slot();
   464   void set_parameter(
const std::string& name, 
at::Tensor v) {
   465     *parameter_slot(name) = std::move(v);
   // get_parameter / get_buffer fragments: expose slots as autograd
   // Variable references.
   469     return autograd::as_variable_ref(parameter_slot(name)->toTensor());
   472     return autograd::as_variable_ref(attributes.find(name)->slot()->toTensor());
   // Lookup accessors; presumably operator[] on the ordered dicts reports
   // missing keys — verify against OrderedDict.
   477   Method& get_method(
const std::string& name)
 const {
   478     return *methods[name];
   481   std::shared_ptr<Module> get_module(
const std::string& name)
 const {
   482     return modules[name].module;
   // find_* accessors: non-throwing lookups returning a pointer (dict
   // find() semantics; presumably nullptr when absent — verify).
   501   NamedIValue* find_parameter(
const std::string& name) {
   502     return parameters.find(name);
   504   NamedIValue* find_attribute(
const std::string& name) {
   505     return attributes.find(name);
   // A "buffer" is an attribute whose declared type is a tensor type.
   507   NamedIValue* find_buffer(
const std::string& name) {
   508     auto b = attributes.find(name);
   509     if (b && b->type->isSubtypeOf(TensorType::get())) {
   514   NamedModule* find_module(
const std::string& name) {
   515     return modules.find(name);
   517   Method* find_method(
const std::string& name) {
   518     if (
auto* pm = methods.find(name)) {
   // apply: recursively visits submodules (the self-application line is
   // elided in this scrape).
   523   void apply(std::function<
void(
Module&)> fn) {
   524     for (
auto& submod : get_modules()) {
   525       submod.value().module->apply(fn);
   // train(on) fragment: recurses into submodules, then records the mode
   // in a "training" buffer as a 0/1 int64 tensor.
   531     for (
auto& submod : get_modules()) {
   532       submod->module->train(on);
   534     register_buffer(
"training", torch::tensor(on ? 1 : 0, at::kLong));
   // is_training fragment: reads the "training" buffer back as a bool.
   543     if (
auto p = find_buffer(
"training")) {
   544       return p->slot()->toTensor().item<int64_t>() == 1;
   // to(...) overload declarations (definitions live elsewhere): convert /
   // move the module's tensors by dtype and/or device.
   558       at::ScalarType dtype,
   559       bool non_blocking = 
false);
   567   TORCH_API 
void to(at::ScalarType dtype, 
bool non_blocking = 
false);
   575   TORCH_API 
void to(
at::Device device, 
bool non_blocking = 
false);
   // run_method fragment: wraps variadic args as IValues and forwards them
   // to the named method.
   590   template <
typename... Types>
   592     return get_method(method_name)({
IValue(std::forward<Types>(args))...});
   // save(...) declaration fragments (stream and filename forms), each with
   // optional extra files bundled into the archive.
   597       const ExtraFilesMap& extra_files = ExtraFilesMap());
   600       const std::string& filename,
   601       const ExtraFilesMap& extra_files = ExtraFilesMap());
   // copy_into fragment: deep-copies this module tree into modules produced
   // by module_lookup, recording old-slot -> new-slot mappings in
   // parameter_remap so method graphs can be re-bound to the new storage.
   604       ModuleLookup module_lookup,
   607       std::unordered_map<IValue*, IValue*>& parameter_remap,
   608       std::vector<std::string> names = {}) 
const {
   609     auto curr = module_lookup(names);
   610     for (
auto& kv : parameters) {
   611       curr->register_parameter(
   613           kv.value().slot()->toTensor(),
   615       parameter_remap[kv.value().slot()] = curr->parameter_slot(kv.key());
   // Only tensor-typed attributes (buffers) are copied in this loop.
   617     for (
auto& kv : attributes) {
   618       if (!kv.value().type->isSubtypeOf(TensorType::get())) {
   621       curr->register_buffer(
   623           kv.value().slot()->toTensor());
   624       parameter_remap[kv.value().slot()] = curr->find_buffer(kv.key())->slot();
   // Recurse into submodules with the extended name path.
   626     for (
auto& kv : modules) {
   627       names.push_back(kv.key());
   630       kv.value().module->copy_into(module_lookup, parameter_remap, names);
   // Methods are re-created over copied graphs with remapped slots.
   633     for (
auto& kv : methods) {
   634       std::vector<IValue*> initial_ivalues;
   635       for (
auto& p : kv.value()->initial_ivalues()) {
   636         initial_ivalues.push_back(parameter_remap.at(p));
   639           kv.key(), kv.value()->graph()->copy(), initial_ivalues);
   // Free-function declaration fragment (script call-emission helper).
   662 Value* try_emit_call_to(
   669     std::stringstream& failure_messages,
   673     bool conv_tensors_to_nums);
 bool is_training()
True if the module is in training mode. 
 
Represents a compute device on which a tensor is located. 
 
constexpr size_t size() const 
size - Get the array size. 
 
void train(bool on=true)
Enables "training" mode. 
 
Variable A Variable augments a Tensor with the ability to interact in our autograd machinery...
 
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
 
AT_CPP14_CONSTEXPR const T & at(size_t Index) const 
Vector compatibility. 
 
IValue run_method(const std::string &method_name, Types &&...args)
Run a method from this module. 
 
void eval()
Calls train(false) to enable "eval" mode. 
 
An ordered dictionary implementation, akin to Python's OrderedDict.