Public Member Functions | |
TH_DISALLOW_COPY_AND_ASSIGN (Module) | |
void | set_optimized (bool o) |
bool | is_optimized () const |
IValue | forward (std::vector< IValue > inputs) |
void | register_buffer (const std::string &name, autograd::Variable v) |
void | register_parameter (const std::string &name, autograd::Variable v, bool is_buffer) |
void | register_attribute (const std::string &name, const TypePtr type, IValue ivalue) |
void | register_module (const std::string &name, std::shared_ptr< Module > module) |
Method & | create_method (const std::string &name, std::shared_ptr< Graph > graph, std::vector< IValue * > member_inputs) |
Method & | create_method (const std::string &name, std::function< void(Method &)> creator) |
IValue * | parameter_slot (const std::string &name) const |
void | set_parameter (const std::string &name, at::Tensor v) |
autograd::Variable | get_parameter (const std::string &name) const |
autograd::Variable | get_buffer (const std::string &name) const |
Method & | get_method (const std::string &name) const |
std::shared_ptr< Module > | get_module (const std::string &name) const |
const torch::OrderedDict< std::string, NamedModule > & | get_modules () const |
const torch::OrderedDict< std::string, NamedIValue > & | get_parameters () const |
const torch::OrderedDict< std::string, NamedIValue > & | get_attributes () const |
const torch::OrderedDict< std::string, std::unique_ptr< Method > > & | get_methods () const |
NamedIValue * | find_parameter (const std::string &name) |
NamedIValue * | find_attribute (const std::string &name) |
NamedIValue * | find_buffer (const std::string &name) |
NamedModule * | find_module (const std::string &name) |
Method * | find_method (const std::string &name) |
void | apply (std::function< void(Module &)> fn) |
void | train (bool on=true) |
Enables "training" mode. | |
void | eval () |
Calls train(false) to enable "eval" mode. More... | |
bool | is_training () |
True if the module is in training mode. | |
TORCH_API void | to (at::Device device, at::ScalarType dtype, bool non_blocking=false) |
Recursively casts all parameters to the given dtype and device . More... | |
TORCH_API void | to (at::ScalarType dtype, bool non_blocking=false) |
Recursively casts all parameters to the given dtype. More... | |
TORCH_API void | to (at::Device device, bool non_blocking=false) |
Recursively moves all parameters to the given device. More... | |
template<typename... Types> | |
IValue | run_method (const std::string &method_name, Types &&...args) |
Run a method from this module. More... | |
void | save (std::ostream &out, const ExtraFilesMap &extra_files=ExtraFilesMap()) |
void | save (const std::string &filename, const ExtraFilesMap &extra_files=ExtraFilesMap()) |
void | copy_into (ModuleLookup module_lookup, std::unordered_map< IValue *, IValue * > &parameter_remap, std::vector< std::string > names={}) const |
|
inline |
|
inline |
Run a method from this module.
For example:
To compile a module from a source string, see torch::jit::compile
method_name | The name of the method to run |
args | Arguments to be passed to the method |
void torch::jit::script::Module::to | ( | at::Device | device, |
at::ScalarType | dtype, | ||
bool | non_blocking = false |
||
) |
Recursively casts all parameters to the given dtype
and device
.
If non_blocking
is true and the source is in pinned memory and destination is on the GPU or vice versa, the copy is performed asynchronously with respect to the host. Otherwise, the argument has no effect.
Definition at line 98 of file module.cpp.
void torch::jit::script::Module::to | ( | at::ScalarType | dtype, |
bool | non_blocking = false |
||
) |
Recursively casts all parameters to the given dtype.
If non_blocking
is true and the source is in pinned memory and destination is on the GPU or vice versa, the copy is performed asynchronously with respect to the host. Otherwise, the argument has no effect.
Definition at line 102 of file module.cpp.
void torch::jit::script::Module::to | ( | at::Device | device, |
bool | non_blocking = false |
||
) |
Recursively moves all parameters to the given device.
If non_blocking
is true and the source is in pinned memory and destination is on the GPU or vice versa, the copy is performed asynchronously with respect to the host. Otherwise, the argument has no effect.
Definition at line 106 of file module.cpp.