Caffe2 - Python API
A deep learning, cross-platform ML framework
torch.nn.parallel.deprecated.distributed.DistributedDataParallel Member List

This is the complete list of members for torch.nn.parallel.deprecated.distributed.DistributedDataParallel, including all inherited members. Every member below is defined directly in this class; a brief usage sketch follows the list.

__getstate__(self)
__init__(self, module, device_ids=None, output_device=None, dim=0, broadcast_buffers=True)
__setstate__(self, state)
broadcast_bucket_size
broadcast_buffers
bucket_events
bucket_map
bucket_sizes
buckets
device_ids
dim
dispatch_lock
forward(self, inputs, kwargs)
gather(self, outputs, output_device)
module
nccl_reduce_bucket_size
nccl_reduction_group_id
need_reduction
output_device
parallel_apply(self, replicas, inputs, kwargs)
reduced
scatter(self, inputs, kwargs, device_ids)
train(self, mode=True)
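For context, here is a minimal sketch of how this deprecated wrapper was typically constructed, based only on the __init__ signature listed above. The process-group setup call and its argument values (backend, init_method, world_size, rank) come from the torch.distributed API of the same era and are assumptions, not part of this member list; the concrete values are placeholders.

    import torch
    import torch.distributed as dist
    from torch.nn.parallel.deprecated.distributed import DistributedDataParallel

    # Assumption: old-style process-group initialization; the init_method
    # address, world_size, and rank below are placeholder values.
    dist.init_process_group(backend='nccl',
                            init_method='tcp://127.0.0.1:23456',
                            world_size=1, rank=0)

    # Any nn.Module can be wrapped; a single Linear layer keeps the sketch small.
    model = torch.nn.Linear(10, 10).cuda()

    # Arguments follow the __init__ signature in the member list:
    # module, device_ids=None, output_device=None, dim=0, broadcast_buffers=True
    ddp = DistributedDataParallel(model,
                                  device_ids=[0],
                                  output_device=0,
                                  dim=0,
                                  broadcast_buffers=True)

    # forward() scatters inputs along `dim`, runs replicas via parallel_apply(),
    # and gathers outputs onto `output_device`, as the method list suggests.
    out = ddp(torch.randn(4, 10).cuda())

In current PyTorch releases this deprecated class has been removed; torch.nn.parallel.DistributedDataParallel is its replacement.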