| Member | Defined in |
|---|---|
| __getstate__(self) | torch.nn.parallel.distributed.DistributedDataParallel |
| __init__(self, module, device_ids=None, output_device=None, dim=0, broadcast_buffers=True, process_group=None, bucket_cap_mb=25, check_reduction=False) | torch.nn.parallel.distributed.DistributedDataParallel |
| __setstate__(self, state) | torch.nn.parallel.distributed.DistributedDataParallel |
| all_buckets_reduced | torch.nn.parallel.distributed.DistributedDataParallel |
| broadcast_bucket_size | torch.nn.parallel.distributed.DistributedDataParallel |
| broadcast_buffers | torch.nn.parallel.distributed.DistributedDataParallel |
| bucket_bytes_cap | torch.nn.parallel.distributed.DistributedDataParallel |
| bucket_map | torch.nn.parallel.distributed.DistributedDataParallel |
| bucket_sizes | torch.nn.parallel.distributed.DistributedDataParallel |
| buckets | torch.nn.parallel.distributed.DistributedDataParallel |
| buckets_coalesced | torch.nn.parallel.distributed.DistributedDataParallel |
| buckets_ready_size | torch.nn.parallel.distributed.DistributedDataParallel |
| check_previous_reduction | torch.nn.parallel.distributed.DistributedDataParallel |
| check_reduction | torch.nn.parallel.distributed.DistributedDataParallel |
| default_streams | torch.nn.parallel.distributed.DistributedDataParallel |
| device_ids | torch.nn.parallel.distributed.DistributedDataParallel |
| devs_ready | torch.nn.parallel.distributed.DistributedDataParallel |
| dim | torch.nn.parallel.distributed.DistributedDataParallel |
| forward(self, *inputs, **kwargs) | torch.nn.parallel.distributed.DistributedDataParallel |
| gather(self, outputs, output_device) | torch.nn.parallel.distributed.DistributedDataParallel |
| module | torch.nn.parallel.distributed.DistributedDataParallel |
| modules_buffers_data | torch.nn.parallel.distributed.DistributedDataParallel |
| modules_params_data | torch.nn.parallel.distributed.DistributedDataParallel |
| next_bucket | torch.nn.parallel.distributed.DistributedDataParallel |
| output_device | torch.nn.parallel.distributed.DistributedDataParallel |
| parallel_apply(self, replicas, inputs, kwargs) | torch.nn.parallel.distributed.DistributedDataParallel |
| process_group | torch.nn.parallel.distributed.DistributedDataParallel |
| ready_buckets_not_reduced | torch.nn.parallel.distributed.DistributedDataParallel |
| reduction_works | torch.nn.parallel.distributed.DistributedDataParallel |
| scatter(self, inputs, kwargs, device_ids) | torch.nn.parallel.distributed.DistributedDataParallel |
| train(self, mode=True) | torch.nn.parallel.distributed.DistributedDataParallel |
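
Most of these members (buckets, bucket_map, reduction_works, and so on) are internal state used for gradient bucketing and all-reduce; in ordinary use only the constructor and the forward pass are called directly. The sketch below shows typical usage of the indexed __init__ and forward, assuming a single-node launch with one GPU per process where a launcher has populated MASTER_ADDR, MASTER_PORT, RANK, and WORLD_SIZE in the environment; the backend choice and the run helper are illustrative, not part of the indexed API.

```python
import os

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel


def run():
    # Assumes a per-process launcher has set MASTER_ADDR, MASTER_PORT,
    # RANK, and WORLD_SIZE; init_method="env://" reads them.
    dist.init_process_group(backend="nccl", init_method="env://")

    # Assumption: single node, one GPU per process, so the global rank
    # doubles as the local CUDA device index.
    device = dist.get_rank()
    torch.cuda.set_device(device)

    model = nn.Linear(10, 1).cuda(device)
    # device_ids and output_device match the __init__ parameters indexed
    # above; with one device per process, device_ids is a single element.
    ddp_model = DistributedDataParallel(
        model, device_ids=[device], output_device=device
    )

    optimizer = torch.optim.SGD(ddp_model.parameters(), lr=0.01)
    inputs = torch.randn(20, 10).cuda(device)
    loss = ddp_model(inputs).sum()  # forward(*inputs, **kwargs) on the wrapped module
    loss.backward()                 # gradient buckets are all-reduced across processes
    optimizer.step()

    dist.destroy_process_group()


if __name__ == "__main__":
    run()
```

Each process holds a replica of the module; during backward, gradients are reduced in buckets of roughly bucket_cap_mb megabytes, which is why the bucket-related attributes above exist on the wrapper.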