torch.distributed.deprecated provides an MPI-like interface for exchanging
tensor data across multi-machine networks. It supports several backends
(tcp, gloo, mpi, and, for a subset of the collectives, nccl) and a few
different initialization methods.

def is_available()
|
def destroy_process_group()
|
def is_initialized()
|
def init_process_group(backend, init_method='env://', **kwargs)
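
A minimal initialization sketch, assuming the tcp backend and the default
env:// method. With env://, the MASTER_ADDR, MASTER_PORT, RANK, and
WORLD_SIZE environment variables must be set on every process before the
call:

    import torch.distributed.deprecated as dist

    if dist.is_available() and not dist.is_initialized():
        # env:// reads MASTER_ADDR, MASTER_PORT, RANK and WORLD_SIZE
        # from the environment; no extra kwargs are needed in that case.
        dist.init_process_group(backend='tcp', init_method='env://')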
|
def init_master_worker(backend, init_method='env://', **kwargs)
|
def get_rank()
|
def get_world_size()
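
Both calls assume the default group has already been initialized; a small
sketch of how they are typically used together:

    import torch.distributed.deprecated as dist

    rank = dist.get_rank()              # this process's index: 0 .. world_size - 1
    world_size = dist.get_world_size()  # number of participating processes
    print('process %d of %d' % (rank, world_size))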
|
def isend(tensor, dst)
|
def irecv(tensor, src)
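
A sketch of a non-blocking point-to-point exchange between ranks 0 and 1;
it assumes the returned request object exposes wait(), and that the job was
launched with at least two processes:

    import torch
    import torch.distributed.deprecated as dist

    tensor = torch.zeros(4)
    if dist.get_rank() == 0:
        tensor += 1
        req = dist.isend(tensor, dst=1)  # returns a request handle immediately
        req.wait()                       # block until rank 1 has received
    elif dist.get_rank() == 1:
        req = dist.irecv(tensor, src=0)
        req.wait()                       # tensor now holds rank 0's data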
|
def send(tensor, dst)
|
def recv(tensor, src=None)
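
The blocking counterparts. Passing src=None lets recv accept data from any
rank, and the call returns the sender's rank; a two-process sketch:

    import torch
    import torch.distributed.deprecated as dist

    tensor = torch.zeros(4)
    if dist.get_rank() == 0:
        dist.send(tensor, dst=1)    # blocks until the send completes
    elif dist.get_rank() == 1:
        sender = dist.recv(tensor)  # blocks; with src=None, any sender is
                                    # accepted and its rank is returned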
|
def broadcast_multigpu(tensor_list, src, group=group.WORLD)
|
def broadcast(tensor, src, group=group.WORLD)
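
A broadcast sketch: the tensor must have the same shape and type on every
rank; src supplies the data, and all other ranks receive it in place:

    import torch
    import torch.distributed.deprecated as dist

    tensor = torch.zeros(4)
    if dist.get_rank() == 0:
        tensor += 1.0              # only the source's values matter
    dist.broadcast(tensor, src=0)  # every rank now holds rank 0's tensor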
|
def all_reduce_multigpu(tensor_list, op=reduce_op.SUM, group=group.WORLD)
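
The *_multigpu variants operate on one tensor per local GPU in a single
call. A sketch, assuming a CUDA build, the same number of GPUs on every
node, and a backend with multi-GPU support (e.g. nccl):

    import torch
    import torch.distributed.deprecated as dist

    # one tensor per local GPU; each must live on a distinct device
    tensors = [torch.ones(2).cuda(i) for i in range(torch.cuda.device_count())]
    dist.all_reduce_multigpu(tensors, op=dist.reduce_op.SUM)
    # every tensor on every GPU of every node now holds the global sum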
|
def all_reduce(tensor, op=reduce_op.SUM, group=group.WORLD)
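
all_reduce combines the tensors from all ranks and leaves the identical
result on each of them; for example, summing rank-dependent values:

    import torch
    import torch.distributed.deprecated as dist

    tensor = torch.ones(2) * (dist.get_rank() + 1)
    dist.all_reduce(tensor, op=dist.reduce_op.SUM)
    # with N processes, every slot now holds 1 + 2 + ... + N on every rank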
|
def reduce_multigpu(tensor_list, dst, op=reduce_op.SUM, group=group.WORLD)
|
def reduce(tensor, dst, op=reduce_op.SUM, group=group.WORLD)
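
reduce is the asymmetric form: only dst is guaranteed to hold the combined
result afterwards. A sketch:

    import torch
    import torch.distributed.deprecated as dist

    tensor = torch.ones(2)
    dist.reduce(tensor, dst=0, op=dist.reduce_op.SUM)
    # rank 0 now holds the element-wise sum over all ranks; the tensor
    # on every other rank should be treated as undefined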
|
def all_gather_multigpu(output_tensor_lists, input_tensor_list, group=group.WORLD)
|
def all_gather(tensor_list, tensor, group=group.WORLD)
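
all_gather fills a pre-allocated list with every rank's tensor; tensor_list
must contain world_size correctly-shaped tensors:

    import torch
    import torch.distributed.deprecated as dist

    tensor = torch.ones(2) * dist.get_rank()
    gathered = [torch.zeros(2) for _ in range(dist.get_world_size())]
    dist.all_gather(gathered, tensor)
    # gathered[i] now holds rank i's tensor, on every process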
|
def gather(tensor, **kwargs)
|
def scatter(tensor, **kwargs)
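
For gather and scatter the remaining arguments are passed as keywords; the
sketch below assumes dst/gather_list and src/scatter_list keywords as in the
non-deprecated torch.distributed API, with the list argument supplied only
on the root rank:

    import torch
    import torch.distributed.deprecated as dist

    rank = dist.get_rank()
    world_size = dist.get_world_size()

    # gather: rank 0 collects one tensor from every process
    tensor = torch.ones(2) * rank
    if rank == 0:
        dist.gather(tensor, dst=0,
                    gather_list=[torch.zeros(2) for _ in range(world_size)])
    else:
        dist.gather(tensor, dst=0)

    # scatter: rank 0 hands each process one chunk
    out = torch.zeros(2)
    if rank == 0:
        dist.scatter(out, src=0,
                     scatter_list=[torch.ones(2) * i for i in range(world_size)])
    else:
        dist.scatter(out, src=0)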
|
def barrier(group=group.WORLD)
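
barrier blocks every caller until all members of the group have entered it;
a common use is ordering filesystem work across ranks:

    import torch.distributed.deprecated as dist

    if dist.get_rank() == 0:
        pass  # e.g. rank 0 writes a checkpoint here
    dist.barrier()  # nobody proceeds until every rank reaches this line
    # all ranks may now safely read what rank 0 wrote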
|
def new_group(ranks=None)
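
new_group builds a subgroup that can be passed as the group argument of any
collective. Every process must make the call with an identical ranks list,
including processes that are not members of the new group:

    import torch
    import torch.distributed.deprecated as dist

    group = dist.new_group(ranks=[0, 2])  # subgroup of two ranks
    if dist.get_rank() in (0, 2):
        t = torch.ones(1)
        # the reduction runs only over the subgroup's members
        dist.all_reduce(t, op=dist.reduce_op.SUM, group=group)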
|