| __init__(cls, name, bases, attrs) (defined in torch.autograd.function.FunctionMeta) | torch.autograd.function.FunctionMeta | |
| backward(self, grad_output) (defined in torch.nn.modules._functions.SyncBatchNorm) | torch.nn.modules._functions.SyncBatchNorm | static |
| dirty_tensors (defined in torch.autograd.function._ContextMethodMixin) | torch.autograd.function._ContextMethodMixin | |
| forward(self, input, weight, bias, running_mean, running_var, eps, momentum, process_group, world_size) (defined in torch.nn.modules._functions.SyncBatchNorm) | torch.nn.modules._functions.SyncBatchNorm | static |
| forward(ctx, *args, **kwargs) (defined in torch.autograd.function.Function) | torch.autograd.function.Function | static |
| is_traceable (defined in torch.autograd.function.Function) | torch.autograd.function.Function | static |
| mark_dirty(self, *args) (defined in torch.autograd.function._ContextMethodMixin) | torch.autograd.function._ContextMethodMixin | |
| mark_non_differentiable(self, *args) (defined in torch.autograd.function._ContextMethodMixin) | torch.autograd.function._ContextMethodMixin | |
| mark_shared_storage(self, *pairs) (defined in torch.autograd.function._ContextMethodMixin) | torch.autograd.function._ContextMethodMixin | |
| non_differentiable (defined in torch.autograd.function._ContextMethodMixin) | torch.autograd.function._ContextMethodMixin | |
| process_group (defined in torch.nn.modules._functions.SyncBatchNorm) | torch.nn.modules._functions.SyncBatchNorm | |
| save_for_backward(self, *tensors) (defined in torch.autograd.function._ContextMethodMixin) | torch.autograd.function._ContextMethodMixin | |
| to_save (defined in torch.autograd.function._ContextMethodMixin) | torch.autograd.function._ContextMethodMixin | |
| world_size (defined in torch.nn.modules._functions.SyncBatchNorm) | torch.nn.modules._functions.SyncBatchNorm | |