5 r"""Context-manager that enable anomaly detection for the autograd engine. 8 - Running the forward pass with detection enabled will allow the backward 9 pass to print the traceback of the forward operation that created the failing 11 - Any backward computation that generate "nan" value will raise an error. 16 >>> from torch import autograd 17 >>> class MyFunc(autograd.Function): 19 ... def forward(ctx, inp): 20 ... return inp.clone() 22 ... def backward(ctx, gO): 23 ... # Error during the backward pass 24 ... raise RuntimeError("Some error in backward") 27 ... out = MyFunc.apply(a) 29 >>> inp = torch.rand(10, 10, requires_grad=True) 32 Traceback (most recent call last): 33 File "<stdin>", line 1, in <module> 34 File "/your/pytorch/install/torch/tensor.py", line 93, in backward 35 torch.autograd.backward(self, gradient, retain_graph, create_graph) 36 File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward 37 allow_unreachable=True) # allow_unreachable flag 38 File "/your/pytorch/install/torch/autograd/function.py", line 76, in apply 39 return self._forward_cls.backward(self, *args) 40 File "<stdin>", line 8, in backward 41 RuntimeError: Some error in backward 42 >>> with autograd.detect_anomaly(): 43 ... inp = torch.rand(10, 10, requires_grad=True) 46 Traceback of forward call that caused the error: 47 File "tmp.py", line 53, in <module> 49 File "tmp.py", line 44, in run_fn 51 Traceback (most recent call last): 52 File "<stdin>", line 4, in <module> 53 File "/your/pytorch/install/torch/tensor.py", line 93, in backward 54 torch.autograd.backward(self, gradient, retain_graph, create_graph) 55 File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward 56 allow_unreachable=True) # allow_unreachable flag 57 File "/your/pytorch/install/torch/autograd/function.py", line 76, in apply 58 return self._forward_cls.backward(self, *args) 59 File "<stdin>", line 8, in backward 60 RuntimeError: Some error in backward 65 self.
prev = torch.is_anomaly_enabled()
68 torch.set_anomaly_enabled(
True)
70 def __exit__(self, *args):
71 torch.set_anomaly_enabled(self.
prev)
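
# A minimal sketch (not part of the original module) of the nan-detection
# behaviour documented above: with anomaly detection active, a backward node
# that produces nan raises a RuntimeError instead of silently propagating nan
# gradients. `_demo_nan_anomaly` is a hypothetical helper name used only for
# illustration.
def _demo_nan_anomaly():
    x = torch.zeros(1, requires_grad=True)
    with detect_anomaly():
        y = x.sqrt()           # y == 0 here
        out = (y * 0.0).sum()  # incoming grad for sqrt is 0, so the sqrt
                               # backward computes 0 / (2 * 0) == nan
        out.backward()         # anomaly mode raises RuntimeError on the nan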
76 r"""Context-manager that sets the anomaly detection for the autograd engine on or off. 78 ``set_detect_anomaly`` will enable or disable the autograd anomaly detection 79 based on its argument :attr:`mode`. 80 It can be used as a context-manager or as a function. 82 See ``detect_anomaly`` above for details of the anomaly detection behaviour. 85 mode (bool): Flag whether to enable anomaly detection (``True``), 86 or disable (``False``). 90 def __init__(self, mode):
91 self.
prev = torch.is_anomaly_enabled()
92 torch.set_anomaly_enabled(mode)
97 def __exit__(self, *args):
98 torch.set_anomaly_enabled(self.prev)
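
# A minimal sketch (not part of the original module) of the dual usage noted
# in the docstring above: because ``__init__`` flips the flag immediately,
# ``set_detect_anomaly`` works as a plain function call, while using it as a
# context-manager additionally restores the previous state on exit.
# `_demo_set_detect_anomaly` is a hypothetical helper name for illustration.
def _demo_set_detect_anomaly():
    set_detect_anomaly(True)       # function-style: flag stays enabled
    assert torch.is_anomaly_enabled()
    set_detect_anomaly(False)      # function-style: flag disabled again
    with set_detect_anomaly(True):  # context-manager style
        assert torch.is_anomaly_enabled()
    assert not torch.is_anomaly_enabled()  # __exit__ restored the prior state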