PyTorch - Python API
A deep learning, cross platform ML framework
torch/autograd/anomaly_mode.py
import torch
2 
3 
class detect_anomaly(object):
    r"""Context-manager that enables anomaly detection for the autograd engine.

    This does two things:

    - Running the forward pass with detection enabled allows the backward
      pass to print the traceback of the forward operation that created the
      failing backward function.
    - Any backward computation that generates a "nan" value will raise an
      error.

    Example:

        >>> import torch
        >>> from torch import autograd
        >>> class MyFunc(autograd.Function):
        ...     @staticmethod
        ...     def forward(ctx, inp):
        ...         return inp.clone()
        ...     @staticmethod
        ...     def backward(ctx, gO):
        ...         # Error during the backward pass
        ...         raise RuntimeError("Some error in backward")
        ...         return gO.clone()
        >>> def run_fn(a):
        ...     out = MyFunc.apply(a)
        ...     return out.sum()
        >>> inp = torch.rand(10, 10, requires_grad=True)
        >>> out = run_fn(inp)
        >>> out.backward()
        Traceback (most recent call last):
          ...
        RuntimeError: Some error in backward
        >>> with autograd.detect_anomaly():
        ...     inp = torch.rand(10, 10, requires_grad=True)
        ...     out = run_fn(inp)
        ...     out.backward()
        Traceback of forward call that caused the error:
          File "tmp.py", line 53, in <module>
            out = run_fn(inp)
          File "tmp.py", line 44, in run_fn
            out = MyFunc.apply(a)
        Traceback (most recent call last):
          ...
        RuntimeError: Some error in backward
    """

    def __init__(self):
        # Snapshot the global anomaly flag at construction time so that
        # __exit__ can restore whatever mode the caller was running under.
        self.prev = torch.is_anomaly_enabled()

    def __enter__(self):
        # Unconditionally turn detection on for the duration of the block.
        torch.set_anomaly_enabled(True)

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the pre-existing mode; returning False propagates any
        # exception raised inside the with-block.
        torch.set_anomaly_enabled(self.prev)
        return False
73 
74 
class set_detect_anomaly(object):
    r"""Context-manager that sets the anomaly detection for the autograd engine on or off.

    ``set_detect_anomaly`` will enable or disable the autograd anomaly
    detection based on its argument :attr:`mode`. It can be used either as a
    context-manager or as a plain function call.

    See ``detect_anomaly`` above for details of the anomaly detection
    behaviour.

    Arguments:
        mode (bool): Flag whether to enable anomaly detection (``True``),
            or disable (``False``).
    """

    def __init__(self, mode):
        # The flag is flipped eagerly here (not in __enter__) so that plain
        # function-call usage takes effect immediately; the previous mode is
        # saved first so a with-block can undo the change on exit.
        self.prev = torch.is_anomaly_enabled()
        torch.set_anomaly_enabled(mode)

    def __enter__(self):
        # Nothing to do: __init__ already applied the requested mode.
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        # Put back the mode recorded at construction; False means any
        # exception from the with-block propagates normally.
        torch.set_anomaly_enabled(self.prev)
        return False