torch/cuda/random.py
from torch import _C, device
from . import _lazy_init, _lazy_call, device_count, device as device_ctx_manager

__all__ = ['get_rng_state', 'get_rng_state_all',
           'set_rng_state', 'set_rng_state_all',
           'manual_seed', 'manual_seed_all',
           'seed', 'seed_all', 'initial_seed']


def get_rng_state(device=device('cuda')):
    r"""Returns the random number generator state of the specified GPU as a
    ByteTensor.

    Args:
        device (torch.device or int, optional): The device to return the RNG state of.
            Default: ``torch.device('cuda')`` (i.e., the current CUDA device).

    .. warning::
        This function eagerly initializes CUDA.
    """
    _lazy_init()
    with device_ctx_manager(device):
        return _C._cuda_getRNGState()


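# Usage sketch (illustrative, not part of this module): snapshot the RNG
# state before a stretch of random ops and restore it afterwards to replay
# them exactly. Assumes a CUDA build of torch; the is_available() guard
# keeps the sketch inert on CPU-only machines.
#
#     import torch
#
#     if torch.cuda.is_available():
#         state = torch.cuda.get_rng_state()   # ByteTensor snapshot
#         a = torch.randn(3, device='cuda')
#         torch.cuda.set_rng_state(state)      # rewind the generator
#         b = torch.randn(3, device='cuda')
#         assert torch.equal(a, b)             # identical draws

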
def get_rng_state_all():
    r"""Returns a list of ByteTensor representing the random number states of all devices."""

    results = []
    for i in range(device_count()):
        with device_ctx_manager(i):
            results.append(get_rng_state())
    return results


def set_rng_state(new_state, device=device('cuda')):
    r"""Sets the random number generator state of the specified GPU.

    Args:
        new_state (torch.ByteTensor): The desired state.
        device (torch.device or int, optional): The device to set the RNG state of.
            Default: ``torch.device('cuda')`` (i.e., the current CUDA device).
    """
    new_state_copy = new_state.clone()

    # NB: What if device=-1? You might be afraid that the "current"
    # device would change by the time we actually get around to invoking
    # the lazy callback. But actually, this is not possible: changing
    # the current device involves a CUDA call, which would in turn
    # initialize the state. So then _lazy_call would execute cb
    # immediately.
    def cb():
        with device_ctx_manager(device):
            _C._cuda_setRNGState(new_state_copy)

    _lazy_call(cb)


def set_rng_state_all(new_states):
    r"""Sets the random number generator state of all devices.

    Args:
        new_states (Iterable of torch.ByteTensor): The desired state for each device.
    """
    for i, state in enumerate(new_states):
        set_rng_state(state, i)


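# Usage sketch (illustrative, not part of this module): checkpoint and later
# restore the RNG state of every visible GPU at once, e.g. around a
# preemptible training step. Assumes at least one CUDA device is present;
# run_training_step() is a hypothetical stand-in for random ops.
#
#     import torch
#
#     if torch.cuda.is_available():
#         states = torch.cuda.get_rng_state_all()   # one ByteTensor per device
#         run_training_step()                       # hypothetical random ops
#         torch.cuda.set_rng_state_all(states)      # rewind every generator

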
def manual_seed(seed):
    r"""Sets the seed for generating random numbers for the current GPU.
    It's safe to call this function if CUDA is not available; in that
    case, it is silently ignored.

    Args:
        seed (int): The desired seed.

    .. warning::
        If you are working with a multi-GPU model, this function is insufficient
        to get determinism. To seed all GPUs, use :func:`manual_seed_all`.
    """
    seed = int(seed)
    _lazy_call(lambda: _C._cuda_manualSeed(seed))


def manual_seed_all(seed):
    r"""Sets the seed for generating random numbers on all GPUs.
    It's safe to call this function if CUDA is not available; in that
    case, it is silently ignored.

    Args:
        seed (int): The desired seed.
    """
    seed = int(seed)
    _lazy_call(lambda: _C._cuda_manualSeedAll(seed))


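# Usage sketch (illustrative, not part of this module): seed the CPU
# generator and every GPU generator for a reproducible multi-GPU run.
# manual_seed_all is silently ignored without CUDA, so no guard is needed.
#
#     import torch
#
#     torch.manual_seed(42)           # CPU generator
#     torch.cuda.manual_seed_all(42)  # all CUDA generators; no-op without CUDA

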
def seed():
    r"""Sets the seed for generating random numbers to a random number for the current GPU.
    It's safe to call this function if CUDA is not available; in that
    case, it is silently ignored.

    .. warning::
        If you are working with a multi-GPU model, this function will only initialize
        the seed on one GPU. To initialize all GPUs, use :func:`seed_all`.
    """
    _lazy_call(lambda: _C._cuda_seed())


def seed_all():
    r"""Sets the seed for generating random numbers to a random number on all GPUs.
    It's safe to call this function if CUDA is not available; in that
    case, it is silently ignored.
    """
    _lazy_call(lambda: _C._cuda_seedAll())


def initial_seed():
    r"""Returns the current random seed of the current GPU.

    .. warning::
        This function eagerly initializes CUDA.
    """
    _lazy_init()
    return _C._cuda_initialSeed()
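

# Usage sketch (illustrative, not part of this module): re-seed the current
# GPU from a nondeterministic source and read the chosen value back, e.g. to
# log it so a run can be reproduced later with manual_seed. Assumes a CUDA
# device is present; note that initial_seed() eagerly initializes CUDA.
#
#     import torch
#
#     if torch.cuda.is_available():
#         torch.cuda.seed()                    # pick a fresh random seed
#         chosen = torch.cuda.initial_seed()   # read it back as an int
#         print('reproduce with torch.cuda.manual_seed(%d)' % chosen)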