from numbers import Number

import torch
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all, lazy_property, logits_to_probs, probs_to_logits
from torch.nn.functional import binary_cross_entropy_with_logits
    r"""
    Creates a Bernoulli distribution parameterized by :attr:`probs`
    or :attr:`logits` (but not both).

    Samples are binary (0 or 1). They take the value `1` with probability `p`
    and `0` with probability `1 - p`.

    Example::

        >>> m = Bernoulli(torch.tensor([0.3]))
        >>> m.sample()  # 30% chance 1; 70% chance 0

    Args:
        probs (Number, Tensor): the probability of sampling `1`
        logits (Number, Tensor): the log-odds of sampling `1`
    """
    # Per-argument constraints used by argument validation.
    arg_constraints = {'probs': constraints.unit_interval,
                       'logits': constraints.real}
    support = constraints.boolean
    has_enumerate_support = True
    # Carrier measure term of the exponential-family form is 0 for Bernoulli.
    _mean_carrier_measure = 0
    def __init__(self, probs=None, logits=None, validate_args=None):
        """Initialize from exactly one of ``probs`` or ``logits``.

        Args:
            probs (Number, Tensor): probability of sampling `1`.
            logits (Number, Tensor): log-odds of sampling `1`.
            validate_args (bool, optional): whether to validate arguments.

        Raises:
            ValueError: if both or neither of ``probs``/``logits`` are given.
        """
        if (probs is None) == (logits is None):
            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
        if probs is not None:
            is_scalar = isinstance(probs, Number)
            # broadcast_all returns a one-tuple here; unpack the tensor.
            self.probs, = broadcast_all(probs)
        else:
            is_scalar = isinstance(logits, Number)
            self.logits, = broadcast_all(logits)
        # Whichever parameterization was supplied is the canonical parameter
        # used for shape, dtype and device queries.
        self._param = self.probs if probs is not None else self.logits
        if is_scalar:
            batch_shape = torch.Size()
        else:
            batch_shape = self._param.size()
        super(Bernoulli, self).__init__(batch_shape, validate_args=validate_args)
    def expand(self, batch_shape, _instance=None):
        """Return a new Bernoulli with parameters expanded to ``batch_shape``.

        Args:
            batch_shape: the desired expanded batch shape.
            _instance: optional pre-allocated instance to populate.
        """
        new = self._get_checked_instance(Bernoulli, _instance)
        batch_shape = torch.Size(batch_shape)
        # Expand whichever parameterization this instance actually stores.
        if 'probs' in self.__dict__:
            new.probs = self.probs.expand(batch_shape)
            new._param = new.probs
        else:
            new.logits = self.logits.expand(batch_shape)
            new._param = new.logits
        # Skip re-validation: the parameters come from an already-validated
        # instance. Preserve the caller's validation flag afterwards.
        super(Bernoulli, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new
    63     def _new(self, *args, **kwargs):
    64         return self._param.new(*args, **kwargs)
    @lazy_property
    def logits(self):
        """Log-odds derived from ``probs``; computed once and cached on self."""
        return probs_to_logits(self.probs, is_binary=True)
    @lazy_property
    def probs(self):
        """Probability derived from ``logits``; computed once and cached on self."""
        return logits_to_probs(self.logits, is_binary=True)
    @property
    def param_shape(self):
        """Shape of the underlying parameter tensor (``probs`` or ``logits``)."""
        return self._param.size()
    def sample(self, sample_shape=torch.Size()):
        """Draw Bernoulli samples of shape ``sample_shape + batch_shape``.

        Sampling is not reparameterized, so gradients are disabled.
        """
        shape = self._extended_shape(sample_shape)
        with torch.no_grad():
            return torch.bernoulli(self.probs.expand(shape))
    def log_prob(self, value):
        """Log-probability of ``value`` (entries 0 or 1), broadcast with logits.

        Args:
            value (Tensor): binary values to score.

        Returns:
            Tensor: elementwise log-probabilities.
        """
        if self._validate_args:
            self._validate_sample(value)
        logits, value = broadcast_all(self.logits, value)
        # log p(value) = -BCE(logits, value), computed elementwise.
        return -binary_cross_entropy_with_logits(logits, value, reduction='none')
    def entropy(self):
        """Elementwise entropy: H(p) = -(p log p + (1 - p) log(1 - p)).

        Expressed as BCE of the logits against the probabilities themselves.
        """
        return binary_cross_entropy_with_logits(self.logits, self.probs, reduction='none')
   100     def enumerate_support(self, expand=True):
   101         values = torch.arange(2, dtype=self._param.dtype, device=self._param.device)
   102         values = values.view((-1,) + (1,) * len(self.
_batch_shape))
   108     def _natural_params(self):
   109         return (torch.log(self.
probs / (1 - self.
probs)), )
   111     def _log_normalizer(self, x):
   112         return torch.log(1 + torch.exp(x))
 
def _get_checked_instance(self, cls, _instance=None)
def _extended_shape(self, sample_shape=torch.Size())
def _validate_sample(self, value)