import math

import torch
from torch.nn.parameter import Parameter
from .. import functional as F
from .. import init
from .module import Module
from ..._jit_internal import weak_module, weak_script_method
13 r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b` 16 in_features: size of each input sample 17 out_features: size of each output sample 18 bias: If set to ``False``, the layer will not learn an additive bias. 22 - Input: :math:`(N, *, H_{in})` where :math:`*` means any number of 23 additional dimensions and :math:`H_{in} = \text{in\_features}` 24 - Output: :math:`(N, *, H_{out})` where all but the last dimension 25 are the same shape as the input and :math:`H_{out} = \text{out\_features}`. 28 weight: the learnable weights of the module of shape 29 :math:`(\text{out\_features}, \text{in\_features})`. The values are 30 initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where 31 :math:`k = \frac{1}{\text{in\_features}}` 32 bias: the learnable bias of the module of shape :math:`(\text{out\_features})`. 33 If :attr:`bias` is ``True``, the values are initialized from 34 :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where 35 :math:`k = \frac{1}{\text{in\_features}}` 39 >>> m = nn.Linear(20, 30) 40 >>> input = torch.randn(128, 20) 42 >>> print(output.size()) 45 __constants__ = [
'bias']
    def __init__(self, in_features, out_features, bias=True):
        super(Linear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        if bias:
            self.bias = Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        # kaiming_uniform_ with a=sqrt(5) uses gain sqrt(2 / (1 + 5)), so the
        # sampling bound works out to sqrt(1 / fan_in) -- i.e. the
        # U(-sqrt(k), sqrt(k)) with k = 1/in_features described in the docstring.
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)
    @weak_script_method
    def forward(self, input):
        return F.linear(input, self.weight, self.bias)

    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None
        )
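
# A minimal sketch (not part of the upstream file) checking that
# ``Linear.forward`` matches the documented :math:`y = xA^T + b`; the names
# ``m`` and ``x`` below are illustrative assumptions:
#
#     >>> m = Linear(20, 30)
#     >>> x = torch.randn(128, 20)
#     >>> torch.allclose(m(x), x.matmul(m.weight.t()) + m.bias)
#     True
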
77 r"""Applies a bilinear transformation to the incoming data: 78 :math:`y = x_1 A x_2 + b` 81 in1_features: size of each first input sample 82 in2_features: size of each second input sample 83 out_features: size of each output sample 84 bias: If set to False, the layer will not learn an additive bias. 88 - Input1: :math:`(N, *, H_{in1})` where :math:`H_{in1}=\text{in1\_features}` and 89 :math:`*` means any number of additional dimensions. All but the last dimension 90 of the inputs should be the same. 91 - Input2: :math:`(N, *, H_{in2})` where :math:`H_{in2}=\text{in2\_features}`. 92 - Output: :math:`(N, *, H_{out})` where :math:`H_{out}=\text{out\_features}` 93 and all but the last dimension are the same shape as the input. 96 weight: the learnable weights of the module of shape 97 :math:`(\text{out\_features}, \text{in1\_features}, \text{in2\_features})`. 98 The values are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where 99 :math:`k = \frac{1}{\text{in1\_features}}` 100 bias: the learnable bias of the module of shape :math:`(\text{out\_features})`. 101 If :attr:`bias` is ``True``, the values are initialized from 102 :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where 103 :math:`k = \frac{1}{\text{in1\_features}}` 107 >>> m = nn.Bilinear(20, 30, 40) 108 >>> input1 = torch.randn(128, 20) 109 >>> input2 = torch.randn(128, 30) 110 >>> output = m(input1, input2) 111 >>> print(output.size()) 112 torch.Size([128, 40]) 114 __constants__ = [
'in1_features',
'in2_features',
'out_features',
'bias']
    def __init__(self, in1_features, in2_features, out_features, bias=True):
        super(Bilinear, self).__init__()
        self.in1_features = in1_features
        self.in2_features = in2_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in1_features, in2_features))

        if bias:
            self.bias = Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        bound = 1 / math.sqrt(self.weight.size(1))
        init.uniform_(self.weight, -bound, bound)
        if self.bias is not None:
            init.uniform_(self.bias, -bound, bound)
    @weak_script_method
    def forward(self, input1, input2):
        return F.bilinear(input1, input2, self.weight, self.bias)

    def extra_repr(self):
        return 'in1_features={}, in2_features={}, out_features={}, bias={}'.format(
            self.in1_features, self.in2_features, self.out_features,
            self.bias is not None
        )
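
# A minimal sketch (not part of the upstream file) of what ``Bilinear.forward``
# computes, expressed with ``torch.einsum``; ``m``, ``x1`` and ``x2`` are
# illustrative assumptions:
#
#     >>> m = Bilinear(20, 30, 40)
#     >>> x1, x2 = torch.randn(128, 20), torch.randn(128, 30)
#     >>> ref = torch.einsum('ni,oij,nj->no', x1, m.weight, x2) + m.bias
#     >>> torch.allclose(m(x1, x2), ref, atol=1e-6)
#     True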