padding.py
from .module import Module
from .utils import _pair, _quadruple, _ntuple
from .. import functional as F
from ..._jit_internal import weak_module, weak_script_method


# TODO: grad_output size asserts in THNN


@weak_module
class _ConstantPadNd(Module):
    __constants__ = ['padding', 'value']

    def __init__(self, value):
        super(_ConstantPadNd, self).__init__()
        self.value = value

    @weak_script_method
    def forward(self, input):
        return F.pad(input, self.padding, 'constant', self.value)

    def extra_repr(self):
        return 'padding={}, value={}'.format(self.padding, self.value)
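

# Editorial note (not in the original source): F.pad consumes its padding
# tuple starting from the *last* dimension, so (left, right) pads dim -1 and
# (left, right, top, bottom) pads dims -1 and -2.  The concrete subclasses
# below only normalize an int or short tuple into that flat form, e.g.:
#
#     >>> import torch
#     >>> import torch.nn.functional as F
#     >>> F.pad(torch.zeros(1, 1, 2, 2), (1, 1, 2, 0), 'constant', 0.).shape
#     torch.Size([1, 1, 4, 4])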


@weak_module
class ConstantPad1d(_ConstantPadNd):
    r"""Pads the input tensor boundaries with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the
            same padding on both boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)

    Shape:
        - Input: :math:`(N, C, W_{in})`
        - Output: :math:`(N, C, W_{out})` where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ConstantPad1d(2, 3.5)
        >>> input = torch.randn(1, 2, 4)
        >>> input
        tensor([[[-1.0491, -0.7152, -0.0749,  0.8530],
                 [-1.3287,  1.8966,  0.1466, -0.2771]]])
        >>> m(input)
        tensor([[[ 3.5000,  3.5000, -1.0491, -0.7152, -0.0749,  0.8530,  3.5000,
                   3.5000],
                 [ 3.5000,  3.5000, -1.3287,  1.8966,  0.1466, -0.2771,  3.5000,
                   3.5000]]])
        >>> m = nn.ConstantPad1d(2, 3.5)
        >>> input = torch.randn(1, 2, 3)
        >>> input
        tensor([[[ 1.6616,  1.4523, -1.1255],
                 [-3.6372,  0.1182, -1.8652]]])
        >>> m(input)
        tensor([[[ 3.5000,  3.5000,  1.6616,  1.4523, -1.1255,  3.5000,  3.5000],
                 [ 3.5000,  3.5000, -3.6372,  0.1182, -1.8652,  3.5000,  3.5000]]])
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad1d((3, 1), 3.5)
        >>> m(input)
        tensor([[[ 3.5000,  3.5000,  3.5000,  1.6616,  1.4523, -1.1255,  3.5000],
                 [ 3.5000,  3.5000,  3.5000, -3.6372,  0.1182, -1.8652,  3.5000]]])

    """

    def __init__(self, padding, value):
        super(ConstantPad1d, self).__init__(value)
        self.padding = _pair(padding)


@weak_module
class ConstantPad2d(_ConstantPadNd):
    r"""Pads the input tensor boundaries with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the
            same padding on all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ConstantPad2d(2, 3.5)
        >>> input = torch.randn(1, 2, 2)
        >>> input
        tensor([[[ 1.6585,  0.4320],
                 [-0.8701, -0.4649]]])
        >>> m(input)
        tensor([[[ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
                 [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
                 [ 3.5000,  3.5000,  1.6585,  0.4320,  3.5000,  3.5000],
                 [ 3.5000,  3.5000, -0.8701, -0.4649,  3.5000,  3.5000],
                 [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
                 [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000]]])
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5)
        >>> m(input)
        tensor([[[ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
                 [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
                 [ 3.5000,  3.5000,  3.5000,  1.6585,  0.4320],
                 [ 3.5000,  3.5000,  3.5000, -0.8701, -0.4649],
                 [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000]]])

    """
    __constants__ = ['padding', 'value']

    def __init__(self, padding, value):
        super(ConstantPad2d, self).__init__(value)
        self.padding = _quadruple(padding)
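

# Editorial note (not in the original source): although the docstring above
# documents a 4-D (N, C, H, W) input, the example feeds a 3-D (1, 2, 2)
# tensor.  F.pad only looks at the trailing dimensions named by the padding
# tuple, so the last two dims are padded and (1, 2, 2) becomes (1, 6, 6).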


@weak_module
class ConstantPad3d(_ConstantPadNd):
    r"""Pads the input tensor boundaries with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the
            same padding on all boundaries. If a 6-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ConstantPad3d(3, 3.5)
        >>> input = torch.randn(16, 3, 10, 20, 30)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad3d((3, 3, 6, 6, 0, 1), 3.5)
        >>> output = m(input)

    """

    def __init__(self, padding, value):
        super(ConstantPad3d, self).__init__(value)
        self.padding = _ntuple(6)(padding)
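

# Editorial note (not in the original source): the ConstantPad3d example only
# shows usage, but the output shapes follow from the docstring formulas.
# With padding=3 every spatial dim grows by 6, so (16, 3, 10, 20, 30) becomes
# (16, 3, 16, 26, 36); with padding=(3, 3, 6, 6, 0, 1), D = 10 + 0 + 1 = 11,
# H = 20 + 6 + 6 = 32, W = 30 + 3 + 3 = 36, i.e. (16, 3, 11, 32, 36).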


@weak_module
class _ReflectionPadNd(Module):
    __constants__ = ['padding']

    @weak_script_method
    def forward(self, input):
        return F.pad(input, self.padding, 'reflect')

    def extra_repr(self):
        return '{}'.format(self.padding)
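

# Editorial note (not in the original source): reflection padding mirrors the
# input *without* repeating the edge value, so each padding size must be
# smaller than the corresponding input dimension.  For example,
# ReflectionPad1d(4) on a length-4 input raises a runtime error, while
# ReflectionPad1d(3) is the largest legal choice there.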


@weak_module
class ReflectionPad1d(_ReflectionPadNd):
    r"""Pads the input tensor using the reflection of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the
            same padding on both boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)

    Shape:
        - Input: :math:`(N, C, W_{in})`
        - Output: :math:`(N, C, W_{out})` where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReflectionPad1d(2)
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
        >>> input
        tensor([[[0., 1., 2., 3.],
                 [4., 5., 6., 7.]]])
        >>> m(input)
        tensor([[[2., 1., 0., 1., 2., 3., 2., 1.],
                 [6., 5., 4., 5., 6., 7., 6., 5.]]])
        >>> # using different paddings for different sides
        >>> m = nn.ReflectionPad1d((3, 1))
        >>> m(input)
        tensor([[[3., 2., 1., 0., 1., 2., 3., 2.],
                 [7., 6., 5., 4., 5., 6., 7., 6.]]])

    """

    def __init__(self, padding):
        super(ReflectionPad1d, self).__init__()
        self.padding = _pair(padding)


@weak_module
class ReflectionPad2d(_ReflectionPadNd):
    r"""Pads the input tensor using the reflection of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the
            same padding on all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReflectionPad2d(2)
        >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
        >>> input
        tensor([[[[0., 1., 2.],
                  [3., 4., 5.],
                  [6., 7., 8.]]]])
        >>> m(input)
        tensor([[[[8., 7., 6., 7., 8., 7., 6.],
                  [5., 4., 3., 4., 5., 4., 3.],
                  [2., 1., 0., 1., 2., 1., 0.],
                  [5., 4., 3., 4., 5., 4., 3.],
                  [8., 7., 6., 7., 8., 7., 6.],
                  [5., 4., 3., 4., 5., 4., 3.],
                  [2., 1., 0., 1., 2., 1., 0.]]]])
        >>> # using different paddings for different sides
        >>> m = nn.ReflectionPad2d((1, 1, 2, 0))
        >>> m(input)
        tensor([[[[7., 6., 7., 8., 7.],
                  [4., 3., 4., 5., 4.],
                  [1., 0., 1., 2., 1.],
                  [4., 3., 4., 5., 4.],
                  [7., 6., 7., 8., 7.]]]])

    """

    def __init__(self, padding):
        super(ReflectionPad2d, self).__init__()
        self.padding = _quadruple(padding)


@weak_module
class _ReplicationPadNd(Module):
    __constants__ = ['padding']

    @weak_script_method
    def forward(self, input):
        return F.pad(input, self.padding, 'replicate')

    def extra_repr(self):
        return '{}'.format(self.padding)
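

# Editorial note (not in the original source): replication repeats the edge
# element itself, while reflection mirrors around it without repeating it --
# compare ReplicationPad1d(2) mapping [0., 1., 2., 3.] to
# [0., 0., 0., 1., 2., 3., 3., 3.] in the example below with ReflectionPad1d(2)
# mapping the same input to [2., 1., 0., 1., 2., 3., 2., 1.] above.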


@weak_module
class ReplicationPad1d(_ReplicationPadNd):
    r"""Pads the input tensor using replication of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the
            same padding on both boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)

    Shape:
        - Input: :math:`(N, C, W_{in})`
        - Output: :math:`(N, C, W_{out})` where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReplicationPad1d(2)
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
        >>> input
        tensor([[[0., 1., 2., 3.],
                 [4., 5., 6., 7.]]])
        >>> m(input)
        tensor([[[0., 0., 0., 1., 2., 3., 3., 3.],
                 [4., 4., 4., 5., 6., 7., 7., 7.]]])
        >>> # using different paddings for different sides
        >>> m = nn.ReplicationPad1d((3, 1))
        >>> m(input)
        tensor([[[0., 0., 0., 0., 1., 2., 3., 3.],
                 [4., 4., 4., 4., 5., 6., 7., 7.]]])

    """

    def __init__(self, padding):
        super(ReplicationPad1d, self).__init__()
        self.padding = _pair(padding)


@weak_module
class ReplicationPad2d(_ReplicationPadNd):
    r"""Pads the input tensor using replication of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the
            same padding on all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReplicationPad2d(2)
        >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
        >>> input
        tensor([[[[0., 1., 2.],
                  [3., 4., 5.],
                  [6., 7., 8.]]]])
        >>> m(input)
        tensor([[[[0., 0., 0., 1., 2., 2., 2.],
                  [0., 0., 0., 1., 2., 2., 2.],
                  [0., 0., 0., 1., 2., 2., 2.],
                  [3., 3., 3., 4., 5., 5., 5.],
                  [6., 6., 6., 7., 8., 8., 8.],
                  [6., 6., 6., 7., 8., 8., 8.],
                  [6., 6., 6., 7., 8., 8., 8.]]]])
        >>> # using different paddings for different sides
        >>> m = nn.ReplicationPad2d((1, 1, 2, 0))
        >>> m(input)
        tensor([[[[0., 0., 1., 2., 2.],
                  [0., 0., 1., 2., 2.],
                  [0., 0., 1., 2., 2.],
                  [3., 3., 4., 5., 5.],
                  [6., 6., 7., 8., 8.]]]])

    """

    def __init__(self, padding):
        super(ReplicationPad2d, self).__init__()
        self.padding = _quadruple(padding)


@weak_module
class ReplicationPad3d(_ReplicationPadNd):
    r"""Pads the input tensor using replication of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the
            same padding on all boundaries. If a 6-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReplicationPad3d(3)
        >>> input = torch.randn(16, 3, 8, 320, 480)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ReplicationPad3d((3, 3, 6, 6, 1, 1))
        >>> output = m(input)

    """

    def __init__(self, padding):
        super(ReplicationPad3d, self).__init__()
        self.padding = _ntuple(6)(padding)
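

# Editorial note (not in the original source): for the ReplicationPad3d
# example, padding=3 grows every spatial dim by 6, so (16, 3, 8, 320, 480)
# becomes (16, 3, 14, 326, 486); with padding=(3, 3, 6, 6, 1, 1),
# D = 8 + 1 + 1 = 10, H = 320 + 6 + 6 = 332, W = 480 + 3 + 3 = 486,
# i.e. (16, 3, 10, 332, 486).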


@weak_module
class ZeroPad2d(ConstantPad2d):
    r"""Pads the input tensor boundaries with zero.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If it is an `int`, uses the
            same padding on all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ZeroPad2d(2)
        >>> input = torch.randn(1, 1, 3, 3)
        >>> input
        tensor([[[[-0.1678, -0.4418,  1.9466],
                  [ 0.9604, -0.4219, -0.5241],
                  [-0.9162, -0.5436, -0.6446]]]])
        >>> m(input)
        tensor([[[[ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
                  [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
                  [ 0.0000,  0.0000, -0.1678, -0.4418,  1.9466,  0.0000,  0.0000],
                  [ 0.0000,  0.0000,  0.9604, -0.4219, -0.5241,  0.0000,  0.0000],
                  [ 0.0000,  0.0000, -0.9162, -0.5436, -0.6446,  0.0000,  0.0000],
                  [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
                  [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000]]]])
        >>> # using different paddings for different sides
        >>> m = nn.ZeroPad2d((1, 1, 2, 0))
        >>> m(input)
        tensor([[[[ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
                  [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
                  [ 0.0000, -0.1678, -0.4418,  1.9466,  0.0000],
                  [ 0.0000,  0.9604, -0.4219, -0.5241,  0.0000],
                  [ 0.0000, -0.9162, -0.5436, -0.6446,  0.0000]]]])

    """

    def __init__(self, padding):
        super(ZeroPad2d, self).__init__(padding, 0.)
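

# Editorial note (not in the original source): ZeroPad2d merely fixes the fill
# value of ConstantPad2d at 0., so the two modules are interchangeable:
#
#     >>> import torch
#     >>> import torch.nn as nn
#     >>> x = torch.randn(1, 1, 3, 3)
#     >>> torch.equal(nn.ZeroPad2d(2)(x), nn.ConstantPad2d(2, 0.)(x))
#     True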