## @package conv
# Module caffe2.python.helpers.conv
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from caffe2.python import core
from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags


def _ConvBase(
    model,
    is_nd,
    blob_in,
    blob_out,
    dim_in,
    dim_out,
    kernel,
    weight_init=None,
    bias_init=None,
    WeightInitializer=None,
    BiasInitializer=None,
    group=1,
    transform_inputs=None,
    use_cudnn=False,
    order="NCHW",
    cudnn_exhaustive_search=False,
    ws_nbytes_limit=None,
    float16_compute=False,
    **kwargs
):
    kernels = []
    if is_nd:
        if not isinstance(kernel, list):
            kernels = [kernel]
        else:
            kernels = kernel
    else:
        if isinstance(kernel, list):
            assert len(kernel) == 2, "Conv support only a 2D kernel."
            kernels = kernel
        else:
            kernels = [kernel] * 2

    requested_engine = kwargs.get('engine')
    if requested_engine is not None:
        if use_cudnn and requested_engine != 'CUDNN':
            raise ValueError(
                'When use_cudnn=True, the only engine you can specify is '
                '"CUDNN"')
        elif not use_cudnn and requested_engine == 'CUDNN':
            raise ValueError(
                'When use_cudnn=False, the only engine you can specify is '
                '""')

    if use_cudnn:
        kwargs['engine'] = 'CUDNN'
        kwargs['exhaustive_search'] = cudnn_exhaustive_search
        if ws_nbytes_limit:
            kwargs['ws_nbytes_limit'] = ws_nbytes_limit

    use_bias = False if ("no_bias" in kwargs and kwargs["no_bias"]) else True
    blob_out = blob_out or model.net.NextName()
    weight_shape = [dim_out]
    if order == "NCHW":
        weight_shape.append(int(dim_in / group))
        weight_shape.extend(kernels)
    else:
        weight_shape.extend(kernels)
        weight_shape.append(int(dim_in / group))

    WeightInitializer = initializers.update_initializer(
        WeightInitializer, weight_init, ("XavierFill", {})
    )
    BiasInitializer = initializers.update_initializer(
        BiasInitializer, bias_init, ("ConstantFill", {})
    )
    if not model.init_params:
        WeightInitializer = initializers.ExternalInitializer()
        BiasInitializer = initializers.ExternalInitializer()

    weight = model.create_param(
        param_name=blob_out + '_w',
        shape=weight_shape,
        initializer=WeightInitializer,
        tags=ParameterTags.WEIGHT
    )
    if use_bias:
        bias = model.create_param(
            param_name=blob_out + '_b',
            shape=[dim_out, ],
            initializer=BiasInitializer,
            tags=ParameterTags.BIAS
        )
        inputs = [blob_in, weight, bias]
    else:
        inputs = [blob_in, weight]

    if transform_inputs is not None:
        transform_inputs(model, blob_out, inputs)

    # Enable float 16 compute kernel (relevant for CUDA)
    if float16_compute:
        kwargs['float16_compute'] = True

    # The Conv operator infers no_bias from its number of inputs, so the
    # kwarg is no longer needed at this point.
    if 'no_bias' in kwargs:
        del kwargs['no_bias']
    if group != 1:
        kwargs['group'] = group

    if is_nd:
        return model.net.Conv(
            inputs, blob_out, kernels=kernels, order=order, **kwargs)
    elif isinstance(kernel, list):
        return model.net.Conv(
            inputs, blob_out, kernel_h=kernel[0], kernel_w=kernel[1],
            order=order, **kwargs)
    else:
        return model.net.Conv(
            inputs, blob_out, kernel=kernel, order=order, **kwargs)
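
# For reference: the weight blob created by _ConvBase has shape
# [dim_out, dim_in // group, *kernels] under NCHW order and
# [dim_out, *kernels, dim_in // group] under NHWC, plus an optional
# [dim_out] bias. E.g. dim_in=64, dim_out=128, kernel=3, group=1 under
# NCHW yields a [128, 64, 3, 3] weight.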


def conv_nd(
    model,
    blob_in,
    blob_out,
    dim_in,
    dim_out,
    kernel,
    weight_init=None,
    bias_init=None,
    WeightInitializer=None,
    BiasInitializer=None,
    group=1,
    transform_inputs=None,
    order="NCHW",
    **kwargs
):
    """N-dimensional convolution for inputs with NCHW storage order.
    """
    assert order == "NCHW", "ConvNd only supported for NCHW storage."
    return _ConvBase(model, True, blob_in, blob_out, dim_in, dim_out, kernel,
                     weight_init, bias_init, WeightInitializer,
                     BiasInitializer, group, transform_inputs, order=order,
                     **kwargs)
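
# Illustrative sketch only (assumes a model_helper.ModelHelper instance
# named `model` and a 5D NCHW blob "video", e.g. shape [N, 3, T, H, W]):
#
#   conv3d = conv_nd(model, "video", "conv3d", dim_in=3, dim_out=8,
#                    kernel=[3, 3, 3])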


def conv(
    model,
    blob_in,
    blob_out,
    dim_in,
    dim_out,
    kernel,
    weight_init=None,
    bias_init=None,
    WeightInitializer=None,
    BiasInitializer=None,
    group=1,
    transform_inputs=None,
    **kwargs
):
    """2-dimensional convolution.
    """
    return _ConvBase(model, False, blob_in, blob_out, dim_in, dim_out, kernel,
                     weight_init, bias_init, WeightInitializer,
                     BiasInitializer, group, transform_inputs, **kwargs)
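
# Illustrative sketch only (assumes a ModelHelper named `model` and an
# NCHW input blob "data" with 3 channels):
#
#   conv1 = conv(model, "data", "conv1", dim_in=3, dim_out=16, kernel=3)
#
# In practice this helper is usually reached through brew, e.g.
# brew.conv(model, "data", "conv1", 3, 16, kernel=3).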


def conv_transpose(
    model,
    blob_in,
    blob_out,
    dim_in,
    dim_out,
    kernel,
    weight_init=None,
    bias_init=None,
    use_cudnn=False,
    order="NCHW",
    cudnn_exhaustive_search=False,
    ws_nbytes_limit=None,
    **kwargs
):
    """ConvTranspose.
    """
    weight_init = weight_init if weight_init else ('XavierFill', {})
    bias_init = bias_init if bias_init else ('ConstantFill', {})
    blob_out = blob_out or model.net.NextName()
    weight_shape = (
        [dim_in, dim_out, kernel, kernel]
        if order == "NCHW" else [dim_in, kernel, kernel, dim_out]
    )
    if model.init_params:
        weight = model.param_init_net.__getattr__(weight_init[0])(
            [],
            blob_out + '_w',
            shape=weight_shape,
            **weight_init[1]
        )
        bias = model.param_init_net.__getattr__(bias_init[0])(
            [],
            blob_out + '_b',
            shape=[dim_out, ],
            **bias_init[1]
        )
    else:
        weight = core.ScopedBlobReference(
            blob_out + '_w', model.param_init_net)
        bias = core.ScopedBlobReference(
            blob_out + '_b', model.param_init_net)
    model.AddParameter(weight, ParameterTags.WEIGHT)
    model.AddParameter(bias, ParameterTags.BIAS)
    if use_cudnn:
        kwargs['engine'] = 'CUDNN'
        kwargs['exhaustive_search'] = cudnn_exhaustive_search
        if ws_nbytes_limit:
            kwargs['ws_nbytes_limit'] = ws_nbytes_limit
    return model.net.ConvTranspose(
        [blob_in, weight, bias],
        blob_out,
        kernel=kernel,
        order=order,
        **kwargs
    )
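
# Illustrative sketch only (assumes a ModelHelper named `model`): a
# kernel-2, stride-2 transposed convolution that doubles the spatial
# resolution of a 32-channel feature map:
#
#   up = conv_transpose(model, "feat", "feat_up", dim_in=32, dim_out=16,
#                       kernel=2, stride=2)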
259 """Group Convolution. 261 This is essentially the same as Conv with a group argument passed in. 262 We specialize this for backward interface compatibility. 264 return conv(model, blob_in, blob_out, dim_in, dim_out, kernel,
265 weight_init=weight_init, bias_init=bias_init,
266 group=group, **kwargs)
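
# Illustrative sketch only (assumes a ModelHelper named `model`): a
# depthwise-style convolution, where group equals the channel count:
#
#   dw = group_conv(model, "x", "x_dw", dim_in=32, dim_out=32, kernel=3,
#                   group=32)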


def group_conv_deprecated(
    model,
    blob_in,
    blob_out,
    dim_in,
    dim_out,
    kernel,
    weight_init=None,
    bias_init=None,
    group=1,
    use_cudnn=False,
    order="NCHW",
    cudnn_exhaustive_search=False,
    ws_nbytes_limit=None,
    **kwargs
):
    """GroupConvolution's deprecated interface.

    This is used to simulate a group convolution via split and concat. You
    should always use the new group convolution in your new code.
    """
    weight_init = weight_init if weight_init else ('XavierFill', {})
    bias_init = bias_init if bias_init else ('ConstantFill', {})
    use_bias = False if ("no_bias" in kwargs and kwargs["no_bias"]) else True
    if use_cudnn:
        kwargs['engine'] = 'CUDNN'
        kwargs['exhaustive_search'] = cudnn_exhaustive_search
        if ws_nbytes_limit:
            kwargs['ws_nbytes_limit'] = ws_nbytes_limit
    if dim_in % group:
        raise ValueError("dim_in should be divisible by group.")
    if dim_out % group:
        raise ValueError("dim_out should be divisible by group.")
    splitted_blobs = model.net.DepthSplit(
        blob_in,
        ['_' + blob_out + '_gconv_split_' + str(i) for i in range(group)],
        dimensions=[int(dim_in / group) for i in range(group)],
        order=order
    )
    weight_shape = (
        [dim_out / group, dim_in / group, kernel, kernel]
        if order == "NCHW" else
        [dim_out / group, kernel, kernel, dim_in / group]
    )
    # Make sure the shapes are of int format. Especially for py3 where
    # int division gives float output.
    weight_shape = [int(v) for v in weight_shape]
    conv_blobs = []
    for i in range(group):
        if model.init_params:
            weight = model.param_init_net.__getattr__(weight_init[0])(
                [],
                blob_out + '_gconv_%d_w' % i,
                shape=weight_shape,
                **weight_init[1]
            )
            if use_bias:
                bias = model.param_init_net.__getattr__(bias_init[0])(
                    [],
                    blob_out + '_gconv_%d_b' % i,
                    shape=[int(dim_out / group)],
                    **bias_init[1]
                )
        else:
            weight = core.ScopedBlobReference(
                blob_out + '_gconv_%d_w' % i, model.param_init_net)
            if use_bias:
                bias = core.ScopedBlobReference(
                    blob_out + '_gconv_%d_b' % i, model.param_init_net)
        model.AddParameter(weight, ParameterTags.WEIGHT)
        if use_bias:
            model.AddParameter(bias, ParameterTags.BIAS)
            inputs = [weight, bias]
        else:
            inputs = [weight]

        # The Conv operator infers no_bias from its number of inputs, so
        # drop the kwarg before calling it.
        if 'no_bias' in kwargs:
            del kwargs['no_bias']

        conv_blobs.append(
            splitted_blobs[i].Conv(
                inputs,
                blob_out + '_gconv_%d' % i,
                kernel=kernel,
                order=order,
                **kwargs
            )
        )
    concat, concat_dims = model.net.Concat(
        conv_blobs,
        [blob_out, "_" + blob_out + "_concat_dims"],
        order=order
    )
    return concat