# Module caffe2.python.helpers.fc
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from caffe2.python import core
from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags

def _FC_or_packed_FC(
    model, op_call, blob_in, blob_out, dim_in, dim_out, weight_init=None,
    bias_init=None, WeightInitializer=None, BiasInitializer=None,
    enable_tensor_core=False, float16_compute=False, **kwargs
):
    WeightInitializer = initializers.update_initializer(
        WeightInitializer, weight_init, ("XavierFill", {})
    )
    BiasInitializer = initializers.update_initializer(
        BiasInitializer, bias_init, ("ConstantFill", {})
    )
    if not model.init_params:
        # The model does not own parameter initialization, so both params
        # refer to externally provided blobs.
        WeightInitializer = initializers.ExternalInitializer()
        BiasInitializer = initializers.ExternalInitializer()

    blob_out = blob_out or model.net.NextName()
    bias_tags = [ParameterTags.BIAS]
    if 'freeze_bias' in kwargs:
        # A frozen bias is additionally tagged as a computed param so that it
        # is excluded from gradient-based optimization.
        bias_tags.append(ParameterTags.COMPUTED_PARAM)

    weight = model.create_param(
        param_name=blob_out + '_w',
        shape=[dim_out, dim_in],
        initializer=WeightInitializer,
        tags=ParameterTags.WEIGHT
    )
    bias = model.create_param(
        param_name=blob_out + '_b',
        shape=[dim_out, ],
        initializer=BiasInitializer,
        tags=bias_tags
    )

    # Use the TensorCore engine for the FC op (effective on supported GPUs).
    if enable_tensor_core:
        kwargs['engine'] = 'TENSORCORE'

    # Run the FC compute kernel in float16.
    if float16_compute:
        kwargs['float16_compute'] = True

    return op_call([blob_in, weight, bias], blob_out, **kwargs)

def fc(model, *args, **kwargs):
    return _FC_or_packed_FC(model, model.net.FC, *args, **kwargs)


def packed_fc(model, *args, **kwargs):
    return _FC_or_packed_FC(model, model.net.PackedFC, *args, **kwargs)
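
# Illustrative usage sketch (not part of the module): build an FC layer on a
# ModelHelper. The blob name 'data' and the dimensions are hypothetical; in
# model code these helpers are usually reached via caffe2.python.brew.
#
#     from caffe2.python import model_helper
#     from caffe2.python.helpers.fc import fc
#
#     model = model_helper.ModelHelper(name="fc_example")
#     # 'data' is assumed to be an [N, 128] float blob provided by the caller.
#     fc1 = fc(model, 'data', 'fc1', dim_in=128, dim_out=64)
#     # Optionally request the TensorCore engine and fp16 compute on GPUs.
#     fc2 = fc(model, fc1, 'fc2', dim_in=64, dim_out=10,
#              enable_tensor_core=True, float16_compute=True)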

def fc_decomp(
    model, blob_in, blob_out, dim_in, dim_out,
    rank_approx=5, weight_init=None, bias_init=None,
    WeightInitializer=None, BiasInitializer=None, **kwargs
):
    """FC layer with a low-rank decomposition of the weight matrix.

    The weight is stored as two factors, u ([dim_out, rank_approx]) and
    v ([dim_in, rank_approx]). Here we assume that the rank of the original
    weight is bigger than rank_approx (default 5).
    """
    WeightInitializer = initializers.update_initializer(
        WeightInitializer, weight_init, ("XavierFill", {})
    )
    BiasInitializer = initializers.update_initializer(
        BiasInitializer, bias_init, ("ConstantFill", {})
    )
    blob_out = blob_out or model.net.NextName()
    u = model.create_param(
        param_name=blob_out + '_u',
        shape=[dim_out, rank_approx],
        initializer=WeightInitializer,
    )
    v = model.create_param(
        param_name=blob_out + '_v',
        shape=[dim_in, rank_approx],
        initializer=WeightInitializer,
    )
    bias = model.create_param(
        param_name=blob_out + '_b',
        shape=[dim_out, ],
        initializer=BiasInitializer,
    )
    return model.net.FC_Decomp([blob_in, u, v, bias], blob_out, **kwargs)
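
# Illustrative usage sketch (hypothetical names and sizes): fc_decomp keeps the
# weight as two factors u ([dim_out, rank_approx]) and v ([dim_in, rank_approx]),
# i.e. roughly W ~ u * v^T, which the FC_Decomp op consumes directly.
#
#     from caffe2.python import model_helper
#     from caffe2.python.helpers.fc import fc_decomp
#
#     model = model_helper.ModelHelper(name="fc_decomp_example")
#     out = fc_decomp(model, 'data', 'fc_low_rank',
#                     dim_in=1024, dim_out=512, rank_approx=5)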

def fc_prune(
    model, blob_in, blob_out, dim_in, dim_out,
    weight_init=None, bias_init=None, mask_init=None,
    threshold=0.00001, need_compress_rate=False,
    comp_lb=0.05, **kwargs
):
    """FC layer wired through the FC_Prune op.

    The weight is paired with a pruning mask, an accumulated-gradient blob
    and a magnitude threshold used to decide which entries to prune.
    """
    weight_init = weight_init if weight_init else ('XavierFill', {})
    bias_init = bias_init if bias_init else ('ConstantFill', {})
    mask_init = mask_init if mask_init else ('ConstantFill', {})
    blob_out = blob_out or model.net.NextName()
    compress_rate = blob_out + '_compress_rate'
    if model.init_params:
        compress_lb = model.param_init_net.ConstantFill(
            [], blob_out + '_lb', shape=[1], value=comp_lb)
        weight = model.param_init_net.__getattr__(weight_init[0])(
            [], blob_out + '_w', shape=[dim_out, dim_in], **weight_init[1])
        # The mask starts out all-ones, i.e. nothing is pruned initially.
        mask = model.param_init_net.ConstantFill(
            [], blob_out + '_m', shape=[dim_out, dim_in], value=1.0)
        ag_dw = model.param_init_net.__getattr__(mask_init[0])(
            [], blob_out + '_ag_dw', shape=[dim_out, dim_in], **mask_init[1])
        bias = model.param_init_net.__getattr__(bias_init[0])(
            [], blob_out + '_b', shape=[dim_out, ], **bias_init[1])
        mask_seq = model.param_init_net.__getattr__(mask_init[0])(
            [], blob_out + '_mask_seq', shape=[dim_out, dim_in], **mask_init[1])
        thres = model.param_init_net.ConstantFill(
            [], blob_out + '_thres', shape=[1], value=threshold)
    else:
        compress_lb = core.ScopedBlobReference(
            blob_out + '_lb', model.param_init_net)
        weight = core.ScopedBlobReference(
            blob_out + '_w', model.param_init_net)
        bias = core.ScopedBlobReference(
            blob_out + '_b', model.param_init_net)
        mask = core.ScopedBlobReference(
            blob_out + '_m', model.param_init_net)
        ag_dw = core.ScopedBlobReference(
            blob_out + '_ag_dw', model.param_init_net)
        mask_seq = core.ScopedBlobReference(
            blob_out + '_mask_seq', model.param_init_net)
        thres = core.ScopedBlobReference(
            blob_out + '_thres', model.param_init_net)

    model.AddParameter(weight)
    model.AddParameter(bias)
    if need_compress_rate:
        return model.net.FC_Prune(
            [blob_in, weight, mask, bias, ag_dw, mask_seq, thres, compress_lb],
            [blob_out, compress_rate], **kwargs)
    else:
        return model.net.FC_Prune(
            [blob_in, weight, mask, bias, ag_dw, mask_seq, thres, compress_lb],
            blob_out, **kwargs)
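
# Illustrative usage sketch (hypothetical names and sizes): with
# need_compress_rate=True the FC_Prune op has two outputs, so the helper
# should return both the layer output and the '<blob_out>_compress_rate' blob.
#
#     from caffe2.python import model_helper
#     from caffe2.python.helpers.fc import fc_prune
#
#     model = model_helper.ModelHelper(name="fc_prune_example")
#     out, rate = fc_prune(model, 'data', 'fc_pruned',
#                          dim_in=256, dim_out=128,
#                          threshold=1e-5, need_compress_rate=True)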

def fc_sparse(
    model, blob_in, blob_out, w_csr, iw, jw, bias,
    **kwargs
):
    """FC_Sparse: only takes in already allocated weight blobs."""
    if not (w_csr and iw and jw and bias):
        print("Warning: fc_sparse expects pre-allocated w_csr, iw, jw and "
              "bias blobs.")
    model.AddParameter(w_csr)
    model.AddParameter(iw)
    model.AddParameter(jw)
    model.AddParameter(bias)
    return model.net.FC_Sparse([blob_in, w_csr, iw, jw, bias],
                               blob_out, **kwargs)
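
# Illustrative usage sketch (hypothetical names): fc_sparse does not create any
# parameters itself; w_csr, iw, jw and bias are assumed to be blobs the caller
# has already allocated (e.g. on model.param_init_net) holding the CSR
# representation of the weight plus the bias.
#
#     from caffe2.python import model_helper
#     from caffe2.python.helpers.fc import fc_sparse
#
#     model = model_helper.ModelHelper(name="fc_sparse_example")
#     out = fc_sparse(model, 'data', 'fc_sparse_out',
#                     w_csr=w_csr, iw=iw, jw=jw, bias=b)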