from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
    14     This layer implements the weighted sum:    15     weighted element-wise sum of input blobs.    23         name=
'blob_weighted_sum',
    26         super(BlobWeightedSum, self).__init__(model, name, input_record, **kwargs)
    28         self.
blobs = self.input_record.field_blobs()
    32             "BlobWeightedSum expects more than one input blobs"    35         assert len(input_record.field_types()[0].shape) > 0, (
    36             "BlobWeightedSum expects limited dimensions of the input tensor"    40             input_record.field_types()[0].shape == input_record.field_types()[i].shape
    42         ), 
"Shape of input blobs should be the same shape {}".format(
    43             input_record.field_types()[0].shape
    48                 "the size of init_weights should be the same as input blobs, "    49                 "expects {}, got {}".format(self.
num_weights, len(init_weights))
    56                 param_name=
"w_{}".format(idx),
    58                 initializer=(
'ConstantFill', {
'value': float(init_weights[idx])}),
    59                 optimizer=weight_optim
    64             input_record.field_types()[0],
    68     def add_ops(self, net):
    70             [x 
for pair 
in zip(self.
blobs, self.
weights) 
for x 
in pair],
 
def get_next_blob_reference(self, name)
def create_param(self, param_name, shape, initializer, optimizer, ps_param=None, regularizer=None)