|
def | __init__ (self, alpha=0.01, beta=1e-4, lambda1=0, lambda2=0, sparse_dedup_aggregator=None, engine='') |
|
def | scale_learning_rate (self, scale) |
|
def | __init__ (self) |
|
def | __call__ (self, net, param_init_net, param, grad=None) |
|
def | get_cpu_blob_name (self, base_str, node_name='') |
|
def | get_gpu_blob_name (self, base_str, gpu_id, node_name) |
|
def | make_unique_blob_name (self, base_str) |
|
def | build_lr (self, net, param_init_net, base_learning_rate, learning_rate_blob=None, policy="fixed", iter_val=0, **kwargs)
|
def | add_lr_multiplier (self, lr_multiplier) |
|
def | get_auxiliary_parameters (self) |
|
def | scale_learning_rate (self, *args, **kwargs)
|
def | create_lars_inputs (self, param_init_net, weight_decay, trust, lr_max) |
|
|
| alpha |
|
| beta |
|
| lambda1 |
|
| lambda2 |
|
| sparse_dedup_aggregator |
|
| engine |
|
|
def | dedup (net, sparse_dedup_aggregator, grad) |
|
Group Lasso FTRL Optimizer.
Definition at line 862 of file optimizer.py.
The documentation for this class was generated from the following file: optimizer.py