def scope(self, name)

def __init__(self, encoder_outputs, encoder_output_dim, encoder_lengths, vocab_size, attention_type, embedding_size, decoder_num_units, decoder_cells, residual_output_layers=None, name=None, weighted_encoder_outputs=None)

def get_state_names(self)

def get_outputs_with_grads(self)

def get_output_dim(self)

def get_attention_weights(self)

def apply(self, model, input_t, seq_lengths, states, timestep)

def apply_over_sequence(self, model, inputs, seq_lengths, initial_states)
|
|
name

num_layers

cell

use_attention

decoder_output_dim

output_indices
|
Definition at line 320 of file seq2seq_util.py.
The documentation for this class was generated from the following file: