LSTMStateTuple object has no attribute get_shape when adding a bidirectional encoder
Problem summary
I'm having trouble adding a bidirectional encoder to my NMT model. It adds a second LSTM cell that runs backward over the source data, which should improve translation quality. My implementation below is based on this tutorial.
I have checked both the NMT reference source (search for "bidirectional") and this answer, but the solution given in that answer did not work for me.
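For reference, this is roughly what I took away from those two sources (my own sketch of the idea, not code from either source; the placeholder tensors and num_units value are just stand-ins to make the snippet self-contained):

import tensorflow as tf

# Stand-ins for the pair of final states that tf.nn.bidirectional_dynamic_rnn
# returns in my real code below (one LSTMStateTuple per direction).
num_units = 8
fw_state = tf.nn.rnn_cell.LSTMStateTuple(c=tf.zeros([4, num_units]), h=tf.zeros([4, num_units]))
bw_state = tf.nn.rnn_cell.LSTMStateTuple(c=tf.zeros([4, num_units]), h=tf.zeros([4, num_units]))
bi_state = (fw_state, bw_state)

# Merge the two directions into a single state of width 2 * num_units ...
state_c = tf.concat([bi_state[0].c, bi_state[1].c], axis=1)
state_h = tf.concat([bi_state[0].h, bi_state[1].h], axis=1)
encoder_state = tf.nn.rnn_cell.LSTMStateTuple(c=state_c, h=state_h)

# ... which, as I understand it, means the decoder cell must then be built
# with 2 * num_units to accept this state.
decoder_cell = tf.nn.rnn_cell.BasicLSTMCell(2 * num_units)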
Model code
def __init__(self, hparams, iterator, mode):
    tf.set_random_seed(hparams.graph_seed)

    source, target_in, target_out, weights, source_lengths, target_lengths = iterator.get_next()
    true_batch_size = tf.size(source_lengths)

    # Lookup embeddings
    embedding_encoder = tf.get_variable("embedding_encoder", [hparams.src_vsize, hparams.src_emsize])
    encoder_emb_inp = tf.nn.embedding_lookup(embedding_encoder, source)

    embedding_decoder = tf.get_variable("embedding_decoder", [hparams.tgt_vsize, hparams.tgt_emsize])
    decoder_emb_inp = tf.nn.embedding_lookup(embedding_decoder, target_in)
    # Build and run Encoder LSTM
    if hparams.bidir_encoder:
        forward_cell = tf.nn.rnn_cell.BasicLSTMCell(hparams.num_units)
        backward_cell = tf.nn.rnn_cell.BasicLSTMCell(hparams.num_units)
        bi_outputs, bi_state = tf.nn.bidirectional_dynamic_rnn(forward_cell, backward_cell, encoder_emb_inp,
                                                               sequence_length=source_lengths, dtype=tf.float32)
        encoder_outputs = tf.concat(bi_outputs, -1)
        encoder_state = list()
        encoder_state.append(bi_state[0])  # forward
        encoder_state.append(bi_state[1])  # backward
        encoder_state = tuple(encoder_state)
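        # NOTE: encoder_state is now a 2-tuple of LSTMStateTuple objects (one
        # per direction), whereas the single BasicLSTMCell decoder below
        # expects exactly one LSTMStateTuple of size num_units.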
    else:
        encoder_cell = tf.nn.rnn_cell.BasicLSTMCell(hparams.num_units)
        encoder_outputs, encoder_state = tf.nn.dynamic_rnn(encoder_cell, encoder_emb_inp,
                                                           sequence_length=source_lengths, dtype=tf.float32)

    # Build Decoder cell
    decoder_cell = tf.nn.rnn_cell.BasicLSTMCell(hparams.num_units)

    # Add Helper (if needed) and ProjectionLayer and run Decoder LSTM
    projection_layer = layers_core.Dense(units=hparams.tgt_vsize, use_bias=False)
    if mode == 'TRAIN' or mode == 'EVAL':
        helper = tf.contrib.seq2seq.TrainingHelper(inputs=decoder_emb_inp,
                                                   sequence_length=target_lengths)
    elif mode == 'INFER':
        helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(embedding=embedding_decoder,
                                                          start_tokens=tf.fill([true_batch_size], hparams.sos),
                                                          end_token=hparams.eos)

    decoder = tf.contrib.seq2seq.BasicDecoder(cell=decoder_cell,
                                              helper=helper,
                                              initial_state=encoder_state,
                                              output_layer=projection_layer)

    outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder, maximum_iterations=tf.reduce_max(target_lengths))

    logits = outputs.rnn_output
    ids = outputs.sample_id
    # Calculate loss
    if mode == 'TRAIN' or mode == 'EVAL':
        crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target_out, logits=logits)
        target_weights = tf.sequence_mask(target_lengths, maxlen=tf.shape(target_out)[1], dtype=logits.dtype)
        masked_loss = crossent * target_weights * weights
        self.loss = tf.reduce_sum(masked_loss) / tf.cast(true_batch_size, tf.float32)
        self.char_loss = tf.reduce_sum(tf.reduce_sum(masked_loss, axis=1) / tf.cast(target_lengths, tf.float32)) / tf.cast(true_batch_size, tf.float32)
    # Calculate/clip gradients, then optimize model
    if mode == 'TRAIN':
        params = tf.trainable_variables()
        gradients = tf.gradients(self.loss, params)
        clipped_gradients, _ = tf.clip_by_global_norm(gradients, hparams.max_gradient_norm)
        optimizer = tf.train.AdamOptimizer(hparams.l_rate)
        self.update_step = optimizer.apply_gradients(zip(clipped_gradients, params))
Error traceback (only when hparams.bidir_encoder = True)
Traceback (most recent call last):
  File "/home/nave01314/IdeaProjects/tf-nmt/main.py", line 18, in <module>
    train_model = model_builder.create_train_model(hparams)
  File "/home/nave01314/IdeaProjects/tf-nmt/model_builder.py", line 34, in create_train_model
    mode='TRAIN')
  File "/home/nave01314/IdeaProjects/tf-nmt/models.py", line 54, in __init__
    outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder, maximum_iterations=tf.reduce_max(target_lengths))
  File "/home/nave01314/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/seq2seq/python/ops/decoder.py", line 286, in dynamic_decode
    swap_memory=swap_memory)
  File "/home/nave01314/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/control_flow_ops.py", line 2816, in while_loop
    result = loop_context.BuildLoop(cond, body, loop_vars, shape_invariants)
  File "/home/nave01314/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/control_flow_ops.py", line 2640, in BuildLoop
    pred, body, original_loop_vars, loop_vars, shape_invariants)
  File "/home/nave01314/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/control_flow_ops.py", line 2590, in _BuildLoop
    body_result = body(*packed_vars_for_body)
  File "/home/nave01314/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/seq2seq/python/ops/decoder.py", line 234, in body
    decoder_finished) = decoder.step(time, inputs, state)
  File "/home/nave01314/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/seq2seq/python/ops/basic_decoder.py", line 138, in step
    cell_outputs, cell_state = self._cell(inputs, state)
  File "/home/nave01314/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 183, in __call__
    return super(RNNCell, self).__call__(inputs, state)
  File "/home/nave01314/anaconda3/lib/python3.6/site-packages/tensorflow/python/layers/base.py", line 575, in __call__
    outputs = self.call(inputs, *args, **kwargs)
  File "/home/nave01314/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 438, in call
    self._linear = _Linear([inputs, h], 4 * self._num_units, True)
  File "/home/nave01314/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 1154, in __init__
    shapes = [a.get_shape() for a in args]
  File "/home/nave01314/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 1154, in <listcomp>
    shapes = [a.get_shape() for a in args]
AttributeError: 'LSTMStateTuple' object has no attribute 'get_shape'
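As far as I can tell, the failure is reproducible outside my model: tf.nn.bidirectional_dynamic_rnn returns a pair of LSTMStateTuples (one per direction), while a single BasicLSTMCell expects exactly one. A minimal sketch of my understanding (TF 1.x API; the sizes and zero tensors are arbitrary stand-ins):

import tensorflow as tf

num_units, batch = 8, 4
inputs = tf.zeros([batch, 10, 16])

fw = tf.nn.rnn_cell.BasicLSTMCell(num_units)
bw = tf.nn.rnn_cell.BasicLSTMCell(num_units)
_, bi_state = tf.nn.bidirectional_dynamic_rnn(fw, bw, inputs, dtype=tf.float32)

# bi_state is (LSTMStateTuple_fw, LSTMStateTuple_bw). A single BasicLSTMCell
# unpacks its state as (c, h), so h ends up being an LSTMStateTuple and the
# cell's internal h.get_shape() call fails with the same AttributeError.
decoder_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units)
step_input = tf.zeros([batch, 16])
decoder_cell(step_input, bi_state)  # AttributeError: 'LSTMStateTuple' object has no attribute 'get_shape'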