TensorFlow: restored model gives wrong results when fed new data

I trained a TensorFlow model and saved its tensors and operations. After restoring the model I feed it new data, but the result I get is wrong. My code is below, please help.

import tensorflow as tf

class DCNN():

    def __init__(self, batch_size, sentence_length, num_filters, num_class, vocab_size, embed_dim, top_k, k1):
        self.batch_size = batch_size
        self.sentence_length = sentence_length
        self.num_filters = num_filters
        self.num_class = num_class
        self.vocab_size = vocab_size
        self.embed_dim = embed_dim
        self.top_k = top_k
        self.k1 = k1

        self.global_step = tf.Variable(0, name="global_step", trainable=False)

        self.sent = tf.placeholder(tf.int64, [None, sentence_length], name="sent")
        self.y = tf.placeholder(tf.float64, [None, num_class], name="y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        self.ws = [7, 5]
        self.num_hidden = 100
        with tf.name_scope("embedding_layer"):
            self.W = tf.Variable(tf.random_uniform([self.vocab_size + 1, self.embed_dim], -1.0, 1.0), name="embed_W")
            # [batch_size, sentence_length, embed_dim, 1]

        def init_weights(shape, name):
            return tf.Variable(tf.truncated_normal(shape, stddev=0.01, dtype=tf.float32), name=name)

        W1 = init_weights([self.ws[0], self.embed_dim, 1, self.num_filters[0]], "W1")
        # compared with the bias of a regular CNN, this bias has an extra embed_dim dimension
        b1 = tf.Variable(tf.constant(0.1, shape=[self.num_filters[0], self.embed_dim]), name="b1")

        W2 = init_weights([self.ws[1], int(self.embed_dim / 2), self.num_filters[0], self.num_filters[1]], "W2")
        b2 = tf.Variable(tf.constant(0.1, shape=[self.num_filters[1], self.embed_dim]), name="b2")

        Wh = init_weights([int(self.top_k * self.embed_dim * self.num_filters[1] / 4), self.num_hidden], "Wh")
        bh = tf.Variable(tf.constant(0.1, shape=[self.num_hidden]), name="bh")

        Wo = init_weights([self.num_hidden, self.num_class], "Wo")

        self.out = self.train(W1, W2, b1, b2, Wh, bh, Wo)
        self.predict_op = tf.argmax(self.out, 1, name='predictions')
        self.acc = self.accuracy()
        self.cost = self.cost()
        self.train_op = self.train_op().....
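
For context, the checkpoint used below (runs/1522129502/checkpoints/model-4000) is written by my training script with a plain tf.train.Saver. The sketch below is only an outline of that saving side: the constructor arguments are placeholder values, the training loop is omitted, and it relies on the DCNN methods that are cut off above (train, accuracy, cost, train_op).

# Outline of the saving side; the hyperparameter values here are placeholders, not my real settings.
dcnn = DCNN(batch_size=64, sentence_length=56, num_filters=[6, 14],
            num_class=15, vocab_size=50000, embed_dim=100, top_k=4, k1=19)
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # ... training loop feeding dcnn.sent, dcnn.y and dcnn.dropout_keep_prob ...
    saver.save(sess, "runs/1522129502/checkpoints/model",
               global_step=dcnn.global_step)  # writes model-<step>.meta, .index, .data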

I restore the model and run a prediction like this:

checkpoint_file='runs/1522129502/checkpoints/model-4000'
with tf.Session() as sess:
    saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
    saver.restore(sess, checkpoint_file)
    #print_tensors_in_checkpoint_file(checkpoint_file, None, True)
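    # For reference, the commented-out check above comes from the inspect_checkpoint helper:
    #   from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
    #   print_tensors_in_checkpoint_file(checkpoint_file, None, True)
    # It lists every variable stored in the model-4000 checkpoint.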

    graph = tf.get_default_graph()

    #for op in graph.get_operations():  
    #    print(op.name,"======",op.values()) 
    print("begin to test!")
    # Collect the predictions here

    #graph = sess.graph

    # Get the placeholders from the graph by name
    sent = graph.get_tensor_by_name("sent:0")
    dropout_keep_prob = graph.get_tensor_by_name("dropout_keep_prob:0")

    def dev_step(x_test, y_test,batch_size, writer=None):
       """
        Evaluates model on a dev set
        """
        number_examples = len(x_test)
        eval_loss, eval_acc, eval_counter = 0.0, 0.0, 0
        eval_out = []

        for start, end in zip(range(0, number_examples, batch_size), range(batch_size, number_examples, batch_size)):

            print("start=",start, " end=", end)

            batch_in = x_test[start:end]
            print("in lenght:",len(batch_in))
            predictions_res  = sess.run("predictions:0", feed_dict={sent:batch_in,  dropout_keep_prob:1.0})
            print("batch=",len(predictions_res),predictions_res).....

The input batch has 64 examples, but the result has 22764 entries; 22764 is the total number of examples in the test set that was used during training. For example:

in lenght: 64

batch= 22764 [ 0  0  0 ..., 14 14 14]
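
For reference, here is a small self-contained sketch of the restore-and-feed-by-name pattern I believe I am following. The tensor names x and predictions below are made up for this toy example, not taken from my real graph; in this toy case the fetched predictions have exactly one row per example in the fed batch, which is what I expected to see above.

import numpy as np
import tensorflow as tf

# Build and save a tiny graph with a named placeholder and a named prediction op.
x = tf.placeholder(tf.float32, [None, 3], name="x")
logits = tf.layers.dense(x, 5)
pred = tf.argmax(logits, 1, name="predictions")
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, "/tmp/toy_model")

# Restore it in a fresh graph and feed a batch of 64 rows.
tf.reset_default_graph()
with tf.Session() as sess:
    saver = tf.train.import_meta_graph("/tmp/toy_model.meta")
    saver.restore(sess, "/tmp/toy_model")
    graph = tf.get_default_graph()
    x_restored = graph.get_tensor_by_name("x:0")
    out = sess.run("predictions:0", feed_dict={x_restored: np.zeros((64, 3), dtype=np.float32)})
    print(out.shape)  # (64,) -- one prediction per fed example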
