diff --git a/main.py b/main.py
index f3d0fb1..a49575f 100644
--- a/main.py
+++ b/main.py
@@ -32,7 +32,6 @@ def train(conf, data):
                 conf.img_height, conf.img_width, conf.channel]))
             batch_y = one_hot(batch_y, conf.num_classes)
         else:
-            pointer = 0
             batch_X, pointer = get_batch(data, pointer, conf.batch_size)
         #batch_X, batch_y = next(data)
         data_dict = {X:batch_X}
@@ -40,7 +39,6 @@ def train(conf, data):
             #TODO extract one-hot classes
             data_dict[model.h] = batch_y
         _, cost,_f = sess.run([optimizer, model.loss, model.fc2], feed_dict=data_dict)
-        print _f[0]
         print "Epoch: %d, Cost: %f"%(i, cost)

     saver.save(sess, conf.ckpt_file)
diff --git a/models.py b/models.py
index 13177f6..ef732bc 100644
--- a/models.py
+++ b/models.py
@@ -56,6 +56,6 @@ def __init__(self, X, conf, h=None):
         self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(self.fc2,
             tf.cast(tf.reshape(self.X, [-1]), dtype=tf.int32)))

-        #self.pred = tf.reshape(tf.argmax(tf.nn.softmax(self.fc2), dimension=tf.rank(self.fc2) - 1), tf.shape(self.X))
-        self.pred = tf.reshape(tf.multinomial(tf.nn.softmax(self.fc2), num_samples=1, seed=100), tf.shape(self.X))
+        self.pred_argmax = tf.reshape(tf.argmax(tf.nn.softmax(self.fc2), dimension=tf.rank(self.fc2) - 1), tf.shape(self.X))
+        self.pred_sample = tf.reshape(tf.multinomial(tf.nn.softmax(self.fc2), num_samples=1, seed=100), tf.shape(self.X))