variables_names = [v.name for v in tf.trainable_variables()]
values = sess.run(variables_names)
for k, v in zip(variables_names, values):
    print("Variable: ", k)
    print("Shape: ", v.shape)
    print(v)
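This snippet assumes an active session sess whose variables have already been initialized; it prints the name, shape, and current value of every trainable variable in the default graph.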
# The learning rate decays with global_step
self.lr = tf.train.exponential_decay(self.learning_rate,
                                     global_step,
                                     self.decay_step,
                                     self.learning_rate_decay_factor,
                                     staircase=True)
# The last layer uses a 10x learning rate
self.lr_last = tf.train.exponential_decay(self.learning_rate * 10,
                                          global_step,
                                          self.decay_step,
                                          self.learning_rate_decay_factor,
                                          staircase=True)
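One way to actually apply the two schedules is one optimizer per rate, each updating its own variable group. The sketch below is an assumption, not part of the original code: the 'last' substring used to pick out the last layer's variables and the choice of plain SGD are both hypothetical.

base_vars = [v for v in tf.trainable_variables() if 'last' not in v.name]
last_vars = [v for v in tf.trainable_variables() if 'last' in v.name]
# One optimizer per learning-rate schedule.
opt_base = tf.train.GradientDescentOptimizer(self.lr)
opt_last = tf.train.GradientDescentOptimizer(self.lr_last)
# Only one minimize() call is given global_step, so the step counter
# (and hence both decay schedules) advances once per training iteration.
train_op = tf.group(
    opt_base.minimize(loss, var_list=base_vars),
    opt_last.minimize(loss, var_list=last_vars, global_step=global_step))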
beta1: A float value or a constant float tensor; the exponential decay rate for the 1st moment estimates.
beta2: A float value or a constant float tensor; the exponential decay rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability. This epsilon is "epsilon hat" in the Kingma and Ba paper (in the formula just before Section 2.1), not the epsilon in Algorithm 1 of the paper.
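For reference, a minimal sketch of constructing the optimizer with these arguments spelled out; the values shown are the TensorFlow 1.x defaults, and loss is assumed to be defined elsewhere:

optimizer = tf.train.AdamOptimizer(learning_rate=0.001,  # step size
                                   beta1=0.9,     # decay rate for 1st moment estimates
                                   beta2=0.999,   # decay rate for 2nd moment estimates
                                   epsilon=1e-08) # numerical-stability constant
train_op = optimizer.minimize(loss)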
with tf.Session() as sess:
    tf.global_variables_initializer().run()  # initialize all variables
    for i in range(TRAINING_STEPS):
        sess.run(train_op, feed_dict={x: data, y_: label})
        if i % 2000 == 0:
            print("After %d steps, mse_loss: %f" % (
                i, sess.run(mse_loss, feed_dict={x: data, y_: label})))
After 0 steps, mse_loss: 0.588501
After 2000 steps, mse_loss: 0.039796
After 4000 steps, mse_loss: 0.018524
After 6000 steps, mse_loss: 0.018494
After 8000 steps, mse_loss: 0.018374
After 10000 steps, mse_loss: 0.018358
After 12000 steps, mse_loss: 0.018356
After 14000 steps, mse_loss: 0.018355
After 16000 steps, mse_loss: 0.016440
After 18000 steps, mse_loss: 0.013988
After 20000 steps, mse_loss: 0.013142
After 22000 steps, mse_loss: 0.012886
After 24000 steps, mse_loss: 0.012700
After 26000 steps, mse_loss: 0.012550
After 28000 steps, mse_loss: 0.006441
After 30000 steps, mse_loss: 0.006439
After 32000 steps, mse_loss: 0.006438
After 34000 steps, mse_loss: 0.006438
After 36000 steps, mse_loss: 0.006445
After 38000 steps, mse_loss: 0.006438
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for i in range(TRAINING_STEPS):
        sess.run(train_op, feed_dict={x: data, y_: label})
        if i % 2000 == 0:
            print("After %d steps, loss: %f" % (
                i, sess.run(loss, feed_dict={x: data, y_: label})))
After 0 steps, loss: 0.705000
After 2000 steps, loss: 0.056949
After 4000 steps, loss: 0.045995
After 6000 steps, loss: 0.041472
After 8000 steps, loss: 0.040165
After 10000 steps, loss: 0.039961
After 12000 steps, loss: 0.039916
After 14000 steps, loss: 0.039912
After 16000 steps, loss: 0.039912
After 18000 steps, loss: 0.038334
After 20000 steps, loss: 0.038128
After 22000 steps, loss: 0.037962
After 24000 steps, loss: 0.037932
After 26000 steps, loss: 0.037921
After 28000 steps, loss: 0.037918
After 30000 steps, loss: 0.037910
After 32000 steps, loss: 0.037908
After 34000 steps, loss: 0.037910
After 36000 steps, loss: 0.037907
After 38000 steps, loss: 0.037905
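The regularized loss optimized in this second run is plausibly assembled with the collection mechanism demonstrated next: each weight matrix's L2 penalty is added to a 'losses' collection and summed together with the MSE term. A sketch under that assumption (the weights variable and the 0.003 regularization rate are hypothetical):

mse_loss = tf.reduce_mean(tf.square(y_ - y))  # data-fitting term
tf.add_to_collection('losses', mse_loss)
# For each layer, also collect an L2 penalty on its weights:
tf.add_to_collection('losses',
                     tf.contrib.layers.l2_regularizer(0.003)(weights))
# Total loss = MSE + all regularization terms in the collection.
loss = tf.add_n(tf.get_collection('losses'))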
import tensorflow as tf

tf.add_to_collection('losses', tf.constant(2.2))
tf.add_to_collection('losses', tf.constant(3.))

with tf.Session() as sess:
    print(sess.run(tf.get_collection('losses')))
    print(sess.run(tf.add_n(tf.get_collection('losses'))))
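Running this prints the collected tensors and then their sum: first [2.2, 3.0], then 5.2.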