20170514222210919

20170514222232901

20170514222254419


20170514222322232






20170514222526973



20170514222613950


import math

import tensorflow as tf

# MNIST has 10 classes, one for each of the digits 0 through 9.
NUM_CLASSES = 10
# MNIST images are 28x28 pixels, flattened into 784-dimensional feature vectors.
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
batch_size = 50 # number of samples per batch
hidden1_units = 20 # size of the first hidden layer
hidden2_units = 15 # size of the second hidden layer
learning_rate = 0.1 # learning rate for the optimizer

# Graph inputs: a batch of flattened images and their integer class labels.
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size, IMAGE_PIXELS))
labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))

# Build the forward-prediction path of the model (from input to predicted output).
def inference(images, hidden1_units, hidden2_units):
  """Build the forward pass: two ReLU hidden layers plus a linear output layer.

  Args:
    images: float32 tensor of shape (batch_size, IMAGE_PIXELS).
    hidden1_units: number of units in the first hidden layer.
    hidden2_units: number of units in the second hidden layer.

  Returns:
    logits: unnormalized class scores of shape (batch_size, NUM_CLASSES).
  """
  def _dense(scope_name, inputs, fan_in, fan_out, activation=None):
    # One fully-connected layer: y = act(x*W + b), with weights drawn from a
    # truncated normal scaled by 1/sqrt(fan_in).
    with tf.name_scope(scope_name):
      weights = tf.Variable(
          tf.truncated_normal([fan_in, fan_out],
                              stddev=1.0 / math.sqrt(float(fan_in))),
                              name='weights')
      biases = tf.Variable(tf.zeros([fan_out]), name='biases')
      pre_activation = tf.matmul(inputs, weights) + biases
      return activation(pre_activation) if activation is not None else pre_activation

  # Hidden 1: y1 = relu(x*W1 + b1)
  hidden1 = _dense('hidden1', images, IMAGE_PIXELS, hidden1_units, tf.nn.relu)
  # Hidden 2: y2 = relu(y1*W2 + b2)
  hidden2 = _dense('hidden2', hidden1, hidden1_units, hidden2_units, tf.nn.relu)
  # Linear output: logits = y2*W3 + b3 (no activation; softmax applied in the loss)
  return _dense('softmax_linear', hidden2, hidden2_units, NUM_CLASSES)

# Compute the output-layer loss from logits and labels.
def loss(logits, labels):
  """Return the mean softmax cross-entropy over the batch.

  Args:
    logits: unnormalized class scores, shape (batch_size, NUM_CLASSES).
    labels: integer class labels, shape (batch_size,).

  Returns:
    Scalar tensor: the batch-mean cross-entropy loss.
  """
  labels64 = tf.to_int64(labels)
  per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels64, logits=logits, name='xentropy')
  return tf.reduce_mean(per_example_loss, name='xentropy_mean')

# Attach training ops to the loss (the nodes that produce and apply gradients).
def training(loss, learning_rate):
  """Create and return the op that performs one gradient-descent step.

  Args:
    loss: scalar loss tensor to minimize.
    learning_rate: learning rate for gradient descent.

  Returns:
    train_op: op that applies one update and increments the global step.
  """
  # Record the loss value as a scalar summary for TensorBoard.
  tf.summary.scalar('loss', loss)
  # Counter tracking how many training steps have been applied.
  global_step = tf.Variable(0, name='global_step', trainable=False)
  # minimize() both computes and applies the gradients, bumping global_step.
  optimizer = tf.train.GradientDescentOptimizer(learning_rate)
  return optimizer.minimize(loss=loss, global_step=global_step)

# Evaluate how well the logits predict the class labels.
def evaluation(logits, labels):
  """Return the number of correctly classified samples in the batch.

  A prediction counts as correct when the true label is the top-1 logit.
  """
  is_correct = tf.nn.in_top_k(logits, labels, 1)
  # Cast booleans to ints and sum to count correct predictions in this batch.
  return tf.reduce_sum(tf.cast(is_correct, tf.int32))


# Wire up the full graph: forward pass, loss, training op, and evaluation op.
logits = inference(images_placeholder,hidden1_units, hidden2_units)

batch_loss = loss(logits=logits, labels=labels_placeholder)

train_on_batch = training(loss=batch_loss, learning_rate=learning_rate)

correct_counts =  evaluation(logits=logits, labels=labels_placeholder)

## Write the graph definition via summary.FileWriter so TensorBoard can render it.
writer = tf.summary.FileWriter("logs/mnistboard", tf.get_default_graph())
writer.close()

20170514223313236






 收藏 (0)  打赏  点赞 (1)

菜鸟后飞 12个月前


TensorFlow 1.2.0 测试OK


(0) 回复

您可能需要以下文章!

友情介绍

powered by studyai.com 2017