# 大数据下基于Tensorflow框架的深度学习示例教程
# (Deep-learning example tutorial based on the TensorFlow framework in a big-data setting.)

print_functionimport mathimport osimport tensorflow as tfflags = tf.app.flags# Flags for configuring the taskflags.DEFINE_string("job_name", None, "job name: worker or ps")flags.DEFINE_integer("task_index", 0, "Worker task index, should be >= 0. task_index=0 is " "the chief worker task the performs the variable " "initialization")flags.DEFINE_string("ps_hosts", "", "Comma-separated list of hostname:port pairs")flags.DEFINE_string("worker_hosts", "", "Comma-separated list of hostname:port pairs")# Training related flagsflags.DEFINE_string("data_dir", None, "Directory where the mnist data is stored")flags.DEFINE_string("train_dir", None, "Directory for storing the checkpoints")flags.DEFINE_integer("hidden1", 128, "Number of units in the 1st hidden layer of the NN")flags.DEFINE_integer("hidden2", 128, "Number of units in the 2nd hidden layer of the NN")flags.DEFINE_integer("batch_size", 100, "Training batch size")flags.DEFINE_float("learning_rate", 0.01, "Learning rate")FLAGS = flags.FLAGSTRAIN_FILE = "train.tfrecords"NUM_CLASSES = 10IMAGE_SIZE = 28IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZEdef inference(images, hidden1_units, hidden2_units): with tf.name_scope('hidden1'): weights = tf.Variable( tf.truncated_normal([IMAGE_PIXELS, hidden1_units], stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),name='weights') biases = tf.Variable(tf.zeros([hidden1_units]),name='biases') hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases) with tf.name_scope('hidden2'): weights = tf.Variable( tf.truncated_normal([hidden1_units, hidden2_units], stddev=1.0 / math.sqrt(float(hidden1_units))), name='weights') biases = tf.Variable(tf.zeros([hidden2_units]), name='biases') hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases) with tf.name_scope('softmax_linear'): weights = tf.Variable( tf.truncated_normal([hidden2_units, NUM_CLASSES], stddev=1.0 / math.sqrt(float(hidden2_units))),name='weights') biases = tf.Variable(tf.zeros([NUM_CLASSES]),name='biases') logits = tf.matmul(hidden2, weights) 
+ biases return logitsdef lossFunction(logits, labels): labels = tf.to_int64(labels) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits( logits, labels, name='xentropy') loss = tf.reduce_mean(cross_entropy, name='xentropy_mean'