print_function
import math
import os
import tensorflow
as tfflags = tf.app.flagsflags.DEFINE_string(
"job_name",
None,
"job name: worker or ps")flags.DEFINE_integer(
"task_index",
0,
"Worker task index, should be >= 0. task_index=0 is " "the chief worker task the performs the variable " "initialization")flags.DEFINE_string(
"ps_hosts",
"",
"Comma-separated list of hostname:port pairs")flags.DEFINE_string(
"worker_hosts",
"",
"Comma-separated list of hostname:port pairs")flags.DEFINE_string(
"data_dir",
None,
"Directory where the mnist data is stored")flags.DEFINE_string(
"train_dir",
None,
"Directory for storing the checkpoints")flags.DEFINE_integer(
"hidden1",
128,
"Number of units in the 1st hidden layer of the NN")flags.DEFINE_integer(
"hidden2",
128,
"Number of units in the 2nd hidden layer of the NN")flags.DEFINE_integer(
"batch_size",
100,
"Training batch size")flags.DEFINE_float(
"learning_rate",
0.01,
"Learning rate")FLAGS = flags.FLAGSTRAIN_FILE =
"train.tfrecords"NUM_CLASSES =
10IMAGE_SIZE =
28IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
def inference(images, hidden1_units, hidden2_units):
    """Build the forward pass of a two-hidden-layer MLP for MNIST.

    Args:
        images: float tensor of shape (batch, IMAGE_PIXELS).
        hidden1_units: number of units in the first hidden layer.
        hidden2_units: number of units in the second hidden layer.

    Returns:
        logits: float tensor of shape (batch, NUM_CLASSES) with unscaled
        class scores. Softmax is NOT applied here; the loss op applies it.
    """
    # Hidden layer 1: ReLU(images @ W + b). Weights drawn from a truncated
    # normal with stddev scaled by 1/sqrt(fan_in) to keep activations stable.
    with tf.name_scope('hidden1'):
        weights = tf.Variable(
            tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
                                stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden1_units]), name='biases')
        hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
    # Hidden layer 2: same construction, fan_in = hidden1_units.
    with tf.name_scope('hidden2'):
        weights = tf.Variable(
            tf.truncated_normal([hidden1_units, hidden2_units],
                                stddev=1.0 / math.sqrt(float(hidden1_units))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden2_units]), name='biases')
        hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
    # Output layer: linear projection to class scores (no activation).
    with tf.name_scope('softmax_linear'):
        weights = tf.Variable(
            tf.truncated_normal([hidden2_units, NUM_CLASSES],
                                stddev=1.0 / math.sqrt(float(hidden2_units))),
            name='weights')
        biases = tf.Variable(tf.zeros([NUM_CLASSES]), name='biases')
        logits = tf.matmul(hidden2, weights) + biases
    return logits
def lossFunction(logits, labels): labels = tf.to_int64(labels) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits( logits, labels, name=
'xentropy') loss = tf.reduce_mean(cross_entropy, name=
'xentropy_mean'