    )
    return loss
def training(loss, learning_rate):
    tf.summary.scalar(loss.op.name, loss)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op
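To see how training() fits together, here is a minimal sketch of driving the returned train_op in a session; the toy quadratic loss and the 0.01 learning rate are illustrative stand-ins, not part of the example model:

    # A toy scalar loss standing in for the value produced by the loss
    # function above; any differentiable scalar tensor would do.
    x = tf.Variable(3.0)
    toy_loss = tf.square(x)

    train_op = training(toy_loss, learning_rate=0.01)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(10):
            # Each run applies one gradient-descent step and increments
            # the global_step variable created inside training().
            sess.run(train_op)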
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image.set_shape([IMAGE_PIXELS])
    # Scale raw byte values from [0, 255] to [-0.5, 0.5].
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    label = tf.cast(features['label'], tf.int32)
    return image, label
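For reference, a file that read_and_decode() can parse could be produced with a writer sketch like the following; the file name toy.tfrecords, the random pixels, and the label value 7 are illustrative assumptions, and IMAGE_PIXELS is the 28 * 28 = 784 MNIST constant used above:

    import numpy as np

    # Write a single record whose feature names match what
    # read_and_decode() expects ('image_raw' and 'label').
    with tf.python_io.TFRecordWriter('toy.tfrecords') as writer:
        pixels = np.random.randint(0, 256, IMAGE_PIXELS, dtype=np.uint8)
        example = tf.train.Example(features=tf.train.Features(feature={
            'image_raw': tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[pixels.tobytes()])),
            'label': tf.train.Feature(
                int64_list=tf.train.Int64List(value=[7])),
        }))
        writer.write(example.SerializeToString())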
def inputs(batch_size):
    """Reads input data.

    Args:
        batch_size: Number of examples per returned batch.

    Returns:
        A tuple (images, labels), where:
        * images is a float tensor with shape [batch_size, mnist.IMAGE_PIXELS]
          in the range [-0.5, 0.5].
        * labels is an int32 tensor with shape [batch_size] with the true
          label, a number in the range [0, mnist.NUM_CLASSES).
    """
    filename = os.path.join(FLAGS.data_dir, TRAIN_FILE)
    with tf.name_scope('input'):
        filename_queue = tf.train.string_input_producer([filename])
        image, label = read_and_decode(filename_queue)
        images, sparse_labels = tf.train.shuffle_batch(
            [image, label], batch_size=batch_size, num_threads=2,
            capacity=1000 + 3 * batch_size,
            min_after_dequeue=1000)
        return images, sparse_labels
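Because inputs() builds on TF 1.x input queues, the returned batch tensors yield data only after the queue runners are started. A minimal consumption sketch, assuming FLAGS.data_dir and the TFRecord file are already in place:

    images, labels = inputs(batch_size=100)

    with tf.Session() as sess:
        # shuffle_batch is fed by background threads; start them through
        # a Coordinator so they can be shut down cleanly afterwards.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            image_batch, label_batch = sess.run([images, labels])
            # image_batch: (100, 784) float32, label_batch: (100,) int32
        finally:
            coord.request_stop()
            coord.join(threads)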
def device_and_target():
    if FLAGS.job_name is None:
        raise ValueError("Must specify an explicit `job_name`")
    print("Running distributed training")
    if FLAGS.task_index is None or FLAGS.task_index == "":
        raise ValueError("Must specify an explicit `task_index`")
    if FLAGS.ps_hosts is None or FLAGS.ps_hosts == "":
        raise ValueError("Must specify `ps_hosts`")