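"""Convolutional MNIST classifier for SageMaker's legacy TensorFlow mode,
adapted from aws/amazon-sagemaker-examples.

SageMaker imports this module and drives the functions it defines: model_fn
builds the tf.estimator graph for training, evaluation, and prediction;
train_input_fn and eval_input_fn read TFRecord data from the training
directory; and serving_input_fn declares the input signature used when the
trained model is exported for serving.
"""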
import os

import tensorflow as tf

# Public alias for the mode constants (TRAIN / EVAL / PREDICT); equivalent to
# the private tensorflow.python.estimator.model_fn.ModeKeys import upstream.
Modes = tf.estimator.ModeKeys

INPUT_TENSOR_NAME = "inputs"
SIGNATURE_NAME = "predictions"

def model_fn(features, labels, mode, params):
    # This script takes learning_rate as a hyperparameter.
    learning_rate = params.get("learning_rate", 0.1)

    # Input Layer
    input_layer = tf.reshape(features[INPUT_TENSOR_NAME], [-1, 28, 28, 1])

    # Convolutional Layer #1
    conv1 = tf.layers.conv2d(
        inputs=input_layer, filters=32, kernel_size=[5, 5], padding="same", activation=tf.nn.relu
    )

    # Pooling Layer #1
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

    # Convolutional Layer #2 and Pooling Layer #2
    conv2 = tf.layers.conv2d(
        inputs=pool1, filters=64, kernel_size=[5, 5], padding="same", activation=tf.nn.relu
    )
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    # Dense Layer
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    dropout = tf.layers.dropout(inputs=dense, rate=0.4, training=(mode == Modes.TRAIN))

    # Logits Layer
    logits = tf.layers.dense(inputs=dropout, units=10)

    # Define operations
    if mode in (Modes.PREDICT, Modes.EVAL):
        predicted_indices = tf.argmax(input=logits, axis=1)
        probabilities = tf.nn.softmax(logits, name="softmax_tensor")

    if mode in (Modes.TRAIN, Modes.EVAL):
        global_step = tf.train.get_or_create_global_step()
        label_indices = tf.cast(labels, tf.int32)
        loss = tf.losses.softmax_cross_entropy(
            onehot_labels=tf.one_hot(label_indices, depth=10), logits=logits
        )
        tf.summary.scalar("OptimizeLoss", loss)

    if mode == Modes.PREDICT:
        predictions = {"classes": predicted_indices, "probabilities": probabilities}
        export_outputs = {SIGNATURE_NAME: tf.estimator.export.PredictOutput(predictions)}
        return tf.estimator.EstimatorSpec(
            mode, predictions=predictions, export_outputs=export_outputs
        )

    if mode == Modes.TRAIN:
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        train_op = optimizer.minimize(loss, global_step=global_step)
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

    if mode == Modes.EVAL:
        eval_metric_ops = {"accuracy": tf.metrics.accuracy(label_indices, predicted_indices)}
        return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=eval_metric_ops)
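
# Shape walk-through for the network above (same-padding convolutions keep
# height and width; each 2x2, stride-2 pool halves them):
#   [N, 28, 28, 1] -> conv1 [N, 28, 28, 32] -> pool1 [N, 14, 14, 32]
#   -> conv2 [N, 14, 14, 64] -> pool2 [N, 7, 7, 64] -> flatten [N, 3136]
#   -> dense [N, 1024] -> logits [N, 10]
# which is why pool2 is reshaped to [-1, 7 * 7 * 64].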

def serving_input_fn(params):
    inputs = {INPUT_TENSOR_NAME: tf.placeholder(tf.float32, [None, 784])}
    return tf.estimator.export.ServingInputReceiver(inputs, inputs)
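
# A possible local export path (a sketch, not part of this script's contract:
# in SageMaker's legacy TensorFlow mode the container invokes serving_input_fn
# itself when exporting the trained model):
#
#     estimator.export_savedmodel("/tmp/export", lambda: serving_input_fn({}))
#
# The receiver accepts batches of flattened 28x28 images as float32 tensors of
# shape [None, 784] under the "inputs" key.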

def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            "image_raw": tf.FixedLenFeature([], tf.string),
            "label": tf.FixedLenFeature([], tf.int64),
        },
    )
    # Decode the raw bytes into 784 pixel values and scale them to [0, 1].
    image = tf.decode_raw(features["image_raw"], tf.uint8)
    image.set_shape([784])
    image = tf.cast(image, tf.float32) * (1.0 / 255)
    label = tf.cast(features["label"], tf.int32)
    return image, label

def train_input_fn(training_dir, params):
    return _input_fn(training_dir, "train.tfrecords", batch_size=100)


def eval_input_fn(training_dir, params):
    return _input_fn(training_dir, "test.tfrecords", batch_size=100)

def _input_fn(training_dir, training_filename, batch_size=100):
    # Stream examples from the requested TFRecord file in shuffled batches.
    data_file = os.path.join(training_dir, training_filename)
    filename_queue = tf.train.string_input_producer([data_file])
    image, label = read_and_decode(filename_queue)
    images, labels = tf.train.batch(
        [image, label], batch_size=batch_size, capacity=1000 + 3 * batch_size
    )
    return {INPUT_TENSOR_NAME: images}, labels
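
if __name__ == "__main__":
    # Minimal local smoke test: a sketch under assumptions, not part of the
    # SageMaker contract (the container imports this module and calls the
    # functions above itself). SM_CHANNEL_TRAINING and the fallback paths are
    # assumptions; the directory must already hold train.tfrecords and
    # test.tfrecords.
    data_dir = os.environ.get("SM_CHANNEL_TRAINING", "/tmp/mnist-data")
    estimator = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir="/tmp/mnist-model",
        params={"learning_rate": 0.001},
    )
    estimator.train(input_fn=lambda: train_input_fn(data_dir, {}), steps=200)
    print(estimator.evaluate(input_fn=lambda: eval_input_fn(data_dir, {}), steps=10))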