Commit 545324f

mvsuspvrkhare authored and committed
Example migrating to script mode from framework mode (#806)
Confirmed with Marcio that Chuyang Deng is a team member who should have write permissions. Approving based on her review.
1 parent 1dea98b commit 545324f
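
For context on the migration this commit demonstrates, the sketch below shows roughly how an entry-point script like the one in this diff would be launched with the SageMaker Python SDK's TensorFlow estimator in script mode. It is a minimal illustration and not part of the commit: the entry-point file name, IAM role, instance type, framework version, and S3 input path are assumed placeholders, and the parameter names follow the SDK v1-era API that was current around the time of this change.

# Hypothetical script-mode launch (not part of this commit).
# Assumes SageMaker Python SDK v1.x, where `script_mode=True` and the
# `train_instance_*` parameter names apply; all values below are placeholders.
from sagemaker.tensorflow import TensorFlow

estimator = TensorFlow(
    entry_point='mnist.py',              # assumed name of a script like the one in this diff
    role='arn:aws:iam::123456789012:role/SageMakerRole',  # placeholder IAM role
    train_instance_count=1,
    train_instance_type='ml.c5.xlarge',
    framework_version='1.12.0',
    py_version='py3',
    script_mode=True,
)

# The 'training' channel is made available inside the training container;
# in script mode its local path is exposed via the SM_CHANNEL_TRAINING
# environment variable.
estimator.fit({'training': 's3://my-bucket/mnist/tfrecords'})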

File tree

3 files changed: +477 -0 lines changed

Lines changed: 160 additions & 0 deletions
@@ -0,0 +1,160 @@
import os
import tensorflow as tf
from tensorflow.python.estimator.model_fn import ModeKeys as Modes

INPUT_TENSOR_NAME = 'inputs'
SIGNATURE_NAME = 'predictions'

LEARNING_RATE = 0.001


def model_fn(features, labels, mode, params):
    # Input Layer
    input_layer = tf.reshape(features[INPUT_TENSOR_NAME], [-1, 28, 28, 1])

    # Convolutional Layer #1
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding='same',
        activation=tf.nn.relu)

    # Pooling Layer #1
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

    # Convolutional Layer #2 and Pooling Layer #2
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding='same',
        activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    # Dense Layer
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=(mode == Modes.TRAIN))

    # Logits Layer
    logits = tf.layers.dense(inputs=dropout, units=10)

    # Define operations
    if mode in (Modes.PREDICT, Modes.EVAL):
        predicted_indices = tf.argmax(input=logits, axis=1)
        probabilities = tf.nn.softmax(logits, name='softmax_tensor')

    if mode in (Modes.TRAIN, Modes.EVAL):
        global_step = tf.train.get_or_create_global_step()
        label_indices = tf.cast(labels, tf.int32)
        loss = tf.losses.softmax_cross_entropy(
            onehot_labels=tf.one_hot(label_indices, depth=10), logits=logits)
        tf.summary.scalar('OptimizeLoss', loss)

    if mode == Modes.PREDICT:
        predictions = {
            'classes': predicted_indices,
            'probabilities': probabilities
        }
        export_outputs = {
            SIGNATURE_NAME: tf.estimator.export.PredictOutput(predictions)
        }
        return tf.estimator.EstimatorSpec(
            mode, predictions=predictions, export_outputs=export_outputs)

    if mode == Modes.TRAIN:
        optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(loss, global_step=global_step)
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

    if mode == Modes.EVAL:
        eval_metric_ops = {
            'accuracy': tf.metrics.accuracy(label_indices, predicted_indices)
        }
        return tf.estimator.EstimatorSpec(
            mode, loss=loss, eval_metric_ops=eval_metric_ops)


def serving_input_fn(params):
    inputs = {INPUT_TENSOR_NAME: tf.placeholder(tf.float32, [None, 784])}
    return tf.estimator.export.ServingInputReceiver(inputs, inputs)


def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })

    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image.set_shape([784])
    image = tf.cast(image, tf.float32) * (1. / 255)
    label = tf.cast(features['label'], tf.int32)

    return image, label


def train_input_fn(training_dir, params):
    return _input_fn(training_dir, 'train.tfrecords', batch_size=100)


def eval_input_fn(training_dir, params):
    return _input_fn(training_dir, 'test.tfrecords', batch_size=100)


def _input_fn(training_dir, training_filename, batch_size=100):
    test_file = os.path.join(training_dir, training_filename)
    filename_queue = tf.train.string_input_producer([test_file])

    image, label = read_and_decode(filename_queue)
    images, labels = tf.train.batch(
        [image, label], batch_size=batch_size,
        capacity=1000 + 3 * batch_size)

    return {INPUT_TENSOR_NAME: images}, labels


def neo_preprocess(payload, content_type):
    import logging
    import numpy as np
    import PIL.Image  # Training container doesn't have this package
    import io

    logging.info('Invoking user-defined pre-processing function')

    if content_type != 'application/x-image':
        raise RuntimeError('Content type must be application/x-image')

    f = io.BytesIO(payload)
    # Load image and convert to greyscale space
    image = PIL.Image.open(f).convert('L')
    # Resize
    image = np.asarray(image.resize((28, 28)))
    # Reshape
    image = image.reshape((1, -1)).astype('float32')

    return image


### NOTE: this function cannot use MXNet
def neo_postprocess(result):
    import logging
    import numpy as np
    import json

    logging.info('Invoking user-defined post-processing function')

    # Softmax (assumes batch size 1)
    result = np.squeeze(result)
    result_exp = np.exp(result - np.max(result))
    result = result_exp / np.sum(result_exp)

    response_body = json.dumps(result.tolist())
    content_type = 'application/json'

    return response_body, content_type
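
Outside of SageMaker, the same functions can be exercised directly with the TF 1.x Estimator API, which is sometimes useful as a quick local check before migrating. The sketch below is a minimal, assumed wiring and not part of the committed example: it expects to run in the same module as (or after importing) the functions above, and the local data directory, model directory, and step counts are placeholders, with train.tfrecords and test.tfrecords present in the data directory.

# Minimal local smoke test of the functions above with the TF 1.x Estimator API.
# Assumptions: ./data contains train.tfrecords and test.tfrecords; directories
# and step counts are arbitrary placeholders.
import tensorflow as tf

training_dir = './data'   # placeholder local directory
params = {}               # the functions above accept but do not use params

estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir='./model', params=params)

estimator.train(input_fn=lambda: train_input_fn(training_dir, params), steps=1000)
metrics = estimator.evaluate(input_fn=lambda: eval_input_fn(training_dir, params), steps=100)
print(metrics)

# Export a SavedModel using the serving signature defined by serving_input_fn.
estimator.export_savedmodel('./export', lambda: serving_input_fn(params))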
