Skip to content

Instantly share code, notes, and snippets.

@lakshmanok
Created August 28, 2018 16:11
Show Gist options
  • Save lakshmanok/26310b5a70a0668576b7f43fafa1d26c to your computer and use it in GitHub Desktop.
def image_classifier(features, labels, mode, params):
    """Estimator model_fn for an image classifier, TPU-compatible.

    Args:
        features: input tensor, or a dict containing the image under the
            'image' key (serving input functions often pass a dict).
        labels: integer class-id tensor (used in TRAIN/EVAL modes only).
        mode: a tf.estimator.ModeKeys value (TRAIN, EVAL, or PREDICT).
        params: dict with at least 'learning_rate' and 'use_tpu' keys.

    Returns:
        A tf.contrib.tpu.TPUEstimatorSpec with predictions, loss, train_op,
        eval_metrics, and export_outputs populated as appropriate for `mode`.
    """
    # Serving input functions may wrap the image tensor in a dict.
    image = features
    if isinstance(features, dict):
        image = features['image']

    # cnn_model is defined elsewhere in this file; it returns the raw
    # logits and the number of classes.
    ylogits, nclasses = cnn_model(image, mode, params)

    probabilities = tf.nn.softmax(ylogits)
    class_int = tf.cast(tf.argmax(probabilities, 1), tf.int32)
    class_str = tf.gather(LIST_OF_LABELS, class_int)

    if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=ylogits, labels=tf.one_hot(labels, nclasses)))

        # TPUEstimatorSpec wants eval_metrics as (fn, args) so the metric
        # computation can run on the host rather than the TPU.
        def metric_fn(class_int, labels):
            # tf.metrics.accuracy signature is (labels, predictions);
            # accuracy is symmetric so the original reversed order gave the
            # same number, but the documented order is kept here.
            return {'accuracy': tf.metrics.accuracy(labels, class_int)}
        evalmetrics = (metric_fn, [class_int, labels])

        if mode == tf.estimator.ModeKeys.TRAIN:
            # this is needed for batch normalization, but has no effect otherwise
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            optimizer = tf.train.AdamOptimizer(learning_rate=params['learning_rate'])
            if params['use_tpu']:
                optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)  # TPU change 1
            # Run the BN moving-average updates before the train step.
            with tf.control_dependencies(update_ops):
                train_op = optimizer.minimize(loss, tf.train.get_global_step())
        else:
            train_op = None
    else:
        # PREDICT mode: no labels available, so no loss/metrics/training.
        loss = None
        train_op = None
        evalmetrics = None

    return tf.contrib.tpu.TPUEstimatorSpec(  # TPU change 2
        mode=mode,
        predictions={"probabilities": probabilities,
                     "classid": class_int, "class": class_str},
        loss=loss,
        train_op=train_op,
        eval_metrics=evalmetrics,
        export_outputs={'classes': tf.estimator.export.PredictOutput(
            {"probabilities": probabilities, "classid": class_int,
             "class": class_str})}
    )
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment