Skip to content

Instantly share code, notes, and snippets.

@tonyreina
Last active June 18, 2022 18:05
Show Gist options
  • Save tonyreina/5bbe050c2cfceae62a1dda7d9010b692 to your computer and use it in GitHub Desktop.
How to add a TensorFlow timeline to Keras inference
import numpy as np
import tensorflow as tf
from tensorflow.python.client import timeline
import json
def create_model():
    """Build and compile a small CNN for binary classification.

    Used here purely as a stand-in workload for timeline profiling.
    Alternatively, load an existing model instead:
        model = tf.keras.models.load_model("mymodel.hdf5")

    Returns:
        A compiled ``tf.keras`` Model (224x224x3 input, sigmoid scalar output).
    """
    net_input = tf.keras.layers.Input([224, 224, 3], name="myInput")

    x = tf.keras.layers.Conv2D(32, (3, 3), activation="relu")(net_input)
    x = tf.keras.layers.MaxPooling2D((2, 2))(x)
    x = tf.keras.layers.Conv2D(32, (3, 3), activation="relu")(x)
    x = tf.keras.layers.MaxPooling2D((2, 2))(x)
    x = tf.keras.layers.Flatten()(x)
    x = tf.keras.layers.Dense(units=256, name="myLayer1")(x)
    x = tf.keras.layers.Activation("relu")(x)
    x = tf.keras.layers.Dense(units=128, name="yourLayer2")(x)
    x = tf.keras.layers.Activation("relu")(x)
    net_output = tf.keras.layers.Dense(units=1, name="myPrediction",
                                       activation="sigmoid")(x)

    model = tf.keras.models.Model(inputs=[net_input], outputs=[net_output])
    model.summary()  # Ask Keras to print the model summary

    # No training step -- we only want to profile inference timing.
    model.compile(loss="binary_crossentropy", optimizer="Adam",
                  metrics=["accuracy"])
    return model
def write_timeline(event_times):
    """Save the TF graph for TensorBoard and dump a Chrome trace file.

    Args:
        event_times: list of ``timeline.Timeline`` objects, one per
            profiled ``session.run`` call.

    Side effects:
        * Writes the default graph to ``./tensorboard-logs/test`` for
          TensorBoard.
        * Writes ``tf_event_trace.json`` -- a valid JSON array of Chrome
          trace events, loadable in chrome://tracing.
    """
    # Setup TensorBoard
    graph_location = "./tensorboard-logs/test"
    print("Saving graph to: {}".format(graph_location))
    train_writer = tf.summary.FileWriter(graph_location)

    # Write timeline trace. Merge every iteration's events into one list
    # and serialize once, so the output is well-formed JSON (the previous
    # version left a trailing comma and never closed the array).
    print("Writing event trace")
    all_events = []
    for event in event_times:
        chrome_trace = event.generate_chrome_trace_format(
            show_dataflow=False)
        all_events.extend(json.loads(chrome_trace)["traceEvents"])
    with open("tf_event_trace.json", "w") as f:
        json.dump(all_events, f)

    train_writer.add_graph(tf.get_default_graph())
    train_writer.close()  # flush the event file to disk
if __name__ == "__main__":
    import datetime

    # Grab the TF session backing Keras so we can pass RunOptions/RunMetadata.
    session = tf.keras.backend.get_session()
    model = create_model()
    #model.predict(random_input, verbose=1) # Can't use with timeline (?)

    print("Python: Running Keras within TF Session")
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    event_times = []

    session.run(tf.global_variables_initializer())

    num_iterations = 10
    batch_size = 1024
    start_time = datetime.datetime.now()
    for iteration in range(num_iterations):
        print("Inference iteration #{} of {}".format(iteration + 1, num_iterations))
        # Random batch matching the model's input shape (minus the batch dim).
        input_shape = [batch_size] + [dim.value for dim in model.input.get_shape()[1:]]
        random_input = np.random.random(input_shape)
        session.run(model.output,
                    feed_dict={model.input: random_input},
                    options=run_options, run_metadata=run_metadata)
        # Snapshot this run's step stats for the Chrome trace.
        event_times.append(timeline.Timeline(run_metadata.step_stats))
    stop_time = datetime.datetime.now()

    print("Inference on {} iterations took {}".format(num_iterations, stop_time - start_time))
    write_timeline(event_times)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment