Skip to content

Instantly share code, notes, and snippets.

@yaroslavvb
yaroslavvb / sessrun
Created November 1, 2016 17:59
Example of wrapper for session.run that returns dictionaries instead of lists
def sessrun(fetches):
    """Run `fetches` in the default session, returning results keyed by fetch.

    Args:
      fetches: list of (hashable) graph elements to evaluate.

    Returns:
      dict mapping each fetch to its evaluated value.
    """
    values = tf.get_default_session().run(fetches)
    # zip pairs each fetch with its result positionally; dict(zip(...)) is the
    # idiomatic form of the original index-based dict comprehension.
    return dict(zip(fetches, values))
# Build three scalar constant ops in the default graph.
a = tf.constant(1)
b = tf.constant(2)
c = tf.constant(3)
# InteractiveSession installs itself as the default session, which is what
# sessrun relies on via tf.get_default_session().
sess = tf.InteractiveSession()
# Evaluates a and b in one run call; returns a dict keyed by the fetches.
result1 = sessrun([a, b])
import tensorflow as tf
from numpy.testing.utils import nulp_diff  # NOTE(review): unused in the visible lines
import time  # NOTE(review): unused in the visible lines
n = 1024
# NOTE(review): tf.ones_initializer(shape) is the pre-1.0 TensorFlow API;
# in modern TF this would be tf.ones((n, n)) -- confirm intended TF version.
v1 = tf.Variable(tf.ones_initializer((n, n)))
v2 = tf.Variable(tf.ones_initializer((n, n)))
# In-place update: v1 <- (v1 @ v2) / n; .op discards the returned tensor so
# running `op` performs the assignment without fetching the result.
op = v1.assign(tf.matmul(v1, v2)/n).op
# Scalar sum of all elements of v1 (presumably used to monitor the iteration).
norm = tf.reduce_sum(v1)
@yaroslavvb
yaroslavvb / simple_signal.py
Last active August 1, 2023 19:41
Example of bringing down parameter server by using shared queue to signal
"""Example of launching distributed service and then bringint it down."""
import subprocess
import tensorflow as tf
import time
import sys
flags = tf.flags
flags.DEFINE_string("port1", "12222", "port of worker1")
flags.DEFINE_string("port2", "12223", "port of worker2")
@yaroslavvb
yaroslavvb / gist:710a16fe93f0c91c7e052fcb5b0bccc0
Created October 19, 2016 18:22
iterating over TensorBoard event files
import tensorflow as tf

# Path to a single TensorBoard event file to inspect.
fn = "/tmp/efs/yaroslav/g/train_1/events.out.tfevents.1476852642.g-w-1-vblgv"
# Fix: summary_iterator requires the event-file path argument; the original
# called it with no arguments (a TypeError) and left `fn` unused.
for summary in tf.train.summary_iterator(fn):
    print(summary)
@yaroslavvb
yaroslavvb / cpu_device_test.py
Created October 16, 2016 20:25
Run matmul on different CPU devices, plot timeline
import tensorflow as tf
from tensorflow.python.client import timeline  # NOTE(review): presumably used further down to dump a trace -- not visible here
n = 1024
# Place the two input matrices on the first CPU device.
with tf.device("cpu:0"):
    a1 = tf.ones((n, n))
    a2 = tf.ones((n, n))
# Run the matmul on a second CPU device to exercise cross-device placement.
with tf.device("cpu:1"):
    a3 = tf.matmul(a1, a2)
with tf.device("cpu:2"):
@yaroslavvb
yaroslavvb / smart_initialize.py
Created October 13, 2016 23:20
Better initialize_all_variables which respects variable dependencies and doesn't rerun initializers
# testing variable order init
import tensorflow as tf
def initialize_all_variables(sess=None):
"""Initializes all uninitialized variables in correct order. Initializers
are only run for uninitialized variables, so it's safe to run this multiple
times.
Args:
@yaroslavvb
yaroslavvb / gist:eb91fe4b221c6b365f927c56f8827448
Last active September 29, 2016 00:16
Example of adding a vector to first row of matrix
# 2x2 integer matrix whose first row will be updated.
a = tf.constant([[1,2],[3,4]])
# Row vector (shape [1, 2]) to add to the first row.
row_to_add = tf.constant([[1, 1]])
updated_row = a[0]+row_to_add
# Stack the updated first row on top of the remaining rows.
# NOTE(review): tf.concat(axis, values) is the pre-1.0 argument order.
updated_a = tf.concat(0, [updated_row, a[1:]])
# assumes `sess` was created earlier (not visible here) -- TODO confirm
sess.run(updated_a)
@yaroslavvb
yaroslavvb / Example of adding a vector to first row of matrix in TensorFlow
Last active August 3, 2017 08:18
Example of dynamic stitch to add vector to first row of matrix
# 2x2 matrix whose first row we want to update via dynamic_stitch.
a = tf.constant([[1,2],[3,4]])
row_to_add = tf.constant([1, 1])
original_row = a[0]
updated_row = original_row + row_to_add
# By default every flat index keeps its original value ...
unchanged_indices = tf.range(tf.size(a))
# ... except the first-row positions. NOTE(review): using the row count as
# flat column indices only targets row 0 correctly here because the matrix
# is 2x2 (rows == cols) -- confirm if generalizing.
changed_indices = tf.range(a.get_shape()[0])
a_flat = tf.reshape(a, [-1])
# dynamic_stitch: later index lists take precedence, so updated_row
# overwrites the flat positions of row 0.
updated_a_flat = tf.dynamic_stitch([unchanged_indices, changed_indices], [a_flat, updated_row])
updated_a = tf.reshape(updated_a_flat, a.get_shape())
# Fix: parenthesized print is valid under both Python 2 and 3; the original
# used the Python 2 print statement, a syntax error on Python 3.
print(sess.run(updated_a))
@yaroslavvb
yaroslavvb / local_distributed_benchmark.py
Last active September 16, 2021 10:26
Benchmark distributed tensorflow locally by adding vector of ones on worker2 to variable on worker1 as fast as possible
"""Benchmark tensorflow distributed by adding vector of ones on worker2
to variable on worker1 as fast as possible.
On 2014 macbook, TensorFlow 0.10 this shows
Local rate: 2175.28 MB per second
Distributed rate: 107.13 MB per second
"""
@yaroslavvb
yaroslavvb / gist:b73ff35424dd7ab762234620cf583aac
Created September 16, 2016 23:08
Example of restricting part of graph to run on single core
# try running cpu intensive test on two devices
import tensorflow as tf
import time
def matmul_op():
"""Multiply two matrices together"""
n = 2000
a = tf.ones((n, n), dtype=tf.float32)