Created
May 20, 2017 12:06
-
-
Save vdt/42bae2e754cf9dbaa0c8b3a77e103dc2 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.

# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1, plt_num=1):
    """Plot every feature map of one activation tensor for a single input image.

    Relies on module-level globals: `sess` (an active tf.Session), `x` (the
    network's input placeholder) and `plt` (matplotlib.pyplot).

    image_input    -- preprocessed batch fed into placeholder `x`
    tf_activation  -- tensor whose per-channel activations are shown
    activation_min -- imshow vmin; the sentinel -1 means "matplotlib default"
    activation_max -- imshow vmax; the sentinel -1 means "matplotlib default"
    plt_num        -- matplotlib figure number, so repeated calls coexist
    """
    # Here make sure to preprocess your image_input in a way your network expects,
    # with size, normalization, etc. if needed
    # image_input =
    # Note: x should be the same name as your network's tensorflow data placeholder variable
    # If you get an error that tf_activation is not defined, it may be having trouble
    # accessing the variable from inside a function
    activation = tf_activation.eval(session=sess, feed_dict={x: image_input})
    featuremaps = activation.shape[3]  # NHWC layout: channels on axis 3
    plt.figure(plt_num, figsize=(15, 15))

    # BUG FIX: the original test `activation_min != -1 & activation_max != -1`
    # is an operator-precedence bug — `&` binds tighter than `!=`, so it parsed
    # as `activation_min != (-1 & activation_max)`. Collect only the explicitly
    # requested limits into kwargs instead of branching four ways.
    limits = {}
    if activation_min != -1:
        limits["vmin"] = activation_min
    if activation_max != -1:
        limits["vmax"] = activation_max

    for featuremap in range(featuremaps):
        plt.subplot(6, 8, featuremap + 1)  # up to 6x8 feature maps per figure
        plt.title('FeatureMap ' + str(featuremap))  # displays the feature map number
        plt.imshow(activation[0, :, :, featuremap], interpolation="nearest",
                   cmap="gray", **limits)
#####My codes go here
# Floor the raw image values and cast to float32 (avoiding a copy when the
# array is already float32).
# NOTE(review): `image_0` is not defined anywhere in this file — presumably it
# is loaded in an earlier notebook cell; confirm before running.
yy = np.floor(image_0).astype(np.float32, copy=False)
class Ihatethis:
    """Thin wrapper around an externally-defined LeNet graph.

    NOTE(review): relies on `self.LeNet`, which is not defined anywhere in
    this file — presumably supplied by a subclass or attached to the
    instance; confirm before use.
    """

    def what(self, x):
        """Run `x` through the LeNet graph and return the result.

        BUG FIX: the original assigned the result to the local `x` and
        implicitly returned None, while the caller assigns the return value.
        """
        return self.LeNet(x)

    def getConv1(self, y):
        """Return the network output for `y` (delegates to `what`).

        BUG FIX: same missing-return defect as `what`.
        """
        return self.what(y)

    def tell_me_answer(self, outputFeatureMap):
        """Invoke the supplied callable and return its result.

        BUG FIX: the original called `self.outputFeatureMap()`, but the
        parameter shadows that name and no such attribute exists on the
        class, so it always raised AttributeError; the result was also
        discarded. Call the callable that was passed in and return it.
        """
        return outputFeatureMap()
# NOTE(review): `getConv1` is called on the class, not an instance, so `self`
# is bound to `y` — and `y` itself is not defined anywhere in this file. As
# written this line raises NameError; confirm the intended receiver/argument.
conv1 = Ihatethis.getConv1(y)
# Initialize all TF variables, then run the visualization inside the session.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # NOTE(review): unbound call on the class binds self=yy and
    # outputFeatureMap=conv1; the method body does not use its arguments that
    # way, so this call cannot work as written — verify the intended usage.
    Ihatethis.tell_me_answer(yy, conv1)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment