# Build a DataFrame of signal file headers and check the class distribution
import ibmseti
import pandas as pd

df = pd.DataFrame([ibmseti.compamp.SimCompamp(open(file_record, 'rb').read()).header()
                   for file_record in raw_signal_files])
df.signal_classification.value_counts()
# Get a sample signal and inspect its header
import ibmseti
sample_data = ibmseti.compamp.SimCompamp(open(raw_signal_files[0], 'rb').read())
print('File Name:', raw_signal_files[0])
print('Header:', sample_data.header())

# Output
# File Name: ./primary_small_v3/a33c85e3-9316-4871-bcdc-10882a7fe6bd.dat
# Header: {'signal_classification': 'narrowband',
#          'uuid': 'a33c85e3-9316-4871-bcdc-10882a7fe6bd'}
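Beyond the header, the SimCompamp object also exposes the raw complex amplitude samples; a minimal sketch for rendering them as a spectrogram (the complex_data() call and the 32 x 6144 time/frequency reshape are assumptions based on the SETI primary dataset layout, not shown in the snippets above):

# Sketch: spectrogram from the sample signal's complex data
# (assumes the 32 x 6144 time/frequency layout of the SETI primary dataset)
import numpy as np
import matplotlib.pyplot as plt

complex_data = sample_data.complex_data().reshape(32, 6144)
spectrogram = np.abs(np.fft.fftshift(np.fft.fft(complex_data), axes=1)) ** 2

plt.figure(figsize=(10, 4))
plt.imshow(np.log(spectrogram), aspect='auto', cmap='viridis')
plt.xlabel('Frequency bins')
plt.ylabel('Time chunks')
plt.show()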
# Gather all raw SETI signal files from the data directory
import os
import glob

base_dir = './primary_small_v3/'
raw_signal_files = glob.glob(os.path.join(base_dir, '*.dat'))
len(raw_signal_files)
# visualize SmoothGrad outputs for the two predicted cat classes
from tf_explain.core.smoothgrad import SmoothGrad
import matplotlib.pyplot as plt

explainer = SmoothGrad()
# explain(validation_data, model, class_index, num_samples, noise)
grid1 = explainer.explain(([img], None), model, 281, 80, .2)   # 281: tabby
grid2 = explainer.explain(([img], None), model, 285, 80, .2)   # 285: Egyptian_cat

fig = plt.figure(figsize=(18, 8))
ax1 = fig.add_subplot(1, 3, 1)
ax1.imshow(img_inp / 255.)
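The preview cuts off here; a plausible way to finish the figure is to show the two SmoothGrad maps in the remaining panels (this layout is an assumption, not from the original snippet):

# assumed continuation: show the two SmoothGrad maps next to the input image
ax2 = fig.add_subplot(1, 3, 2)
ax2.imshow(grid1, cmap='binary_r')
ax3 = fig.add_subplot(1, 3, 3)
ax3.imshow(grid2, cmap='binary_r')
plt.show()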
# Grad-CAM: import and instantiate the explainer
from tf_explain.core.grad_cam import GradCAM
explainer = GradCAM()

# visualize GradCAM output from Block 14 for both cat classes
grid1 = explainer.explain(([img], None), model, 'block14_sepconv1', 281)   # 281: tabby
grid2 = explainer.explain(([img], None), model, 'block14_sepconv1', 285)   # 285: Egyptian_cat
fig = plt.figure(figsize=(18, 8))
ax1 = fig.add_subplot(1, 3, 1)
ax1.imshow(img_inp / 255.)
ax1.imshow(grid1, alpha=0.6)
ax2 = fig.add_subplot(1, 3, 2)
ax2.imshow(img_inp / 255.)
ax2.imshow(grid2, alpha=0.6)
# visualize GradCAM output from Block 6 for both cat classes
grid1 = explainer.explain(([img], None), model, 'block6_sepconv1', 281)   # 281: tabby
grid2 = explainer.explain(([img], None), model, 'block6_sepconv1', 285)   # 285: Egyptian_cat
fig = plt.figure(figsize=(18, 8))
ax1 = fig.add_subplot(1, 3, 1)
ax1.imshow(img_inp / 255.)
ax1.imshow(grid1, alpha=0.6)
ax2 = fig.add_subplot(1, 3, 2)
ax2.imshow(img_inp / 255.)
ax2.imshow(grid2, alpha=0.6)
# get imagenet IDs for the two cat breeds
imgnet_map['tabby'], imgnet_map['Egyptian_cat']
# Output: ('281', '285')
# visualize GradCAM outputs in Block 1
grid1 = explainer.explain(([img], None), model, 'block1_conv2', 281)   # 281: tabby
# get label imagenet ID
imgnet_map['Egyptian_cat']
# visualize occlusion sensitivity for the Egyptian_cat class
from tf_explain.core.occlusion_sensitivity import OcclusionSensitivity
import tensorflow as tf

explainer = OcclusionSensitivity()
img_inp = tf.keras.preprocessing.image.load_img(IMAGE_PATH, target_size=(299, 299))
img_inp = tf.keras.preprocessing.image.img_to_array(img_inp)
# explain with class_index=285 (Egyptian_cat) and a 7x7 occluding patch
grid = explainer.explain(([img_inp], None), model, 285, 7)
fig, ax = plt.subplots(figsize=(8, 8))
ax.imshow(grid)
# visualize intermediate activations from an early separable conv block
from tf_explain.core.activations import ExtractActivations
import numpy as np
explainer = ExtractActivations()
grid = explainer.explain((np.array([img]), None), model, ['block2_sepconv2_act'])
fig, ax = plt.subplots(figsize=(18, 18))
ax.imshow(grid, cmap='binary_r')
# load imagenet id to class label mappings
import requests
response = requests.get('https://storage.googleapis.com/download.tensorflow.org/data/imagenet_class_index.json')
imgnet_map = response.json()
imgnet_map = {v[1]: k for k, v in imgnet_map.items()}
# make model predictions on the Xception-preprocessed image
img = tf.keras.applications.xception.preprocess_input(img)
predictions = model.predict(np.array([img]))
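None of the previews above show where `model` comes from; a minimal sketch, assuming an ImageNet-pretrained Xception (which is what the preprocess_input call and the 299x299 input size imply):

import tensorflow as tf

# assumption: the `model` used throughout is an ImageNet-pretrained Xception
model = tf.keras.applications.xception.Xception(weights='imagenet')

# map the predicted probabilities back to human-readable ImageNet labels
print(tf.keras.applications.xception.decode_predictions(predictions, top=5))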