import matplotlib.pyplot as plt
import soundfile as sf
import librosa

plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 10
plt.rcParams['axes.labelsize'] = 10    # Time and Hz, i.e. axis labels
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['xtick.labelsize'] = 15   # Time (x) tick values
plt.rcParams['ytick.labelsize'] = 15   # Hz (y) tick values
plt.rcParams['legend.fontsize'] = 17
plt.rcParams['figure.titlesize'] = 10
plt.rcParams['axes.titlesize'] = 14    # Title font

#---------------------------------------------------
# Plot amplitudes
fig, axarr = plt.subplots(3, 3, figsize=(17, 8))
fig.suptitle("15 sec & 3 sec & 1.5 sec", fontsize=16)
length = 15  # sec; the three columns show length, length/5 and length/10 seconds
#----------------------------------------------------
# Reference: first 15 s (44100 * 15 frames) of the original 44.1 kHz WAV
ref, sr = sf.read("adele/test/11 Someone Like You.wav", 44100 * 15)
axarr[0, 0].plot(ref[:44100 * length], 'b')
axarr[0, 1].plot(ref[:44100 * length // 5], 'b')
axarr[0, 2].plot(ref[:44100 * length // 10], 'b')
axarr[0, 2].legend(['Reference'], loc=1)

#----------------------------------------------------
# Damaged: low-rate copy, loaded at its native sample rate (4410 Hz)
y, sr = librosa.core.load("adele/test/4410/11 Someone Like You.wav", sr=None)
axarr[1, 0].plot(y[:4410 * length], 'r')
axarr[1, 1].plot(y[:4410 * length // 5], 'r')
axarr[1, 2].plot(y[:4410 * length // 10], 'r')
axarr[1, 2].legend(['Damaged'], loc=1)

#----------------------------------------------------
# Decoded: reconstructed MP3, loaded at its native sample rate (44.1 kHz)
decoded, sr = librosa.core.load("adele/test/decoded/11 Someone Like You.mp3", sr=None)
axarr[2, 0].plot(decoded[:44100 * length], 'g')
axarr[2, 1].plot(decoded[:44100 * length // 5], 'g')
axarr[2, 2].plot(decoded[:44100 * length // 10], 'g')
axarr[2, 2].legend(['Decoded'], loc=1)

#----------------------------------------------------
# Fine-tune figure; hide x tick labels on the top row and y tick labels on the middle column
plt.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False)
plt.setp([a.get_yticklabels() for a in axarr[:, 1]], visible=False)

# Tight layout often produces nice results,
# but requires the title to be spaced accordingly
fig.tight_layout()
fig.subplots_adjust(top=0.92)
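The snippet above depends on local WAV/MP3 files under adele/test/. A minimal self-contained sketch of the same multi-column layout, using a synthetic 440 Hz sine wave in place of the audio files (the sample rates, duration, and two-row layout here are purely illustrative, not the original data), could look like:

import numpy as np
import matplotlib.pyplot as plt

sr_hi, sr_lo = 44100, 4410           # illustrative "reference" and "damaged" rates
length = 15                          # seconds, as in the figure title
ref = np.sin(2 * np.pi * 440 * np.arange(sr_hi * length) / sr_hi)      # stand-in for the reference WAV
damaged = np.sin(2 * np.pi * 440 * np.arange(sr_lo * length) / sr_lo)  # stand-in for the low-rate copy

fig, axarr = plt.subplots(2, 3, figsize=(17, 6))
fig.suptitle("15 sec & 3 sec & 1.5 sec", fontsize=16)
for row, (signal, sr, color, name) in enumerate(
        [(ref, sr_hi, 'b', 'Reference'), (damaged, sr_lo, 'r', 'Damaged')]):
    for col, divisor in enumerate([1, 5, 10]):
        # Each column shows a shorter prefix of the same signal
        axarr[row, col].plot(signal[:sr * length // divisor], color)
    axarr[row, 2].legend([name], loc=1)
fig.tight_layout()
fig.subplots_adjust(top=0.9)
plt.show()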
SuperShinyEyes (Author) commented on Dec 19, 2018:
%reload_ext autoreload
%autoreload 2
%matplotlib inline

import numpy as np
from numpy import ndarray
import matplotlib.pyplot as plt

width = 10
x = np.linspace(start=-width, stop=width, num=1000)

def relu(x: ndarray) -> ndarray:
    # Work on a copy so the caller's array is not modified in place
    x = np.copy(x)
    x[x < 0] = 0
    return x

sigmoid = lambda x: 1 / (1 + np.exp(-x))
tanh = lambda x: 2 * sigmoid(2 * x) - 1

plt.rcParams['axes.titlesize'] = 18   # Title font
plt.rcParams['axes.labelsize'] = 12   # Label font size

fig, axarr = plt.subplots(nrows=1, ncols=3, figsize=(12, 3), sharex=False, sharey=False)
fig.suptitle('Popular activation functions', fontsize=22)
# fig.tight_layout(pad=3.0)

axarr[0].plot(x, sigmoid(x))
axarr[0].set_xlim(xmin=-width, xmax=width)
axarr[0].set_yticks([0, 0.5, 1])
axarr[0].set_title("Sigmoid")
axarr[0].set(xlabel='X', ylabel='Y')
axarr[0].label_outer()

axarr[1].plot(x, tanh(x))
axarr[1].set_xlim(xmin=-width, xmax=width)
axarr[1].set_yticks([-1, 0, 1])
axarr[1].set_title("Tanh")
axarr[1].set(xlabel='X')
# axarr[1].label_outer()  # This would remove the y ticks

axarr[2].plot(x, relu(x))
axarr[2].set_xlim(xmin=-width, xmax=width)
axarr[2].set_yticks([0, 5, 10])
axarr[2].set_title("ReLU")
axarr[2].set(xlabel='X')
# axarr[2].label_outer()

# Tight layout often produces nice results,
# but requires the title to be spaced accordingly
fig.tight_layout()
fig.subplots_adjust(top=0.75)
plt.show(); plt.clf(); plt.cla(); plt.close()
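The tanh lambda above relies on the identity tanh(x) = 2 * sigmoid(2x) - 1; a quick check against np.tanh (a sketch over the same x range as above) confirms the two agree to float precision:

import numpy as np

x = np.linspace(-10, 10, 1000)
sigmoid = lambda x: 1 / (1 + np.exp(-x))
# tanh built from the sigmoid identity matches NumPy's tanh
assert np.allclose(2 * sigmoid(2 * x) - 1, np.tanh(x))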
import itertools
from typing import Sequence

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from numpy import ndarray
from sklearn.metrics import confusion_matrix


def plot_confusion_matrix(
        y_true: ndarray,           # 1-D array of integer labels
        y_pred: ndarray,           # 1-D array of integer labels
        classes: Sequence[str],    # Names of classes/labels
        normalize=True,
        title='Confusion matrix',
        cmap=plt.cm.Blues,
        save: bool = False,
    ):
    """
    Plot the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    cm: ndarray = confusion_matrix(
        y_true=y_true,
        y_pred=y_pred,
        labels=range(len(classes))
    )
    np.set_printoptions(precision=2)
    if normalize:
        # Normalize each row so the cells become per-class fractions
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        fmt = '.2f'
        text_color_threshold = cm.max() / 1.5
    else:
        fmt = 'd'
        text_color_threshold = cm.max() / 2

    fig, ax = plt.subplots(figsize=(12, 9.5))
    fig.suptitle(title, fontsize=20, y=1.01)
    color_mappable: matplotlib.image.AxesImage = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    cb: matplotlib.colorbar.Colorbar = fig.colorbar(color_mappable, ax=ax)

    ax.set_xticks(range(len(classes)))
    ax.set_xticklabels(classes)
    # Rotate the x tick labels and set their alignment
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
    ax.set_yticks(range(len(classes)))
    ax.set_yticklabels(classes)

    # Write each cell's value, switching to white text on dark cells
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        value = cm[i, j]
        ax.text(x=j, y=i, s=format(value, fmt),
                fontdict={'fontsize': 12, 'fontweight': 'bold'},
                horizontalalignment="center",
                color="black" if value < text_color_threshold else 'white')

    ax.set(ylabel='True label', xlabel='Predicted label')
    fig.tight_layout()
    if save:
        plt.savefig(f'plots/{title}.png', bbox_inches='tight')


classes = ['non_event', 'cuteness', 'startle', 'surprise', 'frustration', 'happiness', 'excitement', 'relief', 'wonder', 'amusement', 'insight_firo', 'moved', 'puzzlement', 'na']
plot_confusion_matrix(
    y_true=np.argmax(trainy, axis=1),
    y_pred=np.argmax(model.predict(normalize(trainX), batch_size=4), axis=1),
    classes=classes,
    normalize=True,
    title='Baseline 8-window confusion matrix',
    save=True,
)
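The call above relies on the notebook's trainX/trainy and a trained model. For a quick self-contained check of the plotting itself, random labels work just as well; the class names and sample count below are invented for illustration:

import numpy as np

rng = np.random.default_rng(0)
toy_classes = ['cat', 'dog', 'bird']                     # hypothetical label names
y_true = rng.integers(0, len(toy_classes), size=200)     # random "ground truth"
y_pred = rng.integers(0, len(toy_classes), size=200)     # random "predictions"

plot_confusion_matrix(
    y_true=y_true,
    y_pred=y_pred,
    classes=toy_classes,
    normalize=True,
    title='Random-label sanity check',
    save=False,
)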
def plot_mean_video_wise_poster(total_data: ndarray, data_name: str, x_labels):
    '''
    Reference:
    - https://matplotlib.org/3.1.1/gallery/subplots_axes_and_figures/subplots_demo.html
    '''
    plt.rcParams['axes.labelsize'] = 20   # Label font size
    nrows = 6; ncols = 3
    fig, axarr = plt.subplots(
        nrows=nrows, ncols=ncols, figsize=(18, 18),
        sharex=True, sharey=True,
    )
    fig.suptitle(f'Unravel {data_name} facial expression embedding, \ncolumn-wise means before preprocess', fontsize=26)
    for video_id, data in zip(range(1, 18), total_data):
        #---------------------------------------------------
        # Plot one video's column-wise means
        i = video_id - 1
        ax = axarr[i // ncols, i % ncols]
        ax.plot(range(len(data)), data)
        ax.set_ylim(top=1)   # y axis is shared, so this caps every subplot
        ax.set_xticks(range(len(x_labels)))
        ax.set_xticklabels(x_labels)
        # Rotate the tick labels and set their alignment.
        plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
                 rotation_mode="anchor")
        ax.set_title(f'Video {video_id}')
        ax.set(xlabel='Class', ylabel='Mean')
        ax.label_outer()
    # The 18th cell stays empty, but its x tick labels are still visible
    plt.setp(axarr[-1, -1].get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Tight layout often produces nice results,
    # but requires the title to be spaced accordingly
    fig.tight_layout()
    fig.subplots_adjust(top=0.92)
    plt.savefig(f'plots/data-analysis/poster-embedding={data_name}-columnwise_mean.png', bbox_inches="tight")


classes = ['Anger', 'Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise', 'Neutrality']
plot_mean_video_wise_poster(means, "Affectnet", x_labels=classes)
import math
from typing import List


def plot_predictions_batch(images: List[ndarray], predictions: List[str], gts: List[str], fig_title: str):
    n_image_per_plot = 42
    n_image = len(images)
    # Split the images into figures of at most n_image_per_plot each
    for i in range(math.ceil(n_image / n_image_per_plot)):
        start_i = i * n_image_per_plot
        end_i = start_i + n_image_per_plot
        plot_predictions(
            images[start_i:end_i],
            predictions[start_i:end_i],
            gts[start_i:end_i],
            fig_title=f'{fig_title}_{i}'
        )


def plot_predictions(images: List[ndarray], predictions: List[str], gts: List[str], fig_title: str):
    n_image = len(images)
    n_col = 6
    n_row = math.ceil(n_image / n_col)
    # squeeze=False keeps axes 2-D even when there is only a single row
    fig, axes = plt.subplots(n_row, n_col, squeeze=False)
    # Hide every frame up front, including trailing cells without an image
    for ax in axes.flat:
        ax.axis('off')
    for i, (y, x) in zip(range(n_image), itertools.product(range(n_row), range(n_col))):
        ax = axes[y, x]
        ax.imshow(images[i])
        pred, gt = predictions[i], gts[i]
        if pred == gt:
            # Correct prediction: show only the label, in green
            ax.set_title(gt, fontdict={'color': 'green'}, fontsize=11)
        else:
            # Wrong prediction: show ground truth and prediction, in red
            ax.set_title(f'gt: {gt}\npr: {pred}', fontdict={'color': 'red'}, fontsize=11)
    fig.set_size_inches(n_col * 2, n_row * 0.9)
    fig.suptitle(fig_title, fontsize=16)
    fig.savefig(f'plots/{fig_title}.jpg')


plot_predictions_batch(val_input_images, result_validation, val_labels, fig_title='pretrained=synth-val_set')
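The call above uses the notebook's validation images and labels. A self-contained sanity check with random images works too; every name below is invented, and it assumes a plots/ directory exists, just as the functions above do:

import numpy as np

rng = np.random.default_rng(0)
toy_labels = ['happy', 'sad', 'neutral']                    # hypothetical classes
n = 10
toy_images = [rng.random((64, 64, 3)) for _ in range(n)]    # random RGB "images"
toy_gts = [toy_labels[i % len(toy_labels)] for i in range(n)]
# Shift every other prediction so some titles render red, some green
toy_preds = [toy_labels[(i + (i % 2)) % len(toy_labels)] for i in range(n)]

plot_predictions_batch(toy_images, toy_preds, toy_gts, fig_title='toy-predictions')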