#!/usr/bin/env python
# -*- coding: utf-8 -*-
import glob
import os
import sys

import librosa
import numpy as np
import sklearn.mixture


def load(audio_path):
    # Load the audio, strip silent intervals, and return MFCC frames,
    # one row per frame.
    y, sr = librosa.load(audio_path)
    y_trim = librosa.effects.remix(y, intervals=librosa.effects.split(y))
    mfcc = librosa.feature.mfcc(y=y_trim, sr=sr)
    return mfcc.T


def fit(frames, test_ratio=0.05, n_components=16):
    # Hold out a fraction of the frames for evaluation and fit a GMM
    # on the rest.
    index = np.arange(len(frames))
    np.random.shuffle(index)
    train_idx = index[int(len(index) * test_ratio):]
    test_idx = index[:int(len(index) * test_ratio)]
    gmm = sklearn.mixture.GaussianMixture(n_components=n_components)
    gmm.fit(frames[train_idx])
    return gmm, frames[test_idx]


def predict(gmms, test_frame):
    # Score the frames under every speaker's GMM and rank the speakers
    # by mean log-likelihood, best first.
    scores = []
    for gmm_name, gmm in gmms.items():
        scores.append((gmm_name, gmm.score(test_frame)))
    return sorted(scores, key=lambda x: x[1], reverse=True)


def evaluate(gmms, test_frames):
    correct = 0
    for name in test_frames:
        best_name, best_score = predict(gmms, test_frames[name])[0]
        print('Ground Truth: %s, Predicted: %s, Score: %f' % (name, best_name, best_score))
        if name == best_name:
            correct += 1
    # Convert the fraction of correct predictions to a percentage.
    print('Overall Accuracy: %f%%' % (100.0 * correct / len(test_frames)))


if __name__ == '__main__':
    gmms, test_frames = {}, {}
    # One GMM per speaker: each training WAV's base name is the speaker label.
    for filename in glob.glob(os.path.join(sys.argv[1], '*.wav')):
        name = os.path.splitext(os.path.basename(filename))[0]
        print('Processing %s ...' % name)
        gmms[name], test_frames[name] = fit(load(filename))
    evaluate(gmms, test_frames)
    # Rank the trained speaker models against each unlabeled test WAV.
    for filename in glob.glob(os.path.join(sys.argv[2], '*.wav')):
        result = predict(gmms, load(filename))
        print('%s: %s' % (os.path.basename(filename), ' / '.join('%s = %f' % x for x in result[:5])))
I am a beginner in ML, using this code to (hopefully) train speaker identification for my open-source karaoke software. I managed to have it run, but the output is terrible, and I think the reason is simply that I don't understand what audio files to use for training. Long story short, I have downloaded a bunch of audio samples from OpenSLR (http://www.openslr.org/12/) and now I have a gazillion recordings by different voices.
Should I train on only one speaker at a time? Or will this handle multiple speakers?
Thanks!
You might try training something simpler first to identify issues in your code. For example, train with only two speakers and inspect the data as it flows through each stage of the pipeline; most errors hide somewhere in between. Once you manage to overfit that toy experiment, start scaling up to datasets like the ones you mentioned.
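A minimal sketch of such a toy run, reusing the gist's load/fit/evaluate functions; the paths train/alice.wav and train/bob.wav are hypothetical placeholders for any two recordings:

# Toy two-speaker run; 'train/alice.wav' and 'train/bob.wav' are
# hypothetical placeholders. The base file name becomes the label.
gmms, held_out = {}, {}
for path in ['train/alice.wav', 'train/bob.wav']:
    name = os.path.splitext(os.path.basename(path))[0]
    frames = load(path)
    print(name, frames.shape)      # sanity-check the MFCC frames here
    gmms[name], held_out[name] = fit(frames)
evaluate(gmms, held_out)           # near-perfect accuracy expected on 2 files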
Just wanted to note that the classification method with this GMM is slightly different from the one proposed by sklearn and other frameworks, where a single GMM with n_classes components is instantiated and trained over all the training data, and prediction is made by taking the most likely cluster label. Here, a separate 16-component GMM is trained for each class, and the prediction is made by ranking the classes by log-likelihood.
This method is similar to the one proposed in Tsai et al.'s work on Singing Voice Identification (SID), and probably in many more works on voice classification.
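To make that difference concrete, here is a small sketch with random stand-in features (not real MFCCs): the clustering formulation first, then the per-class log-likelihood ranking the gist uses.

import numpy as np
import sklearn.mixture

rng = np.random.RandomState(0)
X_a = rng.normal(0, 1, (500, 20))    # stand-in frames for speaker A
X_b = rng.normal(3, 1, (500, 20))    # stand-in frames for speaker B

# Clustering formulation: one GMM with n_classes components; predictions
# are cluster indices, which still have to be mapped to speaker labels.
single = sklearn.mixture.GaussianMixture(n_components=2)
single.fit(np.vstack([X_a, X_b]))
print(single.predict(X_b[:5]))       # e.g. [1 1 1 1 1], an unlabeled cluster id

# Gist's formulation: one 16-component GMM per class; the prediction is the
# class whose model gives the highest mean log-likelihood over the clip.
per_class = {name: sklearn.mixture.GaussianMixture(n_components=16).fit(X)
             for name, X in [('A', X_a), ('B', X_b)]}
print(max(per_class, key=lambda name: per_class[name].score(X_b)))  # 'B'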
Thank you for the code ^^