How can I improve my neural network (speaker recognition with MFCCs)?

I am working on a speaker-recognition neural network.

What I do is take WAV files (from the first episode of The Big Bang Theory :-)), then convert them to MFCC coefficients, then feed those as the input to an open-source neural-network API (scikit-learn's MLPClassifier). As the output I define a unique one-hot vector for each speaker (say [1,0,0] for Sheldon, [0,1,0] for Penny, and so on). I hold out 50 random samples for testing and fit (train) on the rest.

Here is my code. At first I got chance-level results from the NN, but with the help of a great guy I improved it to ~42%. I would like more, though, around 70%:

from sklearn.neural_network import MLPClassifier
import python_speech_features
import scipy.io.wavfile as wav
import numpy as np
from os import listdir
from os.path import isfile,join
from random import shuffle
import matplotlib.pyplot as plt
from tqdm import tqdm
from random import randint
import random
winner = []  # counts how many correct predictions we get when testing the NN
random_winner = []
win_len = 0.04  # window length in seconds
step = win_len / 2
nfft = 2048
for TestNum in tqdm(range(20)):  # each round: build the NN, then test it on 50 held-out samples
    X = []
    Y = []
    onlyfiles = [f for f in listdir("FinalAudios/") if isfile(join("FinalAudios/",f))]   # Files in dir
    names = []  # names of the speakers
    for file in onlyfiles:  # for each wav sound
        # NOT NECESSARY FOR UNDERSTANDING THE CODE: extract the speaker name from the file name
        if " " not in file.split("_")[0]:
            names.append(file.split("_")[0])
        else:
            names.append(file.split("_")[0].split(" ")[0])
    only_speakers = [] + names
    #print(only_speakers)
    names = list(dict.fromkeys(names))  # unique names of speakers
    print(names)
    vector_names = []  # vector for each name
    i = 0
    vector_for_each_name = [0] * len(names)
    for name in names:
        vector_for_each_name[i] += 1
        vector_names.append(np.array(vector_for_each_name))
        vector_for_each_name[i] -= 1
        i += 1
    for f in onlyfiles:
        if " " not in f.split("_")[0]:
            f_speaker = f.split("_")[0]
        else:
            f_speaker = f.split("_")[0].split(" ")[0]
        fs,audio = wav.read("FinalAudios/" + f)  # read the file
        try:
            mfcc_feat = python_speech_features.mfcc(audio,samplerate=fs,winlen=win_len,winstep=step,nfft=nfft,appendEnergy=False)
            flat_list = [item for sublist in mfcc_feat for item in sublist]
            X.append(np.array(flat_list))
            Y.append(np.array(vector_names[names.index(f_speaker)]))
        except IndexError:
            pass
    Z = list(zip(X,Y))

    shuffle(Z)  # WE SHUFFLE X,Y TO PERFORM RANDOM ON THE TEST LEVEL

    X,Y = zip(*Z)
    X = list(X)
    Y = list(Y)
    X = np.asarray(X)
    Y = np.asarray(Y)

    Y_test = Y[:50]  # CHOOSE 50 FOR TEST,OTHERS FOR TRAIN
    X_test = X[:50]
    X = X[50:]
    Y = Y[50:]
    print(len(X))
    clf = MLPClassifier(solver='lbfgs',alpha=3e-2,hidden_layer_sizes=(50,20),random_state=2)  # create the NN
    clf.fit(X,Y)  # Train it
    print(list(clf.predict_proba([X[0]])[0]))
    print(list(Y_test[0]))
    for sample in range(len(X_test)):  # append 1 to winner if the prediction is correct, 0 if not; plotted at the end
        arr = list(clf.predict([X_test[sample]])[0])
        if arr.index(max(arr)) == list(Y_test[sample]).index(1):
            winner.append(1)
        else:
            winner.append(0)
        if only_speakers[randint(0,len(only_speakers) - 1)] == only_speakers[randint(0,len(only_speakers) - 1)]:  # random-guess baseline
            random_winner.append(1)
        else:
            random_winner.append(0)

# plot winner
plot_x = []
plot_y = []
for i in range(1,len(winner)):
    plot_y.append(sum(winner[0:i])*1.0/len(winner[0:i]))
    plot_x.append(i)
plot_random_x = []
plot_random_y = []
for i in range(1,len(random_winner)):
    plot_random_y.append(sum(random_winner[0:i])*1.0/len(random_winner[0:i]))
    plot_random_x.append(i)
plt.plot(plot_x,plot_y,'r',label='machine learning')
plt.plot(plot_random_x,plot_random_y,'b',label='random')
plt.xlabel('Number Of Samples')
# naming the y axis
plt.ylabel('Success Rate')

# giving a title to my graph
plt.title('Success Rate : Random Vs ML!')

# function to show the plot
plt.show()

Here is a zip file with my code and the audio files: https://ufile.io/eggjm1gw

Does anyone know how I can improve the accuracy?

EDIT:

I improved the dataset and switched to a convolutional model, which got me to 60% accuracy. That is okay, but still not great:

import python_speech_features
import scipy.io.wavfile as wav
import numpy as np
from os import listdir
import os
import shutil
from os.path import isfile,join
from random import shuffle
from matplotlib import pyplot
from tqdm import tqdm
from random import randint
import tensorflow as tf
from ast import literal_eval as str2arr
from tempfile import TemporaryFile
#win_len = 0.04  # in seconds
#step = win_len / 2
#nfft = 2048
win_len = 0.05  # in seconds
step = win_len
nfft = 16384
results = []
outfile_x = None
outfile_y = None
winner = []

for TestNum in tqdm(range(40)):  # We check it several times
    if not outfile_x:  # build the features once and cache them in temp files
        X = []  # inputs
        Y = []  # outputs
        onlyfiles = [f for f in listdir("FinalAudios") if isfile(join("FinalAudios",f))]   # Files in dir
        names = []  # names of the speakers
        for file in onlyfiles:  # for each wav sound
            # NOT NECESSARY FOR UNDERSTANDING THE CODE: extract the speaker name from the file name
            if " " not in file.split("_")[0]:
                names.append(file.split("_")[0])
            else:
                names.append(file.split("_")[0].split(" ")[0])
        only_speakers = [] + names
        namesWithoutDuplicate = list(dict.fromkeys(names))
        namesWithoutDuplicateCopy = namesWithoutDuplicate[:]
        for name in namesWithoutDuplicateCopy:  # drop speakers with too few sample files
            if names.count(name) < 107:
                namesWithoutDuplicate.remove(name)
        names = namesWithoutDuplicate
        print(names)  # print it
        vector_names = []  # output for each name
        i = 0
        for name in names:
            vector_for_each_name = i
            vector_names.append(np.array(vector_for_each_name))
            i += 1
        for f in onlyfiles:  # for all the files
            if " " not in f.split("_")[0]:
                f_speaker = f.split("_")[0]
            else:
                f_speaker = f.split("_")[0].split(" ")[0]
            if f_speaker in namesWithoutDuplicate:
                fs,audio = wav.read("FinalAudios\\" + f)  # read the file
                try:
                    # compute MFCC
                    mfcc_feat = python_speech_features.mfcc(audio,samplerate=fs,winlen=win_len,winstep=step,nfft=nfft,appendEnergy=False)
                    #flat_list = [item for sublist in mfcc_feat for item in sublist]
                    # Create output + inputs
                    for i in mfcc_feat:
                        X.append(np.array(i))
                        Y.append(np.array(vector_names[names.index(f_speaker)]))
                except IndexError:
                    pass
            else:
                if not os.path.exists("TooLowSamples"):  # if path not exist we create it
                    os.makedirs("TooLowSamples")
                shutil.move("FinalAudios\\" + f,"TooLowSamples\\" + f)
        outfile_x = TemporaryFile()
        np.save(outfile_x,X)
        outfile_y = TemporaryFile()
        np.save(outfile_y,Y)



    # ------------------- RANDOMIZATION,UNNECESSARY TO UNDERSTAND THE CODE ------------------- #
    else:
        outfile_x.seek(0)
        X = np.load(outfile_x)
        outfile_y.seek(0)
        Y = np.load(outfile_y)
    Z = list(zip(X,Y))
    shuffle(Z)  # WE SHUFFLE X,Y TO PERFORM RANDOM ON THE TEST LEVEL
    X,Y = zip(*Z)
    X = list(X)
    Y = list(Y)
    lenX = len(X)
    # ------------------- RANDOMIZATION,UNNECESSARY TO UNDERSTAND THE CODE ------------------- #
    y_test = np.asarray(Y[:4000])   # CHOOSE 4000 FOR TEST, OTHERS FOR TRAIN
    x_test = np.asarray(X[:4000])
    x_train = np.asarray(X[4000:])
    y_train = np.asarray(Y[4000:])
    x_val = x_train[-4000:]         # FROM THE TRAIN SET, HOLD OUT 4000 FOR VALIDATION
    y_val = y_train[-4000:]
    x_train = x_train[:-4000]
    y_train = y_train[:-4000]
    x_train = x_train.reshape(np.append(x_train.shape,(1,1)))  # RESHAPE TO (N,13,1,1) FOR CONV2D INPUT
    x_test = x_test.reshape(np.append(x_test.shape,(1,1)))     # RESHAPE FOR INPUT
    x_val = x_val.reshape(np.append(x_val.shape,(1,1)))        # RESHAPE FOR INPUT
    features_shape = x_val.shape

    # -------------- OUR TENSOR FLOW NEURAL NETWORK MODEL -------------- #
    model = tf.keras.models.Sequential([
        # NOTE: the filter counts of blocks 2 and 3 (64, 128) and padding='same' on the
        # pooling layers (needed so the 1-wide input survives) are assumptions; those
        # arguments were garbled in the original post
        tf.keras.layers.Input(name='inputs',shape=(13,1,1),dtype='float32'),
        tf.keras.layers.Conv2D(32,(3,3),activation='relu',padding='same',strides=1,name='block1_conv'),
        tf.keras.layers.MaxPooling2D((3,3),strides=(2,2),padding='same',name='block1_pool'),
        tf.keras.layers.BatchNormalization(name='block1_norm'),
        tf.keras.layers.Conv2D(64,(3,3),activation='relu',padding='same',strides=1,name='block2_conv'),
        tf.keras.layers.MaxPooling2D((3,3),strides=(2,2),padding='same',name='block2_pool'),
        tf.keras.layers.BatchNormalization(name='block2_norm'),
        tf.keras.layers.Conv2D(128,(3,3),activation='relu',padding='same',strides=1,name='block3_conv'),
        tf.keras.layers.MaxPooling2D((3,3),strides=(2,2),padding='same',name='block3_pool'),
        tf.keras.layers.BatchNormalization(name='block3_norm'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(64,name='dense'),
        tf.keras.layers.BatchNormalization(name='dense_norm'),
        tf.keras.layers.Dropout(0.2,name='dropout'),
        tf.keras.layers.Dense(10,activation='softmax',name='pred')
    ])
    model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])
    # -------------- OUR TENSOR FLOW NEURAL NETWORK MODEL -------------- #

    print("fitting")
    history = model.fit(x_train,y_train,epochs=15,validation_data=(x_val,y_val))
    print("testing")
    results.append(model.evaluate(x_test,y_test)[1])
    print(results)
    print(sum(results)/len(results))
    for i in range(10000):  # estimate the random-guess baseline: how often do two random files share a speaker?
        f_1 = only_speakers[randint(0,len(only_speakers) - 1)]
        f_2 = only_speakers[randint(0,len(only_speakers) - 1)]
        if " " not in f_1.split("_")[0]:
            f_speaker_1 = f_1.split("_")[0]
        else:
            f_speaker_1 =f_1.split("_")[0].split(" ")[0]
        if " " not in f_2.split("_")[0]:
            f_speaker_2 = f_2.split("_")[0]
        else:
            f_speaker_2 =f_2.split("_")[0].split(" ")[0]
        if f_speaker_2 == f_speaker_1:
            winner.append(1)
        else:
            winner.append(0)
    print(sum(winner)/len(winner))
    # if onlyfiles[randint(len(onlyfiles) - 1)] == onlyfiles[randint(len(onlyfiles) - 1)]
    #pyplot.plot(history.history['loss'],label='train')
    #pyplot.plot(history.history['val_loss'],label='test')
    #pyplot.legend()
    #pyplot.show()
Answer by womenzhelihaiyouyu11:

Reading through your post, here is what I suggest you fix/explore:

  • 42% accuracy is not impressive for the task at hand. Reconsider how you cross-validate, e.g. how you split between the validation, test, and training datasets (see the speaker/file-aware split sketch after this list).

  • Your dataset seems very limited. Your task is to identify the speaker, and a single episode may not be enough for that.

  • You may want to consider a deep-neural-network library such as Keras or TensorFlow. Convolutions can be applied directly to the MFCC graph (the time-by-coefficient matrix).

  • If you decide to use TensorFlow or Keras, consider using triplet loss, where you set up one positive and one negative example per anchor (see the sketch after this list).

  • Consider reading up on the state of the art for your task: https://github.com/grausof/keras-sincnet

  • Consider reading https://arxiv.org/abs/1503.03832 and applying it to speech recognition.
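One concrete pitfall behind the first point: in the posted code, frames cut from the same WAV file can land in both the training and the test split, which inflates the test score. Below is a minimal sketch of a group-aware split with scikit-learn (already a dependency of your first version); the arrays X, y and groups are random stand-ins, not data from your post.

import numpy as np
from sklearn.model_selection import GroupShuffleSplit

# Hypothetical stand-ins: 1000 MFCC frames of 13 coefficients,
# 4 speakers, and a source-file id for every frame.
rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 13))
y = rng.integers(0, 4, size=1000)
groups = rng.integers(0, 50, size=1000)  # 50 source files

# Keep all frames of a given file on the same side of the split,
# so near-duplicate frames from one clip cannot leak into the test set.
splitter = GroupShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
train_idx, test_idx = next(splitter.split(X, y, groups=groups))
X_train, X_test = X[train_idx], X[test_idx]
y_train, y_test = y[train_idx], y[test_idx]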
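On the triplet-loss point, here is a minimal sketch in tf.keras of what that objective looks like: an embedding network is pushed to place an anchor frame closer to a same-speaker (positive) frame than to a different-speaker (negative) frame. The layer sizes, the margin, and the random stand-in batches are illustrative assumptions, not values from your post.

import tensorflow as tf

def triplet_loss(anchor, positive, negative, margin=0.2):
    # distance-based triplet loss, as in the FaceNet paper linked above
    pos_dist = tf.reduce_sum(tf.square(anchor - positive), axis=-1)
    neg_dist = tf.reduce_sum(tf.square(anchor - negative), axis=-1)
    return tf.reduce_mean(tf.maximum(pos_dist - neg_dist + margin, 0.0))

# hypothetical embedding network: 13 MFCC coefficients -> 32-dim unit vector
embed = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation='relu', input_shape=(13,)),
    tf.keras.layers.Dense(32),
    tf.keras.layers.Lambda(lambda v: tf.math.l2_normalize(v, axis=-1)),
])

# one toy training step on random stand-in batches
a = tf.random.normal((16, 13))  # anchor frames
p = tf.random.normal((16, 13))  # frames from the same speaker
n = tf.random.normal((16, 13))  # frames from a different speaker
opt = tf.keras.optimizers.Adam()
with tf.GradientTape() as tape:
    loss = triplet_loss(embed(a), embed(p), embed(n))
grads = tape.gradient(loss, embed.trainable_variables)
opt.apply_gradients(zip(grads, embed.trainable_variables))

At test time you would compare embeddings (e.g. distance to a per-speaker average embedding) instead of reading a softmax output.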

The simplest way to improve your results is to add CNN layers that extract features from the MFCCs; a minimal sketch of that idea follows.
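As an illustration, here is a small hypothetical Conv2D stack over a window of consecutive MFCC frames treated as a one-channel image, so the filters see temporal context instead of a single 13x1 frame. The window length (32 frames), filter counts, and speaker count (6) are assumptions, not values from your post.

import tensorflow as tf

# input: 32 consecutive MFCC frames x 13 coefficients x 1 channel
model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(32, 13, 1)),
    tf.keras.layers.Conv2D(16, (3, 3), activation='relu', padding='same'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(6, activation='softmax'),  # one unit per speaker
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.summary()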
