style: standardize code style

nl 2021-11-27 20:51:32 +08:00
parent 3fd36cbf13
commit 50ae952cd6
4 changed files with 69 additions and 51 deletions

speech_features/__init__.py

@@ -18,4 +18,9 @@
 # along with ASRT. If not, see <https://www.gnu.org/licenses/>.
 # ============================================================================
-from .speech_features import *
+'''
+@author: nl8590687
+ASRT speech recognition acoustic feature module
+'''
+from .speech_features import *

speech_features/base.py

@@ -20,6 +20,12 @@
 # calculate filterbank features. Provides e.g. fbank and mfcc features for use in ASR applications
 # Author: James Lyons 2012
+'''
+@author: nl8590687
+Basic function implementations for the ASRT speech recognition acoustic feature library
+'''
 from __future__ import division
 import numpy
 from scipy.fftpack import dct
@@ -29,7 +35,7 @@ from .sigproc import preemphasis, framesig, powspec
 def calculate_nfft(samplerate, winlen):
     """Calculates the FFT size as a power of two greater than or equal to
     the number of samples in a single window length.
     Having an FFT less than the window length loses precision by dropping
     many of the samples; a longer FFT than the window allows zero-padding
     of the FFT buffer, which is neutral in terms of frequency-domain conversion.
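The body of calculate_nfft falls outside this hunk; a minimal sketch consistent with the docstring (smallest power of two not below the window length in samples) would be:

def calculate_nfft(samplerate, winlen):
    window_length_samples = winlen * samplerate   # e.g. 0.025 * 16000 = 400
    nfft = 1
    while nfft < window_length_samples:
        nfft *= 2
    return nfft   # 512 for a 25 ms window at 16 kHz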
@@ -218,4 +224,4 @@ def delta(feat, N):
     padded = numpy.pad(feat, ((N, N), (0, 0)), mode='edge')  # padded version of feat
     for t in range(NUMFRAMES):
         delta_feat[t] = numpy.dot(numpy.arange(-N, N+1), padded[t : t+2*N+1]) / denominator  # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
-    return delta_feat
\ No newline at end of file
+    return delta_feat
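For context, delta computes regression-based differential features; the denominator (defined above this hunk) is the standard 2 * sum(n**2 for n in range(1, N+1)), i.e. 10 for N=2. A hedged usage sketch with made-up data:

import numpy
feat = numpy.random.rand(100, 13)   # 100 frames of 13 MFCCs (made-up data)
d = delta(feat, 2)                  # first-order deltas, shape (100, 13)
dd = delta(d, 2)                    # second-order deltas, shape (100, 13)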

speech_features/sigproc.py

@@ -20,10 +20,16 @@
 # This file includes routines for basic signal processing including framing and computing power spectra.
 # Author: James Lyons 2012
+'''
+@author: nl8590687
+Signal-processing function library for computing ASRT speech recognition acoustic features
+'''
 import decimal
-import numpy
-import math
 import logging
+import math
+import numpy
 
 def round_half_up(number):
@@ -135,7 +141,7 @@ def logpowspec(frames, NFFT, norm=1):
     :param norm: If norm=1, the log power spectrum is normalised so that the max value (across all frames) is 0.
     :returns: If frames is an NxD matrix, output will be Nx(NFFT/2+1). Each row will be the log power spectrum of the corresponding frame.
     """
-    ps = powspec(frames, NFFT);
+    ps = powspec(frames, NFFT)
     ps[ps <= 1e-30] = 1e-30
     lps = 10 * numpy.log10(ps)
     if norm:
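A brief usage sketch (shapes are hypothetical); the 1e-30 floor keeps the logarithm finite, since 10 * log10(1e-30) = -300 dB:

import numpy
frames = numpy.random.rand(98, 400)   # 98 frames of 400 samples (made-up data)
lps = logpowspec(frames, 512)         # shape (98, 257), i.e. Nx(NFFT/2+1)
# with norm=1 (the default), the maximum over all frames is shifted to 0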
@@ -150,4 +156,4 @@ def preemphasis(signal, coeff=0.95):
     :param coeff: The preemphasis coefficient. 0 is no filter, default is 0.95.
     :returns: the filtered signal.
     """
-    return numpy.append(signal[0], signal[1:] - coeff * signal[:-1])
\ No newline at end of file
+    return numpy.append(signal[0], signal[1:] - coeff * signal[:-1])
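The filter is y[0] = x[0] and y[n] = x[n] - coeff * x[n-1]; a tiny worked example:

import numpy
x = numpy.array([1.0, 2.0, 3.0, 4.0])
y = preemphasis(x, coeff=0.95)
# y == [1.0, 2.0 - 0.95*1.0, 3.0 - 0.95*2.0, 4.0 - 0.95*3.0]
#   == [1.0, 1.05, 1.1, 1.15]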

speech_features/speech_features.py

@@ -20,13 +20,11 @@
 """
 @author: nl8590687
-ASRT speech recognition built-in acoustic feature extraction module
+ASRT speech recognition built-in acoustic feature extraction module, defining several commonly used acoustic feature classes
 """
 import random
 import numpy as np
-import math
 from scipy.fftpack import fft
 from .base import mfcc, delta, logfbank
@@ -38,10 +36,12 @@ class SpeechFeatureMeta():
     '''
     def __init__(self, framesamplerate = 16000):
         self.framesamplerate = framesamplerate
-        pass
 
     def run(self, wavsignal, fs = 16000):
-        raise NotImplementedError('[ASRT] Get speech feature function is not implemented. Please define "a run method"')
+        '''
+        run method
+        '''
+        raise NotImplementedError('[ASRT] `run()` method is not implemented.')
 
 class MFCC(SpeechFeatureMeta):
     '''
@@ -56,7 +56,12 @@ class MFCC(SpeechFeatureMeta):
     :param nfilt: the number of filters in the filterbank, default 26.
     :param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97.
     '''
-    def __init__(self, framesamplerate = 16000, winlen=0.025, winstep=0.01, numcep=13, nfilt=26, preemph=0.97):
+    def __init__(self, framesamplerate = 16000,
+                winlen=0.025,
+                winstep=0.01,
+                numcep=13,
+                nfilt=26,
+                preemph=0.97):
         self.framesamplerate = framesamplerate
         self.winlen = winlen
         self.winstep = winstep
@@ -64,7 +69,7 @@ class MFCC(SpeechFeatureMeta):
         self.nfilt = nfilt
         self.preemph = preemph
         super().__init__(framesamplerate)
 
     def run(self, wavsignal, fs = 16000):
         '''
         Compute the MFCC acoustic features, including the static features plus first- and second-order deltas
@@ -73,7 +78,8 @@ class MFCC(SpeechFeatureMeta):
         '''
         wavsignal = np.array(wavsignal, dtype=np.float)
         # get the input features
-        feat_mfcc=mfcc(wavsignal[0], samplerate=self.framesamplerate, winlen=self.winlen, winstep=self.winstep, numcep=self.numcep, nfilt=self.nfilt, preemph=self.preemph)
+        feat_mfcc=mfcc(wavsignal[0], samplerate=self.framesamplerate, winlen=self.winlen,
+                    winstep=self.winstep, numcep=self.numcep, nfilt=self.nfilt, preemph=self.preemph)
         feat_mfcc_d=delta(feat_mfcc, 2)
         feat_mfcc_dd=delta(feat_mfcc_d, 2)
         # the return values are the MFCC feature matrix and its first- and second-order delta matrices
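The return statement falls outside this hunk; assuming the three matrices are concatenated along the feature axis (an assumption, not shown here), a usage sketch:

import numpy as np
extractor = MFCC(framesamplerate=16000)
wavsignal = np.random.randint(-32768, 32767, (1, 16000))   # 1 s of fake 16 kHz audio
feat = extractor.run(wavsignal, fs=16000)
# 13 static + 13 delta + 13 delta-delta = 39 features per frame under that assumption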
@@ -86,7 +92,7 @@ class Logfbank(SpeechFeatureMeta):
     '''
     def __init__(self, framesamplerate = 16000):
         super().__init__(framesamplerate)
 
     def run(self, wavsignal, fs = 16000):
         wavsignal = np.array(wavsignal, dtype=np.float)
         # get the input features
@@ -94,9 +100,13 @@
         return wav_feature
 
 class Spectrogram(SpeechFeatureMeta):
+    '''
+    ASRT speech recognition built-in spectrogram acoustic feature extraction class
+    '''
     def __init__(self, framesamplerate = 16000, timewindow = 25, timeshift = 10):
         self.time_window = timewindow
         self.window_length = int(framesamplerate / 1000 * self.time_window)  # formula for the window length; currently always the fixed value 400
         self.timeshift = timeshift
         '''
         # reserved for future use with different sampling rates
@@ -107,47 +117,45 @@ class Spectrogram(SpeechFeatureMeta):
         self.x=np.linspace(0, 400 - 1, 400, dtype = np.int64)
         self.w = 0.54 - 0.46 * np.cos(2 * np.pi * (self.x) / (400 - 1) )  # Hamming window
         super().__init__(framesamplerate)
 
     def run(self, wavsignal, fs = 16000):
-        if(16000 != fs):
+        if fs != 16000:
             raise ValueError('[Error] ASRT currently only supports wav audio files with a sampling rate of 16000 Hz, but this audio is ' + str(fs) + ' Hz. ')
 
         # apply a time window to the wav waveform, shifting by 10 ms each step
         time_window = 25  # unit: ms
         window_length = int(fs / 1000 * time_window)  # formula for the window length; currently always the fixed value 400
 
         wav_arr = np.array(wavsignal)
         #wav_length = len(wavsignal[0])
-        wav_length = wav_arr.shape[1]
+        #wav_length = wav_arr.shape[1]
         range0_end = int(len(wavsignal[0])/fs*1000 - time_window) // 10 + 1  # where the loop stops, i.e. the number of windows produced
         data_input = np.zeros((range0_end, window_length // 2), dtype = np.float)  # holds the final frequency-domain feature data
         data_line = np.zeros((1, window_length), dtype = np.float)
 
         for i in range(0, range0_end):
             p_start = i * 160
             p_end = p_start + 400
             data_line = wav_arr[0, p_start:p_end]
             data_line = data_line * self.w  # apply the window
             #data_line = np.abs(fft(data_line)) / wav_length
             data_line = np.abs(fft(data_line))
             data_input[i]=data_line[0: window_length // 2]  # take 400 // 2 = 200 values, i.e. half the data, since the spectrum is symmetric
 
         #print(data_input.shape)
         data_input = np.log(data_input + 1)
         return data_input
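Worked numbers for the framing above at fs = 16000: a 25 ms window is 400 samples and a 10 ms shift is 160 samples, so one second of audio yields 98 windows of 200 frequency bins each:

fs = 16000
samples = 16000                                                 # 1 s of audio
time_window = 25                                                # ms
window_length = int(fs / 1000 * time_window)                    # 400 samples
range0_end = int(samples / fs * 1000 - time_window) // 10 + 1   # (1000 - 25) // 10 + 1 = 98
output_shape = (range0_end, window_length // 2)                 # (98, 200)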
 class SpecAugment(SpeechFeatureMeta):
     '''
-    Reimplementation of Google's SpecAugment data augmentation algorithm
+    Reimplementation of Google's SpecAugment data augmentation feature algorithm, built on the basic Spectrogram feature
     '''
     def __init__(self, framesamplerate = 16000, timewindow = 25, timeshift = 10):
         self.time_window = timewindow
         self.window_length = int(framesamplerate / 1000 * self.time_window)  # formula for the window length; currently always the fixed value 400
         self.timeshift = timeshift
         '''
         # reserved for future use with different sampling rates
@@ -158,36 +166,33 @@ class SpecAugment(SpeechFeatureMeta):
         self.x=np.linspace(0, 400 - 1, 400, dtype = np.int64)
         self.w = 0.54 - 0.46 * np.cos(2 * np.pi * (self.x) / (400 - 1) )  # Hamming window
         super().__init__(framesamplerate)
 
     def run(self, wavsignal, fs = 16000):
-        if(16000 != fs):
+        if fs != 16000:
             raise ValueError('[Error] ASRT currently only supports wav audio files with a sampling rate of 16000 Hz, but this audio is ' + str(fs) + ' Hz. ')
 
         # apply a time window to the wav waveform, shifting by 10 ms each step
         time_window = 25  # unit: ms
         window_length = int(fs / 1000 * time_window)  # formula for the window length; currently always the fixed value 400
 
         wav_arr = np.array(wavsignal)
         #wav_length = len(wavsignal[0])
-        wav_length = wav_arr.shape[1]
+        #wav_length = wav_arr.shape[1]
         range0_end = int(len(wavsignal[0])/fs*1000 - time_window) // 10 + 1  # where the loop stops, i.e. the number of windows produced
         data_input = np.zeros((range0_end, window_length // 2), dtype = np.float)  # holds the final frequency-domain feature data
         data_line = np.zeros((1, window_length), dtype = np.float)
 
         for i in range(0, range0_end):
             p_start = i * 160
             p_end = p_start + 400
             data_line = wav_arr[0, p_start:p_end]
             data_line = data_line * self.w  # apply the window
             #data_line = np.abs(fft(data_line)) / wav_length
             data_line = np.abs(fft(data_line))
             data_input[i]=data_line[0: window_length // 2]  # take 400 // 2 = 200 values, i.e. half the data, since the spectrum is symmetric
 
         #print(data_input.shape)
         data_input = np.log(data_input + 1)
@@ -199,17 +204,13 @@ class SpecAugment(SpeechFeatureMeta):
         v_start = random.randint(1, data_input.shape[1])
         v_width = random.randint(1, 100)
 
-        if(mode <= 60): # unmasked features 60%
+        if mode <= 60:  # unmasked features, 60%
             pass
-        elif(mode > 60 and mode <= 75): # horizontal masking 15%
+        elif 60 < mode <= 75:  # horizontal (time) masking, 15%
             data_input[h_start:h_start+h_width, :] = 0
-            pass
-        elif(mode > 75 and mode <= 90): # vertical masking 15%
+        elif 75 < mode <= 90:  # vertical (frequency) masking, 15%
             data_input[:, v_start:v_start+v_width] = 0
-            pass
         else:  # both maskings overlaid, 10%
             data_input[h_start:h_start+h_width, :] = 0
             data_input[:, v_start:v_start+v_width] = 0
-            pass
 
         return data_input
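A hedged usage sketch; mode is presumably drawn uniformly from 1-100 above this hunk (an assumption, since the draw is not shown), giving roughly 60% unmasked, 15% time-masked, 15% frequency-masked, and 10% both:

import numpy as np
augmenter = SpecAugment(framesamplerate=16000)
wavsignal = np.random.randint(-32768, 32767, (1, 16000))   # 1 s of fake 16 kHz audio
feat = augmenter.run(wavsignal, fs=16000)                   # (98, 200) spectrogram with random masking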