# wav_frontend.py
  1. # Copyright (c) Alibaba, Inc. and its affiliates.
  2. # Part of the implementation is borrowed from espnet/espnet.
  3. from typing import Tuple
  4. import numpy as np
  5. import torch
  6. import torchaudio.compliance.kaldi as kaldi
  7. from torch.nn.utils.rnn import pad_sequence
  8. from typeguard import check_argument_types
  9. import funasr.models.frontend.eend_ola_feature as eend_ola_feature
  10. from funasr.models.frontend.abs_frontend import AbsFrontend
  11. def load_cmvn(cmvn_file):
  12. with open(cmvn_file, 'r', encoding='utf-8') as f:
  13. lines = f.readlines()
  14. means_list = []
  15. vars_list = []
  16. for i in range(len(lines)):
  17. line_item = lines[i].split()
  18. if line_item[0] == '<AddShift>':
  19. line_item = lines[i + 1].split()
  20. if line_item[0] == '<LearnRateCoef>':
  21. add_shift_line = line_item[3:(len(line_item) - 1)]
  22. means_list = list(add_shift_line)
  23. continue
  24. elif line_item[0] == '<Rescale>':
  25. line_item = lines[i + 1].split()
  26. if line_item[0] == '<LearnRateCoef>':
  27. rescale_line = line_item[3:(len(line_item) - 1)]
  28. vars_list = list(rescale_line)
  29. continue
  30. means = np.array(means_list).astype(np.float)
  31. vars = np.array(vars_list).astype(np.float)
  32. cmvn = np.array([means, vars])
  33. cmvn = torch.as_tensor(cmvn, dtype=torch.float32)
  34. return cmvn
  35. def apply_cmvn(inputs, cmvn): # noqa
  36. """
  37. Apply CMVN with mvn data
  38. """
  39. device = inputs.device
  40. dtype = inputs.dtype
  41. frame, dim = inputs.shape
  42. means = cmvn[0:1, :dim]
  43. vars = cmvn[1:2, :dim]
  44. inputs += means.to(device)
  45. inputs *= vars.to(device)
  46. return inputs.type(torch.float32)
  47. def apply_lfr(inputs, lfr_m, lfr_n):
  48. LFR_inputs = []
  49. T = inputs.shape[0]
  50. T_lfr = int(np.ceil(T / lfr_n))
  51. left_padding = inputs[0].repeat((lfr_m - 1) // 2, 1)
  52. inputs = torch.vstack((left_padding, inputs))
  53. T = T + (lfr_m - 1) // 2
  54. for i in range(T_lfr):
  55. if lfr_m <= T - i * lfr_n:
  56. LFR_inputs.append((inputs[i * lfr_n:i * lfr_n + lfr_m]).view(1, -1))
  57. else: # process last LFR frame
  58. num_padding = lfr_m - (T - i * lfr_n)
  59. frame = (inputs[i * lfr_n:]).view(-1)
  60. for _ in range(num_padding):
  61. frame = torch.hstack((frame, inputs[-1]))
  62. LFR_inputs.append(frame)
  63. LFR_outputs = torch.vstack(LFR_inputs)
  64. return LFR_outputs.type(torch.float32)
class WavFrontend(AbsFrontend):
    """Conventional frontend structure for ASR.

    Pipeline: waveform -> Kaldi fbank -> optional LFR stacking -> optional
    CMVN. `forward` runs the whole pipeline; `forward_fbank` and
    `forward_lfr_cmvn` expose the two halves separately.
    """

    def __init__(
            self,
            cmvn_file: str = None,
            fs: int = 16000,
            window: str = 'hamming',
            n_mels: int = 80,
            frame_length: int = 25,
            frame_shift: int = 10,
            filter_length_min: int = -1,
            filter_length_max: int = -1,
            lfr_m: int = 1,
            lfr_n: int = 1,
            dither: float = 1.0,
            snip_edges: bool = True,
            upsacle_samples: bool = True,
    ):
        """Store frontend settings and load CMVN stats if a file is given.

        Args:
            cmvn_file: optional path to Kaldi-style CMVN statistics.
            fs: sample rate in Hz.
            window: window type passed to kaldi.fbank.
            n_mels: number of mel bins.
            frame_length: frame length in milliseconds.
            frame_shift: frame shift in milliseconds.
            filter_length_min/filter_length_max: stored but unused here.
            lfr_m: LFR stacking factor (1 disables LFR).
            lfr_n: LFR hop (1 disables LFR).
            dither: dithering constant for fbank.
            snip_edges: Kaldi snip_edges flag.
            upsacle_samples: (sic, typo kept for compatibility) if True,
                scale waveform to int16 range before fbank.
        """
        assert check_argument_types()
        super().__init__()
        self.fs = fs
        self.window = window
        self.n_mels = n_mels
        self.frame_length = frame_length
        self.frame_shift = frame_shift
        self.filter_length_min = filter_length_min
        self.filter_length_max = filter_length_max
        self.lfr_m = lfr_m
        self.lfr_n = lfr_n
        self.cmvn_file = cmvn_file
        self.dither = dither
        self.snip_edges = snip_edges
        self.upsacle_samples = upsacle_samples
        self.cmvn = None if self.cmvn_file is None else load_cmvn(self.cmvn_file)

    def output_size(self) -> int:
        """Feature dimension per output frame (n_mels * lfr_m)."""
        return self.n_mels * self.lfr_m

    def forward(
            self,
            input: torch.Tensor,
            input_lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Full pipeline: fbank -> optional LFR -> optional CMVN.

        Args:
            input: (batch, samples) waveform batch.
            input_lengths: per-utterance sample counts.

        Returns:
            (padded features (batch, T, D), per-utterance frame counts).
        """
        batch_size = input.size(0)
        feats = []
        feats_lens = []
        for i in range(batch_size):
            waveform_length = input_lengths[i]
            waveform = input[i][:waveform_length]
            if self.upsacle_samples:
                # rescale [-1, 1) floats to int16 range expected by Kaldi fbank
                waveform = waveform * (1 << 15)
            waveform = waveform.unsqueeze(0)
            mat = kaldi.fbank(waveform,
                              num_mel_bins=self.n_mels,
                              frame_length=self.frame_length,
                              frame_shift=self.frame_shift,
                              dither=self.dither,
                              energy_floor=0.0,
                              window_type=self.window,
                              sample_frequency=self.fs,
                              snip_edges=self.snip_edges)
            if self.lfr_m != 1 or self.lfr_n != 1:
                mat = apply_lfr(mat, self.lfr_m, self.lfr_n)
            if self.cmvn is not None:
                mat = apply_cmvn(mat, self.cmvn)
            feat_length = mat.size(0)
            feats.append(mat)
            feats_lens.append(feat_length)
        feats_lens = torch.as_tensor(feats_lens)
        feats_pad = pad_sequence(feats,
                                 batch_first=True,
                                 padding_value=0.0)
        return feats_pad, feats_lens

    def forward_fbank(
            self,
            input: torch.Tensor,
            input_lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Fbank extraction only (no LFR / CMVN).

        NOTE(review): unlike `forward`, this always scales samples and does
        not pass `snip_edges` — presumably intentional, but worth confirming.
        """
        batch_size = input.size(0)
        feats = []
        feats_lens = []
        for i in range(batch_size):
            waveform_length = input_lengths[i]
            waveform = input[i][:waveform_length]
            waveform = waveform * (1 << 15)
            waveform = waveform.unsqueeze(0)
            mat = kaldi.fbank(waveform,
                              num_mel_bins=self.n_mels,
                              frame_length=self.frame_length,
                              frame_shift=self.frame_shift,
                              dither=self.dither,
                              energy_floor=0.0,
                              window_type=self.window,
                              sample_frequency=self.fs)
            feat_length = mat.size(0)
            feats.append(mat)
            feats_lens.append(feat_length)
        feats_lens = torch.as_tensor(feats_lens)
        feats_pad = pad_sequence(feats,
                                 batch_first=True,
                                 padding_value=0.0)
        return feats_pad, feats_lens

    def forward_lfr_cmvn(
            self,
            input: torch.Tensor,
            input_lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """LFR stacking + CMVN over precomputed fbank features.

        Args:
            input: (batch, T, n_mels) fbank features.
            input_lengths: valid frame count per utterance.

        Returns:
            (padded features, per-utterance frame counts after LFR).
        """
        batch_size = input.size(0)
        feats = []
        feats_lens = []
        for i in range(batch_size):
            mat = input[i, :input_lengths[i], :]
            if self.lfr_m != 1 or self.lfr_n != 1:
                mat = apply_lfr(mat, self.lfr_m, self.lfr_n)
            if self.cmvn is not None:
                mat = apply_cmvn(mat, self.cmvn)
            feat_length = mat.size(0)
            feats.append(mat)
            feats_lens.append(feat_length)
        feats_lens = torch.as_tensor(feats_lens)
        feats_pad = pad_sequence(feats,
                                 batch_first=True,
                                 padding_value=0.0)
        return feats_pad, feats_lens
class WavFrontendOnline(AbsFrontend):
    """Conventional frontend structure for streaming ASR/VAD.

    Stateful variant of WavFrontend for chunked input. Between calls it
    keeps: `input_cache` (raw samples not yet covering a full frame),
    `reserve_waveforms` (samples backing frames held for LFR context),
    `lfr_splice_cache` (feature frames carried over as LFR left context),
    and the last fbank output (`fbanks`, `fbanks_lens`). Only batch size 1
    is supported by `forward`.
    """

    def __init__(
            self,
            cmvn_file: str = None,
            fs: int = 16000,
            window: str = 'hamming',
            n_mels: int = 80,
            frame_length: int = 25,
            frame_shift: int = 10,
            filter_length_min: int = -1,
            filter_length_max: int = -1,
            lfr_m: int = 1,
            lfr_n: int = 1,
            dither: float = 1.0,
            snip_edges: bool = True,
            upsacle_samples: bool = True,
    ):
        assert check_argument_types()
        super().__init__()
        self.fs = fs
        self.window = window
        self.n_mels = n_mels
        self.frame_length = frame_length
        self.frame_shift = frame_shift
        # frame length / shift converted from milliseconds to sample counts
        self.frame_sample_length = int(self.frame_length * self.fs / 1000)
        self.frame_shift_sample_length = int(self.frame_shift * self.fs / 1000)
        self.filter_length_min = filter_length_min
        self.filter_length_max = filter_length_max
        self.lfr_m = lfr_m
        self.lfr_n = lfr_n
        self.cmvn_file = cmvn_file
        self.dither = dither
        self.snip_edges = snip_edges
        self.upsacle_samples = upsacle_samples
        # streaming state (reset via cache_reset)
        self.waveforms = None
        self.reserve_waveforms = None
        self.fbanks = None
        self.fbanks_lens = None
        self.cmvn = None if self.cmvn_file is None else load_cmvn(self.cmvn_file)
        self.input_cache = None
        self.lfr_splice_cache = []

    def output_size(self) -> int:
        """Feature dimension per output frame (n_mels * lfr_m)."""
        return self.n_mels * self.lfr_m

    @staticmethod
    def apply_cmvn(inputs: torch.Tensor, cmvn: torch.Tensor) -> torch.Tensor:
        """Apply CMVN (add-shift then rescale) in place; returns float32.

        Tiles the statistics over frames via numpy before applying them.
        """
        device = inputs.device
        dtype = inputs.dtype
        frame, dim = inputs.shape
        means = np.tile(cmvn[0:1, :dim], (frame, 1))
        vars = np.tile(cmvn[1:2, :dim], (frame, 1))
        inputs += torch.from_numpy(means).type(dtype).to(device)
        inputs *= torch.from_numpy(vars).type(dtype).to(device)
        return inputs.type(torch.float32)

    @staticmethod
    # inputs tensor has catted the cache tensor
    def apply_lfr(inputs: torch.Tensor, lfr_m: int, lfr_n: int, is_final: bool = False) -> Tuple[
            torch.Tensor, torch.Tensor, int]:
        """Streaming LFR stacking.

        Unlike the offline apply_lfr, incomplete trailing windows are only
        padded when `is_final`; otherwise the loop stops and the unconsumed
        frames are returned as the new splice cache.

        Returns:
            (stacked float32 features, leftover frames for the next chunk,
             index of the first leftover frame).
        """
        LFR_inputs = []
        T = inputs.shape[0]  # include the right context
        T_lfr = int(np.ceil((T - (lfr_m - 1) // 2) / lfr_n))  # minus the right context: (lfr_m - 1) // 2
        splice_idx = T_lfr
        for i in range(T_lfr):
            if lfr_m <= T - i * lfr_n:
                LFR_inputs.append((inputs[i * lfr_n:i * lfr_n + lfr_m]).view(1, -1))
            else:  # process last LFR frame
                if is_final:
                    # pad by repeating the final frame, as in the offline path
                    num_padding = lfr_m - (T - i * lfr_n)
                    frame = (inputs[i * lfr_n:]).view(-1)
                    for _ in range(num_padding):
                        frame = torch.hstack((frame, inputs[-1]))
                    LFR_inputs.append(frame)
                else:
                    # update splice_idx and break the circle
                    splice_idx = i
                    break
        splice_idx = min(T - 1, splice_idx * lfr_n)
        lfr_splice_cache = inputs[splice_idx:, :]
        LFR_outputs = torch.vstack(LFR_inputs)
        return LFR_outputs.type(torch.float32), lfr_splice_cache, splice_idx

    @staticmethod
    def compute_frame_num(sample_length: int, frame_sample_length: int, frame_shift_sample_length: int) -> int:
        """Number of whole frames obtainable from `sample_length` samples (0 if fewer than one frame)."""
        frame_num = int((sample_length - frame_sample_length) / frame_shift_sample_length + 1)
        return frame_num if frame_num >= 1 and sample_length >= frame_sample_length else 0

    def forward_fbank(
            self,
            input: torch.Tensor,
            input_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Extract fbank from the cached + new samples.

        Prepends `input_cache`, extracts as many whole frames as possible,
        and keeps the unconsumed sample tail in `input_cache` for the next
        call. Returns empty tensors when no whole frame is available yet.
        """
        batch_size = input.size(0)
        if self.input_cache is None:
            self.input_cache = torch.empty(0)
        input = torch.cat((self.input_cache, input), dim=1)
        frame_num = self.compute_frame_num(input.shape[-1], self.frame_sample_length, self.frame_shift_sample_length)
        # update self.in_cache: keep the samples not consumed by whole frames
        self.input_cache = input[:, -(input.shape[-1] - frame_num * self.frame_shift_sample_length):]
        waveforms = torch.empty(0)
        feats_pad = torch.empty(0)
        feats_lens = torch.empty(0)
        if frame_num:
            waveforms = []
            feats = []
            feats_lens = []
            for i in range(batch_size):
                waveform = input[i]
                # we need accurate wave samples that used for fbank extracting
                waveforms.append(
                    waveform[:((frame_num - 1) * self.frame_shift_sample_length + self.frame_sample_length)])
                waveform = waveform * (1 << 15)
                waveform = waveform.unsqueeze(0)
                mat = kaldi.fbank(waveform,
                                  num_mel_bins=self.n_mels,
                                  frame_length=self.frame_length,
                                  frame_shift=self.frame_shift,
                                  dither=self.dither,
                                  energy_floor=0.0,
                                  window_type=self.window,
                                  sample_frequency=self.fs)
                feat_length = mat.size(0)
                feats.append(mat)
                feats_lens.append(feat_length)
            waveforms = torch.stack(waveforms)
            feats_lens = torch.as_tensor(feats_lens)
            feats_pad = pad_sequence(feats,
                                     batch_first=True,
                                     padding_value=0.0)
            self.fbanks = feats_pad
            import copy
            self.fbanks_lens = copy.deepcopy(feats_lens)
        return waveforms, feats_pad, feats_lens

    def get_fbank(self) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return the fbank output of the most recent forward_fbank call."""
        return self.fbanks, self.fbanks_lens

    def forward_lfr_cmvn(
            self,
            input: torch.Tensor,
            input_lengths: torch.Tensor,
            is_final: bool = False
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Streaming LFR + CMVN; updates `lfr_splice_cache` per utterance.

        NOTE(review): `lfr_splice_frame_idx` is only assigned when LFR is
        enabled (lfr_m/lfr_n != 1); with LFR disabled the append below would
        raise NameError — confirm this path is only used with LFR on.
        """
        batch_size = input.size(0)
        feats = []
        feats_lens = []
        lfr_splice_frame_idxs = []
        for i in range(batch_size):
            mat = input[i, :input_lengths[i], :]
            if self.lfr_m != 1 or self.lfr_n != 1:
                # update self.lfr_splice_cache in self.apply_lfr
                mat, self.lfr_splice_cache[i], lfr_splice_frame_idx = self.apply_lfr(mat, self.lfr_m, self.lfr_n,
                                                                                    is_final)
            if self.cmvn_file is not None:
                mat = self.apply_cmvn(mat, self.cmvn)
            feat_length = mat.size(0)
            feats.append(mat)
            feats_lens.append(feat_length)
            lfr_splice_frame_idxs.append(lfr_splice_frame_idx)
        feats_lens = torch.as_tensor(feats_lens)
        feats_pad = pad_sequence(feats,
                                 batch_first=True,
                                 padding_value=0.0)
        lfr_splice_frame_idxs = torch.as_tensor(lfr_splice_frame_idxs)
        return feats_pad, feats_lens, lfr_splice_frame_idxs

    def forward(
            self, input: torch.Tensor, input_lengths: torch.Tensor, is_final: bool = False, reset: bool = False
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Streaming frontend entry point (batch size must be 1).

        Accumulates samples until enough frames exist to emit LFR features,
        tracking which raw samples back the frames reserved as context.
        Returns (empty tensor, lengths) while buffering; resets all caches
        when `is_final`.
        """
        if reset:
            self.cache_reset()
        batch_size = input.shape[0]
        assert batch_size == 1, 'we support to extract feature online only when the batch size is equal to 1 now'
        waveforms, feats, feats_lengths = self.forward_fbank(input, input_lengths)  # input shape: B T D
        if feats.shape[0]:
            self.waveforms = waveforms if self.reserve_waveforms is None else torch.cat(
                (self.reserve_waveforms, waveforms), dim=1)
            if not self.lfr_splice_cache:  # initialize the splice cache
                for i in range(batch_size):
                    # replicate the first frame as LFR left context
                    self.lfr_splice_cache.append(feats[i][0, :].unsqueeze(dim=0).repeat((self.lfr_m - 1) // 2, 1))
            # need the number of the input frames + self.lfr_splice_cache[0].shape[0] is greater than self.lfr_m
            if feats_lengths[0] + self.lfr_splice_cache[0].shape[0] >= self.lfr_m:
                lfr_splice_cache_tensor = torch.stack(self.lfr_splice_cache)  # B T D
                feats = torch.cat((lfr_splice_cache_tensor, feats), dim=1)
                feats_lengths += lfr_splice_cache_tensor[0].shape[0]
                frame_from_waveforms = int(
                    (self.waveforms.shape[1] - self.frame_sample_length) / self.frame_shift_sample_length + 1)
                minus_frame = (self.lfr_m - 1) // 2 if self.reserve_waveforms is None else 0
                feats, feats_lengths, lfr_splice_frame_idxs = self.forward_lfr_cmvn(feats, feats_lengths, is_final)
                if self.lfr_m == 1:
                    self.reserve_waveforms = None
                else:
                    # keep the raw samples backing the frames held as splice cache
                    reserve_frame_idx = lfr_splice_frame_idxs[0] - minus_frame
                    self.reserve_waveforms = self.waveforms[:, reserve_frame_idx * self.frame_shift_sample_length:frame_from_waveforms * self.frame_shift_sample_length]
                    sample_length = (frame_from_waveforms - 1) * self.frame_shift_sample_length + self.frame_sample_length
                    self.waveforms = self.waveforms[:, :sample_length]
            else:
                # not enough frames yet: update self.reserve_waveforms and self.lfr_splice_cache
                self.reserve_waveforms = self.waveforms[:,
                                         :-(self.frame_sample_length - self.frame_shift_sample_length)]
                for i in range(batch_size):
                    self.lfr_splice_cache[i] = torch.cat((self.lfr_splice_cache[i], feats[i]), dim=0)
                return torch.empty(0), feats_lengths
        else:
            if is_final:
                # flush: emit whatever is left in the splice cache
                self.waveforms = waveforms if self.reserve_waveforms is None else self.reserve_waveforms
                feats = torch.stack(self.lfr_splice_cache)
                feats_lengths = torch.zeros(batch_size, dtype=torch.int) + feats.shape[1]
                feats, feats_lengths, _ = self.forward_lfr_cmvn(feats, feats_lengths, is_final)
        if is_final:
            self.cache_reset()
        return feats, feats_lengths

    def get_waveforms(self):
        """Return the raw samples backing the most recently emitted features."""
        return self.waveforms

    def cache_reset(self):
        """Clear all streaming caches to start a new utterance."""
        self.reserve_waveforms = None
        self.input_cache = None
        self.lfr_splice_cache = []
  412. class WavFrontendMel23(AbsFrontend):
  413. """Conventional frontend structure for ASR.
  414. """
  415. def __init__(
  416. self,
  417. fs: int = 16000,
  418. frame_length: int = 25,
  419. frame_shift: int = 10,
  420. lfr_m: int = 1,
  421. lfr_n: int = 1,
  422. ):
  423. assert check_argument_types()
  424. super().__init__()
  425. self.fs = fs
  426. self.frame_length = frame_length
  427. self.frame_shift = frame_shift
  428. self.lfr_m = lfr_m
  429. self.lfr_n = lfr_n
  430. self.n_mels = 23
  431. def output_size(self) -> int:
  432. return self.n_mels * (2 * self.lfr_m + 1)
  433. def forward(
  434. self,
  435. input: torch.Tensor,
  436. input_lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
  437. batch_size = input.size(0)
  438. feats = []
  439. feats_lens = []
  440. for i in range(batch_size):
  441. waveform_length = input_lengths[i]
  442. waveform = input[i][:waveform_length]
  443. waveform = waveform.numpy()
  444. mat = eend_ola_feature.stft(waveform, self.frame_length, self.frame_shift)
  445. mat = eend_ola_feature.transform(mat)
  446. mat = eend_ola_feature.splice(mat, context_size=self.lfr_m)
  447. mat = mat[::self.lfr_n]
  448. mat = torch.from_numpy(mat)
  449. feat_length = mat.size(0)
  450. feats.append(mat)
  451. feats_lens.append(feat_length)
  452. feats_lens = torch.as_tensor(feats_lens)
  453. feats_pad = pad_sequence(feats,
  454. batch_first=True,
  455. padding_value=0.0)
  456. return feats_pad, feats_lens