# e2e_vad.py

from enum import Enum
from typing import List, Tuple, Dict, Any
import torch
from torch import nn
import math
from funasr.models.encoder.fsmn_encoder import FSMN

class VadStateMachine(Enum):
    kVadInStateStartPointNotDetected = 1
    kVadInStateInSpeechSegment = 2
    kVadInStateEndPointDetected = 3


class FrameState(Enum):
    kFrameStateInvalid = -1
    kFrameStateSpeech = 1
    kFrameStateSil = 0


# final voice/unvoice state per frame
class AudioChangeState(Enum):
    kChangeStateSpeech2Speech = 0
    kChangeStateSpeech2Sil = 1
    kChangeStateSil2Sil = 2
    kChangeStateSil2Speech = 3
    kChangeStateNoBegin = 4
    kChangeStateInvalid = 5


class VadDetectMode(Enum):
    kVadSingleUtteranceDetectMode = 0
    kVadMutipleUtteranceDetectMode = 1
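
# State flow: detection starts in kVadInStateStartPointNotDetected; a
# Sil2Speech change from the WindowDetector moves it to
# kVadInStateInSpeechSegment, and a long-enough trailing silence (or the
# max-segment-length timeout) moves it to kVadInStateEndPointDetected. In
# multiple-utterance mode the machine is then reset for the next segment.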

class VADXOptions:
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    Deep-FSMN for Large Vocabulary Continuous Speech Recognition
    https://arxiv.org/abs/1803.05030
    """

    def __init__(
            self,
            sample_rate: int = 16000,
            detect_mode: int = VadDetectMode.kVadMutipleUtteranceDetectMode.value,
            snr_mode: int = 0,
            # all *_time / *_ms options below are in milliseconds
            max_end_silence_time: int = 800,
            max_start_silence_time: int = 3000,
            do_start_point_detection: bool = True,
            do_end_point_detection: bool = True,
            window_size_ms: int = 200,
            sil_to_speech_time_thres: int = 150,
            speech_to_sil_time_thres: int = 150,
            speech_2_noise_ratio: float = 1.0,
            do_extend: int = 1,
            lookback_time_start_point: int = 200,
            lookahead_time_end_point: int = 100,
            max_single_segment_time: int = 60000,
            nn_eval_block_size: int = 8,
            dcd_block_size: int = 4,
            snr_thres: float = -100.0,
            noise_frame_num_used_for_snr: int = 100,
            decibel_thres: float = -100.0,
            speech_noise_thres: float = 0.6,
            fe_prior_thres: float = 1e-4,
            silence_pdf_num: int = 1,
            sil_pdf_ids: List[int] = [0],  # pdf ids treated as silence
            speech_noise_thresh_low: float = -0.1,
            speech_noise_thresh_high: float = 0.3,
            output_frame_probs: bool = False,
            frame_in_ms: int = 10,
            frame_length_ms: int = 25,
    ):
        self.sample_rate = sample_rate
        self.detect_mode = detect_mode
        self.snr_mode = snr_mode
        self.max_end_silence_time = max_end_silence_time
        self.max_start_silence_time = max_start_silence_time
        self.do_start_point_detection = do_start_point_detection
        self.do_end_point_detection = do_end_point_detection
        self.window_size_ms = window_size_ms
        self.sil_to_speech_time_thres = sil_to_speech_time_thres
        self.speech_to_sil_time_thres = speech_to_sil_time_thres
        self.speech_2_noise_ratio = speech_2_noise_ratio
        self.do_extend = do_extend
        self.lookback_time_start_point = lookback_time_start_point
        self.lookahead_time_end_point = lookahead_time_end_point
        self.max_single_segment_time = max_single_segment_time
        self.nn_eval_block_size = nn_eval_block_size
        self.dcd_block_size = dcd_block_size
        self.snr_thres = snr_thres
        self.noise_frame_num_used_for_snr = noise_frame_num_used_for_snr
        self.decibel_thres = decibel_thres
        self.speech_noise_thres = speech_noise_thres
        self.fe_prior_thres = fe_prior_thres
        self.silence_pdf_num = silence_pdf_num
        self.sil_pdf_ids = sil_pdf_ids
        self.speech_noise_thresh_low = speech_noise_thresh_low
        self.speech_noise_thresh_high = speech_noise_thresh_high
        self.output_frame_probs = output_frame_probs
        self.frame_in_ms = frame_in_ms
        self.frame_length_ms = frame_length_ms
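
# With the defaults above: frames are 25 ms long with a 10 ms shift, the
# sliding decision window is 200 ms (20 frames), a segment is force-closed
# after max_single_segment_time = 60000 ms, and roughly
# max_end_silence_time = 800 ms of trailing silence closes a segment.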

class E2EVadSpeechBufWithDoa(object):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    Deep-FSMN for Large Vocabulary Continuous Speech Recognition
    https://arxiv.org/abs/1803.05030
    """

    def __init__(self):
        self.start_ms = 0
        self.end_ms = 0
        self.buffer = []
        self.contain_seg_start_point = False
        self.contain_seg_end_point = False
        self.doa = 0

    def Reset(self):
        self.start_ms = 0
        self.end_ms = 0
        self.buffer = []
        self.contain_seg_start_point = False
        self.contain_seg_end_point = False
        self.doa = 0

class E2EVadFrameProb(object):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    Deep-FSMN for Large Vocabulary Continuous Speech Recognition
    https://arxiv.org/abs/1803.05030
    """

    def __init__(self):
        self.noise_prob = 0.0
        self.speech_prob = 0.0
        self.score = 0.0
        self.frame_id = 0
        self.frm_state = 0

class WindowDetector(object):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    Deep-FSMN for Large Vocabulary Continuous Speech Recognition
    https://arxiv.org/abs/1803.05030
    """

    def __init__(self, window_size_ms: int, sil_to_speech_time: int,
                 speech_to_sil_time: int, frame_size_ms: int):
        self.window_size_ms = window_size_ms
        self.sil_to_speech_time = sil_to_speech_time
        self.speech_to_sil_time = speech_to_sil_time
        self.frame_size_ms = frame_size_ms
        self.win_size_frame = int(window_size_ms / frame_size_ms)
        self.win_sum = 0
        self.win_state = [0] * self.win_size_frame  # initialize the sliding window
        self.cur_win_pos = 0
        self.pre_frame_state = FrameState.kFrameStateSil
        self.cur_frame_state = FrameState.kFrameStateSil
        self.sil_to_speech_frmcnt_thres = int(sil_to_speech_time / frame_size_ms)
        self.speech_to_sil_frmcnt_thres = int(speech_to_sil_time / frame_size_ms)
        self.voice_last_frame_count = 0
        self.noise_last_frame_count = 0
        self.hydre_frame_count = 0

    def Reset(self) -> None:
        self.cur_win_pos = 0
        self.win_sum = 0
        self.win_state = [0] * self.win_size_frame
        self.pre_frame_state = FrameState.kFrameStateSil
        self.cur_frame_state = FrameState.kFrameStateSil
        self.voice_last_frame_count = 0
        self.noise_last_frame_count = 0
        self.hydre_frame_count = 0

    def GetWinSize(self) -> int:
        return int(self.win_size_frame)

    def DetectOneFrame(self, frameState: FrameState, frame_count: int) -> AudioChangeState:
        # the ring buffer stores 1 for speech frames and 0 for silence frames
        cur_frame_state = FrameState.kFrameStateSil
        if frameState == FrameState.kFrameStateSpeech:
            cur_frame_state = 1
        elif frameState == FrameState.kFrameStateSil:
            cur_frame_state = 0
        else:
            return AudioChangeState.kChangeStateInvalid
        self.win_sum -= self.win_state[self.cur_win_pos]
        self.win_sum += cur_frame_state
        self.win_state[self.cur_win_pos] = cur_frame_state
        self.cur_win_pos = (self.cur_win_pos + 1) % self.win_size_frame
        if self.pre_frame_state == FrameState.kFrameStateSil and self.win_sum >= self.sil_to_speech_frmcnt_thres:
            self.pre_frame_state = FrameState.kFrameStateSpeech
            return AudioChangeState.kChangeStateSil2Speech
        if self.pre_frame_state == FrameState.kFrameStateSpeech and self.win_sum <= self.speech_to_sil_frmcnt_thres:
            self.pre_frame_state = FrameState.kFrameStateSil
            return AudioChangeState.kChangeStateSpeech2Sil
        if self.pre_frame_state == FrameState.kFrameStateSil:
            return AudioChangeState.kChangeStateSil2Sil
        if self.pre_frame_state == FrameState.kFrameStateSpeech:
            return AudioChangeState.kChangeStateSpeech2Speech
        return AudioChangeState.kChangeStateInvalid

    def FrameSizeMs(self) -> int:
        return int(self.frame_size_ms)
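
# Worked example for WindowDetector with the defaults: window_size_ms=200 and
# frame_size_ms=10 give a 20-frame ring buffer; sil_to_speech_time=150 means
# win_sum must reach 15 speech frames out of 20 before Sil2Speech fires, and
# speech_to_sil_time=150 means win_sum must fall to 15 or fewer before
# Speech2Sil fires.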

class E2EVadModel(nn.Module):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    Deep-FSMN for Large Vocabulary Continuous Speech Recognition
    https://arxiv.org/abs/1803.05030
    """

    def __init__(self, encoder: FSMN, vad_post_args: Dict[str, Any], frontend=None):
        super(E2EVadModel, self).__init__()
        self.vad_opts = VADXOptions(**vad_post_args)
        self.windows_detector = WindowDetector(self.vad_opts.window_size_ms,
                                               self.vad_opts.sil_to_speech_time_thres,
                                               self.vad_opts.speech_to_sil_time_thres,
                                               self.vad_opts.frame_in_ms)
        self.encoder = encoder
        # init variables
        self.data_buf_start_frame = 0
        self.frm_cnt = 0
        self.latest_confirmed_speech_frame = 0
        self.lastest_confirmed_silence_frame = -1
        self.continous_silence_frame_count = 0
        self.vad_state_machine = VadStateMachine.kVadInStateStartPointNotDetected
        self.confirmed_start_frame = -1
        self.confirmed_end_frame = -1
        self.number_end_time_detected = 0
        self.sil_frame = 0
        self.sil_pdf_ids = self.vad_opts.sil_pdf_ids
        self.noise_average_decibel = -100.0
        self.pre_end_silence_detected = False
        self.next_seg = True
        self.output_data_buf = []
        self.output_data_buf_offset = 0
        self.frame_probs = []
        self.max_end_sil_frame_cnt_thresh = self.vad_opts.max_end_silence_time - self.vad_opts.speech_to_sil_time_thres
        self.speech_noise_thres = self.vad_opts.speech_noise_thres
        self.scores = None
        self.max_time_out = False
        self.decibel = []
        self.data_buf = None
        self.data_buf_all = None
        self.waveform = None
        self.frontend = frontend
        self.last_drop_frames = 0

    def AllResetDetection(self):
        self.data_buf_start_frame = 0
        self.frm_cnt = 0
        self.latest_confirmed_speech_frame = 0
        self.lastest_confirmed_silence_frame = -1
        self.continous_silence_frame_count = 0
        self.vad_state_machine = VadStateMachine.kVadInStateStartPointNotDetected
        self.confirmed_start_frame = -1
        self.confirmed_end_frame = -1
        self.number_end_time_detected = 0
        self.sil_frame = 0
        self.sil_pdf_ids = self.vad_opts.sil_pdf_ids
        self.noise_average_decibel = -100.0
        self.pre_end_silence_detected = False
        self.next_seg = True
        self.output_data_buf = []
        self.output_data_buf_offset = 0
        self.frame_probs = []
        self.max_end_sil_frame_cnt_thresh = self.vad_opts.max_end_silence_time - self.vad_opts.speech_to_sil_time_thres
        self.speech_noise_thres = self.vad_opts.speech_noise_thres
        self.scores = None
        self.max_time_out = False
        self.decibel = []
        self.data_buf = None
        self.data_buf_all = None
        self.waveform = None
        self.last_drop_frames = 0
        self.windows_detector.Reset()

    def ResetDetection(self):
        self.continous_silence_frame_count = 0
        self.latest_confirmed_speech_frame = 0
        self.lastest_confirmed_silence_frame = -1
        self.confirmed_start_frame = -1
        self.confirmed_end_frame = -1
        self.vad_state_machine = VadStateMachine.kVadInStateStartPointNotDetected
        self.windows_detector.Reset()
        self.sil_frame = 0
        self.frame_probs = []
        if self.output_data_buf:
            assert self.output_data_buf[-1].contain_seg_end_point
            # drop the samples, decibels and scores already consumed by the
            # segment that just ended, so the buffers do not grow without bound
            drop_frames = int(self.output_data_buf[-1].end_ms / self.vad_opts.frame_in_ms)
            real_drop_frames = drop_frames - self.last_drop_frames
            self.last_drop_frames = drop_frames
            self.data_buf_all = self.data_buf_all[
                                real_drop_frames * int(self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000):]
            self.decibel = self.decibel[real_drop_frames:]
            self.scores = self.scores[:, real_drop_frames:, :]

    def ComputeDecibel(self) -> None:
        frame_sample_length = int(self.vad_opts.frame_length_ms * self.vad_opts.sample_rate / 1000)
        frame_shift_length = int(self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000)
        if self.data_buf_all is None:
            self.data_buf_all = self.waveform[0]  # data_buf is an alias of waveform[0]
            self.data_buf = self.data_buf_all
        else:
            self.data_buf_all = torch.cat((self.data_buf_all, self.waveform[0]))
        for offset in range(0, self.waveform.shape[1] - frame_sample_length + 1, frame_shift_length):
            self.decibel.append(
                10 * math.log10((self.waveform[0][offset: offset + frame_sample_length]).square().sum() + 0.000001))
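
    # Note on ComputeDecibel: per-frame energy in dB is
    # 10 * log10(sum(x ** 2) + 1e-6), computed over a 25 ms frame (400 samples
    # at the default 16 kHz) advanced by a 10 ms shift (160 samples); the 1e-6
    # floor avoids log(0) on all-zero frames.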

    def ComputeScores(self, feats: torch.Tensor, in_cache: Dict[str, torch.Tensor]) -> None:
        scores = self.encoder(feats, in_cache).to('cpu')  # returns B * T * D
        assert scores.shape[1] == feats.shape[1], "The shape between feats and scores does not match"
        self.vad_opts.nn_eval_block_size = scores.shape[1]
        self.frm_cnt += scores.shape[1]  # count total frames
        if self.scores is None:
            self.scores = scores  # the first calculation
        else:
            self.scores = torch.cat((self.scores, scores), dim=1)
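
    # Note on ComputeScores: posteriors are concatenated along the time axis,
    # so self.scores spans every frame since the last reset; callers index it
    # with absolute frame counts offset by last_drop_frames (see
    # DetectCommonFrames below).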

    def PopDataBufTillFrame(self, frame_idx: int) -> None:  # need check again
        while self.data_buf_start_frame < frame_idx:
            if len(self.data_buf) >= int(self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000):
                self.data_buf_start_frame += 1
                self.data_buf = self.data_buf_all[
                                (self.data_buf_start_frame - self.last_drop_frames) * int(
                                    self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000):]

    def PopDataToOutputBuf(self, start_frm: int, frm_cnt: int, first_frm_is_start_point: bool,
                           last_frm_is_end_point: bool, end_point_is_sent_end: bool) -> None:
        self.PopDataBufTillFrame(start_frm)
        expected_sample_number = int(frm_cnt * self.vad_opts.sample_rate * self.vad_opts.frame_in_ms / 1000)
        if last_frm_is_end_point:
            extra_sample = max(0, int(self.vad_opts.frame_length_ms * self.vad_opts.sample_rate / 1000
                                      - self.vad_opts.sample_rate * self.vad_opts.frame_in_ms / 1000))
            expected_sample_number += int(extra_sample)
        if end_point_is_sent_end:
            expected_sample_number = max(expected_sample_number, len(self.data_buf))
        if len(self.data_buf) < expected_sample_number:
            print('error in calling pop data_buf\n')
        if len(self.output_data_buf) == 0 or first_frm_is_start_point:
            self.output_data_buf.append(E2EVadSpeechBufWithDoa())
            self.output_data_buf[-1].Reset()
            self.output_data_buf[-1].start_ms = start_frm * self.vad_opts.frame_in_ms
            self.output_data_buf[-1].end_ms = self.output_data_buf[-1].start_ms
            self.output_data_buf[-1].doa = 0
        cur_seg = self.output_data_buf[-1]
        if cur_seg.end_ms != start_frm * self.vad_opts.frame_in_ms:
            print('warning\n')
        out_pos = len(cur_seg.buffer)  # cur_seg.buffer is not actually filled here
        data_to_pop = 0
        if end_point_is_sent_end:
            data_to_pop = expected_sample_number
        else:
            data_to_pop = int(frm_cnt * self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000)
        if data_to_pop > len(self.data_buf):
            print('VAD data_to_pop is bigger than self.data_buf.size()!!!\n')
            data_to_pop = len(self.data_buf)
            expected_sample_number = len(self.data_buf)
        cur_seg.doa = 0
        for sample_cpy_out in range(0, data_to_pop):
            # cur_seg.buffer[out_pos ++] = data_buf_.back();
            out_pos += 1
        for sample_cpy_out in range(data_to_pop, expected_sample_number):
            # cur_seg.buffer[out_pos++] = data_buf_.back()
            out_pos += 1
        if cur_seg.end_ms != start_frm * self.vad_opts.frame_in_ms:
            print('Something wrong with the VAD algorithm\n')
        self.data_buf_start_frame += frm_cnt
        cur_seg.end_ms = (start_frm + frm_cnt) * self.vad_opts.frame_in_ms
        if first_frm_is_start_point:
            cur_seg.contain_seg_start_point = True
        if last_frm_is_end_point:
            cur_seg.contain_seg_end_point = True

    def OnSilenceDetected(self, valid_frame: int):
        self.lastest_confirmed_silence_frame = valid_frame
        if self.vad_state_machine == VadStateMachine.kVadInStateStartPointNotDetected:
            self.PopDataBufTillFrame(valid_frame)
        # silence_detected_callback_
        # pass

    def OnVoiceDetected(self, valid_frame: int) -> None:
        self.latest_confirmed_speech_frame = valid_frame
        self.PopDataToOutputBuf(valid_frame, 1, False, False, False)

    def OnVoiceStart(self, start_frame: int, fake_result: bool = False) -> None:
        if self.vad_opts.do_start_point_detection:
            pass
        if self.confirmed_start_frame != -1:
            print('not reset vad properly\n')
        else:
            self.confirmed_start_frame = start_frame
        if not fake_result and self.vad_state_machine == VadStateMachine.kVadInStateStartPointNotDetected:
            self.PopDataToOutputBuf(self.confirmed_start_frame, 1, True, False, False)

    def OnVoiceEnd(self, end_frame: int, fake_result: bool, is_last_frame: bool) -> None:
        for t in range(self.latest_confirmed_speech_frame + 1, end_frame):
            self.OnVoiceDetected(t)
        if self.vad_opts.do_end_point_detection:
            pass
        if self.confirmed_end_frame != -1:
            print('not reset vad properly\n')
        else:
            self.confirmed_end_frame = end_frame
        if not fake_result:
            self.sil_frame = 0
            self.PopDataToOutputBuf(self.confirmed_end_frame, 1, False, True, is_last_frame)
        self.number_end_time_detected += 1

    def MaybeOnVoiceEndIfLastFrame(self, is_final_frame: bool, cur_frm_idx: int) -> None:
        if is_final_frame:
            self.OnVoiceEnd(cur_frm_idx, False, True)
            self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected

    def GetLatency(self) -> int:
        return int(self.LatencyFrmNumAtStartPoint() * self.vad_opts.frame_in_ms)

    def LatencyFrmNumAtStartPoint(self) -> int:
        vad_latency = self.windows_detector.GetWinSize()
        if self.vad_opts.do_extend:
            vad_latency += int(self.vad_opts.lookback_time_start_point / self.vad_opts.frame_in_ms)
        return vad_latency

    def GetFrameState(self, t: int) -> FrameState:
        frame_state = FrameState.kFrameStateInvalid
        cur_decibel = self.decibel[t]
        cur_snr = cur_decibel - self.noise_average_decibel
        # for each frame, calc log posterior probability of each state
        if cur_decibel < self.vad_opts.decibel_thres:
            frame_state = FrameState.kFrameStateSil
            self.DetectOneFrame(frame_state, t, False)
            return frame_state
        sum_score = 0.0
        noise_prob = 0.0
        assert len(self.sil_pdf_ids) == self.vad_opts.silence_pdf_num
        if len(self.sil_pdf_ids) > 0:
            assert len(self.scores) == 1  # only batch_size = 1 is supported
            sil_pdf_scores = [self.scores[0][t][sil_pdf_id] for sil_pdf_id in self.sil_pdf_ids]
            sum_score = sum(sil_pdf_scores)
            noise_prob = math.log(sum_score) * self.vad_opts.speech_2_noise_ratio
            total_score = 1.0
            sum_score = total_score - sum_score
        speech_prob = math.log(sum_score)
        if self.vad_opts.output_frame_probs:
            frame_prob = E2EVadFrameProb()
            frame_prob.noise_prob = noise_prob
            frame_prob.speech_prob = speech_prob
            frame_prob.score = sum_score
            frame_prob.frame_id = t
            self.frame_probs.append(frame_prob)
        if math.exp(speech_prob) >= math.exp(noise_prob) + self.speech_noise_thres:
            if cur_snr >= self.vad_opts.snr_thres and cur_decibel >= self.vad_opts.decibel_thres:
                frame_state = FrameState.kFrameStateSpeech
            else:
                frame_state = FrameState.kFrameStateSil
        else:
            frame_state = FrameState.kFrameStateSil
        if self.noise_average_decibel < -99.9:
            self.noise_average_decibel = cur_decibel
        else:
            self.noise_average_decibel = (cur_decibel + self.noise_average_decibel
                                          * (self.vad_opts.noise_frame_num_used_for_snr - 1)) \
                                         / self.vad_opts.noise_frame_num_used_for_snr
        return frame_state
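
    # Note on GetFrameState: a frame counts as speech only when
    # P(speech) >= P(noise) + speech_noise_thres (compared after exp, i.e. in
    # probability space) and the frame also clears the snr_thres and
    # decibel_thres gates; the noise floor used for SNR is a running mean over
    # noise_frame_num_used_for_snr frames.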

    def forward(self, feats: torch.Tensor, waveform: torch.Tensor, in_cache: Dict[str, torch.Tensor] = dict(),
                is_final: bool = False
                ) -> Tuple[List[List[List[int]]], Dict[str, torch.Tensor]]:
        if not in_cache:
            self.AllResetDetection()
        self.waveform = waveform  # compute decibel for each frame
        self.ComputeDecibel()
        self.ComputeScores(feats, in_cache)
        if not is_final:
            self.DetectCommonFrames()
        else:
            self.DetectLastFrames()
        segments = []
        for batch_num in range(0, feats.shape[0]):  # only support batch_size = 1 now
            segment_batch = []
            if len(self.output_data_buf) > 0:
                for i in range(self.output_data_buf_offset, len(self.output_data_buf)):
                    if not is_final and (not self.output_data_buf[i].contain_seg_start_point
                                         or not self.output_data_buf[i].contain_seg_end_point):
                        continue
                    segment = [self.output_data_buf[i].start_ms, self.output_data_buf[i].end_ms]
                    segment_batch.append(segment)
                    self.output_data_buf_offset += 1  # need update this parameter
            if segment_batch:
                segments.append(segment_batch)
        if is_final:
            # reset class variables and clear the dict for the next query
            self.AllResetDetection()
        return segments, in_cache
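
    # forward_online below differs from forward in how unfinished segments are
    # reported: a segment whose end point is still pending is emitted as
    # [start_ms, -1] and, once the end point is found, closed as [-1, end_ms];
    # the per-call max_end_sil argument overrides the end-silence threshold.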

    def forward_online(self, feats: torch.Tensor, waveform: torch.Tensor, in_cache: Dict[str, torch.Tensor] = dict(),
                       is_final: bool = False, max_end_sil: int = 800
                       ) -> Tuple[List[List[List[int]]], Dict[str, torch.Tensor]]:
        if not in_cache:
            self.AllResetDetection()
        self.max_end_sil_frame_cnt_thresh = max_end_sil - self.vad_opts.speech_to_sil_time_thres
        self.waveform = waveform  # compute decibel for each frame
        self.ComputeScores(feats, in_cache)
        self.ComputeDecibel()
        if not is_final:
            self.DetectCommonFrames()
        else:
            self.DetectLastFrames()
        segments = []
        for batch_num in range(0, feats.shape[0]):  # only support batch_size = 1 now
            segment_batch = []
            if len(self.output_data_buf) > 0:
                for i in range(self.output_data_buf_offset, len(self.output_data_buf)):
                    if not self.output_data_buf[i].contain_seg_start_point:
                        continue
                    if not self.next_seg and not self.output_data_buf[i].contain_seg_end_point:
                        continue
                    start_ms = self.output_data_buf[i].start_ms if self.next_seg else -1
                    if self.output_data_buf[i].contain_seg_end_point:
                        end_ms = self.output_data_buf[i].end_ms
                        self.next_seg = True
                        self.output_data_buf_offset += 1
                    else:
                        end_ms = -1
                        self.next_seg = False
                    segment = [start_ms, end_ms]
                    segment_batch.append(segment)
            if segment_batch:
                segments.append(segment_batch)
        if is_final:
            # reset class variables and clear the dict for the next query
            self.AllResetDetection()
        return segments, in_cache
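
    # DetectCommonFrames and DetectLastFrames below score the newest
    # nn_eval_block_size frames from oldest to newest; only DetectLastFrames
    # flags its last frame as final so that a still-open segment gets closed.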

    def DetectCommonFrames(self) -> int:
        if self.vad_state_machine == VadStateMachine.kVadInStateEndPointDetected:
            return 0
        for i in range(self.vad_opts.nn_eval_block_size - 1, -1, -1):
            frame_state = FrameState.kFrameStateInvalid
            frame_state = self.GetFrameState(self.frm_cnt - 1 - i - self.last_drop_frames)
            self.DetectOneFrame(frame_state, self.frm_cnt - 1 - i, False)
        return 0

    def DetectLastFrames(self) -> int:
        if self.vad_state_machine == VadStateMachine.kVadInStateEndPointDetected:
            return 0
        for i in range(self.vad_opts.nn_eval_block_size - 1, -1, -1):
            frame_state = FrameState.kFrameStateInvalid
            frame_state = self.GetFrameState(self.frm_cnt - 1 - i - self.last_drop_frames)
            if i != 0:
                self.DetectOneFrame(frame_state, self.frm_cnt - 1 - i, False)
            else:
                self.DetectOneFrame(frame_state, self.frm_cnt - 1, True)
        return 0

    def DetectOneFrame(self, cur_frm_state: FrameState, cur_frm_idx: int, is_final_frame: bool) -> None:
        tmp_cur_frm_state = FrameState.kFrameStateInvalid
        if cur_frm_state == FrameState.kFrameStateSpeech:
            # math.fabs(1.0) is always greater than fe_prior_thres, so speech
            # frames always pass; this looks like a placeholder for a real
            # frame-energy prior
            if math.fabs(1.0) > self.vad_opts.fe_prior_thres:
                tmp_cur_frm_state = FrameState.kFrameStateSpeech
            else:
                tmp_cur_frm_state = FrameState.kFrameStateSil
        elif cur_frm_state == FrameState.kFrameStateSil:
            tmp_cur_frm_state = FrameState.kFrameStateSil
        state_change = self.windows_detector.DetectOneFrame(tmp_cur_frm_state, cur_frm_idx)
        frm_shift_in_ms = self.vad_opts.frame_in_ms
        if AudioChangeState.kChangeStateSil2Speech == state_change:
            silence_frame_count = self.continous_silence_frame_count
            self.continous_silence_frame_count = 0
            self.pre_end_silence_detected = False
            start_frame = 0
            if self.vad_state_machine == VadStateMachine.kVadInStateStartPointNotDetected:
                start_frame = max(self.data_buf_start_frame, cur_frm_idx - self.LatencyFrmNumAtStartPoint())
                self.OnVoiceStart(start_frame)
                self.vad_state_machine = VadStateMachine.kVadInStateInSpeechSegment
                for t in range(start_frame + 1, cur_frm_idx + 1):
                    self.OnVoiceDetected(t)
            elif self.vad_state_machine == VadStateMachine.kVadInStateInSpeechSegment:
                for t in range(self.latest_confirmed_speech_frame + 1, cur_frm_idx):
                    self.OnVoiceDetected(t)
                if cur_frm_idx - self.confirmed_start_frame + 1 > \
                        self.vad_opts.max_single_segment_time / frm_shift_in_ms:
                    self.OnVoiceEnd(cur_frm_idx, False, False)
                    self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected
                elif not is_final_frame:
                    self.OnVoiceDetected(cur_frm_idx)
                else:
                    self.MaybeOnVoiceEndIfLastFrame(is_final_frame, cur_frm_idx)
            else:
                pass
        elif AudioChangeState.kChangeStateSpeech2Sil == state_change:
            self.continous_silence_frame_count = 0
            if self.vad_state_machine == VadStateMachine.kVadInStateStartPointNotDetected:
                pass
            elif self.vad_state_machine == VadStateMachine.kVadInStateInSpeechSegment:
                if cur_frm_idx - self.confirmed_start_frame + 1 > \
                        self.vad_opts.max_single_segment_time / frm_shift_in_ms:
                    self.OnVoiceEnd(cur_frm_idx, False, False)
                    self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected
                elif not is_final_frame:
                    self.OnVoiceDetected(cur_frm_idx)
                else:
                    self.MaybeOnVoiceEndIfLastFrame(is_final_frame, cur_frm_idx)
            else:
                pass
        elif AudioChangeState.kChangeStateSpeech2Speech == state_change:
            self.continous_silence_frame_count = 0
            if self.vad_state_machine == VadStateMachine.kVadInStateInSpeechSegment:
                if cur_frm_idx - self.confirmed_start_frame + 1 > \
                        self.vad_opts.max_single_segment_time / frm_shift_in_ms:
                    self.max_time_out = True
                    self.OnVoiceEnd(cur_frm_idx, False, False)
                    self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected
                elif not is_final_frame:
                    self.OnVoiceDetected(cur_frm_idx)
                else:
                    self.MaybeOnVoiceEndIfLastFrame(is_final_frame, cur_frm_idx)
            else:
                pass
        elif AudioChangeState.kChangeStateSil2Sil == state_change:
            self.continous_silence_frame_count += 1
            if self.vad_state_machine == VadStateMachine.kVadInStateStartPointNotDetected:
                # silence timeout, return zero length decision
                if ((self.vad_opts.detect_mode == VadDetectMode.kVadSingleUtteranceDetectMode.value)
                    and (self.continous_silence_frame_count * frm_shift_in_ms
                         > self.vad_opts.max_start_silence_time)) \
                        or (is_final_frame and self.number_end_time_detected == 0):
                    for t in range(self.lastest_confirmed_silence_frame + 1, cur_frm_idx):
                        self.OnSilenceDetected(t)
                    self.OnVoiceStart(0, True)
                    self.OnVoiceEnd(0, True, False)
                    self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected
                else:
                    if cur_frm_idx >= self.LatencyFrmNumAtStartPoint():
                        self.OnSilenceDetected(cur_frm_idx - self.LatencyFrmNumAtStartPoint())
            elif self.vad_state_machine == VadStateMachine.kVadInStateInSpeechSegment:
                if self.continous_silence_frame_count * frm_shift_in_ms >= self.max_end_sil_frame_cnt_thresh:
                    lookback_frame = int(self.max_end_sil_frame_cnt_thresh / frm_shift_in_ms)
                    if self.vad_opts.do_extend:
                        lookback_frame -= int(self.vad_opts.lookahead_time_end_point / frm_shift_in_ms)
                        lookback_frame -= 1
                        lookback_frame = max(0, lookback_frame)
                    self.OnVoiceEnd(cur_frm_idx - lookback_frame, False, False)
                    self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected
                elif cur_frm_idx - self.confirmed_start_frame + 1 > \
                        self.vad_opts.max_single_segment_time / frm_shift_in_ms:
                    self.OnVoiceEnd(cur_frm_idx, False, False)
                    self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected
                elif self.vad_opts.do_extend and not is_final_frame:
                    if self.continous_silence_frame_count <= int(
                            self.vad_opts.lookahead_time_end_point / frm_shift_in_ms):
                        self.OnVoiceDetected(cur_frm_idx)
                else:
                    self.MaybeOnVoiceEndIfLastFrame(is_final_frame, cur_frm_idx)
            else:
                pass
        if self.vad_state_machine == VadStateMachine.kVadInStateEndPointDetected and \
                self.vad_opts.detect_mode == VadDetectMode.kVadMutipleUtteranceDetectMode.value:
            self.ResetDetection()
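

# --- Usage sketch (added; not part of the upstream API) ---
# A minimal smoke test with a stand-in encoder. The real pipeline wires in a
# trained FSMN and fbank features from the frontend; DummyEncoder, the 248
# output pdfs and the 400-dim feats below are assumptions for shape checking
# only, so the detected segments here are meaningless.
if __name__ == "__main__":
    class DummyEncoder(nn.Module):
        # mimics the call signature used in ComputeScores: (feats, cache) -> B * T * D
        def forward(self, feats: torch.Tensor, cache: Dict[str, torch.Tensor]) -> torch.Tensor:
            probs = torch.rand(feats.shape[0], feats.shape[1], 248)
            return probs / probs.sum(dim=-1, keepdim=True)  # rows sum to 1

    vad = E2EVadModel(DummyEncoder(), dict(sample_rate=16000))
    sr = 16000
    waveform = torch.randn(1, sr)  # one second of noise
    # number of 25 ms frames at a 10 ms shift, matching ComputeDecibel
    num_frames = (waveform.shape[1] - int(0.025 * sr)) // int(0.010 * sr) + 1
    feats = torch.randn(1, num_frames, 400)
    segments, _ = vad(feats, waveform, in_cache={}, is_final=True)
    print(segments)  # e.g. [[[start_ms, end_ms], ...]]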