# e2e_vad.py

from enum import Enum
from typing import List, Tuple, Dict, Any
import math

import torch
from torch import nn

from funasr.models.encoder.fsmn_encoder import FSMN


class VadStateMachine(Enum):
    kVadInStateStartPointNotDetected = 1
    kVadInStateInSpeechSegment = 2
    kVadInStateEndPointDetected = 3


class FrameState(Enum):
    kFrameStateInvalid = -1
    kFrameStateSpeech = 1
    kFrameStateSil = 0


# final voice/unvoice state per frame
class AudioChangeState(Enum):
    kChangeStateSpeech2Speech = 0
    kChangeStateSpeech2Sil = 1
    kChangeStateSil2Sil = 2
    kChangeStateSil2Speech = 3
    kChangeStateNoBegin = 4
    kChangeStateInvalid = 5


class VadDetectMode(Enum):
    kVadSingleUtteranceDetectMode = 0
    kVadMutipleUtteranceDetectMode = 1
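

# Overview of how the pieces below fit together:
#   1. E2EVadModel.ComputeScores() runs the FSMN encoder to get per-frame pdf
#      posteriors; ComputeDecibel() computes per-frame energy in dB.
#   2. GetFrameState() maps each frame to a FrameState (speech/silence) by
#      combining the posteriors with the decibel and SNR thresholds.
#   3. WindowDetector smooths the raw frame states over a sliding window and
#      emits an AudioChangeState transition per frame.
#   4. E2EVadModel.DetectOneFrame() drives the VadStateMachine with those
#      transitions to confirm segment start and end points.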


class VADXOptions:
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    Deep-FSMN for Large Vocabulary Continuous Speech Recognition
    https://arxiv.org/abs/1803.05030
    """

    def __init__(
            self,
            sample_rate: int = 16000,
            detect_mode: int = VadDetectMode.kVadMutipleUtteranceDetectMode.value,
            snr_mode: int = 0,
            max_end_silence_time: int = 800,
            max_start_silence_time: int = 3000,
            do_start_point_detection: bool = True,
            do_end_point_detection: bool = True,
            window_size_ms: int = 200,
            sil_to_speech_time_thres: int = 150,
            speech_to_sil_time_thres: int = 150,
            speech_2_noise_ratio: float = 1.0,
            do_extend: int = 1,
            lookback_time_start_point: int = 200,
            lookahead_time_end_point: int = 100,
            max_single_segment_time: int = 60000,
            nn_eval_block_size: int = 8,
            dcd_block_size: int = 4,
            snr_thres: float = -100.0,
            noise_frame_num_used_for_snr: int = 100,
            decibel_thres: float = -100.0,
            speech_noise_thres: float = 0.6,
            fe_prior_thres: float = 1e-4,
            silence_pdf_num: int = 1,
            sil_pdf_ids: List[int] = [0],
            speech_noise_thresh_low: float = -0.1,
            speech_noise_thresh_high: float = 0.3,
            output_frame_probs: bool = False,
            frame_in_ms: int = 10,
            frame_length_ms: int = 25,
    ):
        self.sample_rate = sample_rate
        self.detect_mode = detect_mode
        self.snr_mode = snr_mode
        self.max_end_silence_time = max_end_silence_time
        self.max_start_silence_time = max_start_silence_time
        self.do_start_point_detection = do_start_point_detection
        self.do_end_point_detection = do_end_point_detection
        self.window_size_ms = window_size_ms
        self.sil_to_speech_time_thres = sil_to_speech_time_thres
        self.speech_to_sil_time_thres = speech_to_sil_time_thres
        self.speech_2_noise_ratio = speech_2_noise_ratio
        self.do_extend = do_extend
        self.lookback_time_start_point = lookback_time_start_point
        self.lookahead_time_end_point = lookahead_time_end_point
        self.max_single_segment_time = max_single_segment_time
        self.nn_eval_block_size = nn_eval_block_size
        self.dcd_block_size = dcd_block_size
        self.snr_thres = snr_thres
        self.noise_frame_num_used_for_snr = noise_frame_num_used_for_snr
        self.decibel_thres = decibel_thres
        self.speech_noise_thres = speech_noise_thres
        self.fe_prior_thres = fe_prior_thres
        self.silence_pdf_num = silence_pdf_num
        self.sil_pdf_ids = sil_pdf_ids
        self.speech_noise_thresh_low = speech_noise_thresh_low
        self.speech_noise_thresh_high = speech_noise_thresh_high
        self.output_frame_probs = output_frame_probs
        self.frame_in_ms = frame_in_ms
        self.frame_length_ms = frame_length_ms
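

# Note: all *_time options and window_size_ms are in milliseconds.
# frame_in_ms is the frame shift (10 ms) and frame_length_ms the analysis
# window (25 ms), so one frame index corresponds to frame_in_ms ms of audio.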


class E2EVadSpeechBufWithDoa(object):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    Deep-FSMN for Large Vocabulary Continuous Speech Recognition
    https://arxiv.org/abs/1803.05030
    """

    def __init__(self):
        self.start_ms = 0
        self.end_ms = 0
        self.buffer = []
        self.contain_seg_start_point = False
        self.contain_seg_end_point = False
        self.doa = 0

    def Reset(self):
        self.start_ms = 0
        self.end_ms = 0
        self.buffer = []
        self.contain_seg_start_point = False
        self.contain_seg_end_point = False
        self.doa = 0


class E2EVadFrameProb(object):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    Deep-FSMN for Large Vocabulary Continuous Speech Recognition
    https://arxiv.org/abs/1803.05030
    """

    def __init__(self):
        self.noise_prob = 0.0
        self.speech_prob = 0.0
        self.score = 0.0
        self.frame_id = 0
        self.frm_state = 0


class WindowDetector(object):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    Deep-FSMN for Large Vocabulary Continuous Speech Recognition
    https://arxiv.org/abs/1803.05030
    """

    def __init__(self, window_size_ms: int, sil_to_speech_time: int,
                 speech_to_sil_time: int, frame_size_ms: int):
        self.window_size_ms = window_size_ms
        self.sil_to_speech_time = sil_to_speech_time
        self.speech_to_sil_time = speech_to_sil_time
        self.frame_size_ms = frame_size_ms
        self.win_size_frame = int(window_size_ms / frame_size_ms)
        self.win_sum = 0
        self.win_state = [0] * self.win_size_frame  # initialize the sliding window
        self.cur_win_pos = 0
        self.pre_frame_state = FrameState.kFrameStateSil
        self.cur_frame_state = FrameState.kFrameStateSil
        self.sil_to_speech_frmcnt_thres = int(sil_to_speech_time / frame_size_ms)
        self.speech_to_sil_frmcnt_thres = int(speech_to_sil_time / frame_size_ms)
        self.voice_last_frame_count = 0
        self.noise_last_frame_count = 0
        self.hydre_frame_count = 0

    def Reset(self) -> None:
        self.cur_win_pos = 0
        self.win_sum = 0
        self.win_state = [0] * self.win_size_frame
        self.pre_frame_state = FrameState.kFrameStateSil
        self.cur_frame_state = FrameState.kFrameStateSil
        self.voice_last_frame_count = 0
        self.noise_last_frame_count = 0
        self.hydre_frame_count = 0

    def GetWinSize(self) -> int:
        return int(self.win_size_frame)

    def DetectOneFrame(self, frameState: FrameState, frame_count: int) -> AudioChangeState:
        # Map the incoming frame state to 0/1 so it can be summed over the window.
        if frameState == FrameState.kFrameStateSpeech:
            cur_frame_state = 1
        elif frameState == FrameState.kFrameStateSil:
            cur_frame_state = 0
        else:
            return AudioChangeState.kChangeStateInvalid
        # Ring buffer update: drop the oldest frame state, add the newest.
        self.win_sum -= self.win_state[self.cur_win_pos]
        self.win_sum += cur_frame_state
        self.win_state[self.cur_win_pos] = cur_frame_state
        self.cur_win_pos = (self.cur_win_pos + 1) % self.win_size_frame
        if self.pre_frame_state == FrameState.kFrameStateSil and self.win_sum >= self.sil_to_speech_frmcnt_thres:
            self.pre_frame_state = FrameState.kFrameStateSpeech
            return AudioChangeState.kChangeStateSil2Speech
        if self.pre_frame_state == FrameState.kFrameStateSpeech and self.win_sum <= self.speech_to_sil_frmcnt_thres:
            self.pre_frame_state = FrameState.kFrameStateSil
            return AudioChangeState.kChangeStateSpeech2Sil
        if self.pre_frame_state == FrameState.kFrameStateSil:
            return AudioChangeState.kChangeStateSil2Sil
        if self.pre_frame_state == FrameState.kFrameStateSpeech:
            return AudioChangeState.kChangeStateSpeech2Speech
        return AudioChangeState.kChangeStateInvalid

    def FrameSizeMs(self) -> int:
        return int(self.frame_size_ms)
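

# Worked example with the defaults (window_size_ms=200, frame_size_ms=10,
# sil_to_speech_time=150, speech_to_sil_time=150): the ring buffer holds
# 200 / 10 = 20 frame states; a sil->speech transition fires once at least
# 150 / 10 = 15 of the last 20 frames are speech, and speech->sil fires once
# at most 15 of the last 20 frames are speech.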


class E2EVadModel(nn.Module):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    Deep-FSMN for Large Vocabulary Continuous Speech Recognition
    https://arxiv.org/abs/1803.05030
    """

    def __init__(self, encoder: FSMN, vad_post_args: Dict[str, Any], frontend=None):
        super(E2EVadModel, self).__init__()
        self.vad_opts = VADXOptions(**vad_post_args)
        self.windows_detector = WindowDetector(self.vad_opts.window_size_ms,
                                               self.vad_opts.sil_to_speech_time_thres,
                                               self.vad_opts.speech_to_sil_time_thres,
                                               self.vad_opts.frame_in_ms)
        self.encoder = encoder
        # init variables
        self.is_final = False
        self.data_buf_start_frame = 0
        self.frm_cnt = 0
        self.latest_confirmed_speech_frame = 0
        self.latest_confirmed_silence_frame = -1
        self.continuous_silence_frame_count = 0
        self.vad_state_machine = VadStateMachine.kVadInStateStartPointNotDetected
        self.confirmed_start_frame = -1
        self.confirmed_end_frame = -1
        self.number_end_time_detected = 0
        self.sil_frame = 0
        self.sil_pdf_ids = self.vad_opts.sil_pdf_ids
        self.noise_average_decibel = -100.0
        self.pre_end_silence_detected = False
        self.next_seg = True
        self.output_data_buf = []
        self.output_data_buf_offset = 0
        self.frame_probs = []
        self.max_end_sil_frame_cnt_thresh = self.vad_opts.max_end_silence_time - self.vad_opts.speech_to_sil_time_thres
        self.speech_noise_thres = self.vad_opts.speech_noise_thres
        self.scores = None
        self.max_time_out = False
        self.decibel = []
        self.data_buf = None
        self.data_buf_all = None
        self.waveform = None
        self.ResetDetection()
        self.frontend = frontend

    def AllResetDetection(self):
        self.is_final = False
        self.data_buf_start_frame = 0
        self.frm_cnt = 0
        self.latest_confirmed_speech_frame = 0
        self.latest_confirmed_silence_frame = -1
        self.continuous_silence_frame_count = 0
        self.vad_state_machine = VadStateMachine.kVadInStateStartPointNotDetected
        self.confirmed_start_frame = -1
        self.confirmed_end_frame = -1
        self.number_end_time_detected = 0
        self.sil_frame = 0
        self.sil_pdf_ids = self.vad_opts.sil_pdf_ids
        self.noise_average_decibel = -100.0
        self.pre_end_silence_detected = False
        self.next_seg = True
        self.output_data_buf = []
        self.output_data_buf_offset = 0
        self.frame_probs = []
        self.max_end_sil_frame_cnt_thresh = self.vad_opts.max_end_silence_time - self.vad_opts.speech_to_sil_time_thres
        self.speech_noise_thres = self.vad_opts.speech_noise_thres
        self.scores = None
        self.max_time_out = False
        self.decibel = []
        self.data_buf = None
        self.data_buf_all = None
        self.waveform = None
        self.ResetDetection()

    def ResetDetection(self):
        self.continuous_silence_frame_count = 0
        self.latest_confirmed_speech_frame = 0
        self.latest_confirmed_silence_frame = -1
        self.confirmed_start_frame = -1
        self.confirmed_end_frame = -1
        self.vad_state_machine = VadStateMachine.kVadInStateStartPointNotDetected
        self.windows_detector.Reset()
        self.sil_frame = 0
        self.frame_probs = []

    def ComputeDecibel(self) -> None:
        frame_sample_length = int(self.vad_opts.frame_length_ms * self.vad_opts.sample_rate / 1000)
        frame_shift_length = int(self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000)
        if self.data_buf_all is None:
            self.data_buf_all = self.waveform[0]  # self.data_buf points into self.waveform[0]
            self.data_buf = self.data_buf_all
        else:
            self.data_buf_all = torch.cat((self.data_buf_all, self.waveform[0]))
        for offset in range(0, self.waveform.shape[1] - frame_sample_length + 1, frame_shift_length):
            self.decibel.append(
                10 * math.log10((self.waveform[0][offset: offset + frame_sample_length]).square().sum() + 0.000001))
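
    # Per-frame energy in dB: 10 * log10(sum of squared samples over one
    # 25 ms analysis window, hopped every 10 ms); the 1e-6 term keeps log10
    # finite on all-zero (silent) frames.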

    def ComputeScores(self, feats: torch.Tensor, in_cache: Dict[str, torch.Tensor]) -> None:
        scores = self.encoder(feats, in_cache)  # returns B * T * D
        assert scores.shape[1] == feats.shape[1], "The shape between feats and scores does not match"
        self.vad_opts.nn_eval_block_size = scores.shape[1]
        self.frm_cnt += scores.shape[1]  # count total frames
        if self.scores is None:
            self.scores = scores  # the first calculation
        else:
            self.scores = torch.cat((self.scores, scores), dim=1)
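
    # The encoder output is treated as per-frame pdf posteriors over D
    # classes; scores from successive streaming chunks are concatenated along
    # the time axis, and nn_eval_block_size tracks the size of the latest
    # chunk so DetectCommonFrames() knows how many new frames to process.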

    def PopDataBufTillFrame(self, frame_idx: int) -> None:  # need check again
        while self.data_buf_start_frame < frame_idx:
            if len(self.data_buf) >= int(self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000):
                self.data_buf_start_frame += 1
                self.data_buf = self.data_buf_all[self.data_buf_start_frame * int(
                    self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000):]

    def PopDataToOutputBuf(self, start_frm: int, frm_cnt: int, first_frm_is_start_point: bool,
                           last_frm_is_end_point: bool, end_point_is_sent_end: bool) -> None:
        self.PopDataBufTillFrame(start_frm)
        expected_sample_number = int(frm_cnt * self.vad_opts.sample_rate * self.vad_opts.frame_in_ms / 1000)
        if last_frm_is_end_point:
            extra_sample = max(0, int(self.vad_opts.frame_length_ms * self.vad_opts.sample_rate / 1000
                                      - self.vad_opts.sample_rate * self.vad_opts.frame_in_ms / 1000))
            expected_sample_number += int(extra_sample)
        if end_point_is_sent_end:
            expected_sample_number = max(expected_sample_number, len(self.data_buf))
        if len(self.data_buf) < expected_sample_number:
            print('error in calling pop data_buf\n')
        if len(self.output_data_buf) == 0 or first_frm_is_start_point:
            self.output_data_buf.append(E2EVadSpeechBufWithDoa())
            self.output_data_buf[-1].Reset()
            self.output_data_buf[-1].start_ms = start_frm * self.vad_opts.frame_in_ms
            self.output_data_buf[-1].end_ms = self.output_data_buf[-1].start_ms
            self.output_data_buf[-1].doa = 0
        cur_seg = self.output_data_buf[-1]
        if cur_seg.end_ms != start_frm * self.vad_opts.frame_in_ms:
            print('warning\n')
        out_pos = len(cur_seg.buffer)  # cur_seg.buffer is not actually filled here
        data_to_pop = 0
        if end_point_is_sent_end:
            data_to_pop = expected_sample_number
        else:
            data_to_pop = int(frm_cnt * self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000)
        if data_to_pop > len(self.data_buf):
            print('VAD data_to_pop is bigger than self.data_buf.size()!!!\n')
            data_to_pop = len(self.data_buf)
            expected_sample_number = len(self.data_buf)
        cur_seg.doa = 0
        for sample_cpy_out in range(0, data_to_pop):
            # cur_seg.buffer[out_pos ++] = data_buf_.back();
            out_pos += 1
        for sample_cpy_out in range(data_to_pop, expected_sample_number):
            # cur_seg.buffer[out_pos++] = data_buf_.back()
            out_pos += 1
        if cur_seg.end_ms != start_frm * self.vad_opts.frame_in_ms:
            print('Something wrong with the VAD algorithm\n')
        self.data_buf_start_frame += frm_cnt
        cur_seg.end_ms = (start_frm + frm_cnt) * self.vad_opts.frame_in_ms
        if first_frm_is_start_point:
            cur_seg.contain_seg_start_point = True
        if last_frm_is_end_point:
            cur_seg.contain_seg_end_point = True

    def OnSilenceDetected(self, valid_frame: int):
        self.latest_confirmed_silence_frame = valid_frame
        if self.vad_state_machine == VadStateMachine.kVadInStateStartPointNotDetected:
            self.PopDataBufTillFrame(valid_frame)
        # silence_detected_callback_
        # pass

    def OnVoiceDetected(self, valid_frame: int) -> None:
        self.latest_confirmed_speech_frame = valid_frame
        self.PopDataToOutputBuf(valid_frame, 1, False, False, False)

    def OnVoiceStart(self, start_frame: int, fake_result: bool = False) -> None:
        if self.vad_opts.do_start_point_detection:
            pass
        if self.confirmed_start_frame != -1:
            print('not reset vad properly\n')
        else:
            self.confirmed_start_frame = start_frame
        if not fake_result and self.vad_state_machine == VadStateMachine.kVadInStateStartPointNotDetected:
            self.PopDataToOutputBuf(self.confirmed_start_frame, 1, True, False, False)

    def OnVoiceEnd(self, end_frame: int, fake_result: bool, is_last_frame: bool) -> None:
        for t in range(self.latest_confirmed_speech_frame + 1, end_frame):
            self.OnVoiceDetected(t)
        if self.vad_opts.do_end_point_detection:
            pass
        if self.confirmed_end_frame != -1:
            print('not reset vad properly\n')
        else:
            self.confirmed_end_frame = end_frame
        if not fake_result:
            self.sil_frame = 0
            self.PopDataToOutputBuf(self.confirmed_end_frame, 1, False, True, is_last_frame)
        self.number_end_time_detected += 1

    def MaybeOnVoiceEndIfLastFrame(self, is_final_frame: bool, cur_frm_idx: int) -> None:
        if is_final_frame:
            self.OnVoiceEnd(cur_frm_idx, False, True)
            self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected

    def GetLatency(self) -> int:
        return int(self.LatencyFrmNumAtStartPoint() * self.vad_opts.frame_in_ms)

    def LatencyFrmNumAtStartPoint(self) -> int:
        vad_latency = self.windows_detector.GetWinSize()
        if self.vad_opts.do_extend:
            vad_latency += int(self.vad_opts.lookback_time_start_point / self.vad_opts.frame_in_ms)
        return vad_latency

    def GetFrameState(self, t: int) -> FrameState:
        frame_state = FrameState.kFrameStateInvalid
        cur_decibel = self.decibel[t]
        cur_snr = cur_decibel - self.noise_average_decibel
        # for each frame, calc log posterior probability of each state
        if cur_decibel < self.vad_opts.decibel_thres:
            frame_state = FrameState.kFrameStateSil
            self.DetectOneFrame(frame_state, t, False)
            return frame_state
        sum_score = 0.0
        noise_prob = 0.0
        assert len(self.sil_pdf_ids) == self.vad_opts.silence_pdf_num
        if len(self.sil_pdf_ids) > 0:
            assert len(self.scores) == 1  # only batch_size = 1 is supported
            sil_pdf_scores = [self.scores[0][t][sil_pdf_id] for sil_pdf_id in self.sil_pdf_ids]
            sum_score = sum(sil_pdf_scores)
            noise_prob = math.log(sum_score) * self.vad_opts.speech_2_noise_ratio
            total_score = 1.0
            sum_score = total_score - sum_score
        speech_prob = math.log(sum_score)
        if self.vad_opts.output_frame_probs:
            frame_prob = E2EVadFrameProb()
            frame_prob.noise_prob = noise_prob
            frame_prob.speech_prob = speech_prob
            frame_prob.score = sum_score
            frame_prob.frame_id = t
            self.frame_probs.append(frame_prob)
        if math.exp(speech_prob) >= math.exp(noise_prob) + self.speech_noise_thres:
            if cur_snr >= self.vad_opts.snr_thres and cur_decibel >= self.vad_opts.decibel_thres:
                frame_state = FrameState.kFrameStateSpeech
            else:
                frame_state = FrameState.kFrameStateSil
        else:
            frame_state = FrameState.kFrameStateSil
            if self.noise_average_decibel < -99.9:
                self.noise_average_decibel = cur_decibel
            else:
                self.noise_average_decibel = (cur_decibel + self.noise_average_decibel * (
                        self.vad_opts.noise_frame_num_used_for_snr - 1)) / self.vad_opts.noise_frame_num_used_for_snr
        return frame_state
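
    # Decision rule: a frame is speech only if the speech posterior beats the
    # (scaled) silence posterior by speech_noise_thres AND the frame passes
    # the SNR and absolute-decibel gates; otherwise it is silence, and the
    # noise floor is updated as a running average over the last
    # noise_frame_num_used_for_snr silence frames.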

    def forward(self, feats: torch.Tensor, waveform: torch.Tensor, in_cache: Dict[str, torch.Tensor] = dict(),
                is_final: bool = False
                ) -> Tuple[List[List[List[int]]], Dict[str, torch.Tensor]]:
        self.waveform = waveform  # compute decibel for each frame
        self.ComputeDecibel()
        self.ComputeScores(feats, in_cache)
        if not is_final:
            self.DetectCommonFrames()
        else:
            self.DetectLastFrames()
        segments = []
        for batch_num in range(0, feats.shape[0]):  # only support batch_size = 1 now
            segment_batch = []
            if len(self.output_data_buf) > 0:
                for i in range(self.output_data_buf_offset, len(self.output_data_buf)):
                    if not is_final and (not self.output_data_buf[i].contain_seg_start_point
                                         or not self.output_data_buf[i].contain_seg_end_point):
                        continue
                    segment = [self.output_data_buf[i].start_ms, self.output_data_buf[i].end_ms]
                    segment_batch.append(segment)
                    self.output_data_buf_offset += 1  # need update this parameter
            if segment_batch:
                segments.append(segment_batch)
        if is_final:
            # reset class variables and clear the dict for the next query
            self.AllResetDetection()
        return segments, in_cache
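
    # forward() is the segment-level entry point: it accumulates detector
    # state across calls and only reports segments whose start and end are
    # both confirmed, until is_final=True flushes and resets everything.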

    def forward_online(self, feats: torch.Tensor, waveform: torch.Tensor, in_cache: Dict[str, torch.Tensor] = dict(),
                       is_final: bool = False, max_end_sil: int = 800
                       ) -> Tuple[List[List[List[int]]], Dict[str, torch.Tensor]]:
        self.max_end_sil_frame_cnt_thresh = max_end_sil - self.vad_opts.speech_to_sil_time_thres
        self.waveform = waveform  # compute decibel for each frame
        self.ComputeScores(feats, in_cache)
        self.ComputeDecibel()
        if not is_final:
            self.DetectCommonFrames()
        else:
            self.DetectLastFrames()
        segments = []
        for batch_num in range(0, feats.shape[0]):  # only support batch_size = 1 now
            segment_batch = []
            if len(self.output_data_buf) > 0:
                for i in range(self.output_data_buf_offset, len(self.output_data_buf)):
                    if not self.output_data_buf[i].contain_seg_start_point:
                        continue
                    if not self.next_seg and not self.output_data_buf[i].contain_seg_end_point:
                        continue
                    start_ms = self.output_data_buf[i].start_ms if self.next_seg else -1
                    if self.output_data_buf[i].contain_seg_end_point:
                        end_ms = self.output_data_buf[i].end_ms
                        self.next_seg = True
                        self.output_data_buf_offset += 1
                    else:
                        end_ms = -1
                        self.next_seg = False
                    segment = [start_ms, end_ms]
                    segment_batch.append(segment)
            if segment_batch:
                segments.append(segment_batch)
        if is_final:
            # reset class variables and clear the dict for the next query
            self.AllResetDetection()
        return segments, in_cache
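
    # Streaming output convention used above: [start_ms, -1] opens a segment
    # whose end is not yet known, [-1, end_ms] later closes that same
    # segment, and [start_ms, end_ms] is a segment fully contained in the
    # current chunk.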

    def DetectCommonFrames(self) -> int:
        if self.vad_state_machine == VadStateMachine.kVadInStateEndPointDetected:
            return 0
        for i in range(self.vad_opts.nn_eval_block_size - 1, -1, -1):
            frame_state = self.GetFrameState(self.frm_cnt - 1 - i)
            self.DetectOneFrame(frame_state, self.frm_cnt - 1 - i, False)
        return 0

    def DetectLastFrames(self) -> int:
        if self.vad_state_machine == VadStateMachine.kVadInStateEndPointDetected:
            return 0
        for i in range(self.vad_opts.nn_eval_block_size - 1, -1, -1):
            frame_state = self.GetFrameState(self.frm_cnt - 1 - i)
            if i != 0:
                self.DetectOneFrame(frame_state, self.frm_cnt - 1 - i, False)
            else:
                self.DetectOneFrame(frame_state, self.frm_cnt - 1, True)
        return 0
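
    # DetectLastFrames() differs from DetectCommonFrames() only in that the
    # very last frame is passed with is_final_frame=True, which forces any
    # open speech segment to be closed via MaybeOnVoiceEndIfLastFrame().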

    def DetectOneFrame(self, cur_frm_state: FrameState, cur_frm_idx: int, is_final_frame: bool) -> None:
        tmp_cur_frm_state = FrameState.kFrameStateInvalid
        if cur_frm_state == FrameState.kFrameStateSpeech:
            if math.fabs(1.0) > self.vad_opts.fe_prior_thres:  # effectively always true (1.0 > fe_prior_thres)
                tmp_cur_frm_state = FrameState.kFrameStateSpeech
            else:
                tmp_cur_frm_state = FrameState.kFrameStateSil
        elif cur_frm_state == FrameState.kFrameStateSil:
            tmp_cur_frm_state = FrameState.kFrameStateSil
        state_change = self.windows_detector.DetectOneFrame(tmp_cur_frm_state, cur_frm_idx)
        frm_shift_in_ms = self.vad_opts.frame_in_ms
        if AudioChangeState.kChangeStateSil2Speech == state_change:
            silence_frame_count = self.continuous_silence_frame_count
            self.continuous_silence_frame_count = 0
            self.pre_end_silence_detected = False
            start_frame = 0
            if self.vad_state_machine == VadStateMachine.kVadInStateStartPointNotDetected:
                start_frame = max(self.data_buf_start_frame, cur_frm_idx - self.LatencyFrmNumAtStartPoint())
                self.OnVoiceStart(start_frame)
                self.vad_state_machine = VadStateMachine.kVadInStateInSpeechSegment
                for t in range(start_frame + 1, cur_frm_idx + 1):
                    self.OnVoiceDetected(t)
            elif self.vad_state_machine == VadStateMachine.kVadInStateInSpeechSegment:
                for t in range(self.latest_confirmed_speech_frame + 1, cur_frm_idx):
                    self.OnVoiceDetected(t)
                if cur_frm_idx - self.confirmed_start_frame + 1 > \
                        self.vad_opts.max_single_segment_time / frm_shift_in_ms:
                    self.OnVoiceEnd(cur_frm_idx, False, False)
                    self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected
                elif not is_final_frame:
                    self.OnVoiceDetected(cur_frm_idx)
                else:
                    self.MaybeOnVoiceEndIfLastFrame(is_final_frame, cur_frm_idx)
            else:
                pass
        elif AudioChangeState.kChangeStateSpeech2Sil == state_change:
            self.continuous_silence_frame_count = 0
            if self.vad_state_machine == VadStateMachine.kVadInStateStartPointNotDetected:
                pass
            elif self.vad_state_machine == VadStateMachine.kVadInStateInSpeechSegment:
                if cur_frm_idx - self.confirmed_start_frame + 1 > \
                        self.vad_opts.max_single_segment_time / frm_shift_in_ms:
                    self.OnVoiceEnd(cur_frm_idx, False, False)
                    self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected
                elif not is_final_frame:
                    self.OnVoiceDetected(cur_frm_idx)
                else:
                    self.MaybeOnVoiceEndIfLastFrame(is_final_frame, cur_frm_idx)
            else:
                pass
        elif AudioChangeState.kChangeStateSpeech2Speech == state_change:
            self.continuous_silence_frame_count = 0
            if self.vad_state_machine == VadStateMachine.kVadInStateInSpeechSegment:
                if cur_frm_idx - self.confirmed_start_frame + 1 > \
                        self.vad_opts.max_single_segment_time / frm_shift_in_ms:
                    self.max_time_out = True
                    self.OnVoiceEnd(cur_frm_idx, False, False)
                    self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected
                elif not is_final_frame:
                    self.OnVoiceDetected(cur_frm_idx)
                else:
                    self.MaybeOnVoiceEndIfLastFrame(is_final_frame, cur_frm_idx)
            else:
                pass
        elif AudioChangeState.kChangeStateSil2Sil == state_change:
            self.continuous_silence_frame_count += 1
            if self.vad_state_machine == VadStateMachine.kVadInStateStartPointNotDetected:
                # silence timeout, return zero length decision
                if ((self.vad_opts.detect_mode == VadDetectMode.kVadSingleUtteranceDetectMode.value) and (
                        self.continuous_silence_frame_count * frm_shift_in_ms > self.vad_opts.max_start_silence_time)) \
                        or (is_final_frame and self.number_end_time_detected == 0):
                    for t in range(self.latest_confirmed_silence_frame + 1, cur_frm_idx):
                        self.OnSilenceDetected(t)
                    self.OnVoiceStart(0, True)
                    self.OnVoiceEnd(0, True, False)
                    self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected
                else:
                    if cur_frm_idx >= self.LatencyFrmNumAtStartPoint():
                        self.OnSilenceDetected(cur_frm_idx - self.LatencyFrmNumAtStartPoint())
            elif self.vad_state_machine == VadStateMachine.kVadInStateInSpeechSegment:
                if self.continuous_silence_frame_count * frm_shift_in_ms >= self.max_end_sil_frame_cnt_thresh:
                    lookback_frame = int(self.max_end_sil_frame_cnt_thresh / frm_shift_in_ms)
                    if self.vad_opts.do_extend:
                        lookback_frame -= int(self.vad_opts.lookahead_time_end_point / frm_shift_in_ms)
                        lookback_frame -= 1
                        lookback_frame = max(0, lookback_frame)
                    self.OnVoiceEnd(cur_frm_idx - lookback_frame, False, False)
                    self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected
                elif cur_frm_idx - self.confirmed_start_frame + 1 > \
                        self.vad_opts.max_single_segment_time / frm_shift_in_ms:
                    self.OnVoiceEnd(cur_frm_idx, False, False)
                    self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected
                elif self.vad_opts.do_extend and not is_final_frame:
                    if self.continuous_silence_frame_count <= int(
                            self.vad_opts.lookahead_time_end_point / frm_shift_in_ms):
                        self.OnVoiceDetected(cur_frm_idx)
                else:
                    self.MaybeOnVoiceEndIfLastFrame(is_final_frame, cur_frm_idx)
            else:
                pass
        if self.vad_state_machine == VadStateMachine.kVadInStateEndPointDetected and \
                self.vad_opts.detect_mode == VadDetectMode.kVadMutipleUtteranceDetectMode.value:
            self.ResetDetection()
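

if __name__ == "__main__":
    # Minimal smoke test: a sketch, not part of the original module. It swaps
    # the trained FSMN for a hypothetical stub whose "posteriors" mark the
    # middle half of the input as speech, just to exercise the state machine.
    # Assumes 16 kHz audio, a 10 ms frame shift, a 25 ms frame length, and
    # pdf id 0 as silence (the VADXOptions defaults).
    class _StubEncoder(nn.Module):
        def forward(self, feats: torch.Tensor, cache: Dict[str, torch.Tensor]) -> torch.Tensor:
            b, t, _ = feats.shape
            scores = torch.full((b, t, 2), 0.5)  # columns: [silence, speech]
            scores[:, t // 4: 3 * t // 4, 0] = 0.05  # "speech" in the middle half
            scores[:, t // 4: 3 * t // 4, 1] = 0.95
            return scores

    torch.manual_seed(0)
    vad = E2EVadModel(_StubEncoder(), vad_post_args={})
    num_frames = 500  # 5 s at a 10 ms frame shift
    feats = torch.zeros(1, num_frames, 400)  # feature dim is arbitrary for the stub
    # num_frames decibel frames need (num_frames - 1) * 160 + 400 samples.
    waveform = 0.1 * torch.randn(1, (num_frames - 1) * 160 + 400)
    segments, _ = vad(feats, waveform, in_cache={}, is_final=True)
    # Expect one [start_ms, end_ms] segment covering roughly the middle of
    # the 5 s input, padded by the detector's lookback/lookahead.
    print(segments)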