# e2e_vad.py

from enum import Enum
from typing import List, Tuple, Dict, Any
import torch
from torch import nn
import math
from funasr.models.encoder.fsmn_encoder import FSMN
from funasr.models.base_model import FunASRModel


class VadStateMachine(Enum):
    kVadInStateStartPointNotDetected = 1
    kVadInStateInSpeechSegment = 2
    kVadInStateEndPointDetected = 3


class FrameState(Enum):
    kFrameStateInvalid = -1
    kFrameStateSpeech = 1
    kFrameStateSil = 0


# final voice/unvoice state per frame
class AudioChangeState(Enum):
    kChangeStateSpeech2Speech = 0
    kChangeStateSpeech2Sil = 1
    kChangeStateSil2Sil = 2
    kChangeStateSil2Speech = 3
    kChangeStateNoBegin = 4
    kChangeStateInvalid = 5


class VadDetectMode(Enum):
    kVadSingleUtteranceDetectMode = 0
    kVadMutipleUtteranceDetectMode = 1
class VADXOptions:
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    Deep-FSMN for Large Vocabulary Continuous Speech Recognition
    https://arxiv.org/abs/1803.05030
    """

    def __init__(
            self,
            sample_rate: int = 16000,
            detect_mode: int = VadDetectMode.kVadMutipleUtteranceDetectMode.value,
            snr_mode: int = 0,
            max_end_silence_time: int = 800,
            max_start_silence_time: int = 3000,
            do_start_point_detection: bool = True,
            do_end_point_detection: bool = True,
            window_size_ms: int = 200,
            sil_to_speech_time_thres: int = 150,
            speech_to_sil_time_thres: int = 150,
            speech_2_noise_ratio: float = 1.0,
            do_extend: int = 1,
            lookback_time_start_point: int = 200,
            lookahead_time_end_point: int = 100,
            max_single_segment_time: int = 60000,
            nn_eval_block_size: int = 8,
            dcd_block_size: int = 4,
            snr_thres: float = -100.0,
            noise_frame_num_used_for_snr: int = 100,
            decibel_thres: float = -100.0,
            speech_noise_thres: float = 0.6,
            fe_prior_thres: float = 1e-4,
            silence_pdf_num: int = 1,
            sil_pdf_ids: List[int] = [0],
            speech_noise_thresh_low: float = -0.1,
            speech_noise_thresh_high: float = 0.3,
            output_frame_probs: bool = False,
            frame_in_ms: int = 10,
            frame_length_ms: int = 25,
    ):
        self.sample_rate = sample_rate
        self.detect_mode = detect_mode
        self.snr_mode = snr_mode
        self.max_end_silence_time = max_end_silence_time
        self.max_start_silence_time = max_start_silence_time
        self.do_start_point_detection = do_start_point_detection
        self.do_end_point_detection = do_end_point_detection
        self.window_size_ms = window_size_ms
        self.sil_to_speech_time_thres = sil_to_speech_time_thres
        self.speech_to_sil_time_thres = speech_to_sil_time_thres
        self.speech_2_noise_ratio = speech_2_noise_ratio
        self.do_extend = do_extend
        self.lookback_time_start_point = lookback_time_start_point
        self.lookahead_time_end_point = lookahead_time_end_point
        self.max_single_segment_time = max_single_segment_time
        self.nn_eval_block_size = nn_eval_block_size
        self.dcd_block_size = dcd_block_size
        self.snr_thres = snr_thres
        self.noise_frame_num_used_for_snr = noise_frame_num_used_for_snr
        self.decibel_thres = decibel_thres
        self.speech_noise_thres = speech_noise_thres
        self.fe_prior_thres = fe_prior_thres
        self.silence_pdf_num = silence_pdf_num
        self.sil_pdf_ids = sil_pdf_ids
        self.speech_noise_thresh_low = speech_noise_thresh_low
        self.speech_noise_thresh_high = speech_noise_thresh_high
        self.output_frame_probs = output_frame_probs
        self.frame_in_ms = frame_in_ms
        self.frame_length_ms = frame_length_ms
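

# Usage sketch (illustrative only, not part of the original module): the keyword
# names mirror the constructor arguments above, and anything not overridden keeps
# its default. The helper name below is hypothetical.
def _example_vad_options() -> VADXOptions:
    # a trailing-silence cutoff of 500 ms and segments capped at 15 s
    return VADXOptions(max_end_silence_time=500, max_single_segment_time=15000)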
class E2EVadSpeechBufWithDoa(object):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    Deep-FSMN for Large Vocabulary Continuous Speech Recognition
    https://arxiv.org/abs/1803.05030
    """

    def __init__(self):
        self.start_ms = 0
        self.end_ms = 0
        self.buffer = []
        self.contain_seg_start_point = False
        self.contain_seg_end_point = False
        self.doa = 0

    def Reset(self):
        self.start_ms = 0
        self.end_ms = 0
        self.buffer = []
        self.contain_seg_start_point = False
        self.contain_seg_end_point = False
        self.doa = 0


class E2EVadFrameProb(object):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    Deep-FSMN for Large Vocabulary Continuous Speech Recognition
    https://arxiv.org/abs/1803.05030
    """

    def __init__(self):
        self.noise_prob = 0.0
        self.speech_prob = 0.0
        self.score = 0.0
        self.frame_id = 0
        self.frm_state = 0
class WindowDetector(object):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    Deep-FSMN for Large Vocabulary Continuous Speech Recognition
    https://arxiv.org/abs/1803.05030
    """

    def __init__(self, window_size_ms: int, sil_to_speech_time: int,
                 speech_to_sil_time: int, frame_size_ms: int):
        self.window_size_ms = window_size_ms
        self.sil_to_speech_time = sil_to_speech_time
        self.speech_to_sil_time = speech_to_sil_time
        self.frame_size_ms = frame_size_ms
        self.win_size_frame = int(window_size_ms / frame_size_ms)
        self.win_sum = 0
        self.win_state = [0] * self.win_size_frame  # initialize the sliding window
        self.cur_win_pos = 0
        self.pre_frame_state = FrameState.kFrameStateSil
        self.cur_frame_state = FrameState.kFrameStateSil
        self.sil_to_speech_frmcnt_thres = int(sil_to_speech_time / frame_size_ms)
        self.speech_to_sil_frmcnt_thres = int(speech_to_sil_time / frame_size_ms)
        self.voice_last_frame_count = 0
        self.noise_last_frame_count = 0
        self.hydre_frame_count = 0

    def Reset(self) -> None:
        self.cur_win_pos = 0
        self.win_sum = 0
        self.win_state = [0] * self.win_size_frame
        self.pre_frame_state = FrameState.kFrameStateSil
        self.cur_frame_state = FrameState.kFrameStateSil
        self.voice_last_frame_count = 0
        self.noise_last_frame_count = 0
        self.hydre_frame_count = 0

    def GetWinSize(self) -> int:
        return int(self.win_size_frame)

    def DetectOneFrame(self, frameState: FrameState, frame_count: int) -> AudioChangeState:
        # map the frame state to a 0/1 vote for the circular window
        if frameState == FrameState.kFrameStateSpeech:
            cur_frame_state = 1
        elif frameState == FrameState.kFrameStateSil:
            cur_frame_state = 0
        else:
            return AudioChangeState.kChangeStateInvalid
        self.win_sum -= self.win_state[self.cur_win_pos]
        self.win_sum += cur_frame_state
        self.win_state[self.cur_win_pos] = cur_frame_state
        self.cur_win_pos = (self.cur_win_pos + 1) % self.win_size_frame
        if self.pre_frame_state == FrameState.kFrameStateSil and self.win_sum >= self.sil_to_speech_frmcnt_thres:
            self.pre_frame_state = FrameState.kFrameStateSpeech
            return AudioChangeState.kChangeStateSil2Speech
        if self.pre_frame_state == FrameState.kFrameStateSpeech and self.win_sum <= self.speech_to_sil_frmcnt_thres:
            self.pre_frame_state = FrameState.kFrameStateSil
            return AudioChangeState.kChangeStateSpeech2Sil
        if self.pre_frame_state == FrameState.kFrameStateSil:
            return AudioChangeState.kChangeStateSil2Sil
        if self.pre_frame_state == FrameState.kFrameStateSpeech:
            return AudioChangeState.kChangeStateSpeech2Speech
        return AudioChangeState.kChangeStateInvalid

    def FrameSizeMs(self) -> int:
        return int(self.frame_size_ms)
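

# Illustrative sketch (not part of the original module): with window_size_ms=200,
# sil_to_speech_time=150 and frame_size_ms=10, the detector keeps a 20-frame circular
# window of 0/1 votes and reports Sil2Speech once 15 of the last 20 frames voted
# speech. The helper name below is hypothetical.
def _demo_window_detector() -> None:
    detector = WindowDetector(window_size_ms=200, sil_to_speech_time=150,
                              speech_to_sil_time=150, frame_size_ms=10)
    for i in range(20):
        state = detector.DetectOneFrame(FrameState.kFrameStateSpeech, i)
        if state == AudioChangeState.kChangeStateSil2Speech:
            # reached on the 15th consecutive speech frame (i == 14)
            print(f"speech onset at frame {i}")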
class E2EVadModel(FunASRModel):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    Deep-FSMN for Large Vocabulary Continuous Speech Recognition
    https://arxiv.org/abs/1803.05030
    """

    def __init__(self, encoder: FSMN, vad_post_args: Dict[str, Any], frontend=None):
        super(E2EVadModel, self).__init__()
        self.vad_opts = VADXOptions(**vad_post_args)
        self.windows_detector = WindowDetector(self.vad_opts.window_size_ms,
                                               self.vad_opts.sil_to_speech_time_thres,
                                               self.vad_opts.speech_to_sil_time_thres,
                                               self.vad_opts.frame_in_ms)
        self.encoder = encoder
        # init variables
        self.data_buf_start_frame = 0
        self.frm_cnt = 0
        self.latest_confirmed_speech_frame = 0
        self.lastest_confirmed_silence_frame = -1
        self.continous_silence_frame_count = 0
        self.vad_state_machine = VadStateMachine.kVadInStateStartPointNotDetected
        self.confirmed_start_frame = -1
        self.confirmed_end_frame = -1
        self.number_end_time_detected = 0
        self.sil_frame = 0
        self.sil_pdf_ids = self.vad_opts.sil_pdf_ids
        self.noise_average_decibel = -100.0
        self.pre_end_silence_detected = False
        self.next_seg = True
        self.output_data_buf = []
        self.output_data_buf_offset = 0
        self.frame_probs = []
        self.max_end_sil_frame_cnt_thresh = self.vad_opts.max_end_silence_time - self.vad_opts.speech_to_sil_time_thres
        self.speech_noise_thres = self.vad_opts.speech_noise_thres
        self.scores = None
        self.max_time_out = False
        self.decibel = []
        self.data_buf = None
        self.data_buf_all = None
        self.waveform = None
        self.frontend = frontend
        self.last_drop_frames = 0

    def AllResetDetection(self):
        self.data_buf_start_frame = 0
        self.frm_cnt = 0
        self.latest_confirmed_speech_frame = 0
        self.lastest_confirmed_silence_frame = -1
        self.continous_silence_frame_count = 0
        self.vad_state_machine = VadStateMachine.kVadInStateStartPointNotDetected
        self.confirmed_start_frame = -1
        self.confirmed_end_frame = -1
        self.number_end_time_detected = 0
        self.sil_frame = 0
        self.sil_pdf_ids = self.vad_opts.sil_pdf_ids
        self.noise_average_decibel = -100.0
        self.pre_end_silence_detected = False
        self.next_seg = True
        self.output_data_buf = []
        self.output_data_buf_offset = 0
        self.frame_probs = []
        self.max_end_sil_frame_cnt_thresh = self.vad_opts.max_end_silence_time - self.vad_opts.speech_to_sil_time_thres
        self.speech_noise_thres = self.vad_opts.speech_noise_thres
        self.scores = None
        self.max_time_out = False
        self.decibel = []
        self.data_buf = None
        self.data_buf_all = None
        self.waveform = None
        self.last_drop_frames = 0
        self.windows_detector.Reset()
    def ResetDetection(self):
        self.continous_silence_frame_count = 0
        self.latest_confirmed_speech_frame = 0
        self.lastest_confirmed_silence_frame = -1
        self.confirmed_start_frame = -1
        self.confirmed_end_frame = -1
        self.vad_state_machine = VadStateMachine.kVadInStateStartPointNotDetected
        self.windows_detector.Reset()
        self.sil_frame = 0
        self.frame_probs = []
        if self.output_data_buf:
            assert self.output_data_buf[-1].contain_seg_end_point
            drop_frames = int(self.output_data_buf[-1].end_ms / self.vad_opts.frame_in_ms)
            real_drop_frames = drop_frames - self.last_drop_frames
            self.last_drop_frames = drop_frames
            self.data_buf_all = self.data_buf_all[
                real_drop_frames * int(self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000):]
            self.decibel = self.decibel[real_drop_frames:]
            self.scores = self.scores[:, real_drop_frames:, :]
    def ComputeDecibel(self) -> None:
        frame_sample_length = int(self.vad_opts.frame_length_ms * self.vad_opts.sample_rate / 1000)
        frame_shift_length = int(self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000)
        if self.data_buf_all is None:
            self.data_buf_all = self.waveform[0]  # self.data_buf points into self.waveform[0]
            self.data_buf = self.data_buf_all
        else:
            self.data_buf_all = torch.cat((self.data_buf_all, self.waveform[0]))
        # per-frame energy in dB: 10 * log10(sum(x^2) + 1e-6)
        for offset in range(0, self.waveform.shape[1] - frame_sample_length + 1, frame_shift_length):
            self.decibel.append(
                10 * math.log10((self.waveform[0][offset: offset + frame_sample_length]).square().sum() + 0.000001))
    def ComputeScores(self, feats: torch.Tensor, in_cache: Dict[str, torch.Tensor]) -> None:
        scores = self.encoder(feats, in_cache).to('cpu')  # returns B * T * D
        assert scores.shape[1] == feats.shape[1], "The shape between feats and scores does not match"
        self.vad_opts.nn_eval_block_size = scores.shape[1]
        self.frm_cnt += scores.shape[1]  # count total frames
        if self.scores is None:
            self.scores = scores  # the first calculation
        else:
            self.scores = torch.cat((self.scores, scores), dim=1)
    def PopDataBufTillFrame(self, frame_idx: int) -> None:  # need check again
        while self.data_buf_start_frame < frame_idx:
            if len(self.data_buf) >= int(self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000):
                self.data_buf_start_frame += 1
                self.data_buf = self.data_buf_all[
                    (self.data_buf_start_frame - self.last_drop_frames)
                    * int(self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000):]
    def PopDataToOutputBuf(self, start_frm: int, frm_cnt: int, first_frm_is_start_point: bool,
                           last_frm_is_end_point: bool, end_point_is_sent_end: bool) -> None:
        self.PopDataBufTillFrame(start_frm)
        expected_sample_number = int(frm_cnt * self.vad_opts.sample_rate * self.vad_opts.frame_in_ms / 1000)
        if last_frm_is_end_point:
            extra_sample = max(0, int(self.vad_opts.frame_length_ms * self.vad_opts.sample_rate / 1000
                                      - self.vad_opts.sample_rate * self.vad_opts.frame_in_ms / 1000))
            expected_sample_number += int(extra_sample)
        if end_point_is_sent_end:
            expected_sample_number = max(expected_sample_number, len(self.data_buf))
        if len(self.data_buf) < expected_sample_number:
            print('error in calling pop data_buf\n')
        if len(self.output_data_buf) == 0 or first_frm_is_start_point:
            self.output_data_buf.append(E2EVadSpeechBufWithDoa())
            self.output_data_buf[-1].Reset()
            self.output_data_buf[-1].start_ms = start_frm * self.vad_opts.frame_in_ms
            self.output_data_buf[-1].end_ms = self.output_data_buf[-1].start_ms
            self.output_data_buf[-1].doa = 0
        cur_seg = self.output_data_buf[-1]
        if cur_seg.end_ms != start_frm * self.vad_opts.frame_in_ms:
            print('warning\n')
        out_pos = len(cur_seg.buffer)  # cur_seg.buffer is currently left untouched
        data_to_pop = 0
        if end_point_is_sent_end:
            data_to_pop = expected_sample_number
        else:
            data_to_pop = int(frm_cnt * self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000)
        if data_to_pop > len(self.data_buf):
            print('VAD data_to_pop is bigger than self.data_buf.size()!!!\n')
            data_to_pop = len(self.data_buf)
            expected_sample_number = len(self.data_buf)
        cur_seg.doa = 0
        for sample_cpy_out in range(0, data_to_pop):
            # cur_seg.buffer[out_pos ++] = data_buf_.back();
            out_pos += 1
        for sample_cpy_out in range(data_to_pop, expected_sample_number):
            # cur_seg.buffer[out_pos++] = data_buf_.back()
            out_pos += 1
        if cur_seg.end_ms != start_frm * self.vad_opts.frame_in_ms:
            print('Something wrong with the VAD algorithm\n')
        self.data_buf_start_frame += frm_cnt
        cur_seg.end_ms = (start_frm + frm_cnt) * self.vad_opts.frame_in_ms
        if first_frm_is_start_point:
            cur_seg.contain_seg_start_point = True
        if last_frm_is_end_point:
            cur_seg.contain_seg_end_point = True
    def OnSilenceDetected(self, valid_frame: int):
        self.lastest_confirmed_silence_frame = valid_frame
        if self.vad_state_machine == VadStateMachine.kVadInStateStartPointNotDetected:
            self.PopDataBufTillFrame(valid_frame)
        # silence_detected_callback_
        # pass

    def OnVoiceDetected(self, valid_frame: int) -> None:
        self.latest_confirmed_speech_frame = valid_frame
        self.PopDataToOutputBuf(valid_frame, 1, False, False, False)

    def OnVoiceStart(self, start_frame: int, fake_result: bool = False) -> None:
        if self.vad_opts.do_start_point_detection:
            pass
        if self.confirmed_start_frame != -1:
            print('not reset vad properly\n')
        else:
            self.confirmed_start_frame = start_frame
        if not fake_result and self.vad_state_machine == VadStateMachine.kVadInStateStartPointNotDetected:
            self.PopDataToOutputBuf(self.confirmed_start_frame, 1, True, False, False)

    def OnVoiceEnd(self, end_frame: int, fake_result: bool, is_last_frame: bool) -> None:
        for t in range(self.latest_confirmed_speech_frame + 1, end_frame):
            self.OnVoiceDetected(t)
        if self.vad_opts.do_end_point_detection:
            pass
        if self.confirmed_end_frame != -1:
            print('not reset vad properly\n')
        else:
            self.confirmed_end_frame = end_frame
        if not fake_result:
            self.sil_frame = 0
            self.PopDataToOutputBuf(self.confirmed_end_frame, 1, False, True, is_last_frame)
        self.number_end_time_detected += 1

    def MaybeOnVoiceEndIfLastFrame(self, is_final_frame: bool, cur_frm_idx: int) -> None:
        if is_final_frame:
            self.OnVoiceEnd(cur_frm_idx, False, True)
            self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected

    def GetLatency(self) -> int:
        return int(self.LatencyFrmNumAtStartPoint() * self.vad_opts.frame_in_ms)

    def LatencyFrmNumAtStartPoint(self) -> int:
        vad_latency = self.windows_detector.GetWinSize()
        if self.vad_opts.do_extend:
            vad_latency += int(self.vad_opts.lookback_time_start_point / self.vad_opts.frame_in_ms)
        return vad_latency
    def GetFrameState(self, t: int) -> FrameState:
        frame_state = FrameState.kFrameStateInvalid
        cur_decibel = self.decibel[t]
        cur_snr = cur_decibel - self.noise_average_decibel
        # for each frame, calc log posterior probability of each state
        if cur_decibel < self.vad_opts.decibel_thres:
            frame_state = FrameState.kFrameStateSil
            self.DetectOneFrame(frame_state, t, False)
            return frame_state

        sum_score = 0.0
        noise_prob = 0.0
        assert len(self.sil_pdf_ids) == self.vad_opts.silence_pdf_num
        if len(self.sil_pdf_ids) > 0:
            assert len(self.scores) == 1  # only batch_size = 1 is supported
            sil_pdf_scores = [self.scores[0][t][sil_pdf_id] for sil_pdf_id in self.sil_pdf_ids]
            sum_score = sum(sil_pdf_scores)
            noise_prob = math.log(sum_score) * self.vad_opts.speech_2_noise_ratio
            total_score = 1.0
            sum_score = total_score - sum_score
        speech_prob = math.log(sum_score)
        if self.vad_opts.output_frame_probs:
            frame_prob = E2EVadFrameProb()
            frame_prob.noise_prob = noise_prob
            frame_prob.speech_prob = speech_prob
            frame_prob.score = sum_score
            frame_prob.frame_id = t
            self.frame_probs.append(frame_prob)
        if math.exp(speech_prob) >= math.exp(noise_prob) + self.speech_noise_thres:
            if cur_snr >= self.vad_opts.snr_thres and cur_decibel >= self.vad_opts.decibel_thres:
                frame_state = FrameState.kFrameStateSpeech
            else:
                frame_state = FrameState.kFrameStateSil
        else:
            frame_state = FrameState.kFrameStateSil
            # running average of the noise floor, used for the SNR estimate above
            if self.noise_average_decibel < -99.9:
                self.noise_average_decibel = cur_decibel
            else:
                self.noise_average_decibel = (cur_decibel + self.noise_average_decibel * (
                        self.vad_opts.noise_frame_num_used_for_snr - 1)) / self.vad_opts.noise_frame_num_used_for_snr

        return frame_state
    def forward(self, feats: torch.Tensor, waveform: torch.Tensor, in_cache: Dict[str, torch.Tensor] = dict(),
                is_final: bool = False
                ) -> Tuple[List[List[List[int]]], Dict[str, torch.Tensor]]:
        if not in_cache:
            self.AllResetDetection()
        self.waveform = waveform  # compute decibel for each frame
        self.ComputeDecibel()
        self.ComputeScores(feats, in_cache)
        if not is_final:
            self.DetectCommonFrames()
        else:
            self.DetectLastFrames()
        segments = []
        for batch_num in range(0, feats.shape[0]):  # only support batch_size = 1 now
            segment_batch = []
            if len(self.output_data_buf) > 0:
                for i in range(self.output_data_buf_offset, len(self.output_data_buf)):
                    if not is_final and (not self.output_data_buf[i].contain_seg_start_point
                                         or not self.output_data_buf[i].contain_seg_end_point):
                        continue
                    segment = [self.output_data_buf[i].start_ms, self.output_data_buf[i].end_ms]
                    segment_batch.append(segment)
                    self.output_data_buf_offset += 1  # need to update this parameter
            if segment_batch:
                segments.append(segment_batch)
        if is_final:
            # reset class variables and clear the dict for the next query
            self.AllResetDetection()
        return segments, in_cache
    def forward_online(self, feats: torch.Tensor, waveform: torch.Tensor, in_cache: Dict[str, torch.Tensor] = dict(),
                       is_final: bool = False, max_end_sil: int = 800
                       ) -> Tuple[List[List[List[int]]], Dict[str, torch.Tensor]]:
        if not in_cache:
            self.AllResetDetection()
        self.max_end_sil_frame_cnt_thresh = max_end_sil - self.vad_opts.speech_to_sil_time_thres
        self.waveform = waveform  # compute decibel for each frame
        self.ComputeScores(feats, in_cache)
        self.ComputeDecibel()
        if not is_final:
            self.DetectCommonFrames()
        else:
            self.DetectLastFrames()
        segments = []
        for batch_num in range(0, feats.shape[0]):  # only support batch_size = 1 now
            segment_batch = []
            if len(self.output_data_buf) > 0:
                for i in range(self.output_data_buf_offset, len(self.output_data_buf)):
                    if not self.output_data_buf[i].contain_seg_start_point:
                        continue
                    if not self.next_seg and not self.output_data_buf[i].contain_seg_end_point:
                        continue
                    start_ms = self.output_data_buf[i].start_ms if self.next_seg else -1
                    if self.output_data_buf[i].contain_seg_end_point:
                        end_ms = self.output_data_buf[i].end_ms
                        self.next_seg = True
                        self.output_data_buf_offset += 1
                    else:
                        end_ms = -1
                        self.next_seg = False
                    segment = [start_ms, end_ms]
                    segment_batch.append(segment)
            if segment_batch:
                segments.append(segment_batch)
        if is_final:
            # reset class variables and clear the dict for the next query
            self.AllResetDetection()
        return segments, in_cache
    def DetectCommonFrames(self) -> int:
        if self.vad_state_machine == VadStateMachine.kVadInStateEndPointDetected:
            return 0
        for i in range(self.vad_opts.nn_eval_block_size - 1, -1, -1):
            frame_state = self.GetFrameState(self.frm_cnt - 1 - i - self.last_drop_frames)
            self.DetectOneFrame(frame_state, self.frm_cnt - 1 - i, False)
        return 0

    def DetectLastFrames(self) -> int:
        if self.vad_state_machine == VadStateMachine.kVadInStateEndPointDetected:
            return 0
        for i in range(self.vad_opts.nn_eval_block_size - 1, -1, -1):
            frame_state = self.GetFrameState(self.frm_cnt - 1 - i - self.last_drop_frames)
            if i != 0:
                self.DetectOneFrame(frame_state, self.frm_cnt - 1 - i, False)
            else:
                self.DetectOneFrame(frame_state, self.frm_cnt - 1, True)
        return 0
    def DetectOneFrame(self, cur_frm_state: FrameState, cur_frm_idx: int, is_final_frame: bool) -> None:
        tmp_cur_frm_state = FrameState.kFrameStateInvalid
        if cur_frm_state == FrameState.kFrameStateSpeech:
            # always true for the default fe_prior_thres; placeholder for a frame-level prior check
            if math.fabs(1.0) > self.vad_opts.fe_prior_thres:
                tmp_cur_frm_state = FrameState.kFrameStateSpeech
            else:
                tmp_cur_frm_state = FrameState.kFrameStateSil
        elif cur_frm_state == FrameState.kFrameStateSil:
            tmp_cur_frm_state = FrameState.kFrameStateSil
        state_change = self.windows_detector.DetectOneFrame(tmp_cur_frm_state, cur_frm_idx)
        frm_shift_in_ms = self.vad_opts.frame_in_ms
        if AudioChangeState.kChangeStateSil2Speech == state_change:
            silence_frame_count = self.continous_silence_frame_count
            self.continous_silence_frame_count = 0
            self.pre_end_silence_detected = False
            start_frame = 0
            if self.vad_state_machine == VadStateMachine.kVadInStateStartPointNotDetected:
                start_frame = max(self.data_buf_start_frame, cur_frm_idx - self.LatencyFrmNumAtStartPoint())
                self.OnVoiceStart(start_frame)
                self.vad_state_machine = VadStateMachine.kVadInStateInSpeechSegment
                for t in range(start_frame + 1, cur_frm_idx + 1):
                    self.OnVoiceDetected(t)
            elif self.vad_state_machine == VadStateMachine.kVadInStateInSpeechSegment:
                for t in range(self.latest_confirmed_speech_frame + 1, cur_frm_idx):
                    self.OnVoiceDetected(t)
                if cur_frm_idx - self.confirmed_start_frame + 1 > \
                        self.vad_opts.max_single_segment_time / frm_shift_in_ms:
                    self.OnVoiceEnd(cur_frm_idx, False, False)
                    self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected
                elif not is_final_frame:
                    self.OnVoiceDetected(cur_frm_idx)
                else:
                    self.MaybeOnVoiceEndIfLastFrame(is_final_frame, cur_frm_idx)
            else:
                pass
        elif AudioChangeState.kChangeStateSpeech2Sil == state_change:
            self.continous_silence_frame_count = 0
            if self.vad_state_machine == VadStateMachine.kVadInStateStartPointNotDetected:
                pass
            elif self.vad_state_machine == VadStateMachine.kVadInStateInSpeechSegment:
                if cur_frm_idx - self.confirmed_start_frame + 1 > \
                        self.vad_opts.max_single_segment_time / frm_shift_in_ms:
                    self.OnVoiceEnd(cur_frm_idx, False, False)
                    self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected
                elif not is_final_frame:
                    self.OnVoiceDetected(cur_frm_idx)
                else:
                    self.MaybeOnVoiceEndIfLastFrame(is_final_frame, cur_frm_idx)
            else:
                pass
        elif AudioChangeState.kChangeStateSpeech2Speech == state_change:
            self.continous_silence_frame_count = 0
            if self.vad_state_machine == VadStateMachine.kVadInStateInSpeechSegment:
                if cur_frm_idx - self.confirmed_start_frame + 1 > \
                        self.vad_opts.max_single_segment_time / frm_shift_in_ms:
                    self.max_time_out = True
                    self.OnVoiceEnd(cur_frm_idx, False, False)
                    self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected
                elif not is_final_frame:
                    self.OnVoiceDetected(cur_frm_idx)
                else:
                    self.MaybeOnVoiceEndIfLastFrame(is_final_frame, cur_frm_idx)
            else:
                pass
        elif AudioChangeState.kChangeStateSil2Sil == state_change:
            self.continous_silence_frame_count += 1
            if self.vad_state_machine == VadStateMachine.kVadInStateStartPointNotDetected:
                # silence timeout, return zero-length decision
                if ((self.vad_opts.detect_mode == VadDetectMode.kVadSingleUtteranceDetectMode.value)
                    and (self.continous_silence_frame_count * frm_shift_in_ms
                         > self.vad_opts.max_start_silence_time)) \
                        or (is_final_frame and self.number_end_time_detected == 0):
                    for t in range(self.lastest_confirmed_silence_frame + 1, cur_frm_idx):
                        self.OnSilenceDetected(t)
                    self.OnVoiceStart(0, True)
                    self.OnVoiceEnd(0, True, False)
                    self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected
                else:
                    if cur_frm_idx >= self.LatencyFrmNumAtStartPoint():
                        self.OnSilenceDetected(cur_frm_idx - self.LatencyFrmNumAtStartPoint())
            elif self.vad_state_machine == VadStateMachine.kVadInStateInSpeechSegment:
                if self.continous_silence_frame_count * frm_shift_in_ms >= self.max_end_sil_frame_cnt_thresh:
                    lookback_frame = int(self.max_end_sil_frame_cnt_thresh / frm_shift_in_ms)
                    if self.vad_opts.do_extend:
                        lookback_frame -= int(self.vad_opts.lookahead_time_end_point / frm_shift_in_ms)
                        lookback_frame -= 1
                        lookback_frame = max(0, lookback_frame)
                    self.OnVoiceEnd(cur_frm_idx - lookback_frame, False, False)
                    self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected
                elif cur_frm_idx - self.confirmed_start_frame + 1 > \
                        self.vad_opts.max_single_segment_time / frm_shift_in_ms:
                    self.OnVoiceEnd(cur_frm_idx, False, False)
                    self.vad_state_machine = VadStateMachine.kVadInStateEndPointDetected
                elif self.vad_opts.do_extend and not is_final_frame:
                    if self.continous_silence_frame_count <= int(
                            self.vad_opts.lookahead_time_end_point / frm_shift_in_ms):
                        self.OnVoiceDetected(cur_frm_idx)
                else:
                    self.MaybeOnVoiceEndIfLastFrame(is_final_frame, cur_frm_idx)
            else:
                pass

        if self.vad_state_machine == VadStateMachine.kVadInStateEndPointDetected and \
                self.vad_opts.detect_mode == VadDetectMode.kVadMutipleUtteranceDetectMode.value:
            self.ResetDetection()
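

# Self-contained sketch (illustrative only, not part of the original module) of the
# framing arithmetic used in ComputeDecibel above: 25 ms windows every 10 ms at 16 kHz
# give 400 samples per frame with a 160-sample shift, and each frame's energy is mapped
# to decibels as 10 * log10(sum(x^2) + 1e-6). The helper name is hypothetical.
def _demo_frame_decibel(waveform: torch.Tensor,
                        sample_rate: int = 16000,
                        frame_length_ms: int = 25,
                        frame_shift_ms: int = 10) -> List[float]:
    frame_sample_length = int(frame_length_ms * sample_rate / 1000)  # 400 samples
    frame_shift_length = int(frame_shift_ms * sample_rate / 1000)    # 160 samples
    decibel = []
    for offset in range(0, waveform.shape[0] - frame_sample_length + 1, frame_shift_length):
        frame = waveform[offset: offset + frame_sample_length]
        decibel.append(10 * math.log10(frame.square().sum().item() + 1e-6))
    return decibel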