# vad_inference.py

import argparse
import json
import logging
import math
import os
import sys
from pathlib import Path
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union

import numpy as np
import torch
from typeguard import check_argument_types
from typeguard import check_return_type

from funasr.fileio.datadir_writer import DatadirWriter
from funasr.models.frontend.wav_frontend import WavFrontend, WavFrontendOnline
from funasr.modules.scorers.scorer_interface import BatchScorerInterface
from funasr.modules.subsampling import TooShortUttError
from funasr.tasks.vad import VADTask
from funasr.torch_utils.device_funcs import to_device
from funasr.torch_utils.set_all_random_seed import set_all_random_seed
from funasr.utils import asr_utils, postprocess_utils, wav_utils
from funasr.utils import config_argparse
from funasr.utils.cli_utils import get_commandline_args
from funasr.utils.types import str2bool
from funasr.utils.types import str2triple_str
from funasr.utils.types import str_or_none
header_colors = '\033[95m'
end_colors = '\033[0m'

global_asr_language: str = 'zh-cn'
global_sample_rate: Union[int, Dict[Any, int]] = {
    'audio_fs': 16000,
    'model_fs': 16000,
}


class Speech2VadSegment:
    """Speech2VadSegment class

    Examples:
        >>> import soundfile
        >>> speech2segment = Speech2VadSegment("vad_config.yml", "vad.pt")
        >>> audio, rate = soundfile.read("speech.wav")
        >>> fbanks, segments = speech2segment(audio)
        >>> segments
        [[[10, 230], [245, 450], ...]]
    """

    def __init__(
            self,
            vad_infer_config: Optional[Union[Path, str]] = None,
            vad_model_file: Optional[Union[Path, str]] = None,
            vad_cmvn_file: Optional[Union[Path, str]] = None,
            device: str = "cpu",
            batch_size: int = 1,
            dtype: str = "float32",
            **kwargs,
    ):
        assert check_argument_types()

        # 1. Build the VAD model
        vad_model, vad_infer_args = VADTask.build_model_from_file(
            vad_infer_config, vad_model_file, device
        )
        frontend = None
        if vad_infer_args.frontend is not None:
            frontend = WavFrontend(cmvn_file=vad_cmvn_file, **vad_infer_args.frontend_conf)

        logging.info("vad_model: {}".format(vad_model))
        logging.info("vad_infer_args: {}".format(vad_infer_args))

        vad_model.to(dtype=getattr(torch, dtype)).eval()
        self.vad_model = vad_model
        self.vad_infer_args = vad_infer_args
        self.device = device
        self.dtype = dtype
        self.frontend = frontend
        self.batch_size = batch_size

    @torch.no_grad()
    def __call__(
            self,
            speech: Union[torch.Tensor, np.ndarray],
            speech_lengths: Union[torch.Tensor, np.ndarray] = None,
            in_cache: Optional[Dict[str, torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, List[List[List[int]]]]:
        """Inference

        Args:
            speech: Input speech data
            speech_lengths: Length of each utterance in the batch
            in_cache: Model cache carried between chunks
        Returns:
            fbanks, segments
        """
        assert check_argument_types()
        in_cache = {} if in_cache is None else in_cache  # avoid a shared mutable default

        # Input as audio signal
        if isinstance(speech, np.ndarray):
            speech = torch.tensor(speech)

        if self.frontend is not None:
            self.frontend.filter_length_max = math.inf
            fbanks, fbanks_len = self.frontend.forward_fbank(speech, speech_lengths)
            feats, feats_len = self.frontend.forward_lfr_cmvn(fbanks, fbanks_len)
            fbanks = to_device(fbanks, device=self.device)
            feats = to_device(feats, device=self.device)
            feats_len = feats_len.int()
        else:
            raise Exception("Need to extract feats first: please configure the frontend")

        # Forward the encoder chunk by chunk, at most 6000 frames per chunk
        t_offset = 0
        step = min(feats_len.max(), 6000)
        # Build independent per-utterance lists; `[[]] * n` would alias one shared list
        segments = [[] for _ in range(self.batch_size)]
        for t_offset in range(0, feats_len, min(step, feats_len - t_offset)):
            if t_offset + step >= feats_len - 1:
                step = feats_len - t_offset
                is_final = True
            else:
                is_final = False
            batch = {
                "feats": feats[:, t_offset:t_offset + step, :],
                # At 16 kHz: 10 ms frame shift = 160 samples, 25 ms window = 400 samples
                "waveform": speech[:, t_offset * 160:min(speech.shape[-1], (t_offset + step - 1) * 160 + 400)],
                "is_final": is_final,
                "in_cache": in_cache,
            }
            segments_part, in_cache = self.vad_model(**batch)
            if segments_part:
                for batch_num in range(0, self.batch_size):
                    segments[batch_num] += segments_part[batch_num]
        return fbanks, segments
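
# A minimal offline usage sketch (not part of the original API docs): it assumes a
# 16 kHz mono wav and hypothetical config/model/cmvn paths, and that the model
# expects batched input shaped [batch, samples].
#
#   import soundfile
#   speech2segment = Speech2VadSegment(
#       vad_infer_config="vad_config.yml",  # hypothetical path
#       vad_model_file="vad.pt",            # hypothetical path
#       vad_cmvn_file="vad.cmvn",           # hypothetical path
#   )
#   audio, rate = soundfile.read("speech.wav")
#   speech = torch.tensor(audio, dtype=torch.float32).unsqueeze(0)
#   speech_lengths = torch.tensor([speech.shape[1]])
#   fbanks, segments = speech2segment(speech, speech_lengths)
#   # segments[0] -> [[start_ms, end_ms], ...]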


class Speech2VadSegmentOnline(Speech2VadSegment):
    """Speech2VadSegmentOnline class

    Examples:
        >>> import soundfile
        >>> speech2segment = Speech2VadSegmentOnline("vad_config.yml", "vad.pt")
        >>> audio, rate = soundfile.read("speech.wav")
        >>> fbanks, segments, in_cache = speech2segment(audio)
        >>> segments
        [[[10, 230], [245, 450], ...]]
    """

    def __init__(self, **kwargs):
        super(Speech2VadSegmentOnline, self).__init__(**kwargs)
        vad_cmvn_file = kwargs.get('vad_cmvn_file', None)
        self.frontend = None
        if self.vad_infer_args.frontend is not None:
            self.frontend = WavFrontendOnline(cmvn_file=vad_cmvn_file, **self.vad_infer_args.frontend_conf)

    @torch.no_grad()
    def __call__(
            self,
            speech: Union[torch.Tensor, np.ndarray],
            speech_lengths: Union[torch.Tensor, np.ndarray] = None,
            in_cache: Optional[Dict[str, torch.Tensor]] = None,
            is_final: bool = False,
            max_end_sil: int = 800,
    ) -> Tuple[torch.Tensor, List[List[List[int]]], Dict[str, torch.Tensor]]:
        """Inference

        Args:
            speech: Input speech data
            speech_lengths: Length of each utterance in the batch
            in_cache: Model cache carried between chunks
            is_final: Whether this chunk is the last one of the stream
            max_end_sil: Maximum trailing silence (ms) before a segment is closed
        Returns:
            fbanks, segments, in_cache
        """
        assert check_argument_types()
        in_cache = {} if in_cache is None else in_cache  # avoid a shared mutable default

        # Input as audio signal
        if isinstance(speech, np.ndarray):
            speech = torch.tensor(speech)

        batch_size = speech.shape[0]
        # Build independent per-utterance lists; `[[]] * n` would alias one shared list
        segments = [[] for _ in range(batch_size)]
        if self.frontend is not None:
            feats, feats_len = self.frontend.forward(speech, speech_lengths, is_final)
            fbanks, _ = self.frontend.get_fbank()
        else:
            raise Exception("Need to extract feats first: please configure the frontend")

        if feats.shape[0]:
            feats = to_device(feats, device=self.device)
            feats_len = feats_len.int()
            waveforms = self.frontend.get_waveforms()
            batch = {
                "feats": feats,
                "waveform": waveforms,
                "in_cache": in_cache,
                "is_final": is_final,
                "max_end_sil": max_end_sil,
            }
            batch = to_device(batch, device=self.device)
            segments, in_cache = self.vad_model.forward_online(**batch)
        return fbanks, segments, in_cache
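
# A streaming usage sketch; the chunk size and paths are illustrative assumptions,
# not values prescribed by this file. The caller carries `in_cache` across calls
# and marks the last chunk with `is_final=True` so the tail segment is flushed.
#
#   speech2segment = Speech2VadSegmentOnline(
#       vad_infer_config="vad_config.yml",  # hypothetical path
#       vad_model_file="vad.pt",            # hypothetical path
#   )
#   cache = {}
#   chunk = 9600  # e.g. 600 ms at 16 kHz (assumption)
#   for start in range(0, speech.shape[1], chunk):
#       piece = speech[:, start:start + chunk]
#       lengths = torch.tensor([piece.shape[1]])
#       final = start + chunk >= speech.shape[1]
#       _, segments, cache = speech2segment(piece, lengths, in_cache=cache, is_final=final)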


def inference(
        batch_size: int,
        ngpu: int,
        log_level: Union[int, str],
        data_path_and_name_and_type,
        vad_infer_config: Optional[str],
        vad_model_file: Optional[str],
        vad_cmvn_file: Optional[str] = None,
        raw_inputs: Union[np.ndarray, torch.Tensor] = None,
        key_file: Optional[str] = None,
        allow_variable_data_keys: bool = False,
        output_dir: Optional[str] = None,
        dtype: str = "float32",
        seed: int = 0,
        num_workers: int = 1,
        online: bool = False,
        **kwargs,
):
    if not online:
        inference_pipeline = inference_modelscope(
            batch_size=batch_size,
            ngpu=ngpu,
            log_level=log_level,
            vad_infer_config=vad_infer_config,
            vad_model_file=vad_model_file,
            vad_cmvn_file=vad_cmvn_file,
            key_file=key_file,
            allow_variable_data_keys=allow_variable_data_keys,
            output_dir=output_dir,
            dtype=dtype,
            seed=seed,
            num_workers=num_workers,
            **kwargs,
        )
    else:
        inference_pipeline = inference_modelscope_online(
            batch_size=batch_size,
            ngpu=ngpu,
            log_level=log_level,
            vad_infer_config=vad_infer_config,
            vad_model_file=vad_model_file,
            vad_cmvn_file=vad_cmvn_file,
            key_file=key_file,
            allow_variable_data_keys=allow_variable_data_keys,
            output_dir=output_dir,
            dtype=dtype,
            seed=seed,
            num_workers=num_workers,
            **kwargs,
        )
    return inference_pipeline(data_path_and_name_and_type, raw_inputs)


def inference_modelscope(
        batch_size: int,
        ngpu: int,
        log_level: Union[int, str],
        vad_infer_config: Optional[str],
        vad_model_file: Optional[str],
        vad_cmvn_file: Optional[str] = None,
        key_file: Optional[str] = None,
        allow_variable_data_keys: bool = False,
        output_dir: Optional[str] = None,
        dtype: str = "float32",
        seed: int = 0,
        num_workers: int = 1,
        **kwargs,
):
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")

    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )

    if ngpu >= 1 and torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"

    # 1. Set the random seed
    set_all_random_seed(seed)

    # 2. Build speech2vadsegment
    speech2vadsegment_kwargs = dict(
        vad_infer_config=vad_infer_config,
        vad_model_file=vad_model_file,
        vad_cmvn_file=vad_cmvn_file,
        device=device,
        dtype=dtype,
    )
    logging.info("speech2vadsegment_kwargs: {}".format(speech2vadsegment_kwargs))
    speech2vadsegment = Speech2VadSegment(**speech2vadsegment_kwargs)

    def _forward(
            data_path_and_name_and_type,
            raw_inputs: Union[np.ndarray, torch.Tensor] = None,
            output_dir_v2: Optional[str] = None,
            fs: dict = None,
            param_dict: dict = None,
    ):
        # 3. Build the data iterator
        if data_path_and_name_and_type is None and raw_inputs is not None:
            if isinstance(raw_inputs, torch.Tensor):
                raw_inputs = raw_inputs.numpy()
            data_path_and_name_and_type = [raw_inputs, "speech", "waveform"]
        loader = VADTask.build_streaming_iterator(
            data_path_and_name_and_type,
            dtype=dtype,
            batch_size=batch_size,
            key_file=key_file,
            num_workers=num_workers,
            preprocess_fn=VADTask.build_preprocess_fn(speech2vadsegment.vad_infer_args, False),
            collate_fn=VADTask.build_collate_fn(speech2vadsegment.vad_infer_args, False),
            allow_variable_data_keys=allow_variable_data_keys,
            inference=True,
        )

        # 4. Start the decoding loop
        # FIXME(kamo): The output format should be discussed
        output_path = output_dir_v2 if output_dir_v2 is not None else output_dir
        if output_path is not None:
            writer = DatadirWriter(output_path)
            ibest_writer = writer["1best_recog"]
        else:
            writer = None
            ibest_writer = None
        vad_results = []
        for keys, batch in loader:
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"

            # do vad segment
            _, results = speech2vadsegment(**batch)
            for i, _ in enumerate(keys):
                if os.environ.get("MODELSCOPE_ENVIRONMENT") == "eas":
                    results[i] = json.dumps(results[i])
                item = {'key': keys[i], 'value': results[i]}
                vad_results.append(item)
                if writer is not None:
                    # Only decode back from JSON if it was serialized above
                    if isinstance(results[i], str):
                        results[i] = json.loads(results[i])
                    ibest_writer["text"][keys[i]] = "{}".format(results[i])
        return vad_results

    return _forward
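
# A pipeline usage sketch: the scp path and data type below are illustrative
# assumptions, not values mandated by this file.
#
#   pipeline = inference_modelscope(
#       batch_size=1, ngpu=0, log_level="INFO",
#       vad_infer_config="vad_config.yml",  # hypothetical path
#       vad_model_file="vad.pt",            # hypothetical path
#   )
#   results = pipeline([("wav.scp", "speech", "sound")])
#   # -> [{'key': 'utt1', 'value': [[10, 230], ...]}, ...]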


def inference_modelscope_online(
        batch_size: int,
        ngpu: int,
        log_level: Union[int, str],
        vad_infer_config: Optional[str],
        vad_model_file: Optional[str],
        vad_cmvn_file: Optional[str] = None,
        key_file: Optional[str] = None,
        allow_variable_data_keys: bool = False,
        output_dir: Optional[str] = None,
        dtype: str = "float32",
        seed: int = 0,
        num_workers: int = 1,
        **kwargs,
):
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")

    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )

    if ngpu >= 1 and torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"

    # 1. Set the random seed
    set_all_random_seed(seed)

    # 2. Build speech2vadsegment
    speech2vadsegment_kwargs = dict(
        vad_infer_config=vad_infer_config,
        vad_model_file=vad_model_file,
        vad_cmvn_file=vad_cmvn_file,
        device=device,
        dtype=dtype,
    )
    logging.info("speech2vadsegment_kwargs: {}".format(speech2vadsegment_kwargs))
    speech2vadsegment = Speech2VadSegmentOnline(**speech2vadsegment_kwargs)

    def _forward(
            data_path_and_name_and_type,
            raw_inputs: Union[np.ndarray, torch.Tensor] = None,
            output_dir_v2: Optional[str] = None,
            fs: dict = None,
            param_dict: dict = None,
    ):
        # 3. Build the data iterator
        if data_path_and_name_and_type is None and raw_inputs is not None:
            if isinstance(raw_inputs, torch.Tensor):
                raw_inputs = raw_inputs.numpy()
            data_path_and_name_and_type = [raw_inputs, "speech", "waveform"]
        loader = VADTask.build_streaming_iterator(
            data_path_and_name_and_type,
            dtype=dtype,
            batch_size=batch_size,
            key_file=key_file,
            num_workers=num_workers,
            preprocess_fn=VADTask.build_preprocess_fn(speech2vadsegment.vad_infer_args, False),
            collate_fn=VADTask.build_collate_fn(speech2vadsegment.vad_infer_args, False),
            allow_variable_data_keys=allow_variable_data_keys,
            inference=True,
        )

        # 4. Start the decoding loop
        # FIXME(kamo): The output format should be discussed
        output_path = output_dir_v2 if output_dir_v2 is not None else output_dir
        if output_path is not None:
            writer = DatadirWriter(output_path)
            ibest_writer = writer["1best_recog"]
        else:
            writer = None
            ibest_writer = None
        vad_results = []

        # Streaming state comes in through param_dict; fall back to safe defaults
        batch_in_cache = param_dict.get('in_cache', dict()) if param_dict is not None else dict()
        is_final = param_dict.get('is_final', False) if param_dict is not None else False
        max_end_sil = param_dict.get('max_end_sil', 800) if param_dict is not None else 800
        for keys, batch in loader:
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"
            batch['in_cache'] = batch_in_cache
            batch['is_final'] = is_final
            batch['max_end_sil'] = max_end_sil

            # do vad segment
            _, results, out_cache = speech2vadsegment(**batch)
            # Hand the updated cache back to the caller for the next chunk
            batch_in_cache = out_cache
            if param_dict is not None:
                param_dict['in_cache'] = out_cache
            if results:
                for i, _ in enumerate(keys):
                    if results[i]:
                        if os.environ.get("MODELSCOPE_ENVIRONMENT") == "eas":
                            results[i] = json.dumps(results[i])
                        item = {'key': keys[i], 'value': results[i]}
                        vad_results.append(item)
                        if writer is not None:
                            # Only decode back from JSON if it was serialized above
                            if isinstance(results[i], str):
                                results[i] = json.loads(results[i])
                            ibest_writer["text"][keys[i]] = "{}".format(results[i])
        return vad_results

    return _forward
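
# An online pipeline sketch: the caller owns `param_dict`, and the model cache
# written into it persists across successive calls; the keys below mirror the
# ones read in `_forward` above. `audio_chunks` is a hypothetical iterable.
#
#   pipeline = inference_modelscope_online(
#       batch_size=1, ngpu=0, log_level="INFO",
#       vad_infer_config="vad_config.yml",  # hypothetical path
#       vad_model_file="vad.pt",            # hypothetical path
#   )
#   param_dict = {'in_cache': {}, 'is_final': False, 'max_end_sil': 800}
#   for chunk in audio_chunks:  # np.ndarray chunks of the stream
#       results = pipeline(None, raw_inputs=chunk, param_dict=param_dict)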


def get_parser():
    parser = config_argparse.ArgumentParser(
        description="VAD Decoding",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    # Note(kamo): Use '_' instead of '-' as the separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    parser.add_argument("--output_dir", type=str, required=False)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument(
        "--gpuid_list",
        type=str,
        default="",
        help="The visible gpus",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )

    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=False,
        action="append",
    )
    group.add_argument("--raw_inputs", type=list, default=None)
    # example=[{'key': 'EdevDEWdIYQ_0021', 'file': '/mnt/data/jiangyu.xzy/test_data/speech_io/SPEECHIO_ASR_ZH00007_zhibodaihuo/wav/EdevDEWdIYQ_0021.wav'}])
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)

    group = parser.add_argument_group("The model configuration related")
    group.add_argument(
        "--vad_infer_config",
        type=str,
        help="VAD infer configuration",
    )
    group.add_argument(
        "--vad_model_file",
        type=str,
        help="VAD model parameter file",
    )
    group.add_argument(
        "--vad_cmvn_file",
        type=str,
        help="Global CMVN file",
    )
    group.add_argument(
        "--online",
        type=str2bool,
        default=False,
        help="Decode in online (streaming) mode",
    )

    group = parser.add_argument_group("infer related")
    group.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    return parser


def main(cmd=None):
    print(get_commandline_args(), file=sys.stderr)
    parser = get_parser()
    args = parser.parse_args(cmd)
    kwargs = vars(args)
    kwargs.pop("config", None)
    inference(**kwargs)


if __name__ == "__main__":
    main()
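
# Example invocation (file paths are hypothetical):
#   python vad_inference.py \
#       --vad_infer_config vad_config.yml \
#       --vad_model_file vad.pt \
#       --vad_cmvn_file vad.cmvn \
#       --data_path_and_name_and_type wav.scp,speech,sound \
#       --output_dir ./vad_out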