eend_ola_inference.py

#!/usr/bin/env python3
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)

import argparse
import logging
import os
import sys
from pathlib import Path
from typing import Any
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union

import numpy as np
import torch
from scipy.signal import medfilt
from typeguard import check_argument_types

from funasr.models.frontend.wav_frontend import WavFrontendMel23
from funasr.tasks.diar import EENDOLADiarTask
from funasr.torch_utils.device_funcs import to_device
from funasr.utils import config_argparse
from funasr.utils.cli_utils import get_commandline_args
from funasr.utils.types import str2bool
from funasr.utils.types import str2triple_str
from funasr.utils.types import str_or_none


class Speech2Diarization:
    """Speech2Diarization class

    Examples:
        >>> import soundfile
        >>> speech2diar = Speech2Diarization("diar_train_config.yml", "diar_model.pb")
        >>> audio, rate = soundfile.read("speech.wav")
        >>> results = speech2diar(audio)
    """
    def __init__(
        self,
        diar_train_config: Union[Path, str] = None,
        diar_model_file: Union[Path, str] = None,
        device: str = "cpu",
        dtype: str = "float32",
    ):
        assert check_argument_types()

        # 1. Build Diarization model
        diar_model, diar_train_args = EENDOLADiarTask.build_model_from_file(
            config_file=diar_train_config,
            model_file=diar_model_file,
            device=device,
        )
        frontend = None
        if diar_train_args.frontend is not None and diar_train_args.frontend_conf is not None:
            frontend = WavFrontendMel23(**diar_train_args.frontend_conf)

        # set up seeds for EDA (encoder-decoder attractor) decoding
        np.random.seed(diar_train_args.seed)
        torch.manual_seed(diar_train_args.seed)
        torch.cuda.manual_seed(diar_train_args.seed)
        os.environ["PYTORCH_SEED"] = str(diar_train_args.seed)

        logging.info("diar_model: {}".format(diar_model))
        logging.info("diar_train_args: {}".format(diar_train_args))
        diar_model.to(dtype=getattr(torch, dtype)).eval()

        self.diar_model = diar_model
        self.diar_train_args = diar_train_args
        self.device = device
        self.dtype = dtype
        self.frontend = frontend
    @torch.no_grad()
    def __call__(
        self,
        speech: Union[torch.Tensor, np.ndarray],
        speech_lengths: Union[torch.Tensor, np.ndarray] = None,
    ):
        """Inference

        Args:
            speech: Input speech data
            speech_lengths: Length of each input utterance
        Returns:
            Diarization results
        """
        assert check_argument_types()

        # Input as audio signal
        if isinstance(speech, np.ndarray):
            speech = torch.tensor(speech)

        if self.frontend is not None:
            feats, feats_len = self.frontend.forward(speech, speech_lengths)
            feats = to_device(feats, device=self.device)
            feats_len = feats_len.int()
            self.diar_model.frontend = None
        else:
            feats = speech
            feats_len = speech_lengths

        batch = {"speech": feats, "speech_lengths": feats_len}
        batch = to_device(batch, device=self.device)
        results = self.diar_model.estimate_sequential(**batch)
        return results
    @staticmethod
    def from_pretrained(
        model_tag: Optional[str] = None,
        **kwargs: Optional[Any],
    ):
        """Build Speech2Diarization instance from the pretrained model.

        Args:
            model_tag (Optional[str]): Model tag of the pretrained models.
                Currently, the tags of espnet_model_zoo are supported.
        Returns:
            Speech2Diarization: Speech2Diarization instance.
        """
        if model_tag is not None:
            try:
                from espnet_model_zoo.downloader import ModelDownloader
            except ImportError:
                logging.error(
                    "`espnet_model_zoo` is not installed. "
                    "Please install via `pip install -U espnet_model_zoo`."
                )
                raise
            d = ModelDownloader()
            kwargs.update(**d.download_and_unpack(model_tag))

        return Speech2Diarization(**kwargs)
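
# A construction sketch via a pretrained tag (hypothetical tag; assumes the
# unpacked archive supplies the diar_train_config/diar_model_file kwargs that
# Speech2Diarization expects):
#
#     speech2diar = Speech2Diarization.from_pretrained(
#         model_tag="damo/some_eend_ola_model",  # hypothetical
#         device="cuda",
#     )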


def inference_modelscope(
    diar_train_config: str,
    diar_model_file: str,
    output_dir: Optional[str] = None,
    batch_size: int = 1,
    dtype: str = "float32",
    ngpu: int = 1,
    num_workers: int = 0,
    log_level: Union[int, str] = "INFO",
    key_file: Optional[str] = None,
    model_tag: Optional[str] = None,
    allow_variable_data_keys: bool = True,
    streaming: bool = False,
    param_dict: Optional[dict] = None,
    **kwargs,
):
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    logging.info("param_dict: {}".format(param_dict))

    if ngpu >= 1 and torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"

    # 1. Build speech2diar
    speech2diar_kwargs = dict(
        diar_train_config=diar_train_config,
        diar_model_file=diar_model_file,
        device=device,
        dtype=dtype,
    )
    logging.info("speech2diarization_kwargs: {}".format(speech2diar_kwargs))
    speech2diar = Speech2Diarization.from_pretrained(
        model_tag=model_tag,
        **speech2diar_kwargs,
    )
    speech2diar.diar_model.eval()
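
    # Helper: turn {spk: [(start_frame, end_frame), ...]} into RTTM SPEAKER
    # lines. An RTTM record is "SPEAKER <file> <chnl> <onset> <duration> ...",
    # so the second time field is a duration, not an offset; the /100 below
    # assumes segment boundaries given in 10 ms frame units.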
    def output_results_str(results: dict, uttid: str):
        rst = []
        mid = uttid.rsplit("-", 1)[0]
        for key in results:
            results[key] = [(x[0] / 100, x[1] / 100) for x in results[key]]
        template = "SPEAKER {} 0 {:.2f} {:.2f} <NA> <NA> {} <NA> <NA>"
        for spk, segs in results.items():
            rst.extend([template.format(mid, st, ed - st, spk) for st, ed in segs])
        return "\n".join(rst)
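    # Example (illustrative): {"spk1": [(0, 150)]} with uttid "rec1-001" yields
    #   SPEAKER rec1 0 0.00 1.50 <NA> <NA> spk1 <NA> <NA>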

    def _forward(
        data_path_and_name_and_type: Sequence[Tuple[str, str, str]] = None,
        raw_inputs: List[List[Union[np.ndarray, torch.Tensor, str, bytes]]] = None,
        output_dir_v2: Optional[str] = None,
        param_dict: Optional[dict] = None,
    ):
        # 2. Build data-iterator
        if data_path_and_name_and_type is None and raw_inputs is not None:
            if isinstance(raw_inputs, torch.Tensor):
                raw_inputs = raw_inputs.numpy()
            data_path_and_name_and_type = [raw_inputs[0], "speech", "sound"]

        loader = EENDOLADiarTask.build_streaming_iterator(
            data_path_and_name_and_type,
            dtype=dtype,
            batch_size=batch_size,
            key_file=key_file,
            num_workers=num_workers,
            preprocess_fn=EENDOLADiarTask.build_preprocess_fn(speech2diar.diar_train_args, False),
            collate_fn=EENDOLADiarTask.build_collate_fn(speech2diar.diar_train_args, False),
            allow_variable_data_keys=allow_variable_data_keys,
            inference=True,
        )
        # 3. Start for-loop
        output_path = output_dir_v2 if output_dir_v2 is not None else output_dir
        if output_path is not None:
            os.makedirs(output_path, exist_ok=True)
            output_writer = open("{}/result.txt".format(output_path), "w")
        result_list = []
        for keys, batch in loader:
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"
            # batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
            results = speech2diar(**batch)

            # post process
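            # Post-processing sketch: results[0][0] holds the frame-level
            # speaker activity matrix (frames x speakers). medfilt with an
            # 11-frame window smooths spurious flips per speaker; padding each
            # speaker track with a zero on both ends lets np.diff expose every
            # on/off change point, and pairing consecutive change points gives
            # (start, end) segments. The /10. below assumes a 100 ms frame
            # shift, i.e. 10 frames per second.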
            a = results[0][0].cpu().numpy()
            a = medfilt(a, (11, 1))
            rst = []
            for spkid, frames in enumerate(a.T):
                frames = np.pad(frames, (1, 1), "constant")
                changes, = np.where(np.diff(frames, axis=0) != 0)
                fmt = "SPEAKER {:s} 1 {:7.2f} {:7.2f} <NA> <NA> {:s} <NA>"
                for s, e in zip(changes[::2], changes[1::2]):
                    st = s / 10.
                    dur = (e - s) / 10.
                    rst.append(fmt.format(keys[0], st, dur, "{}_{}".format(keys[0], str(spkid))))

            # Only supporting batch_size==1
            value = "\n".join(rst)
            item = {"key": keys[0], "value": value}
            result_list.append(item)
            if output_path is not None:
                output_writer.write(value + "\n")
                output_writer.flush()

        if output_path is not None:
            output_writer.close()
        return result_list

    return _forward
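
# Usage sketch (hypothetical paths): build the pipeline once, then run it over
# an scp-style data list, where each entry is a (path, name, type) triple:
#
#     pipeline = inference_modelscope("config.yaml", "model.pb", output_dir="exp")
#     results = pipeline([("data/wav.scp", "speech", "sound")])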


def inference(
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    diar_train_config: Optional[str],
    diar_model_file: Optional[str],
    output_dir: Optional[str] = None,
    batch_size: int = 1,
    dtype: str = "float32",
    ngpu: int = 0,
    seed: int = 0,
    num_workers: int = 1,
    log_level: Union[int, str] = "INFO",
    key_file: Optional[str] = None,
    model_tag: Optional[str] = None,
    allow_variable_data_keys: bool = True,
    streaming: bool = False,
    smooth_size: int = 83,
    dur_threshold: int = 10,
    out_format: str = "vad",
    **kwargs,
):
    inference_pipeline = inference_modelscope(
        diar_train_config=diar_train_config,
        diar_model_file=diar_model_file,
        output_dir=output_dir,
        batch_size=batch_size,
        dtype=dtype,
        ngpu=ngpu,
        seed=seed,
        num_workers=num_workers,
        log_level=log_level,
        key_file=key_file,
        model_tag=model_tag,
        allow_variable_data_keys=allow_variable_data_keys,
        streaming=streaming,
        smooth_size=smooth_size,
        dur_threshold=dur_threshold,
        out_format=out_format,
        **kwargs,
    )
    return inference_pipeline(data_path_and_name_and_type, raw_inputs=None)
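
# Note: smooth_size, dur_threshold and out_format are accepted here and passed
# into inference_modelscope's **kwargs, but nothing in this file consumes them;
# they are presumably picked up downstream.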


def get_parser():
    parser = config_argparse.ArgumentParser(
        description="EEND-OLA speaker diarization inference",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    parser.add_argument("--output_dir", type=str, required=False)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument(
        "--gpuid_list",
        type=str,
        default="",
        help="The visible gpus",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )

    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=False,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)

    group = parser.add_argument_group("The model configuration related")
    group.add_argument(
        "--diar_train_config",
        type=str,
        help="Diarization training configuration",
    )
    group.add_argument(
        "--diar_model_file",
        type=str,
        help="Diarization model parameter file",
    )
    group.add_argument(
        "--dur_threshold",
        type=int,
        default=10,
        help="The duration threshold for short segments, in number of frames",
    )
    group.add_argument(
        "--smooth_size",
        type=int,
        default=83,
        help="The smoothing window length, in number of frames",
    )
    group.add_argument(
        "--model_tag",
        type=str,
        help="Pretrained model tag. If this option is specified, *_train_config and "
        "*_file will be overwritten",
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    parser.add_argument("--streaming", type=str2bool, default=False)
    return parser
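
# Example invocation (hypothetical paths; note that main() parses the job id
# from the suffix of --output_dir, so the directory name must end in ".<N>"):
#
#     python eend_ola_inference.py \
#         --diar_train_config exp/diar/config.yaml \
#         --diar_model_file exp/diar/model.pb \
#         --data_path_and_name_and_type data/wav.scp,speech,sound \
#         --gpuid_list 0 \
#         --output_dir exp/diar/decode.1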


def main(cmd=None):
    print(get_commandline_args(), file=sys.stderr)
    parser = get_parser()
    args = parser.parse_args(cmd)
    kwargs = vars(args)
    kwargs.pop("config", None)
    logging.info("args: {}".format(kwargs))

    if args.output_dir is None:
        jobid, n_gpu = 1, 1
        gpuid = args.gpuid_list.split(",")[jobid - 1]
    else:
        jobid = int(args.output_dir.split(".")[-1])
        n_gpu = len(args.gpuid_list.split(","))
        gpuid = args.gpuid_list.split(",")[(jobid - 1) % n_gpu]
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = gpuid

    results_list = inference(**kwargs)
    for results in results_list:
        print("{} {}".format(results["key"], results["value"]))


if __name__ == "__main__":
    main()