sv_inference.py

#!/usr/bin/env python3
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)

import argparse
import logging
import os
import sys
from pathlib import Path
from typing import Any
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union

import numpy as np
import torch
from kaldiio import WriteHelper
from typeguard import check_argument_types
from typeguard import check_return_type

from funasr.utils.cli_utils import get_commandline_args
from funasr.tasks.sv import SVTask
from funasr.tasks.asr import ASRTask
from funasr.torch_utils.device_funcs import to_device
from funasr.torch_utils.set_all_random_seed import set_all_random_seed
from funasr.utils import config_argparse
from funasr.utils.types import str2bool
from funasr.utils.types import str2triple_str
from funasr.utils.types import str_or_none
from funasr.utils.misc import statistic_model_parameters


class Speech2Xvector:
    """Speech2Xvector class

    Extract a speaker embedding (x-vector) from input speech and, optionally,
    score it against a reference utterance.

    Examples:
        >>> import soundfile
        >>> speech2xvector = Speech2Xvector("sv_config.yml", "sv.pb")
        >>> audio, rate = soundfile.read("speech.wav")
        >>> speech2xvector(audio)
        (embedding, ref_embedding, similarity_score)

    The last two elements are None unless ``ref_speech`` is passed.
    """
    def __init__(
        self,
        sv_train_config: Union[Path, str] = None,
        sv_model_file: Union[Path, str] = None,
        device: str = "cpu",
        batch_size: int = 1,
        dtype: str = "float32",
        streaming: bool = False,
        embedding_node: str = "resnet1_dense",
    ):
        assert check_argument_types()

        # 1. Build the SV model
        sv_model, sv_train_args = SVTask.build_model_from_file(
            config_file=sv_train_config,
            model_file=sv_model_file,
            device=device,
        )
        logging.info("sv_model: {}".format(sv_model))
        logging.info("model parameter number: {}".format(statistic_model_parameters(sv_model)))
        logging.info("sv_train_args: {}".format(sv_train_args))
        sv_model.to(dtype=getattr(torch, dtype)).eval()

        self.sv_model = sv_model
        self.sv_train_args = sv_train_args
        self.device = device
        self.dtype = dtype
        self.embedding_node = embedding_node
    @torch.no_grad()
    def calculate_embedding(self, speech: Union[torch.Tensor, np.ndarray]) -> torch.Tensor:
        # Input as audio signal
        if isinstance(speech, np.ndarray):
            speech = torch.tensor(speech)

        # data: (Nsamples,) -> (1, Nsamples)
        speech = speech.unsqueeze(0).to(getattr(torch, self.dtype))
        # lengths: (1,)
        lengths = speech.new_full([1], dtype=torch.long, fill_value=speech.size(1))
        batch = {"speech": speech, "speech_lengths": lengths}

        # a. To device
        batch = to_device(batch, device=self.device)

        # b. Forward Encoder
        enc, ilens = self.sv_model.encode(**batch)

        # c. Forward Pooling
        pooling = self.sv_model.pooling_layer(enc)

        # d. Forward Decoder
        outputs, embeddings = self.sv_model.decoder(pooling)

        if self.embedding_node not in embeddings:
            raise ValueError(
                "Required embedding node {} not in {}".format(
                    self.embedding_node, embeddings.keys()
                )
            )
        return embeddings[self.embedding_node]
    @torch.no_grad()
    def __call__(
        self,
        speech: Union[torch.Tensor, np.ndarray],
        ref_speech: Optional[Union[torch.Tensor, np.ndarray]] = None,
    ) -> Tuple[torch.Tensor, Union[torch.Tensor, None], Union[torch.Tensor, None]]:
        """Inference

        Args:
            speech: Input speech data
            ref_speech: Reference speech to compare against
        Returns:
            embedding, ref_embedding, similarity_score
        """
        assert check_argument_types()

        self.sv_model.eval()
        embedding = self.calculate_embedding(speech)

        ref_emb, score = None, None
        if ref_speech is not None:
            ref_emb = self.calculate_embedding(ref_speech)
            score = torch.cosine_similarity(embedding, ref_emb)

        results = (embedding, ref_emb, score)
        assert check_return_type(results)
        return results
    @staticmethod
    def from_pretrained(
        model_tag: Optional[str] = None,
        **kwargs: Optional[Any],
    ):
        """Build Speech2Xvector instance from the pretrained model.

        Args:
            model_tag (Optional[str]): Model tag of the pretrained models.
                Currently, the tags of espnet_model_zoo are supported.
        Returns:
            Speech2Xvector: Speech2Xvector instance.
        """
        if model_tag is not None:
            try:
                from espnet_model_zoo.downloader import ModelDownloader
            except ImportError:
                logging.error(
                    "`espnet_model_zoo` is not installed. "
                    "Please install via `pip install -U espnet_model_zoo`."
                )
                raise
            d = ModelDownloader()
            kwargs.update(**d.download_and_unpack(model_tag))

        return Speech2Xvector(**kwargs)
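

# A minimal usage sketch for pairwise verification with Speech2Xvector. The
# config/model paths and the wav file names below are placeholders for
# illustration, not files shipped with this script.
def _example_pairwise_verification():
    import soundfile

    sv = Speech2Xvector(sv_train_config="sv_config.yml", sv_model_file="sv.pb", device="cpu")
    enroll, _ = soundfile.read("enroll.wav")  # enrollment utterance (placeholder)
    test, _ = soundfile.read("test.wav")  # test utterance (placeholder)
    embedding, ref_embedding, score = sv(test, ref_speech=enroll)
    # score is a 1-element tensor holding the raw cosine similarity
    print("cosine similarity: {:.4f}".format(score.item()))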


def inference_modelscope(
    output_dir: Optional[str] = None,
    batch_size: int = 1,
    dtype: str = "float32",
    ngpu: int = 1,
    seed: int = 0,
    num_workers: int = 0,
    log_level: Union[int, str] = "INFO",
    key_file: Optional[str] = None,
    sv_train_config: Optional[str] = "sv.yaml",
    sv_model_file: Optional[str] = "sv.pb",
    model_tag: Optional[str] = None,
    allow_variable_data_keys: bool = True,
    streaming: bool = False,
    embedding_node: str = "resnet1_dense",
    sv_threshold: float = 0.9465,
    param_dict: Optional[dict] = None,
    **kwargs,
):
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")

    logging.basicConfig(
        level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
    )
    logging.info("param_dict: {}".format(param_dict))

    if ngpu >= 1 and torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"

    # 1. Set random-seed
    set_all_random_seed(seed)

    # 2. Build speech2xvector
    speech2xvector_kwargs = dict(
        sv_train_config=sv_train_config,
        sv_model_file=sv_model_file,
        device=device,
        dtype=dtype,
        streaming=streaming,
        embedding_node=embedding_node,
    )
    logging.info("speech2xvector_kwargs: {}".format(speech2xvector_kwargs))
    speech2xvector = Speech2Xvector.from_pretrained(
        model_tag=model_tag,
        **speech2xvector_kwargs,
    )
    speech2xvector.sv_model.eval()
    def _forward(
        data_path_and_name_and_type: Sequence[Tuple[str, str, str]] = None,
        raw_inputs: Union[np.ndarray, torch.Tensor] = None,
        output_dir_v2: Optional[str] = None,
        param_dict: Optional[dict] = None,
    ):
        logging.info("param_dict: {}".format(param_dict))
        if data_path_and_name_and_type is None and raw_inputs is not None:
            if isinstance(raw_inputs, torch.Tensor):
                raw_inputs = raw_inputs.numpy()
            data_path_and_name_and_type = [raw_inputs, "speech", "waveform"]

        # 3. Build data-iterator
        loader = ASRTask.build_streaming_iterator(
            data_path_and_name_and_type,
            dtype=dtype,
            batch_size=batch_size,
            key_file=key_file,
            num_workers=num_workers,
            preprocess_fn=None,
            collate_fn=None,
            allow_variable_data_keys=allow_variable_data_keys,
            inference=True,
        )

        # 4. Start the inference loop
        output_path = output_dir_v2 if output_dir_v2 is not None else output_dir
        embd_writer, ref_embd_writer, score_writer = None, None, None
        if output_path is not None:
            os.makedirs(output_path, exist_ok=True)
            embd_writer = WriteHelper(
                "ark,scp:{}/xvector.ark,{}/xvector.scp".format(output_path, output_path)
            )

        sv_result_list = []
        for keys, batch in loader:
            assert isinstance(batch, dict), type(batch)
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"
            batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}

            embedding, ref_embedding, score = speech2xvector(**batch)

            # Only batch_size == 1 is supported
            key = keys[0]
            normalized_score = 0.0
            if score is not None:
                # Map the raw cosine similarity onto [0, 100], clipped at the
                # verification threshold.
                score = score.item()
                normalized_score = max(score - sv_threshold, 0.0) / (1.0 - sv_threshold) * 100.0
                item = {"key": key, "value": normalized_score}
            else:
                item = {"key": key, "value": embedding.squeeze(0).cpu().numpy()}
            sv_result_list.append(item)

            if output_path is not None:
                embd_writer(key, embedding[0].cpu().numpy())
                if ref_embedding is not None:
                    if ref_embd_writer is None:
                        ref_embd_writer = WriteHelper(
                            "ark,scp:{}/ref_xvector.ark,{}/ref_xvector.scp".format(
                                output_path, output_path
                            )
                        )
                        score_writer = open(os.path.join(output_path, "score.txt"), "w")
                    ref_embd_writer(key, ref_embedding[0].cpu().numpy())
                    score_writer.write("{} {:.6f}\n".format(key, normalized_score))

        if output_path is not None:
            embd_writer.close()
            if ref_embd_writer is not None:
                ref_embd_writer.close()
                score_writer.close()

        return sv_result_list

    return _forward
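

# A minimal sketch of driving the closure returned by inference_modelscope()
# with an in-memory waveform instead of scp files. The model paths are
# placeholders, and the 16 kHz mono array is synthetic stand-in audio.
def _example_modelscope_pipeline():
    pipeline = inference_modelscope(
        sv_train_config="sv.yaml",
        sv_model_file="sv.pb",
        ngpu=0,
    )
    waveform = np.random.randn(16000).astype(np.float32)  # 1 s of fake audio
    results = pipeline(raw_inputs=waveform)
    # Without a reference utterance, each item carries the raw x-vector.
    print(results[0]["key"], results[0]["value"].shape)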


def inference(
    output_dir: Optional[str],
    batch_size: int,
    dtype: str,
    ngpu: int,
    seed: int,
    num_workers: int,
    log_level: Union[int, str],
    data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
    key_file: Optional[str],
    sv_train_config: Optional[str],
    sv_model_file: Optional[str],
    model_tag: Optional[str],
    allow_variable_data_keys: bool = True,
    streaming: bool = False,
    embedding_node: str = "resnet1_dense",
    sv_threshold: float = 0.9465,
    **kwargs,
):
    inference_pipeline = inference_modelscope(
        output_dir=output_dir,
        batch_size=batch_size,
        dtype=dtype,
        ngpu=ngpu,
        seed=seed,
        num_workers=num_workers,
        log_level=log_level,
        key_file=key_file,
        sv_train_config=sv_train_config,
        sv_model_file=sv_model_file,
        model_tag=model_tag,
        allow_variable_data_keys=allow_variable_data_keys,
        streaming=streaming,
        embedding_node=embedding_node,
        sv_threshold=sv_threshold,
        **kwargs,
    )
    return inference_pipeline(data_path_and_name_and_type, raw_inputs=None)


def get_parser():
    parser = config_argparse.ArgumentParser(
        description="Speaker verification/x-vector extraction",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    # Note(kamo): Use '_' instead of '-' as separator.
    # '-' is confusing if written in yaml.
    parser.add_argument(
        "--log_level",
        type=lambda x: x.upper(),
        default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
        help="The verbose level of logging",
    )
    parser.add_argument("--output_dir", type=str, required=False)
    parser.add_argument(
        "--ngpu",
        type=int,
        default=0,
        help="The number of gpus. 0 indicates CPU mode",
    )
    parser.add_argument(
        "--gpuid_list",
        type=str,
        default="",
        help="The visible gpus",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed")
    parser.add_argument(
        "--dtype",
        default="float32",
        choices=["float16", "float32", "float64"],
        help="Data type",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=1,
        help="The number of workers used for DataLoader",
    )

    group = parser.add_argument_group("Input data related")
    group.add_argument(
        "--data_path_and_name_and_type",
        type=str2triple_str,
        required=False,
        action="append",
    )
    group.add_argument("--key_file", type=str_or_none)
    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)

    group = parser.add_argument_group("The model configuration related")
    group.add_argument(
        "--sv_train_config",
        type=str,
        help="SV training configuration",
    )
    group.add_argument(
        "--sv_model_file",
        type=str,
        help="SV model parameter file",
    )
    group.add_argument(
        "--sv_threshold",
        type=float,
        default=0.9465,
        help="The threshold for verification",
    )
    group.add_argument(
        "--model_tag",
        type=str,
        help="Pretrained model tag. If this option is given, *_train_config and "
        "*_file will be overwritten",
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="The batch size for inference",
    )
    parser.add_argument("--streaming", type=str2bool, default=False)
    parser.add_argument("--embedding_node", type=str, default="resnet1_dense")
    return parser


def main(cmd=None):
    print(get_commandline_args(), file=sys.stderr)
    parser = get_parser()
    args = parser.parse_args(cmd)
    kwargs = vars(args)
    kwargs.pop("config", None)
    logging.info("args: {}".format(kwargs))

    if args.output_dir is None:
        jobid, n_gpu = 1, 1
        gpuid = args.gpuid_list.split(",")[jobid - 1]
    else:
        jobid = int(args.output_dir.split(".")[-1])
        n_gpu = len(args.gpuid_list.split(","))
        gpuid = args.gpuid_list.split(",")[(jobid - 1) % n_gpu]
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = gpuid

    results_list = inference(**kwargs)
    for results in results_list:
        print("{} {}".format(results["key"], results["value"]))


if __name__ == "__main__":
    main()
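
# Example command line (illustrative paths, not bundled files). Note that when
# --output_dir is given, main() parses the GPU job id from its numeric suffix
# (e.g. "exp/xvectors.1"), so such a suffix is required:
#
#   python sv_inference.py \
#       --sv_train_config sv.yaml \
#       --sv_model_file sv.pb \
#       --data_path_and_name_and_type wav.scp,speech,sound \
#       --ngpu 0 \
#       --output_dir exp/xvectors.1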