# inference.py

import os.path
import torch
import numpy as np
import hydra
import json
from omegaconf import DictConfig, OmegaConf
import logging
from funasr.download.download_from_hub import download_model
from funasr.train_utils.set_all_random_seed import set_all_random_seed
from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_bytes, load_audio, extract_fbank
from funasr.train_utils.device_funcs import to_device
from tqdm import tqdm
from funasr.train_utils.load_pretrained_model import load_pretrained_model
import time
import random
import string
from funasr.register import tables
from funasr.utils.vad_utils import slice_padding_audio_samples
from funasr.utils.timestamp_tools import time_stamp_sentence


def build_iter_for_infer(data_in, input_len=None, data_type="sound", key=None):
    """Normalize the supported input forms into parallel (key, data) lists.

    :param data_in: wav path, filelist (.scp/.txt/.json/.jsonl), raw bytes,
        audio sample points / fbank tensor, or a list/tuple of these
    :param input_len: optional input length (unused here, kept for API symmetry)
    :param data_type: input data type, e.g. "sound"
    :param key: optional utterance id; a random key is generated when absent
    :return: (key_list, data_list)
    """
    data_list = []
    key_list = []
    filelist = [".scp", ".txt", ".json", ".jsonl"]
    chars = string.ascii_letters + string.digits
    if isinstance(data_in, str) and os.path.exists(data_in):  # wav path, or filelist: wav.scp, file.jsonl, text.txt
        _, file_extension = os.path.splitext(data_in)
        file_extension = file_extension.lower()
        if file_extension in filelist:  # filelist: wav.scp, file.jsonl, text.txt
            with open(data_in, encoding="utf-8") as fin:
                for line in fin:
                    key = "rand_key_" + "".join(random.choice(chars) for _ in range(13))
                    if data_in.endswith(".jsonl"):  # file.jsonl: json.dumps({"source": data})
                        lines = json.loads(line.strip())
                        data = lines["source"]
                        key = lines["key"] if "key" in lines else key  # the key lives on the JSON object, not in the source path
                    else:  # wav.scp / text.txt: "id <tab> data" or bare data
                        lines = line.strip().split(maxsplit=1)
                        data = lines[1] if len(lines) > 1 else lines[0]
                        key = lines[0] if len(lines) > 1 else key
                    data_list.append(data)
                    key_list.append(key)
        else:  # a single wav file
            key = "rand_key_" + "".join(random.choice(chars) for _ in range(13))
            data_list = [data_in]
            key_list = [key]
    elif isinstance(data_in, (list, tuple)):  # [audio sample points, fbank]
        data_list = data_in
        key_list = ["rand_key_" + "".join(random.choice(chars) for _ in range(13)) for _ in range(len(data_in))]
    else:  # raw text; audio sample points; fbank; bytes
        if isinstance(data_in, bytes):  # audio bytes
            data_in = load_bytes(data_in)
        if key is None:
            key = "rand_key_" + "".join(random.choice(chars) for _ in range(13))
        data_list = [data_in]
        key_list = [key]
    return key_list, data_list
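

# Input-form sketch for build_iter_for_infer (file names and keys below are
# illustrative, not real files):
#   build_iter_for_infer("audio.wav")     -> (["rand_key_..."], ["audio.wav"])
#   build_iter_for_infer("wav.scp")       -> one entry per "id<TAB>path" line
#   build_iter_for_infer("data.jsonl")    -> per-line {"source": ..., "key": ...}
#   build_iter_for_infer([t1, t2])        -> random keys, data passed through
#   build_iter_for_infer(b"RIFF...")      -> bytes decoded via load_bytes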


@hydra.main(config_name=None, version_base=None)
def main_hydra(kwargs: DictConfig):
    log_level = getattr(logging, kwargs.get("log_level", "INFO").upper())
    logging.basicConfig(level=log_level)
    if kwargs.get("debug", False):
        import pdb; pdb.set_trace()

    model = AutoModel(**kwargs)
    res = model(input=kwargs["input"])
    print(res)
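
# A hypothetical invocation of the Hydra entry point above (the override names
# mirror the kwargs read in this file; the model id and wav path are
# placeholders):
#   python inference.py +model=paraformer-zh +input=asr_example.wav +device=cpu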


class AutoModel:

    def __init__(self, **kwargs):
        tables.print()
        model, kwargs = self.build_model(**kwargs)

        # build the vad model if one is specified, otherwise keep None
        vad_model = kwargs.get("vad_model", None)
        vad_kwargs = kwargs.get("vad_model_revision", None)
        if vad_model is not None:
            print("build vad model")
            vad_kwargs = {"model": vad_model, "model_revision": vad_kwargs}
            vad_model, vad_kwargs = self.build_model(**vad_kwargs)

        # build the punctuation model if one is specified, otherwise keep None
        punc_model = kwargs.get("punc_model", None)
        punc_kwargs = kwargs.get("punc_model_revision", None)
        if punc_model is not None:
            punc_kwargs = {"model": punc_model, "model_revision": punc_kwargs}
            punc_model, punc_kwargs = self.build_model(**punc_kwargs)

        self.kwargs = kwargs
        self.model = model
        self.vad_model = vad_model
        self.vad_kwargs = vad_kwargs
        self.punc_model = punc_model
        self.punc_kwargs = punc_kwargs
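
    # Minimal usage sketch (the model ids are illustrative placeholders; any
    # name registered in tables or available on the model hub should work):
    #   model = AutoModel(model="paraformer-zh", vad_model="fsmn-vad",
    #                     punc_model="ct-punc")
    #   res = model(input="asr_example.wav")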

    def build_model(self, **kwargs):
        assert "model" in kwargs
        if "model_conf" not in kwargs:
            logging.info("download models from model hub: {}".format(kwargs.get("model_hub", "ms")))
            kwargs = download_model(**kwargs)

        set_all_random_seed(kwargs.get("seed", 0))

        device = kwargs.get("device", "cuda")
        # fall back to cpu when cuda is unavailable or ngpu is explicitly 0
        if not torch.cuda.is_available() or kwargs.get("ngpu", 1) == 0:
            device = "cpu"
            kwargs["batch_size"] = 1
        kwargs["device"] = device

        if kwargs.get("ncpu", None):
            torch.set_num_threads(kwargs.get("ncpu"))

        # build tokenizer
        tokenizer = kwargs.get("tokenizer", None)
        if tokenizer is not None:
            tokenizer_class = tables.tokenizer_classes.get(tokenizer.lower())
            tokenizer = tokenizer_class(**kwargs["tokenizer_conf"])
            kwargs["tokenizer"] = tokenizer
            kwargs["token_list"] = tokenizer.token_list

        # build frontend
        frontend = kwargs.get("frontend", None)
        if frontend is not None:
            frontend_class = tables.frontend_classes.get(frontend.lower())
            frontend = frontend_class(**kwargs["frontend_conf"])
            kwargs["frontend"] = frontend
            kwargs["input_size"] = frontend.output_size()

        # build model
        model_class = tables.model_classes.get(kwargs["model"].lower())
        model = model_class(**kwargs, **kwargs["model_conf"],
                            vocab_size=len(tokenizer.token_list) if tokenizer is not None else -1)
        model.eval()
        model.to(device)

        # init_param
        init_param = kwargs.get("init_param", None)
        if init_param is not None:
            logging.info(f"Loading pretrained params from {init_param}")
            load_pretrained_model(
                model=model,
                init_param=init_param,
                ignore_init_mismatch=kwargs.get("ignore_init_mismatch", False),
                oss_bucket=kwargs.get("oss_bucket", None),
            )

        return model, kwargs
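
    # Registry sketch: tables maps lower-cased names to registered classes,
    # e.g. tables.model_classes.get("paraformer") returns the model class
    # registered under that name (the name "paraformer" is illustrative);
    # tokenizer_classes and frontend_classes are resolved the same way above.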

    def __call__(self, input, input_len=None, **cfg):
        if self.vad_model is None:
            return self.generate(input, input_len=input_len, **cfg)
        else:
            return self.generate_with_vad(input, input_len=input_len, **cfg)

    def generate(self, input, input_len=None, model=None, kwargs=None, key=None, **cfg):
        kwargs = self.kwargs if kwargs is None else kwargs
        kwargs.update(cfg)
        model = self.model if model is None else model

        data_type = kwargs.get("data_type", "sound")
        batch_size = kwargs.get("batch_size", 1)
        if kwargs.get("device", "cpu") == "cpu":
            batch_size = 1

        key_list, data_list = build_iter_for_infer(input, input_len=input_len, data_type=data_type, key=key)

        speed_stats = {}
        asr_result_list = []
        num_samples = len(data_list)
        pbar = tqdm(colour="blue", total=num_samples + 1, dynamic_ncols=True)
        time_speech_total = 0.0
        time_escape_total = 0.0
        for beg_idx in range(0, num_samples, batch_size):
            end_idx = min(num_samples, beg_idx + batch_size)
            data_batch = data_list[beg_idx:end_idx]
            key_batch = key_list[beg_idx:end_idx]
            batch = {"data_in": data_batch, "key": key_batch}
            if (end_idx - beg_idx) == 1 and isinstance(data_batch[0], torch.Tensor):  # fbank input
                batch["data_in"] = data_batch[0]
                batch["data_lengths"] = input_len

            time1 = time.perf_counter()
            results, meta_data = model.generate(**batch, **kwargs)
            time2 = time.perf_counter()
            asr_result_list.extend(results)
            pbar.update(1)

            # batch_data_time = time_per_frame_s * data_batch_i["speech_lengths"].sum().item()
            batch_data_time = meta_data.get("batch_data_time", -1)
            time_escape = time2 - time1
            speed_stats["load_data"] = meta_data.get("load_data", 0.0)
            speed_stats["extract_feat"] = meta_data.get("extract_feat", 0.0)
            speed_stats["forward"] = f"{time_escape:0.3f}"
            speed_stats["batch_size"] = f"{len(results)}"
            speed_stats["rtf"] = f"{time_escape / batch_data_time:0.3f}"
            pbar.set_description(f"{speed_stats}")
            time_speech_total += batch_data_time
            time_escape_total += time_escape

        pbar.update(1)
        pbar.set_description(f"rtf_avg: {time_escape_total / time_speech_total:0.3f}")
        torch.cuda.empty_cache()
        return asr_result_list

    def generate_with_vad(self, input, input_len=None, **cfg):
        # step.1: run the vad model to segment the input
        model = self.vad_model
        kwargs = self.vad_kwargs
        kwargs.update(cfg)
        beg_vad = time.time()
        res = self.generate(input, input_len=input_len, model=model, kwargs=kwargs, **cfg)
        end_vad = time.time()
        print(f"time cost vad: {end_vad - beg_vad:0.3f}")

        # step.2: run the asr model on the vad segments
        model = self.model
        kwargs = self.kwargs
        kwargs.update(cfg)
        batch_size = int(kwargs.get("batch_size_s", 300)) * 1000  # batch budget, in ms of audio
        batch_size_threshold_ms = int(kwargs.get("batch_size_threshold_s", 60)) * 1000
        kwargs["batch_size"] = batch_size
        data_type = kwargs.get("data_type", "sound")
        key_list, data_list = build_iter_for_infer(input, input_len=input_len, data_type=data_type)
        results_ret_list = []
        time_speech_total_all_samples = 0.0
        beg_total = time.time()
        pbar_total = tqdm(colour="red", total=len(res) + 1, dynamic_ncols=True)
        for i in range(len(res)):
            key = res[i]["key"]
            vadsegments = res[i]["value"]
            input_i = data_list[i]
            speech = load_audio(input_i, fs=kwargs["frontend"].fs, audio_fs=kwargs.get("fs", 16000))
            speech_lengths = len(speech)
            n = len(vadsegments)
            data_with_index = [(vadsegments[idx], idx) for idx in range(n)]
            sorted_data = sorted(data_with_index, key=lambda x: x[0][1] - x[0][0])  # sort segments by duration
            results_sorted = []

            if not len(sorted_data):
                logging.info("decoding, utt: {}, empty speech".format(key))
                continue

            if len(sorted_data) > 0 and len(sorted_data[0]) > 0:
                batch_size = max(batch_size, sorted_data[0][0][1] - sorted_data[0][0][0])

            batch_size_ms_cum = 0
            beg_idx = 0
            beg_asr_total = time.time()
            time_speech_total_per_sample = speech_lengths / 16000
            time_speech_total_all_samples += time_speech_total_per_sample
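
            # Dynamic batching: segments are accumulated (shortest first) until
            # adding the next one would exceed the audio-duration budget
            # (batch_size, in ms), or the next segment alone is longer than
            # batch_size_threshold_ms; the accumulated slice is then decoded.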
            for j in range(n):
                batch_size_ms_cum += sorted_data[j][0][1] - sorted_data[j][0][0]
                if j < n - 1 and (
                        batch_size_ms_cum + sorted_data[j + 1][0][1] - sorted_data[j + 1][0][0]) < batch_size and (
                        sorted_data[j + 1][0][1] - sorted_data[j + 1][0][0]) < batch_size_threshold_ms:
                    continue
                batch_size_ms_cum = 0
                end_idx = j + 1
                speech_j, speech_lengths_j = slice_padding_audio_samples(speech, speech_lengths, sorted_data[beg_idx:end_idx])
                beg_idx = end_idx
                results = self.generate(speech_j, input_len=None, model=model, kwargs=kwargs, **cfg)
                if len(results) < 1:
                    continue
                results_sorted.extend(results)

            pbar_total.update(1)
            end_asr_total = time.time()
            time_escape_total_per_sample = end_asr_total - beg_asr_total
            pbar_total.set_description(f"rtf_avg_per_sample: {time_escape_total_per_sample / time_speech_total_per_sample:0.3f}, "
                                       f"time_speech_total_per_sample: {time_speech_total_per_sample:0.3f}, "
                                       f"time_escape_total_per_sample: {time_escape_total_per_sample:0.3f}")

            # restore the original segment order
            restored_data = [0] * n
            for j in range(n):
                index = sorted_data[j][1]
                restored_data[index] = results_sorted[j]

            # merge per-segment results; shift timestamps by the segment start
            result = {}
            for j in range(n):
                for k, v in restored_data[j].items():
                    if not k.startswith("timestamp"):
                        if k not in result:
                            result[k] = restored_data[j][k]
                        else:
                            result[k] += restored_data[j][k]
                    else:
                        if k not in result:  # initialize once; do not drop earlier segments
                            result[k] = []
                        for t in restored_data[j][k]:
                            t[0] += vadsegments[j][0]
                            t[1] += vadsegments[j][0]
                        result[k] += restored_data[j][k]
  275. result["key"] = key
  276. results_ret_list.append(result)
  277. pbar_total.update(1)
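
        # Assumed shape of one merged entry in results_ret_list (the exact
        # fields depend on the underlying model):
        #   {"key": ..., "text": ..., "timestamp": [[beg_ms, end_ms], ...]}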

        # step.3: run the punc model (optional) to restore punctuation
        model = self.punc_model
        kwargs = self.punc_kwargs
        kwargs.update(cfg)
        if model is not None:
            for i, result in enumerate(results_ret_list):
                beg_punc = time.time()
                res = self.generate(result["text"], model=model, kwargs=kwargs, **cfg)
                end_punc = time.time()
                print(f"time cost punc: {end_punc - beg_punc:0.3f}")
                # sentences = time_stamp_sentence(model.punc_list, model.sentence_end_id, results_ret_list[i]["timestamp"], res[i]["text"])
                # results_ret_list[i]["time_stamp"] = res[0]["text_postprocessed_punc"]
                # results_ret_list[i]["sentences"] = sentences
                results_ret_list[i]["text_with_punc"] = res[0]["text"]  # generate returns a list; take the single result
                pbar_total.update(1)

        end_total = time.time()
        time_escape_total_all_samples = end_total - beg_total
        pbar_total.set_description(f"rtf_avg_all_samples: {time_escape_total_all_samples / time_speech_total_all_samples:0.3f}, "
                                   f"time_speech_total_all_samples: {time_speech_total_all_samples:0.3f}, "
                                   f"time_escape_total_all_samples: {time_escape_total_all_samples:0.3f}")
        return results_ret_list


class AutoFrontend:

    def __init__(self, **kwargs):
        assert "model" in kwargs
        if "model_conf" not in kwargs:
            logging.info("download models from model hub: {}".format(kwargs.get("model_hub", "ms")))
            kwargs = download_model(**kwargs)

        # build frontend
        frontend = kwargs.get("frontend", None)
        if frontend is not None:
            frontend_class = tables.frontend_classes.get(frontend.lower())
            frontend = frontend_class(**kwargs["frontend_conf"])
        self.frontend = frontend
        self.kwargs = kwargs

    def __call__(self, input, input_len=None, kwargs=None, **cfg):
        kwargs = self.kwargs if kwargs is None else kwargs
        kwargs.update(cfg)
        key_list, data_list = build_iter_for_infer(input, input_len=input_len)
        batch_size = kwargs.get("batch_size", 1)
        device = kwargs.get("device", "cpu")
        if device == "cpu":
            batch_size = 1

        meta_data = {}
        result_list = []
        num_samples = len(data_list)
        pbar = tqdm(colour="blue", total=num_samples + 1, dynamic_ncols=True)
        time0 = time.perf_counter()
        for beg_idx in range(0, num_samples, batch_size):
            end_idx = min(num_samples, beg_idx + batch_size)
            data_batch = data_list[beg_idx:end_idx]
            key_batch = key_list[beg_idx:end_idx]

            # extract fbank feats
            time1 = time.perf_counter()
            audio_sample_list = load_audio(data_batch, fs=self.frontend.fs, audio_fs=kwargs.get("fs", 16000))
            time2 = time.perf_counter()
            meta_data["load_data"] = f"{time2 - time1:0.3f}"
            speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"),
                                                   frontend=self.frontend)
            time3 = time.perf_counter()
            meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
            meta_data["batch_data_time"] = speech_lengths.sum().item() * self.frontend.frame_shift * self.frontend.lfr_n / 1000

            # Tensor.to() is not in-place; rebind the results
            speech = speech.to(device=device)
            speech_lengths = speech_lengths.to(device=device)

            batch = {"input": speech, "input_len": speech_lengths, "key": key_batch}
            result_list.append(batch)
            pbar.update(1)
            pbar.set_description(f"{meta_data}")

        time_end = time.perf_counter()
        pbar.set_description(f"time elapsed total: {time_end - time0:0.3f}")
        return result_list
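

# Minimal AutoFrontend usage sketch (model id and wav path are placeholders):
#   frontend = AutoFrontend(model="paraformer-zh")
#   batches = frontend(input="asr_example.wav", batch_size=1)
#   # each element: {"input": fbank, "input_len": lengths, "key": [...]}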


if __name__ == '__main__':
    main_hydra()