auto_model.py

import json
import time
import torch
import hydra
import random
import string
import logging
import os.path
from tqdm import tqdm
from omegaconf import DictConfig, OmegaConf, ListConfig
from funasr.register import tables
from funasr.utils.load_utils import load_bytes
from funasr.download.file import download_from_url
from funasr.download.download_from_hub import download_model
from funasr.utils.vad_utils import slice_padding_audio_samples
from funasr.train_utils.set_all_random_seed import set_all_random_seed
from funasr.train_utils.load_pretrained_model import load_pretrained_model
from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
from funasr.utils.timestamp_tools import timestamp_sentence
from funasr.models.campplus.utils import sv_chunk, postprocess, distribute_spk
from funasr.models.campplus.cluster_backend import ClusterBackend

def prepare_data_iterator(data_in, input_len=None, data_type=None, key=None):
    """
    Normalize heterogeneous inputs (url, file list, raw data, bytes) into
    parallel key/data lists.
    :param data_in:
    :param input_len:
    :param data_type:
    :param key:
    :return: key_list, data_list
    """
    data_list = []
    key_list = []
    filelist = [".scp", ".txt", ".json", ".jsonl"]
    chars = string.ascii_letters + string.digits
    if isinstance(data_in, str) and data_in.startswith('http'):  # url
        data_in = download_from_url(data_in)
    if isinstance(data_in, str) and os.path.exists(data_in):  # wav_path; filelist: wav.scp, file.jsonl, text.txt
        _, file_extension = os.path.splitext(data_in)
        file_extension = file_extension.lower()
        if file_extension in filelist:  # filelist: wav.scp, file.jsonl, text.txt
            with open(data_in, encoding='utf-8') as fin:
                for line in fin:
                    key = "rand_key_" + ''.join(random.choice(chars) for _ in range(13))
                    if data_in.endswith(".jsonl"):  # file.jsonl: json.dumps({"source": data})
                        lines = json.loads(line.strip())
                        data = lines["source"]
                        key = data["key"] if "key" in data else key
                    else:  # filelist, wav.scp, text.txt: "id \t data" or "data"
                        lines = line.strip().split(maxsplit=1)
                        data = lines[1] if len(lines) > 1 else lines[0]
                        key = lines[0] if len(lines) > 1 else key
                    data_list.append(data)
                    key_list.append(key)
        else:
            key = "rand_key_" + ''.join(random.choice(chars) for _ in range(13))
            data_list = [data_in]
            key_list = [key]
    elif isinstance(data_in, (list, tuple)):
        if data_type is not None and isinstance(data_type, (list, tuple)):  # multiple inputs
            data_list_tmp = []
            for data_in_i, data_type_i in zip(data_in, data_type):
                key_list, data_list_i = prepare_data_iterator(data_in=data_in_i, data_type=data_type_i)
                data_list_tmp.append(data_list_i)
            data_list = []
            for item in zip(*data_list_tmp):
                data_list.append(item)
        else:
            # [audio sample point, fbank, text]
            data_list = data_in
            key_list = ["rand_key_" + ''.join(random.choice(chars) for _ in range(13)) for _ in range(len(data_in))]
    else:  # raw text; audio sample point; fbank; bytes
        if isinstance(data_in, bytes):  # audio bytes
            data_in = load_bytes(data_in)
        if key is None:
            key = "rand_key_" + ''.join(random.choice(chars) for _ in range(13))
        data_list = [data_in]
        key_list = [key]

    return key_list, data_list
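
# A minimal usage sketch (the paths and keys below are illustrative, not part of
# this module): the helper always returns two parallel lists, generating random
# keys where the input carries none.
#
#   keys, data = prepare_data_iterator("wav.scp")            # one entry per "id<TAB>path" line
#   keys, data = prepare_data_iterator(["a.wav", "b.wav"])   # random "rand_key_*" per element
#   keys, data = prepare_data_iterator(b"...wav bytes...", key="utt1")
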
class AutoModel:
    def __init__(self, **kwargs):
        tables.print()
        model, kwargs = self.build_model(**kwargs)

        # build the VAD model if one is specified, else keep None
        vad_model = kwargs.get("vad_model", None)
        vad_kwargs = kwargs.get("vad_model_revision", None)
        if vad_model is not None:
            logging.info("Building VAD model.")
            vad_kwargs = {"model": vad_model, "model_revision": vad_kwargs}
            vad_model, vad_kwargs = self.build_model(**vad_kwargs)

        # build the punctuation model if one is specified, else keep None
        punc_model = kwargs.get("punc_model", None)
        punc_kwargs = kwargs.get("punc_model_revision", None)
        if punc_model is not None:
            logging.info("Building punc model.")
            punc_kwargs = {"model": punc_model, "model_revision": punc_kwargs}
            punc_model, punc_kwargs = self.build_model(**punc_kwargs)

        # build the speaker model if one is specified, else keep None
        spk_model = kwargs.get("spk_model", None)
        spk_kwargs = kwargs.get("spk_model_revision", None)
        if spk_model is not None:
            logging.info("Building SPK model.")
            spk_kwargs = {"model": spk_model, "model_revision": spk_kwargs}
            spk_model, spk_kwargs = self.build_model(**spk_kwargs)
            self.cb_model = ClusterBackend()
            spk_mode = kwargs.get("spk_mode", 'punc_segment')
            if spk_mode not in ["default", "vad_segment", "punc_segment"]:
                logging.error("spk_mode should be one of default, vad_segment and punc_segment.")
            self.spk_mode = spk_mode
            self.preset_spk_num = kwargs.get("preset_spk_num", None)
            if self.preset_spk_num:
                logging.warning("Using preset speaker number: {}".format(self.preset_spk_num))
            logging.warning("A lot of output will be printed when using the speaker model...")

        self.kwargs = kwargs
        self.model = model
        self.vad_model = vad_model
        self.vad_kwargs = vad_kwargs
        self.punc_model = punc_model
        self.punc_kwargs = punc_kwargs
        self.spk_model = spk_model
        self.spk_kwargs = spk_kwargs
        self.model_path = kwargs["model_path"]
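
    # A construction sketch (model names are illustrative placeholders, not pinned
    # to a specific release): an ASR model optionally paired with the VAD,
    # punctuation and speaker models whose kwargs are read above.
    #
    #   model = AutoModel(model="paraformer-zh",
    #                     vad_model="fsmn-vad",
    #                     punc_model="ct-punc",
    #                     spk_model="cam++",
    #                     preset_spk_num=2)
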
    def build_model(self, **kwargs):
        assert "model" in kwargs
        if "model_conf" not in kwargs:
            logging.info("download models from model hub: {}".format(kwargs.get("model_hub", "ms")))
            kwargs = download_model(**kwargs)

        set_all_random_seed(kwargs.get("seed", 0))

        device = kwargs.get("device", "cuda")
        # fall back to CPU when CUDA is unavailable or ngpu is explicitly set to 0
        if not torch.cuda.is_available() or kwargs.get("ngpu", 1) == 0:
            device = "cpu"
            kwargs["batch_size"] = 1
        kwargs["device"] = device

        if kwargs.get("ncpu", None):
            torch.set_num_threads(kwargs.get("ncpu"))

        # build tokenizer
        tokenizer = kwargs.get("tokenizer", None)
        if tokenizer is not None:
            tokenizer_class = tables.tokenizer_classes.get(tokenizer)
            tokenizer = tokenizer_class(**kwargs["tokenizer_conf"])
            kwargs["tokenizer"] = tokenizer
            kwargs["token_list"] = tokenizer.token_list
            vocab_size = len(tokenizer.token_list)
        else:
            vocab_size = -1

        # build frontend
        frontend = kwargs.get("frontend", None)
        if frontend is not None:
            frontend_class = tables.frontend_classes.get(frontend)
            frontend = frontend_class(**kwargs["frontend_conf"])
            kwargs["frontend"] = frontend
            kwargs["input_size"] = frontend.output_size()

        # build model
        model_class = tables.model_classes.get(kwargs["model"])
        model = model_class(**kwargs, **kwargs["model_conf"], vocab_size=vocab_size)
        model.eval()
        model.to(device)

        # init_param
        init_param = kwargs.get("init_param", None)
        if init_param is not None:
            logging.info(f"Loading pretrained params from {init_param}")
            load_pretrained_model(
                model=model,
                path=init_param,
                ignore_init_mismatch=kwargs.get("ignore_init_mismatch", False),
                oss_bucket=kwargs.get("oss_bucket", None),
                scope_map=kwargs.get("scope_map", None),
                excludes=kwargs.get("excludes", None),
            )

        return model, kwargs
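
    # The classes resolved above come from funasr's registry: string names index
    # tables.model_classes (and likewise tokenizer_classes, frontend_classes).
    # A hedged sketch of that lookup, with "Paraformer" purely as an assumed
    # registered name:
    #
    #   model_class = tables.model_classes.get("Paraformer")  # registry lookup by name
    #   model = model_class(**conf)                           # instantiated from its config dict
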
    def __call__(self, *args, **cfg):
        kwargs = self.kwargs
        kwargs.update(cfg)
        res = self.model(*args, **kwargs)
        return res
    def generate(self, input, input_len=None, **cfg):
        if self.vad_model is None:
            return self.inference(input, input_len=input_len, **cfg)
        else:
            return self.inference_with_vad(input, input_len=input_len, **cfg)
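
    # Usage sketch ("asr_example.wav" is a placeholder path): generate() is the
    # single entry point; it routes to plain inference, or to the VAD-segmented
    # pipeline when a vad_model was built in __init__.
    #
    #   res = model.generate(input="asr_example.wav", batch_size_s=300)
    #   print(res[0]["text"])
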
    def inference(self, input, input_len=None, model=None, kwargs=None, key=None, **cfg):
        kwargs = self.kwargs if kwargs is None else kwargs
        kwargs.update(cfg)
        model = self.model if model is None else model

        batch_size = kwargs.get("batch_size", 1)
        # if kwargs.get("device", "cpu") == "cpu":
        #     batch_size = 1

        key_list, data_list = prepare_data_iterator(input, input_len=input_len, data_type=kwargs.get("data_type", None), key=key)

        speed_stats = {}
        asr_result_list = []
        num_samples = len(data_list)
        pbar = tqdm(colour="blue", total=num_samples + 1, dynamic_ncols=True)
        time_speech_total = 0.0
        time_escape_total = 0.0
        for beg_idx in range(0, num_samples, batch_size):
            end_idx = min(num_samples, beg_idx + batch_size)
            data_batch = data_list[beg_idx:end_idx]
            key_batch = key_list[beg_idx:end_idx]
            batch = {"data_in": data_batch, "key": key_batch}
            if (end_idx - beg_idx) == 1 and isinstance(data_batch[0], torch.Tensor):  # fbank
                batch["data_in"] = data_batch[0]
                batch["data_lengths"] = input_len

            time1 = time.perf_counter()
            with torch.no_grad():
                results, meta_data = model.inference(**batch, **kwargs)
            time2 = time.perf_counter()

            asr_result_list.extend(results)
            pbar.update(1)

            # batch_data_time = time_per_frame_s * data_batch_i["speech_lengths"].sum().item()
            batch_data_time = meta_data.get("batch_data_time", -1)
            time_escape = time2 - time1
            speed_stats["load_data"] = meta_data.get("load_data", 0.0)
            speed_stats["extract_feat"] = meta_data.get("extract_feat", 0.0)
            speed_stats["forward"] = f"{time_escape:0.3f}"
            speed_stats["batch_size"] = f"{len(results)}"
            speed_stats["rtf"] = f"{time_escape / batch_data_time:0.3f}"
            description = (
                f"{speed_stats}, "
            )
            pbar.set_description(description)
            time_speech_total += batch_data_time
            time_escape_total += time_escape

        pbar.update(1)
        pbar.set_description(f"rtf_avg: {time_escape_total / time_speech_total:0.3f}")
        torch.cuda.empty_cache()
        return asr_result_list
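
    # RTF (real-time factor) as logged above is forward time divided by audio
    # duration; e.g. decoding 60.0 s of speech in 3.0 s gives rtf = 3.0 / 60.0
    # = 0.05, i.e. 20x faster than real time (numbers illustrative).
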
    def inference_with_vad(self, input, input_len=None, **cfg):
        # step.1: compute the vad model
        self.vad_kwargs.update(cfg)
        beg_vad = time.time()
        res = self.inference(input, input_len=input_len, model=self.vad_model, kwargs=self.vad_kwargs, **cfg)
        end_vad = time.time()
        print(f"time cost vad: {end_vad - beg_vad:0.3f}")

        # step.2: compute the asr model
        model = self.model
        kwargs = self.kwargs
        kwargs.update(cfg)
        # batch budgets are given in seconds of audio and converted to ms here
        batch_size = int(kwargs.get("batch_size_s", 300)) * 1000
        batch_size_threshold_ms = int(kwargs.get("batch_size_threshold_s", 60)) * 1000
        kwargs["batch_size"] = batch_size

        key_list, data_list = prepare_data_iterator(input, input_len=input_len, data_type=kwargs.get("data_type", None))
        results_ret_list = []
        time_speech_total_all_samples = 0.0

        beg_total = time.time()
        pbar_total = tqdm(colour="red", total=len(res) + 1, dynamic_ncols=True)
        for i in range(len(res)):
            key = res[i]["key"]
            vadsegments = res[i]["value"]
            input_i = data_list[i]
            speech = load_audio_text_image_video(input_i, fs=kwargs["frontend"].fs, audio_fs=kwargs.get("fs", 16000))
            speech_lengths = len(speech)
            n = len(vadsegments)
            # sort segments by duration so similar-length segments are batched together
            data_with_index = [(vadsegments[i], i) for i in range(n)]
            sorted_data = sorted(data_with_index, key=lambda x: x[0][1] - x[0][0])
            results_sorted = []

            if not len(sorted_data):
                logging.info("decoding, utt: {}, empty speech".format(key))
                continue

            if len(sorted_data) > 0 and len(sorted_data[0]) > 0:
                batch_size = max(batch_size, sorted_data[0][0][1] - sorted_data[0][0][0])

            batch_size_ms_cum = 0
            beg_idx = 0
            beg_asr_total = time.time()
            time_speech_total_per_sample = speech_lengths / 16000
            time_speech_total_all_samples += time_speech_total_per_sample

            # collected once per sample, so sv_chunk segments from every batch
            # survive until speaker clustering below
            all_segments = []
            for j in range(n):
                # accumulate segment durations until the batch budget (batch_size ms)
                # would be exceeded, or the next segment exceeds the threshold
                batch_size_ms_cum += (sorted_data[j][0][1] - sorted_data[j][0][0])
                if j < n - 1 and (
                        batch_size_ms_cum + sorted_data[j + 1][0][1] - sorted_data[j + 1][0][0]) < batch_size and (
                        sorted_data[j + 1][0][1] - sorted_data[j + 1][0][0]) < batch_size_threshold_ms:
                    continue
                batch_size_ms_cum = 0
                end_idx = j + 1
                speech_j, speech_lengths_j = slice_padding_audio_samples(speech, speech_lengths, sorted_data[beg_idx:end_idx])
                results = self.inference(speech_j, input_len=None, model=model, kwargs=kwargs, **cfg)
                if self.spk_model is not None:
                    # compose vad segments: [[start_time_sec, end_time_sec, speech], [...]]
                    for _b in range(len(speech_j)):
                        vad_segments = [[sorted_data[beg_idx:end_idx][_b][0][0] / 1000.0,
                                         sorted_data[beg_idx:end_idx][_b][0][1] / 1000.0,
                                         speech_j[_b]]]
                        segments = sv_chunk(vad_segments)
                        all_segments.extend(segments)
                        speech_b = [seg[2] for seg in segments]
                        spk_res = self.inference(speech_b, input_len=None, model=self.spk_model, kwargs=kwargs, **cfg)
                        results[_b]['spk_embedding'] = spk_res[0]['spk_embedding']
                beg_idx = end_idx
                if len(results) < 1:
                    continue
                results_sorted.extend(results)

            pbar_total.update(1)
            end_asr_total = time.time()
            time_escape_total_per_sample = end_asr_total - beg_asr_total
            pbar_total.set_description(f"rtf_avg_per_sample: {time_escape_total_per_sample / time_speech_total_per_sample:0.3f}, "
                                       f"time_speech_total_per_sample: {time_speech_total_per_sample: 0.3f}, "
                                       f"time_escape_total_per_sample: {time_escape_total_per_sample:0.3f}")
            # restore the original segment order that was shuffled by the duration sort
            restored_data = [0] * n
            for j in range(n):
                index = sorted_data[j][1]
                restored_data[index] = results_sorted[j]
            result = {}

            # results combine for texts, timestamps, speaker embeddings and others
            # TODO: rewrite for clean code
            for j in range(n):
                for k, v in restored_data[j].items():
                    if k.startswith("timestamp"):
                        if k not in result:
                            result[k] = []
                        # shift timestamps from segment-local to absolute (ms) by the segment start
                        for t in restored_data[j][k]:
                            t[0] += vadsegments[j][0]
                            t[1] += vadsegments[j][0]
                        result[k].extend(restored_data[j][k])
                    elif k == 'spk_embedding':
                        if k not in result:
                            result[k] = restored_data[j][k]
                        else:
                            result[k] = torch.cat([result[k], restored_data[j][k]], dim=0)
                    elif k == 'text':
                        if k not in result:
                            result[k] = restored_data[j][k]
                        else:
                            result[k] += " " + restored_data[j][k]
                    else:
                        if k not in result:
                            result[k] = restored_data[j][k]
                        else:
                            result[k] += restored_data[j][k]
            # step.3: compute the punc model
            if self.punc_model is not None:
                self.punc_kwargs.update(cfg)
                punc_res = self.inference(result["text"], model=self.punc_model, kwargs=self.punc_kwargs, **cfg)
                result["text_with_punc"] = punc_res[0]["text"]

            # speaker embedding clustering after restoring the order
            if self.spk_model is not None:
                all_segments = sorted(all_segments, key=lambda x: x[0])
                spk_embedding = result['spk_embedding']
                labels = self.cb_model(spk_embedding, oracle_num=self.preset_spk_num)
                del result['spk_embedding']
                sv_output = postprocess(all_segments, None, labels, spk_embedding.cpu())
                if self.spk_mode == 'vad_segment':
                    sentence_list = []
                    # res_j, not res: do not shadow the outer VAD result list
                    for res_j, vadsegment in zip(restored_data, vadsegments):
                        sentence_list.append({"start": vadsegment[0],
                                              "end": vadsegment[1],
                                              "sentence": res_j['text'],
                                              "timestamp": res_j['timestamp']})
                else:  # punc_segment; requires punc_res from the punc model above
                    sentence_list = timestamp_sentence(punc_res[0]['punc_array'],
                                                       result['timestamp'],
                                                       result['text'])
                distribute_spk(sentence_list, sv_output)
                result['sentence_info'] = sentence_list
            result["key"] = key
            results_ret_list.append(result)
            pbar_total.update(1)

        pbar_total.update(1)
        end_total = time.time()
        time_escape_total_all_samples = end_total - beg_total
        pbar_total.set_description(f"rtf_avg_all_samples: {time_escape_total_all_samples / time_speech_total_all_samples:0.3f}, "
                                   f"time_speech_total_all_samples: {time_speech_total_all_samples: 0.3f}, "
                                   f"time_escape_total_all_samples: {time_escape_total_all_samples:0.3f}")
        return results_ret_list
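
# A minimal end-to-end sketch, assuming funasr is installed and the model names
# below resolve on the configured model hub (they are illustrative, not pinned
# to a release; "asr_example.wav" is a placeholder path):
if __name__ == "__main__":
    model = AutoModel(model="paraformer-zh", vad_model="fsmn-vad", punc_model="ct-punc")
    # with a VAD model present, generate() runs the segmented pipeline
    results = model.generate(input="asr_example.wav", batch_size_s=300)
    for r in results:
        print(r["key"], r.get("text_with_punc", r["text"]))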