#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)

import json
import time
import copy
import torch
import random
import string
import logging
import os.path
import numpy as np
from tqdm import tqdm

from funasr.register import tables
from funasr.utils.load_utils import load_bytes
from funasr.download.file import download_from_url
from funasr.utils.timestamp_tools import timestamp_sentence
from funasr.download.download_from_hub import download_model
from funasr.utils.vad_utils import slice_padding_audio_samples
from funasr.utils.load_utils import load_audio_text_image_video
from funasr.train_utils.set_all_random_seed import set_all_random_seed
from funasr.train_utils.load_pretrained_model import load_pretrained_model
from funasr.models.campplus.utils import sv_chunk, postprocess, distribute_spk

try:
    from funasr.models.campplus.cluster_backend import ClusterBackend
except ImportError:
    print("If you want to use speaker diarization, please `pip install hdbscan`")


def prepare_data_iterator(data_in, input_len=None, data_type=None, key=None):
    """
    :param data_in: input data (path or url string, filelist, list/tuple, bytes, or raw text)
    :param input_len:
    :param data_type:
    :param key:
    :return: (key_list, data_list)
    """
    data_list = []
    key_list = []
    filelist = [".scp", ".txt", ".json", ".jsonl"]

    chars = string.ascii_letters + string.digits
    if isinstance(data_in, str) and data_in.startswith('http'):  # url
        data_in = download_from_url(data_in)

    if isinstance(data_in, str) and os.path.exists(data_in):  # wav path or filelist: wav.scp / file.jsonl / text.txt
        _, file_extension = os.path.splitext(data_in)
        file_extension = file_extension.lower()
        if file_extension in filelist:  # filelist: wav.scp / file.jsonl / text.txt
            with open(data_in, encoding='utf-8') as fin:
                for line in fin:
                    key = "rand_key_" + ''.join(random.choice(chars) for _ in range(13))
                    if data_in.endswith(".jsonl"):  # file.jsonl: json.dumps({"source": data})
                        lines = json.loads(line.strip())
                        data = lines["source"]
                        key = data["key"] if "key" in data else key
                    else:  # filelist, wav.scp, text.txt: "id \t data" or "data"
                        lines = line.strip().split(maxsplit=1)
                        data = lines[1] if len(lines) > 1 else lines[0]
                        key = lines[0] if len(lines) > 1 else key
                    data_list.append(data)
                    key_list.append(key)
        else:
            key = "rand_key_" + ''.join(random.choice(chars) for _ in range(13))
            data_list = [data_in]
            key_list = [key]
    elif isinstance(data_in, (list, tuple)):
        if data_type is not None and isinstance(data_type, (list, tuple)):  # multiple inputs
            data_list_tmp = []
            for data_in_i, data_type_i in zip(data_in, data_type):
                key_list, data_list_i = prepare_data_iterator(data_in=data_in_i, data_type=data_type_i)
                data_list_tmp.append(data_list_i)
            data_list = []
            for item in zip(*data_list_tmp):
                data_list.append(item)
        else:
            # [audio sample point, fbank, text]
            data_list = data_in
            key_list = ["rand_key_" + ''.join(random.choice(chars) for _ in range(13)) for _ in range(len(data_in))]
    else:  # raw text; audio sample points; fbank; bytes
        if isinstance(data_in, bytes):  # audio bytes
            data_in = load_bytes(data_in)
        if key is None:
            key = "rand_key_" + ''.join(random.choice(chars) for _ in range(13))
        data_list = [data_in]
        key_list = [key]

    return key_list, data_list
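
# A minimal sketch of the input forms prepare_data_iterator normalizes into
# (key_list, data_list); the file names below are illustrative, not fixtures:
#   prepare_data_iterator("asr_example.wav")       # single path -> one random key
#   prepare_data_iterator("wav.scp")               # per line: "utt_id<TAB>data" or bare data
#   prepare_data_iterator(["a.wav", "b.wav"])      # one random key per element
#   prepare_data_iterator(b"\x52\x49\x46\x46...")  # raw audio bytes, decoded via load_bytes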


class AutoModel:

    def __init__(self, **kwargs):
        if not kwargs.get("disable_log", True):
            tables.print()

        model, kwargs = self.build_model(**kwargs)

        # if vad_model is not None, build vad model else None
        vad_model = kwargs.get("vad_model", None)
        vad_kwargs = kwargs.get("vad_model_revision", None)
        if vad_model is not None:
            logging.info("Building VAD model.")
            vad_kwargs = {"model": vad_model, "model_revision": vad_kwargs, "device": kwargs["device"]}
            vad_model, vad_kwargs = self.build_model(**vad_kwargs)

        # if punc_model is not None, build punc model else None
        punc_model = kwargs.get("punc_model", None)
        punc_kwargs = kwargs.get("punc_model_revision", None)
        if punc_model is not None:
            logging.info("Building punc model.")
            punc_kwargs = {"model": punc_model, "model_revision": punc_kwargs, "device": kwargs["device"]}
            punc_model, punc_kwargs = self.build_model(**punc_kwargs)

        # if spk_model is not None, build spk model else None
        spk_model = kwargs.get("spk_model", None)
        spk_kwargs = kwargs.get("spk_model_revision", None)
        if spk_model is not None:
            logging.info("Building SPK model.")
            spk_kwargs = {"model": spk_model, "model_revision": spk_kwargs, "device": kwargs["device"]}
            spk_model, spk_kwargs = self.build_model(**spk_kwargs)
            self.cb_model = ClusterBackend().to(kwargs["device"])
            spk_mode = kwargs.get("spk_mode", 'punc_segment')
            if spk_mode not in ["default", "vad_segment", "punc_segment"]:
                logging.error("spk_mode should be one of default, vad_segment and punc_segment.")
            self.spk_mode = spk_mode

        self.kwargs = kwargs
        self.model = model
        self.vad_model = vad_model
        self.vad_kwargs = vad_kwargs
        self.punc_model = punc_model
        self.punc_kwargs = punc_kwargs
        self.spk_model = spk_model
        self.spk_kwargs = spk_kwargs
        self.model_path = kwargs.get("model_path")
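
    # Constructor usage, as a sketch (the aliases "paraformer-zh", "fsmn-vad"
    # and "ct-punc" are examples from the FunASR model zoo, not values defined
    # in this file):
    #   model = AutoModel(model="paraformer-zh", vad_model="fsmn-vad", punc_model="ct-punc")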

    def build_model(self, **kwargs):
        assert "model" in kwargs
        if "model_conf" not in kwargs:
            logging.info("download models from model hub: {}".format(kwargs.get("model_hub", "ms")))
            kwargs = download_model(**kwargs)

        set_all_random_seed(kwargs.get("seed", 0))

        device = kwargs.get("device", "cuda")
        if not torch.cuda.is_available() or kwargs.get("ngpu", 1) == 0:
            device = "cpu"
            kwargs["batch_size"] = 1
        kwargs["device"] = device

        if kwargs.get("ncpu", None):
            torch.set_num_threads(kwargs.get("ncpu"))

        # build tokenizer
        tokenizer = kwargs.get("tokenizer", None)
        if tokenizer is not None:
            tokenizer_class = tables.tokenizer_classes.get(tokenizer)
            tokenizer = tokenizer_class(**kwargs["tokenizer_conf"])
            kwargs["tokenizer"] = tokenizer
            kwargs["token_list"] = tokenizer.token_list if hasattr(tokenizer, "token_list") else None
            kwargs["token_list"] = tokenizer.get_vocab() if hasattr(tokenizer, "get_vocab") else kwargs["token_list"]
            vocab_size = len(kwargs["token_list"])
        else:
            vocab_size = -1

        # build frontend
        frontend = kwargs.get("frontend", None)
        if frontend is not None:
            frontend_class = tables.frontend_classes.get(frontend)
            frontend = frontend_class(**kwargs["frontend_conf"])
            kwargs["frontend"] = frontend
            kwargs["input_size"] = frontend.output_size()

        # build model
        model_class = tables.model_classes.get(kwargs["model"])
        model = model_class(**kwargs, **kwargs["model_conf"], vocab_size=vocab_size)
        model.to(device)

        # init_param
        init_param = kwargs.get("init_param", None)
        if init_param is not None:
            if os.path.exists(init_param):
                logging.info(f"Loading pretrained params from {init_param}")
                load_pretrained_model(
                    model=model,
                    path=init_param,
                    ignore_init_mismatch=kwargs.get("ignore_init_mismatch", False),
                    oss_bucket=kwargs.get("oss_bucket", None),
                    scope_map=kwargs.get("scope_map", []),
                    excludes=kwargs.get("excludes", None),
                )
            else:
                print(f"error, init_param does not exist!: {init_param}")

        return model, kwargs

    def __call__(self, *args, **cfg):
        kwargs = self.kwargs
        kwargs.update(cfg)
        res = self.model(*args, **kwargs)
        return res

    def generate(self, input, input_len=None, **cfg):
        if self.vad_model is None:
            return self.inference(input, input_len=input_len, **cfg)
        else:
            return self.inference_with_vad(input, input_len=input_len, **cfg)
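
    # generate() is the public entry point: without a vad_model it decodes the
    # input directly; with one, long audio is VAD-segmented first. A minimal
    # sketch, assuming a 16 kHz wav file on disk:
    #   res = model.generate(input="asr_example.wav")
    #   print(res[0]["text"])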

    def inference(self, input, input_len=None, model=None, kwargs=None, key=None, **cfg):
        kwargs = self.kwargs if kwargs is None else kwargs
        kwargs.update(cfg)
        model = self.model if model is None else model
        model.eval()

        batch_size = kwargs.get("batch_size", 1)
        # if kwargs.get("device", "cpu") == "cpu":
        #     batch_size = 1

        key_list, data_list = prepare_data_iterator(input, input_len=input_len, data_type=kwargs.get("data_type", None), key=key)

        speed_stats = {}
        asr_result_list = []
        num_samples = len(data_list)
        disable_pbar = self.kwargs.get("disable_pbar", False)
        pbar = tqdm(colour="blue", total=num_samples, dynamic_ncols=True) if not disable_pbar else None
        time_speech_total = 0.0
        time_escape_total = 0.0
        for beg_idx in range(0, num_samples, batch_size):
            end_idx = min(num_samples, beg_idx + batch_size)
            data_batch = data_list[beg_idx:end_idx]
            key_batch = key_list[beg_idx:end_idx]
            batch = {"data_in": data_batch, "key": key_batch}
            if (end_idx - beg_idx) == 1 and kwargs.get("data_type", None) == "fbank":  # fbank
                batch["data_in"] = data_batch[0]
                batch["data_lengths"] = input_len

            time1 = time.perf_counter()
            with torch.no_grad():
                results, meta_data = model.inference(**batch, **kwargs)
            time2 = time.perf_counter()

            asr_result_list.extend(results)

            # batch_data_time = time_per_frame_s * data_batch_i["speech_lengths"].sum().item()
            batch_data_time = meta_data.get("batch_data_time", -1)
            time_escape = time2 - time1
            speed_stats["load_data"] = meta_data.get("load_data", 0.0)
            speed_stats["extract_feat"] = meta_data.get("extract_feat", 0.0)
            speed_stats["forward"] = f"{time_escape:0.3f}"
            speed_stats["batch_size"] = f"{len(results)}"
            speed_stats["rtf"] = f"{(time_escape) / batch_data_time:0.3f}"
            description = f"{speed_stats}, "
            if pbar:
                pbar.update(1)
                pbar.set_description(description)
            time_speech_total += batch_data_time
            time_escape_total += time_escape

        if pbar:
            # pbar.update(1)
            pbar.set_description(f"rtf_avg: {time_escape_total/time_speech_total:0.3f}")
        torch.cuda.empty_cache()
        return asr_result_list
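
    # "rtf" reported above is the real-time factor: decode wall-clock time
    # divided by the batch's audio duration ("batch_data_time" from the model's
    # meta_data); rtf_avg is the same ratio accumulated over the whole run.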

    def inference_with_vad(self, input, input_len=None, **cfg):
        kwargs = self.kwargs
        # step.1: compute the vad model
        self.vad_kwargs.update(cfg)
        beg_vad = time.time()
        res = self.inference(input, input_len=input_len, model=self.vad_model, kwargs=self.vad_kwargs, **cfg)
        end_vad = time.time()

        # step.2: compute the asr model
        model = self.model
        kwargs.update(cfg)
        batch_size = int(kwargs.get("batch_size_s", 300)) * 1000
        batch_size_threshold_ms = int(kwargs.get("batch_size_threshold_s", 60)) * 1000
        kwargs["batch_size"] = batch_size

        key_list, data_list = prepare_data_iterator(input, input_len=input_len, data_type=kwargs.get("data_type", None))
        results_ret_list = []
        time_speech_total_all_samples = 1e-6

        beg_total = time.time()
        pbar_total = tqdm(colour="red", total=len(res), dynamic_ncols=True) if not kwargs.get("disable_pbar", False) else None
        for i in range(len(res)):
            key = res[i]["key"]
            vadsegments = res[i]["value"]
            input_i = data_list[i]
            speech = load_audio_text_image_video(input_i, fs=kwargs["frontend"].fs, audio_fs=kwargs.get("fs", 16000))
            speech_lengths = len(speech)
            n = len(vadsegments)
            data_with_index = [(vadsegments[i], i) for i in range(n)]
            sorted_data = sorted(data_with_index, key=lambda x: x[0][1] - x[0][0])
            results_sorted = []

            if not len(sorted_data):
                logging.info("decoding, utt: {}, empty speech".format(key))
                continue

            if len(sorted_data) > 0 and len(sorted_data[0]) > 0:
                batch_size = max(batch_size, sorted_data[0][0][1] - sorted_data[0][0][0])

            batch_size_ms_cum = 0
            beg_idx = 0
            beg_asr_total = time.time()
            time_speech_total_per_sample = speech_lengths / 16000
            time_speech_total_all_samples += time_speech_total_per_sample

            # pbar_sample = tqdm(colour="blue", total=n, dynamic_ncols=True)

            all_segments = []
            for j in range(n):
                # pbar_sample.update(1)
                batch_size_ms_cum += (sorted_data[j][0][1] - sorted_data[j][0][0])
                if j < n - 1 and (
                    batch_size_ms_cum + sorted_data[j + 1][0][1] - sorted_data[j + 1][0][0]
                ) < batch_size and (
                    sorted_data[j + 1][0][1] - sorted_data[j + 1][0][0]
                ) < batch_size_threshold_ms:
                    continue
                batch_size_ms_cum = 0
                end_idx = j + 1
                speech_j, speech_lengths_j = slice_padding_audio_samples(speech, speech_lengths, sorted_data[beg_idx:end_idx])
                results = self.inference(speech_j, input_len=None, model=model, kwargs=kwargs, **cfg)
                if self.spk_model is not None:
                    # compose vad segments: [[start_time_sec, end_time_sec, speech], [...]]
                    for _b in range(len(speech_j)):
                        vad_segments = [[sorted_data[beg_idx:end_idx][_b][0][0] / 1000.0,
                                         sorted_data[beg_idx:end_idx][_b][0][1] / 1000.0,
                                         np.array(speech_j[_b])]]
                        segments = sv_chunk(vad_segments)
                        all_segments.extend(segments)
                        speech_b = [i[2] for i in segments]
                        spk_res = self.inference(speech_b, input_len=None, model=self.spk_model, kwargs=kwargs, **cfg)
                        results[_b]['spk_embedding'] = spk_res[0]['spk_embedding']
                beg_idx = end_idx
                if len(results) < 1:
                    continue
                results_sorted.extend(results)

            # end_asr_total = time.time()
            # time_escape_total_per_sample = end_asr_total - beg_asr_total
            # pbar_sample.update(1)
            # pbar_sample.set_description(f"rtf_avg_per_sample: {time_escape_total_per_sample / time_speech_total_per_sample:0.3f}, "
            #                             f"time_speech_total_per_sample: {time_speech_total_per_sample: 0.3f}, "
            #                             f"time_escape_total_per_sample: {time_escape_total_per_sample:0.3f}")

            restored_data = [0] * n
            for j in range(n):
                index = sorted_data[j][1]
                restored_data[index] = results_sorted[j]
            result = {}

            # combine results for texts, timestamps, speaker embeddings and others
            # TODO: rewrite for clean code
            for j in range(n):
                for k, v in restored_data[j].items():
                    if k.startswith("timestamp"):
                        if k not in result:
                            result[k] = []
                        for t in restored_data[j][k]:
                            t[0] += vadsegments[j][0]
                            t[1] += vadsegments[j][0]
                        result[k].extend(restored_data[j][k])
                    elif k == 'spk_embedding':
                        if k not in result:
                            result[k] = restored_data[j][k]
                        else:
                            result[k] = torch.cat([result[k], restored_data[j][k]], dim=0)
                    elif 'text' in k:
                        if k not in result:
                            result[k] = restored_data[j][k]
                        else:
                            result[k] += " " + restored_data[j][k]
                    else:
                        if k not in result:
                            result[k] = restored_data[j][k]
                        else:
                            result[k] += restored_data[j][k]

            return_raw_text = kwargs.get('return_raw_text', False)
            # step.3: compute the punc model
            if self.punc_model is not None:
                if not len(result["text"]):
                    result['raw_text'] = ''
                else:
                    self.punc_kwargs.update(cfg)
                    punc_res = self.inference(result["text"], model=self.punc_model, kwargs=self.punc_kwargs, **cfg)
                    raw_text = copy.copy(result["text"])
                    if return_raw_text:
                        result['raw_text'] = raw_text
                    result["text"] = punc_res[0]["text"]
            else:
                raw_text = None

            # speaker embedding clustering after resorting
            if self.spk_model is not None and kwargs.get('return_spk_res', True):
                if raw_text is None:
                    logging.error("Missing punc_model, which is required by spk_model.")
                all_segments = sorted(all_segments, key=lambda x: x[0])
                spk_embedding = result['spk_embedding']
                labels = self.cb_model(spk_embedding.cpu(), oracle_num=kwargs.get('preset_spk_num', None))
                # del result['spk_embedding']
                sv_output = postprocess(all_segments, None, labels, spk_embedding.cpu())
                if self.spk_mode == 'vad_segment':  # recover sentence_list
                    sentence_list = []
                    for res_i, vadsegment in zip(restored_data, vadsegments):
                        if 'timestamp' not in res_i:
                            logging.error("Only 'iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch' "
                                          "and 'iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' "
                                          "can predict timestamps, and speaker diarization relies on timestamps.")
                        sentence_list.append({"start": vadsegment[0],
                                              "end": vadsegment[1],
                                              "sentence": res_i['text'],
                                              "timestamp": res_i['timestamp']})
                elif self.spk_mode == 'punc_segment':
                    if 'timestamp' not in result:
                        logging.error("Only 'iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch' "
                                      "and 'iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' "
                                      "can predict timestamps, and speaker diarization relies on timestamps.")
                    sentence_list = timestamp_sentence(punc_res[0]['punc_array'],
                                                       result['timestamp'],
                                                       raw_text,
                                                       return_raw_text=return_raw_text)
                distribute_spk(sentence_list, sv_output)
                result['sentence_info'] = sentence_list
            elif kwargs.get("sentence_timestamp", False):
                sentence_list = timestamp_sentence(punc_res[0]['punc_array'],
                                                   result['timestamp'],
                                                   raw_text,
                                                   return_raw_text=return_raw_text)
                result['sentence_info'] = sentence_list
            if "spk_embedding" in result:
                del result['spk_embedding']

            result["key"] = key
            results_ret_list.append(result)
            end_asr_total = time.time()
            time_escape_total_per_sample = end_asr_total - beg_asr_total
            if pbar_total:
                pbar_total.update(1)
                pbar_total.set_description(f"rtf_avg: {time_escape_total_per_sample / time_speech_total_per_sample:0.3f}, "
                                           f"time_speech: {time_speech_total_per_sample: 0.3f}, "
                                           f"time_escape: {time_escape_total_per_sample:0.3f}")

        # end_total = time.time()
        # time_escape_total_all_samples = end_total - beg_total
        # print(f"rtf_avg_all: {time_escape_total_all_samples / time_speech_total_all_samples:0.3f}, "
        #       f"time_speech_all: {time_speech_total_all_samples: 0.3f}, "
        #       f"time_escape_all: {time_escape_total_all_samples:0.3f}")
        return results_ret_list
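
# End-to-end sketch with VAD + punctuation + speaker diarization (the aliases
# below come from the FunASR model zoo and are assumptions, not defined here;
# batch_size_s groups VAD segments into batches of roughly that many seconds
# of audio):
#   m = AutoModel(model="paraformer-zh", vad_model="fsmn-vad",
#                 punc_model="ct-punc", spk_model="cam++")
#   res = m.generate(input="meeting.wav", batch_size_s=300)
#   # res[0]["sentence_info"]: per-sentence text, timestamps and speaker labels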