punc_bin.py

# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)

import os.path
from pathlib import Path
from typing import Tuple, Union

import numpy as np

from .utils.utils import (ONNXRuntimeError, OrtInferSession, get_logger,
                          read_yaml)
from .utils.utils import (TokenIDConverter, code_mix_split_words,
                          code_mix_split_words_jieba, split_to_mini_sentence)

logging = get_logger()


class CT_Transformer():
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    CT-Transformer: Controllable time-delay transformer for real-time punctuation prediction and disfluency detection
    https://arxiv.org/pdf/2003.01309.pdf
    """
    def __init__(self, model_dir: Union[str, Path] = None,
                 batch_size: int = 1,
                 device_id: Union[str, int] = "-1",
                 quantize: bool = False,
                 intra_op_num_threads: int = 4,
                 cache_dir: str = None,
                 ):
        if not Path(model_dir).exists():
            try:
                from modelscope.hub.snapshot_download import snapshot_download
            except ImportError:
                raise ImportError(
                    "Downloading the model from ModelScope requires the modelscope package. "
                    "To install it:\n"
                    "\npip3 install -U modelscope\n"
                    "For users in China, install from the mirror:\n"
                    "\npip3 install -U modelscope -i https://mirror.sjtu.edu.cn/pypi/web/simple")
            try:
                model_dir = snapshot_download(model_dir, cache_dir=cache_dir)
            except Exception:
                raise ValueError(
                    "model_dir must be a model name on ModelScope or a local path "
                    "downloaded from ModelScope, but is {}".format(model_dir))

        model_file = os.path.join(model_dir, 'model.onnx')
        if quantize:
            model_file = os.path.join(model_dir, 'model_quant.onnx')
        if not os.path.exists(model_file):
            print("The .onnx model does not exist; exporting it now")
            try:
                from funasr.export.export_model import ModelExport
            except ImportError:
                raise ImportError(
                    "Exporting the ONNX model requires the funasr package. To install it:\n"
                    "\npip3 install -U funasr\n"
                    "For users in China, install from the mirror:\n"
                    "\npip3 install -U funasr -i https://mirror.sjtu.edu.cn/pypi/web/simple")
            export_model = ModelExport(
                cache_dir=cache_dir,
                onnx=True,
                device="cpu",
                quant=quantize,
            )
            export_model.export(model_dir)

        config_file = os.path.join(model_dir, 'punc.yaml')
        config = read_yaml(config_file)

        self.converter = TokenIDConverter(config['token_list'])
        self.ort_infer = OrtInferSession(model_file, device_id,
                                         intra_op_num_threads=intra_op_num_threads)
        self.batch_size = 1
        self.punc_list = config['punc_list']
        self.period = 0
        # Normalize half-width punctuation to full-width and record the period id
        for i in range(len(self.punc_list)):
            if self.punc_list[i] == ",":
                self.punc_list[i] = ","
            elif self.punc_list[i] == "?":
                self.punc_list[i] = "?"
            elif self.punc_list[i] == "。":
                self.period = i
        if "seg_jieba" in config:
            self.seg_jieba = True
            self.jieba_usr_dict_path = os.path.join(model_dir, 'jieba_usr_dict')
            self.code_mix_split_words_jieba = code_mix_split_words_jieba(self.jieba_usr_dict_path)
        else:
            self.seg_jieba = False

    def __call__(self, text: Union[list, str], split_size=20):
        if self.seg_jieba:
            split_text = self.code_mix_split_words_jieba(text)
        else:
            split_text = code_mix_split_words(text)
        split_text_id = self.converter.tokens2ids(split_text)
        mini_sentences = split_to_mini_sentence(split_text, split_size)
        mini_sentences_id = split_to_mini_sentence(split_text_id, split_size)
        assert len(mini_sentences) == len(mini_sentences_id)
        cache_sent = []
        cache_sent_id = []
        new_mini_sentence = ""
        new_mini_sentence_punc = []
        cache_pop_trigger_limit = 200
        for mini_sentence_i in range(len(mini_sentences)):
            mini_sentence = mini_sentences[mini_sentence_i]
            mini_sentence_id = mini_sentences_id[mini_sentence_i]
            mini_sentence = cache_sent + mini_sentence
            mini_sentence_id = np.array(cache_sent_id + mini_sentence_id, dtype='int32')
            data = {
                "text": mini_sentence_id[None, :],
                "text_lengths": np.array([len(mini_sentence_id)], dtype='int32'),
            }
            try:
                outputs = self.infer(data['text'], data['text_lengths'])
                y = outputs[0]
                punctuations = np.argmax(y, axis=-1)[0]
                assert punctuations.size == len(mini_sentence)
            except ONNXRuntimeError:
                logging.warning("ONNX inference failed for this mini-sentence")

            # Search for the last period/question mark; everything after it is cached
            # and prepended to the next mini-sentence.
            if mini_sentence_i < len(mini_sentences) - 1:
                sentenceEnd = -1
                last_comma_index = -1
                for i in range(len(punctuations) - 2, 1, -1):
                    if self.punc_list[punctuations[i]] == "。" or self.punc_list[punctuations[i]] == "?":
                        sentenceEnd = i
                        break
                    if last_comma_index < 0 and self.punc_list[punctuations[i]] == ",":
                        last_comma_index = i
                if sentenceEnd < 0 and len(mini_sentence) > cache_pop_trigger_limit and last_comma_index >= 0:
                    # The sentence is too long; cut it off at a comma.
                    sentenceEnd = last_comma_index
                    punctuations[sentenceEnd] = self.period
                cache_sent = mini_sentence[sentenceEnd + 1:]
                cache_sent_id = mini_sentence_id[sentenceEnd + 1:].tolist()
                mini_sentence = mini_sentence[0:sentenceEnd + 1]
                punctuations = punctuations[0:sentenceEnd + 1]

            new_mini_sentence_punc += [int(x) for x in punctuations]
            words_with_punc = []
            for i in range(len(mini_sentence)):
                if i > 0:
                    # Separate consecutive single-byte (ASCII) tokens with a space
                    if len(mini_sentence[i][0].encode()) == 1 and len(mini_sentence[i - 1][0].encode()) == 1:
                        mini_sentence[i] = " " + mini_sentence[i]
                words_with_punc.append(mini_sentence[i])
                if self.punc_list[punctuations[i]] != "_":
                    words_with_punc.append(self.punc_list[punctuations[i]])
            new_mini_sentence += "".join(words_with_punc)

            # Add a period at the end of the final sentence
            new_mini_sentence_out = new_mini_sentence
            new_mini_sentence_punc_out = new_mini_sentence_punc
            if mini_sentence_i == len(mini_sentences) - 1:
                if new_mini_sentence[-1] == "," or new_mini_sentence[-1] == "、":
                    new_mini_sentence_out = new_mini_sentence[:-1] + "。"
                    new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.period]
                elif new_mini_sentence[-1] != "。" and new_mini_sentence[-1] != "?":
                    new_mini_sentence_out = new_mini_sentence + "。"
                    new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.period]
        return new_mini_sentence_out, new_mini_sentence_punc_out

    def infer(self, feats: np.ndarray,
              feats_len: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        outputs = self.ort_infer([feats, feats_len])
        return outputs
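

# Usage sketch for the offline model: call it with a plain transcript to get the
# punctuated text and the per-token punctuation ids. The ModelScope model id below
# is an assumption for illustration; a local directory containing model.onnx and
# punc.yaml works as well.
#
#   model = CT_Transformer("damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch")
#   text_out, punc_ids = model("跨境河流是养育沿岸人民的生命之源")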


class CT_Transformer_VadRealtime(CT_Transformer):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    CT-Transformer: Controllable time-delay transformer for real-time punctuation prediction and disfluency detection
    https://arxiv.org/pdf/2003.01309.pdf
    """
    def __init__(self, model_dir: Union[str, Path] = None,
                 batch_size: int = 1,
                 device_id: Union[str, int] = "-1",
                 quantize: bool = False,
                 intra_op_num_threads: int = 4,
                 cache_dir: str = None
                 ):
        super(CT_Transformer_VadRealtime, self).__init__(
            model_dir, batch_size, device_id, quantize,
            intra_op_num_threads, cache_dir=cache_dir)

    def __call__(self, text: str, param_dict: dict, split_size=20):
        cache_key = "cache"
        assert cache_key in param_dict
        cache = param_dict[cache_key]
        if cache is not None and len(cache) > 0:
            precache = "".join(cache)
        else:
            precache = ""
            cache = []
        full_text = precache + " " + text
        split_text = code_mix_split_words(full_text)
        split_text_id = self.converter.tokens2ids(split_text)
        mini_sentences = split_to_mini_sentence(split_text, split_size)
        mini_sentences_id = split_to_mini_sentence(split_text_id, split_size)
        new_mini_sentence_punc = []
        assert len(mini_sentences) == len(mini_sentences_id)

        cache_sent = []
        cache_sent_id = np.array([], dtype='int32')
        sentence_punc_list = []
        sentence_words_list = []
        cache_pop_trigger_limit = 200
        skip_num = 0
        for mini_sentence_i in range(len(mini_sentences)):
            mini_sentence = mini_sentences[mini_sentence_i]
            mini_sentence_id = mini_sentences_id[mini_sentence_i]
            mini_sentence = cache_sent + mini_sentence
            mini_sentence_id = np.concatenate((cache_sent_id, mini_sentence_id), axis=0, dtype='int32')
            text_length = len(mini_sentence_id)
            # Block attention across the vad boundary for the cached prefix
            vad_mask = self.vad_mask(text_length, len(cache))[None, None, :, :].astype(np.float32)
            data = {
                "input": mini_sentence_id[None, :],
                "text_lengths": np.array([text_length], dtype='int32'),
                "vad_mask": vad_mask,
                "sub_masks": vad_mask,
            }
            try:
                outputs = self.infer(data['input'], data['text_lengths'], data['vad_mask'], data["sub_masks"])
                y = outputs[0]
                punctuations = np.argmax(y, axis=-1)[0]
                assert punctuations.size == len(mini_sentence)
            except ONNXRuntimeError:
                logging.warning("ONNX inference failed for this mini-sentence")

            # Search for the last period/question mark; everything after it is cached
            # and prepended to the next mini-sentence.
            if mini_sentence_i < len(mini_sentences) - 1:
                sentenceEnd = -1
                last_comma_index = -1
                for i in range(len(punctuations) - 2, 1, -1):
                    if self.punc_list[punctuations[i]] == "。" or self.punc_list[punctuations[i]] == "?":
                        sentenceEnd = i
                        break
                    if last_comma_index < 0 and self.punc_list[punctuations[i]] == ",":
                        last_comma_index = i
                if sentenceEnd < 0 and len(mini_sentence) > cache_pop_trigger_limit and last_comma_index >= 0:
                    # The sentence is too long; cut it off at a comma.
                    sentenceEnd = last_comma_index
                    punctuations[sentenceEnd] = self.period
                cache_sent = mini_sentence[sentenceEnd + 1:]
                cache_sent_id = mini_sentence_id[sentenceEnd + 1:]
                mini_sentence = mini_sentence[0:sentenceEnd + 1]
                punctuations = punctuations[0:sentenceEnd + 1]

            punctuations_np = [int(x) for x in punctuations]
            new_mini_sentence_punc += punctuations_np
            sentence_punc_list += [self.punc_list[int(x)] for x in punctuations_np]
            sentence_words_list += mini_sentence

        assert len(sentence_punc_list) == len(sentence_words_list)
        words_with_punc = []
        sentence_punc_list_out = []
        for i in range(0, len(sentence_words_list)):
            if i > 0:
                if len(sentence_words_list[i][0].encode()) == 1 and len(sentence_words_list[i - 1][-1].encode()) == 1:
                    sentence_words_list[i] = " " + sentence_words_list[i]
            # Skip the words that came in through the cache; they were already emitted
            if skip_num < len(cache):
                skip_num += 1
            else:
                words_with_punc.append(sentence_words_list[i])
            if skip_num >= len(cache):
                sentence_punc_list_out.append(sentence_punc_list[i])
                if sentence_punc_list[i] != "_":
                    words_with_punc.append(sentence_punc_list[i])
        sentence_out = "".join(words_with_punc)

        # Everything after the last sentence-final mark becomes the new cache
        sentenceEnd = -1
        for i in range(len(sentence_punc_list) - 2, 1, -1):
            if sentence_punc_list[i] == "。" or sentence_punc_list[i] == "?":
                sentenceEnd = i
                break
        cache_out = sentence_words_list[sentenceEnd + 1:]
        if sentence_out[-1] in self.punc_list:
            sentence_out = sentence_out[:-1]
            sentence_punc_list_out[-1] = "_"
        param_dict[cache_key] = cache_out
        return sentence_out, sentence_punc_list_out, cache_out

    def vad_mask(self, size, vad_pos, dtype=bool):
        """Create a mask for decoder self-attention.

        :param int size: size of the mask
        :param int vad_pos: index of the vad position
        :param np.dtype dtype: result dtype
        :rtype: np.ndarray of shape (size, size)
        """
        ret = np.ones((size, size), dtype=dtype)
        if vad_pos <= 0 or vad_pos >= size:
            return ret
        # Positions before the vad position may not attend to positions after it
        sub_corner = np.zeros((vad_pos - 1, size - vad_pos), dtype=dtype)
        ret[0:vad_pos - 1, vad_pos:] = sub_corner
        return ret
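
    # Worked example: vad_mask(5, 2) yields the 5x5 boolean matrix below; only the
    # rows strictly before the vad position (here row 0) are blocked from attending
    # to positions at or after it.
    #   [[1 1 0 0 0]
    #    [1 1 1 1 1]
    #    [1 1 1 1 1]
    #    [1 1 1 1 1]
    #    [1 1 1 1 1]]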

    def infer(self, feats: np.ndarray,
              feats_len: np.ndarray,
              vad_mask: np.ndarray,
              sub_masks: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        outputs = self.ort_infer([feats, feats_len, vad_mask, sub_masks])
        return outputs
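

# A minimal streaming sketch, assuming a locally downloaded model directory
# ("<model_dir>" is a placeholder). The realtime variant carries unfinished words
# across calls through param_dict["cache"], which it updates in place.
if __name__ == "__main__":
    model = CT_Transformer_VadRealtime("<model_dir>")  # placeholder path (assumption)
    param_dict = {"cache": []}
    for vad_segment in ["hello world", "this is the second segment"]:
        sentence, punc_list, cache = model(vad_segment, param_dict)
        print(sentence)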