# punc_bin.py
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
import os.path
from pathlib import Path
from typing import List, Tuple, Union

import numpy as np

from .utils.utils import (ONNXRuntimeError, OrtInferSession, TokenIDConverter,
                          code_mix_split_words, code_mix_split_words_jieba,
                          get_logger, read_yaml, split_to_mini_sentence)

logging = get_logger()


class CT_Transformer():
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    CT-Transformer: Controllable time-delay transformer for real-time
    punctuation prediction and disfluency detection
    https://arxiv.org/pdf/2003.01309.pdf
    """

    def __init__(self, model_dir: Union[str, Path] = None,
                 batch_size: int = 1,
                 device_id: Union[str, int] = "-1",
                 quantize: bool = False,
                 intra_op_num_threads: int = 4,
                 cache_dir: str = None,
                 ):
        if not Path(model_dir).exists():
            from modelscope.hub.snapshot_download import snapshot_download
            try:
                model_dir = snapshot_download(model_dir, cache_dir=cache_dir)
            except Exception:
                raise ValueError(
                    "model_dir must be a model name on ModelScope or a local path "
                    "downloaded from ModelScope, but is {}".format(model_dir))

        model_file = os.path.join(model_dir, 'model.onnx')
        if quantize:
            model_file = os.path.join(model_dir, 'model_quant.onnx')
        if not os.path.exists(model_file):
            print("ONNX model does not exist, exporting it now")
            from funasr.export.export_model import ModelExport
            export_model = ModelExport(
                cache_dir=cache_dir,
                onnx=True,
                device="cpu",
                quant=quantize,
            )
            export_model.export(model_dir)

        config_file = os.path.join(model_dir, 'punc.yaml')
        config = read_yaml(config_file)

        self.converter = TokenIDConverter(config['token_list'])
        self.ort_infer = OrtInferSession(model_file, device_id,
                                         intra_op_num_threads=intra_op_num_threads)
        self.batch_size = 1  # inference below runs one mini-sentence at a time
        self.punc_list = config['punc_list']
        self.period = 0
        # Normalize ASCII punctuation labels to full-width characters and
        # remember the index of the period label.
        for i in range(len(self.punc_list)):
            if self.punc_list[i] == ",":
                self.punc_list[i] = "，"
            elif self.punc_list[i] == "?":
                self.punc_list[i] = "？"
            elif self.punc_list[i] == "。":
                self.period = i
        if "seg_jieba" in config:
            self.seg_jieba = True
            self.jieba_usr_dict_path = os.path.join(model_dir, 'jieba_usr_dict')
            self.code_mix_split_words_jieba = code_mix_split_words_jieba(self.jieba_usr_dict_path)
        else:
            self.seg_jieba = False
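
    # Expected model_dir layout, inferred from the files referenced above:
    #     model.onnx / model_quant.onnx -- the (optionally quantized) ONNX graph
    #     punc.yaml                     -- token_list, punc_list, optional seg_jieba
    #     jieba_usr_dict                -- user dictionary, only when seg_jieba is set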

    def __call__(self, text: Union[list, str], split_size=20):
        if self.seg_jieba:
            split_text = self.code_mix_split_words_jieba(text)
        else:
            split_text = code_mix_split_words(text)
        split_text_id = self.converter.tokens2ids(split_text)
        mini_sentences = split_to_mini_sentence(split_text, split_size)
        mini_sentences_id = split_to_mini_sentence(split_text_id, split_size)
        assert len(mini_sentences) == len(mini_sentences_id)
        cache_sent = []
        cache_sent_id = []
        new_mini_sentence = ""
        new_mini_sentence_punc = []
        cache_pop_trigger_limit = 200
        for mini_sentence_i in range(len(mini_sentences)):
            mini_sentence = mini_sentences[mini_sentence_i]
            mini_sentence_id = mini_sentences_id[mini_sentence_i]
            # Prepend the unfinished sentence carried over from the last chunk.
            mini_sentence = cache_sent + mini_sentence
            mini_sentence_id = np.array(cache_sent_id + mini_sentence_id, dtype='int32')
            data = {
                "text": mini_sentence_id[None, :],
                "text_lengths": np.array([len(mini_sentence_id)], dtype='int32'),
            }
            try:
                outputs = self.infer(data['text'], data['text_lengths'])
                y = outputs[0]
                punctuations = np.argmax(y, axis=-1)[0]
                assert punctuations.size == len(mini_sentence)
            except ONNXRuntimeError:
                logging.warning("ONNXRuntimeError during punctuation inference")
            # Search for the last period/question mark; everything after it
            # becomes the cache for the next mini-sentence.
            if mini_sentence_i < len(mini_sentences) - 1:
                sentenceEnd = -1
                last_comma_index = -1
                for i in range(len(punctuations) - 2, 1, -1):
                    if self.punc_list[punctuations[i]] == "。" or self.punc_list[punctuations[i]] == "？":
                        sentenceEnd = i
                        break
                    if last_comma_index < 0 and self.punc_list[punctuations[i]] == "，":
                        last_comma_index = i
                if sentenceEnd < 0 and len(mini_sentence) > cache_pop_trigger_limit and last_comma_index >= 0:
                    # The sentence is too long, cut it off at a comma.
                    sentenceEnd = last_comma_index
                    punctuations[sentenceEnd] = self.period
                cache_sent = mini_sentence[sentenceEnd + 1:]
                cache_sent_id = mini_sentence_id[sentenceEnd + 1:].tolist()
                mini_sentence = mini_sentence[0:sentenceEnd + 1]
                punctuations = punctuations[0:sentenceEnd + 1]
            new_mini_sentence_punc += [int(x) for x in punctuations]
            words_with_punc = []
            for i in range(len(mini_sentence)):
                # Insert a space between consecutive single-byte (ASCII) words.
                if i > 0:
                    if len(mini_sentence[i][0].encode()) == 1 and len(mini_sentence[i - 1][0].encode()) == 1:
                        mini_sentence[i] = " " + mini_sentence[i]
                words_with_punc.append(mini_sentence[i])
                if self.punc_list[punctuations[i]] != "_":
                    words_with_punc.append(self.punc_list[punctuations[i]])
            new_mini_sentence += "".join(words_with_punc)
            # Add a period at the end of the final sentence.
            new_mini_sentence_out = new_mini_sentence
            new_mini_sentence_punc_out = new_mini_sentence_punc
            if mini_sentence_i == len(mini_sentences) - 1:
                if new_mini_sentence[-1] == "，" or new_mini_sentence[-1] == "、":
                    new_mini_sentence_out = new_mini_sentence[:-1] + "。"
                    new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.period]
                elif new_mini_sentence[-1] != "。" and new_mini_sentence[-1] != "？":
                    new_mini_sentence_out = new_mini_sentence + "。"
                    new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.period]
        return new_mini_sentence_out, new_mini_sentence_punc_out

    def infer(self, feats: np.ndarray,
              feats_len: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        outputs = self.ort_infer([feats, feats_len])
        return outputs
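

# A minimal offline usage sketch (the model name below is illustrative; any
# ModelScope punctuation model name or a local exported directory works):
#
#     model = CT_Transformer("<modelscope model name or local model dir>")
#     sentence, punc_ids = model("跨境河流是养育沿岸人民的生命之源")
#     # `sentence` is the input text with predicted punctuation inserted;
#     # `punc_ids` holds per-token indices into model.punc_list.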


class CT_Transformer_VadRealtime(CT_Transformer):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    CT-Transformer: Controllable time-delay transformer for real-time
    punctuation prediction and disfluency detection
    https://arxiv.org/pdf/2003.01309.pdf
    """

    def __init__(self, model_dir: Union[str, Path] = None,
                 batch_size: int = 1,
                 device_id: Union[str, int] = "-1",
                 quantize: bool = False,
                 intra_op_num_threads: int = 4,
                 cache_dir: str = None
                 ):
        super(CT_Transformer_VadRealtime, self).__init__(
            model_dir, batch_size, device_id, quantize,
            intra_op_num_threads, cache_dir=cache_dir)

    def __call__(self, text: str, param_dict: dict, split_size=20):
        cache_key = "cache"
        assert cache_key in param_dict
        cache = param_dict[cache_key]
        if cache is not None and len(cache) > 0:
            precache = "".join(cache)
        else:
            precache = ""
            cache = []
        full_text = precache + " " + text
        split_text = code_mix_split_words(full_text)
        split_text_id = self.converter.tokens2ids(split_text)
        mini_sentences = split_to_mini_sentence(split_text, split_size)
        mini_sentences_id = split_to_mini_sentence(split_text_id, split_size)
        new_mini_sentence_punc = []
        assert len(mini_sentences) == len(mini_sentences_id)
        cache_sent = []
        cache_sent_id = np.array([], dtype='int32')
        sentence_punc_list = []
        sentence_words_list = []
        cache_pop_trigger_limit = 200
        skip_num = 0
        for mini_sentence_i in range(len(mini_sentences)):
            mini_sentence = mini_sentences[mini_sentence_i]
            mini_sentence_id = mini_sentences_id[mini_sentence_i]
            mini_sentence = cache_sent + mini_sentence
            mini_sentence_id = np.concatenate((cache_sent_id, mini_sentence_id), axis=0, dtype='int32')
            text_length = len(mini_sentence_id)
            # Block the cached prefix from attending to the new segment.
            vad_mask = self.vad_mask(text_length, len(cache))[None, None, :, :].astype(np.float32)
            data = {
                "input": mini_sentence_id[None, :],
                "text_lengths": np.array([text_length], dtype='int32'),
                "vad_mask": vad_mask,
                "sub_masks": vad_mask
            }
            try:
                outputs = self.infer(data['input'], data['text_lengths'],
                                     data['vad_mask'], data["sub_masks"])
                y = outputs[0]
                punctuations = np.argmax(y, axis=-1)[0]
                assert punctuations.size == len(mini_sentence)
            except ONNXRuntimeError:
                logging.warning("ONNXRuntimeError during punctuation inference")
            # Search for the last period/question mark; everything after it
            # becomes the cache for the next mini-sentence.
            if mini_sentence_i < len(mini_sentences) - 1:
                sentenceEnd = -1
                last_comma_index = -1
                for i in range(len(punctuations) - 2, 1, -1):
                    if self.punc_list[punctuations[i]] == "。" or self.punc_list[punctuations[i]] == "？":
                        sentenceEnd = i
                        break
                    if last_comma_index < 0 and self.punc_list[punctuations[i]] == "，":
                        last_comma_index = i
                if sentenceEnd < 0 and len(mini_sentence) > cache_pop_trigger_limit and last_comma_index >= 0:
                    # The sentence is too long, cut it off at a comma.
                    sentenceEnd = last_comma_index
                    punctuations[sentenceEnd] = self.period
                cache_sent = mini_sentence[sentenceEnd + 1:]
                cache_sent_id = mini_sentence_id[sentenceEnd + 1:]
                mini_sentence = mini_sentence[0:sentenceEnd + 1]
                punctuations = punctuations[0:sentenceEnd + 1]
            punctuations_np = [int(x) for x in punctuations]
            new_mini_sentence_punc += punctuations_np
            sentence_punc_list += [self.punc_list[int(x)] for x in punctuations_np]
            sentence_words_list += mini_sentence

        assert len(sentence_punc_list) == len(sentence_words_list)
        words_with_punc = []
        sentence_punc_list_out = []
        for i in range(0, len(sentence_words_list)):
            if i > 0:
                if len(sentence_words_list[i][0].encode()) == 1 and len(sentence_words_list[i - 1][-1].encode()) == 1:
                    sentence_words_list[i] = " " + sentence_words_list[i]
            # Skip the words that came from the cache; an earlier call
            # already emitted them.
            if skip_num < len(cache):
                skip_num += 1
            else:
                words_with_punc.append(sentence_words_list[i])
            if skip_num >= len(cache):
                sentence_punc_list_out.append(sentence_punc_list[i])
                if sentence_punc_list[i] != "_":
                    words_with_punc.append(sentence_punc_list[i])
        sentence_out = "".join(words_with_punc)

        sentenceEnd = -1
        for i in range(len(sentence_punc_list) - 2, 1, -1):
            if sentence_punc_list[i] == "。" or sentence_punc_list[i] == "？":
                sentenceEnd = i
                break
        cache_out = sentence_words_list[sentenceEnd + 1:]
        if sentence_out[-1] in self.punc_list:
            sentence_out = sentence_out[:-1]
            sentence_punc_list_out[-1] = "_"
        param_dict[cache_key] = cache_out
        return sentence_out, sentence_punc_list_out, cache_out

    def vad_mask(self, size, vad_pos, dtype=bool):
        """Create a mask for decoder self-attention.
        :param int size: size of the mask
        :param int vad_pos: position of the VAD boundary (length of the cache)
        :param np.dtype dtype: result dtype
        :rtype: np.ndarray (size, size)
        """
        ret = np.ones((size, size), dtype=dtype)
        if vad_pos <= 0 or vad_pos >= size:
            return ret
        # Block the cached prefix (rows before vad_pos - 1) from attending to
        # the tokens of the new segment (columns from vad_pos on).
        sub_corner = np.zeros(
            (vad_pos - 1, size - vad_pos), dtype=dtype)
        ret[0:vad_pos - 1, vad_pos:] = sub_corner
        return ret
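
    # Illustration: vad_mask(4, 2) keeps full attention everywhere except
    # the cached-prefix rows before vad_pos - 1, which are blocked from the
    # new segment's columns:
    #     [[1, 1, 0, 0],
    #      [1, 1, 1, 1],
    #      [1, 1, 1, 1],
    #      [1, 1, 1, 1]]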

    def infer(self, feats: np.ndarray,
              feats_len: np.ndarray,
              vad_mask: np.ndarray,
              sub_masks: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        outputs = self.ort_infer([feats, feats_len, vad_mask, sub_masks])
        return outputs
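

# A minimal streaming sketch, assuming a local model directory (the path and
# the sample segments below are illustrative):
if __name__ == "__main__":
    model = CT_Transformer_VadRealtime("<path/to/punc_model_dir>")
    param_dict = {"cache": []}
    # Feed VAD segments one at a time; param_dict["cache"] carries the
    # unfinished sentence context between calls.
    for segment in ["跨境河流是养育沿岸", "人民的生命之源"]:
        sentence, punc_list, cache = model(segment, param_dict)
        print(sentence)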