punc_bin.py

# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)

import os.path
from pathlib import Path
from typing import List, Union, Tuple

import numpy as np

from .utils.utils import (ONNXRuntimeError,
                          OrtInferSession, get_logger,
                          read_yaml)
from .utils.utils import (TokenIDConverter, split_to_mini_sentence, code_mix_split_words)

logging = get_logger()


class CT_Transformer():
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    CT-Transformer: Controllable time-delay transformer for real-time punctuation prediction and disfluency detection
    https://arxiv.org/pdf/2003.01309.pdf
    """
    def __init__(self, model_dir: Union[str, Path] = None,
                 batch_size: int = 1,
                 device_id: Union[str, int] = "-1",
                 quantize: bool = False,
                 intra_op_num_threads: int = 4,
                 cache_dir: str = None,
                 ):
        if not Path(model_dir).exists():
            from modelscope.hub.snapshot_download import snapshot_download
            try:
                model_dir = snapshot_download(model_dir, cache_dir=cache_dir)
            except Exception:
                raise ValueError(
                    "model_dir must be a model name on ModelScope or a local path "
                    "downloaded from ModelScope, but got {}".format(model_dir))

        model_file = os.path.join(model_dir, 'model.onnx')
        if quantize:
            model_file = os.path.join(model_dir, 'model_quant.onnx')
        if not os.path.exists(model_file):
            print("ONNX model does not exist, exporting it now")
            from funasr.export.export_model import ModelExport
            export_model = ModelExport(
                cache_dir=cache_dir,
                onnx=True,
                device="cpu",
                quant=quantize,
            )
            export_model.export(model_dir)

        config_file = os.path.join(model_dir, 'punc.yaml')
        config = read_yaml(config_file)

        self.converter = TokenIDConverter(config['token_list'])
        self.ort_infer = OrtInferSession(model_file, device_id, intra_op_num_threads=intra_op_num_threads)
        self.batch_size = 1  # only batch size 1 is supported
        self.punc_list = config['punc_list']
        self.period = 0
        # Map ASCII punctuation to its full-width form and record the index of
        # the period, which is used later to force sentence breaks.
        for i in range(len(self.punc_list)):
            if self.punc_list[i] == ",":
                self.punc_list[i] = "，"
            elif self.punc_list[i] == "?":
                self.punc_list[i] = "？"
            elif self.punc_list[i] == "。":
                self.period = i
    def __call__(self, text: Union[list, str], split_size=20):
        split_text = code_mix_split_words(text)
        split_text_id = self.converter.tokens2ids(split_text)
        mini_sentences = split_to_mini_sentence(split_text, split_size)
        mini_sentences_id = split_to_mini_sentence(split_text_id, split_size)
        assert len(mini_sentences) == len(mini_sentences_id)
        cache_sent = []
        cache_sent_id = []
        new_mini_sentence = ""
        new_mini_sentence_punc = []
        cache_pop_trigger_limit = 200
        for mini_sentence_i in range(len(mini_sentences)):
            mini_sentence = mini_sentences[mini_sentence_i]
            mini_sentence_id = mini_sentences_id[mini_sentence_i]
            mini_sentence = cache_sent + mini_sentence
            mini_sentence_id = np.array(cache_sent_id + mini_sentence_id, dtype='int32')
            data = {
                "text": mini_sentence_id[None, :],
                "text_lengths": np.array([len(mini_sentence_id)], dtype='int32'),
            }
            try:
                outputs = self.infer(data['text'], data['text_lengths'])
                y = outputs[0]
                punctuations = np.argmax(y, axis=-1)[0]
                assert punctuations.size == len(mini_sentence)
            except ONNXRuntimeError:
                logging.warning("ONNX inference failed for mini-sentence %d", mini_sentence_i)

            # Search for the last Period/QuestionMark as cache
            if mini_sentence_i < len(mini_sentences) - 1:
                sentenceEnd = -1
                last_comma_index = -1
                for i in range(len(punctuations) - 2, 1, -1):
                    if self.punc_list[punctuations[i]] == "。" or self.punc_list[punctuations[i]] == "？":
                        sentenceEnd = i
                        break
                    if last_comma_index < 0 and self.punc_list[punctuations[i]] == "，":
                        last_comma_index = i

                if sentenceEnd < 0 and len(mini_sentence) > cache_pop_trigger_limit and last_comma_index >= 0:
                    # The sentence is too long, cut it off at a comma.
                    sentenceEnd = last_comma_index
                    punctuations[sentenceEnd] = self.period
                cache_sent = mini_sentence[sentenceEnd + 1:]
                cache_sent_id = mini_sentence_id[sentenceEnd + 1:].tolist()
                mini_sentence = mini_sentence[0:sentenceEnd + 1]
                punctuations = punctuations[0:sentenceEnd + 1]

            new_mini_sentence_punc += [int(x) for x in punctuations]
            words_with_punc = []
            for i in range(len(mini_sentence)):
                if i > 0:
                    # Insert a space between consecutive single-byte (ASCII) words.
                    if len(mini_sentence[i][0].encode()) == 1 and len(mini_sentence[i - 1][0].encode()) == 1:
                        mini_sentence[i] = " " + mini_sentence[i]
                words_with_punc.append(mini_sentence[i])
                if self.punc_list[punctuations[i]] != "_":
                    words_with_punc.append(self.punc_list[punctuations[i]])
            new_mini_sentence += "".join(words_with_punc)
            # Add a period at the end of the sentence
            new_mini_sentence_out = new_mini_sentence
            new_mini_sentence_punc_out = new_mini_sentence_punc
            if mini_sentence_i == len(mini_sentences) - 1:
                if new_mini_sentence[-1] == "，" or new_mini_sentence[-1] == "、":
                    new_mini_sentence_out = new_mini_sentence[:-1] + "。"
                    new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.period]
                elif new_mini_sentence[-1] != "。" and new_mini_sentence[-1] != "？":
                    new_mini_sentence_out = new_mini_sentence + "。"
                    new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.period]
        return new_mini_sentence_out, new_mini_sentence_punc_out
    def infer(self, feats: np.ndarray,
              feats_len: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        outputs = self.ort_infer([feats, feats_len])
        return outputs
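
# Offline usage, as a minimal sketch (not part of the original file). The
# model id below is an assumption -- substitute any CT-Transformer punctuation
# model exported to ONNX, or a local directory containing model.onnx/punc.yaml:
#
#   model = CT_Transformer("damo/punc_ct-transformer_zh-cn-common-vocab272727-onnx")
#   text_out, punc_ids = model("跨境河流是养育沿岸人民的生命之源")
#   # text_out is the punctuated string; punc_ids holds one punctuation id per token.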


class CT_Transformer_VadRealtime(CT_Transformer):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    CT-Transformer: Controllable time-delay transformer for real-time punctuation prediction and disfluency detection
    https://arxiv.org/pdf/2003.01309.pdf
    """
    def __init__(self, model_dir: Union[str, Path] = None,
                 batch_size: int = 1,
                 device_id: Union[str, int] = "-1",
                 quantize: bool = False,
                 intra_op_num_threads: int = 4,
                 cache_dir: str = None
                 ):
        super(CT_Transformer_VadRealtime, self).__init__(
            model_dir, batch_size, device_id, quantize, intra_op_num_threads, cache_dir=cache_dir)
    def __call__(self, text: str, param_dict: dict, split_size=20):
        cache_key = "cache"
        assert cache_key in param_dict
        cache = param_dict[cache_key]
        if cache is not None and len(cache) > 0:
            precache = "".join(cache)
        else:
            precache = ""
            cache = []
        full_text = precache + " " + text
        split_text = code_mix_split_words(full_text)
        split_text_id = self.converter.tokens2ids(split_text)
        mini_sentences = split_to_mini_sentence(split_text, split_size)
        mini_sentences_id = split_to_mini_sentence(split_text_id, split_size)
        new_mini_sentence_punc = []
        assert len(mini_sentences) == len(mini_sentences_id)
        cache_sent = []
        cache_sent_id = np.array([], dtype='int32')
        sentence_punc_list = []
        sentence_words_list = []
        cache_pop_trigger_limit = 200
        skip_num = 0
        for mini_sentence_i in range(len(mini_sentences)):
            mini_sentence = mini_sentences[mini_sentence_i]
            mini_sentence_id = mini_sentences_id[mini_sentence_i]
            mini_sentence = cache_sent + mini_sentence
            mini_sentence_id = np.concatenate((cache_sent_id, mini_sentence_id), axis=0, dtype='int32')
            text_length = len(mini_sentence_id)
            vad_mask = self.vad_mask(text_length, len(cache))[None, None, :, :].astype(np.float32)
            data = {
                "input": mini_sentence_id[None, :],
                "text_lengths": np.array([text_length], dtype='int32'),
                "vad_mask": vad_mask,
                "sub_masks": vad_mask
            }
            try:
                outputs = self.infer(data['input'], data['text_lengths'], data['vad_mask'], data["sub_masks"])
                y = outputs[0]
                punctuations = np.argmax(y, axis=-1)[0]
                assert punctuations.size == len(mini_sentence)
            except ONNXRuntimeError:
                logging.warning("ONNX inference failed for mini-sentence %d", mini_sentence_i)

            # Search for the last Period/QuestionMark as cache
            if mini_sentence_i < len(mini_sentences) - 1:
                sentenceEnd = -1
                last_comma_index = -1
                for i in range(len(punctuations) - 2, 1, -1):
                    if self.punc_list[punctuations[i]] == "。" or self.punc_list[punctuations[i]] == "？":
                        sentenceEnd = i
                        break
                    if last_comma_index < 0 and self.punc_list[punctuations[i]] == "，":
                        last_comma_index = i

                if sentenceEnd < 0 and len(mini_sentence) > cache_pop_trigger_limit and last_comma_index >= 0:
                    # The sentence is too long, cut it off at a comma.
                    sentenceEnd = last_comma_index
                    punctuations[sentenceEnd] = self.period
                cache_sent = mini_sentence[sentenceEnd + 1:]
                cache_sent_id = mini_sentence_id[sentenceEnd + 1:]
                mini_sentence = mini_sentence[0:sentenceEnd + 1]
                punctuations = punctuations[0:sentenceEnd + 1]

            punctuations_np = [int(x) for x in punctuations]
            new_mini_sentence_punc += punctuations_np
            sentence_punc_list += [self.punc_list[int(x)] for x in punctuations_np]
            sentence_words_list += mini_sentence

        assert len(sentence_punc_list) == len(sentence_words_list)
        words_with_punc = []
        sentence_punc_list_out = []
        for i in range(0, len(sentence_words_list)):
            if i > 0:
                if len(sentence_words_list[i][0].encode()) == 1 and len(sentence_words_list[i - 1][-1].encode()) == 1:
                    sentence_words_list[i] = " " + sentence_words_list[i]
            # Words coming from the cached prefix were already returned by the
            # previous call, so skip them in this call's output.
            if skip_num < len(cache):
                skip_num += 1
            else:
                words_with_punc.append(sentence_words_list[i])
            if skip_num >= len(cache):
                sentence_punc_list_out.append(sentence_punc_list[i])
                if sentence_punc_list[i] != "_":
                    words_with_punc.append(sentence_punc_list[i])
        sentence_out = "".join(words_with_punc)

        # Everything after the last Period/QuestionMark becomes the new cache.
        sentenceEnd = -1
        for i in range(len(sentence_punc_list) - 2, 1, -1):
            if sentence_punc_list[i] == "。" or sentence_punc_list[i] == "？":
                sentenceEnd = i
                break
        cache_out = sentence_words_list[sentenceEnd + 1:]
        if sentence_out[-1] in self.punc_list:
            sentence_out = sentence_out[:-1]
            sentence_punc_list_out[-1] = "_"
        param_dict[cache_key] = cache_out
        return sentence_out, sentence_punc_list_out, cache_out
    def vad_mask(self, size, vad_pos, dtype=bool):
        """Create mask for decoder self-attention.
        :param int size: size of mask
        :param int vad_pos: index of the VAD position
        :param numpy.dtype dtype: result dtype
        :rtype: np.ndarray of shape (size, size)
        """
        ret = np.ones((size, size), dtype=dtype)
        if vad_pos <= 0 or vad_pos >= size:
            return ret
        sub_corner = np.zeros(
            (vad_pos - 1, size - vad_pos), dtype=dtype)
        ret[0:vad_pos - 1, vad_pos:] = sub_corner
        return ret
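
    # Illustration (not part of the original file): for size=5 and vad_pos=2,
    # vad_mask zeroes the upper-right corner, so tokens strictly before the
    # VAD position cannot attend to tokens at or after it:
    #
    #   [[1, 1, 0, 0, 0],
    #    [1, 1, 1, 1, 1],
    #    [1, 1, 1, 1, 1],
    #    [1, 1, 1, 1, 1],
    #    [1, 1, 1, 1, 1]]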
    def infer(self, feats: np.ndarray,
              feats_len: np.ndarray,
              vad_mask: np.ndarray,
              sub_masks: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        outputs = self.ort_infer([feats, feats_len, vad_mask, sub_masks])
        return outputs
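
# Streaming usage sketch (not part of the original file; the model id is an
# assumption -- use the matching VAD-realtime CT-Transformer ONNX model):
#
#   model = CT_Transformer_VadRealtime("damo/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727-onnx")
#   param_dict = {"cache": []}
#   for chunk in ["跨境河流是养育沿岸", "人民的生命之源"]:
#       text_out, punc_out, cache = model(chunk, param_dict)
#       # param_dict["cache"] carries the unterminated tail into the next call.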