utils.py

# -*- encoding: utf-8 -*-
import functools
import logging
import re
import warnings
from pathlib import Path
from typing import Any, Dict, Iterable, List, NamedTuple, Set, Union

import numpy as np
import yaml

try:
    from onnxruntime import (GraphOptimizationLevel, InferenceSession,
                             SessionOptions, get_available_providers,
                             get_device)
except ImportError:
    print("Please install onnxruntime first: pip3 install onnxruntime")

import jieba

root_dir = Path(__file__).resolve().parent

logger_initialized = {}

class TokenIDConverter():
    def __init__(self, token_list: Union[List, str]):
        self.token_list = token_list
        # By convention, the last entry of the token list is the unk symbol.
        self.unk_symbol = token_list[-1]
        self.token2id = {v: i for i, v in enumerate(self.token_list)}
        self.unk_id = self.token2id[self.unk_symbol]

    def get_num_vocabulary_size(self) -> int:
        return len(self.token_list)

    def ids2tokens(self,
                   integers: Union[np.ndarray, Iterable[int]]) -> List[str]:
        if isinstance(integers, np.ndarray) and integers.ndim != 1:
            raise TokenIDConverterError(
                f"Must be 1 dim ndarray, but got {integers.ndim}")
        return [self.token_list[i] for i in integers]

    def tokens2ids(self, tokens: Iterable[str]) -> List[int]:
        return [self.token2id.get(i, self.unk_id) for i in tokens]

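# Illustrative usage (not from the original source; the tiny token list below
# is made up, with the last entry acting as the unk symbol):
#   >>> converter = TokenIDConverter(["a", "b", "<unk>"])
#   >>> converter.tokens2ids(["a", "x"])   # "x" is out of vocabulary
#   [0, 2]
#   >>> converter.ids2tokens([0, 1])
#   ['a', 'b']
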
class CharTokenizer():
    def __init__(
        self,
        symbol_value: Union[Path, str, Iterable[str]] = None,
        space_symbol: str = "<space>",
        remove_non_linguistic_symbols: bool = False,
    ):
        self.space_symbol = space_symbol
        self.non_linguistic_symbols = self.load_symbols(symbol_value)
        self.remove_non_linguistic_symbols = remove_non_linguistic_symbols

    @staticmethod
    def load_symbols(value: Union[Path, str, Iterable[str]] = None) -> Set:
        if value is None:
            return set()

        # isinstance() cannot take a subscripted generic such as
        # Iterable[str], so test for a non-path, non-string iterable instead.
        if isinstance(value, Iterable) and not isinstance(value, (str, Path)):
            return set(value)

        file_path = Path(value)
        if not file_path.exists():
            logging.warning("%s doesn't exist.", file_path)
            return set()

        with file_path.open("r", encoding="utf-8") as f:
            return set(line.rstrip() for line in f)

    def text2tokens(self, line: Union[str, list]) -> List[str]:
        tokens = []
        while len(line) != 0:
            for w in self.non_linguistic_symbols:
                if line.startswith(w):
                    if not self.remove_non_linguistic_symbols:
                        tokens.append(line[: len(w)])
                    line = line[len(w):]
                    break
            else:
                t = line[0]
                if t == " ":
                    t = self.space_symbol
                tokens.append(t)
                line = line[1:]
        return tokens

    def tokens2text(self, tokens: Iterable[str]) -> str:
        tokens = [t if t != self.space_symbol else " " for t in tokens]
        return "".join(tokens)

    def __repr__(self):
        return (
            f"{self.__class__.__name__}("
            f'space_symbol="{self.space_symbol}", '
            f'non_linguistic_symbols="{self.non_linguistic_symbols}"'
            f")"
        )

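# Illustrative usage (hypothetical input; with the default "<space>" symbol
# and no non-linguistic symbol file):
#   >>> tokenizer = CharTokenizer()
#   >>> tokenizer.text2tokens("ab 你好")
#   ['a', 'b', '<space>', '你', '好']
#   >>> tokenizer.tokens2text(['a', 'b', '<space>', '你', '好'])
#   'ab 你好'
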
class Hypothesis(NamedTuple):
    """Hypothesis data type."""
    yseq: np.ndarray
    score: Union[float, np.ndarray] = 0
    scores: Dict[str, Union[float, np.ndarray]] = dict()
    states: Dict[str, Any] = dict()

    def asdict(self) -> dict:
        """Convert data to JSON-friendly dict."""
        return self._replace(
            yseq=self.yseq.tolist(),
            score=float(self.score),
            scores={k: float(v) for k, v in self.scores.items()},
        )._asdict()

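# Illustrative usage (values are made up):
#   >>> hyp = Hypothesis(yseq=np.array([1, 2, 3]), score=0.5)
#   >>> hyp.asdict()
#   {'yseq': [1, 2, 3], 'score': 0.5, 'scores': {}, 'states': {}}
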
class TokenIDConverterError(Exception):
    pass


class ONNXRuntimeError(Exception):
    pass

class OrtInferSession():
    def __init__(self, model_file, device_id=-1, intra_op_num_threads=4):
        device_id = str(device_id)
        sess_opt = SessionOptions()
        sess_opt.intra_op_num_threads = intra_op_num_threads
        sess_opt.log_severity_level = 4
        sess_opt.enable_cpu_mem_arena = False
        sess_opt.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL

        cuda_ep = 'CUDAExecutionProvider'
        cuda_provider_options = {
            "device_id": device_id,
            "arena_extend_strategy": "kNextPowerOfTwo",
            "cudnn_conv_algo_search": "EXHAUSTIVE",
            "do_copy_in_default_stream": "true",
        }
        cpu_ep = 'CPUExecutionProvider'
        cpu_provider_options = {
            "arena_extend_strategy": "kSameAsRequested",
        }

        # Prefer the CUDA provider when a GPU build is installed and a device
        # was requested; always keep the CPU provider as a fallback.
        EP_list = []
        if device_id != "-1" and get_device() == 'GPU' \
                and cuda_ep in get_available_providers():
            EP_list = [(cuda_ep, cuda_provider_options)]
        EP_list.append((cpu_ep, cpu_provider_options))

        self._verify_model(model_file)
        self.session = InferenceSession(model_file,
                                        sess_options=sess_opt,
                                        providers=EP_list)

        if device_id != "-1" and cuda_ep not in self.session.get_providers():
            warnings.warn(
                f'{cuda_ep} is not available in the current environment, so '
                f'inference automatically falls back to {cpu_ep}.\n'
                'Please ensure the installed onnxruntime-gpu version matches '
                'your CUDA and cuDNN versions; the compatibility matrix is on '
                'the official web site: '
                'https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html',
                RuntimeWarning)

    def __call__(self,
                 input_content: List[np.ndarray]) -> np.ndarray:
        input_dict = dict(zip(self.get_input_names(), input_content))
        try:
            return self.session.run(self.get_output_names(), input_dict)
        except Exception as e:
            raise ONNXRuntimeError('ONNXRuntime inference failed.') from e

    def get_input_names(self):
        return [v.name for v in self.session.get_inputs()]

    def get_output_names(self):
        return [v.name for v in self.session.get_outputs()]

    def get_character_list(self, key: str = 'character'):
        # Note: have_key() must be called first so that self.meta_dict is set.
        return self.meta_dict[key].splitlines()

    def have_key(self, key: str = 'character') -> bool:
        self.meta_dict = self.session.get_modelmeta().custom_metadata_map
        return key in self.meta_dict

    @staticmethod
    def _verify_model(model_path):
        model_path = Path(model_path)
        if not model_path.exists():
            raise FileNotFoundError(f'{model_path} does not exist.')
        if not model_path.is_file():
            raise FileExistsError(f'{model_path} is not a file.')

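# Illustrative usage (the model path and input shape are hypothetical; a real
# model defines its own input names, shapes, and dtypes):
#   >>> session = OrtInferSession("model.onnx", device_id=-1)  # CPU only
#   >>> outputs = session([np.zeros((1, 100, 80), dtype=np.float32)])
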
def split_to_mini_sentence(words: list, word_limit: int = 20):
    assert word_limit > 1
    if len(words) <= word_limit:
        return [words]
    sentences = []
    length = len(words)
    sentence_len = length // word_limit
    for i in range(sentence_len):
        sentences.append(words[i * word_limit:(i + 1) * word_limit])
    if length % word_limit > 0:
        sentences.append(words[sentence_len * word_limit:])
    return sentences

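# Illustrative usage:
#   >>> split_to_mini_sentence(list("abcde"), word_limit=2)
#   [['a', 'b'], ['c', 'd'], ['e']]
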
def code_mix_split_words(text: str):
    words = []
    segs = text.split()
    for seg in segs:
        # There is no space in seg.
        current_word = ""
        for c in seg:
            if len(c.encode()) == 1:
                # This is an ASCII char.
                current_word += c
            else:
                # A multi-byte char, treated as Chinese: flush any pending
                # ASCII word, then emit the char as its own token.
                if len(current_word) > 0:
                    words.append(current_word)
                    current_word = ""
                words.append(c)
        if len(current_word) > 0:
            words.append(current_word)
    return words

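# Illustrative usage (the mixed-language input is made up):
#   >>> code_mix_split_words("hello你好world")
#   ['hello', '你', '好', 'world']
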
def isEnglish(text: str):
    return bool(re.search(r"^[a-zA-Z']+$", text))

def join_chinese_and_english(input_list):
    line = ''
    for token in input_list:
        if isEnglish(token):
            line = line + ' ' + token
        else:
            line = line + token
    line = line.strip()
    return line

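# Illustrative usage:
#   >>> join_chinese_and_english(['hello', '你', '好', 'world'])
#   'hello你好 world'
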
def code_mix_split_words_jieba(seg_dict_file: str):
    jieba.load_userdict(seg_dict_file)

    def _fn(text: str):
        # Group consecutive tokens into runs of the same language.
        input_list = text.split()
        token_list_all = []
        language_list = []
        token_list_tmp = []
        language_flag = None
        for token in input_list:
            if isEnglish(token) and language_flag == 'Chinese':
                token_list_all.append(token_list_tmp)
                language_list.append('Chinese')
                token_list_tmp = []
            elif not isEnglish(token) and language_flag == 'English':
                token_list_all.append(token_list_tmp)
                language_list.append('English')
                token_list_tmp = []
            token_list_tmp.append(token)
            if isEnglish(token):
                language_flag = 'English'
            else:
                language_flag = 'Chinese'
        if token_list_tmp:
            token_list_all.append(token_list_tmp)
            language_list.append(language_flag)

        # Keep English runs as-is; re-segment Chinese runs with jieba.
        result_list = []
        for token_list_tmp, language_flag in zip(token_list_all, language_list):
            if language_flag == 'English':
                result_list.extend(token_list_tmp)
            else:
                seg_list = jieba.cut(join_chinese_and_english(token_list_tmp), HMM=False)
                result_list.extend(seg_list)
        return result_list

    return _fn

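# Illustrative usage (the user-dictionary path is hypothetical; English tokens
# pass through unchanged, Chinese runs are re-segmented by jieba):
#   >>> split_fn = code_mix_split_words_jieba("seg_dict.txt")
#   >>> tokens = split_fn("hello 你好 world")
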
def read_yaml(yaml_path: Union[str, Path]) -> Dict:
    if not Path(yaml_path).exists():
        raise FileNotFoundError(f'The {yaml_path} does not exist.')

    with open(str(yaml_path), 'rb') as f:
        data = yaml.load(f, Loader=yaml.Loader)
    return data

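# Illustrative usage (the config path is hypothetical):
#   >>> config = read_yaml("config.yaml")   # returns the parsed top-level dict
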
@functools.lru_cache()
def get_logger(name='funasr_onnx'):
    """Initialize and get a logger by name.

    If the logger has not been initialized, this method will initialize it by
    adding a StreamHandler; otherwise the initialized logger is returned
    directly.

    Args:
        name (str): Logger name.

    Returns:
        logging.Logger: The expected logger.
    """
    logger = logging.getLogger(name)
    if name in logger_initialized:
        return logger

    # A child of an already-initialized logger inherits its handlers.
    for logger_name in logger_initialized:
        if name.startswith(logger_name):
            return logger

    formatter = logging.Formatter(
        '[%(asctime)s] %(name)s %(levelname)s: %(message)s',
        datefmt="%Y/%m/%d %H:%M:%S")

    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    logger.addHandler(sh)

    logger_initialized[name] = True
    logger.propagate = False
    logging.basicConfig(level=logging.ERROR)
    return logger
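
# Illustrative usage (the message is made up; records at ERROR level and above
# are emitted through the StreamHandler attached above):
#   >>> logger = get_logger()
#   >>> logger.error("inference failed")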