utils.py

# -*- encoding: utf-8 -*-
import functools
import logging
import warnings
from pathlib import Path
from typing import Any, Dict, Iterable, List, NamedTuple, Set, Union

import numpy as np
import yaml
from onnxruntime import (GraphOptimizationLevel, InferenceSession,
                         SessionOptions, get_available_providers, get_device)
from typeguard import check_argument_types

root_dir = Path(__file__).resolve().parent

logger_initialized = {}


class TokenIDConverter():
    def __init__(self, token_list: Union[List, str]):
        check_argument_types()

        self.token_list = token_list
        self.unk_symbol = token_list[-1]
        self.token2id = {v: i for i, v in enumerate(self.token_list)}
        self.unk_id = self.token2id[self.unk_symbol]

    def get_num_vocabulary_size(self) -> int:
        return len(self.token_list)

    def ids2tokens(self,
                   integers: Union[np.ndarray, Iterable[int]]) -> List[str]:
        if isinstance(integers, np.ndarray) and integers.ndim != 1:
            raise TokenIDConverterError(
                f"Must be 1 dim ndarray, but got {integers.ndim}")
        return [self.token_list[i] for i in integers]

    def tokens2ids(self, tokens: Iterable[str]) -> List[int]:
        return [self.token2id.get(i, self.unk_id) for i in tokens]

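# A minimal usage sketch for TokenIDConverter (the toy token list is
# illustrative, not part of the module). The last entry of token_list is
# treated as the unknown symbol, so out-of-vocabulary tokens map to unk_id:
#
#     converter = TokenIDConverter(["a", "b", "<unk>"])
#     converter.tokens2ids(["a", "zzz"])  # -> [0, 2]
#     converter.ids2tokens([1, 2])        # -> ["b", "<unk>"]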

class CharTokenizer():
    def __init__(
        self,
        symbol_value: Union[Path, str, Iterable[str], None] = None,
        space_symbol: str = "<space>",
        remove_non_linguistic_symbols: bool = False,
    ):
        check_argument_types()

        self.space_symbol = space_symbol
        self.non_linguistic_symbols = self.load_symbols(symbol_value)
        self.remove_non_linguistic_symbols = remove_non_linguistic_symbols

    @staticmethod
    def load_symbols(value: Union[Path, str, Iterable[str], None] = None) -> Set:
        if value is None:
            return set()

        # isinstance() cannot check a subscripted generic such as
        # Iterable[str]; treat any non-path, non-string value as an
        # iterable of symbols instead.
        if not isinstance(value, (Path, str)):
            return set(value)

        file_path = Path(value)
        if not file_path.exists():
            logging.warning("%s doesn't exist.", file_path)
            return set()

        with file_path.open("r", encoding="utf-8") as f:
            return set(line.rstrip() for line in f)

    def text2tokens(self, line: Union[str, list]) -> List[str]:
        tokens = []
        while len(line) != 0:
            for w in self.non_linguistic_symbols:
                if line.startswith(w):
                    if not self.remove_non_linguistic_symbols:
                        tokens.append(line[: len(w)])
                    line = line[len(w):]
                    break
            else:
                t = line[0]
                if t == " ":
                    # Use the configured space symbol rather than a
                    # hard-coded "<space>" so tokens2text can round-trip.
                    t = self.space_symbol
                tokens.append(t)
                line = line[1:]
        return tokens

    def tokens2text(self, tokens: Iterable[str]) -> str:
        tokens = [t if t != self.space_symbol else " " for t in tokens]
        return "".join(tokens)

    def __repr__(self):
        return (
            f"{self.__class__.__name__}("
            f'space_symbol="{self.space_symbol}", '
            f'non_linguistic_symbols="{self.non_linguistic_symbols}"'
            f")"
        )

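# A usage sketch for CharTokenizer (the "<noise>" symbol is illustrative):
# text is split per character, spaces become space_symbol, and registered
# non-linguistic symbols are consumed as whole units:
#
#     tokenizer = CharTokenizer(symbol_value=["<noise>"])
#     tokenizer.text2tokens("<noise>a b")           # -> ["<noise>", "a", "<space>", "b"]
#     tokenizer.tokens2text(["a", "<space>", "b"])  # -> "a b"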

class Hypothesis(NamedTuple):
    """Hypothesis data type."""
    yseq: np.ndarray
    score: Union[float, np.ndarray] = 0
    scores: Dict[str, Union[float, np.ndarray]] = dict()
    states: Dict[str, Any] = dict()

    def asdict(self) -> dict:
        """Convert data to a JSON-friendly dict."""
        return self._replace(
            yseq=self.yseq.tolist(),
            score=float(self.score),
            scores={k: float(v) for k, v in self.scores.items()},
        )._asdict()

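# Illustrative example of the JSON-friendly conversion (values are made up):
#
#     hyp = Hypothesis(yseq=np.array([1, 2, 3]), score=-0.5)
#     hyp.asdict()
#     # -> {'yseq': [1, 2, 3], 'score': -0.5, 'scores': {}, 'states': {}}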

class TokenIDConverterError(Exception):
    pass


class ONNXRuntimeError(Exception):
    pass


class OrtInferSession():
    def __init__(self, model_file, device_id=-1, intra_op_num_threads=4):
        device_id = str(device_id)
        sess_opt = SessionOptions()
        sess_opt.intra_op_num_threads = intra_op_num_threads
        sess_opt.log_severity_level = 4
        sess_opt.enable_cpu_mem_arena = False
        sess_opt.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL

        cuda_ep = 'CUDAExecutionProvider'
        cuda_provider_options = {
            "device_id": device_id,
            "arena_extend_strategy": "kNextPowerOfTwo",
            "cudnn_conv_algo_search": "EXHAUSTIVE",
            "do_copy_in_default_stream": "true",
        }
        cpu_ep = 'CPUExecutionProvider'
        cpu_provider_options = {
            "arena_extend_strategy": "kSameAsRequested",
        }

        EP_list = []
        if device_id != "-1" and get_device() == 'GPU' \
                and cuda_ep in get_available_providers():
            EP_list = [(cuda_ep, cuda_provider_options)]
        EP_list.append((cpu_ep, cpu_provider_options))

        self._verify_model(model_file)
        self.session = InferenceSession(model_file,
                                        sess_options=sess_opt,
                                        providers=EP_list)

        if device_id != "-1" and cuda_ep not in self.session.get_providers():
            warnings.warn(
                f'{cuda_ep} is not available in the current environment; '
                f'inference automatically falls back to {cpu_ep}.\n'
                'Please ensure the installed onnxruntime-gpu version matches '
                'your CUDA and cuDNN versions; the compatibility matrix is on '
                'the official site: '
                'https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html',
                RuntimeWarning)

    def __call__(self,
                 input_content: List[np.ndarray]) -> np.ndarray:
        input_dict = dict(zip(self.get_input_names(), input_content))
        try:
            return self.session.run(self.get_output_names(), input_dict)
        except Exception as e:
            raise ONNXRuntimeError('ONNXRuntime inference failed.') from e

    def get_input_names(self):
        return [v.name for v in self.session.get_inputs()]

    def get_output_names(self):
        return [v.name for v in self.session.get_outputs()]

    def get_character_list(self, key: str = 'character'):
        # Fetch the metadata map here as well, so this method works even if
        # have_key() has not been called first.
        self.meta_dict = self.session.get_modelmeta().custom_metadata_map
        return self.meta_dict[key].splitlines()

    def have_key(self, key: str = 'character') -> bool:
        self.meta_dict = self.session.get_modelmeta().custom_metadata_map
        return key in self.meta_dict

    @staticmethod
    def _verify_model(model_path):
        model_path = Path(model_path)
        if not model_path.exists():
            raise FileNotFoundError(f'{model_path} does not exist.')
        if not model_path.is_file():
            raise FileExistsError(f'{model_path} is not a file.')

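# A hedged usage sketch for OrtInferSession ("model.onnx" and the feature
# arrays are hypothetical). device_id=-1 keeps inference on the CPU; a
# non-negative id requests the CUDA provider when it is available:
#
#     session = OrtInferSession("model.onnx", device_id=-1)
#     outputs = session([feats, feats_len])  # one ndarray per model input, in order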

def split_to_mini_sentence(words: list, word_limit: int = 20):
    assert word_limit > 1
    if len(words) <= word_limit:
        return [words]

    sentences = []
    length = len(words)
    sentence_len = length // word_limit
    for i in range(sentence_len):
        sentences.append(words[i * word_limit:(i + 1) * word_limit])
    if length % word_limit > 0:
        sentences.append(words[sentence_len * word_limit:])
    return sentences

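# Example: with word_limit=2, a five-word list is split into chunks of at
# most two words, the remainder going into the final chunk:
#
#     split_to_mini_sentence(["a", "b", "c", "d", "e"], 2)
#     # -> [["a", "b"], ["c", "d"], ["e"]]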

def code_mix_split_words(text: str):
    words = []
    segs = text.split()
    for seg in segs:
        # There is no space inside seg.
        current_word = ""
        for c in seg:
            if len(c.encode()) == 1:
                # ASCII char: extend the current word.
                current_word += c
            else:
                # Multi-byte char (e.g. a Chinese character): flush any
                # pending ASCII word, then emit the char as its own token.
                if len(current_word) > 0:
                    words.append(current_word)
                    current_word = ""
                words.append(c)
        if len(current_word) > 0:
            words.append(current_word)
    return words

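# Example of the code-mixed split: ASCII runs are kept together as words,
# while every multi-byte character becomes its own token:
#
#     code_mix_split_words("hello世界 ok")  # -> ["hello", "世", "界", "ok"]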

def read_yaml(yaml_path: Union[str, Path]) -> Dict:
    if not Path(yaml_path).exists():
        raise FileNotFoundError(f'The {yaml_path} does not exist.')

    with open(str(yaml_path), 'rb') as f:
        data = yaml.load(f, Loader=yaml.Loader)
    return data

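# Usage sketch (the config path is hypothetical):
#
#     config = read_yaml("config.yaml")  # parsed YAML as a Python dict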

@functools.lru_cache()
def get_logger(name='funasr_onnx'):
    """Initialize and get a logger by name.

    If the logger has not been initialized, this method will initialize it
    by adding a StreamHandler; otherwise the already-initialized logger is
    returned directly.

    Args:
        name (str): Logger name.

    Returns:
        logging.Logger: The expected logger.
    """
    logger = logging.getLogger(name)
    if name in logger_initialized:
        return logger

    # If a parent logger (e.g. "funasr_onnx" for "funasr_onnx.utils") is
    # already initialized, reuse it without adding another handler.
    for logger_name in logger_initialized:
        if name.startswith(logger_name):
            return logger

    formatter = logging.Formatter(
        '[%(asctime)s] %(name)s %(levelname)s: %(message)s',
        datefmt="%Y/%m/%d %H:%M:%S")

    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    logger.addHandler(sh)
    logger_initialized[name] = True
    logger.propagate = False
    return logger

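# Usage sketch: loggers are cached per name, so repeated calls return the
# same configured instance without stacking duplicate handlers:
#
#     logger = get_logger()
#     logger.warning("loading model")
#     # e.g. "[2024/01/01 12:00:00] funasr_onnx WARNING: loading model"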