# utils.py

# -*- encoding: utf-8 -*-
import functools
import logging
import re
import warnings
from pathlib import Path
from typing import Any, Dict, Iterable, List, NamedTuple, Set, Union

import jieba
import numpy as np
import torch
import yaml

try:
    from onnxruntime import (GraphOptimizationLevel, InferenceSession,
                             SessionOptions, get_available_providers,
                             get_device)
except ImportError:
    print("please pip3 install onnxruntime")


root_dir = Path(__file__).resolve().parent

logger_initialized = {}


def pad_list(xs, pad_value, max_len=None):
    n_batch = len(xs)
    if max_len is None:
        max_len = max(x.size(0) for x in xs)
    pad = xs[0].new(n_batch, max_len, *xs[0].size()[1:]).fill_(pad_value)
    for i in range(n_batch):
        pad[i, : xs[i].size(0)] = xs[i]
    return pad
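
# Usage sketch (illustrative, not part of the original file): pad a batch of
# variable-length 1-D tensors to the length of the longest one.
#     >>> xs = [torch.tensor([1., 2., 3.]), torch.tensor([4., 5.])]
#     >>> pad_list(xs, 0.0)
#     tensor([[1., 2., 3.],
#             [4., 5., 0.]])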


def make_pad_mask(lengths, xs=None, length_dim=-1, maxlen=None):
    if length_dim == 0:
        raise ValueError("length_dim cannot be 0: {}".format(length_dim))

    if not isinstance(lengths, list):
        lengths = lengths.tolist()
    bs = int(len(lengths))
    if maxlen is None:
        if xs is None:
            maxlen = int(max(lengths))
        else:
            maxlen = xs.size(length_dim)
    else:
        assert xs is None
        assert maxlen >= int(max(lengths))

    seq_range = torch.arange(0, maxlen, dtype=torch.int64)
    seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)
    seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)
    mask = seq_range_expand >= seq_length_expand

    if xs is not None:
        assert xs.size(0) == bs, (xs.size(0), bs)

        if length_dim < 0:
            length_dim = xs.dim() + length_dim
        # ind = (:, None, ..., None, :, None, ..., None),
        # with `:` at dims 0 and length_dim.
        ind = tuple(
            slice(None) if i in (0, length_dim) else None for i in range(xs.dim())
        )
        mask = mask[ind].expand_as(xs).to(xs.device)
    return mask
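
# Usage sketch (illustrative): True marks the padded positions.
#     >>> make_pad_mask([3, 1, 2])
#     tensor([[False, False, False],
#             [False,  True,  True],
#             [False, False,  True]])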


class TokenIDConverter:
    def __init__(self, token_list: Union[List, str]):
        self.token_list = token_list
        self.unk_symbol = token_list[-1]
        self.token2id = {v: i for i, v in enumerate(self.token_list)}
        self.unk_id = self.token2id[self.unk_symbol]

    def get_num_vocabulary_size(self) -> int:
        return len(self.token_list)

    def ids2tokens(self,
                   integers: Union[np.ndarray, Iterable[int]]) -> List[str]:
        if isinstance(integers, np.ndarray) and integers.ndim != 1:
            raise TokenIDConverterError(
                f"Must be 1 dim ndarray, but got {integers.ndim}")
        return [self.token_list[i] for i in integers]

    def tokens2ids(self, tokens: Iterable[str]) -> List[int]:
        return [self.token2id.get(i, self.unk_id) for i in tokens]
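
# Usage sketch (illustrative; the last entry of token_list serves as the
# unknown-token fallback):
#     >>> conv = TokenIDConverter(["a", "b", "<unk>"])
#     >>> conv.tokens2ids(["a", "x"])
#     [0, 2]
#     >>> conv.ids2tokens([0, 2])
#     ['a', '<unk>']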


class CharTokenizer:
    def __init__(
        self,
        symbol_value: Union[Path, str, Iterable[str]] = None,
        space_symbol: str = "<space>",
        remove_non_linguistic_symbols: bool = False,
    ):
        self.space_symbol = space_symbol
        self.non_linguistic_symbols = self.load_symbols(symbol_value)
        self.remove_non_linguistic_symbols = remove_non_linguistic_symbols

    @staticmethod
    def load_symbols(value: Union[Path, str, Iterable[str]] = None) -> Set:
        if value is None:
            return set()

        if not isinstance(value, (Path, str)):
            # An iterable of symbols was passed in directly.
            return set(value)

        file_path = Path(value)
        if not file_path.exists():
            logging.warning("%s doesn't exist.", file_path)
            return set()

        with file_path.open("r", encoding="utf-8") as f:
            return set(line.rstrip() for line in f)

    def text2tokens(self, line: Union[str, list]) -> List[str]:
        tokens = []
        while len(line) != 0:
            for w in self.non_linguistic_symbols:
                if line.startswith(w):
                    if not self.remove_non_linguistic_symbols:
                        tokens.append(line[: len(w)])
                    line = line[len(w):]
                    break
            else:
                t = line[0]
                if t == " ":
                    t = self.space_symbol
                tokens.append(t)
                line = line[1:]
        return tokens

    def tokens2text(self, tokens: Iterable[str]) -> str:
        tokens = [t if t != self.space_symbol else " " for t in tokens]
        return "".join(tokens)

    def __repr__(self):
        return (
            f"{self.__class__.__name__}("
            f'space_symbol="{self.space_symbol}", '
            f'non_linguistic_symbols="{self.non_linguistic_symbols}"'
            f")"
        )
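
# Usage sketch (illustrative): characters are split one by one, spaces are
# mapped to the space symbol, and tokens2text round-trips the result.
#     >>> tok = CharTokenizer()
#     >>> tok.text2tokens("ab c")
#     ['a', 'b', '<space>', 'c']
#     >>> tok.tokens2text(['a', 'b', '<space>', 'c'])
#     'ab c'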


class Hypothesis(NamedTuple):
    """Hypothesis data type."""
    yseq: np.ndarray
    score: Union[float, np.ndarray] = 0
    scores: Dict[str, Union[float, np.ndarray]] = dict()
    states: Dict[str, Any] = dict()

    def asdict(self) -> dict:
        """Convert data to JSON-friendly dict."""
        return self._replace(
            yseq=self.yseq.tolist(),
            score=float(self.score),
            scores={k: float(v) for k, v in self.scores.items()},
        )._asdict()
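
# Usage sketch (illustrative):
#     >>> hyp = Hypothesis(yseq=np.array([1, 2]), score=0.5)
#     >>> hyp.asdict()["yseq"]
#     [1, 2]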


class TokenIDConverterError(Exception):
    pass


class ONNXRuntimeError(Exception):
    pass


class OrtInferSession:
    def __init__(self, model_file, device_id=-1, intra_op_num_threads=4):
        device_id = str(device_id)
        sess_opt = SessionOptions()
        sess_opt.intra_op_num_threads = intra_op_num_threads
        sess_opt.log_severity_level = 4
        sess_opt.enable_cpu_mem_arena = False
        sess_opt.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL

        cuda_ep = 'CUDAExecutionProvider'
        cuda_provider_options = {
            "device_id": device_id,
            "arena_extend_strategy": "kNextPowerOfTwo",
            "cudnn_conv_algo_search": "EXHAUSTIVE",
            "do_copy_in_default_stream": "true",
        }
        cpu_ep = 'CPUExecutionProvider'
        cpu_provider_options = {
            "arena_extend_strategy": "kSameAsRequested",
        }

        EP_list = []
        if device_id != "-1" and get_device() == 'GPU' \
                and cuda_ep in get_available_providers():
            EP_list = [(cuda_ep, cuda_provider_options)]
        EP_list.append((cpu_ep, cpu_provider_options))

        self._verify_model(model_file)
        self.session = InferenceSession(model_file,
                                        sess_options=sess_opt,
                                        providers=EP_list)

        if device_id != "-1" and cuda_ep not in self.session.get_providers():
            warnings.warn(
                f'{cuda_ep} is not available for the current environment, so inference '
                f'automatically falls back to {cpu_ep}.\n'
                'Please ensure the installed onnxruntime-gpu version matches your CUDA '
                'and cuDNN versions; you can check their compatibility on the official '
                'website: '
                'https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html',
                RuntimeWarning)

    def __call__(self,
                 input_content: List[np.ndarray]) -> np.ndarray:
        input_dict = dict(zip(self.get_input_names(), input_content))
        try:
            return self.session.run(self.get_output_names(), input_dict)
        except Exception as e:
            raise ONNXRuntimeError('ONNXRuntime inference failed.') from e

    def get_input_names(self):
        return [v.name for v in self.session.get_inputs()]

    def get_output_names(self):
        return [v.name for v in self.session.get_outputs()]

    def get_character_list(self, key: str = 'character'):
        # Note: have_key() must be called first to populate self.meta_dict.
        return self.meta_dict[key].splitlines()

    def have_key(self, key: str = 'character') -> bool:
        self.meta_dict = self.session.get_modelmeta().custom_metadata_map
        return key in self.meta_dict

    @staticmethod
    def _verify_model(model_path):
        model_path = Path(model_path)
        if not model_path.exists():
            raise FileNotFoundError(f'{model_path} does not exist.')
        if not model_path.is_file():
            raise FileExistsError(f'{model_path} is not a file.')
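
# Usage sketch (illustrative; "model.onnx" and the feature shape are hypothetical):
#     >>> sess = OrtInferSession("model.onnx", device_id=0)
#     >>> feats = np.random.randn(1, 100, 80).astype(np.float32)
#     >>> outputs = sess([feats])  # list of np.ndarray, one per model output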


def split_to_mini_sentence(words: list, word_limit: int = 20):
    assert word_limit > 1
    if len(words) <= word_limit:
        return [words]
    sentences = []
    length = len(words)
    sentence_len = length // word_limit
    for i in range(sentence_len):
        sentences.append(words[i * word_limit:(i + 1) * word_limit])
    if length % word_limit > 0:
        sentences.append(words[sentence_len * word_limit:])
    return sentences
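
# Usage sketch (illustrative): chunk a word list into pieces of at most word_limit.
#     >>> split_to_mini_sentence(["a", "b", "c", "d", "e"], word_limit=2)
#     [['a', 'b'], ['c', 'd'], ['e']]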


def code_mix_split_words(text: str):
    words = []
    segs = text.split()
    for seg in segs:
        # There is no space in seg.
        current_word = ""
        for c in seg:
            if len(c.encode()) == 1:
                # This is an ASCII char.
                current_word += c
            else:
                # This is a Chinese char.
                if len(current_word) > 0:
                    words.append(current_word)
                    current_word = ""
                words.append(c)
        if len(current_word) > 0:
            words.append(current_word)
    return words
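
# Usage sketch (illustrative): ASCII runs stay whole, CJK characters are split
# one by one.
#     >>> code_mix_split_words("hello世界ok")
#     ['hello', '世', '界', 'ok']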


def isEnglish(text: str):
    return bool(re.search(r"^[a-zA-Z']+$", text))


def join_chinese_and_english(input_list):
    line = ''
    for token in input_list:
        if isEnglish(token):
            line = line + ' ' + token
        else:
            line = line + token
    line = line.strip()
    return line
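
# Usage sketch (illustrative): English tokens are space-separated, CJK tokens
# are joined directly.
#     >>> join_chinese_and_english(['hello', '世', '界'])
#     'hello世界'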


def code_mix_split_words_jieba(seg_dict_file: str):
    jieba.load_userdict(seg_dict_file)

    def _fn(text: str):
        input_list = text.split()
        token_list_all = []
        language_list = []
        token_list_tmp = []
        language_flag = None
        # Group consecutive tokens of the same language together.
        for token in input_list:
            if isEnglish(token) and language_flag == 'Chinese':
                token_list_all.append(token_list_tmp)
                language_list.append('Chinese')
                token_list_tmp = []
            elif not isEnglish(token) and language_flag == 'English':
                token_list_all.append(token_list_tmp)
                language_list.append('English')
                token_list_tmp = []
            token_list_tmp.append(token)
            if isEnglish(token):
                language_flag = 'English'
            else:
                language_flag = 'Chinese'
        if token_list_tmp:
            token_list_all.append(token_list_tmp)
            language_list.append(language_flag)

        # English groups pass through; Chinese groups go through jieba.
        result_list = []
        for token_list_tmp, language_flag in zip(token_list_all, language_list):
            if language_flag == 'English':
                result_list.extend(token_list_tmp)
            else:
                seg_list = jieba.cut(join_chinese_and_english(token_list_tmp), HMM=False)
                result_list.extend(seg_list)
        return result_list

    return _fn
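
# Usage sketch (illustrative; "seg_dict.txt" is a hypothetical user dictionary
# in jieba's userdict format):
#     >>> split_fn = code_mix_split_words_jieba("seg_dict.txt")
#     >>> split_fn("hello 世界")  # English passes through, Chinese goes to jieba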


def read_yaml(yaml_path: Union[str, Path]) -> Dict:
    if not Path(yaml_path).exists():
        raise FileNotFoundError(f'The {yaml_path} does not exist.')

    with open(str(yaml_path), 'rb') as f:
        data = yaml.load(f, Loader=yaml.Loader)
    return data


@functools.lru_cache()
def get_logger(name='funasr_onnx'):
    """Initialize and get a logger by name.

    If the logger has not been initialized, this method will initialize the
    logger by adding one or two handlers; otherwise the initialized logger
    will be returned directly. During initialization, a StreamHandler will
    always be added.

    Args:
        name (str): Logger name.
    Returns:
        logging.Logger: The expected logger.
    """
    logger = logging.getLogger(name)
    if name in logger_initialized:
        return logger

    for logger_name in logger_initialized:
        if name.startswith(logger_name):
            return logger

    formatter = logging.Formatter(
        '[%(asctime)s] %(name)s %(levelname)s: %(message)s',
        datefmt="%Y/%m/%d %H:%M:%S")

    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    logger.addHandler(sh)
    logger_initialized[name] = True
    logger.propagate = False
    logging.basicConfig(level=logging.ERROR)
    return logger
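
# Usage sketch (illustrative; the timestamp shown is hypothetical). Note that
# basicConfig sets the root level to ERROR, so lower the logger's own level to
# see INFO messages:
#     >>> logger = get_logger()
#     >>> logger.setLevel(logging.INFO)
#     >>> logger.info("model loaded")
#     [2024/01/01 00:00:00] funasr_onnx INFO: model loaded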