data_loader_utils.py

import json
import logging
import re
import string
import sys
from collections import defaultdict, namedtuple
from typing import Dict, List, Optional, Set, Tuple
from unicodedata import category

EOS_TYPE = "EOS"
PUNCT_TYPE = "PUNCT"
PLAIN_TYPE = "PLAIN"

Instance = namedtuple('Instance', 'token_type un_normalized normalized')
known_types = [
    "PLAIN",
    "DATE",
    "CARDINAL",
    "LETTERS",
    "VERBATIM",
    "MEASURE",
    "DECIMAL",
    "ORDINAL",
    "DIGIT",
    "MONEY",
    "TELEPHONE",
    "ELECTRONIC",
    "FRACTION",
    "TIME",
    "ADDRESS",
]
def _load_kaggle_text_norm_file(file_path: str) -> List[Instance]:
    """
    https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish
    Loads a text file in the Kaggle Google text normalization format:
    <semiotic class>\t<unnormalized text>\t<`self` if trivial class, otherwise normalized text>

    E.g.
    PLAIN   Brillantaisia   <self>
    PLAIN   is      <self>
    PLAIN   a       <self>
    PLAIN   genus   <self>
    PLAIN   of      <self>
    PLAIN   plant   <self>
    PLAIN   in      <self>
    PLAIN   family  <self>
    PLAIN   Acanthaceae     <self>
    PUNCT   .       sil
    <eos>   <eos>

    Args:
        file_path: file path to text file

    Returns: flat list of instances
    """
    res = []
    with open(file_path, 'r') as fp:
        for line in fp:
            parts = line.strip().split("\t")
            if parts[0] == "<eos>":
                res.append(Instance(token_type=EOS_TYPE, un_normalized="", normalized=""))
            else:
                l_type, l_token, l_normalized = parts
                l_token = l_token.lower()
                l_normalized = l_normalized.lower()
                if l_type == PLAIN_TYPE:
                    # trivial class: the normalized form is the token itself
                    res.append(Instance(token_type=l_type, un_normalized=l_token, normalized=l_token))
                elif l_type != PUNCT_TYPE:
                    # punctuation tokens are dropped; all other classes keep their normalized form
                    res.append(Instance(token_type=l_type, un_normalized=l_token, normalized=l_normalized))
    return res
def load_files(file_paths: List[str], load_func=_load_kaggle_text_norm_file) -> List[Instance]:
    """
    Load the given list of text files using the `load_func` function.

    Args:
        file_paths: list of file paths
        load_func: loading function

    Returns: flat list of instances
    """
    res = []
    for file_path in file_paths:
        res.extend(load_func(file_path=file_path))
    return res
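# Usage sketch (the file name below is hypothetical, not shipped with this module):
# load one or more Kaggle-format files into a single flat list of Instance tuples.
#
#   instances = load_files(["output-00001-of-00100"])
#   print(instances[0])
#   # e.g. Instance(token_type='PLAIN', un_normalized='brillantaisia', normalized='brillantaisia')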
def clean_generic(text: str) -> str:
    """
    Cleans text without affecting semiotic classes.

    Args:
        text: string

    Returns: cleaned string
    """
    text = text.strip()
    text = text.lower()
    return text
def evaluate(preds: List[str], labels: List[str], input: Optional[List[str]] = None, verbose: bool = True) -> float:
    """
    Evaluates accuracy given predictions and labels.

    Args:
        preds: predictions
        labels: labels
        input: optional, only needed for verbosity
        verbose: if true, prints [input], golden labels and predictions for mismatches

    Returns accuracy
    """
    acc = 0
    nums = len(preds)
    for i in range(nums):
        pred_norm = clean_generic(preds[i])
        label_norm = clean_generic(labels[i])
        if pred_norm == label_norm:
            acc = acc + 1
        elif verbose:
            if input:
                print(f"input: {json.dumps(input[i])}")
            print(f"gold: {json.dumps(label_norm)}")
            print(f"pred: {json.dumps(pred_norm)}")
    return acc / nums
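# Usage sketch (toy data, not from the original module): accuracy is the share of
# predictions that match the gold labels after `clean_generic` stripping/lower-casing.
#
#   acc = evaluate(preds=["Twelve ", "two thousand"], labels=["twelve", "two thousand one"])
#   # prints the mismatching gold/pred pair and returns 0.5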
def training_data_to_tokens(
    data: List[Instance], category: Optional[str] = None
) -> Dict[str, Tuple[List[str], List[str]]]:
    """
    Filters the instance list by category (if provided) and converts it into a map from token type to
    lists of un_normalized and normalized strings.

    Args:
        data: list of instances
        category: optional semiotic class category name

    Returns Dict: token type -> (list of un_normalized strings, list of normalized strings)
    """
    # note: the `category` argument shadows `unicodedata.category` (imported above) inside this function
    result = defaultdict(lambda: ([], []))
    for instance in data:
        if instance.token_type != EOS_TYPE:
            if category is None or instance.token_type == category:
                result[instance.token_type][0].append(instance.un_normalized)
                result[instance.token_type][1].append(instance.normalized)
    return result
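# Usage sketch (toy instances, not from the original module): group tokens by semiotic
# class, e.g. to build per-class training pairs.
#
#   data = [
#       Instance(token_type="PLAIN", un_normalized="genus", normalized="genus"),
#       Instance(token_type="CARDINAL", un_normalized="12", normalized="twelve"),
#       Instance(token_type=EOS_TYPE, un_normalized="", normalized=""),
#   ]
#   training_data_to_tokens(data, category="CARDINAL")
#   # -> {'CARDINAL': (['12'], ['twelve'])} (as a defaultdict)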
def training_data_to_sentences(data: List[Instance]) -> Tuple[List[str], List[str], List[Set[str]]]:
    """
    Takes an instance list and creates lists of sentences split at EOS tokens.

    Args:
        data: list of instances

    Returns (list of unnormalized sentences, list of normalized sentences, list of sets of categories in a sentence)
    """
    # split data at EOS boundaries
    sentences = []
    sentence = []
    categories = []
    sentence_categories = set()
    for instance in data:
        if instance.token_type == EOS_TYPE:
            sentences.append(sentence)
            sentence = []
            categories.append(sentence_categories)
            sentence_categories = set()
        else:
            sentence.append(instance)
            sentence_categories.add(instance.token_type)
    un_normalized = [" ".join([instance.un_normalized for instance in sentence]) for sentence in sentences]
    normalized = [" ".join([instance.normalized for instance in sentence]) for sentence in sentences]
    return un_normalized, normalized, categories
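# Usage sketch (reusing the toy `data` list from the sketch above): reassemble
# sentences at EOS boundaries.
#
#   sents_in, sents_out, cats = training_data_to_sentences(data)
#   # -> (['genus 12'], ['genus twelve'], [{'PLAIN', 'CARDINAL'}])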
def post_process_punctuation(text: str) -> str:
    """
    Normalizes quotes and spaces.

    Args:
        text: text

    Returns: text with normalized spaces and quotes
    """
    text = (
        text.replace('( ', '(')
        .replace(' )', ')')
        .replace('{ ', '{')
        .replace(' }', '}')
        .replace('[ ', '[')
        .replace(' ]', ']')
        .replace('  ', ' ')
        .replace('”', '"')
        .replace("’", "'")
        .replace("»", '"')
        .replace("«", '"')
        .replace("\\", "")
        .replace("„", '"')
        .replace("´", "'")
        .replace('“', '"')
        .replace("‘", "'")
        .replace('`', "'")
        .replace('- -', "--")
    )
    # remove the space before closing punctuation marks
    for punct in "!,.:;?":
        text = text.replace(f' {punct}', punct)
    return text.strip()
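# Example of the intended effect (illustrative string, not from the original module):
#
#   post_process_punctuation('he said : " hello ( world ) "')
#   # -> 'he said: " hello (world) "'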
def pre_process(text: str) -> str:
    """
    Optional text preprocessing before normalization (part of the TTS TN pipeline).

    Args:
        text: string that may include semiotic classes

    Returns: text with spaces around punctuation marks
    """
    space_both = '[]'
    for punct in space_both:
        text = text.replace(punct, ' ' + punct + ' ')
    # remove extra spaces
    text = re.sub(r' +', ' ', text)
    return text
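# Example of the intended effect (illustrative string): brackets get surrounded by spaces
# so downstream tokenization treats them as separate tokens.
#
#   pre_process("a[b]c")
#   # -> 'a [ b ] c'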
def load_file(file_path: str) -> List[str]:
    """
    Loads the given text file with separate lines into a list of strings.

    Args:
        file_path: file path

    Returns: flat list of strings
    """
    res = []
    with open(file_path, 'r') as fp:
        for line in fp:
            # note: lines keep their trailing newline characters
            res.append(line)
    return res
def write_file(file_path: str, data: List[str]):
    """
    Writes out a list of strings to a file, one item per line.

    Args:
        file_path: file path
        data: list of strings
    """
    with open(file_path, 'w') as fp:
        for line in data:
            fp.write(line + '\n')
def post_process_punct(input: str, normalized_text: str, add_unicode_punct: bool = False) -> str:
    """
    Post-processing of the normalized output to match the input in terms of spaces around punctuation marks.

    After NN normalization, Moses detokenization puts a space after punctuation marks and attaches an
    opening quote "'" to the word to the right.
    E.g., the input to the TN NN model is "12 test' example"; after normalization and detokenization it
    becomes "twelve test 'example" (the quote is treated as an opening quote, which no longer matches the
    input and can cause issues during TTS voice generation).
    This function matches the punctuation and spaces of the normalized text to the input sequence:
    "12 test' example" -> "twelve test 'example" -> "twelve test' example" (the quote is shifted back to match the input).

    Args:
        input: input text (original input to the NN, before normalization or tokenization)
        normalized_text: output text (output of the TN NN model)
        add_unicode_punct: set to True to handle unicode punctuation marks in addition to the default
            string.punctuation (increases post-processing time)
    """
    # in the post-processing WFST graph "``" is replaced with a '"' quote (otherwise single quotes "`"
    # won't be handled correctly); this function fixes spaces around quotes based on the input sequence,
    # so the same double-quote replacement is applied here to keep the two texts comparable
    if "``" in input and "``" not in normalized_text:
        input = input.replace("``", '"')
    input = [x for x in input]
    normalized_text = [x for x in normalized_text]
    punct_marks = [x for x in string.punctuation if x in input]
    if add_unicode_punct:
        punct_unicode = [
            chr(i)
            for i in range(sys.maxunicode)
            if category(chr(i)).startswith("P") and chr(i) not in string.punctuation and chr(i) in input
        ]
        punct_marks = punct_marks + punct_unicode

    def _is_valid(idx_out, idx_in, normalized_text, input):
        """Check if the previous or the next word matches (for cases when punctuation marks are part of a
        semiotic token, i.e. some punctuation can be missing in the normalized text)."""
        return (idx_out > 0 and idx_in > 0 and normalized_text[idx_out - 1] == input[idx_in - 1]) or (
            idx_out < len(normalized_text) - 1
            and idx_in < len(input) - 1
            and normalized_text[idx_out + 1] == input[idx_in + 1]
        )

    for punct in punct_marks:
        try:
            equal = True
            if input.count(punct) != normalized_text.count(punct):
                equal = False
            idx_in, idx_out = 0, 0
            while punct in input[idx_in:]:
                idx_out = normalized_text.index(punct, idx_out)
                idx_in = input.index(punct, idx_in)
                if not equal and not _is_valid(idx_out, idx_in, normalized_text, input):
                    idx_in += 1
                    continue
                if idx_in > 0 and idx_out > 0:
                    # fix the space (or its absence) before the punctuation mark
                    if normalized_text[idx_out - 1] == " " and input[idx_in - 1] != " ":
                        normalized_text[idx_out - 1] = ""
                    elif normalized_text[idx_out - 1] != " " and input[idx_in - 1] == " ":
                        normalized_text[idx_out - 1] += " "
                if idx_in < len(input) - 1 and idx_out < len(normalized_text) - 1:
                    # fix the space (or its absence) after the punctuation mark
                    if normalized_text[idx_out + 1] == " " and input[idx_in + 1] != " ":
                        normalized_text[idx_out + 1] = ""
                    elif normalized_text[idx_out + 1] != " " and input[idx_in + 1] == " ":
                        normalized_text[idx_out] = normalized_text[idx_out] + " "
                idx_out += 1
                idx_in += 1
        except Exception:
            logging.debug(f"Skipping post-processing of {''.join(normalized_text)} for '{punct}'")
    normalized_text = "".join(normalized_text)
    return re.sub(r' +', ' ', normalized_text)
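# Usage sketch (illustrative strings, mirroring the docstring example): restore the input's
# spacing around the quote that Moses detokenization moved.
#
#   post_process_punct(input="12 test' example", normalized_text="twelve test 'example")
#   # -> "twelve test' example"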