data_loader_utils.py

# Copyright NeMo (https://github.com/NVIDIA/NeMo). All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import logging
import re
import string
import sys
from collections import defaultdict, namedtuple
from typing import Dict, List, Optional, Set, Tuple
from unicodedata import category

EOS_TYPE = "EOS"
PUNCT_TYPE = "PUNCT"
PLAIN_TYPE = "PLAIN"

Instance = namedtuple('Instance', 'token_type un_normalized normalized')

known_types = [
    "PLAIN",
    "DATE",
    "CARDINAL",
    "LETTERS",
    "VERBATIM",
    "MEASURE",
    "DECIMAL",
    "ORDINAL",
    "DIGIT",
    "MONEY",
    "TELEPHONE",
    "ELECTRONIC",
    "FRACTION",
    "TIME",
    "ADDRESS",
]


def _load_kaggle_text_norm_file(file_path: str) -> List[Instance]:
    """
    https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish
    Loads a text file in the Kaggle Google text normalization file format:
    <semiotic class>\t<unnormalized text>\t<`self` if trivial class or normalized text>
    E.g.
    PLAIN Brillantaisia <self>
    PLAIN is <self>
    PLAIN a <self>
    PLAIN genus <self>
    PLAIN of <self>
    PLAIN plant <self>
    PLAIN in <self>
    PLAIN family <self>
    PLAIN Acanthaceae <self>
    PUNCT . sil
    <eos> <eos>

    Args:
        file_path: file path to text file

    Returns: flat list of instances
    """
    res = []
    with open(file_path, 'r') as fp:
        for line in fp:
            parts = line.strip().split("\t")
            if parts[0] == "<eos>":
                res.append(Instance(token_type=EOS_TYPE, un_normalized="", normalized=""))
            else:
                l_type, l_token, l_normalized = parts
                l_token = l_token.lower()
                l_normalized = l_normalized.lower()
                if l_type == PLAIN_TYPE:
                    res.append(Instance(token_type=l_type, un_normalized=l_token, normalized=l_token))
                elif l_type != PUNCT_TYPE:
                    res.append(Instance(token_type=l_type, un_normalized=l_token, normalized=l_normalized))
    return res
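

# Example usage (a minimal sketch; `sample.tsv` is a hypothetical path assumed
# to contain the Kaggle-format lines shown in the docstring above).
def _example_load_kaggle_file():
    instances = _load_kaggle_text_norm_file("sample.tsv")
    # PLAIN tokens keep their surface form as the normalization, PUNCT rows are
    # dropped, and the `<eos>` line becomes an EOS_TYPE instance.
    for instance in instances:
        print(instance.token_type, instance.un_normalized, instance.normalized)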


def load_files(file_paths: List[str], load_func=_load_kaggle_text_norm_file) -> List[Instance]:
    """
    Loads the given list of text files using the `load_func` function.

    Args:
        file_paths: list of file paths
        load_func: loading function

    Returns: flat list of instances
    """
    res = []
    for file_path in file_paths:
        res.extend(load_func(file_path=file_path))
    return res


def clean_generic(text: str) -> str:
    """
    Cleans text without affecting semiotic classes.

    Args:
        text: string

    Returns: cleaned string
    """
    text = text.strip()
    text = text.lower()
    return text


def evaluate(preds: List[str], labels: List[str], input: Optional[List[str]] = None, verbose: bool = True) -> float:
    """
    Evaluates accuracy given predictions and labels.

    Args:
        preds: predictions
        labels: labels
        input: optional, only needed for verbosity
        verbose: if true, prints [input], golden labels, and predictions for mismatches

    Returns: accuracy
    """
    acc = 0
    nums = len(preds)
    for i in range(nums):
        pred_norm = clean_generic(preds[i])
        label_norm = clean_generic(labels[i])
        if pred_norm == label_norm:
            acc = acc + 1
        elif verbose:
            if input:
                print(f"inpu: {json.dumps(input[i])}")
            print(f"gold: {json.dumps(label_norm)}")
            print(f"pred: {json.dumps(pred_norm)}")
    return acc / nums
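

# Example (a minimal sketch with made-up strings): comparison is case- and
# whitespace-insensitive via `clean_generic`, so the accuracy here would be 0.5.
def _example_evaluate():
    preds = ["Twelve Dollars", "the first of february"]
    labels = ["twelve dollars", "february first"]
    return evaluate(preds, labels, input=["$12", "Feb. 1"], verbose=True)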


def training_data_to_tokens(
    data: List[Instance], category: Optional[str] = None
) -> Dict[str, Tuple[List[str], List[str]]]:
    """
    Filters the instance list by category, if provided, and converts it into a map
    from token type to lists of un_normalized and normalized strings.

    Args:
        data: list of instances
        category: optional semiotic class category name

    Returns: Dict: token type -> (list of un_normalized strings, list of normalized strings)
    """
    result = defaultdict(lambda: ([], []))
    for instance in data:
        if instance.token_type != EOS_TYPE:
            if category is None or instance.token_type == category:
                result[instance.token_type][0].append(instance.un_normalized)
                result[instance.token_type][1].append(instance.normalized)
    return result
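

# A short sketch (made-up instances): tokens are grouped per semiotic class,
# with unnormalized and normalized forms kept in parallel lists.
def _example_training_data_to_tokens():
    data = [
        Instance(token_type="MONEY", un_normalized="$12", normalized="twelve dollars"),
        Instance(token_type=EOS_TYPE, un_normalized="", normalized=""),
    ]
    tokens = training_data_to_tokens(data)
    # tokens["MONEY"] == (["$12"], ["twelve dollars"])
    return tokens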


def training_data_to_sentences(data: List[Instance]) -> Tuple[List[str], List[str], List[Set[str]]]:
    """
    Takes an instance list and creates lists of sentences split at EOS tokens.

    Args:
        data: list of instances

    Returns: (list of unnormalized sentences, list of normalized sentences,
        list of sets of categories in a sentence)
    """
    # split data at EOS boundaries
    sentences = []
    sentence = []
    categories = []
    sentence_categories = set()
    for instance in data:
        if instance.token_type == EOS_TYPE:
            sentences.append(sentence)
            sentence = []
            categories.append(sentence_categories)
            sentence_categories = set()
        else:
            sentence.append(instance)
            sentence_categories.add(instance.token_type)
    un_normalized = [" ".join([instance.un_normalized for instance in sentence]) for sentence in sentences]
    normalized = [" ".join([instance.normalized for instance in sentence]) for sentence in sentences]
    return un_normalized, normalized, categories
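

# A short sketch of the expected shape (made-up tokens): two instances followed
# by an EOS marker collapse into one sentence pair plus its category set.
def _example_training_data_to_sentences():
    data = [
        Instance(token_type="PLAIN", un_normalized="costs", normalized="costs"),
        Instance(token_type="MONEY", un_normalized="$12", normalized="twelve dollars"),
        Instance(token_type=EOS_TYPE, un_normalized="", normalized=""),
    ]
    un_norm, norm, cats = training_data_to_sentences(data)
    # un_norm == ["costs $12"], norm == ["costs twelve dollars"], cats == [{"PLAIN", "MONEY"}]
    return un_norm, norm, cats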


def post_process_punctuation(text: str) -> str:
    """
    Normalizes quotes and spaces.

    Args:
        text: text

    Returns: text with normalized spaces and quotes
    """
    text = (
        text.replace('( ', '(')
        .replace(' )', ')')
        .replace('{ ', '{')
        .replace(' }', '}')
        .replace('[ ', '[')
        .replace(' ]', ']')
        .replace('  ', ' ')
        .replace('”', '"')
        .replace("’", "'")
        .replace("»", '"')
        .replace("«", '"')
        .replace("\\", "")
        .replace("„", '"')
        .replace("´", "'")
        .replace('“', '"')
        .replace("‘", "'")
        .replace('`', "'")
        .replace('- -', "--")
    )
    for punct in "!,.:;?":
        text = text.replace(f' {punct}', punct)
    return text.strip()
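

# Example (a minimal sketch): stray spaces before punctuation are removed and
# curly quotes are folded to their plain ASCII forms.
def _example_post_process_punctuation():
    return post_process_punctuation('He said , “hello ( world )” .')
    # -> 'He said, "hello (world)".'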


def pre_process(text: str) -> str:
    """
    Optional text preprocessing before normalization (part of the TTS TN pipeline).

    Args:
        text: string that may include semiotic classes

    Returns: text with spaces around punctuation marks
    """
    space_both = '[]'
    for punct in space_both:
        text = text.replace(punct, ' ' + punct + ' ')
    # remove extra space
    text = re.sub(r' +', ' ', text)
    return text
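

# Example (a minimal sketch): square brackets are padded with spaces so the
# normalizer sees them as separate tokens.
def _example_pre_process():
    return pre_process("a[b]c")
    # -> 'a [ b ] c'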


def load_file(file_path: str) -> List[str]:
    """
    Loads the given text file, one entry per line, into a list of strings.

    Args:
        file_path: file path

    Returns: flat list of strings
    """
    res = []
    with open(file_path, 'r') as fp:
        for line in fp:
            res.append(line)
    return res


def write_file(file_path: str, data: List[str]):
    """
    Writes a list of strings out to a file, one string per line.

    Args:
        file_path: file path
        data: list of strings
    """
    with open(file_path, 'w') as fp:
        for line in data:
            fp.write(line + '\n')


def post_process_punct(input: str, normalized_text: str, add_unicode_punct: bool = False) -> str:
    """
    Post-processes the normalized output to match the input in terms of spaces around punctuation marks.

    After NN normalization, Moses detokenization puts a space after punctuation marks
    and attaches an opening quote "'" to the word to its right.
    E.g., the input to the TN NN model is "12 test' example";
    after normalization and detokenization -> "twelve test 'example" (the quote is treated as an opening
    quote, but it doesn't match the input and can cause issues during TTS voice generation).
    This function matches the punctuation and spaces of the normalized text to the input sequence:
    "12 test' example" -> "twelve test 'example" -> "twelve test' example" (the quote is shifted to match the input).

    Args:
        input: input text (original input to the NN, before normalization or tokenization)
        normalized_text: output text (output of the TN NN model)
        add_unicode_punct: set to True to handle unicode punctuation marks in addition to the
            default string.punctuation (increases post-processing time)
    """
    # In the post-processing WFST graph "``" is replaced with '"' quotes (otherwise single quotes "`"
    # won't be handled correctly). This function fixes spaces around them based on the input sequence,
    # so the same double-quote replacement is made here to keep the two representations consistent.
    if "``" in input and "``" not in normalized_text:
        input = input.replace("``", '"')
    input = [x for x in input]
    normalized_text = [x for x in normalized_text]
    punct_marks = [x for x in string.punctuation if x in input]
    if add_unicode_punct:
        punct_unicode = [
            chr(i)
            for i in range(sys.maxunicode)
            if category(chr(i)).startswith("P") and chr(i) not in punct_marks and chr(i) in input
        ]
        punct_marks.extend(punct_unicode)

    def _is_valid(idx_out, idx_in, normalized_text, input):
        """Check if the previous or next word matches (for cases when punctuation marks are part of a
        semiotic token, i.e. some punctuation can be missing in the normalized text)."""
        return (idx_out > 0 and idx_in > 0 and normalized_text[idx_out - 1] == input[idx_in - 1]) or (
            idx_out < len(normalized_text) - 1
            and idx_in < len(input) - 1
            and normalized_text[idx_out + 1] == input[idx_in + 1]
        )

    for punct in punct_marks:
        try:
            equal = True
            if input.count(punct) != normalized_text.count(punct):
                equal = False
            idx_in, idx_out = 0, 0
            while punct in input[idx_in:]:
                idx_out = normalized_text.index(punct, idx_out)
                idx_in = input.index(punct, idx_in)
                if not equal and not _is_valid(idx_out, idx_in, normalized_text, input):
                    idx_in += 1
                    continue
                if idx_in > 0 and idx_out > 0:
                    if normalized_text[idx_out - 1] == " " and input[idx_in - 1] != " ":
                        normalized_text[idx_out - 1] = ""
                    elif normalized_text[idx_out - 1] != " " and input[idx_in - 1] == " ":
                        normalized_text[idx_out - 1] += " "
                if idx_in < len(input) - 1 and idx_out < len(normalized_text) - 1:
                    if normalized_text[idx_out + 1] == " " and input[idx_in + 1] != " ":
                        normalized_text[idx_out + 1] = ""
                    elif normalized_text[idx_out + 1] != " " and input[idx_in + 1] == " ":
                        normalized_text[idx_out] = normalized_text[idx_out] + " "
                idx_out += 1
                idx_in += 1
        except ValueError:
            # list.index raises ValueError when the mark cannot be aligned between the two sequences
            logging.debug(f"Skipping post-processing of {''.join(normalized_text)} for '{punct}'")
    normalized_text = "".join(normalized_text)
    return re.sub(r' +', ' ', normalized_text)
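

# Example (a minimal sketch mirroring the docstring): the quote that Moses
# detokenization attached to the following word is shifted back to match the input.
def _example_post_process_punct():
    return post_process_punct(input="12 test' example", normalized_text="twelve test 'example")
    # -> "twelve test' example"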