# timestamp_tools.py

import argparse
import codecs
import logging

import edit_distance
import torch
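
# NOTE: `edit_distance` is the third-party edit-distance package from PyPI;
# its SequenceMatcher/get_opcodes API mirrors difflib but works on arbitrary
# token sequences, which as_cal below relies on.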


def ts_prediction_lfr6_standard(us_alphas,
                                us_peaks,
                                char_list,
                                vad_offset=0.0,
                                force_time_shift=-1.5):
    if not len(char_list):
        return "", []  # keep the (res_txt, res) return shape for empty input
    START_END_THRESHOLD = 5
    MAX_TOKEN_DURATION = 12
    TIME_RATE = 10.0 * 6 / 1000 / 3  # 10 ms frames with LFR 6, 3x upsampled
    if len(us_alphas.shape) == 2:
        _, peaks = us_alphas[0], us_peaks[0]  # support inference batch_size=1 only
    else:
        _, peaks = us_alphas, us_peaks
    num_frames = peaks.shape[0]
    if char_list[-1] == '</s>':
        char_list = char_list[:-1]
    timestamp_list = []
    new_char_list = []
    # for a bicif model trained with large data, cif2 actually fires when a
    # character starts, so treat the frames between two peaks as the duration
    # of the former token
    fire_place = torch.where(peaks > 1.0 - 1e-4)[0].cpu().numpy() + force_time_shift  # total offset
    num_peak = len(fire_place)
    assert num_peak == len(char_list) + 1  # number of peaks is supposed to be number of tokens + 1
    # begin silence
    if fire_place[0] > START_END_THRESHOLD:
        timestamp_list.append([0.0, fire_place[0] * TIME_RATE])
        new_char_list.append('<sil>')
    # token timestamps
    for i in range(len(fire_place) - 1):
        new_char_list.append(char_list[i])
        if MAX_TOKEN_DURATION < 0 or fire_place[i + 1] - fire_place[i] <= MAX_TOKEN_DURATION:
            timestamp_list.append([fire_place[i] * TIME_RATE, fire_place[i + 1] * TIME_RATE])
        else:
            # an over-long gap is split into the token itself and a trailing
            # <sil> covering the remaining zero-weight frames
            _split = fire_place[i] + MAX_TOKEN_DURATION
            timestamp_list.append([fire_place[i] * TIME_RATE, _split * TIME_RATE])
            timestamp_list.append([_split * TIME_RATE, fire_place[i + 1] * TIME_RATE])
            new_char_list.append('<sil>')
    # tail token and end silence
    if num_frames - fire_place[-1] > START_END_THRESHOLD:
        _end = (num_frames + fire_place[-1]) * 0.5
        timestamp_list[-1][1] = _end * TIME_RATE
        timestamp_list.append([_end * TIME_RATE, num_frames * TIME_RATE])
        new_char_list.append('<sil>')
    else:
        timestamp_list[-1][1] = num_frames * TIME_RATE
    if vad_offset:  # add the VAD segment offset for models run behind a VAD
        for i in range(len(timestamp_list)):
            timestamp_list[i][0] = timestamp_list[i][0] + vad_offset / 1000.0
            timestamp_list[i][1] = timestamp_list[i][1] + vad_offset / 1000.0
    res_txt = ""
    for char, timestamp in zip(new_char_list, timestamp_list):
        res_txt += "{} {} {};".format(char,
                                      str(timestamp[0] + 0.0005)[:5],
                                      str(timestamp[1] + 0.0005)[:5])
    res = []
    for char, timestamp in zip(new_char_list, timestamp_list):
        if char != '<sil>':
            res.append([int(timestamp[0] * 1000), int(timestamp[1] * 1000)])
    return res_txt, res
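

# A minimal usage sketch of ts_prediction_lfr6_standard. The tensors below are
# fabricated for illustration; in practice us_alphas/us_peaks come from a
# CIF-based acoustic model (e.g. a bicif Paraformer) already upsampled 3x.
#
#     peaks = torch.zeros(30)
#     peaks[[2, 8, 15, 22]] = 1.0  # 4 fire points -> 3 tokens + 1 boundary
#     alphas = torch.rand(30)
#     txt, ts = ts_prediction_lfr6_standard(alphas, peaks, ['a', 'b', 'c'])
#     # txt ~ "a 0.010 0.130;b 0.130 0.270;c 0.270 0.505;<sil> 0.505 0.600;"
#     # ts  ~ [[10, 130], [130, 270], [270, 505]]  (milliseconds; float
#     #       truncation may nudge edge values by 1 ms)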


def time_stamp_sentence(punc_id_list, time_stamp_postprocessed, text_postprocessed):
    res = []
    if text_postprocessed is None or time_stamp_postprocessed is None:
        return res
    if len(time_stamp_postprocessed) == 0 or len(text_postprocessed) == 0:
        return res
    if punc_id_list is None or len(punc_id_list) == 0 \
            or len(punc_id_list) != len(time_stamp_postprocessed):
        # fall back to a single span covering the whole utterance
        res.append({
            'text': text_postprocessed.split(),
            'start': time_stamp_postprocessed[0][0],
            'end': time_stamp_postprocessed[-1][1],
        })
        return res
    sentence_text = ''
    sentence_start = time_stamp_postprocessed[0][0]
    texts = text_postprocessed.split()
    for i in range(len(punc_id_list)):
        sentence_text += texts[i]
        if punc_id_list[i] == 2:  # comma
            sentence_text += ','
        elif punc_id_list[i] == 3:  # full stop
            sentence_text += '.'
        if punc_id_list[i] in (2, 3):
            res.append({
                'text': sentence_text,
                'start': sentence_start,
                'end': time_stamp_postprocessed[i][1],
            })
            sentence_text = ''
            sentence_start = time_stamp_postprocessed[i][1]
    return res
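

# Example with hypothetical inputs: following the convention used above, a
# punc id of 2 marks a comma and 3 a full stop; timestamps are in milliseconds.
#
#     time_stamp_sentence([1, 2, 1, 3],
#                         [[0, 300], [300, 600], [600, 900], [900, 1200]],
#                         "ni hao shi jie")
#     # -> [{'text': 'nihao,', 'start': 0, 'end': 600},
#     #     {'text': 'shijie.', 'start': 600, 'end': 1200}]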


class AverageShiftCalculator():
    def __init__(self):
        logging.warning("Calculating average shift.")

    def __call__(self, file1, file2):
        uttid_list1, ts_dict1 = self.read_timestamps(file1)
        uttid_list2, ts_dict2 = self.read_timestamps(file2)
        uttid_intersection = self._intersection(uttid_list1, uttid_list2)
        res = self.as_cal(uttid_intersection, ts_dict1, ts_dict2)
        logging.warning("Average shift of {} and {}: {}.".format(file1, file2, str(res)[:8]))
        logging.warning("Following timestamp pair differs most: {}, shift: {}.".format(
            self.max_shift_uttid, self.max_shift))

    def _intersection(self, list1, list2):
        set1 = set(list1)
        set2 = set(list2)
        if set1 == set2:
            logging.warning("Uttids are identical in both files.")
            return set1
        itsc = list(set1 & set2)
        logging.warning("Uttids differ: file1 {}, file2 {}, in common {}.".format(
            len(list1), len(list2), len(itsc)))
        return itsc

    def read_timestamps(self, file):
        # read a timestamp file in the standard format written by
        # ts_prediction_lfr6_standard: "<uttid> <char> <start> <end>;..."
        uttid_list = []
        ts_dict = {}
        with codecs.open(file, 'r') as fin:
            for line in fin:
                text = ''
                ts_list = []
                line = line.rstrip()
                uttid = line.split()[0]
                uttid_list.append(uttid)
                body = " ".join(line.split()[1:])
                for pd in body.split(';'):
                    if not len(pd):
                        continue
                    char, start, end = pd.lstrip(" ").split(' ')
                    text += char + ','
                    ts_list.append((float(start), float(end)))
                ts_dict[uttid] = (text[:-1], ts_list)
        logging.warning("File {} read done.".format(file))
        return uttid_list, ts_dict
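
    # Sketch of what read_timestamps returns for a file containing the single
    # line "utt1 a 0.010 0.130;b 0.130 0.270;" (the path is a placeholder):
    #
    #     uttid_list, ts_dict = asc.read_timestamps('ts.txt')
    #     # uttid_list -> ['utt1']
    #     # ts_dict    -> {'utt1': ('a,b', [(0.01, 0.13), (0.13, 0.27)])}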

    def _shift(self, filtered_timestamp_list1, filtered_timestamp_list2):
        # accumulate the absolute start/end differences over all aligned tokens
        shift_time = 0.0
        for fts1, fts2 in zip(filtered_timestamp_list1, filtered_timestamp_list2):
            shift_time += abs(fts1[0] - fts2[0]) + abs(fts1[1] - fts2[1])
        num_tokens = len(filtered_timestamp_list1)
        return shift_time, num_tokens

    def as_cal(self, uttid_list, ts_dict1, ts_dict2):
        # calculate the average shift between timestamp files 1 and 2;
        # when the character sequences differ, align them with edit distance
        # and compare timestamps only over the matched characters
        self._accumulated_shift = 0.0
        self._accumulated_tokens = 0
        self.max_shift = 0
        self.max_shift_uttid = None
        for uttid in uttid_list:
            (t1, ts1) = ts_dict1[uttid]
            (t2, ts2) = ts_dict2[uttid]
            _align, _align2, _align3 = [], [], []
            fts1, fts2 = [], []
            _t1, _t2 = [], []
            sm = edit_distance.SequenceMatcher(t1.split(','), t2.split(','))
            s = sm.get_opcodes()
            for j in range(len(s)):
                if s[j][0] == "replace" or s[j][0] == "insert":
                    _align.append(0)
                if s[j][0] == "replace" or s[j][0] == "delete":
                    _align3.append(0)
                elif s[j][0] == "equal":
                    _align.append(1)
                    _align3.append(1)
                else:
                    continue
            # use _align to select the matched entries of t2
            for a, ts, t in zip(_align, ts2, t2.split(',')):
                if a:
                    fts2.append(ts)
                    _t2.append(t)
            sm2 = edit_distance.SequenceMatcher(t2.split(','), t1.split(','))
            s = sm2.get_opcodes()
            for j in range(len(s)):
                if s[j][0] == "replace" or s[j][0] == "insert":
                    _align2.append(0)
                elif s[j][0] == "equal":
                    _align2.append(1)
                else:
                    continue
            # use _align3 to select the matched entries of t1
            for a, ts, t in zip(_align3, ts1, t1.split(',')):
                if a:
                    fts1.append(ts)
                    _t1.append(t)
            if len(fts1) == len(fts2):
                shift_time, num_tokens = self._shift(fts1, fts2)
                self._accumulated_shift += shift_time
                self._accumulated_tokens += num_tokens
                if shift_time / num_tokens > self.max_shift:
                    self.max_shift = shift_time / num_tokens
                    self.max_shift_uttid = uttid
            else:
                logging.warning("Length mismatch for {}; skipped.".format(uttid))
        return self._accumulated_shift / self._accumulated_tokens
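

# Minimal usage sketch: compare two timestamp files in the res_txt format
# produced by ts_prediction_lfr6_standard (both paths are placeholders):
#
#     asc = AverageShiftCalculator()
#     asc('timestamps_ref.txt', 'timestamps_hyp.txt')
#     # logs the average per-token shift and the utterance that differs most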


SUPPORTED_MODES = ['cal_aas']


def main(args):
    if args.mode == 'cal_aas':
        asc = AverageShiftCalculator()
        asc(args.input, args.input2)
    else:
        logging.error("Mode {} not in SUPPORTED_MODES: {}.".format(args.mode, SUPPORTED_MODES))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='timestamp tools')
    parser.add_argument('--mode',
                        default=None,
                        type=str,
                        choices=SUPPORTED_MODES,
                        help='timestamp related toolbox')
    parser.add_argument('--input', default=None, type=str, help='input file path')
    parser.add_argument('--output', default=None, type=str, help='output file name')
    parser.add_argument('--input2', default=None, type=str, help='input2 file path')
    parser.add_argument('--kaldi-ts-type',
                        default='v2',
                        type=str,
                        choices=['v0', 'v1', 'v2'],
                        help='kaldi timestamp format to write')
    args = parser.parse_args()
    main(args)
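
# Example CLI invocation (paths are placeholders):
#     python timestamp_tools.py --mode cal_aas --input ref.txt --input2 hyp.txt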