# timestamp_tools.py
import torch
import codecs
import logging
import argparse
import numpy as np
import edit_distance
from itertools import zip_longest


def ts_prediction_lfr6_standard(us_alphas,
                                us_peaks,
                                char_list,
                                vad_offset=0.0,
                                force_time_shift=-1.5,
                                sil_in_str=True):
    if not len(char_list):
        return "", []
    START_END_THRESHOLD = 5
    MAX_TOKEN_DURATION = 12
    TIME_RATE = 10.0 * 6 / 1000 / 3  # 10 ms frames, LFR 6, 3x upsampling -> seconds per frame
    if len(us_alphas.shape) == 2:
        _, peaks = us_alphas[0], us_peaks[0]  # support inference batch_size=1 only
    else:
        _, peaks = us_alphas, us_peaks
    num_frames = peaks.shape[0]
    if char_list[-1] == '</s>':
        char_list = char_list[:-1]
    timestamp_list = []
    new_char_list = []
    # for the bicif model trained with large data, cif2 actually fires when a character starts,
    # so treat the frames between two peaks as the duration of the former token
    fire_place = torch.where(peaks > 1.0 - 1e-4)[0].cpu().numpy() + force_time_shift  # total offset
    num_peak = len(fire_place)
    if num_peak != len(char_list) + 1:
        logging.warning("Length mismatch, the result might be incorrect.")
        logging.warning("num_peaks: {}, num_chars + 1: {}; they are supposed to be equal.".format(
            num_peak, len(char_list) + 1))
        if num_peak > len(char_list) + 1:
            # drop the extra trailing peaks
            fire_place = fire_place[:len(char_list) + 1]
        elif num_peak < len(char_list) + 1:
            # drop the extra trailing characters
            char_list = char_list[:num_peak - 1]
    # assert num_peak == len(char_list) + 1  # number of peaks is supposed to be number of tokens + 1
    # begin silence
    if fire_place[0] > START_END_THRESHOLD:
        # char_list.insert(0, '<sil>')
        timestamp_list.append([0.0, fire_place[0] * TIME_RATE])
        new_char_list.append('<sil>')
    # token timestamps
    for i in range(len(fire_place) - 1):
        new_char_list.append(char_list[i])
        if MAX_TOKEN_DURATION < 0 or fire_place[i + 1] - fire_place[i] <= MAX_TOKEN_DURATION:
            timestamp_list.append([fire_place[i] * TIME_RATE, fire_place[i + 1] * TIME_RATE])
        else:
            # cap the token duration; the trailing zero-weight frames become silence
            _split = fire_place[i] + MAX_TOKEN_DURATION
            timestamp_list.append([fire_place[i] * TIME_RATE, _split * TIME_RATE])
            timestamp_list.append([_split * TIME_RATE, fire_place[i + 1] * TIME_RATE])
            new_char_list.append('<sil>')
    # tail token and end silence
    # new_char_list.append(char_list[-1])
    if num_frames - fire_place[-1] > START_END_THRESHOLD:
        _end = (num_frames + fire_place[-1]) * 0.5
        # _end = fire_place[-1]
        timestamp_list[-1][1] = _end * TIME_RATE
        timestamp_list.append([_end * TIME_RATE, num_frames * TIME_RATE])
        new_char_list.append('<sil>')
    else:
        timestamp_list[-1][1] = num_frames * TIME_RATE
    if vad_offset:  # add the offset when the model runs behind a VAD front end
        for i in range(len(timestamp_list)):
            timestamp_list[i][0] = timestamp_list[i][0] + vad_offset / 1000.0
            timestamp_list[i][1] = timestamp_list[i][1] + vad_offset / 1000.0
    res_txt = ""
    for char, timestamp in zip(new_char_list, timestamp_list):
        if not sil_in_str and char == '<sil>':
            continue
        res_txt += "{} {} {};".format(char,
                                      str(timestamp[0] + 0.0005)[:5],
                                      str(timestamp[1] + 0.0005)[:5])
    res = []
    for char, timestamp in zip(new_char_list, timestamp_list):
        if char != '<sil>':
            res.append([int(timestamp[0] * 1000), int(timestamp[1] * 1000)])
    return res_txt, res
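
# A minimal usage sketch for ts_prediction_lfr6_standard, kept as a comment so the
# module stays import-safe. The peak positions and tokens are made up for
# illustration; real peaks come from a CIF/BiCIF predictor:
#
#   peaks = torch.zeros(60)
#   peaks[[3, 15, 27, 40]] = 1.0      # 4 fires -> 3 tokens + 1 tail boundary
#   alphas = torch.zeros_like(peaks)  # the alphas are ignored for 1-D inputs
#   res_txt, res = ts_prediction_lfr6_standard(alphas, peaks, ['你', '好', '吗'])
#   # res_txt: "char start end;..." (seconds); res: [[start_ms, end_ms], ...] per token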


def time_stamp_sentence(punc_id_list, time_stamp_postprocessed, text_postprocessed):
    punc_list = [',', '。', '?', '、']
    res = []
    if text_postprocessed is None:
        return res
    if time_stamp_postprocessed is None:
        return res
    if len(time_stamp_postprocessed) == 0:
        return res
    if len(text_postprocessed) == 0:
        return res
    if punc_id_list is None or len(punc_id_list) == 0:
        res.append({
            'text': text_postprocessed.split(),
            "start": time_stamp_postprocessed[0][0],
            "end": time_stamp_postprocessed[-1][1],
            'text_seg': text_postprocessed.split(),
            "ts_list": time_stamp_postprocessed,
        })
        return res
    if len(punc_id_list) != len(time_stamp_postprocessed):
        logging.warning("length mismatch between punctuation ids and timestamps")
    sentence_text = ""
    sentence_text_seg = ""
    ts_list = []
    sentence_start = time_stamp_postprocessed[0][0]
    sentence_end = time_stamp_postprocessed[0][1]
    texts = text_postprocessed.split()
    punc_stamp_text_list = list(zip_longest(punc_id_list, time_stamp_postprocessed, texts, fillvalue=None))
    for punc_stamp_text in punc_stamp_text_list:
        punc_id, time_stamp, text = punc_stamp_text
        # sentence_text += text if text is not None else ''
        if text is not None:
            # insert a space around latin-alphabet tokens
            if 'a' <= text[0] <= 'z' or 'A' <= text[0] <= 'Z':
                sentence_text += ' ' + text
            elif len(sentence_text) and ('a' <= sentence_text[-1] <= 'z' or 'A' <= sentence_text[-1] <= 'Z'):
                sentence_text += ' ' + text
            else:
                sentence_text += text
            sentence_text_seg += text + ' '
        ts_list.append(time_stamp)
        punc_id = int(punc_id) if punc_id is not None else 1
        sentence_end = time_stamp[1] if time_stamp is not None else sentence_end
        if punc_id > 1:
            sentence_text += punc_list[punc_id - 2]
            res.append({
                'text': sentence_text,
                "start": sentence_start,
                "end": sentence_end,
                "text_seg": sentence_text_seg,
                "ts_list": ts_list
            })
            sentence_text = ''
            sentence_text_seg = ''
            ts_list = []
            sentence_start = sentence_end
    return res
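
# A hedged sketch of time_stamp_sentence on toy inputs. The punctuation-id
# convention assumed here (1 = no break, ids >= 2 index into punc_list) is
# inferred from the code above:
#
#   ts = [[0, 300], [300, 620], [620, 900]]
#   sents = time_stamp_sentence([1, 1, 2], ts, "今天 天气 好")
#   # -> a single sentence dict: {'text': '今天天气好,', 'start': 0, 'end': 900, ...}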


class AverageShiftCalculator():

    def __init__(self):
        logging.warning("Calculating average shift.")

    def __call__(self, file1, file2):
        uttid_list1, ts_dict1 = self.read_timestamps(file1)
        uttid_list2, ts_dict2 = self.read_timestamps(file2)
        uttid_intersection = self._intersection(uttid_list1, uttid_list2)
        res = self.as_cal(uttid_intersection, ts_dict1, ts_dict2)
        logging.warning("Average shift of {} and {}: {}.".format(file1, file2, str(res)[:8]))
        logging.warning("Largest per-utterance shift: {}, uttid: {}.".format(
            self.max_shift, self.max_shift_uttid))

    def _intersection(self, list1, list2):
        set1 = set(list1)
        set2 = set(list2)
        if set1 == set2:
            logging.warning("Uttid sets are identical.")
            return set1
        itsc = list(set1 & set2)
        logging.warning("Uttids differ: file1 has {}, file2 has {}, {} in common.".format(
            len(list1), len(list2), len(itsc)))
        return itsc

    def read_timestamps(self, file):
        # read a timestamps file in the standard format:
        # "uttid char start end;char start end;..."
        uttid_list = []
        ts_dict = {}
        with codecs.open(file, 'r') as fin:
            for line in fin.readlines():
                text = ''
                ts_list = []
                line = line.rstrip()
                uttid = line.split()[0]
                uttid_list.append(uttid)
                body = " ".join(line.split()[1:])
                for pd in body.split(';'):
                    if not len(pd):
                        continue
                    char, start, end = pd.lstrip(" ").split(' ')
                    text += char + ','
                    ts_list.append((float(start), float(end)))
                ts_dict[uttid] = (text[:-1], ts_list)
            logging.warning("File {} read done.".format(file))
        return uttid_list, ts_dict

    def _shift(self, filtered_timestamp_list1, filtered_timestamp_list2):
        shift_time = 0
        for fts1, fts2 in zip(filtered_timestamp_list1, filtered_timestamp_list2):
            shift_time += abs(fts1[0] - fts2[0]) + abs(fts1[1] - fts2[1])
        num_tokens = len(filtered_timestamp_list1)
        return shift_time, num_tokens

    def as_cal(self, uttid_list, ts_dict1, ts_dict2):
        # calculate the average shift between timestamps 1 and 2;
        # when the characters differ, align them by edit distance and
        # accumulate the error only over characters that match
        self._accumulated_shift = 0
        self._accumulated_tokens = 0
        self.max_shift = 0
        self.max_shift_uttid = None
        for uttid in uttid_list:
            (t1, ts1) = ts_dict1[uttid]
            (t2, ts2) = ts_dict2[uttid]
            _align, _align2, _align3 = [], [], []
            fts1, fts2 = [], []
            _t1, _t2 = [], []
            sm = edit_distance.SequenceMatcher(t1.split(','), t2.split(','))
            s = sm.get_opcodes()
            for j in range(len(s)):
                if s[j][0] == "replace" or s[j][0] == "insert":
                    _align.append(0)
                if s[j][0] == "replace" or s[j][0] == "delete":
                    _align3.append(0)
                elif s[j][0] == "equal":
                    _align.append(1)
                    _align3.append(1)
                else:
                    continue
            # use the alignment to filter t2
            for a, ts, t in zip(_align, ts2, t2.split(',')):
                if a:
                    fts2.append(ts)
                    _t2.append(t)
            sm2 = edit_distance.SequenceMatcher(t2.split(','), t1.split(','))
            s = sm2.get_opcodes()
            for j in range(len(s)):
                if s[j][0] == "replace" or s[j][0] == "insert":
                    _align2.append(0)
                elif s[j][0] == "equal":
                    _align2.append(1)
                else:
                    continue
            # use the alignment to filter t1
            for a, ts, t in zip(_align3, ts1, t1.split(',')):
                if a:
                    fts1.append(ts)
                    _t1.append(t)
            if len(fts1) == len(fts2):
                shift_time, num_tokens = self._shift(fts1, fts2)
                self._accumulated_shift += shift_time
                self._accumulated_tokens += num_tokens
                if shift_time / num_tokens > self.max_shift:
                    self.max_shift = shift_time / num_tokens
                    self.max_shift_uttid = uttid
            else:
                logging.warning("length mismatch for uttid {}".format(uttid))
        return self._accumulated_shift / self._accumulated_tokens
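
# A hedged sketch of AverageShiftCalculator on two timestamp files in the format
# written by ts_prediction_lfr6_standard ("uttid char start end;char start end;...");
# the file names below are placeholders:
#
#   asc = AverageShiftCalculator()
#   asc('hyp_timestamps.txt', 'ref_timestamps.txt')
#   # logs the average absolute start/end shift over edit-distance-aligned tokens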


def convert_external_alphas(alphas_file, text_file, output_file):
    from funasr.models.predictor.cif import cif_wo_hidden
    with open(alphas_file, 'r') as f1, open(text_file, 'r') as f2, open(output_file, 'w') as f3:
        for line1, line2 in zip(f1.readlines(), f2.readlines()):
            line1 = line1.rstrip()
            line2 = line2.rstrip()
            assert line1.split()[0] == line2.split()[0]
            uttid = line1.split()[0]
            alphas = [float(i) for i in line1.split()[1:]]
            new_alphas = np.array(remove_chunk_padding(alphas))
            new_alphas[-1] += 1e-4
            text = line2.split()[1:]  # already a token list, no further splitting needed
            if len(text) + 1 != int(new_alphas.sum()):
                # force the alpha weights to sum to the expected number of fires
                new_alphas *= (len(text) + 1) / int(new_alphas.sum())
            peaks = cif_wo_hidden(torch.Tensor(new_alphas).unsqueeze(0), 1.0 - 1e-4)
            res_str, _ = ts_prediction_lfr6_standard(new_alphas, peaks[0], text,
                                                     force_time_shift=-7.0,
                                                     sil_in_str=False)
            f3.write("{} {}\n".format(uttid, res_str))
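
# Expected line formats for convert_external_alphas (a sketch; the paths are placeholders):
#   alphas_file: "uttid a_1 a_2 ... a_T"   one CIF alpha weight per encoder frame
#   text_file:   "uttid tok_1 tok_2 ..."   the tokens for the same uttid
#
#   convert_external_alphas('alphas.txt', 'text.txt', 'timestamps.txt')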


def remove_chunk_padding(alphas):
    # remove the padding part in alphas when using the chunk paraformer for GPU
    START_ZERO = 45
    MID_ZERO = 75
    REAL_FRAMES = 360  # for the chunk-based encoder 10-120-10 and fsmn padding 5
    alphas = alphas[START_ZERO:]  # remove the padding at the beginning
    new_alphas = []
    while True:
        new_alphas = new_alphas + alphas[:REAL_FRAMES]
        alphas = alphas[REAL_FRAMES + MID_ZERO:]
        if len(alphas) < REAL_FRAMES:
            break
    return new_alphas
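
# A toy check of remove_chunk_padding with the constants above (45/75/360):
# the leading zeros and the mid-chunk padding are stripped, and only the
# content frames survive.
#
#   alphas = [0.0] * 45 + [0.1] * 360 + [0.0] * 75 + [0.1] * 360
#   assert len(remove_chunk_padding(alphas)) == 720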


SUPPORTED_MODES = ['cal_aas', 'read_ext_alphas']


def main(args):
    if args.mode == 'cal_aas':
        asc = AverageShiftCalculator()
        asc(args.input, args.input2)
    elif args.mode == 'read_ext_alphas':
        convert_external_alphas(args.input, args.input2, args.output)
    else:
        logging.error("Mode {} not in SUPPORTED_MODES: {}.".format(args.mode, SUPPORTED_MODES))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='timestamp tools')
    parser.add_argument('--mode',
                        default=None,
                        type=str,
                        choices=SUPPORTED_MODES,
                        help='timestamp related toolbox')
    parser.add_argument('--input', default=None, type=str, help='input file path')
    parser.add_argument('--output', default=None, type=str, help='output file name')
    parser.add_argument('--input2', default=None, type=str, help='input2 file path')
    parser.add_argument('--kaldi-ts-type',
                        default='v2',
                        type=str,
                        choices=['v0', 'v1', 'v2'],
                        help='kaldi timestamp format to write (reserved; unused by the current modes)')
    args = parser.parse_args()
    main(args)
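
# Example invocations (file names are placeholders):
#   python timestamp_tools.py --mode cal_aas --input ts1.txt --input2 ts2.txt
#   python timestamp_tools.py --mode read_ext_alphas --input alphas.txt --input2 text.txt --output ts.txt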