# timestamp_tools.py
import argparse
import codecs
import logging

import edit_distance
import numpy as np
import torch


def ts_prediction_lfr6_standard(us_alphas,
                                us_peaks,
                                char_list,
                                vad_offset=0.0,
                                force_time_shift=-1.5,
                                sil_in_str=True):
    if not len(char_list):
        return "", []
    START_END_THRESHOLD = 5
    MAX_TOKEN_DURATION = 12
    TIME_RATE = 10.0 * 6 / 1000 / 3  # seconds per frame: 10 ms shift * LFR 6, 3x upsampled
    if len(us_alphas.shape) == 2:
        _, peaks = us_alphas[0], us_peaks[0]  # support inference batch_size=1 only
    else:
        _, peaks = us_alphas, us_peaks
    num_frames = peaks.shape[0]
    if char_list[-1] == '</s>':
        char_list = char_list[:-1]
    timestamp_list = []
    new_char_list = []
    # for a bicif model trained on large data, cif2 actually fires when a character starts,
    # so treat the frames between two peaks as the duration of the former token
    fire_place = torch.where(peaks > 1.0 - 1e-4)[0].cpu().numpy() + force_time_shift  # total offset
    num_peak = len(fire_place)
    assert num_peak == len(char_list) + 1  # number of peaks is supposed to be number of tokens + 1
    # leading silence
    if fire_place[0] > START_END_THRESHOLD:
        timestamp_list.append([0.0, fire_place[0] * TIME_RATE])
        new_char_list.append('<sil>')
    # token timestamps
    for i in range(len(fire_place) - 1):
        new_char_list.append(char_list[i])
        if MAX_TOKEN_DURATION < 0 or fire_place[i + 1] - fire_place[i] <= MAX_TOKEN_DURATION:
            timestamp_list.append([fire_place[i] * TIME_RATE, fire_place[i + 1] * TIME_RATE])
        else:
            # an overlong duration is split into the token itself plus a trailing
            # silence covering the remaining zero-weight frames
            _split = fire_place[i] + MAX_TOKEN_DURATION
            timestamp_list.append([fire_place[i] * TIME_RATE, _split * TIME_RATE])
            timestamp_list.append([_split * TIME_RATE, fire_place[i + 1] * TIME_RATE])
            new_char_list.append('<sil>')
    # tail token and trailing silence
    if num_frames - fire_place[-1] > START_END_THRESHOLD:
        _end = (num_frames + fire_place[-1]) * 0.5
        timestamp_list[-1][1] = _end * TIME_RATE
        timestamp_list.append([_end * TIME_RATE, num_frames * TIME_RATE])
        new_char_list.append('<sil>')
    else:
        timestamp_list[-1][1] = num_frames * TIME_RATE
    if vad_offset:  # add the VAD segment offset (in ms) back onto each timestamp
        for i in range(len(timestamp_list)):
            timestamp_list[i][0] += vad_offset / 1000.0
            timestamp_list[i][1] += vad_offset / 1000.0
    res_txt = ""
    for char, timestamp in zip(new_char_list, timestamp_list):
        if not sil_in_str and char == '<sil>':
            continue
        # truncate to roughly three decimal places via string slicing
        res_txt += "{} {} {};".format(char, str(timestamp[0] + 0.0005)[:5], str(timestamp[1] + 0.0005)[:5])
    res = []
    for char, timestamp in zip(new_char_list, timestamp_list):
        if char != '<sil>':
            res.append([int(timestamp[0] * 1000), int(timestamp[1] * 1000)])
    return res_txt, res
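
# A minimal usage sketch (not from the original file): fabricate a peak tensor
# with four CIF firings, i.e. three tokens plus the terminating peak the
# assertion above expects. All values are made-up stand-ins for real predictor
# outputs.
#
#   peaks = torch.zeros(60)
#   peaks[[10, 20, 30, 40]] = 1.0
#   alphas = torch.zeros(60)  # not inspected beyond its shape here
#   txt, ms = ts_prediction_lfr6_standard(alphas, peaks, ['a', 'b', 'c'])
#   # txt like "<sil> 0.000 0.170;a 0.170 0.370;...";
#   # ms holds one [start, end] pair in integer milliseconds per non-sil token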


def time_stamp_sentence(punc_id_list, time_stamp_postprocessed, text_postprocessed):
    res = []
    if not text_postprocessed or not time_stamp_postprocessed:
        return res
    # without usable punctuation ids, return the whole utterance as one segment
    if not punc_id_list or len(punc_id_list) != len(time_stamp_postprocessed):
        res.append({
            'text': text_postprocessed.split(),
            'start': time_stamp_postprocessed[0][0],
            'end': time_stamp_postprocessed[-1][1],
        })
        return res
    sentence_text = ''
    sentence_start = time_stamp_postprocessed[0][0]
    texts = text_postprocessed.split()
    for i in range(len(punc_id_list)):
        sentence_text += texts[i]
        if punc_id_list[i] == 2:  # comma
            sentence_text += ','
            res.append({
                'text': sentence_text,
                'start': sentence_start,
                'end': time_stamp_postprocessed[i][1],
            })
            sentence_text = ''
            sentence_start = time_stamp_postprocessed[i][1]
        elif punc_id_list[i] == 3:  # full stop
            sentence_text += '.'
            res.append({
                'text': sentence_text,
                'start': sentence_start,
                'end': time_stamp_postprocessed[i][1],
            })
            sentence_text = ''
            sentence_start = time_stamp_postprocessed[i][1]
    return res
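
# Hypothetical walk-through (ids and times are made up; 2 and 3 are the comma
# and full-stop ids handled above):
#
#   time_stamp_sentence([1, 2, 3],
#                       [[0, 300], [300, 650], [650, 980]],
#                       "ni hao ma")
#   # -> [{'text': 'nihao,', 'start': 0, 'end': 650},
#   #     {'text': 'ma.', 'start': 650, 'end': 980}]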


class AverageShiftCalculator:
    def __init__(self):
        logging.warning("Calculating average shift.")

    def __call__(self, file1, file2):
        uttid_list1, ts_dict1 = self.read_timestamps(file1)
        uttid_list2, ts_dict2 = self.read_timestamps(file2)
        uttid_intersection = self._intersection(uttid_list1, uttid_list2)
        res = self.as_cal(uttid_intersection, ts_dict1, ts_dict2)
        logging.warning("Average shift of {} and {}: {}.".format(file1, file2, str(res)[:8]))
        logging.warning("Largest shift {} found in utterance {}.".format(self.max_shift, self.max_shift_uttid))

    def _intersection(self, list1, list2):
        set1 = set(list1)
        set2 = set(list2)
        if set1 == set2:
            logging.warning("Uttid sets are identical.")
            return set1
        itsc = list(set1 & set2)
        logging.warning("Uttids differ: file1 {}, file2 {}, {} in common.".format(len(list1), len(list2), len(itsc)))
        return itsc

    def read_timestamps(self, file):
        # read a timestamp file in the standard format
        uttid_list = []
        ts_dict = {}
        with codecs.open(file, 'r', 'utf-8') as fin:
            for line in fin:
                text = ''
                ts_list = []
                line = line.rstrip()
                uttid = line.split()[0]
                uttid_list.append(uttid)
                body = " ".join(line.split()[1:])
                for pd in body.split(';'):
                    if not len(pd):
                        continue
                    char, start, end = pd.split()
                    text += char + ','
                    ts_list.append((float(start), float(end)))
                ts_dict[uttid] = (text[:-1], ts_list)
        logging.warning("File {} read done.".format(file))
        return uttid_list, ts_dict
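
    # Expected input format: one utterance per line, "uttid" followed by
    # ";"-separated "char start end" triples, as written by the tools below
    # (the uttid and values here are invented):
    #
    #   utt001 a 0.000 0.360;b 0.360 0.540;c 0.540 0.900;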

    def _shift(self, filtered_timestamp_list1, filtered_timestamp_list2):
        shift_time = 0
        for fts1, fts2 in zip(filtered_timestamp_list1, filtered_timestamp_list2):
            shift_time += abs(fts1[0] - fts2[0]) + abs(fts1[1] - fts2[1])
        num_tokens = len(filtered_timestamp_list1)
        return shift_time, num_tokens

    def as_cal(self, uttid_list, ts_dict1, ts_dict2):
        # calculate the average shift between the two sets of timestamps;
        # when the characters differ, align them with edit distance and
        # accumulate the error only over characters that match
        self._accumulated_shift = 0
        self._accumulated_tokens = 0
        self.max_shift = 0
        self.max_shift_uttid = None
        for uttid in uttid_list:
            (t1, ts1) = ts_dict1[uttid]
            (t2, ts2) = ts_dict2[uttid]
            _align, _align2, _align3 = [], [], []
            fts1, fts2 = [], []
            _t1, _t2 = [], []
            sm = edit_distance.SequenceMatcher(t1.split(','), t2.split(','))
            s = sm.get_opcodes()
            for j in range(len(s)):
                if s[j][0] == "replace" or s[j][0] == "insert":
                    _align.append(0)
                if s[j][0] == "replace" or s[j][0] == "delete":
                    _align3.append(0)
                elif s[j][0] == "equal":
                    _align.append(1)
                    _align3.append(1)
            # use _align to keep only the aligned (equal) characters of t2
            for a, ts, t in zip(_align, ts2, t2.split(',')):
                if a:
                    fts2.append(ts)
                    _t2.append(t)
            sm2 = edit_distance.SequenceMatcher(t2.split(','), t1.split(','))
            s = sm2.get_opcodes()
            for j in range(len(s)):
                if s[j][0] == "replace" or s[j][0] == "insert":
                    _align2.append(0)
                elif s[j][0] == "equal":
                    _align2.append(1)
            # use _align3 to keep only the aligned characters of t1
            # (_align2 is computed for symmetry but not used below)
            for a, ts, t in zip(_align3, ts1, t1.split(',')):
                if a:
                    fts1.append(ts)
                    _t1.append(t)
            if len(fts1) == len(fts2):
                shift_time, num_tokens = self._shift(fts1, fts2)
                self._accumulated_shift += shift_time
                self._accumulated_tokens += num_tokens
                if num_tokens and shift_time / num_tokens > self.max_shift:
                    self.max_shift = shift_time / num_tokens
                    self.max_shift_uttid = uttid
            else:
                logging.warning("Length mismatch in {}, skipped.".format(uttid))
        return self._accumulated_shift / self._accumulated_tokens
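
# Hypothetical usage sketch: compare two timestamp files in the format
# described above (the file names are placeholders):
#
#   asc = AverageShiftCalculator()
#   asc("timestamps_sys_a.txt", "timestamps_sys_b.txt")
#   # logs the average shift in seconds and the utterance that differs most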


def convert_external_alphas(alphas_file, text_file, output_file):
    from funasr.models.predictor.cif import cif_wo_hidden
    with open(alphas_file, 'r') as f1, open(text_file, 'r') as f2, open(output_file, 'w') as f3:
        for line1, line2 in zip(f1.readlines(), f2.readlines()):
            line1 = line1.rstrip()
            line2 = line2.rstrip()
            assert line1.split()[0] == line2.split()[0]
            uttid = line1.split()[0]
            alphas = [float(i) for i in line1.split()[1:]]
            new_alphas = np.array(remove_chunk_padding(alphas))
            new_alphas[-1] += 1e-4
            body = " ".join(line2.split()[1:])
            if " " in body:
                text = body.split()  # space-separated tokens
            else:
                text = list(body)  # a single blob: treat every character as a token
            if len(text) + 1 != int(new_alphas.sum()):
                # force the alphas to sum to the number of tokens + 1
                new_alphas *= (len(text) + 1) / int(new_alphas.sum())
            peaks = cif_wo_hidden(torch.Tensor(new_alphas).unsqueeze(0), 1.0 - 1e-4)
            res_str, _ = ts_prediction_lfr6_standard(new_alphas, peaks[0], text,
                                                     force_time_shift=-7.0,
                                                     sil_in_str=False)
            f3.write("{} {}\n".format(uttid, res_str))
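
# Input format sketch for convert_external_alphas (one utterance per line in
# both files, matched by uttid; the values are illustrative):
#
#   alphas_file: utt001 0.00 0.12 0.88 0.31 ...
#   text_file:   utt001 token1 token2 ...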


def remove_chunk_padding(alphas):
    # remove the padding inserted into alphas when using chunk paraformer on GPU
    START_ZERO = 45
    MID_ZERO = 75
    REAL_FRAMES = 360  # for a chunk-based encoder 10-120-10 with fsmn padding 5
    alphas = alphas[START_ZERO:]  # drop the padding at the beginning
    new_alphas = []
    while True:
        new_alphas = new_alphas + alphas[:REAL_FRAMES]
        alphas = alphas[REAL_FRAMES + MID_ZERO:]
        if len(alphas) < REAL_FRAMES:
            break
    return new_alphas
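
# Shape check sketch: with the constants above, 45 leading padding frames plus
# two chunks of 360 real frames separated by 75 padding frames
# (45 + 360 + 75 + 360 = 840) reduce to 720 real frames:
#
#   assert len(remove_chunk_padding([0.0] * 840)) == 720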


SUPPORTED_MODES = ['cal_aas', 'read_ext_alphas']


def main(args):
    if args.mode == 'cal_aas':
        asc = AverageShiftCalculator()
        asc(args.input, args.input2)
    elif args.mode == 'read_ext_alphas':
        convert_external_alphas(args.input, args.input2, args.output)
    else:
        logging.error("Mode {} not in SUPPORTED_MODES: {}.".format(args.mode, SUPPORTED_MODES))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='timestamp tools')
    parser.add_argument('--mode',
                        default=None,
                        type=str,
                        choices=SUPPORTED_MODES,
                        help='which timestamp tool to run')
    parser.add_argument('--input', default=None, type=str, help='input file path')
    parser.add_argument('--output', default=None, type=str, help='output file path')
    parser.add_argument('--input2', default=None, type=str, help='second input file path')
    parser.add_argument('--kaldi-ts-type',
                        default='v2',
                        type=str,
                        choices=['v0', 'v1', 'v2'],
                        help='kaldi timestamp format to write (currently unused here)')
    args = parser.parse_args()
    main(args)
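
# Example invocations (paths are placeholders):
#
#   python timestamp_tools.py --mode cal_aas --input ts_a.txt --input2 ts_b.txt
#   python timestamp_tools.py --mode read_ext_alphas \
#       --input alphas.txt --input2 text.txt --output ts_out.txt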