# timestamp_tools.py
import torch
import codecs
import logging
import argparse
import numpy as np
# import edit_distance
from itertools import zip_longest

def cif_wo_hidden(alphas, threshold):
    # continuous integrate-and-fire (CIF) without hidden states:
    # accumulate the weights over time and fire whenever the integral
    # crosses the threshold, then subtract the threshold and continue
    batch_size, len_time = alphas.size()
    # loop vars
    integrate = torch.zeros([batch_size], device=alphas.device)
    # intermediate vars along time
    list_fires = []
    for t in range(len_time):
        alpha = alphas[:, t]
        integrate += alpha
        list_fires.append(integrate)
        fire_place = integrate >= threshold
        integrate = torch.where(fire_place,
                                integrate - torch.ones([batch_size], device=alphas.device) * threshold,
                                integrate)
    fires = torch.stack(list_fires, 1)
    return fires
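

# A minimal sketch of the firing behavior above (hypothetical weight values,
# not part of the original file): the weights sum to ~2.0, so the integral
# crosses the threshold twice and two peaks appear in `fires`.
def _demo_cif_wo_hidden():
    alphas = torch.tensor([[0.3, 0.4, 0.5, 0.6, 0.2]])
    fires = cif_wo_hidden(alphas, threshold=1.0 - 1e-4)
    # fires[0] is approximately [0.3, 0.7, 1.2, 0.8, 1.0];
    # frames 2 and 4 cross the threshold
    peaks = torch.where(fires[0] >= 1.0 - 1e-4)[0]
    print(peaks)  # tensor([2, 4])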

def ts_prediction_lfr6_standard(us_alphas,
                                us_peaks,
                                char_list,
                                vad_offset=0.0,
                                force_time_shift=-1.5,
                                sil_in_str=True):
    if not len(char_list):
        return "", []
    START_END_THRESHOLD = 5
    MAX_TOKEN_DURATION = 12
    TIME_RATE = 10.0 * 6 / 1000 / 3  # 10 ms frame shift, LFR 6, 3-times upsampled
    if len(us_alphas.shape) == 2:
        alphas, peaks = us_alphas[0], us_peaks[0]  # support inference batch_size=1 only
    else:
        alphas, peaks = us_alphas, us_peaks
    if char_list[-1] == '</s>':
        char_list = char_list[:-1]
    fire_place = torch.where(peaks > 1.0 - 1e-4)[0].cpu().numpy() + force_time_shift  # total offset
    if len(fire_place) != len(char_list) + 1:
        # the number of peaks is supposed to be the number of tokens + 1;
        # rescale alphas so the integral fires the expected number of times
        alphas /= (alphas.sum() / (len(char_list) + 1))
        alphas = alphas.unsqueeze(0)
        peaks = cif_wo_hidden(alphas, threshold=1.0 - 1e-4)[0]
        fire_place = torch.where(peaks > 1.0 - 1e-4)[0].cpu().numpy() + force_time_shift  # total offset
    num_frames = peaks.shape[0]
    timestamp_list = []
    new_char_list = []
    # for a bicif model trained with large data, cif2 actually fires when a character starts,
    # so treat the frames between two peaks as the duration of the former token
    # begin silence
    if fire_place[0] > START_END_THRESHOLD:
        # char_list.insert(0, '<sil>')
        timestamp_list.append([0.0, fire_place[0] * TIME_RATE])
        new_char_list.append('<sil>')
    # token timestamps
    for i in range(len(fire_place) - 1):
        new_char_list.append(char_list[i])
        if MAX_TOKEN_DURATION < 0 or fire_place[i + 1] - fire_place[i] <= MAX_TOKEN_DURATION:
            timestamp_list.append([fire_place[i] * TIME_RATE, fire_place[i + 1] * TIME_RATE])
        else:
            # over-long duration: keep MAX_TOKEN_DURATION frames for the token
            # and mark the remaining low-weight frames as silence
            _split = fire_place[i] + MAX_TOKEN_DURATION
            timestamp_list.append([fire_place[i] * TIME_RATE, _split * TIME_RATE])
            timestamp_list.append([_split * TIME_RATE, fire_place[i + 1] * TIME_RATE])
            new_char_list.append('<sil>')
    # tail token and end silence
    # new_char_list.append(char_list[-1])
    if num_frames - fire_place[-1] > START_END_THRESHOLD:
        _end = (num_frames + fire_place[-1]) * 0.5
        # _end = fire_place[-1]
        timestamp_list[-1][1] = _end * TIME_RATE
        timestamp_list.append([_end * TIME_RATE, num_frames * TIME_RATE])
        new_char_list.append('<sil>')
    else:
        timestamp_list[-1][1] = num_frames * TIME_RATE
    if vad_offset:  # add the VAD segment offset back in models with VAD
        for i in range(len(timestamp_list)):
            timestamp_list[i][0] = timestamp_list[i][0] + vad_offset / 1000.0
            timestamp_list[i][1] = timestamp_list[i][1] + vad_offset / 1000.0
    res_txt = ""
    for char, timestamp in zip(new_char_list, timestamp_list):
        # if char != '<sil>':
        if not sil_in_str and char == '<sil>':
            continue
        # crude rounding to millisecond precision via string truncation
        res_txt += "{} {} {};".format(char, str(timestamp[0] + 0.0005)[:5], str(timestamp[1] + 0.0005)[:5])
    res = []
    for char, timestamp in zip(new_char_list, timestamp_list):
        if char != '<sil>':
            res.append([int(timestamp[0] * 1000), int(timestamp[1] * 1000)])
    return res_txt, res
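

# A hedged usage sketch with hypothetical inputs: three unit weights produce
# three CIF peaks, i.e. one more peak than the two real tokens, so no rescaling
# is needed and each token's duration spans the frames between adjacent peaks.
def _demo_ts_prediction():
    us_alphas = torch.zeros(1, 60)
    us_alphas[0, 10] = 1.0
    us_alphas[0, 20] = 1.0
    us_alphas[0, 30] = 1.0
    us_peaks = cif_wo_hidden(us_alphas, threshold=1.0 - 1e-4)
    res_txt, res = ts_prediction_lfr6_standard(us_alphas, us_peaks, ['hello', 'world', '</s>'])
    print(res_txt)  # "token start end;" triples, silences included by default
    print(res)      # approximately [[170, 370], [370, 885]], in milliseconds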

def time_stamp_sentence(punc_id_list, time_stamp_postprocessed, text_postprocessed):
    punc_list = [',', '。', '?', '、']
    res = []
    if text_postprocessed is None:
        return res
    if time_stamp_postprocessed is None:
        return res
    if len(time_stamp_postprocessed) == 0:
        return res
    if len(text_postprocessed) == 0:
        return res
    if punc_id_list is None or len(punc_id_list) == 0:
        # no punctuation available: return the whole utterance as one segment
        res.append({
            'text': text_postprocessed.split(),
            "start": time_stamp_postprocessed[0][0],
            "end": time_stamp_postprocessed[-1][1],
            'text_seg': text_postprocessed.split(),
            "ts_list": time_stamp_postprocessed,
        })
        return res
    if len(punc_id_list) != len(time_stamp_postprocessed):
        logging.warning("length mismatch between punctuation ids and timestamps")
    sentence_text = ""
    sentence_text_seg = ""
    ts_list = []
    sentence_start = time_stamp_postprocessed[0][0]
    sentence_end = time_stamp_postprocessed[0][1]
    texts = text_postprocessed.split()
    punc_stamp_text_list = list(zip_longest(punc_id_list, time_stamp_postprocessed, texts, fillvalue=None))
    for punc_stamp_text in punc_stamp_text_list:
        punc_id, time_stamp, text = punc_stamp_text
        # sentence_text += text if text is not None else ''
        if text is not None:
            # insert a space around tokens that start or end with a Latin letter
            if 'a' <= text[0] <= 'z' or 'A' <= text[0] <= 'Z':
                sentence_text += ' ' + text
            elif len(sentence_text) and ('a' <= sentence_text[-1] <= 'z' or 'A' <= sentence_text[-1] <= 'Z'):
                sentence_text += ' ' + text
            else:
                sentence_text += text
            sentence_text_seg += text + ' '
        ts_list.append(time_stamp)
        punc_id = int(punc_id) if punc_id is not None else 1
        sentence_end = time_stamp[1] if time_stamp is not None else sentence_end
        if punc_id > 1:
            # punctuation ids >= 2 index into punc_list and close the sentence
            sentence_text += punc_list[punc_id - 2]
            res.append({
                'text': sentence_text,
                "start": sentence_start,
                "end": sentence_end,
                "text_seg": sentence_text_seg,
                "ts_list": ts_list,
            })
            sentence_text = ''
            sentence_text_seg = ''
            ts_list = []
            sentence_start = sentence_end
    return res
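

# A hedged usage sketch (hypothetical ids and timestamps): punctuation id 1
# means "no punctuation", and id 3 maps to punc_list[1] ('。'), closing a
# sentence after the 2nd and 4th tokens.
def _demo_time_stamp_sentence():
    punc_id_list = [1, 3, 1, 3]
    ts = [[170, 370], [370, 570], [600, 800], [800, 990]]
    sentences = time_stamp_sentence(punc_id_list, ts, "hello world good night")
    print([s['text'] for s in sentences])  # [' hello world。', ' good night。']
    print([(s['start'], s['end']) for s in sentences])  # [(170, 570), (570, 990)]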

# class AverageShiftCalculator():
#     def __init__(self):
#         logging.warning("Calculating average shift.")
#
#     def __call__(self, file1, file2):
#         uttid_list1, ts_dict1 = self.read_timestamps(file1)
#         uttid_list2, ts_dict2 = self.read_timestamps(file2)
#         uttid_intersection = self._intersection(uttid_list1, uttid_list2)
#         res = self.as_cal(uttid_intersection, ts_dict1, ts_dict2)
#         logging.warning("Average shift of {} and {}: {}.".format(file1, file2, str(res)[:8]))
#         logging.warning("Following timestamp pair differs most: {}, detail: {}".format(self.max_shift, self.max_shift_uttid))
#
#     def _intersection(self, list1, list2):
#         set1 = set(list1)
#         set2 = set(list2)
#         if set1 == set2:
#             logging.warning("Uttid same checked.")
#             return set1
#         itsc = list(set1 & set2)
#         logging.warning("Uttid differs: file1 {}, file2 {}, lines same {}.".format(len(list1), len(list2), len(itsc)))
#         return itsc
#
#     def read_timestamps(self, file):
#         # read a timestamps file in the standard format
#         uttid_list = []
#         ts_dict = {}
#         with codecs.open(file, 'r') as fin:
#             for line in fin.readlines():
#                 text = ''
#                 ts_list = []
#                 line = line.rstrip()
#                 uttid = line.split()[0]
#                 uttid_list.append(uttid)
#                 body = " ".join(line.split()[1:])
#                 for pd in body.split(';'):
#                     if not len(pd): continue
#                     char, start, end = pd.lstrip(" ").split(' ')
#                     text += char + ','
#                     ts_list.append((float(start), float(end)))
#                 ts_dict[uttid] = (text[:-1], ts_list)
#         logging.warning("File {} read done.".format(file))
#         return uttid_list, ts_dict
#
#     def _shift(self, filtered_timestamp_list1, filtered_timestamp_list2):
#         shift_time = 0
#         for fts1, fts2 in zip(filtered_timestamp_list1, filtered_timestamp_list2):
#             shift_time += abs(fts1[0] - fts2[0]) + abs(fts1[1] - fts2[1])
#         num_tokens = len(filtered_timestamp_list1)
#         return shift_time, num_tokens
#
#     def as_cal(self, uttid_list, ts_dict1, ts_dict2):
#         # calculate the average shift between timestamp1 and timestamp2;
#         # when characters differ, use edit-distance alignment
#         # and calculate the error between the same characters
#         self._accumlated_shift = 0
#         self._accumlated_tokens = 0
#         self.max_shift = 0
#         self.max_shift_uttid = None
#         for uttid in uttid_list:
#             (t1, ts1) = ts_dict1[uttid]
#             (t2, ts2) = ts_dict2[uttid]
#             _align, _align2, _align3 = [], [], []
#             fts1, fts2 = [], []
#             _t1, _t2 = [], []
#             sm = edit_distance.SequenceMatcher(t1.split(','), t2.split(','))
#             s = sm.get_opcodes()
#             for j in range(len(s)):
#                 if s[j][0] == "replace" or s[j][0] == "insert":
#                     _align.append(0)
#                 if s[j][0] == "replace" or s[j][0] == "delete":
#                     _align3.append(0)
#                 elif s[j][0] == "equal":
#                     _align.append(1)
#                     _align3.append(1)
#                 else:
#                     continue
#             # use s to index t2
#             for a, ts, t in zip(_align, ts2, t2.split(',')):
#                 if a:
#                     fts2.append(ts)
#                     _t2.append(t)
#             sm2 = edit_distance.SequenceMatcher(t2.split(','), t1.split(','))
#             s = sm2.get_opcodes()
#             for j in range(len(s)):
#                 if s[j][0] == "replace" or s[j][0] == "insert":
#                     _align2.append(0)
#                 elif s[j][0] == "equal":
#                     _align2.append(1)
#                 else:
#                     continue
#             # use s2 to index t1
#             for a, ts, t in zip(_align3, ts1, t1.split(',')):
#                 if a:
#                     fts1.append(ts)
#                     _t1.append(t)
#             if len(fts1) == len(fts2):
#                 shift_time, num_tokens = self._shift(fts1, fts2)
#                 self._accumlated_shift += shift_time
#                 self._accumlated_tokens += num_tokens
#                 if shift_time / num_tokens > self.max_shift:
#                     self.max_shift = shift_time / num_tokens
#                     self.max_shift_uttid = uttid
#             else:
#                 logging.warning("length mismatch")
#         return self._accumlated_shift / self._accumlated_tokens

def convert_external_alphas(alphas_file, text_file, output_file):
    # each line of alphas_file: "<uttid> <alpha_0> <alpha_1> ...";
    # each line of text_file:   "<uttid> <token> <token> ..."
    from funasr.models.paraformer.cif_predictor import cif_wo_hidden
    with open(alphas_file, 'r') as f1, open(text_file, 'r') as f2, open(output_file, 'w') as f3:
        for line1, line2 in zip(f1.readlines(), f2.readlines()):
            line1 = line1.rstrip()
            line2 = line2.rstrip()
            assert line1.split()[0] == line2.split()[0]
            uttid = line1.split()[0]
            alphas = [float(i) for i in line1.split()[1:]]
            new_alphas = np.array(remove_chunk_padding(alphas))
            new_alphas[-1] += 1e-4
            raw_text = " ".join(line2.split()[1:])
            if " " in raw_text:
                text = raw_text.split()
            else:
                # no spaces: treat the transcript as a character sequence
                text = list(raw_text)
            if len(text) + 1 != int(new_alphas.sum()):
                # force resize so the number of fires matches number of tokens + 1
                new_alphas *= (len(text) + 1) / int(new_alphas.sum())
            peaks = cif_wo_hidden(torch.Tensor(new_alphas).unsqueeze(0), 1.0 - 1e-4)
            res_str, _ = ts_prediction_lfr6_standard(new_alphas, peaks[0], text,
                                                     force_time_shift=-7.0,
                                                     sil_in_str=False)
            f3.write("{} {}\n".format(uttid, res_str))
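

# Hedged usage sketch (hypothetical file paths): converts externally dumped CIF
# alphas plus a transcript into one "uttid token start end;..." line per utterance.
#   convert_external_alphas("exp/alphas.txt", "data/text", "exp/timestamps.txt")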

def remove_chunk_padding(alphas):
    # remove the padding part in alphas if using chunk paraformer for GPU
    START_ZERO = 45
    MID_ZERO = 75
    REAL_FRAMES = 360  # for chunk-based encoder 10-120-10 and fsmn padding 5
    alphas = alphas[START_ZERO:]  # remove the padding at the beginning
    new_alphas = []
    while True:
        new_alphas = new_alphas + alphas[:REAL_FRAMES]
        alphas = alphas[REAL_FRAMES + MID_ZERO:]
        if len(alphas) < REAL_FRAMES:
            break
    return new_alphas
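

# A small sketch of the layout this undoes (hypothetical weight values): each
# chunk contributes REAL_FRAMES useful frames separated by MID_ZERO padded ones,
# after START_ZERO padded frames at the start.
def _demo_remove_chunk_padding():
    padded = [0.0] * 45 + [1.0] * 360 + [0.0] * 75 + [2.0] * 360
    cleaned = remove_chunk_padding(padded)
    print(len(cleaned))  # 720 = 2 * 360, padding stripped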

SUPPORTED_MODES = ['cal_aas', 'read_ext_alphas']


def main(args):
    # if args.mode == 'cal_aas':
    #     asc = AverageShiftCalculator()
    #     asc(args.input, args.input2)
    if args.mode == 'read_ext_alphas':
        convert_external_alphas(args.input, args.input2, args.output)
    else:
        # note: 'cal_aas' is disabled while AverageShiftCalculator is commented out
        logging.error("Mode {} not in SUPPORTED_MODES: {}.".format(args.mode, SUPPORTED_MODES))

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='timestamp tools')
    parser.add_argument('--mode',
                        default=None,
                        type=str,
                        choices=SUPPORTED_MODES,
                        help='timestamp related toolbox')
    parser.add_argument('--input', default=None, type=str, help='input file path')
    parser.add_argument('--output', default=None, type=str, help='output file name')
    parser.add_argument('--input2', default=None, type=str, help='input2 file path')
    parser.add_argument('--kaldi-ts-type',
                        default='v2',
                        type=str,
                        choices=['v0', 'v1', 'v2'],
                        help='kaldi timestamp format to write')
    args = parser.parse_args()
    main(args)
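
# Hedged CLI example (hypothetical paths):
#   python timestamp_tools.py --mode read_ext_alphas \
#       --input exp/alphas.txt --input2 data/text --output exp/timestamps.txt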