# model.py

from typing import Any, List, Optional, Tuple

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from funasr.models.ct_transformer.utils import split_to_mini_sentence, split_words
from funasr.models.transformer.utils.nets_utils import make_pad_mask
from funasr.register import tables
from funasr.train_utils.device_funcs import force_gatherable, to_device
from funasr.utils.load_utils import load_audio_text_image_video

@tables.register("model_classes", "CTTransformer")
class CTTransformer(nn.Module):
    """CT-Transformer: Controllable time-delay transformer for real-time
    punctuation prediction and disfluency detection.

    Author: Speech Lab of DAMO Academy, Alibaba Group
    https://arxiv.org/pdf/2003.01309.pdf
    """
    def __init__(
        self,
        encoder: str = None,
        encoder_conf: dict = None,
        vocab_size: int = -1,
        punc_list: list = None,
        punc_weight: list = None,
        embed_unit: int = 128,
        att_unit: int = 256,
        dropout_rate: float = 0.5,
        ignore_id: int = -1,
        sos: int = 1,
        eos: int = 2,
        sentence_end_id: int = 3,
        **kwargs,
    ):
        super().__init__()

        punc_size = len(punc_list)
        if punc_weight is None:
            punc_weight = [1] * punc_size

        self.embed = nn.Embedding(vocab_size, embed_unit)
        encoder_class = tables.encoder_classes.get(encoder)
        self.encoder = encoder_class(**encoder_conf)
        self.decoder = nn.Linear(att_unit, punc_size)
        self.punc_list = punc_list
        # Store the class weights as a tensor so they can be moved to the input
        # device and passed directly to F.cross_entropy in nll().
        self.punc_weight = torch.tensor(punc_weight, dtype=torch.float32)
        self.ignore_id = ignore_id
        self.sos = sos
        self.eos = eos
        self.sentence_end_id = sentence_end_id

    def punc_forward(self, text: torch.Tensor, text_lengths: torch.Tensor, **kwargs):
        """Compute punctuation logits for a batch of token sequences.

        Args:
            text (torch.Tensor): Input token ids. (batch, length)
            text_lengths (torch.Tensor): Valid length of each sequence. (batch,)

        Returns:
            Tuple of logits over the punctuation set, (batch, length, punc_size),
            and a placeholder (None) for state.
        """
        x = self.embed(text)  # (batch, length) -> (batch, length, embed_unit)
        h, _, _ = self.encoder(x, text_lengths)
        y = self.decoder(h)  # (batch, length, att_unit) -> (batch, length, punc_size)
        return y, None

    def with_vad(self):
        return False

    def score(self, y: torch.Tensor, state: Any, x: torch.Tensor) -> Tuple[torch.Tensor, Any]:
        """Score new token.

        Args:
            y (torch.Tensor): 1D torch.int64 prefix tokens.
            state: Scorer state for prefix tokens.
            x (torch.Tensor): Encoder feature that generates ys.

        Returns:
            tuple[torch.Tensor, Any]: Tuple of
                torch.float32 scores for next token (vocab_size)
                and next state for ys.
        """
        y = y.unsqueeze(0)
        # NOTE: _target_mask is expected to build a causal (subsequent-position)
        # mask for the prefix; it is not defined in this snippet.
        h, _, cache = self.encoder.forward_one_step(self.embed(y), self._target_mask(y), cache=state)
        h = self.decoder(h[:, -1])
        logp = h.log_softmax(dim=-1).squeeze(0)
        return logp, cache
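
    # The cache returned by forward_one_step is assumed here to be a per-layer
    # list of decoder states (one entry per layer in self.encoder.encoders);
    # batch_score below stacks those states per layer across the batch for the
    # batched step, then unstacks them back into per-sample state lists.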
    def batch_score(self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor) -> Tuple[torch.Tensor, List[Any]]:
        """Score new token batch.

        Args:
            ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
            states (List[Any]): Scorer states for prefix tokens.
            xs (torch.Tensor): The encoder feature that generates ys (n_batch, xlen, n_feat).

        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchfied scores for next token with shape of `(n_batch, vocab_size)`
                and next state list for ys.
        """
        # Merge states: transpose state of [batch, layer] into [layer, batch].
        n_batch = len(ys)
        n_layers = len(self.encoder.encoders)
        if states[0] is None:
            batch_state = None
        else:
            batch_state = [torch.stack([states[b][i] for b in range(n_batch)]) for i in range(n_layers)]

        # Batch decoding.
        h, _, states = self.encoder.forward_one_step(self.embed(ys), self._target_mask(ys), cache=batch_state)
        h = self.decoder(h[:, -1])
        logp = h.log_softmax(dim=-1)

        # Transpose state of [layer, batch] back into [batch, layer].
        state_list = [[states[i][b] for i in range(n_layers)] for b in range(n_batch)]
        return logp, state_list

    def nll(
        self,
        text: torch.Tensor,
        punc: torch.Tensor,
        text_lengths: torch.Tensor,
        punc_lengths: torch.Tensor,
        max_length: Optional[int] = None,
        vad_indexes: Optional[torch.Tensor] = None,
        vad_indexes_lengths: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute negative log likelihood (nll).

        Normally, this function is called in batchify_nll.

        Args:
            text: (Batch, Length)
            punc: (Batch, Length)
            text_lengths: (Batch,)
            max_length: int
        """
        batch_size = text.size(0)
        # For data parallel
        if max_length is None:
            text = text[:, : text_lengths.max()]
            punc = punc[:, : text_lengths.max()]
        else:
            text = text[:, :max_length]
            punc = punc[:, :max_length]

        if self.with_vad():
            # Should be VadRealtimeTransformer.
            assert vad_indexes is not None
            y, _ = self.punc_forward(text, text_lengths, vad_indexes)
        else:
            # Should be TargetDelayTransformer.
            y, _ = self.punc_forward(text, text_lengths)

        if not self.training:
            # In evaluation mode, report a micro-averaged F1 score instead of
            # the likelihood, broadcast to one value per token.
            from sklearn.metrics import f1_score

            _, indices = y.view(-1, y.shape[-1]).topk(1, dim=1)
            f1 = f1_score(
                punc.view(-1).detach().cpu().numpy(),
                indices.squeeze(-1).detach().cpu().numpy(),
                average="micro",
            )
            nll = torch.Tensor([f1]).repeat(text_lengths.sum())
            return nll, text_lengths
        else:
            # Calc negative log likelihood. nll: (BxL,)
            self.punc_weight = self.punc_weight.to(punc.device)
            nll = F.cross_entropy(
                y.view(-1, y.shape[-1]),
                punc.view(-1),
                self.punc_weight,
                reduction="none",
                ignore_index=self.ignore_id,
            )
            # Zero out the loss at padded positions.
            if max_length is None:
                nll.masked_fill_(make_pad_mask(text_lengths).to(nll.device).view(-1), 0.0)
            else:
                nll.masked_fill_(
                    make_pad_mask(text_lengths, maxlen=max_length + 1).to(nll.device).view(-1),
                    0.0,
                )
            # nll: (BxL,) -> (B, L)
            nll = nll.view(batch_size, -1)
            return nll, text_lengths
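
    # Illustrative example of the masking above: with text_lengths = [3, 2] and
    # a padded length of 3, make_pad_mask returns [[F, F, F], [F, F, T]] (True
    # at padding positions), so the padded final position of the second sample
    # is zeroed and contributes nothing to the summed loss in forward().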
    def forward(
        self,
        text: torch.Tensor,
        punc: torch.Tensor,
        text_lengths: torch.Tensor,
        punc_lengths: torch.Tensor,
        vad_indexes: Optional[torch.Tensor] = None,
        vad_indexes_lengths: Optional[torch.Tensor] = None,
    ):
        nll, y_lengths = self.nll(text, punc, text_lengths, punc_lengths, vad_indexes=vad_indexes)
        ntokens = y_lengths.sum()
        loss = nll.sum() / ntokens
        stats = dict(loss=loss.detach())

        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        loss, stats, weight = force_gatherable((loss, stats, ntokens), loss.device)
        return loss, stats, weight

    def generate(
        self,
        data_in,
        data_lengths=None,
        key: list = None,
        tokenizer=None,
        frontend=None,
        **kwargs,
    ):
        assert len(data_in) == 1
        text = load_audio_text_image_video(data_in, data_type=kwargs.get("data_type", "text"))[0]
        vad_indexes = kwargs.get("vad_indexes", None)
        split_size = kwargs.get("split_size", 20)

        # Optionally load a user dictionary for jieba word segmentation.
        jieba_usr_dict = kwargs.get("jieba_usr_dict", None)
        if jieba_usr_dict and isinstance(jieba_usr_dict, str):
            import jieba

            jieba.load_userdict(jieba_usr_dict)
            jieba_usr_dict = jieba
            kwargs["jieba_usr_dict"] = jieba_usr_dict

        tokens = split_words(text, jieba_usr_dict=jieba_usr_dict)
        tokens_int = tokenizer.encode(tokens)

        # Split the token stream into mini-sentences of at most split_size tokens.
        mini_sentences = split_to_mini_sentence(tokens, split_size)
        mini_sentences_id = split_to_mini_sentence(tokens_int, split_size)
        assert len(mini_sentences) == len(mini_sentences_id)
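
        # Streaming strategy: each mini-sentence is prepended with the cached
        # tail of the previous chunk (everything after the last predicted
        # sentence end), so punctuation is decided with some right-hand
        # context. If no sentence end is found and the buffer grows beyond
        # cache_pop_trigger_limit tokens, the last comma is promoted to a
        # sentence end to bound the cache size.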
        cache_sent = []
        cache_sent_id = torch.from_numpy(np.array([], dtype="int32"))
        new_mini_sentence = ""
        new_mini_sentence_punc = []
        cache_pop_trigger_limit = 200
        results = []
        meta_data = {}
        punc_array = None

        for mini_sentence_i in range(len(mini_sentences)):
            mini_sentence = mini_sentences[mini_sentence_i]
            mini_sentence_id = mini_sentences_id[mini_sentence_i]
            mini_sentence = cache_sent + mini_sentence
            mini_sentence_id = np.concatenate((cache_sent_id, mini_sentence_id), axis=0)
            data = {
                "text": torch.unsqueeze(torch.from_numpy(mini_sentence_id), 0),
                "text_lengths": torch.from_numpy(np.array([len(mini_sentence_id)], dtype="int32")),
            }
            data = to_device(data, kwargs["device"])

            # Predict one punctuation id per token.
            y, _ = self.punc_forward(**data)
            _, indices = y.view(-1, y.shape[-1]).topk(1, dim=1)
            punctuations = indices
            if indices.size()[0] != 1:
                punctuations = torch.squeeze(indices)
            assert punctuations.size()[0] == len(mini_sentence)

            # Search for the last period/question mark to use as the cache boundary.
            if mini_sentence_i < len(mini_sentences) - 1:
                sentenceEnd = -1
                last_comma_index = -1
                for i in range(len(punctuations) - 2, 1, -1):
                    if self.punc_list[punctuations[i]] == "。" or self.punc_list[punctuations[i]] == "?":
                        sentenceEnd = i
                        break
                    if last_comma_index < 0 and self.punc_list[punctuations[i]] == ",":
                        last_comma_index = i
                if sentenceEnd < 0 and len(mini_sentence) > cache_pop_trigger_limit and last_comma_index >= 0:
                    # The sentence is too long; cut it off at a comma.
                    sentenceEnd = last_comma_index
                    punctuations[sentenceEnd] = self.sentence_end_id
                cache_sent = mini_sentence[sentenceEnd + 1 :]
                cache_sent_id = mini_sentence_id[sentenceEnd + 1 :]
                mini_sentence = mini_sentence[0 : sentenceEnd + 1]
                punctuations = punctuations[0 : sentenceEnd + 1]

            punctuations_np = punctuations.cpu().numpy()
            new_mini_sentence_punc += [int(x) for x in punctuations_np]

            # Re-assemble the text, inserting punctuation marks and handling the
            # spacing/casing differences between ASCII words and CJK characters
            # (an ASCII character encodes to a single byte in UTF-8).
            words_with_punc = []
            for i in range(len(mini_sentence)):
                if (
                    i == 0
                    or self.punc_list[punctuations[i - 1]] == "。"
                    or self.punc_list[punctuations[i - 1]] == "?"
                ) and len(mini_sentence[i][0].encode()) == 1:
                    # Capitalize the first word of a sentence.
                    mini_sentence[i] = mini_sentence[i].capitalize()
                if i == 0:
                    if len(mini_sentence[i][0].encode()) == 1:
                        mini_sentence[i] = " " + mini_sentence[i]
                if i > 0:
                    if len(mini_sentence[i][0].encode()) == 1 and len(mini_sentence[i - 1][0].encode()) == 1:
                        mini_sentence[i] = " " + mini_sentence[i]
                words_with_punc.append(mini_sentence[i])
                if self.punc_list[punctuations[i]] != "_":
                    punc_res = self.punc_list[punctuations[i]]
                    if len(mini_sentence[i][0].encode()) == 1:
                        # Use ASCII punctuation variants after ASCII words.
                        if punc_res == ",":
                            punc_res = ","
                        elif punc_res == "。":
                            punc_res = "."
                        elif punc_res == "?":
                            punc_res = "?"
                    words_with_punc.append(punc_res)
            new_mini_sentence += "".join(words_with_punc)

            # Add a period at the end of the final sentence.
            new_mini_sentence_out = new_mini_sentence
            new_mini_sentence_punc_out = new_mini_sentence_punc
            if mini_sentence_i == len(mini_sentences) - 1:
                if new_mini_sentence[-1] == "," or new_mini_sentence[-1] == "、":
                    new_mini_sentence_out = new_mini_sentence[:-1] + "。"
                    new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.sentence_end_id]
                elif new_mini_sentence[-1] == ",":
                    new_mini_sentence_out = new_mini_sentence[:-1] + "."
                    new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.sentence_end_id]
                elif (
                    new_mini_sentence[-1] != "。"
                    and new_mini_sentence[-1] != "?"
                    and len(new_mini_sentence[-1].encode()) != 1
                ):
                    new_mini_sentence_out = new_mini_sentence + "。"
                    new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.sentence_end_id]
                elif (
                    new_mini_sentence[-1] != "."
                    and new_mini_sentence[-1] != "?"
                    and len(new_mini_sentence[-1].encode()) == 1
                ):
                    new_mini_sentence_out = new_mini_sentence + "."
                    new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.sentence_end_id]

            # Keep a punctuation-id array for downstream punc segmentation.
            if punc_array is None:
                punc_array = punctuations
            else:
                punc_array = torch.cat([punc_array, punctuations], dim=0)

        result_i = {"key": key[0], "text": new_mini_sentence_out, "punc_array": punc_array}
        results.append(result_i)
        return results, meta_data
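

# Minimal usage sketch (an illustration, not part of the upstream file):
# CTTransformer is normally driven through FunASR's AutoModel wrapper, which
# resolves this class via the "model_classes" registry above and calls
# generate() with a tokenizer and device. The model id "ct-punc" is an
# assumption for illustration; any CT-Transformer punctuation checkpoint id
# can be substituted.
if __name__ == "__main__":
    from funasr import AutoModel

    punc_model = AutoModel(model="ct-punc")  # assumed model id
    res = punc_model.generate(input="hello world how are you the weather is nice today")
    print(res[0]["text"])  # punctuated text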