espnet_model.py

from typing import Dict
from typing import Optional
from typing import Tuple

import torch
import torch.nn.functional as F
from typeguard import check_argument_types

from funasr.modules.nets_utils import make_pad_mask
from funasr.punctuation.abs_model import AbsPunctuation
from funasr.torch_utils.device_funcs import force_gatherable
from funasr.train.abs_espnet_model import AbsESPnetModel


class ESPnetPunctuationModel(AbsESPnetModel):
    def __init__(self, punc_model: AbsPunctuation, vocab_size: int, ignore_id: int = 0):
        assert check_argument_types()
        super().__init__()
        self.punc_model = punc_model
        self.sos = 1
        self.eos = 2
        # ignore_id may be assumed to be 0, shared with the CTC blank symbol for ASR.
        self.ignore_id = ignore_id
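        # Note: self.sos and self.eos are only referenced by the
        # commented-out sentence-pair construction in nll() below.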

    def nll(
        self,
        text: torch.Tensor,
        punc: torch.Tensor,
        text_lengths: torch.Tensor,
        punc_lengths: torch.Tensor,
        max_length: Optional[int] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute the negative log likelihood (nll).

        Normally, this function is called from batchify_nll.

        Args:
            text: (Batch, Length)
            punc: (Batch, Length)
            text_lengths: (Batch,)
            punc_lengths: (Batch,)
            max_length: int, width to which text and punc are truncated
        """
        batch_size = text.size(0)
        # For data parallel
        if max_length is None:
            text = text[:, : text_lengths.max()]
            punc = punc[:, : text_lengths.max()]
        else:
            text = text[:, :max_length]
            punc = punc[:, :max_length]

        # 1. Create a sentence pair like '<sos> w1 w2 w3' and 'w1 w2 w3 <eos>'
        #    (kept commented out: punctuation targets align token-for-token
        #    with the input text, so no shifting is needed)
        # text: (Batch, Length) -> x, y: (Batch, Length + 1)
        # x = F.pad(text, [1, 0], "constant", self.eos)
        # t = F.pad(text, [0, 1], "constant", self.ignore_id)
        # for i, l in enumerate(text_lengths):
        #     t[i, l] = self.sos
        # x_lengths = text_lengths + 1

        # 2. Forward the punctuation model
        # text: (Batch, Length) -> y: (Batch, Length, NVocab)
        y, _ = self.punc_model(text, text_lengths)
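        # Shape walkthrough (illustrative): with Batch=2, Length=4 and
        # NVocab=5, y is (2, 4, 5); y.view(-1, 5) below is (8, 5) and
        # punc.view(-1) is (8,), so the loss/score is computed per token.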

        # 3. Calc negative log likelihood
        # nll: (BxL,)
        if not self.training:
            # Evaluation: report the micro-averaged F1 of the predicted
            # punctuation labels instead of the nll, repeated once per token
            # so that the token-normalized average in forward() recovers the
            # scalar score.
            from sklearn.metrics import f1_score

            _, indices = y.view(-1, y.shape[-1]).topk(1, dim=1)
            f1 = f1_score(
                punc.view(-1).detach().cpu().numpy(),
                indices.squeeze(-1).detach().cpu().numpy(),
                average="micro",
            )
            nll = torch.Tensor([f1]).repeat(text_lengths.sum())
            return nll, text_lengths
        else:
            nll = F.cross_entropy(
                y.view(-1, y.shape[-1]),
                punc.view(-1),
                reduction="none",
                ignore_index=self.ignore_id,
            )
            # Zero out the loss at padded positions.
            if max_length is None:
                nll.masked_fill_(
                    make_pad_mask(text_lengths).to(nll.device).view(-1), 0.0
                )
            else:
                # y was truncated to max_length above, so the pad mask must
                # use the same width.
                nll.masked_fill_(
                    make_pad_mask(text_lengths, maxlen=max_length).to(nll.device).view(-1),
                    0.0,
                )
            # nll: (BxL,) -> (B, L)
            nll = nll.view(batch_size, -1)
            return nll, text_lengths

    def batchify_nll(
        self,
        text: torch.Tensor,
        punc: torch.Tensor,
        text_lengths: torch.Tensor,
        punc_lengths: torch.Tensor,
        batch_size: int = 100,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute the negative log likelihood (nll) of the punctuation model.

        To avoid OOM, this function separates the input into batches,
        calls nll on each batch, then combines the results and returns them.

        Args:
            text: (Batch, Length)
            punc: (Batch, Length)
            text_lengths: (Batch,)
            punc_lengths: (Batch,)
            batch_size: int, the number of samples in each batch when
                computing nll; you may lower this to avoid OOM or raise it
                to speed up the computation
        """
        total_num = text.size(0)
        if total_num <= batch_size:
            nll, x_lengths = self.nll(text, punc, text_lengths, punc_lengths)
        else:
            nlls = []
            x_lengths = []
            max_length = int(text_lengths.max())
            start_idx = 0
            while True:
                end_idx = min(start_idx + batch_size, total_num)
                batch_text = text[start_idx:end_idx, :]
                batch_punc = punc[start_idx:end_idx, :]
                batch_text_lengths = text_lengths[start_idx:end_idx]
                batch_punc_lengths = punc_lengths[start_idx:end_idx]
                # batch_nll: (B, T)
                batch_nll, batch_x_lengths = self.nll(
                    batch_text,
                    batch_punc,
                    batch_text_lengths,
                    batch_punc_lengths,
                    max_length=max_length,
                )
                nlls.append(batch_nll)
                x_lengths.append(batch_x_lengths)
                start_idx = end_idx
                if start_idx == total_num:
                    break
            nll = torch.cat(nlls)
            x_lengths = torch.cat(x_lengths)
        assert nll.size(0) == total_num
        assert x_lengths.size(0) == total_num
        return nll, x_lengths

    def forward(
        self,
        text: torch.Tensor,
        punc: torch.Tensor,
        text_lengths: torch.Tensor,
        punc_lengths: torch.Tensor,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        nll, y_lengths = self.nll(text, punc, text_lengths, punc_lengths)
        ntokens = y_lengths.sum()
        loss = nll.sum() / ntokens
        stats = dict(loss=loss.detach())

        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        loss, stats, weight = force_gatherable((loss, stats, ntokens), loss.device)
        return loss, stats, weight

    def collect_feats(
        self,
        text: torch.Tensor,
        punc: torch.Tensor,
        text_lengths: torch.Tensor,
        punc_lengths: torch.Tensor,
    ) -> Dict[str, torch.Tensor]:
        return {}

    def inference(self, text: torch.Tensor, text_lengths: torch.Tensor) -> Tuple[torch.Tensor, None]:
        return self.punc_model(text, text_lengths)
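

# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original module): it
# mirrors the training branch of ESPnetPunctuationModel.nll() on toy tensors,
# relying on make_pad_mask(lengths) marking padded positions with True, as the
# code above does. The toy shapes and the `nvocab` value are made up for the
# example.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    torch.manual_seed(0)
    batch, length, nvocab, ignore_id = 2, 4, 5, 0

    # Stand-ins for the punctuation model's logits and the label tensors.
    y = torch.randn(batch, length, nvocab)            # (Batch, Length, NVocab)
    punc = torch.randint(1, nvocab, (batch, length))  # (Batch, Length)
    text_lengths = torch.tensor([4, 2])               # (Batch,)

    # Per-token cross entropy, as in the training branch of nll().
    nll = F.cross_entropy(
        y.view(-1, nvocab), punc.view(-1), reduction="none", ignore_index=ignore_id
    )
    nll.masked_fill_(make_pad_mask(text_lengths).view(-1), 0.0)
    nll = nll.view(batch, -1)                         # (Batch, Length)

    # forward() reduces this to a scalar loss normalized by the token count.
    loss = nll.sum() / text_lengths.sum()
    print(nll.shape, float(loss))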