abs_model.py

from abc import ABC
from abc import abstractmethod
from typing import Dict
from typing import Optional
from typing import Tuple

import torch
import torch.nn.functional as F
from typeguard import check_argument_types

from funasr.modules.nets_utils import make_pad_mask
from funasr.torch_utils.device_funcs import force_gatherable
from funasr.train.abs_espnet_model import AbsESPnetModel
from funasr.modules.scorers.scorer_interface import BatchScorerInterface

class AbsPunctuation(torch.nn.Module, BatchScorerInterface, ABC):
    """The abstract punctuation model class.

    To share the loss calculation among different models,
    we use the delegate pattern here:
    an instance of this class should be passed to "PunctuationModel".
    This "model" is one of the mediator objects for the "Task" class.
    """
    @abstractmethod
    def forward(self, input: torch.Tensor, hidden: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        raise NotImplementedError

    @abstractmethod
    def with_vad(self) -> bool:
        raise NotImplementedError
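
# A minimal sketch of a concrete subclass, for illustration only: the name
# LinearPunctuation and its embedding-plus-linear "encoder" are assumptions,
# standing in for a real encoder such as TargetDelayTransformer.
class LinearPunctuation(AbsPunctuation):
    def __init__(self, vocab_size: int, punc_size: int, embed_dim: int = 64):
        super().__init__()
        self.embed = torch.nn.Embedding(vocab_size, embed_dim)
        self.proj = torch.nn.Linear(embed_dim, punc_size)

    def forward(self, text: torch.Tensor, text_lengths: torch.Tensor) -> Tuple[torch.Tensor, None]:
        # (Batch, Length) -> (Batch, Length, punc_size) logits; no hidden state.
        return self.proj(self.embed(text)), None

    def with_vad(self) -> bool:
        return False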

class PunctuationModel(AbsESPnetModel):
    def __init__(self, punc_model: AbsPunctuation, vocab_size: int, ignore_id: int = 0, punc_weight: list = None):
        assert check_argument_types()
        super().__init__()
        self.punc_model = punc_model
        # Per-class weights for the cross-entropy loss; fall back to uniform
        # weights when none are given.
        if punc_weight is None:
            punc_weight = [1.0] * vocab_size
        self.punc_weight = torch.Tensor(punc_weight)
        self.sos = 1
        self.eos = 2
        # ignore_id may be assumed as 0, shared with the CTC-blank symbol for ASR.
        self.ignore_id = ignore_id
    def nll(
        self,
        text: torch.Tensor,
        punc: torch.Tensor,
        text_lengths: torch.Tensor,
        punc_lengths: torch.Tensor,
        max_length: Optional[int] = None,
        vad_indexes: Optional[torch.Tensor] = None,
        vad_indexes_lengths: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute negative log likelihood (nll).

        Normally, this function is called in batchify_nll.

        Args:
            text: (Batch, Length)
            punc: (Batch, Length)
            text_lengths: (Batch,)
            punc_lengths: (Batch,)
            max_length: truncate every sample to this length;
                defaults to text_lengths.max() when None
            vad_indexes: required when the wrapped model has with_vad() == True
        """
        batch_size = text.size(0)
        # For data parallel
        if max_length is None:
            text = text[:, : text_lengths.max()]
            punc = punc[:, : text_lengths.max()]
        else:
            text = text[:, :max_length]
            punc = punc[:, :max_length]

        if self.punc_model.with_vad():
            # Should be VadRealtimeTransformer
            assert vad_indexes is not None
            y, _ = self.punc_model(text, text_lengths, vad_indexes)
        else:
            # Should be TargetDelayTransformer
            y, _ = self.punc_model(text, text_lengths)
        if not self.training:
            # In evaluation, report the micro-averaged F1 score instead of the
            # true nll, broadcast to one value per token so that the caller's
            # length-weighted averaging still works.
            from sklearn.metrics import f1_score

            _, indices = y.view(-1, y.shape[-1]).topk(1, dim=1)
            score = f1_score(
                punc.view(-1).detach().cpu().numpy(),
                indices.squeeze(-1).detach().cpu().numpy(),
                average="micro",
            )
            nll = torch.Tensor([score]).repeat(int(text_lengths.sum()))
            return nll, text_lengths

        # Calc negative log likelihood
        # nll: (BxL,)
        self.punc_weight = self.punc_weight.to(punc.device)
        nll = F.cross_entropy(
            y.view(-1, y.shape[-1]),
            punc.view(-1),
            self.punc_weight,
            reduction="none",
            ignore_index=self.ignore_id,
        )
        # Zero out the contribution of the padded positions.
        if max_length is None:
            nll.masked_fill_(make_pad_mask(text_lengths).to(nll.device).view(-1), 0.0)
        else:
            nll.masked_fill_(
                make_pad_mask(text_lengths, maxlen=max_length).to(nll.device).view(-1),
                0.0,
            )
        # nll: (BxL,) -> (B, L)
        nll = nll.view(batch_size, -1)
        return nll, text_lengths

    def batchify_nll(
        self,
        text: torch.Tensor,
        punc: torch.Tensor,
        text_lengths: torch.Tensor,
        punc_lengths: torch.Tensor,
        batch_size: int = 100,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute negative log likelihood (nll) from a transformer language model.

        To avoid OOM, this function separates the input into batches,
        then calls nll for each batch and combines the results.

        Args:
            text: (Batch, Length)
            punc: (Batch, Length)
            text_lengths: (Batch,)
            punc_lengths: (Batch,)
            batch_size: number of samples in each batch when computing nll;
                decrease it to avoid OOM, or increase it for speed
        """
        total_num = text.size(0)
        if total_num <= batch_size:
            nll, x_lengths = self.nll(text, punc, text_lengths, punc_lengths)
        else:
            nlls = []
            x_lengths = []
            max_length = int(text_lengths.max())
            start_idx = 0
            while True:
                end_idx = min(start_idx + batch_size, total_num)
                batch_text = text[start_idx:end_idx, :]
                batch_punc = punc[start_idx:end_idx, :]
                batch_text_lengths = text_lengths[start_idx:end_idx]
                batch_punc_lengths = punc_lengths[start_idx:end_idx]
                # batch_nll: [B * T]
                batch_nll, batch_x_lengths = self.nll(
                    batch_text, batch_punc, batch_text_lengths, batch_punc_lengths, max_length=max_length
                )
                nlls.append(batch_nll)
                x_lengths.append(batch_x_lengths)
                start_idx = end_idx
                if start_idx == total_num:
                    break
            nll = torch.cat(nlls)
            x_lengths = torch.cat(x_lengths)
        assert nll.size(0) == total_num
        assert x_lengths.size(0) == total_num
        return nll, x_lengths

    def forward(
        self,
        text: torch.Tensor,
        punc: torch.Tensor,
        text_lengths: torch.Tensor,
        punc_lengths: torch.Tensor,
        vad_indexes: Optional[torch.Tensor] = None,
        vad_indexes_lengths: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        nll, y_lengths = self.nll(text, punc, text_lengths, punc_lengths, vad_indexes=vad_indexes)
        ntokens = y_lengths.sum()
        loss = nll.sum() / ntokens
        stats = dict(loss=loss.detach())
        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        loss, stats, weight = force_gatherable((loss, stats, ntokens), loss.device)
        return loss, stats, weight

    def collect_feats(self, text: torch.Tensor, punc: torch.Tensor,
                      text_lengths: torch.Tensor) -> Dict[str, torch.Tensor]:
        return {}

    def inference(self,
                  text: torch.Tensor,
                  text_lengths: torch.Tensor,
                  vad_indexes: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, None]:
        if self.punc_model.with_vad():
            assert vad_indexes is not None
            return self.punc_model(text, text_lengths, vad_indexes)
        else:
            return self.punc_model(text, text_lengths)
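
# Hypothetical usage sketch: wire the LinearPunctuation example above into
# PunctuationModel and run one forward pass on random data. All sizes and the
# five-class punctuation inventory (index 0 reserved as ignore_id) are
# assumptions for illustration.
if __name__ == "__main__":
    vocab_size, punc_size, batch, length = 100, 5, 2, 8
    model = PunctuationModel(
        punc_model=LinearPunctuation(vocab_size, punc_size),
        vocab_size=punc_size,
        punc_weight=[1.0] * punc_size,
    )
    text = torch.randint(1, vocab_size, (batch, length))
    punc = torch.randint(1, punc_size, (batch, length))
    lengths = torch.tensor([length, length - 2])
    loss, stats, weight = model(text, punc, lengths, lengths)
    print(stats)  # e.g. {'loss': tensor(...)}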