# target_delay_transformer.py

from typing import Any, List, Optional, Tuple

import torch
import torch.nn as nn

from funasr.modules.embedding import SinusoidalPositionEncoder
# from funasr.models.encoder.transformer_encoder import TransformerEncoder as Encoder
from funasr.punctuation.sanm_encoder import SANMEncoder as Encoder
from funasr.modules.mask import subsequent_n_mask
from funasr.train.abs_model import AbsPunctuation


class TargetDelayTransformer(AbsPunctuation):
    def __init__(
        self,
        vocab_size: int,
        punc_size: int,
        pos_enc: Optional[str] = None,
        embed_unit: int = 128,
        att_unit: int = 256,
        head: int = 2,
        unit: int = 1024,
        layer: int = 4,
        dropout_rate: float = 0.5,
    ):
        super().__init__()
        if pos_enc == "sinusoidal":
            # pos_enc_class = PositionalEncoding
            pos_enc_class = SinusoidalPositionEncoder
        elif pos_enc is None:

            def pos_enc_class(*args, **kwargs):
                return nn.Sequential()  # identity

        else:
            raise ValueError(f"unknown pos-enc option: {pos_enc}")

        # NOTE: pos_enc_class is currently unused; the encoder is configured
        # with input_layer="pe" below and applies its own positional encoding.
        self.embed = nn.Embedding(vocab_size, embed_unit)
        self.encoder = Encoder(
            input_size=embed_unit,
            output_size=att_unit,
            attention_heads=head,
            linear_units=unit,
            num_blocks=layer,
            dropout_rate=dropout_rate,
            input_layer="pe",
            # pos_enc_class=pos_enc_class,
            padding_idx=0,
        )
        self.decoder = nn.Linear(att_unit, punc_size)

    def _target_mask(self, ys_in_pad):
        # Used by ``score``/``batch_score`` below: combines the padding mask
        # with a subsequent-n mask (n=5) that limits look-ahead over future tokens.
        ys_mask = ys_in_pad != 0
        m = subsequent_n_mask(ys_mask.size(-1), 5, device=ys_mask.device).unsqueeze(0)
        return ys_mask.unsqueeze(-2) & m

    def forward(self, input: torch.Tensor, text_lengths: torch.Tensor) -> Tuple[torch.Tensor, None]:
        """Compute punctuation logits for a batch of token sequences.

        Args:
            input (torch.Tensor): Input token ids. (batch, len)
            text_lengths (torch.Tensor): Valid length of each sequence. (batch,)

        Returns:
            Tuple[torch.Tensor, None]: Punctuation logits (batch, len, punc_size) and None.
        """
        x = self.embed(input)
        # mask = self._target_mask(input)
        h, _, _ = self.encoder(x, text_lengths)
        y = self.decoder(h)
        return y, None

    def with_vad(self):
        return False

    def score(self, y: torch.Tensor, state: Any, x: torch.Tensor) -> Tuple[torch.Tensor, Any]:
        """Score new token.

        Args:
            y (torch.Tensor): 1D torch.int64 prefix tokens.
            state: Scorer state for prefix tokens.
            x (torch.Tensor): Encoder feature that generates ys.

        Returns:
            Tuple[torch.Tensor, Any]: Tuple of
                torch.float32 scores for the next token (vocab_size)
                and next state for ys.
        """
        y = y.unsqueeze(0)
        h, _, cache = self.encoder.forward_one_step(self.embed(y), self._target_mask(y), cache=state)
        h = self.decoder(h[:, -1])
        logp = h.log_softmax(dim=-1).squeeze(0)
        return logp, cache

    def batch_score(self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor) -> Tuple[torch.Tensor, List[Any]]:
        """Score new token batch.

        Args:
            ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
            states (List[Any]): Scorer states for prefix tokens.
            xs (torch.Tensor): The encoder feature that generates ys (n_batch, xlen, n_feat).

        Returns:
            Tuple[torch.Tensor, List[Any]]: Tuple of
                batchified scores for the next token with shape `(n_batch, vocab_size)`
                and next state list for ys.
        """
        # merge states
        n_batch = len(ys)
        n_layers = len(self.encoder.encoders)
        if states[0] is None:
            batch_state = None
        else:
            # transpose state of [batch, layer] into [layer, batch]
            batch_state = [torch.stack([states[b][i] for b in range(n_batch)]) for i in range(n_layers)]

        # batch decoding
        h, _, states = self.encoder.forward_one_step(self.embed(ys), self._target_mask(ys), cache=batch_state)
        h = self.decoder(h[:, -1])
        logp = h.log_softmax(dim=-1)

        # transpose state of [layer, batch] into [batch, layer]
        state_list = [[states[i][b] for i in range(n_layers)] for b in range(n_batch)]
        return logp, state_list
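

# Minimal usage sketch (illustrative only, not part of the original module): shows
# how ``forward`` is typically called to obtain per-token punctuation logits. The
# vocabulary size, punctuation-class count, and tensor shapes below are made-up
# assumptions, and it assumes the funasr package (and its SANMEncoder with the
# arguments used in ``__init__`` above) is importable.
if __name__ == "__main__":
    model = TargetDelayTransformer(vocab_size=1000, punc_size=5)
    tokens = torch.randint(low=1, high=1000, size=(2, 16), dtype=torch.int64)  # (batch, len)
    lengths = torch.tensor([16, 12], dtype=torch.int32)  # valid length of each sequence
    logits, _ = model(tokens, lengths)
    print(logits.shape)  # expected: torch.Size([2, 16, 5])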