vad_realtime_transformer.py

from typing import Any
from typing import List
from typing import Tuple

import torch
import torch.nn as nn

from funasr.modules.embedding import SinusoidalPositionEncoder
from funasr.punctuation.sanm_encoder import SANMVadEncoder as Encoder
from funasr.train.abs_model import AbsPunctuation

class VadRealtimeTransformer(AbsPunctuation):
    def __init__(
        self,
        vocab_size: int,
        punc_size: int,
        pos_enc: str = None,
        embed_unit: int = 128,
        att_unit: int = 256,
        head: int = 2,
        unit: int = 1024,
        layer: int = 4,
        dropout_rate: float = 0.5,
        kernel_size: int = 11,
        sanm_shfit: int = 0,  # spelling matches the upstream encoder's keyword
    ):
        super().__init__()
        if pos_enc == "sinusoidal":
            # pos_enc_class = PositionalEncoding
            pos_enc_class = SinusoidalPositionEncoder
        elif pos_enc is None:

            def pos_enc_class(*args, **kwargs):
                return nn.Sequential()  # identity

        else:
            raise ValueError(f"unknown pos-enc option: {pos_enc}")

        self.embed = nn.Embedding(vocab_size, embed_unit)
        self.encoder = Encoder(
            input_size=embed_unit,
            output_size=att_unit,
            attention_heads=head,
            linear_units=unit,
            num_blocks=layer,
            dropout_rate=dropout_rate,
            input_layer="pe",
            # pos_enc_class=pos_enc_class,
            padding_idx=0,
            kernel_size=kernel_size,
            sanm_shfit=sanm_shfit,
        )
        self.decoder = nn.Linear(att_unit, punc_size)

    # NOTE: score() and batch_score() below call self._target_mask(), so this
    # helper (and an import of subsequent_n_mask) must be restored before the
    # scorer interface can be used.
    # def _target_mask(self, ys_in_pad):
    #     ys_mask = ys_in_pad != 0
    #     m = subsequent_n_mask(ys_mask.size(-1), 5, device=ys_mask.device).unsqueeze(0)
    #     return ys_mask.unsqueeze(-2) & m

    def forward(
        self,
        input: torch.Tensor,
        text_lengths: torch.Tensor,
        vad_indexes: torch.Tensor,
    ) -> Tuple[torch.Tensor, None]:
        """Compute punctuation logits from padded token sequences.

        Args:
            input (torch.Tensor): Input token ids. (batch, len)
            text_lengths (torch.Tensor): Valid length of each sequence. (batch,)
            vad_indexes (torch.Tensor): VAD split indexes consumed by the encoder.
        """
        x = self.embed(input)
        # mask = self._target_mask(input)
        h, _, _ = self.encoder(x, text_lengths, vad_indexes)
        y = self.decoder(h)
        return y, None

    def with_vad(self):
        return True

    def score(self, y: torch.Tensor, state: Any, x: torch.Tensor) -> Tuple[torch.Tensor, Any]:
        """Score new token.

        Args:
            y (torch.Tensor): 1D torch.int64 prefix tokens.
            state: Scorer state for prefix tokens.
            x (torch.Tensor): Encoder feature that generates ys.

        Returns:
            tuple[torch.Tensor, Any]: Tuple of
                torch.float32 scores for next token (vocab_size)
                and next state for ys.
        """
        y = y.unsqueeze(0)
        h, _, cache = self.encoder.forward_one_step(self.embed(y), self._target_mask(y), cache=state)
        h = self.decoder(h[:, -1])
        logp = h.log_softmax(dim=-1).squeeze(0)
        return logp, cache

    def batch_score(
        self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor
    ) -> Tuple[torch.Tensor, List[Any]]:
        """Score new token batch.

        Args:
            ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
            states (List[Any]): Scorer states for prefix tokens.
            xs (torch.Tensor):
                The encoder feature that generates ys (n_batch, xlen, n_feat).

        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchified scores for next token with shape `(n_batch, vocab_size)`
                and next state list for ys.
        """
        # merge states: transpose the per-hypothesis caches of
        # [batch, layer] into per-layer batches of [layer, batch]
        n_batch = len(ys)
        n_layers = len(self.encoder.encoders)
        if states[0] is None:
            batch_state = None
        else:
            batch_state = [
                torch.stack([states[b][i] for b in range(n_batch)])
                for i in range(n_layers)
            ]

        # batch decoding
        h, _, states = self.encoder.forward_one_step(self.embed(ys), self._target_mask(ys), cache=batch_state)
        h = self.decoder(h[:, -1])
        logp = h.log_softmax(dim=-1)

        # transpose state of [layer, batch] back into [batch, layer]
        state_list = [[states[i][b] for i in range(n_layers)] for b in range(n_batch)]
        return logp, state_list
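

# --- Usage sketch (illustrative, not part of the upstream file) -------------
# A minimal smoke test of the forward() interface above. The sizes below and
# the shape of `vad_indexes` are assumptions made for demonstration only; the
# exact VAD-index format expected here is defined by SANMVadEncoder.
if __name__ == "__main__":
    model = VadRealtimeTransformer(
        vocab_size=1000,  # assumed toy vocabulary size
        punc_size=5,      # e.g. blank plus four punctuation classes (assumed)
    )
    model.eval()

    batch, seq_len = 2, 20
    tokens = torch.randint(1, 1000, (batch, seq_len))       # padded token ids
    lengths = torch.tensor([seq_len, seq_len - 5])          # valid length per row
    vad_indexes = torch.tensor([[seq_len], [seq_len - 5]])  # assumed placeholder

    with torch.no_grad():
        logits, _ = model(tokens, lengths, vad_indexes)
    print(logits.shape)  # one punctuation distribution per token: (2, 20, 5)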