# sanm_encoder.py

import torch
import torch.nn as nn

from funasr.export.utils.torch_function import MakePadMask
from funasr.export.utils.torch_function import sequence_mask
from funasr.modules.attention import MultiHeadedAttentionSANM
from funasr.export.models.modules.multihead_att import MultiHeadedAttentionSANM as MultiHeadedAttentionSANM_export
from funasr.export.models.modules.encoder_layer import EncoderLayerSANM as EncoderLayerSANM_export
from funasr.modules.positionwise_feed_forward import PositionwiseFeedForward
from funasr.export.models.modules.feedforward import PositionwiseFeedForward as PositionwiseFeedForward_export

class SANMEncoder(nn.Module):
    def __init__(
        self,
        model,
        max_seq_len=512,
        feats_dim=560,
        model_name='encoder',
        onnx: bool = True,
    ):
        super().__init__()
        self.embed = model.embed
        self.model = model
        self.feats_dim = feats_dim
        self._output_size = model._output_size
        if onnx:
            self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
        else:
            self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
        # Swap the training-time attention and feed-forward modules for their
        # export-friendly counterparts, then wrap each layer for export.
        if hasattr(model, 'encoders0'):
            for i, d in enumerate(self.model.encoders0):
                if isinstance(d.self_attn, MultiHeadedAttentionSANM):
                    d.self_attn = MultiHeadedAttentionSANM_export(d.self_attn)
                if isinstance(d.feed_forward, PositionwiseFeedForward):
                    d.feed_forward = PositionwiseFeedForward_export(d.feed_forward)
                self.model.encoders0[i] = EncoderLayerSANM_export(d)
        for i, d in enumerate(self.model.encoders):
            if isinstance(d.self_attn, MultiHeadedAttentionSANM):
                d.self_attn = MultiHeadedAttentionSANM_export(d.self_attn)
            if isinstance(d.feed_forward, PositionwiseFeedForward):
                d.feed_forward = PositionwiseFeedForward_export(d.feed_forward)
            self.model.encoders[i] = EncoderLayerSANM_export(d)
        self.model_name = model_name
        self.num_heads = model.encoders[0].self_attn.h
        self.hidden_size = model.encoders[0].self_attn.linear_out.out_features

    def prepare_mask(self, mask):
        # (B, T) padding mask -> (B, T, 1) multiplicative mask plus an
        # additive attention bias (0.0 on valid positions, -10000.0 on
        # padded ones).
        mask_3d_btd = mask[:, :, None]
        if len(mask.shape) == 2:
            mask_4d_bhlt = 1 - mask[:, None, None, :]
        elif len(mask.shape) == 3:
            mask_4d_bhlt = 1 - mask[:, None, :]
        mask_4d_bhlt = mask_4d_bhlt * -10000.0
        return mask_3d_btd, mask_4d_bhlt
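
    # A rough shape sketch (illustrative only, not part of the original
    # file; assuming batch 2 and T = 4):
    #   mask          (2, 4)        1.0 on valid frames, 0.0 on padding
    #   mask_3d_btd   (2, 4, 1)     multiplied into the FSMN memory branch
    #   mask_4d_bhlt  (2, 1, 1, 4)  added to attention scores before softmax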

    def forward(self,
                speech: torch.Tensor,
                speech_lengths: torch.Tensor,
                ):
        # Scale features by sqrt(output_size), as in the training-time encoder.
        speech = speech * self._output_size ** 0.5
        mask = self.make_pad_mask(speech_lengths)
        mask = self.prepare_mask(mask)
        if self.embed is None:
            xs_pad = speech
        else:
            xs_pad = self.embed(speech)
        # Mirror the hasattr guard from __init__: some models have no encoders0.
        if hasattr(self.model, 'encoders0'):
            encoder_outs = self.model.encoders0(xs_pad, mask)
            xs_pad, masks = encoder_outs[0], encoder_outs[1]
        encoder_outs = self.model.encoders(xs_pad, mask)
        xs_pad, masks = encoder_outs[0], encoder_outs[1]
        xs_pad = self.model.after_norm(xs_pad)
        return xs_pad, speech_lengths

    def get_output_size(self):
        return self.model.encoders[0].size

    def get_dummy_inputs(self):
        feats = torch.randn(1, 100, self.feats_dim)
        return feats

    def get_input_names(self):
        return ['feats']

    def get_output_names(self):
        return ['encoder_out', 'encoder_out_lens', 'predictor_weight']

    def get_dynamic_axes(self):
        return {
            'feats': {1: 'feats_length'},
            'encoder_out': {1: 'enc_out_length'},
            'predictor_weight': {1: 'pre_out_length'},
        }
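
# A minimal export sketch (not part of the original file; `asr_model` and the
# opset are assumptions). Note that forward() takes both `speech` and
# `speech_lengths`, while get_dummy_inputs() only produces `feats`, so a
# length tensor is built by hand here:
#
#   enc = SANMEncoder(asr_model.encoder, feats_dim=560, onnx=True)
#   feats = enc.get_dummy_inputs()
#   feats_len = torch.tensor([feats.shape[1]], dtype=torch.int32)
#   torch.onnx.export(
#       enc,
#       (feats, feats_len),
#       'encoder.onnx',
#       input_names=enc.get_input_names(),
#       output_names=enc.get_output_names(),
#       dynamic_axes=enc.get_dynamic_axes(),
#       opset_version=13,
#   )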


class SANMVadEncoder(nn.Module):
    def __init__(
        self,
        model,
        max_seq_len=512,
        feats_dim=560,
        model_name='encoder',
        onnx: bool = True,
    ):
        super().__init__()
        self.embed = model.embed
        self.model = model
        self.feats_dim = feats_dim
        self._output_size = model._output_size
        if onnx:
            self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
        else:
            self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
        # Same module swap as in SANMEncoder above.
        if hasattr(model, 'encoders0'):
            for i, d in enumerate(self.model.encoders0):
                if isinstance(d.self_attn, MultiHeadedAttentionSANM):
                    d.self_attn = MultiHeadedAttentionSANM_export(d.self_attn)
                if isinstance(d.feed_forward, PositionwiseFeedForward):
                    d.feed_forward = PositionwiseFeedForward_export(d.feed_forward)
                self.model.encoders0[i] = EncoderLayerSANM_export(d)
        for i, d in enumerate(self.model.encoders):
            if isinstance(d.self_attn, MultiHeadedAttentionSANM):
                d.self_attn = MultiHeadedAttentionSANM_export(d.self_attn)
            if isinstance(d.feed_forward, PositionwiseFeedForward):
                d.feed_forward = PositionwiseFeedForward_export(d.feed_forward)
            self.model.encoders[i] = EncoderLayerSANM_export(d)
        self.model_name = model_name
        self.num_heads = model.encoders[0].self_attn.h
        self.hidden_size = model.encoders[0].self_attn.linear_out.out_features

    def prepare_mask(self, mask, sub_masks):
        # The VAD variant takes the attention-level mask from the caller
        # (`sub_masks`) instead of deriving it from the padding mask.
        mask_3d_btd = mask[:, :, None]
        mask_4d_bhlt = (1 - sub_masks) * -10000.0
        return mask_3d_btd, mask_4d_bhlt

    def forward(self,
                speech: torch.Tensor,
                speech_lengths: torch.Tensor,
                vad_mask: torch.Tensor,
                sub_masks: torch.Tensor,
                ):
        speech = speech * self._output_size ** 0.5
        mask = self.make_pad_mask(speech_lengths)
        mask = self.prepare_mask(mask, sub_masks)
        if self.embed is None:
            xs_pad = speech
        else:
            xs_pad = self.embed(speech)
        # Mirror the hasattr guard from __init__: some models have no encoders0.
        if hasattr(self.model, 'encoders0'):
            encoder_outs = self.model.encoders0(xs_pad, mask)
            xs_pad, masks = encoder_outs[0], encoder_outs[1]
        # Run the main stack layer by layer so the VAD mask can replace the
        # attention mask in the final layer.
        for layer_idx, encoder_layer in enumerate(self.model.encoders):
            if layer_idx == len(self.model.encoders) - 1:
                mask = (mask[0], vad_mask)
            encoder_outs = encoder_layer(xs_pad, mask)
            xs_pad, masks = encoder_outs[0], encoder_outs[1]
        xs_pad = self.model.after_norm(xs_pad)
        return xs_pad, speech_lengths

    def get_output_size(self):
        return self.model.encoders[0].size

    def get_dummy_inputs(self):
        feats = torch.randn(1, 100, self.feats_dim)
        return feats

    def get_input_names(self):
        return ['feats']

    def get_output_names(self):
        return ['encoder_out', 'encoder_out_lens', 'predictor_weight']

    def get_dynamic_axes(self):
        return {
            'feats': {1: 'feats_length'},
            'encoder_out': {1: 'enc_out_length'},
            'predictor_weight': {1: 'pre_out_length'},
        }
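

# A rough smoke-test sketch for the VAD variant (not part of the original
# file; `vad_model` is an assumption, and the mask shapes are inferred from
# prepare_mask() above rather than confirmed against FunASR): `sub_masks`
# becomes the additive attention bias via (1 - sub_masks) * -10000.0, and
# `vad_mask` replaces the attention mask in the last encoder layer.
#
#   enc = SANMVadEncoder(vad_model.encoder, feats_dim=560, onnx=False)
#   t = 100
#   feats = torch.randn(1, t, enc.feats_dim)
#   feats_len = torch.tensor([t], dtype=torch.int32)
#   sub_masks = torch.ones(1, 1, t, t)  # assumed (B, 1, T, T); 1.0 = attend
#   vad_mask = torch.ones(1, 1, t, t)   # assumed same layout as sub_masks
#   enc_out, enc_out_lens = enc(feats, feats_len, vad_mask, sub_masks)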