```python
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
import torch

from funasr.models.sanm.attention import MultiHeadedAttentionSANM


class MultiHeadedAttentionSANMwithMask(MultiHeadedAttentionSANM):
    """SANM attention variant whose ``mask`` argument is an indexable pair:
    ``mask[0]`` gates the FSMN memory branch, ``mask[1]`` masks the
    attention scores."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, x, mask, mask_shfit_chunk=None, mask_att_chunk_encoder=None):
        # Project the input into per-head queries, keys, values (q_h, k_h, v_h)
        # plus the un-split value tensor v used by the FSMN memory block.
        q_h, k_h, v_h, v = self.forward_qkv(x)
        # FSMN memory branch, masked with the first mask of the pair.
        fsmn_memory = self.forward_fsmn(v, mask[0], mask_shfit_chunk)
        # Scaled dot-product attention, masked with the second mask.
        q_h = q_h * self.d_k ** (-0.5)
        scores = torch.matmul(q_h, k_h.transpose(-2, -1))
        att_outs = self.forward_attention(v_h, scores, mask[1], mask_att_chunk_encoder)
        # The layer output is the sum of the attention and FSMN memory outputs.
        return att_outs + fsmn_memory
```
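
For context, a minimal usage sketch follows. The only behavior taken from the override above is the mask-pair calling convention; the constructor arguments (`n_head`, `in_feat`, `n_feat`, `dropout_rate`, `kernel_size`) and the tensor shapes are assumptions based on the upstream `MultiHeadedAttentionSANM` signature and may differ across FunASR versions.

```python
import torch

# Assumed constructor signature: (n_head, in_feat, n_feat, dropout_rate,
# kernel_size, ...) -- verify against your installed FunASR version.
attn = MultiHeadedAttentionSANMwithMask(
    4,    # n_head: number of attention heads (assumed hyperparameter)
    256,  # in_feat: input feature size (assumed)
    256,  # n_feat: attention feature size (assumed)
    0.1,  # dropout_rate
    11,   # kernel_size of the FSMN memory convolution (assumed)
)

x = torch.randn(2, 50, 256)      # (batch, time, feature)
pad_mask = torch.ones(2, 1, 50)  # 1 = valid frame, 0 = padding
# The override indexes its mask argument: mask[0] gates the FSMN memory,
# mask[1] masks the attention scores. Here both are the same padding mask,
# but they may differ (e.g. for chunk-wise streaming).
out = attn(x, (pad_mask, pad_mask))
print(out.shape)                 # expected: torch.Size([2, 50, 256])
```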