@@ -116,6 +116,25 @@ class DecoderLayerSANM(torch.nn.Module):
         # x = residual + self.dropout(self.src_attn(x, memory, memory_mask))
         return x, tgt_mask, memory, memory_mask, cache
+
+    def get_attn_mat(self, tgt, tgt_mask, memory, memory_mask=None, cache=None):
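+        # Same computation as a normal layer pass, but stop at the source
+        # attention and return only its attention matrix.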
+        residual = tgt
+        tgt = self.norm1(tgt)
+        tgt = self.feed_forward(tgt)
+
+        x = tgt
+        if self.self_attn is not None:
+            tgt = self.norm2(tgt)
+            x, cache = self.self_attn(tgt, tgt_mask, cache=cache)
+            x = residual + x
+
+        residual = x
+        x = self.norm3(x)
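+        # ret_attn=True makes src_attn also return its attention weights.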
+        x_src_attn, attn_mat = self.src_attn(x, memory, memory_mask, ret_attn=True)
+        return attn_mat
 
     def forward_one_step(self, tgt, tgt_mask, memory, memory_mask=None, cache=None):
         """Compute decoded features.
@@ -396,6 +412,48 @@ class ParaformerSANMDecoder(BaseTransformerDecoder):
             ys.unsqueeze(0), ys_mask, x.unsqueeze(0), cache=state
         )
         return logp.squeeze(0), state
+
+    def forward_asf2(
+        self,
+        hs_pad: torch.Tensor,
+        hlens: torch.Tensor,
+        ys_in_pad: torch.Tensor,
+        ys_in_lens: torch.Tensor,
+    ):
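+        # Run the first decoder layer, then return the source-attention
+        # matrix computed inside the second layer via get_attn_mat().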
+
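+        # sequence_mask presumably yields (B, T); unsqueezed here to
+        # (B, T_tgt, 1) for the targets and (B, 1, T_mem) for the memory.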
+        tgt = ys_in_pad
+        tgt_mask = myutils.sequence_mask(ys_in_lens, device=tgt.device)[:, :, None]
+
+        memory = hs_pad
+        memory_mask = myutils.sequence_mask(hlens, device=memory.device)[:, None, :]
+
+        tgt, tgt_mask, memory, memory_mask, _ = self.decoders[0](tgt, tgt_mask, memory, memory_mask)
+        attn_mat = self.decoders[1].get_attn_mat(tgt, tgt_mask, memory, memory_mask)
+        return attn_mat
+
+    def forward_asf6(
+        self,
+        hs_pad: torch.Tensor,
+        hlens: torch.Tensor,
+        ys_in_pad: torch.Tensor,
+        ys_in_lens: torch.Tensor,
+    ):
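+        # Same as forward_asf2, but run the first five decoder layers and
+        # return the source-attention matrix from the sixth.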
+
+        tgt = ys_in_pad
+        tgt_mask = myutils.sequence_mask(ys_in_lens, device=tgt.device)[:, :, None]
+
+        memory = hs_pad
+        memory_mask = myutils.sequence_mask(hlens, device=memory.device)[:, None, :]
+
+        for layer in self.decoders[:5]:
+            tgt, tgt_mask, memory, memory_mask, _ = layer(tgt, tgt_mask, memory, memory_mask)
+        attn_mat = self.decoders[5].get_attn_mat(tgt, tgt_mask, memory, memory_mask)
+        return attn_mat
 
     def forward_chunk(
         self,