#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Johns Hopkins University (Shinji Watanabe)
#                Northwestern Polytechnical University (Pengcheng Guo)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Encoder self-attention layer definition."""

import torch
from torch import nn

from funasr.models.transformer.layer_norm import LayerNorm


class Encoder_Conformer_Layer(nn.Module):
    """Encoder layer module.

    Args:
        size (int): Input dimension.
        self_attn (torch.nn.Module): Self-attention module instance.
            `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance
            can be used as the argument.
        feed_forward (torch.nn.Module): Feed-forward module instance.
            `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance
            can be used as the argument.
        feed_forward_macaron (torch.nn.Module): Additional feed-forward module instance.
            `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance
            can be used as the argument.
        conv_module (torch.nn.Module): Convolution module instance.
            `ConvolutionModule` instance can be used as the argument.
        dropout_rate (float): Dropout rate.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            If True, an additional linear layer will be applied,
            i.e. x -> x + linear(concat(x, att(x))).
            If False, no additional linear layer will be applied, i.e. x -> x + att(x).
        cca_pos (int): If smaller than 2, the positional embedding (when provided)
            is forwarded to the self-attention module; otherwise it is ignored.
    """
    def __init__(
        self,
        size,
        self_attn,
        feed_forward,
        feed_forward_macaron,
        conv_module,
        dropout_rate,
        normalize_before=True,
        concat_after=False,
        cca_pos=0,
    ):
        """Construct an Encoder_Conformer_Layer object."""
        super(Encoder_Conformer_Layer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.feed_forward_macaron = feed_forward_macaron
        self.conv_module = conv_module
        self.norm_ff = LayerNorm(size)  # for the FFN module
        self.norm_mha = LayerNorm(size)  # for the MHA module
        if feed_forward_macaron is not None:
            self.norm_ff_macaron = LayerNorm(size)
            self.ff_scale = 0.5
        else:
            self.ff_scale = 1.0
        if self.conv_module is not None:
            self.norm_conv = LayerNorm(size)  # for the CNN module
            self.norm_final = LayerNorm(size)  # for the final output of the block
        self.dropout = nn.Dropout(dropout_rate)
        self.size = size
        self.normalize_before = normalize_before
        self.concat_after = concat_after
        self.cca_pos = cca_pos
        if self.concat_after:
            self.concat_linear = nn.Linear(size + size, size)
    def forward(self, x_input, mask, cache=None):
        """Compute encoded features.

        Args:
            x_input (Union[Tuple, torch.Tensor]): Input tensor w/ or w/o pos emb.
                - w/ pos emb: Tuple of tensors [(#batch, time, size), (1, time, size)].
                - w/o pos emb: Tensor (#batch, time, size).
            mask (torch.Tensor): Mask tensor for the input (#batch, time).
            cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).

        Returns:
            torch.Tensor: Output tensor (#batch, time, size).
            torch.Tensor: Mask tensor (#batch, time).
        """
        if isinstance(x_input, tuple):
            x, pos_emb = x_input[0], x_input[1]
        else:
            x, pos_emb = x_input, None

        # whether to use macaron style
        if self.feed_forward_macaron is not None:
            residual = x
            if self.normalize_before:
                x = self.norm_ff_macaron(x)
            x = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(x))
            if not self.normalize_before:
                x = self.norm_ff_macaron(x)

        # multi-headed self-attention module
        residual = x
        if self.normalize_before:
            x = self.norm_mha(x)
        if cache is None:
            x_q = x
        else:
            assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)
            x_q = x[:, -1:, :]
            residual = residual[:, -1:, :]
            mask = None if mask is None else mask[:, -1:, :]
        if self.cca_pos < 2:
            if pos_emb is not None:
                x_att = self.self_attn(x_q, x, x, pos_emb, mask)
            else:
                x_att = self.self_attn(x_q, x, x, mask)
        else:
            x_att = self.self_attn(x_q, x, x, mask)
        if self.concat_after:
            x_concat = torch.cat((x, x_att), dim=-1)
            x = residual + self.concat_linear(x_concat)
        else:
            x = residual + self.dropout(x_att)
        if not self.normalize_before:
            x = self.norm_mha(x)

        # convolution module
        if self.conv_module is not None:
            residual = x
            if self.normalize_before:
                x = self.norm_conv(x)
            x = residual + self.dropout(self.conv_module(x))
            if not self.normalize_before:
                x = self.norm_conv(x)

        # feed forward module
        residual = x
        if self.normalize_before:
            x = self.norm_ff(x)
        x = residual + self.ff_scale * self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm_ff(x)

        if self.conv_module is not None:
            x = self.norm_final(x)

        if cache is not None:
            x = torch.cat([cache, x], dim=1)

        if pos_emb is not None:
            return (x, pos_emb), mask
        return x, mask
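

# --- Hedged usage sketch (illustrative, not part of the original FunASR code) ---
# The stub modules below are invented stand-ins so the example is self-contained;
# in practice the arguments would be FunASR's MultiHeadedAttention /
# RelPositionMultiHeadedAttention, PositionwiseFeedForward, and ConvolutionModule
# instances.  The sketch only shows how tensor shapes flow through the layer.
def _example_conformer_layer():
    class _StubAttention(nn.Module):
        """Stand-in attention: ignores keys, values, and mask; returns the query."""

        def forward(self, query, key, value, *args):
            return query

    class _StubFeedForward(nn.Module):
        """Stand-in (B, T, D) -> (B, T, D) module used for the FFN and conv slots."""

        def __init__(self, size):
            super().__init__()
            self.linear = nn.Linear(size, size)

        def forward(self, x):
            return self.linear(x)

    size, batch, time = 8, 2, 5
    layer = Encoder_Conformer_Layer(
        size=size,
        self_attn=_StubAttention(),
        feed_forward=_StubFeedForward(size),
        feed_forward_macaron=_StubFeedForward(size),
        conv_module=_StubFeedForward(size),
        dropout_rate=0.1,
    )
    x = torch.randn(batch, time, size)
    mask = torch.ones(batch, 1, time, dtype=torch.bool)
    out, out_mask = layer(x, mask)
    assert out.shape == (batch, time, size)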


class EncoderLayer(nn.Module):
    """Encoder layer module with cross-channel attention.

    Args:
        size (int): Input dimension.
        self_attn_cros_channel (torch.nn.Module): Cross-channel attention module instance.
            `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance
            can be used as the argument.
        self_attn_conformer (torch.nn.Module): Self-attention module instance for the
            inner conformer layer. `MultiHeadedAttention` or
            `RelPositionMultiHeadedAttention` instance can be used as the argument.
        feed_forward_csa (torch.nn.Module): Feed-forward module instance.
            `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance
            can be used as the argument.
        feed_forward_macaron_csa (torch.nn.Module): Additional feed-forward module instance.
            `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance
            can be used as the argument.
        conv_module_csa (torch.nn.Module): Convolution module instance.
            `ConvolutionModule` instance can be used as the argument.
        dropout_rate (float): Dropout rate.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            If True, an additional linear layer will be applied,
            i.e. x -> x + linear(concat(x, att(x))).
            If False, no additional linear layer will be applied, i.e. x -> x + att(x).
    """
    def __init__(
        self,
        size,
        self_attn_cros_channel,
        self_attn_conformer,
        feed_forward_csa,
        feed_forward_macaron_csa,
        conv_module_csa,
        dropout_rate,
        normalize_before=True,
        concat_after=False,
    ):
        """Construct an EncoderLayer object."""
        super(EncoderLayer, self).__init__()
        self.encoder_cros_channel_atten = self_attn_cros_channel
        self.encoder_csa = Encoder_Conformer_Layer(
            size,
            self_attn_conformer,
            feed_forward_csa,
            feed_forward_macaron_csa,
            conv_module_csa,
            dropout_rate,
            normalize_before,
            concat_after,
            cca_pos=0,
        )
        self.norm_mha = LayerNorm(size)  # for the MHA module
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, x_input, mask, channel_size, cache=None):
        """Compute encoded features.

        Args:
            x_input (Union[Tuple, torch.Tensor]): Input tensor w/ or w/o pos emb.
                - w/ pos emb: Tuple of tensors [(#batch, time, size), (1, time, size)].
                - w/o pos emb: Tensor (#batch, time, size).
            mask (torch.Tensor): Mask tensor for the input (#batch, time).
            channel_size (int): Number of channels packed into the batch dimension.
            cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).

        Returns:
            torch.Tensor: Output tensor (#batch, time, size).
            torch.Tensor: Mask tensor (#batch, time).
            int: Channel size, passed through unchanged.
        """
        if isinstance(x_input, tuple):
            x, pos_emb = x_input[0], x_input[1]
        else:
            x, pos_emb = x_input, None

        # cross-channel attention module
        residual = x
        x = self.norm_mha(x)
        t_leng = x.size(1)
        d_dim = x.size(2)
        # (batch * channel, time, dim) -> (batch, time, channel, dim)
        x_new = x.reshape(-1, channel_size, t_leng, d_dim).transpose(1, 2)
        # keys/values gather the channels of a 5-frame context window (t-2 .. t+2),
        # zero-padded at the sequence boundaries
        x_k_v = x_new.new_empty(x_new.size(0), x_new.size(1), 5, x_new.size(2), x_new.size(3))
        pad_before = x_new.new_zeros(x_new.size(0), 2, x_new.size(2), x_new.size(3))
        pad_after = x_new.new_zeros(x_new.size(0), 2, x_new.size(2), x_new.size(3))
        x_pad = torch.cat([pad_before, x_new, pad_after], 1)
        x_k_v[:, :, 0, :, :] = x_pad[:, 0:-4, :, :]
        x_k_v[:, :, 1, :, :] = x_pad[:, 1:-3, :, :]
        x_k_v[:, :, 2, :, :] = x_pad[:, 2:-2, :, :]
        x_k_v[:, :, 3, :, :] = x_pad[:, 3:-1, :, :]
        x_k_v[:, :, 4, :, :] = x_pad[:, 4:, :, :]
        # queries: (batch * time, channel, dim); keys/values: (batch * time, 5 * channel, dim)
        x_new = x_new.reshape(-1, channel_size, d_dim)
        x_k_v = x_k_v.reshape(-1, 5 * channel_size, d_dim)
        x_att = self.encoder_cros_channel_atten(x_new, x_k_v, x_k_v, None)
        # back to (batch * channel, time, dim)
        x_att = x_att.reshape(-1, t_leng, channel_size, d_dim).transpose(1, 2).reshape(-1, t_leng, d_dim)
        x = residual + self.dropout(x_att)

        # conformer layer applied to each channel stream
        if pos_emb is not None:
            x_input = (x, pos_emb)
        else:
            x_input = x
        x_input, mask = self.encoder_csa(x_input, mask)
        return x_input, mask, channel_size
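

# --- Hedged usage sketch (illustrative, not part of the original FunASR code) ---
# EncoderLayer expects the batch dimension to pack batch * channel_size parallel
# streams, i.e. x has shape (batch * channel_size, time, size).  For every frame,
# the cross-channel attention attends over the channels of a 5-frame context
# window (t-2 .. t+2, zero padded at the edges).  As above, the stub modules are
# invented stand-ins for FunASR's attention / feed-forward / convolution modules.
def _example_encoder_layer():
    class _StubAttention(nn.Module):
        """Stand-in attention: ignores keys, values, and mask; returns the query."""

        def forward(self, query, key, value, *args):
            return query

    class _StubFeedForward(nn.Module):
        """Stand-in (B, T, D) -> (B, T, D) module used for the FFN and conv slots."""

        def __init__(self, size):
            super().__init__()
            self.linear = nn.Linear(size, size)

        def forward(self, x):
            return self.linear(x)

    size, batch, channels, time = 8, 2, 3, 6
    layer = EncoderLayer(
        size=size,
        self_attn_cros_channel=_StubAttention(),
        self_attn_conformer=_StubAttention(),
        feed_forward_csa=_StubFeedForward(size),
        feed_forward_macaron_csa=_StubFeedForward(size),
        conv_module_csa=_StubFeedForward(size),
        dropout_rate=0.1,
    )
    x = torch.randn(batch * channels, time, size)
    mask = torch.ones(batch * channels, 1, time, dtype=torch.bool)
    out, out_mask, out_channels = layer(x, mask, channels)
    assert out.shape == (batch * channels, time, size)
    assert out_channels == channels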