# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Transformer encoder definition."""

import logging
from typing import List
from typing import Optional
from typing import Tuple

import torch
from torch import nn
from typeguard import check_argument_types

from funasr.models.ctc import CTC
from funasr.models.encoder.abs_encoder import AbsEncoder
from funasr.modules.attention import MultiHeadedAttention
from funasr.modules.dynamic_conv import DynamicConvolution
from funasr.modules.dynamic_conv2d import DynamicConvolution2D
from funasr.modules.embedding import PositionalEncoding
from funasr.modules.layer_norm import LayerNorm
from funasr.modules.lightconv import LightweightConvolution
from funasr.modules.lightconv2d import LightweightConvolution2D
from funasr.modules.multi_layer_conv import Conv1dLinear
from funasr.modules.multi_layer_conv import MultiLayeredConv1d
from funasr.modules.nets_utils import make_pad_mask
from funasr.modules.nets_utils import rename_state_dict
from funasr.modules.positionwise_feed_forward import (
    PositionwiseFeedForward,  # noqa: H301
)
from funasr.modules.repeat import repeat
from funasr.modules.subsampling import Conv2dSubsampling
from funasr.modules.subsampling import Conv2dSubsampling2
from funasr.modules.subsampling import Conv2dSubsampling6
from funasr.modules.subsampling import Conv2dSubsampling8
from funasr.modules.subsampling import TooShortUttError
from funasr.modules.subsampling import check_short_utt


class EncoderLayer(nn.Module):
    """Encoder layer module.

    Args:
        size (int): Input dimension.
        self_attn (torch.nn.Module): Self-attention module instance.
            `MultiHeadedAttention` or `RelPositionMultiHeadedAttention`
            instance can be used as the argument.
        feed_forward (torch.nn.Module): Feed-forward module instance.
            `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear`
            instance can be used as the argument.
        dropout_rate (float): Dropout rate.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            If True, an additional linear will be applied,
            i.e. x -> x + linear(concat(x, att(x))).
            If False, no additional linear will be applied, i.e. x -> x + att(x).
        stochastic_depth_rate (float): Probability to skip this layer.
            During training, the layer may skip the residual computation and
            return its input as-is with the given probability.

    """
    def __init__(
        self,
        size,
        self_attn,
        feed_forward,
        dropout_rate,
        normalize_before=True,
        concat_after=False,
        stochastic_depth_rate=0.0,
    ):
        """Construct an EncoderLayer object."""
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.norm1 = LayerNorm(size)
        self.norm2 = LayerNorm(size)
        self.dropout = nn.Dropout(dropout_rate)
        self.size = size
        self.normalize_before = normalize_before
        self.concat_after = concat_after
        if self.concat_after:
            self.concat_linear = nn.Linear(size + size, size)
        self.stochastic_depth_rate = stochastic_depth_rate

    def forward(self, x, mask, cache=None):
        """Compute encoded features.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, size).
            mask (torch.Tensor): Mask tensor for the input (#batch, time).
            cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).

        Returns:
            torch.Tensor: Output tensor (#batch, time, size).
            torch.Tensor: Mask tensor (#batch, time).

        """
        skip_layer = False
        # With stochastic depth, the residual connection `x + f(x)` becomes
        # `x <- x + 1 / (1 - p) * f(x)` at training time.
        stoch_layer_coeff = 1.0
        if self.training and self.stochastic_depth_rate > 0:
            skip_layer = torch.rand(1).item() < self.stochastic_depth_rate
            stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate)

        if skip_layer:
            if cache is not None:
                x = torch.cat([cache, x], dim=1)
            return x, mask

        residual = x
        if self.normalize_before:
            x = self.norm1(x)

        if cache is None:
            x_q = x
        else:
            # In incremental decoding, only the newest frame is queried;
            # the cached frames serve as keys and values.
            assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)
            x_q = x[:, -1:, :]
            residual = residual[:, -1:, :]
            mask = None if mask is None else mask[:, -1:, :]

        if self.concat_after:
            x_concat = torch.cat((x, self.self_attn(x_q, x, x, mask)), dim=-1)
            x = residual + stoch_layer_coeff * self.concat_linear(x_concat)
        else:
            x = residual + stoch_layer_coeff * self.dropout(
                self.self_attn(x_q, x, x, mask)
            )
        if not self.normalize_before:
            x = self.norm1(x)

        residual = x
        if self.normalize_before:
            x = self.norm2(x)
        x = residual + stoch_layer_coeff * self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm2(x)

        if cache is not None:
            x = torch.cat([cache, x], dim=1)

        return x, mask
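
# A minimal usage sketch for EncoderLayer (illustrative only; the shapes and
# hyperparameters below are assumptions, not part of this module):
#
#     layer = EncoderLayer(
#         size=256,
#         self_attn=MultiHeadedAttention(4, 256, 0.0),
#         feed_forward=PositionwiseFeedForward(256, 2048, 0.1),
#         dropout_rate=0.1,
#     )
#     x = torch.randn(2, 50, 256)                    # (batch, time, size)
#     mask = torch.ones(2, 1, 50, dtype=torch.bool)  # (batch, 1, time)
#     y, mask = layer(x, mask)                       # y: (2, 50, 256)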


class TransformerEncoder(AbsEncoder):
    """Transformer encoder module.

    Args:
        input_size: input dim
        output_size: dimension of attention
        attention_heads: the number of heads of multi head attention
        linear_units: the number of units of position-wise feed forward
        num_blocks: the number of encoder blocks
        dropout_rate: dropout rate
        attention_dropout_rate: dropout rate in attention
        positional_dropout_rate: dropout rate after adding positional encoding
        input_layer: input layer type
        pos_enc_class: PositionalEncoding or ScaledPositionalEncoding
        normalize_before: whether to use layer_norm before the first block
        concat_after: whether to concat attention layer's input and output
            if True, additional linear will be applied,
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied,
            i.e. x -> x + att(x)
        positionwise_layer_type: linear or conv1d
        positionwise_conv_kernel_size: kernel size of positionwise conv1d layer
        padding_idx: padding_idx for input_layer=embed
    """

    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: Optional[str] = "conv2d",
        pos_enc_class=PositionalEncoding,
        normalize_before: bool = True,
        concat_after: bool = False,
        positionwise_layer_type: str = "linear",
        positionwise_conv_kernel_size: int = 1,
        padding_idx: int = -1,
        interctc_layer_idx: List[int] = [],
        interctc_use_conditioning: bool = False,
    ):
        assert check_argument_types()
        super().__init__()
        self._output_size = output_size

        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(input_size, output_size),
                torch.nn.LayerNorm(output_size),
                torch.nn.Dropout(dropout_rate),
                torch.nn.ReLU(),
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsampling(input_size, output_size, dropout_rate)
        elif input_layer == "conv2d2":
            self.embed = Conv2dSubsampling2(input_size, output_size, dropout_rate)
        elif input_layer == "conv2d6":
            self.embed = Conv2dSubsampling6(input_size, output_size, dropout_rate)
        elif input_layer == "conv2d8":
            self.embed = Conv2dSubsampling8(input_size, output_size, dropout_rate)
        elif input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(input_size, output_size, padding_idx=padding_idx),
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer is None:
            if input_size == output_size:
                self.embed = None
            else:
                self.embed = torch.nn.Linear(input_size, output_size)
        else:
            raise ValueError("unknown input_layer: " + input_layer)
        self.normalize_before = normalize_before

        if positionwise_layer_type == "linear":
            positionwise_layer = PositionwiseFeedForward
            positionwise_layer_args = (
                output_size,
                linear_units,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d":
            positionwise_layer = MultiLayeredConv1d
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d-linear":
            positionwise_layer = Conv1dLinear
            positionwise_layer_args = (
                output_size,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        else:
            raise NotImplementedError("Support only linear or conv1d.")

        self.encoders = repeat(
            num_blocks,
            lambda lnum: EncoderLayer(
                output_size,
                MultiHeadedAttention(
                    attention_heads, output_size, attention_dropout_rate
                ),
                positionwise_layer(*positionwise_layer_args),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
        if self.normalize_before:
            self.after_norm = LayerNorm(output_size)

        self.interctc_layer_idx = interctc_layer_idx
        if len(interctc_layer_idx) > 0:
            assert 0 < min(interctc_layer_idx) and max(interctc_layer_idx) < num_blocks
        self.interctc_use_conditioning = interctc_use_conditioning
        self.conditioning_layer = None

    def output_size(self) -> int:
        return self._output_size

    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
        ctc: CTC = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Embed positions in tensor.

        Args:
            xs_pad: input tensor (B, L, D)
            ilens: input length (B)
            prev_states: not used for now
        Returns:
            position embedded tensor and mask
        """
        masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device)

        if self.embed is None:
            pass  # input_size == output_size, no projection needed
        elif isinstance(
            self.embed,
            (
                Conv2dSubsampling,
                Conv2dSubsampling2,
                Conv2dSubsampling6,
                Conv2dSubsampling8,
            ),
        ):
            short_status, limit_size = check_short_utt(self.embed, xs_pad.size(1))
            if short_status:
                raise TooShortUttError(
                    f"has {xs_pad.size(1)} frames and is too short for subsampling "
                    + f"(it needs more than {limit_size} frames), return empty results",
                    xs_pad.size(1),
                    limit_size,
                )
            xs_pad, masks = self.embed(xs_pad, masks)
        else:
            xs_pad = self.embed(xs_pad)

        intermediate_outs = []
        if len(self.interctc_layer_idx) == 0:
            xs_pad, masks = self.encoders(xs_pad, masks)
        else:
            for layer_idx, encoder_layer in enumerate(self.encoders):
                xs_pad, masks = encoder_layer(xs_pad, masks)
                if layer_idx + 1 in self.interctc_layer_idx:
                    encoder_out = xs_pad
                    # intermediate outputs are also normalized
                    if self.normalize_before:
                        encoder_out = self.after_norm(encoder_out)
                    intermediate_outs.append((layer_idx + 1, encoder_out))
                    if self.interctc_use_conditioning:
                        ctc_out = ctc.softmax(encoder_out)
                        xs_pad = xs_pad + self.conditioning_layer(ctc_out)

        if self.normalize_before:
            xs_pad = self.after_norm(xs_pad)

        olens = masks.squeeze(1).sum(1)
        if len(intermediate_outs) > 0:
            return (xs_pad, intermediate_outs), olens, None
        return xs_pad, olens, None
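
# A minimal usage sketch for TransformerEncoder (illustrative only; the 80-dim
# fbank input, batch of two utterances, and lengths are assumptions):
#
#     encoder = TransformerEncoder(input_size=80, output_size=256, num_blocks=6)
#     xs_pad = torch.randn(2, 200, 80)        # (B, L, D) padded features
#     ilens = torch.tensor([200, 180])        # true length of each utterance
#     out, olens, _ = encoder(xs_pad, ilens)  # out: (2, 49, 256) after the
#                                             # default conv2d 4x subsampling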


def _pre_hook(
    state_dict,
    prefix,
    local_metadata,
    strict,
    missing_keys,
    unexpected_keys,
    error_msgs,
):
    # https://github.com/espnet/espnet/commit/21d70286c354c66c0350e65dc098d2ee236faccc#diff-bffb1396f038b317b2b64dd96e6d3563
    rename_state_dict(prefix + "input_layer.", prefix + "embed.", state_dict)
    # https://github.com/espnet/espnet/commit/3d422f6de8d4f03673b89e1caef698745ec749ea#diff-bffb1396f038b317b2b64dd96e6d3563
    rename_state_dict(prefix + "norm.", prefix + "after_norm.", state_dict)


class TransformerEncoder_s0(torch.nn.Module):
    """Transformer encoder module.

    Args:
        idim (int): Input dimension.
        attention_dim (int): Dimension of attention.
        attention_heads (int): The number of heads of multi head attention.
        conv_wshare (int): The number of kernels of convolution. Only used in
            selfattention_layer_type == "lightconv*" or "dynamicconv*".
        conv_kernel_length (Union[int, str]): Kernel size str of convolution
            (e.g. 71_71_71_71_71_71). Only used in selfattention_layer_type
            == "lightconv*" or "dynamicconv*".
        conv_usebias (bool): Whether to use bias in convolution. Only used in
            selfattention_layer_type == "lightconv*" or "dynamicconv*".
        linear_units (int): The number of units of position-wise feed forward.
        num_blocks (int): The number of encoder blocks.
        dropout_rate (float): Dropout rate.
        positional_dropout_rate (float): Dropout rate after adding positional encoding.
        attention_dropout_rate (float): Dropout rate in attention.
        input_layer (Union[str, torch.nn.Module]): Input layer type.
        pos_enc_class (torch.nn.Module): Positional encoding module class.
            `PositionalEncoding` or `ScaledPositionalEncoding`
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            if True, additional linear will be applied,
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied, i.e. x -> x + att(x)
        positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear".
        positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer.
        selfattention_layer_type (str): Encoder attention layer type.
        padding_idx (int): Padding idx for input_layer=embed.
        stochastic_depth_rate (float): Maximum probability to skip the encoder layer.
        intermediate_layers (Union[List[int], None]): Indices of intermediate CTC
            layers; indices start from 1.
            If not None, intermediate outputs are returned (which changes the
            return type signature).

    """

    def __init__(
        self,
        idim,
        attention_dim=256,
        attention_heads=4,
        conv_wshare=4,
        conv_kernel_length="11",
        conv_usebias=False,
        linear_units=2048,
        num_blocks=6,
        dropout_rate=0.1,
        positional_dropout_rate=0.1,
        attention_dropout_rate=0.0,
        input_layer="conv2d",
        pos_enc_class=PositionalEncoding,
        normalize_before=True,
        concat_after=False,
        positionwise_layer_type="linear",
        positionwise_conv_kernel_size=1,
        selfattention_layer_type="selfattn",
        padding_idx=-1,
        stochastic_depth_rate=0.0,
        intermediate_layers=None,
        ctc_softmax=None,
        conditioning_layer_dim=None,
    ):
        """Construct an Encoder object."""
        super(TransformerEncoder_s0, self).__init__()
        self._register_load_state_dict_pre_hook(_pre_hook)

        self.conv_subsampling_factor = 1
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(idim, attention_dim),
                torch.nn.LayerNorm(attention_dim),
                torch.nn.Dropout(dropout_rate),
                torch.nn.ReLU(),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsampling(idim, attention_dim, dropout_rate)
            self.conv_subsampling_factor = 4
        elif input_layer == "conv2d-scaled-pos-enc":
            self.embed = Conv2dSubsampling(
                idim,
                attention_dim,
                dropout_rate,
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
            self.conv_subsampling_factor = 4
        elif input_layer == "conv2d6":
            self.embed = Conv2dSubsampling6(idim, attention_dim, dropout_rate)
            self.conv_subsampling_factor = 6
        elif input_layer == "conv2d8":
            self.embed = Conv2dSubsampling8(idim, attention_dim, dropout_rate)
            self.conv_subsampling_factor = 8
        elif input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif isinstance(input_layer, torch.nn.Module):
            self.embed = torch.nn.Sequential(
                input_layer,
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif input_layer is None:
            self.embed = torch.nn.Sequential(
                pos_enc_class(attention_dim, positional_dropout_rate)
            )
        else:
            raise ValueError("unknown input_layer: " + input_layer)

        self.normalize_before = normalize_before
        positionwise_layer, positionwise_layer_args = self.get_positionwise_layer(
            positionwise_layer_type,
            attention_dim,
            linear_units,
            dropout_rate,
            positionwise_conv_kernel_size,
        )
        if selfattention_layer_type in [
            "selfattn",
            "rel_selfattn",
            "legacy_rel_selfattn",
        ]:
            logging.info("encoder self-attention layer type = self-attention")
            encoder_selfattn_layer = MultiHeadedAttention
            encoder_selfattn_layer_args = [
                (
                    attention_heads,
                    attention_dim,
                    attention_dropout_rate,
                )
            ] * num_blocks
        elif selfattention_layer_type == "lightconv":
            logging.info("encoder self-attention layer type = lightweight convolution")
            encoder_selfattn_layer = LightweightConvolution
            encoder_selfattn_layer_args = [
                (
                    conv_wshare,
                    attention_dim,
                    attention_dropout_rate,
                    int(conv_kernel_length.split("_")[lnum]),
                    False,
                    conv_usebias,
                )
                for lnum in range(num_blocks)
            ]
        elif selfattention_layer_type == "lightconv2d":
            logging.info(
                "encoder self-attention layer "
                "type = lightweight convolution 2-dimensional"
            )
            encoder_selfattn_layer = LightweightConvolution2D
            encoder_selfattn_layer_args = [
                (
                    conv_wshare,
                    attention_dim,
                    attention_dropout_rate,
                    int(conv_kernel_length.split("_")[lnum]),
                    False,
                    conv_usebias,
                )
                for lnum in range(num_blocks)
            ]
        elif selfattention_layer_type == "dynamicconv":
            logging.info("encoder self-attention layer type = dynamic convolution")
            encoder_selfattn_layer = DynamicConvolution
            encoder_selfattn_layer_args = [
                (
                    conv_wshare,
                    attention_dim,
                    attention_dropout_rate,
                    int(conv_kernel_length.split("_")[lnum]),
                    False,
                    conv_usebias,
                )
                for lnum in range(num_blocks)
            ]
        elif selfattention_layer_type == "dynamicconv2d":
            logging.info(
                "encoder self-attention layer type = dynamic convolution 2-dimensional"
            )
            encoder_selfattn_layer = DynamicConvolution2D
            encoder_selfattn_layer_args = [
                (
                    conv_wshare,
                    attention_dim,
                    attention_dropout_rate,
                    int(conv_kernel_length.split("_")[lnum]),
                    False,
                    conv_usebias,
                )
                for lnum in range(num_blocks)
            ]
        else:
            raise NotImplementedError(selfattention_layer_type)

        self.encoders = repeat(
            num_blocks,
            lambda lnum: EncoderLayer(
                attention_dim,
                encoder_selfattn_layer(*encoder_selfattn_layer_args[lnum]),
                positionwise_layer(*positionwise_layer_args),
                dropout_rate,
                normalize_before,
                concat_after,
                stochastic_depth_rate * float(1 + lnum) / num_blocks,
            ),
        )
        if self.normalize_before:
            self.after_norm = LayerNorm(attention_dim)

        self.intermediate_layers = intermediate_layers
        self.use_conditioning = ctc_softmax is not None
        if self.use_conditioning:
            self.ctc_softmax = ctc_softmax
            self.conditioning_layer = torch.nn.Linear(
                conditioning_layer_dim, attention_dim
            )

    def get_positionwise_layer(
        self,
        positionwise_layer_type="linear",
        attention_dim=256,
        linear_units=2048,
        dropout_rate=0.1,
        positionwise_conv_kernel_size=1,
    ):
        """Define positionwise layer."""
        if positionwise_layer_type == "linear":
            positionwise_layer = PositionwiseFeedForward
            positionwise_layer_args = (attention_dim, linear_units, dropout_rate)
        elif positionwise_layer_type == "conv1d":
            positionwise_layer = MultiLayeredConv1d
            positionwise_layer_args = (
                attention_dim,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        elif positionwise_layer_type == "conv1d-linear":
            positionwise_layer = Conv1dLinear
            positionwise_layer_args = (
                attention_dim,
                linear_units,
                positionwise_conv_kernel_size,
                dropout_rate,
            )
        else:
            raise NotImplementedError("Support only linear or conv1d.")
        return positionwise_layer, positionwise_layer_args
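
    # For example (illustrative; `enc` stands for any TransformerEncoder_s0
    # instance), selecting "conv1d" yields a MultiLayeredConv1d block:
    #
    #     layer_cls, layer_args = enc.get_positionwise_layer(
    #         "conv1d",
    #         attention_dim=256,
    #         linear_units=2048,
    #         dropout_rate=0.1,
    #         positionwise_conv_kernel_size=3,
    #     )
    #     ff = layer_cls(*layer_args)  # MultiLayeredConv1d(256, 2048, 3, 0.1)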

    def forward(self, xs, masks):
        """Encode input sequence.

        Args:
            xs (torch.Tensor): Input tensor (#batch, time, idim).
            masks (torch.Tensor): Mask tensor (#batch, time).

        Returns:
            torch.Tensor: Output tensor (#batch, time, attention_dim).
            torch.Tensor: Mask tensor (#batch, time).

        """
        if isinstance(
            self.embed,
            (Conv2dSubsampling, Conv2dSubsampling6, Conv2dSubsampling8),
        ):
            xs, masks = self.embed(xs, masks)
        else:
            xs = self.embed(xs)

        if self.intermediate_layers is None:
            xs, masks = self.encoders(xs, masks)
        else:
            intermediate_outputs = []
            for layer_idx, encoder_layer in enumerate(self.encoders):
                xs, masks = encoder_layer(xs, masks)
                if layer_idx + 1 in self.intermediate_layers:
                    encoder_output = xs
                    # intermediate branches also require normalization.
                    if self.normalize_before:
                        encoder_output = self.after_norm(encoder_output)
                    intermediate_outputs.append(encoder_output)

                    if self.use_conditioning:
                        intermediate_result = self.ctc_softmax(encoder_output)
                        xs = xs + self.conditioning_layer(intermediate_result)

        if self.normalize_before:
            xs = self.after_norm(xs)

        if self.intermediate_layers is not None:
            return xs, masks, intermediate_outputs
        return xs, masks
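
    # A minimal usage sketch (illustrative only; the 80-dim features, batch
    # size, and all-True mask below are assumptions):
    #
    #     enc = TransformerEncoder_s0(idim=80)
    #     xs = torch.randn(2, 200, 80)
    #     masks = torch.ones(2, 1, 200, dtype=torch.bool)
    #     hs, hs_masks = enc(xs, masks)  # hs: (2, 49, 256) after the default
    #                                    # conv2d 4x subsampling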

    def forward_one_step(self, xs, masks, cache=None):
        """Encode input frame.

        Args:
            xs (torch.Tensor): Input tensor.
            masks (torch.Tensor): Mask tensor.
            cache (List[torch.Tensor]): List of cache tensors.

        Returns:
            torch.Tensor: Output tensor.
            torch.Tensor: Mask tensor.
            List[torch.Tensor]: List of new cache tensors.

        """
        if isinstance(self.embed, Conv2dSubsampling):
            xs, masks = self.embed(xs, masks)
        else:
            xs = self.embed(xs)
        if cache is None:
            cache = [None for _ in range(len(self.encoders))]
        new_cache = []
        for c, e in zip(cache, self.encoders):
            xs, masks = e(xs, masks, cache=c)
            new_cache.append(xs)
        if self.normalize_before:
            xs = self.after_norm(xs)
        return xs, masks, new_cache
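
    # A minimal sketch of incremental encoding with forward_one_step
    # (illustrative only; input_layer=None avoids subsampling so one new
    # frame maps to one new encoder position, and the growing mask shape
    # is an assumption):
    #
    #     enc = TransformerEncoder_s0(idim=256, input_layer=None)
    #     cache = None
    #     feats = torch.randn(1, 10, 256)
    #     for t in range(1, feats.size(1) + 1):
    #         xs = feats[:, :t, :]  # all frames seen so far
    #         masks = torch.ones(1, 1, t, dtype=torch.bool)
    #         ys, _, cache = enc.forward_one_step(xs, masks, cache=cache)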
|