- #!/usr/bin/env python3
- # -*- coding: utf-8 -*-
- # Copyright 2019 Shigeki Karita
- # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
- """Multi-Head Attention layer definition."""
- import math
- import numpy
- import torch
- from torch import nn
- from typing import Optional, Tuple
- import torch.nn.functional as F
- from funasr.modules.nets_utils import make_pad_mask
- import funasr.modules.lora.layers as lora
- class MultiHeadedAttention(nn.Module):
- """Multi-Head Attention layer.
- Args:
- n_head (int): The number of heads.
- n_feat (int): The number of features.
- dropout_rate (float): Dropout rate.
- """
- def __init__(self, n_head, n_feat, dropout_rate):
- """Construct an MultiHeadedAttention object."""
- super(MultiHeadedAttention, self).__init__()
- assert n_feat % n_head == 0
- # We assume d_v always equals d_k
- self.d_k = n_feat // n_head
- self.h = n_head
- self.linear_q = nn.Linear(n_feat, n_feat)
- self.linear_k = nn.Linear(n_feat, n_feat)
- self.linear_v = nn.Linear(n_feat, n_feat)
- self.linear_out = nn.Linear(n_feat, n_feat)
- self.attn = None
- self.dropout = nn.Dropout(p=dropout_rate)
- def forward_qkv(self, query, key, value):
- """Transform query, key and value.
- Args:
- query (torch.Tensor): Query tensor (#batch, time1, size).
- key (torch.Tensor): Key tensor (#batch, time2, size).
- value (torch.Tensor): Value tensor (#batch, time2, size).
- Returns:
- torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).
- torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).
- torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).
- """
- n_batch = query.size(0)
- q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
- k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
- v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
- q = q.transpose(1, 2) # (batch, head, time1, d_k)
- k = k.transpose(1, 2) # (batch, head, time2, d_k)
- v = v.transpose(1, 2) # (batch, head, time2, d_k)
- return q, k, v
- def forward_attention(self, value, scores, mask):
- """Compute attention context vector.
- Args:
- value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).
- scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).
- mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).
- Returns:
- torch.Tensor: Transformed value (#batch, time1, d_model)
- weighted by the attention score (#batch, time1, time2).
- """
- n_batch = value.size(0)
- if mask is not None:
- mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)
- min_value = float(
- numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min
- )
- scores = scores.masked_fill(mask, min_value)
- self.attn = torch.softmax(scores, dim=-1).masked_fill(
- mask, 0.0
- ) # (batch, head, time1, time2)
- else:
- self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
- p_attn = self.dropout(self.attn)
- x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
- x = (
- x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
- ) # (batch, time1, d_model)
- return self.linear_out(x) # (batch, time1, d_model)
- def forward(self, query, key, value, mask):
- """Compute scaled dot product attention.
- Args:
- query (torch.Tensor): Query tensor (#batch, time1, size).
- key (torch.Tensor): Key tensor (#batch, time2, size).
- value (torch.Tensor): Value tensor (#batch, time2, size).
- mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
- (#batch, time1, time2).
- Returns:
- torch.Tensor: Output tensor (#batch, time1, d_model).
- """
- q, k, v = self.forward_qkv(query, key, value)
- scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
- return self.forward_attention(v, scores, mask)
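- # --- Editor's sketch (not part of the original FunASR source): a minimal usage
- # example for MultiHeadedAttention. The sizes (batch=2, n_head=4, n_feat=256) and
- # the helper name `_example_multi_headed_attention` are illustrative assumptions.
- def _example_multi_headed_attention():
-     mha = MultiHeadedAttention(n_head=4, n_feat=256, dropout_rate=0.1)
-     query = torch.randn(2, 10, 256)  # (batch, time1, n_feat)
-     key = torch.randn(2, 12, 256)    # (batch, time2, n_feat)
-     value = torch.randn(2, 12, 256)  # (batch, time2, n_feat)
-     mask = torch.ones(2, 1, 12, dtype=torch.bool)  # non-zero marks valid frames
-     out = mha(query, key, value, mask)
-     assert out.shape == (2, 10, 256)  # (batch, time1, n_feat)
-     return out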
- class LegacyRelPositionMultiHeadedAttention(MultiHeadedAttention):
- """Multi-Head Attention layer with relative position encoding (old version).
- Details can be found in https://github.com/espnet/espnet/pull/2816.
- Paper: https://arxiv.org/abs/1901.02860
- Args:
- n_head (int): The number of heads.
- n_feat (int): The number of features.
- dropout_rate (float): Dropout rate.
- zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
- """
- def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):
- """Construct an RelPositionMultiHeadedAttention object."""
- super().__init__(n_head, n_feat, dropout_rate)
- self.zero_triu = zero_triu
- # linear transformation for positional encoding
- self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
- # these two learnable bias are used in matrix c and matrix d
- # as described in https://arxiv.org/abs/1901.02860 Section 3.3
- self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
- self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
- torch.nn.init.xavier_uniform_(self.pos_bias_u)
- torch.nn.init.xavier_uniform_(self.pos_bias_v)
- def rel_shift(self, x):
- """Compute relative positional encoding.
- Args:
- x (torch.Tensor): Input tensor (batch, head, time1, time2).
- Returns:
- torch.Tensor: Output tensor.
- """
- zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)
- x_padded = torch.cat([zero_pad, x], dim=-1)
- x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
- x = x_padded[:, :, 1:].view_as(x)
- if self.zero_triu:
- ones = torch.ones((x.size(2), x.size(3)), device=x.device)
- x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
- return x
- def forward(self, query, key, value, pos_emb, mask):
- """Compute 'Scaled Dot Product Attention' with rel. positional encoding.
- Args:
- query (torch.Tensor): Query tensor (#batch, time1, size).
- key (torch.Tensor): Key tensor (#batch, time2, size).
- value (torch.Tensor): Value tensor (#batch, time2, size).
- pos_emb (torch.Tensor): Positional embedding tensor (#batch, time1, size).
- mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
- (#batch, time1, time2).
- Returns:
- torch.Tensor: Output tensor (#batch, time1, d_model).
- """
- q, k, v = self.forward_qkv(query, key, value)
- q = q.transpose(1, 2) # (batch, time1, head, d_k)
- n_batch_pos = pos_emb.size(0)
- p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
- p = p.transpose(1, 2) # (batch, head, time1, d_k)
- # (batch, head, time1, d_k)
- q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
- # (batch, head, time1, d_k)
- q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)
- # compute attention score
- # first compute matrix a and matrix c
- # as described in https://arxiv.org/abs/1901.02860 Section 3.3
- # (batch, head, time1, time2)
- matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
- # compute matrix b and matrix d
- # (batch, head, time1, time1)
- matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
- matrix_bd = self.rel_shift(matrix_bd)
- scores = (matrix_ac + matrix_bd) / math.sqrt(
- self.d_k
- ) # (batch, head, time1, time2)
- return self.forward_attention(v, scores, mask)
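- # --- Editor's sketch (assumption, not from the original source): self-attention
- # call for LegacyRelPositionMultiHeadedAttention. The legacy variant expects a
- # positional embedding whose length equals the query length (time1).
- def _example_legacy_rel_pos_attention():
-     att = LegacyRelPositionMultiHeadedAttention(n_head=4, n_feat=256, dropout_rate=0.0)
-     x = torch.randn(2, 10, 256)        # (batch, time1, n_feat)
-     pos_emb = torch.randn(1, 10, 256)  # (1, time1, n_feat), broadcast over batch
-     mask = torch.ones(2, 1, 10, dtype=torch.bool)
-     out = att(x, x, x, pos_emb, mask)
-     assert out.shape == (2, 10, 256)
-     return out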
- class RelPositionMultiHeadedAttention(MultiHeadedAttention):
- """Multi-Head Attention layer with relative position encoding (new implementation).
- Details can be found in https://github.com/espnet/espnet/pull/2816.
- Paper: https://arxiv.org/abs/1901.02860
- Args:
- n_head (int): The number of heads.
- n_feat (int): The number of features.
- dropout_rate (float): Dropout rate.
- zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
- """
- def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):
- """Construct an RelPositionMultiHeadedAttention object."""
- super().__init__(n_head, n_feat, dropout_rate)
- self.zero_triu = zero_triu
- # linear transformation for positional encoding
- self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
- # these two learnable bias are used in matrix c and matrix d
- # as described in https://arxiv.org/abs/1901.02860 Section 3.3
- self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
- self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
- torch.nn.init.xavier_uniform_(self.pos_bias_u)
- torch.nn.init.xavier_uniform_(self.pos_bias_v)
- def rel_shift(self, x):
- """Compute relative positional encoding.
- Args:
- x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).
- time1 means the length of query vector.
- Returns:
- torch.Tensor: Output tensor.
- """
- zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)
- x_padded = torch.cat([zero_pad, x], dim=-1)
- x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
- x = x_padded[:, :, 1:].view_as(x)[
- :, :, :, : x.size(-1) // 2 + 1
- ] # only keep the positions from 0 to time2
- if self.zero_triu:
- ones = torch.ones((x.size(2), x.size(3)), device=x.device)
- x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
- return x
- def forward(self, query, key, value, pos_emb, mask):
- """Compute 'Scaled Dot Product Attention' with rel. positional encoding.
- Args:
- query (torch.Tensor): Query tensor (#batch, time1, size).
- key (torch.Tensor): Key tensor (#batch, time2, size).
- value (torch.Tensor): Value tensor (#batch, time2, size).
- pos_emb (torch.Tensor): Positional embedding tensor
- (#batch, 2*time1-1, size).
- mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
- (#batch, time1, time2).
- Returns:
- torch.Tensor: Output tensor (#batch, time1, d_model).
- """
- q, k, v = self.forward_qkv(query, key, value)
- q = q.transpose(1, 2) # (batch, time1, head, d_k)
- n_batch_pos = pos_emb.size(0)
- p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
- p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k)
- # (batch, head, time1, d_k)
- q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
- # (batch, head, time1, d_k)
- q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)
- # compute attention score
- # first compute matrix a and matrix c
- # as described in https://arxiv.org/abs/1901.02860 Section 3.3
- # (batch, head, time1, time2)
- matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
- # compute matrix b and matrix d
- # (batch, head, time1, 2*time1-1)
- matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
- matrix_bd = self.rel_shift(matrix_bd)
- scores = (matrix_ac + matrix_bd) / math.sqrt(
- self.d_k
- ) # (batch, head, time1, time2)
- return self.forward_attention(v, scores, mask)
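- # --- Editor's sketch (assumption, not from the original source): the newer
- # relative-position variant consumes a positional embedding of length
- # 2 * time1 - 1, covering relative offsets from -(time1 - 1) to time1 - 1.
- def _example_rel_pos_attention():
-     att = RelPositionMultiHeadedAttention(n_head=4, n_feat=256, dropout_rate=0.0)
-     x = torch.randn(2, 10, 256)                # (batch, time1, n_feat)
-     pos_emb = torch.randn(1, 2 * 10 - 1, 256)  # (1, 2*time1-1, n_feat)
-     mask = torch.ones(2, 1, 10, dtype=torch.bool)
-     out = att(x, x, x, pos_emb, mask)
-     assert out.shape == (2, 10, 256)
-     return out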
- class MultiHeadedAttentionSANM(nn.Module):
- """Multi-Head Attention layer.
- Args:
- n_head (int): The number of heads.
- n_feat (int): The number of features.
- dropout_rate (float): Dropout rate.
- """
- def __init__(self, n_head, in_feat, n_feat, dropout_rate, kernel_size, sanm_shfit=0, lora_list=None, lora_rank=8, lora_alpha=16, lora_dropout=0.1):
- """Construct an MultiHeadedAttention object."""
- super(MultiHeadedAttentionSANM, self).__init__()
- assert n_feat % n_head == 0
- # We assume d_v always equals d_k
- self.d_k = n_feat // n_head
- self.h = n_head
- # self.linear_q = nn.Linear(n_feat, n_feat)
- # self.linear_k = nn.Linear(n_feat, n_feat)
- # self.linear_v = nn.Linear(n_feat, n_feat)
- if lora_list is not None:
- if "o" in lora_list:
- self.linear_out = lora.Linear(n_feat, n_feat, r=lora_rank, lora_alpha=lora_alpha, lora_dropout=lora_dropout)
- else:
- self.linear_out = nn.Linear(n_feat, n_feat)
- lora_qkv_list = ["q" in lora_list, "k" in lora_list, "v" in lora_list]
- if lora_qkv_list == [False, False, False]:
- self.linear_q_k_v = nn.Linear(in_feat, n_feat * 3)
- else:
- self.linear_q_k_v = lora.MergedLinear(in_feat, n_feat * 3, r=lora_rank, lora_alpha=lora_alpha, lora_dropout=lora_dropout, enable_lora=lora_qkv_list)
- else:
- self.linear_out = nn.Linear(n_feat, n_feat)
- self.linear_q_k_v = nn.Linear(in_feat, n_feat * 3)
- self.attn = None
- self.dropout = nn.Dropout(p=dropout_rate)
- self.fsmn_block = nn.Conv1d(n_feat, n_feat, kernel_size, stride=1, padding=0, groups=n_feat, bias=False)
- # padding
- left_padding = (kernel_size - 1) // 2
- if sanm_shfit > 0:
- left_padding = left_padding + sanm_shfit
- right_padding = kernel_size - 1 - left_padding
- self.pad_fn = nn.ConstantPad1d((left_padding, right_padding), 0.0)
- def forward_fsmn(self, inputs, mask, mask_shfit_chunk=None):
- b, t, d = inputs.size()
- if mask is not None:
- mask = torch.reshape(mask, (b, -1, 1))
- if mask_shfit_chunk is not None:
- mask = mask * mask_shfit_chunk
- inputs = inputs * mask
- x = inputs.transpose(1, 2)
- x = self.pad_fn(x)
- x = self.fsmn_block(x)
- x = x.transpose(1, 2)
- x += inputs
- x = self.dropout(x)
- if mask is not None:
- x = x * mask
- return x
- def forward_qkv(self, x):
- """Transform query, key and value.
- Args:
- query (torch.Tensor): Query tensor (#batch, time1, size).
- key (torch.Tensor): Key tensor (#batch, time2, size).
- value (torch.Tensor): Value tensor (#batch, time2, size).
- Returns:
- torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).
- torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).
- torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).
- """
- b, t, d = x.size()
- q_k_v = self.linear_q_k_v(x)
- q, k, v = torch.split(q_k_v, int(self.h * self.d_k), dim=-1)
- q_h = torch.reshape(q, (b, t, self.h, self.d_k)).transpose(1, 2) # (batch, head, time1, d_k)
- k_h = torch.reshape(k, (b, t, self.h, self.d_k)).transpose(1, 2) # (batch, head, time2, d_k)
- v_h = torch.reshape(v, (b, t, self.h, self.d_k)).transpose(1, 2) # (batch, head, time2, d_k)
- return q_h, k_h, v_h, v
- def forward_attention(self, value, scores, mask, mask_att_chunk_encoder=None):
- """Compute attention context vector.
- Args:
- value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).
- scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).
- mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).
- Returns:
- torch.Tensor: Transformed value (#batch, time1, d_model)
- weighted by the attention score (#batch, time1, time2).
- """
- n_batch = value.size(0)
- if mask is not None:
- if mask_att_chunk_encoder is not None:
- mask = mask * mask_att_chunk_encoder
- mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)
- min_value = float(
- numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min
- )
- scores = scores.masked_fill(mask, min_value)
- self.attn = torch.softmax(scores, dim=-1).masked_fill(
- mask, 0.0
- ) # (batch, head, time1, time2)
- else:
- self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
- p_attn = self.dropout(self.attn)
- x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
- x = (
- x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
- ) # (batch, time1, d_model)
- return self.linear_out(x) # (batch, time1, d_model)
- def forward(self, x, mask, mask_shfit_chunk=None, mask_att_chunk_encoder=None):
- """Compute scaled dot product attention.
- Args:
- query (torch.Tensor): Query tensor (#batch, time1, size).
- key (torch.Tensor): Key tensor (#batch, time2, size).
- value (torch.Tensor): Value tensor (#batch, time2, size).
- mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
- (#batch, time1, time2).
- Returns:
- torch.Tensor: Output tensor (#batch, time1, d_model).
- """
- q_h, k_h, v_h, v = self.forward_qkv(x)
- fsmn_memory = self.forward_fsmn(v, mask, mask_shfit_chunk)
- q_h = q_h * self.d_k ** (-0.5)
- scores = torch.matmul(q_h, k_h.transpose(-2, -1))
- att_outs = self.forward_attention(v_h, scores, mask, mask_att_chunk_encoder)
- return att_outs + fsmn_memory
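- # --- Editor's sketch (assumption, not from the original source): the SANM layer
- # fuses q/k/v into one projection and adds an FSMN memory branch, so it takes a
- # single input tensor instead of separate query/key/value tensors.
- def _example_sanm_attention():
-     att = MultiHeadedAttentionSANM(
-         n_head=4, in_feat=256, n_feat=256, dropout_rate=0.0, kernel_size=11
-     )
-     x = torch.randn(2, 10, 256)  # (batch, time, in_feat)
-     mask = torch.ones(2, 1, 10)  # non-zero marks valid frames
-     out = att(x, mask)           # attention output + FSMN memory
-     assert out.shape == (2, 10, 256)
-     return out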
- class MultiHeadedAttentionSANMwithMask(MultiHeadedAttentionSANM):
- """SANM attention variant that takes a pair of masks: mask[0] for the FSMN block and mask[1] for the attention scores."""
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- def forward(self, x, mask, mask_shfit_chunk=None, mask_att_chunk_encoder=None):
- q_h, k_h, v_h, v = self.forward_qkv(x)
- fsmn_memory = self.forward_fsmn(v, mask[0], mask_shfit_chunk)
- q_h = q_h * self.d_k ** (-0.5)
- scores = torch.matmul(q_h, k_h.transpose(-2, -1))
- att_outs = self.forward_attention(v_h, scores, mask[1], mask_att_chunk_encoder)
- return att_outs + fsmn_memory
- class MultiHeadedAttentionSANMDecoder(nn.Module):
- """Multi-Head Attention layer.
- Args:
- n_head (int): The number of heads.
- n_feat (int): The number of features.
- dropout_rate (float): Dropout rate.
- """
- def __init__(self, n_feat, dropout_rate, kernel_size, sanm_shfit=0):
- """Construct an MultiHeadedAttention object."""
- super(MultiHeadedAttentionSANMDecoder, self).__init__()
- self.dropout = nn.Dropout(p=dropout_rate)
- self.fsmn_block = nn.Conv1d(n_feat, n_feat,
- kernel_size, stride=1, padding=0, groups=n_feat, bias=False)
- # padding
- left_padding = (kernel_size - 1) // 2
- if sanm_shfit > 0:
- left_padding = left_padding + sanm_shfit
- right_padding = kernel_size - 1 - left_padding
- self.pad_fn = nn.ConstantPad1d((left_padding, right_padding), 0.0)
- self.kernel_size = kernel_size
- def forward(self, inputs, mask, cache=None, mask_shfit_chunk=None):
- """Forward the FSMN block, with an optional cache for streaming decoding.
- Args:
- inputs (torch.Tensor): Input tensor (#batch, time1, size).
- mask (torch.Tensor): Mask tensor (#batch, 1, time1).
- cache (torch.Tensor): FSMN cache from the previous step (#batch, size, cache_t).
- mask_shfit_chunk (torch.Tensor): Optional chunk mask applied to the input mask.
- Returns:
- torch.Tensor: Output tensor (#batch, time1, size).
- torch.Tensor: Updated cache.
- """
- # print("in fsmn, inputs", inputs.size())
- b, t, d = inputs.size()
- # logging.info(
- # "mask: {}".format(mask.size()))
- if mask is not None:
- mask = torch.reshape(mask, (b ,-1, 1))
- # logging.info("in fsmn, mask: {}, {}".format(mask.size(), mask[0:100:50, :, :]))
- if mask_shfit_chunk is not None:
- # logging.info("in fsmn, mask_fsmn: {}, {}".format(mask_shfit_chunk.size(), mask_shfit_chunk[0:100:50, :, :]))
- mask = mask * mask_shfit_chunk
- # logging.info("in fsmn, mask_after_fsmn: {}, {}".format(mask.size(), mask[0:100:50, :, :]))
- # print("in fsmn, mask", mask.size())
- # print("in fsmn, inputs", inputs.size())
- inputs = inputs * mask
- x = inputs.transpose(1, 2)
- b, d, t = x.size()
- if cache is None:
- x = self.pad_fn(x)
- if not self.training:
- cache = x
- else:
- # append the new frames to the cached context and keep the last (kernel_size + t - 1) frames
- x = torch.cat((cache[:, :, 1:], x), dim=2)
- x = x[:, :, -(self.kernel_size + t - 1):]
- cache = x
- x = self.fsmn_block(x)
- x = x.transpose(1, 2)
- # print("in fsmn, fsmn_out", x.size())
- if x.size(1) != inputs.size(1):
- inputs = inputs[:, -1, :]
- x = x + inputs
- x = self.dropout(x)
- if mask is not None:
- x = x * mask
- return x, cache
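- # --- Editor's sketch (assumption, not from the original source): the decoder-side
- # FSMN block returns its padded input as a cache so that streaming decoding can
- # feed one new frame at a time. `.eval()` is used because the cache is only
- # populated outside of training.
- def _example_sanm_decoder_fsmn():
-     fsmn = MultiHeadedAttentionSANMDecoder(n_feat=256, dropout_rate=0.0, kernel_size=11)
-     fsmn.eval()
-     inputs = torch.randn(2, 10, 256)
-     mask = torch.ones(2, 1, 10)
-     out, cache = fsmn(inputs, mask)  # first chunk, cache created here
-     next_frame = torch.randn(2, 1, 256)
-     out_step, cache = fsmn(next_frame, None, cache=cache)  # streaming step
-     assert out.shape == (2, 10, 256) and out_step.shape == (2, 1, 256)
-     return out_step, cache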
- class MultiHeadedAttentionCrossAtt(nn.Module):
- """Multi-Head Attention layer.
- Args:
- n_head (int): The number of heads.
- n_feat (int): The number of features.
- dropout_rate (float): Dropout rate.
- """
- def __init__(self, n_head, n_feat, dropout_rate, lora_list=None, lora_rank=8, lora_alpha=16, lora_dropout=0.1, encoder_output_size=None):
- """Construct an MultiHeadedAttention object."""
- super(MultiHeadedAttentionCrossAtt, self).__init__()
- assert n_feat % n_head == 0
- # We assume d_v always equals d_k
- self.d_k = n_feat // n_head
- self.h = n_head
- if lora_list is not None:
- if "q" in lora_list:
- self.linear_q = lora.Linear(n_feat, n_feat, r=lora_rank, lora_alpha=lora_alpha, lora_dropout=lora_dropout)
- else:
- self.linear_q = nn.Linear(n_feat, n_feat)
- lora_kv_list = ["k" in lora_list, "v" in lora_list]
- if lora_kv_list == [False, False]:
- self.linear_k_v = nn.Linear(n_feat if encoder_output_size is None else encoder_output_size, n_feat*2)
- else:
- self.linear_k_v = lora.MergedLinear(n_feat if encoder_output_size is None else encoder_output_size, n_feat * 2,
- r=lora_rank, lora_alpha=lora_alpha, lora_dropout=lora_dropout, enable_lora=lora_kv_list)
- if "o" in lora_list:
- self.linear_out = lora.Linear(n_feat, n_feat, r=lora_rank, lora_alpha=lora_alpha, lora_dropout=lora_dropout)
- else:
- self.linear_out = nn.Linear(n_feat, n_feat)
- else:
- self.linear_q = nn.Linear(n_feat, n_feat)
- self.linear_k_v = nn.Linear(n_feat if encoder_output_size is None else encoder_output_size, n_feat*2)
- self.linear_out = nn.Linear(n_feat, n_feat)
- self.attn = None
- self.dropout = nn.Dropout(p=dropout_rate)
- def forward_qkv(self, x, memory):
- """Transform query, key and value.
- Args:
- query (torch.Tensor): Query tensor (#batch, time1, size).
- key (torch.Tensor): Key tensor (#batch, time2, size).
- value (torch.Tensor): Value tensor (#batch, time2, size).
- Returns:
- torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).
- torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).
- torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).
- """
- # print("in forward_qkv, x", x.size())
- b = x.size(0)
- q = self.linear_q(x)
- q_h = torch.reshape(q, (b, -1, self.h, self.d_k)).transpose(1, 2) # (batch, head, time1, d_k)
- k_v = self.linear_k_v(memory)
- k, v = torch.split(k_v, int(self.h*self.d_k), dim=-1)
- k_h = torch.reshape(k, (b, -1, self.h, self.d_k)).transpose(1, 2) # (batch, head, time2, d_k)
- v_h = torch.reshape(v, (b, -1, self.h, self.d_k)).transpose(1, 2) # (batch, head, time2, d_k)
- return q_h, k_h, v_h
- def forward_attention(self, value, scores, mask):
- """Compute attention context vector.
- Args:
- value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).
- scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).
- mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).
- Returns:
- torch.Tensor: Transformed value (#batch, time1, d_model)
- weighted by the attention score (#batch, time1, time2).
- """
- n_batch = value.size(0)
- if mask is not None:
- mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)
- min_value = float(
- numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min
- )
- scores = scores.masked_fill(mask, min_value)
- self.attn = torch.softmax(scores, dim=-1).masked_fill(
- mask, 0.0
- ) # (batch, head, time1, time2)
- else:
- self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
- p_attn = self.dropout(self.attn)
- x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
- x = (
- x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
- ) # (batch, time1, d_model)
- return self.linear_out(x) # (batch, time1, d_model)
- def forward(self, x, memory, memory_mask):
- """Compute scaled dot product attention.
- Args:
- query (torch.Tensor): Query tensor (#batch, time1, size).
- key (torch.Tensor): Key tensor (#batch, time2, size).
- value (torch.Tensor): Value tensor (#batch, time2, size).
- mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
- (#batch, time1, time2).
- Returns:
- torch.Tensor: Output tensor (#batch, time1, d_model).
- """
- q_h, k_h, v_h = self.forward_qkv(x, memory)
- q_h = q_h * self.d_k ** (-0.5)
- scores = torch.matmul(q_h, k_h.transpose(-2, -1))
- return self.forward_attention(v_h, scores, memory_mask)
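- # --- Editor's sketch (assumption, not from the original source): cross-attention
- # between decoder states and encoder memory; the key/value projection is fused
- # into a single linear layer over the encoder output.
- def _example_cross_attention():
-     att = MultiHeadedAttentionCrossAtt(n_head=4, n_feat=256, dropout_rate=0.0)
-     decoder_x = torch.randn(2, 6, 256)        # (batch, time1, n_feat)
-     encoder_memory = torch.randn(2, 20, 256)  # (batch, time2, n_feat)
-     memory_mask = torch.ones(2, 1, 20, dtype=torch.bool)
-     out = att(decoder_x, encoder_memory, memory_mask)
-     assert out.shape == (2, 6, 256)
-     return out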
- class MultiHeadSelfAttention(nn.Module):
- """Multi-Head Attention layer.
- Args:
- n_head (int): The number of heads.
- n_feat (int): The number of features.
- dropout_rate (float): Dropout rate.
- """
- def __init__(self, n_head, in_feat, n_feat, dropout_rate):
- """Construct an MultiHeadedAttention object."""
- super(MultiHeadSelfAttention, self).__init__()
- assert n_feat % n_head == 0
- # We assume d_v always equals d_k
- self.d_k = n_feat // n_head
- self.h = n_head
- self.linear_out = nn.Linear(n_feat, n_feat)
- self.linear_q_k_v = nn.Linear(in_feat, n_feat * 3)
- self.attn = None
- self.dropout = nn.Dropout(p=dropout_rate)
- def forward_qkv(self, x):
- """Transform query, key and value.
- Args:
- query (torch.Tensor): Query tensor (#batch, time1, size).
- key (torch.Tensor): Key tensor (#batch, time2, size).
- value (torch.Tensor): Value tensor (#batch, time2, size).
- Returns:
- torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).
- torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).
- torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).
- """
- b, t, d = x.size()
- q_k_v = self.linear_q_k_v(x)
- q, k, v = torch.split(q_k_v, int(self.h * self.d_k), dim=-1)
- q_h = torch.reshape(q, (b, t, self.h, self.d_k)).transpose(1, 2) # (batch, head, time1, d_k)
- k_h = torch.reshape(k, (b, t, self.h, self.d_k)).transpose(1, 2) # (batch, head, time2, d_k)
- v_h = torch.reshape(v, (b, t, self.h, self.d_k)).transpose(1, 2) # (batch, head, time2, d_k)
- return q_h, k_h, v_h, v
- def forward_attention(self, value, scores, mask, mask_att_chunk_encoder=None):
- """Compute attention context vector.
- Args:
- value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).
- scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).
- mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).
- Returns:
- torch.Tensor: Transformed value (#batch, time1, d_model)
- weighted by the attention score (#batch, time1, time2).
- """
- n_batch = value.size(0)
- if mask is not None:
- if mask_att_chunk_encoder is not None:
- mask = mask * mask_att_chunk_encoder
- mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)
- min_value = float(
- numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min
- )
- scores = scores.masked_fill(mask, min_value)
- self.attn = torch.softmax(scores, dim=-1).masked_fill(
- mask, 0.0
- ) # (batch, head, time1, time2)
- else:
- self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
- p_attn = self.dropout(self.attn)
- x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
- x = (
- x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
- ) # (batch, time1, d_model)
- return self.linear_out(x) # (batch, time1, d_model)
- def forward(self, x, mask, mask_att_chunk_encoder=None):
- """Compute scaled dot product attention.
- Args:
- query (torch.Tensor): Query tensor (#batch, time1, size).
- key (torch.Tensor): Key tensor (#batch, time2, size).
- value (torch.Tensor): Value tensor (#batch, time2, size).
- mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
- (#batch, time1, time2).
- Returns:
- torch.Tensor: Output tensor (#batch, time1, d_model).
- """
- q_h, k_h, v_h, v = self.forward_qkv(x)
- q_h = q_h * self.d_k ** (-0.5)
- scores = torch.matmul(q_h, k_h.transpose(-2, -1))
- att_outs = self.forward_attention(v_h, scores, mask, mask_att_chunk_encoder)
- return att_outs
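- # --- Editor's sketch (assumption, not from the original source): this class is
- # the plain self-attention counterpart of MultiHeadedAttentionSANM (fused q/k/v
- # projection, no FSMN memory branch).
- def _example_multi_head_self_attention():
-     att = MultiHeadSelfAttention(n_head=4, in_feat=256, n_feat=256, dropout_rate=0.0)
-     x = torch.randn(2, 10, 256)
-     mask = torch.ones(2, 1, 10)
-     out = att(x, mask)
-     assert out.shape == (2, 10, 256)
-     return out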
- class RelPositionMultiHeadedAttentionChunk(torch.nn.Module):
- """RelPositionMultiHeadedAttention definition.
- Args:
- num_heads: Number of attention heads.
- embed_size: Embedding size.
- dropout_rate: Dropout rate.
- """
- def __init__(
- self,
- num_heads: int,
- embed_size: int,
- dropout_rate: float = 0.0,
- simplified_attention_score: bool = False,
- ) -> None:
- """Construct an MultiHeadedAttention object."""
- super().__init__()
- self.d_k = embed_size // num_heads
- self.num_heads = num_heads
- assert self.d_k * num_heads == embed_size, (
- f"embed_size ({embed_size}) must be divisible by num_heads ({num_heads})"
- )
- self.linear_q = torch.nn.Linear(embed_size, embed_size)
- self.linear_k = torch.nn.Linear(embed_size, embed_size)
- self.linear_v = torch.nn.Linear(embed_size, embed_size)
- self.linear_out = torch.nn.Linear(embed_size, embed_size)
- if simplified_attention_score:
- self.linear_pos = torch.nn.Linear(embed_size, num_heads)
- self.compute_att_score = self.compute_simplified_attention_score
- else:
- self.linear_pos = torch.nn.Linear(embed_size, embed_size, bias=False)
- self.pos_bias_u = torch.nn.Parameter(torch.Tensor(num_heads, self.d_k))
- self.pos_bias_v = torch.nn.Parameter(torch.Tensor(num_heads, self.d_k))
- torch.nn.init.xavier_uniform_(self.pos_bias_u)
- torch.nn.init.xavier_uniform_(self.pos_bias_v)
- self.compute_att_score = self.compute_attention_score
- self.dropout = torch.nn.Dropout(p=dropout_rate)
- self.attn = None
- def rel_shift(self, x: torch.Tensor, left_context: int = 0) -> torch.Tensor:
- """Compute relative positional encoding.
- Args:
- x: Input sequence. (B, H, T_1, 2 * T_1 - 1)
- left_context: Number of frames in left context.
- Returns:
- x: Output sequence. (B, H, T_1, T_2)
- """
- batch_size, n_heads, time1, n = x.shape
- time2 = time1 + left_context
- batch_stride, n_heads_stride, time1_stride, n_stride = x.stride()
- return x.as_strided(
- (batch_size, n_heads, time1, time2),
- (batch_stride, n_heads_stride, time1_stride - n_stride, n_stride),
- storage_offset=(n_stride * (time1 - 1)),
- )
- def compute_simplified_attention_score(
- self,
- query: torch.Tensor,
- key: torch.Tensor,
- pos_enc: torch.Tensor,
- left_context: int = 0,
- ) -> torch.Tensor:
- """Simplified attention score computation.
- Reference: https://github.com/k2-fsa/icefall/pull/458
- Args:
- query: Transformed query tensor. (B, H, T_1, d_k)
- key: Transformed key tensor. (B, H, T_2, d_k)
- pos_enc: Positional embedding tensor. (B, 2 * T_1 - 1, size)
- left_context: Number of frames in left context.
- Returns:
- : Attention score. (B, H, T_1, T_2)
- """
- pos_enc = self.linear_pos(pos_enc)
- matrix_ac = torch.matmul(query, key.transpose(2, 3))
- matrix_bd = self.rel_shift(
- pos_enc.transpose(1, 2).unsqueeze(2).repeat(1, 1, query.size(2), 1),
- left_context=left_context,
- )
- return (matrix_ac + matrix_bd) / math.sqrt(self.d_k)
- def compute_attention_score(
- self,
- query: torch.Tensor,
- key: torch.Tensor,
- pos_enc: torch.Tensor,
- left_context: int = 0,
- ) -> torch.Tensor:
- """Attention score computation.
- Args:
- query: Transformed query tensor. (B, H, T_1, d_k)
- key: Transformed key tensor. (B, H, T_2, d_k)
- pos_enc: Positional embedding tensor. (B, 2 * T_1 - 1, size)
- left_context: Number of frames in left context.
- Returns:
- : Attention score. (B, H, T_1, T_2)
- """
- p = self.linear_pos(pos_enc).view(pos_enc.size(0), -1, self.num_heads, self.d_k)
- query = query.transpose(1, 2)
- q_with_bias_u = (query + self.pos_bias_u).transpose(1, 2)
- q_with_bias_v = (query + self.pos_bias_v).transpose(1, 2)
- matrix_ac = torch.matmul(q_with_bias_u, key.transpose(-2, -1))
- matrix_bd = torch.matmul(q_with_bias_v, p.permute(0, 2, 3, 1))
- matrix_bd = self.rel_shift(matrix_bd, left_context=left_context)
- return (matrix_ac + matrix_bd) / math.sqrt(self.d_k)
- def forward_qkv(
- self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
- """Transform query, key and value.
- Args:
- query: Query tensor. (B, T_1, size)
- key: Key tensor. (B, T_2, size)
- value: Value tensor. (B, T_2, size)
- Returns:
- q: Transformed query tensor. (B, H, T_1, d_k)
- k: Transformed key tensor. (B, H, T_2, d_k)
- v: Transformed value tensor. (B, H, T_2, d_k)
- """
- n_batch = query.size(0)
- q = (
- self.linear_q(query)
- .view(n_batch, -1, self.num_heads, self.d_k)
- .transpose(1, 2)
- )
- k = (
- self.linear_k(key)
- .view(n_batch, -1, self.num_heads, self.d_k)
- .transpose(1, 2)
- )
- v = (
- self.linear_v(value)
- .view(n_batch, -1, self.num_heads, self.d_k)
- .transpose(1, 2)
- )
- return q, k, v
- def forward_attention(
- self,
- value: torch.Tensor,
- scores: torch.Tensor,
- mask: torch.Tensor,
- chunk_mask: Optional[torch.Tensor] = None,
- ) -> torch.Tensor:
- """Compute attention context vector.
- Args:
- value: Transformed value. (B, H, T_2, d_k)
- scores: Attention score. (B, H, T_1, T_2)
- mask: Source mask. (B, T_2)
- chunk_mask: Chunk mask. (T_1, T_1)
- Returns:
- attn_output: Transformed value weighted by attention score. (B, T_1, H * d_k)
- """
- batch_size = scores.size(0)
- mask = mask.unsqueeze(1).unsqueeze(2)
- if chunk_mask is not None:
- mask = chunk_mask.unsqueeze(0).unsqueeze(1) | mask
- scores = scores.masked_fill(mask, float("-inf"))
- self.attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
- attn_output = self.dropout(self.attn)
- attn_output = torch.matmul(attn_output, value)
- attn_output = self.linear_out(
- attn_output.transpose(1, 2)
- .contiguous()
- .view(batch_size, -1, self.num_heads * self.d_k)
- )
- return attn_output
- def forward(
- self,
- query: torch.Tensor,
- key: torch.Tensor,
- value: torch.Tensor,
- pos_enc: torch.Tensor,
- mask: torch.Tensor,
- chunk_mask: Optional[torch.Tensor] = None,
- left_context: int = 0,
- ) -> torch.Tensor:
- """Compute scaled dot product attention with rel. positional encoding.
- Args:
- query: Query tensor. (B, T_1, size)
- key: Key tensor. (B, T_2, size)
- value: Value tensor. (B, T_2, size)
- pos_enc: Positional embedding tensor. (B, 2 * T_1 - 1, size)
- mask: Source mask. (B, T_2)
- chunk_mask: Chunk mask. (T_1, T_1)
- left_context: Number of frames in left context.
- Returns:
- : Output tensor. (B, T_1, H * d_k)
- """
- q, k, v = self.forward_qkv(query, key, value)
- scores = self.compute_att_score(q, k, pos_enc, left_context=left_context)
- return self.forward_attention(v, scores, mask, chunk_mask=chunk_mask)
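- # --- Editor's sketch (assumption, not from the original source): unlike the other
- # classes in this file, this layer expects a boolean source mask of shape (B, T_2)
- # where True marks padded frames, and a positional embedding of length 2 * T_1 - 1.
- def _example_chunk_rel_pos_attention():
-     att = RelPositionMultiHeadedAttentionChunk(num_heads=4, embed_size=256)
-     x = torch.randn(2, 10, 256)
-     pos_enc = torch.randn(1, 2 * 10 - 1, 256)    # (1, 2*T_1-1, embed_size)
-     mask = torch.zeros(2, 10, dtype=torch.bool)  # no padded frames
-     out = att(x, x, x, pos_enc, mask)
-     assert out.shape == (2, 10, 256)
-     return out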
- class CosineDistanceAttention(nn.Module):
- """ Compute Cosine Distance between spk decoder output and speaker profile
- Args:
- profile_path: speaker profile file path (.npy file)
- """
- def __init__(self):
- super().__init__()
- self.softmax = nn.Softmax(dim=-1)
- def forward(self, spk_decoder_out, profile, profile_lens=None):
- """
- Args:
- spk_decoder_out(torch.Tensor):(B, L, D)
- spk_profiles(torch.Tensor):(B, N, D)
- """
- x = spk_decoder_out.unsqueeze(2) # (B, L, 1, D)
- if profile_lens is not None:
- mask = (make_pad_mask(profile_lens)[:, None, :]).to(profile.device)
- min_value = float(
- numpy.finfo(torch.tensor(0, dtype=x.dtype).numpy().dtype).min
- )
- weights_not_softmax = F.cosine_similarity(x, profile.unsqueeze(1), dim=-1).masked_fill(mask, min_value)
- weights = self.softmax(weights_not_softmax).masked_fill(mask, 0.0) # (B, L, N)
- else:
- x = x[:, -1:, :, :]
- weights_not_softmax = F.cosine_similarity(x, profile.unsqueeze(1).to(x.device), dim=-1)
- weights = self.softmax(weights_not_softmax) # (B, 1, N)
- spk_embedding = torch.matmul(weights, profile.to(weights.device)) # (B, L, D)
- return spk_embedding, weights
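- # --- Editor's sketch (assumption, not from the original source): profile_lens masks
- # out padded speaker profiles; without it only the last decoder step is scored.
- def _example_cosine_distance_attention():
-     att = CosineDistanceAttention()
-     spk_decoder_out = torch.randn(2, 5, 192)  # (B, L, D)
-     profile = torch.randn(2, 4, 192)          # (B, N, D) speaker profiles
-     profile_lens = torch.tensor([4, 3])       # valid profiles per utterance
-     spk_embedding, weights = att(spk_decoder_out, profile, profile_lens)
-     assert spk_embedding.shape == (2, 5, 192) and weights.shape == (2, 5, 4)
-     return spk_embedding, weights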