attention.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)

"""Multi-Head Attention layer definition."""

import math

import numpy
import torch
from torch import nn
from typing import Optional, Tuple
import torch.nn.functional as F

from funasr.models.transformer.utils.nets_utils import make_pad_mask
import funasr.models.lora.layers as lora


class MultiHeadedAttention(nn.Module):
    """Multi-Head Attention layer.

    Args:
        n_head (int): The number of heads.
        n_feat (int): The number of features.
        dropout_rate (float): Dropout rate.
    """

    def __init__(self, n_head, n_feat, dropout_rate):
        """Construct a MultiHeadedAttention object."""
        super(MultiHeadedAttention, self).__init__()
        assert n_feat % n_head == 0
        # We assume d_v always equals d_k
        self.d_k = n_feat // n_head
        self.h = n_head
        self.linear_q = nn.Linear(n_feat, n_feat)
        self.linear_k = nn.Linear(n_feat, n_feat)
        self.linear_v = nn.Linear(n_feat, n_feat)
        self.linear_out = nn.Linear(n_feat, n_feat)
        self.attn = None
        self.dropout = nn.Dropout(p=dropout_rate)

    def forward_qkv(self, query, key, value):
        """Transform query, key and value.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).

        Returns:
            torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).
            torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).
            torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).
        """
        n_batch = query.size(0)
        q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
        k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
        v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
        q = q.transpose(1, 2)  # (batch, head, time1, d_k)
        k = k.transpose(1, 2)  # (batch, head, time2, d_k)
        v = v.transpose(1, 2)  # (batch, head, time2, d_k)

        return q, k, v

    def forward_attention(self, value, scores, mask):
        """Compute attention context vector.

        Args:
            value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).
            scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).
            mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).

        Returns:
            torch.Tensor: Transformed value (#batch, time1, d_model)
                weighted by the attention score (#batch, time1, time2).
        """
        n_batch = value.size(0)
        if mask is not None:
            mask = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
            min_value = float(
                numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min
            )
            scores = scores.masked_fill(mask, min_value)
            self.attn = torch.softmax(scores, dim=-1).masked_fill(
                mask, 0.0
            )  # (batch, head, time1, time2)
        else:
            self.attn = torch.softmax(scores, dim=-1)  # (batch, head, time1, time2)

        p_attn = self.dropout(self.attn)
        x = torch.matmul(p_attn, value)  # (batch, head, time1, d_k)
        x = (
            x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
        )  # (batch, time1, d_model)

        return self.linear_out(x)  # (batch, time1, d_model)

    def forward(self, query, key, value, mask):
        """Compute scaled dot product attention.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2).

        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
        """
        q, k, v = self.forward_qkv(query, key, value)
        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
        return self.forward_attention(v, scores, mask)
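
# Usage sketch (illustrative, not part of the original FunASR source): a minimal
# cross-attention call on random tensors. All shapes and hyperparameters below are
# assumptions chosen for the example.
#
#   mha = MultiHeadedAttention(n_head=4, n_feat=256, dropout_rate=0.0)
#   q = torch.randn(2, 10, 256)                    # (batch, time1, n_feat)
#   kv = torch.randn(2, 20, 256)                   # (batch, time2, n_feat)
#   mask = torch.ones(2, 1, 20, dtype=torch.bool)  # (batch, 1, time2), nonzero = keep
#   out = mha(q, kv, kv, mask)                     # -> (2, 10, 256)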


class RelPositionMultiHeadedAttention(MultiHeadedAttention):
    """Multi-Head Attention layer with relative position encoding (new implementation).

    Details can be found in https://github.com/espnet/espnet/pull/2816.
    Paper: https://arxiv.org/abs/1901.02860

    Args:
        n_head (int): The number of heads.
        n_feat (int): The number of features.
        dropout_rate (float): Dropout rate.
        zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
    """

    def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):
        """Construct a RelPositionMultiHeadedAttention object."""
        super().__init__(n_head, n_feat, dropout_rate)
        self.zero_triu = zero_triu
        # linear transformation for positional encoding
        self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
        # these two learnable biases are used in matrix c and matrix d
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
        self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
        torch.nn.init.xavier_uniform_(self.pos_bias_u)
        torch.nn.init.xavier_uniform_(self.pos_bias_v)

    def rel_shift(self, x):
        """Compute relative positional encoding.

        Args:
            x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).
                time1 means the length of query vector.

        Returns:
            torch.Tensor: Output tensor.
        """
        # pad with a zero column and reshape so that each row is shifted to align
        # relative positions (Transformer-XL style relative shift)
        zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=-1)

        x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
        x = x_padded[:, :, 1:].view_as(x)[
            :, :, :, : x.size(-1) // 2 + 1
        ]  # only keep the positions from 0 to time2

        if self.zero_triu:
            ones = torch.ones((x.size(2), x.size(3)), device=x.device)
            x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]

        return x

    def forward(self, query, key, value, pos_emb, mask):
        """Compute 'Scaled Dot Product Attention' with rel. positional encoding.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).
            pos_emb (torch.Tensor): Positional embedding tensor
                (#batch, 2*time1-1, size).
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2).

        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
        """
        q, k, v = self.forward_qkv(query, key, value)
        q = q.transpose(1, 2)  # (batch, time1, head, d_k)

        n_batch_pos = pos_emb.size(0)
        p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
        p = p.transpose(1, 2)  # (batch, head, 2*time1-1, d_k)

        # (batch, head, time1, d_k)
        q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
        # (batch, head, time1, d_k)
        q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)

        # compute attention score
        # first compute matrix a and matrix c
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        # (batch, head, time1, time2)
        matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))

        # compute matrix b and matrix d
        # (batch, head, time1, 2*time1-1)
        matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
        matrix_bd = self.rel_shift(matrix_bd)

        scores = (matrix_ac + matrix_bd) / math.sqrt(
            self.d_k
        )  # (batch, head, time1, time2)

        return self.forward_attention(v, scores, mask)
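
# Usage sketch (illustrative, not part of the original FunASR source): relative
# position attention additionally expects a positional embedding of length
# 2*time1-1, e.g. as produced by a relative positional encoding module. The
# shapes and hyperparameters below are assumptions chosen for the example.
#
#   rel_mha = RelPositionMultiHeadedAttention(n_head=4, n_feat=256, dropout_rate=0.0)
#   x = torch.randn(2, 10, 256)                    # (batch, time1, n_feat)
#   pos_emb = torch.randn(1, 2 * 10 - 1, 256)      # (1, 2*time1-1, n_feat)
#   mask = torch.ones(2, 1, 10, dtype=torch.bool)  # (batch, 1, time1)
#   out = rel_mha(x, x, x, pos_emb, mask)          # -> (2, 10, 256)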


class MultiHeadSelfAttention(nn.Module):
    """Multi-Head Self-Attention layer with a fused query/key/value projection.

    Args:
        n_head (int): The number of heads.
        in_feat (int): The number of input features.
        n_feat (int): The number of features.
        dropout_rate (float): Dropout rate.
    """

    def __init__(self, n_head, in_feat, n_feat, dropout_rate):
        """Construct a MultiHeadSelfAttention object."""
        super(MultiHeadSelfAttention, self).__init__()
        assert n_feat % n_head == 0
        # We assume d_v always equals d_k
        self.d_k = n_feat // n_head
        self.h = n_head
        self.linear_out = nn.Linear(n_feat, n_feat)
        self.linear_q_k_v = nn.Linear(in_feat, n_feat * 3)
        self.attn = None
        self.dropout = nn.Dropout(p=dropout_rate)

    def forward_qkv(self, x):
        """Transform the input into query, key and value.

        Args:
            x (torch.Tensor): Input tensor (#batch, time1, in_feat).

        Returns:
            torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).
            torch.Tensor: Transformed key tensor (#batch, n_head, time1, d_k).
            torch.Tensor: Transformed value tensor (#batch, n_head, time1, d_k).
            torch.Tensor: Value tensor before the head split (#batch, time1, n_feat).
        """
        b, t, d = x.size()
        q_k_v = self.linear_q_k_v(x)
        q, k, v = torch.split(q_k_v, int(self.h * self.d_k), dim=-1)
        q_h = torch.reshape(q, (b, t, self.h, self.d_k)).transpose(1, 2)  # (batch, head, time1, d_k)
        k_h = torch.reshape(k, (b, t, self.h, self.d_k)).transpose(1, 2)  # (batch, head, time2, d_k)
        v_h = torch.reshape(v, (b, t, self.h, self.d_k)).transpose(1, 2)  # (batch, head, time2, d_k)

        return q_h, k_h, v_h, v

    def forward_attention(self, value, scores, mask, mask_att_chunk_encoder=None):
        """Compute attention context vector.

        Args:
            value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).
            scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).
            mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).
            mask_att_chunk_encoder (torch.Tensor): Optional chunk mask multiplied
                into `mask` for chunk-wise encoding.

        Returns:
            torch.Tensor: Transformed value (#batch, time1, d_model)
                weighted by the attention score (#batch, time1, time2).
        """
        n_batch = value.size(0)
        if mask is not None:
            if mask_att_chunk_encoder is not None:
                mask = mask * mask_att_chunk_encoder

            mask = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
            min_value = float(
                numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min
            )
            scores = scores.masked_fill(mask, min_value)
            self.attn = torch.softmax(scores, dim=-1).masked_fill(
                mask, 0.0
            )  # (batch, head, time1, time2)
        else:
            self.attn = torch.softmax(scores, dim=-1)  # (batch, head, time1, time2)

        p_attn = self.dropout(self.attn)
        x = torch.matmul(p_attn, value)  # (batch, head, time1, d_k)
        x = (
            x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
        )  # (batch, time1, d_model)

        return self.linear_out(x)  # (batch, time1, d_model)

    def forward(self, x, mask, mask_att_chunk_encoder=None):
        """Compute scaled dot product self-attention.

        Args:
            x (torch.Tensor): Input tensor (#batch, time1, in_feat).
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2).
            mask_att_chunk_encoder (torch.Tensor): Optional chunk mask for
                chunk-wise encoding.

        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
        """
        q_h, k_h, v_h, v = self.forward_qkv(x)
        q_h = q_h * self.d_k ** (-0.5)
        scores = torch.matmul(q_h, k_h.transpose(-2, -1))
        att_outs = self.forward_attention(v_h, scores, mask, mask_att_chunk_encoder)
        return att_outs
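
# Usage sketch (illustrative, not part of the original FunASR source): the fused
# q/k/v projection allows in_feat to differ from n_feat. The values below are
# assumptions chosen for the example.
#
#   self_attn = MultiHeadSelfAttention(n_head=4, in_feat=512, n_feat=256, dropout_rate=0.0)
#   x = torch.randn(2, 10, 512)                    # (batch, time1, in_feat)
#   mask = torch.ones(2, 1, 10, dtype=torch.bool)  # (batch, 1, time1)
#   out = self_attn(x, mask)                       # -> (2, 10, 256)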