#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2024 yufan
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Multi-Head Attention Return Weight layer definition."""
import math

import torch
from torch import nn


class MultiHeadedAttentionReturnWeight(nn.Module):
    """Multi-Head Attention layer that also returns the attention weights.

    Args:
        n_head (int): The number of heads.
        n_feat (int): The number of features.
        dropout_rate (float): Dropout rate.

    """

    def __init__(self, n_head, n_feat, dropout_rate):
        """Construct a MultiHeadedAttentionReturnWeight object."""
        super(MultiHeadedAttentionReturnWeight, self).__init__()
        assert n_feat % n_head == 0
        # We assume d_v always equals d_k
        self.d_k = n_feat // n_head
        self.h = n_head
        self.linear_q = nn.Linear(n_feat, n_feat)
        self.linear_k = nn.Linear(n_feat, n_feat)
        self.linear_v = nn.Linear(n_feat, n_feat)
        self.linear_out = nn.Linear(n_feat, n_feat)
        self.attn = None
        self.dropout = nn.Dropout(p=dropout_rate)

    def forward_qkv(self, query, key, value):
        """Transform query, key and value.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).

        Returns:
            torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).
            torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).
            torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).

        """
        n_batch = query.size(0)
        q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
        k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
        v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
        q = q.transpose(1, 2)  # (batch, head, time1, d_k)
        k = k.transpose(1, 2)  # (batch, head, time2, d_k)
        v = v.transpose(1, 2)  # (batch, head, time2, d_k)
        return q, k, v
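    # Shape walk-through (illustrative numbers, not from the original source):
    # with n_feat=32 and n_head=4, a query of shape (batch, time1, 32) becomes
    # (batch, time1, 4, 8) after the view and (batch, 4, time1, 8) after the
    # transpose, so each head attends over its own d_k=8 slice of the features.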

    def forward_attention(self, value, scores, mask):
        """Compute attention context vector.

        Args:
            value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).
            scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).
            mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).

        Returns:
            torch.Tensor: Transformed value (#batch, time1, d_model)
                weighted by the attention score (#batch, time1, time2).
            torch.Tensor: Attention weights (#batch, n_head, time1, time2).

        """
        n_batch = value.size(0)
        if mask is not None:
            mask = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
            min_value = torch.finfo(scores.dtype).min
            scores = scores.masked_fill(mask, min_value)
            # Re-apply the mask after softmax so fully masked rows are zero.
            self.attn = torch.softmax(scores, dim=-1).masked_fill(
                mask, 0.0
            )  # (batch, head, time1, time2)
        else:
            self.attn = torch.softmax(scores, dim=-1)  # (batch, head, time1, time2)
        p_attn = self.dropout(self.attn)
        x = torch.matmul(p_attn, value)  # (batch, head, time1, d_k)
        x = (
            x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
        )  # (batch, time1, d_model)
        return self.linear_out(x), self.attn  # (batch, time1, d_model), (batch, head, time1, time2)
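    # Note on mask usage (illustrative; the shapes follow the docstring above):
    # a boolean mask of shape (batch, 1, time2) with nonzero entries marking
    # valid key positions broadcasts over every query step, while a
    # (batch, time1, time2) mask allows per-query-step masking, e.g. a causal
    # mask built with torch.tril(torch.ones(time1, time2, dtype=torch.bool)).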

    def forward(self, query, key, value, mask):
        """Compute scaled dot product attention.

        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2).

        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
            torch.Tensor: Attention weights (#batch, n_head, time1, time2).

        """
        q, k, v = self.forward_qkv(query, key, value)
        # Scale by 1/sqrt(d_k) so score variance stays independent of d_k.
        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
        return self.forward_attention(v, scores, mask)
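

if __name__ == "__main__":
    # Minimal smoke test, not part of the original module: the shapes and
    # hyper-parameters below are illustrative assumptions, chosen only to
    # show the expected input/output shapes of the layer.
    torch.manual_seed(0)
    batch, time1, time2, n_head, n_feat = 2, 5, 7, 4, 32
    mha = MultiHeadedAttentionReturnWeight(n_head, n_feat, dropout_rate=0.1)
    mha.eval()  # disable dropout so the smoke test is deterministic
    query = torch.randn(batch, time1, n_feat)
    key = torch.randn(batch, time2, n_feat)
    value = torch.randn(batch, time2, n_feat)
    # Padding mask: mark the last two key frames of every sequence invalid
    # (True = keep, False = mask out).
    mask = torch.ones(batch, 1, time2, dtype=torch.bool)
    mask[:, :, -2:] = False
    out, attn = mha(query, key, value, mask)
    print(out.shape)   # torch.Size([2, 5, 32])
    print(attn.shape)  # torch.Size([2, 4, 5, 7])
    # Masked key positions receive exactly zero attention weight.
    assert torch.all(attn[..., -2:] == 0)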