cgmlp.py

  1. """MLP with convolutional gating (cgMLP) definition.
  2. References:
  3. https://openreview.net/forum?id=RA-zVvZLYIy
  4. https://arxiv.org/abs/2105.08050
  5. """
  6. import torch
  7. from funasr.modules.nets_utils import get_activation
  8. from funasr.modules.layer_norm import LayerNorm
  9. class ConvolutionalSpatialGatingUnit(torch.nn.Module):
  10. """Convolutional Spatial Gating Unit (CSGU)."""
  11. def __init__(
  12. self,
  13. size: int,
  14. kernel_size: int,
  15. dropout_rate: float,
  16. use_linear_after_conv: bool,
  17. gate_activation: str,
  18. ):
  19. super().__init__()
  20. n_channels = size // 2 # split input channels
  21. self.norm = LayerNorm(n_channels)
  22. self.conv = torch.nn.Conv1d(
  23. n_channels,
  24. n_channels,
  25. kernel_size,
  26. 1,
  27. (kernel_size - 1) // 2,
  28. groups=n_channels,
  29. )
  30. if use_linear_after_conv:
  31. self.linear = torch.nn.Linear(n_channels, n_channels)
  32. else:
  33. self.linear = None
  34. if gate_activation == "identity":
  35. self.act = torch.nn.Identity()
  36. else:
  37. self.act = get_activation(gate_activation)
  38. self.dropout = torch.nn.Dropout(dropout_rate)
  39. def espnet_initialization_fn(self):
  40. torch.nn.init.normal_(self.conv.weight, std=1e-6)
  41. torch.nn.init.ones_(self.conv.bias)
  42. if self.linear is not None:
  43. torch.nn.init.normal_(self.linear.weight, std=1e-6)
  44. torch.nn.init.ones_(self.linear.bias)
  45. def forward(self, x, gate_add=None):
  46. """Forward method
  47. Args:
  48. x (torch.Tensor): (N, T, D)
  49. gate_add (torch.Tensor): (N, T, D/2)
  50. Returns:
  51. out (torch.Tensor): (N, T, D/2)
  52. """
  53. x_r, x_g = x.chunk(2, dim=-1)
  54. x_g = self.norm(x_g) # (N, T, D/2)
  55. x_g = self.conv(x_g.transpose(1, 2)).transpose(1, 2) # (N, T, D/2)
  56. if self.linear is not None:
  57. x_g = self.linear(x_g)
  58. if gate_add is not None:
  59. x_g = x_g + gate_add
  60. x_g = self.act(x_g)
  61. out = x_r * x_g # (N, T, D/2)
  62. out = self.dropout(out)
  63. return out
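

# Illustrative note (added, not part of the original module): the CSGU halves the last
# dimension, e.g. an input of shape (N, T, 1024) is split into two (N, T, 512) halves
# and the gated product has shape (N, T, 512). The padding (kernel_size - 1) // 2 with
# stride 1 preserves the time length T only for odd kernel_size values.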


class ConvolutionalGatingMLP(torch.nn.Module):
    """Convolutional Gating MLP (cgMLP)."""

    def __init__(
        self,
        size: int,
        linear_units: int,
        kernel_size: int,
        dropout_rate: float,
        use_linear_after_conv: bool,
        gate_activation: str,
    ):
        super().__init__()

        self.channel_proj1 = torch.nn.Sequential(
            torch.nn.Linear(size, linear_units), torch.nn.GELU()
        )
        self.csgu = ConvolutionalSpatialGatingUnit(
            size=linear_units,
            kernel_size=kernel_size,
            dropout_rate=dropout_rate,
            use_linear_after_conv=use_linear_after_conv,
            gate_activation=gate_activation,
        )
        self.channel_proj2 = torch.nn.Linear(linear_units // 2, size)

    def forward(self, x, mask):
        if isinstance(x, tuple):
            xs_pad, pos_emb = x
        else:
            xs_pad, pos_emb = x, None

        xs_pad = self.channel_proj1(xs_pad)  # size -> linear_units
        xs_pad = self.csgu(xs_pad)  # linear_units -> linear_units/2
        xs_pad = self.channel_proj2(xs_pad)  # linear_units/2 -> size

        if pos_emb is not None:
            out = (xs_pad, pos_emb)
        else:
            out = xs_pad
        return out
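

# Minimal usage sketch (added for illustration, not part of the original module).
# It only uses torch and the classes defined above; the hyperparameters
# (size=256, linear_units=1024, kernel_size=31) are arbitrary example values,
# and since `mask` is not used inside forward, None is passed as a placeholder.
if __name__ == "__main__":
    torch.manual_seed(0)

    cgmlp = ConvolutionalGatingMLP(
        size=256,
        linear_units=1024,
        kernel_size=31,
        dropout_rate=0.1,
        use_linear_after_conv=False,
        gate_activation="identity",
    )

    xs = torch.randn(2, 50, 256)  # (batch, time, size)
    out = cgmlp(xs, mask=None)
    print(out.shape)  # torch.Size([2, 50, 256]): the block preserves the model dimension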