rwkv_subsampling.py

#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
import math
from typing import Optional, Tuple, Union

import torch

from funasr.models.transformer.utils.nets_utils import pad_to_len

# check_short_utt() below compares against the Conv2dSubsampling* classes.
# This import path is an assumption (the classes are expected to live with
# the other subsampling modules in FunASR) and may need adjusting.
from funasr.models.transformer.utils.subsampling import (
    Conv2dSubsampling,
    Conv2dSubsampling2,
    Conv2dSubsampling6,
    Conv2dSubsampling8,
)
class TooShortUttError(Exception):
    """Raised when the utterance is too short for subsampling.

    Args:
        message (str): Error message.
        actual_size (int): Actual input size that is too short for subsampling.
        limit (int): Minimum size required by the subsampling module.
    """

    def __init__(self, message, actual_size, limit):
        """Construct a TooShortUttError for the error handler."""
        super().__init__(message)
        self.actual_size = actual_size
        self.limit = limit
def check_short_utt(ins, size):
    """Check if the utterance is too short for subsampling."""
    if isinstance(ins, Conv2dSubsampling2) and size < 3:
        return True, 3
    if isinstance(ins, Conv2dSubsampling) and size < 7:
        return True, 7
    if isinstance(ins, Conv2dSubsampling6) and size < 11:
        return True, 11
    if isinstance(ins, Conv2dSubsampling8) and size < 15:
        return True, 15
    return False, -1
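
# Example (a sketch, not part of the original file): guard a subsampling
# call and surface a readable error for utterances that are too short.
# `frontend` and `feats` are hypothetical names for an instantiated
# subsampling module and a (B, T, D) feature tensor.
#
#   is_short, limit = check_short_utt(frontend, feats.size(1))
#   if is_short:
#       raise TooShortUttError(
#           f"has {feats.size(1)} frames and is too short for subsampling "
#           f"(require at least {limit} frames)",
#           feats.size(1),
#           limit,
#       )
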
class RWKVConvInput(torch.nn.Module):
    """Streaming ConvInput module definition.

    Args:
        input_size: Input size.
        conv_size: Convolution channel sizes, a tuple of three values
            (conv_size1, conv_size2, conv_size3).
        subsampling_factor: Subsampling factor.
        conv_kernel_size: Size of the convolving kernel.
        output_size: Block output dimension.
    """
    def __init__(
        self,
        input_size: int,
        conv_size: Union[int, Tuple],
        subsampling_factor: int = 4,
        conv_kernel_size: int = 3,
        output_size: Optional[int] = None,
    ) -> None:
        """Construct a ConvInput object."""
        super().__init__()

        conv_size1, conv_size2, conv_size3 = conv_size
        padding = (conv_kernel_size - 1) // 2

        if subsampling_factor == 1:
            # No time subsampling: every strided conv keeps stride 1 on the
            # time axis and stride 2 on the frequency axis (three times).
            self.conv = torch.nn.Sequential(
                torch.nn.Conv2d(1, conv_size1, conv_kernel_size, stride=1, padding=padding),
                torch.nn.ReLU(),
                torch.nn.Conv2d(conv_size1, conv_size1, conv_kernel_size, stride=(1, 2), padding=padding),
                torch.nn.ReLU(),
                torch.nn.Conv2d(conv_size1, conv_size2, conv_kernel_size, stride=1, padding=padding),
                torch.nn.ReLU(),
                torch.nn.Conv2d(conv_size2, conv_size2, conv_kernel_size, stride=(1, 2), padding=padding),
                torch.nn.ReLU(),
                torch.nn.Conv2d(conv_size2, conv_size3, conv_kernel_size, stride=1, padding=padding),
                torch.nn.ReLU(),
                torch.nn.Conv2d(conv_size3, conv_size3, conv_kernel_size, stride=(1, 2), padding=padding),
                torch.nn.ReLU(),
            )
            # The frequency axis is halved three times in this branch, so the
            # projection input is conv_size3 * (input_size / 8).
            output_proj = conv_size3 * (((input_size // 2) // 2) // 2)
            self.subsampling_factor = 1
            self.stride_1 = 1
            self.create_new_mask = self.create_new_vgg_mask
        else:
            # Time subsampling: stride kernel_1 on the time axis in the second
            # conv and stride 2 in the fourth, for a total time reduction of
            # 2 * (subsampling_factor // 2).
            kernel_1 = subsampling_factor // 2
            self.conv = torch.nn.Sequential(
                torch.nn.Conv2d(1, conv_size1, conv_kernel_size, stride=1, padding=padding),
                torch.nn.ReLU(),
                torch.nn.Conv2d(conv_size1, conv_size1, conv_kernel_size, stride=(kernel_1, 2), padding=padding),
                torch.nn.ReLU(),
                torch.nn.Conv2d(conv_size1, conv_size2, conv_kernel_size, stride=1, padding=padding),
                torch.nn.ReLU(),
                torch.nn.Conv2d(conv_size2, conv_size2, conv_kernel_size, stride=(2, 2), padding=padding),
                torch.nn.ReLU(),
                torch.nn.Conv2d(conv_size2, conv_size3, conv_kernel_size, stride=1, padding=padding),
                torch.nn.ReLU(),
                torch.nn.Conv2d(conv_size3, conv_size3, conv_kernel_size, stride=1, padding=padding),
                torch.nn.ReLU(),
            )
            # The frequency axis is halved twice in this branch.
            output_proj = conv_size3 * ((input_size // 2) // 2)
            self.subsampling_factor = subsampling_factor
            self.create_new_mask = self.create_new_vgg_mask
            self.stride_1 = kernel_1
            self.min_frame_length = 7

        if output_size is not None:
            self.output = torch.nn.Linear(output_proj, output_size)
            self.output_size = output_size
        else:
            self.output = None
            self.output_size = output_proj
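
    # Shape arithmetic (illustrative, assuming the default
    # subsampling_factor=4, so kernel_1 = 2): the time axis is reduced by
    # stride 2 in the second conv and stride 2 in the fourth, giving
    # sub(T) = T / 4, while the frequency axis is halved twice, so a
    # D_feats=80 input yields conv_size3 * 20 features per frame before
    # the optional output projection.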
    def forward(
        self, x: torch.Tensor, mask: Optional[torch.Tensor], chunk_size: Optional[torch.Tensor]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encode input sequences.

        Args:
            x: ConvInput input sequences. (B, T, D_feats)
            mask: Mask of input sequences. (B, T)
            chunk_size: Number of subsampled frames per streaming chunk,
                or None for full-utterance processing.

        Returns:
            x: ConvInput output sequences. (B, sub(T), D_out)
            mask: Mask of output sequences. (B, sub(T))
        """
        if mask is not None:
            mask = self.create_new_mask(mask)
            olens = max(mask.eq(0).sum(1))

        b, t, f = x.size()
        x = x.unsqueeze(1)  # (b, 1, t, f)

        if chunk_size is not None:
            # Pad the time axis up to a whole number of chunks, then fold
            # each chunk into the batch dimension so the convolution never
            # sees context across a chunk boundary.  Note this path relies
            # on `olens`, i.e. it requires `mask` to be provided.
            max_input_length = int(
                chunk_size
                * self.subsampling_factor
                * math.ceil(float(t) / (chunk_size * self.subsampling_factor))
            )
            x = torch.stack(
                [pad_to_len(inputs, max_input_length, 1) for inputs in x], dim=0
            )
            n_chunks = max_input_length // (chunk_size * self.subsampling_factor)
            x = x.view(b * n_chunks, 1, chunk_size * self.subsampling_factor, f)

        x = self.conv(x)

        _, c, _, f = x.size()
        if chunk_size is not None:
            # Unfold the chunks back into the time axis and drop the padding.
            x = x.transpose(1, 2).contiguous().view(b, -1, c * f)[:, :olens, :]
        else:
            x = x.transpose(1, 2).contiguous().view(b, -1, c * f)

        if self.output is not None:
            x = self.output(x)

        if mask is not None:
            mask = mask[:, :olens][:, : x.size(1)]

        return x, mask
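
    # Worked example for the chunked path (illustrative numbers): with
    # t=100, chunk_size=16 and subsampling_factor=4, each chunk covers
    # 16 * 4 = 64 input frames, so the input is padded to
    # max_input_length = 64 * ceil(100 / 64) = 128 frames and reshaped to
    # (b * 2, 1, 64, f) before the convolution.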
    def create_new_vgg_mask(self, mask: torch.Tensor) -> torch.Tensor:
        """Create a new mask for VGG output sequences.

        Args:
            mask: Mask of input sequences. (B, T)

        Returns:
            mask: Mask of output sequences. (B, sub(T))
        """
        if self.subsampling_factor > 1:
            # Two strided stages on the time axis: factor 2, then stride_1.
            return mask[:, ::2][:, :: self.stride_1]
        return mask
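
    # E.g. with subsampling_factor=4 (stride_1=2), a (B, 100) mask becomes
    # (B, 50) after [:, ::2] and (B, 25) after [:, ::stride_1], matching the
    # 4x time reduction of the conv stack.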
    def get_size_before_subsampling(self, size: int) -> int:
        """Return the original size before subsampling for a given size.

        Args:
            size: Number of frames after subsampling.

        Returns:
            Number of frames before subsampling.
        """
        return size * self.subsampling_factor
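
# ---------------------------------------------------------------------------
# Minimal smoke test (a sketch, not part of the FunASR module).  The mask
# convention assumed here follows mask.eq(0) in forward(): 0 marks a valid
# frame.  All sizes are illustrative.
if __name__ == "__main__":
    subsampler = RWKVConvInput(
        input_size=80,
        conv_size=(32, 64, 128),
        subsampling_factor=4,
        output_size=256,
    )
    feats = torch.randn(2, 100, 80)  # (B, T, D_feats)
    mask = torch.zeros(2, 100, dtype=torch.long)  # 0 = valid frame
    out, out_mask = subsampler(feats, mask, chunk_size=None)
    print(out.shape)  # expected: torch.Size([2, 25, 256])
    print(out_mask.shape)  # expected: torch.Size([2, 25])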