# layers.py
  1. # Copyright 3D-Speaker (https://github.com/alibaba-damo-academy/3D-Speaker). All Rights Reserved.
  2. # Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
  3. import torch
  4. import torch.nn.functional as F
  5. import torch.utils.checkpoint as cp
  6. from torch import nn
  7. def get_nonlinear(config_str, channels):
  8. nonlinear = nn.Sequential()
  9. for name in config_str.split('-'):
  10. if name == 'relu':
  11. nonlinear.add_module('relu', nn.ReLU(inplace=True))
  12. elif name == 'prelu':
  13. nonlinear.add_module('prelu', nn.PReLU(channels))
  14. elif name == 'batchnorm':
  15. nonlinear.add_module('batchnorm', nn.BatchNorm1d(channels))
  16. elif name == 'batchnorm_':
  17. nonlinear.add_module('batchnorm',
  18. nn.BatchNorm1d(channels, affine=False))
  19. else:
  20. raise ValueError('Unexpected module ({}).'.format(name))
  21. return nonlinear
  22. def statistics_pooling(x, dim=-1, keepdim=False, unbiased=True, eps=1e-2):
  23. mean = x.mean(dim=dim)
  24. std = x.std(dim=dim, unbiased=unbiased)
  25. stats = torch.cat([mean, std], dim=-1)
  26. if keepdim:
  27. stats = stats.unsqueeze(dim=dim)
  28. return stats
  29. class StatsPool(nn.Module):
  30. def forward(self, x):
  31. return statistics_pooling(x)
  32. class TDNNLayer(nn.Module):
  33. def __init__(self,
  34. in_channels,
  35. out_channels,
  36. kernel_size,
  37. stride=1,
  38. padding=0,
  39. dilation=1,
  40. bias=False,
  41. config_str='batchnorm-relu'):
  42. super(TDNNLayer, self).__init__()
  43. if padding < 0:
  44. assert kernel_size % 2 == 1, 'Expect equal paddings, but got even kernel size ({})'.format(
  45. kernel_size)
  46. padding = (kernel_size - 1) // 2 * dilation
  47. self.linear = nn.Conv1d(in_channels,
  48. out_channels,
  49. kernel_size,
  50. stride=stride,
  51. padding=padding,
  52. dilation=dilation,
  53. bias=bias)
  54. self.nonlinear = get_nonlinear(config_str, out_channels)
  55. def forward(self, x):
  56. x = self.linear(x)
  57. x = self.nonlinear(x)
  58. return x
  59. class CAMLayer(nn.Module):
  60. def __init__(self,
  61. bn_channels,
  62. out_channels,
  63. kernel_size,
  64. stride,
  65. padding,
  66. dilation,
  67. bias,
  68. reduction=2):
  69. super(CAMLayer, self).__init__()
  70. self.linear_local = nn.Conv1d(bn_channels,
  71. out_channels,
  72. kernel_size,
  73. stride=stride,
  74. padding=padding,
  75. dilation=dilation,
  76. bias=bias)
  77. self.linear1 = nn.Conv1d(bn_channels, bn_channels // reduction, 1)
  78. self.relu = nn.ReLU(inplace=True)
  79. self.linear2 = nn.Conv1d(bn_channels // reduction, out_channels, 1)
  80. self.sigmoid = nn.Sigmoid()
  81. def forward(self, x):
  82. y = self.linear_local(x)
  83. context = x.mean(-1, keepdim=True) + self.seg_pooling(x)
  84. context = self.relu(self.linear1(context))
  85. m = self.sigmoid(self.linear2(context))
  86. return y * m
  87. def seg_pooling(self, x, seg_len=100, stype='avg'):
  88. if stype == 'avg':
  89. seg = F.avg_pool1d(x, kernel_size=seg_len, stride=seg_len, ceil_mode=True)
  90. elif stype == 'max':
  91. seg = F.max_pool1d(x, kernel_size=seg_len, stride=seg_len, ceil_mode=True)
  92. else:
  93. raise ValueError('Wrong segment pooling type.')
  94. shape = seg.shape
  95. seg = seg.unsqueeze(-1).expand(*shape, seg_len).reshape(*shape[:-1], -1)
  96. seg = seg[..., :x.shape[-1]]
  97. return seg
  98. class CAMDenseTDNNLayer(nn.Module):
  99. def __init__(self,
  100. in_channels,
  101. out_channels,
  102. bn_channels,
  103. kernel_size,
  104. stride=1,
  105. dilation=1,
  106. bias=False,
  107. config_str='batchnorm-relu',
  108. memory_efficient=False):
  109. super(CAMDenseTDNNLayer, self).__init__()
  110. assert kernel_size % 2 == 1, 'Expect equal paddings, but got even kernel size ({})'.format(
  111. kernel_size)
  112. padding = (kernel_size - 1) // 2 * dilation
  113. self.memory_efficient = memory_efficient
  114. self.nonlinear1 = get_nonlinear(config_str, in_channels)
  115. self.linear1 = nn.Conv1d(in_channels, bn_channels, 1, bias=False)
  116. self.nonlinear2 = get_nonlinear(config_str, bn_channels)
  117. self.cam_layer = CAMLayer(bn_channels,
  118. out_channels,
  119. kernel_size,
  120. stride=stride,
  121. padding=padding,
  122. dilation=dilation,
  123. bias=bias)
  124. def bn_function(self, x):
  125. return self.linear1(self.nonlinear1(x))
  126. def forward(self, x):
  127. if self.training and self.memory_efficient:
  128. x = cp.checkpoint(self.bn_function, x)
  129. else:
  130. x = self.bn_function(x)
  131. x = self.cam_layer(self.nonlinear2(x))
  132. return x
  133. class CAMDenseTDNNBlock(nn.ModuleList):
  134. def __init__(self,
  135. num_layers,
  136. in_channels,
  137. out_channels,
  138. bn_channels,
  139. kernel_size,
  140. stride=1,
  141. dilation=1,
  142. bias=False,
  143. config_str='batchnorm-relu',
  144. memory_efficient=False):
  145. super(CAMDenseTDNNBlock, self).__init__()
  146. for i in range(num_layers):
  147. layer = CAMDenseTDNNLayer(in_channels=in_channels + i * out_channels,
  148. out_channels=out_channels,
  149. bn_channels=bn_channels,
  150. kernel_size=kernel_size,
  151. stride=stride,
  152. dilation=dilation,
  153. bias=bias,
  154. config_str=config_str,
  155. memory_efficient=memory_efficient)
  156. self.add_module('tdnnd%d' % (i + 1), layer)
  157. def forward(self, x):
  158. for layer in self:
  159. x = torch.cat([x, layer(x)], dim=1)
  160. return x
  161. class TransitLayer(nn.Module):
  162. def __init__(self,
  163. in_channels,
  164. out_channels,
  165. bias=True,
  166. config_str='batchnorm-relu'):
  167. super(TransitLayer, self).__init__()
  168. self.nonlinear = get_nonlinear(config_str, in_channels)
  169. self.linear = nn.Conv1d(in_channels, out_channels, 1, bias=bias)
  170. def forward(self, x):
  171. x = self.nonlinear(x)
  172. x = self.linear(x)
  173. return x
  174. class DenseLayer(nn.Module):
  175. def __init__(self,
  176. in_channels,
  177. out_channels,
  178. bias=False,
  179. config_str='batchnorm-relu'):
  180. super(DenseLayer, self).__init__()
  181. self.linear = nn.Conv1d(in_channels, out_channels, 1, bias=bias)
  182. self.nonlinear = get_nonlinear(config_str, out_channels)
  183. def forward(self, x):
  184. if len(x.shape) == 2:
  185. x = self.linear(x.unsqueeze(dim=-1)).squeeze(dim=-1)
  186. else:
  187. x = self.linear(x)
  188. x = self.nonlinear(x)
  189. return x
  190. class BasicResBlock(nn.Module):
  191. expansion = 1
  192. def __init__(self, in_planes, planes, stride=1):
  193. super(BasicResBlock, self).__init__()
  194. self.conv1 = nn.Conv2d(in_planes,
  195. planes,
  196. kernel_size=3,
  197. stride=(stride, 1),
  198. padding=1,
  199. bias=False)
  200. self.bn1 = nn.BatchNorm2d(planes)
  201. self.conv2 = nn.Conv2d(planes,
  202. planes,
  203. kernel_size=3,
  204. stride=1,
  205. padding=1,
  206. bias=False)
  207. self.bn2 = nn.BatchNorm2d(planes)
  208. self.shortcut = nn.Sequential()
  209. if stride != 1 or in_planes != self.expansion * planes:
  210. self.shortcut = nn.Sequential(
  211. nn.Conv2d(in_planes,
  212. self.expansion * planes,
  213. kernel_size=1,
  214. stride=(stride, 1),
  215. bias=False),
  216. nn.BatchNorm2d(self.expansion * planes))
  217. def forward(self, x):
  218. out = F.relu(self.bn1(self.conv1(x)))
  219. out = self.bn2(self.conv2(out))
  220. out += self.shortcut(x)
  221. out = F.relu(out)
  222. return out