# s3prl.py — S3PRL pretrained-representation frontend (FunASR)
  1. import copy
  2. import logging
  3. import os
  4. from argparse import Namespace
  5. from typing import Optional
  6. from typing import Tuple
  7. from typing import Union
  8. import humanfriendly
  9. import torch
  10. import torch.nn as nn
  11. from funasr.frontends.utils.frontend import Frontend
  12. from funasr.models.transformer.utils.nets_utils import pad_list
  13. def base_s3prl_setup(args):
  14. args.upstream_feature_selection = getattr(args, "upstream_feature_selection", None)
  15. args.upstream_model_config = getattr(args, "upstream_model_config", None)
  16. args.upstream_refresh = getattr(args, "upstream_refresh", False)
  17. args.upstream_ckpt = getattr(args, "upstream_ckpt", None)
  18. args.init_ckpt = getattr(args, "init_ckpt", None)
  19. args.verbose = getattr(args, "verbose", False)
  20. args.tile_factor = getattr(args, "tile_factor", 1)
  21. return args
  22. class S3prlFrontend(nn.Module):
  23. """Speech Pretrained Representation frontend structure for ASR."""
  24. def __init__(
  25. self,
  26. fs: Union[int, str] = 16000,
  27. frontend_conf: Optional[dict] = None,
  28. download_dir: str = None,
  29. multilayer_feature: bool = False,
  30. ):
  31. super().__init__()
  32. if isinstance(fs, str):
  33. fs = humanfriendly.parse_size(fs)
  34. if download_dir is not None:
  35. torch.hub.set_dir(download_dir)
  36. self.multilayer_feature = multilayer_feature
  37. self.upstream, self.featurizer = self._get_upstream(frontend_conf)
  38. self.pretrained_params = copy.deepcopy(self.upstream.state_dict())
  39. self.output_dim = self.featurizer.output_dim
  40. self.frontend_type = "s3prl"
  41. self.hop_length = self.upstream.get_downsample_rates("key")
  42. def _get_upstream(self, frontend_conf):
  43. """Get S3PRL upstream model."""
  44. s3prl_args = base_s3prl_setup(
  45. Namespace(**frontend_conf, device="cpu"),
  46. )
  47. self.args = s3prl_args
  48. s3prl_path = None
  49. python_path_list = os.environ.get("PYTHONPATH", "(None)").split(":")
  50. for p in python_path_list:
  51. if p.endswith("s3prl"):
  52. s3prl_path = p
  53. break
  54. assert s3prl_path is not None
  55. s3prl_upstream = torch.hub.load(
  56. s3prl_path,
  57. s3prl_args.upstream,
  58. ckpt=s3prl_args.upstream_ckpt,
  59. model_config=s3prl_args.upstream_model_config,
  60. refresh=s3prl_args.upstream_refresh,
  61. source="local",
  62. ).to("cpu")
  63. if getattr(
  64. s3prl_upstream, "model", None
  65. ) is not None and s3prl_upstream.model.__class__.__name__ in [
  66. "Wav2Vec2Model",
  67. "HubertModel",
  68. ]:
  69. s3prl_upstream.model.encoder.layerdrop = 0.0
  70. from s3prl.upstream.interfaces import Featurizer
  71. if self.multilayer_feature is None:
  72. feature_selection = "last_hidden_state"
  73. else:
  74. feature_selection = "hidden_states"
  75. s3prl_featurizer = Featurizer(
  76. upstream=s3prl_upstream,
  77. feature_selection=feature_selection,
  78. upstream_device="cpu",
  79. )
  80. return s3prl_upstream, s3prl_featurizer
  81. def _tile_representations(self, feature):
  82. """Tile up the representations by `tile_factor`.
  83. Input - sequence of representations
  84. shape: (batch_size, seq_len, feature_dim)
  85. Output - sequence of tiled representations
  86. shape: (batch_size, seq_len * factor, feature_dim)
  87. """
  88. assert (
  89. len(feature.shape) == 3
  90. ), "Input argument `feature` has invalid shape: {}".format(feature.shape)
  91. tiled_feature = feature.repeat(1, 1, self.args.tile_factor)
  92. tiled_feature = tiled_feature.reshape(
  93. feature.size(0), feature.size(1) * self.args.tile_factor, feature.size(2)
  94. )
  95. return tiled_feature
  96. def output_size(self) -> int:
  97. return self.output_dim
  98. def forward(
  99. self, input: torch.Tensor, input_lengths: torch.Tensor
  100. ) -> Tuple[torch.Tensor, torch.Tensor]:
  101. wavs = [wav[: input_lengths[i]] for i, wav in enumerate(input)]
  102. self.upstream.eval()
  103. with torch.no_grad():
  104. feats = self.upstream(wavs)
  105. feats = self.featurizer(wavs, feats)
  106. if self.args.tile_factor != 1:
  107. feats = self._tile_representations(feats)
  108. input_feats = pad_list(feats, 0.0)
  109. feats_lens = torch.tensor([f.shape[0] for f in feats], dtype=torch.long)
  110. # Saving CUDA Memory
  111. del feats
  112. return input_feats, feats_lens
  113. def reload_pretrained_parameters(self):
  114. self.upstream.load_state_dict(self.pretrained_params)
  115. logging.info("Pretrained S3PRL frontend model parameters reloaded!")