# s3prl.py — S3PRL pretrained-representation frontend for FunASR.
  1. import copy
  2. import logging
  3. import os
  4. from argparse import Namespace
  5. from typing import Optional
  6. from typing import Tuple
  7. from typing import Union
  8. import humanfriendly
  9. import torch
  10. from funasr.models.frontend.abs_frontend import AbsFrontend
  11. from funasr.modules.frontends.frontend import Frontend
  12. from funasr.modules.nets_utils import pad_list
  13. from funasr.utils.get_default_kwargs import get_default_kwargs
  14. def base_s3prl_setup(args):
  15. args.upstream_feature_selection = getattr(args, "upstream_feature_selection", None)
  16. args.upstream_model_config = getattr(args, "upstream_model_config", None)
  17. args.upstream_refresh = getattr(args, "upstream_refresh", False)
  18. args.upstream_ckpt = getattr(args, "upstream_ckpt", None)
  19. args.init_ckpt = getattr(args, "init_ckpt", None)
  20. args.verbose = getattr(args, "verbose", False)
  21. args.tile_factor = getattr(args, "tile_factor", 1)
  22. return args
  23. class S3prlFrontend(AbsFrontend):
  24. """Speech Pretrained Representation frontend structure for ASR."""
  25. def __init__(
  26. self,
  27. fs: Union[int, str] = 16000,
  28. frontend_conf: Optional[dict] = get_default_kwargs(Frontend),
  29. download_dir: str = None,
  30. multilayer_feature: bool = False,
  31. ):
  32. super().__init__()
  33. if isinstance(fs, str):
  34. fs = humanfriendly.parse_size(fs)
  35. if download_dir is not None:
  36. torch.hub.set_dir(download_dir)
  37. self.multilayer_feature = multilayer_feature
  38. self.upstream, self.featurizer = self._get_upstream(frontend_conf)
  39. self.pretrained_params = copy.deepcopy(self.upstream.state_dict())
  40. self.output_dim = self.featurizer.output_dim
  41. self.frontend_type = "s3prl"
  42. self.hop_length = self.upstream.get_downsample_rates("key")
  43. def _get_upstream(self, frontend_conf):
  44. """Get S3PRL upstream model."""
  45. s3prl_args = base_s3prl_setup(
  46. Namespace(**frontend_conf, device="cpu"),
  47. )
  48. self.args = s3prl_args
  49. s3prl_path = None
  50. python_path_list = os.environ.get("PYTHONPATH", "(None)").split(":")
  51. for p in python_path_list:
  52. if p.endswith("s3prl"):
  53. s3prl_path = p
  54. break
  55. assert s3prl_path is not None
  56. s3prl_upstream = torch.hub.load(
  57. s3prl_path,
  58. s3prl_args.upstream,
  59. ckpt=s3prl_args.upstream_ckpt,
  60. model_config=s3prl_args.upstream_model_config,
  61. refresh=s3prl_args.upstream_refresh,
  62. source="local",
  63. ).to("cpu")
  64. if getattr(
  65. s3prl_upstream, "model", None
  66. ) is not None and s3prl_upstream.model.__class__.__name__ in [
  67. "Wav2Vec2Model",
  68. "HubertModel",
  69. ]:
  70. s3prl_upstream.model.encoder.layerdrop = 0.0
  71. from s3prl.upstream.interfaces import Featurizer
  72. if self.multilayer_feature is None:
  73. feature_selection = "last_hidden_state"
  74. else:
  75. feature_selection = "hidden_states"
  76. s3prl_featurizer = Featurizer(
  77. upstream=s3prl_upstream,
  78. feature_selection=feature_selection,
  79. upstream_device="cpu",
  80. )
  81. return s3prl_upstream, s3prl_featurizer
  82. def _tile_representations(self, feature):
  83. """Tile up the representations by `tile_factor`.
  84. Input - sequence of representations
  85. shape: (batch_size, seq_len, feature_dim)
  86. Output - sequence of tiled representations
  87. shape: (batch_size, seq_len * factor, feature_dim)
  88. """
  89. assert (
  90. len(feature.shape) == 3
  91. ), "Input argument `feature` has invalid shape: {}".format(feature.shape)
  92. tiled_feature = feature.repeat(1, 1, self.args.tile_factor)
  93. tiled_feature = tiled_feature.reshape(
  94. feature.size(0), feature.size(1) * self.args.tile_factor, feature.size(2)
  95. )
  96. return tiled_feature
  97. def output_size(self) -> int:
  98. return self.output_dim
  99. def forward(
  100. self, input: torch.Tensor, input_lengths: torch.Tensor
  101. ) -> Tuple[torch.Tensor, torch.Tensor]:
  102. wavs = [wav[: input_lengths[i]] for i, wav in enumerate(input)]
  103. self.upstream.eval()
  104. with torch.no_grad():
  105. feats = self.upstream(wavs)
  106. feats = self.featurizer(wavs, feats)
  107. if self.args.tile_factor != 1:
  108. feats = self._tile_representations(feats)
  109. input_feats = pad_list(feats, 0.0)
  110. feats_lens = torch.tensor([f.shape[0] for f in feats], dtype=torch.long)
  111. # Saving CUDA Memory
  112. del feats
  113. return input_feats, feats_lens
  114. def reload_pretrained_parameters(self):
  115. self.upstream.load_state_dict(self.pretrained_params)
  116. logging.info("Pretrained S3PRL frontend model parameters reloaded!")