export_model.py

from typing import Union, Dict
from pathlib import Path
from typeguard import check_argument_types
import os
import logging
import torch
from funasr.bin.asr_inference_paraformer import Speech2Text
from funasr.export.models import get_model


class ASRModelExportParaformer:
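    """Export a FunASR Paraformer ASR model to ONNX under a local cache directory."""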
    def __init__(self, cache_dir: Union[Path, str] = None, onnx: bool = True):
        assert check_argument_types()
        if cache_dir is None:
            cache_dir = Path.home() / "cache" / "export"
        self.cache_dir = Path(cache_dir)
        self.export_config = dict(
            feats_dim=560,
            onnx=onnx,
        )
        logging.info("output dir: {}".format(self.cache_dir))
        self.onnx = onnx

    def export(
        self,
        model: Speech2Text,
        tag_name: str = None,
        verbose: bool = False,
    ):
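        """Wrap the loaded model with its export-time module (via get_model) and dump it to ONNX in cache_dir/tag_name."""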
        export_dir = self.cache_dir / tag_name.replace(' ', '-')
        os.makedirs(export_dir, exist_ok=True)

        # export encoder1
        self.export_config["model_name"] = "model"
        model = get_model(
            model,
            self.export_config,
        )
        if self.onnx:
            self._export_onnx(model, verbose, export_dir)

        logging.info("output dir: {}".format(export_dir))

    def export_from_modelscope(
        self,
        tag_name: str = 'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
    ):
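        """Download the model snapshot from the ModelScope hub, build it on CPU, and export it."""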
        from funasr.tasks.asr import ASRTaskParaformer as ASRTask
        from modelscope.hub.snapshot_download import snapshot_download

        model_dir = snapshot_download(tag_name, cache_dir=self.cache_dir)
        asr_train_config = os.path.join(model_dir, 'config.yaml')
        asr_model_file = os.path.join(model_dir, 'model.pb')
        cmvn_file = os.path.join(model_dir, 'am.mvn')
        model, asr_train_args = ASRTask.build_model_from_file(
            asr_train_config, asr_model_file, cmvn_file, 'cpu'
        )
        self.export(model, tag_name)

    def _export_onnx(self, model, verbose, path, enc_size=None):
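        """Run torch.onnx.export on the wrapped model, using the dummy inputs and axis metadata the wrapper provides."""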
        if enc_size:
            dummy_input = model.get_dummy_inputs(enc_size)
        else:
            dummy_input = model.get_dummy_inputs()

        # model_script = torch.jit.script(model)
        model_script = model  # torch.jit.trace(model)
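        # NOTE: explicit scripting/tracing is left disabled above; torch.onnx.export
        # traces the eager module itself when an nn.Module is passed in directly.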
        torch.onnx.export(
            model_script,
            dummy_input,
            os.path.join(path, f'{model.model_name}.onnx'),
            verbose=verbose,
            opset_version=12,
            input_names=model.get_input_names(),
            output_names=model.get_output_names(),
            dynamic_axes=model.get_dynamic_axes(),
        )


if __name__ == '__main__':
    export_model = ASRModelExportParaformer()
    export_model.export_from_modelscope('damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch')