# export_model.py
  1. import json
  2. from typing import Union, Dict
  3. from pathlib import Path
  4. from typeguard import check_argument_types
  5. import os
  6. import logging
  7. import torch
  8. from funasr.export.models import get_model
  9. import numpy as np
  10. import random
  11. from funasr.utils.types import str2bool
  12. # torch_version = float(".".join(torch.__version__.split(".")[:2]))
  13. # assert torch_version > 1.9
  14. class ASRModelExportParaformer:
  15. def __init__(
  16. self,
  17. cache_dir: Union[Path, str] = None,
  18. onnx: bool = True,
  19. quant: bool = True,
  20. fallback_num: int = 0,
  21. audio_in: str = None,
  22. calib_num: int = 200,
  23. ):
  24. assert check_argument_types()
  25. self.set_all_random_seed(0)
  26. if cache_dir is None:
  27. cache_dir = Path.home() / ".cache" / "export"
  28. self.cache_dir = Path(cache_dir)
  29. self.export_config = dict(
  30. feats_dim=560,
  31. onnx=False,
  32. )
  33. print("output dir: {}".format(self.cache_dir))
  34. self.onnx = onnx
  35. self.quant = quant
  36. self.fallback_num = fallback_num
  37. self.frontend = None
  38. self.audio_in = audio_in
  39. self.calib_num = calib_num
  40. def _export(
  41. self,
  42. model,
  43. tag_name: str = None,
  44. verbose: bool = False,
  45. ):
  46. export_dir = self.cache_dir / tag_name.replace(' ', '-')
  47. os.makedirs(export_dir, exist_ok=True)
  48. # export encoder1
  49. self.export_config["model_name"] = "model"
  50. model = get_model(
  51. model,
  52. self.export_config,
  53. )
  54. model.eval()
  55. # self._export_onnx(model, verbose, export_dir)
  56. if self.onnx:
  57. self._export_onnx(model, verbose, export_dir)
  58. else:
  59. self._export_torchscripts(model, verbose, export_dir)
  60. print("output dir: {}".format(export_dir))
  61. def _torch_quantize(self, model):
  62. def _run_calibration_data(m):
  63. # using dummy inputs for a example
  64. if self.audio_in is not None:
  65. feats, feats_len = self.load_feats(self.audio_in)
  66. for i, (feat, len) in enumerate(zip(feats, feats_len)):
  67. with torch.no_grad():
  68. m(feat, len)
  69. else:
  70. dummy_input = model.get_dummy_inputs()
  71. m(*dummy_input)
  72. from torch_quant.module import ModuleFilter
  73. from torch_quant.quantizer import Backend, Quantizer
  74. from funasr.export.models.modules.decoder_layer import DecoderLayerSANM
  75. from funasr.export.models.modules.encoder_layer import EncoderLayerSANM
  76. module_filter = ModuleFilter(include_classes=[EncoderLayerSANM, DecoderLayerSANM])
  77. module_filter.exclude_op_types = [torch.nn.Conv1d]
  78. quantizer = Quantizer(
  79. module_filter=module_filter,
  80. backend=Backend.FBGEMM,
  81. )
  82. model.eval()
  83. calib_model = quantizer.calib(model)
  84. _run_calibration_data(calib_model)
  85. if self.fallback_num > 0:
  86. # perform automatic mixed precision quantization
  87. amp_model = quantizer.amp(model)
  88. _run_calibration_data(amp_model)
  89. quantizer.fallback(amp_model, num=self.fallback_num)
  90. print('Fallback layers:')
  91. print('\n'.join(quantizer.module_filter.exclude_names))
  92. quant_model = quantizer.quantize(model)
  93. return quant_model
  94. def _export_torchscripts(self, model, verbose, path, enc_size=None):
  95. if enc_size:
  96. dummy_input = model.get_dummy_inputs(enc_size)
  97. else:
  98. dummy_input = model.get_dummy_inputs()
  99. # model_script = torch.jit.script(model)
  100. model_script = torch.jit.trace(model, dummy_input)
  101. model_script.save(os.path.join(path, f'{model.model_name}.torchscripts'))
  102. if self.quant:
  103. quant_model = self._torch_quantize(model)
  104. model_script = torch.jit.trace(quant_model, dummy_input)
  105. model_script.save(os.path.join(path, f'{model.model_name}_quant.torchscripts'))
  106. def set_all_random_seed(self, seed: int):
  107. random.seed(seed)
  108. np.random.seed(seed)
  109. torch.random.manual_seed(seed)
  110. def parse_audio_in(self, audio_in):
  111. wav_list, name_list = [], []
  112. if audio_in.endswith(".scp"):
  113. f = open(audio_in, 'r')
  114. lines = f.readlines()[:self.calib_num]
  115. for line in lines:
  116. name, path = line.strip().split()
  117. name_list.append(name)
  118. wav_list.append(path)
  119. else:
  120. wav_list = [audio_in,]
  121. name_list = ["test",]
  122. return wav_list, name_list
  123. def load_feats(self, audio_in: str = None):
  124. import torchaudio
  125. wav_list, name_list = self.parse_audio_in(audio_in)
  126. feats = []
  127. feats_len = []
  128. for line in wav_list:
  129. path = line.strip()
  130. waveform, sampling_rate = torchaudio.load(path)
  131. if sampling_rate != self.frontend.fs:
  132. waveform = torchaudio.transforms.Resample(orig_freq=sampling_rate,
  133. new_freq=self.frontend.fs)(waveform)
  134. fbank, fbank_len = self.frontend(waveform, [waveform.size(1)])
  135. feats.append(fbank)
  136. feats_len.append(fbank_len)
  137. return feats, feats_len
  138. def export(self,
  139. tag_name: str = 'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
  140. mode: str = 'paraformer',
  141. ):
  142. model_dir = tag_name
  143. if model_dir.startswith('damo/'):
  144. from modelscope.hub.snapshot_download import snapshot_download
  145. model_dir = snapshot_download(model_dir, cache_dir=self.cache_dir)
  146. asr_train_config = os.path.join(model_dir, 'config.yaml')
  147. asr_model_file = os.path.join(model_dir, 'model.pb')
  148. cmvn_file = os.path.join(model_dir, 'am.mvn')
  149. json_file = os.path.join(model_dir, 'configuration.json')
  150. if mode is None:
  151. import json
  152. with open(json_file, 'r') as f:
  153. config_data = json.load(f)
  154. mode = config_data['model']['model_config']['mode']
  155. if mode.startswith('paraformer'):
  156. from funasr.tasks.asr import ASRTaskParaformer as ASRTask
  157. elif mode.startswith('uniasr'):
  158. from funasr.tasks.asr import ASRTaskUniASR as ASRTask
  159. model, asr_train_args = ASRTask.build_model_from_file(
  160. asr_train_config, asr_model_file, cmvn_file, 'cpu'
  161. )
  162. self.frontend = model.frontend
  163. self._export(model, tag_name)
  164. def _export_onnx(self, model, verbose, path, enc_size=None):
  165. if enc_size:
  166. dummy_input = model.get_dummy_inputs(enc_size)
  167. else:
  168. dummy_input = model.get_dummy_inputs()
  169. # model_script = torch.jit.script(model)
  170. model_script = model #torch.jit.trace(model)
  171. model_path = os.path.join(path, f'{model.model_name}.onnx')
  172. torch.onnx.export(
  173. model_script,
  174. dummy_input,
  175. model_path,
  176. verbose=verbose,
  177. opset_version=14,
  178. input_names=model.get_input_names(),
  179. output_names=model.get_output_names(),
  180. dynamic_axes=model.get_dynamic_axes()
  181. )
  182. if self.quant:
  183. from onnxruntime.quantization import QuantType, quantize_dynamic
  184. import onnx
  185. quant_model_path = os.path.join(path, f'{model.model_name}_quant.onnx')
  186. onnx_model = onnx.load(model_path)
  187. nodes = [n.name for n in onnx_model.graph.node]
  188. nodes_to_exclude = [m for m in nodes if 'output' in m]
  189. quantize_dynamic(
  190. model_input=model_path,
  191. model_output=quant_model_path,
  192. op_types_to_quantize=['MatMul'],
  193. per_channel=True,
  194. reduce_range=False,
  195. weight_type=QuantType.QUInt8,
  196. nodes_to_exclude=nodes_to_exclude,
  197. )
  198. if __name__ == '__main__':
  199. import argparse
  200. parser = argparse.ArgumentParser()
  201. parser.add_argument('--model-name', type=str, required=True)
  202. parser.add_argument('--export-dir', type=str, required=True)
  203. parser.add_argument('--type', type=str, default='onnx', help='["onnx", "torch"]')
  204. parser.add_argument('--quantize', type=str2bool, default=False, help='export quantized model')
  205. parser.add_argument('--fallback-num', type=int, default=0, help='amp fallback number')
  206. parser.add_argument('--audio_in', type=str, default=None, help='["wav", "wav.scp"]')
  207. parser.add_argument('--calib_num', type=int, default=200, help='calib max num')
  208. args = parser.parse_args()
  209. export_model = ASRModelExportParaformer(
  210. cache_dir=args.export_dir,
  211. onnx=args.type == 'onnx',
  212. quant=args.quantize,
  213. fallback_num=args.fallback_num,
  214. audio_in=args.audio_in,
  215. calib_num=args.calib_num,
  216. )
  217. export_model.export(args.model_name)