import json
import logging
import os
import random
from pathlib import Path
from typing import Dict, Union

import numpy as np
import torch
from typeguard import check_argument_types

from funasr.export.models import get_model
from funasr.utils.types import str2bool

# torch_version = float(".".join(torch.__version__.split(".")[:2]))
# assert torch_version > 1.9


class ModelExport:
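    """Export a FunASR ASR model to ONNX or TorchScript, optionally writing a
    quantized variant alongside the float model."""
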
    def __init__(
        self,
        cache_dir: Union[Path, str] = None,
        onnx: bool = True,
        device: str = "cpu",
        quant: bool = True,
        fallback_num: int = 0,
        audio_in: str = None,
        calib_num: int = 200,
    ):
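        """
        Args:
            cache_dir: output root; defaults to ~/.cache/export.
            onnx: export ONNX when True, TorchScript otherwise.
            device: 'cpu' or 'cuda' (used by the TorchScript path).
            quant: additionally export a quantized model.
            fallback_num: number of layers the mixed-precision (amp) pass may
                fall back to fp32; 0 disables the amp pass.
            audio_in: wav file or wav.scp used as calibration data; dummy
                inputs are used when None.
            calib_num: maximum number of calibration utterances.
        """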
        assert check_argument_types()
        self.set_all_random_seed(0)
        if cache_dir is None:
            cache_dir = Path.home() / ".cache" / "export"
        self.cache_dir = Path(cache_dir)
        self.export_config = dict(
            feats_dim=560,  # 560 = 80-dim fbank x 7 LFR frames (Paraformer-style frontend)
            onnx=False,
        )
        print("output dir: {}".format(self.cache_dir))
        self.onnx = onnx
        self.device = device
        self.quant = quant
        self.fallback_num = fallback_num
        self.frontend = None
        self.audio_in = audio_in
        self.calib_num = calib_num

    def _export(
        self,
        model,
        tag_name: str = None,
        verbose: bool = False,
    ):
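        """Wrap the model with its export adapter via get_model(), then write
        ONNX or TorchScript artifacts under cache_dir/tag_name."""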
        export_dir = self.cache_dir / tag_name.replace(' ', '-')
        os.makedirs(export_dir, exist_ok=True)

        # wrap the model with its export-friendly counterpart
        self.export_config["model_name"] = "model"
        model = get_model(
            model,
            self.export_config,
        )
        model.eval()
        if self.onnx:
            self._export_onnx(model, verbose, export_dir)
        else:
            self._export_torchscripts(model, verbose, export_dir)
        print("output dir: {}".format(export_dir))

    def _torch_quantize(self, model):
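        """Post-training quantization with torch_quant: calibrate the SANM
        encoder/decoder layers on real or dummy inputs and, when fallback_num
        is positive, let the amp pass keep that many layers in fp32."""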
        def _run_calibration_data(m):
            # feed calibration data: real features if audio_in was given,
            # otherwise a single dummy input
            if self.audio_in is not None:
                feats, feats_len = self.load_feats(self.audio_in)
                for feat, feat_len in zip(feats, feats_len):
                    with torch.no_grad():
                        m(feat, feat_len)
            else:
                dummy_input = model.get_dummy_inputs()
                m(*dummy_input)

        from torch_quant.module import ModuleFilter
        from torch_quant.quantizer import Backend, Quantizer

        from funasr.export.models.modules.decoder_layer import DecoderLayerSANM
        from funasr.export.models.modules.encoder_layer import EncoderLayerSANM

        # quantize only the SANM encoder/decoder layers; Conv1d ops stay in fp32
        module_filter = ModuleFilter(include_classes=[EncoderLayerSANM, DecoderLayerSANM])
        module_filter.exclude_op_types = [torch.nn.Conv1d]
        quantizer = Quantizer(
            module_filter=module_filter,
            backend=Backend.FBGEMM,
        )
        model.eval()
        calib_model = quantizer.calib(model)
        _run_calibration_data(calib_model)
        if self.fallback_num > 0:
            # automatic mixed-precision quantization: run calibration on the
            # amp model, then fall back `fallback_num` layers to fp32
            amp_model = quantizer.amp(model)
            _run_calibration_data(amp_model)
            quantizer.fallback(amp_model, num=self.fallback_num)
            print('Fallback layers:')
            print('\n'.join(quantizer.module_filter.exclude_names))
        quant_model = quantizer.quantize(model)
        return quant_model

    def _export_torchscripts(self, model, verbose, path, enc_size=None):
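        """Trace the model with dummy inputs and save TorchScript files, plus
        a quantized variant when self.quant is set."""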
        if enc_size:
            dummy_input = model.get_dummy_inputs(enc_size)
        else:
            dummy_input = model.get_dummy_inputs()

        if self.device == 'cuda':
            model = model.cuda()
            dummy_input = tuple(i.cuda() for i in dummy_input)

        # model_script = torch.jit.script(model)
        model_script = torch.jit.trace(model, dummy_input)
        model_script.save(os.path.join(path, f'{model.model_name}.torchscripts'))

        if self.quant:
            quant_model = self._torch_quantize(model)
            model_script = torch.jit.trace(quant_model, dummy_input)
            model_script.save(os.path.join(path, f'{model.model_name}_quant.torchscripts'))

    def set_all_random_seed(self, seed: int):
        random.seed(seed)
        np.random.seed(seed)
        torch.random.manual_seed(seed)

    def parse_audio_in(self, audio_in):
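        """Resolve audio_in into (wav_list, name_list): either a Kaldi-style
        wav.scp ('<name> <path>' per line, truncated to calib_num entries) or
        a single wav file."""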
        wav_list, name_list = [], []
        if audio_in.endswith(".scp"):
            with open(audio_in, 'r') as f:
                lines = f.readlines()[:self.calib_num]
            for line in lines:
                name, path = line.strip().split()
                name_list.append(name)
                wav_list.append(path)
        else:
            wav_list = [audio_in]
            name_list = ["test"]
        return wav_list, name_list

    def load_feats(self, audio_in: str = None):
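        """Load the calibration wavs, resample them to the frontend's sample
        rate, and return per-utterance fbank features and their lengths."""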
        import torchaudio

        wav_list, _ = self.parse_audio_in(audio_in)
        feats = []
        feats_len = []
        for line in wav_list:
            path = line.strip()
            waveform, sampling_rate = torchaudio.load(path)
            if sampling_rate != self.frontend.fs:
                waveform = torchaudio.transforms.Resample(
                    orig_freq=sampling_rate, new_freq=self.frontend.fs
                )(waveform)
            fbank, fbank_len = self.frontend(waveform, [waveform.size(1)])
            feats.append(fbank)
            feats_len.append(fbank_len)
        return feats, feats_len

    def export(
        self,
        tag_name: str = 'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
        mode: str = 'paraformer',
    ):
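        """Download the ModelScope snapshot if tag_name is a model id, build
        the model from its training config, and export it."""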
        model_dir = tag_name
        if model_dir.startswith('damo/'):
            # a ModelScope model id: fetch the snapshot into the cache dir
            from modelscope.hub.snapshot_download import snapshot_download
            model_dir = snapshot_download(model_dir, cache_dir=self.cache_dir)
        asr_train_config = os.path.join(model_dir, 'config.yaml')
        asr_model_file = os.path.join(model_dir, 'model.pb')
        cmvn_file = os.path.join(model_dir, 'am.mvn')
        json_file = os.path.join(model_dir, 'configuration.json')
        if mode is None:
            # fall back to the mode recorded in the model's configuration.json
            with open(json_file, 'r') as f:
                config_data = json.load(f)
            mode = config_data['model']['model_config']['mode']
        if mode.startswith('paraformer'):
            from funasr.tasks.asr import ASRTaskParaformer as ASRTask
        elif mode.startswith('uniasr'):
            from funasr.tasks.asr import ASRTaskUniASR as ASRTask
        else:
            raise ValueError(f"unsupported mode for export: {mode}")
        model, asr_train_args = ASRTask.build_model_from_file(
            asr_train_config, asr_model_file, cmvn_file, 'cpu'
        )
        self.frontend = model.frontend
        self._export(model, tag_name)

    def _export_onnx(self, model, verbose, path, enc_size=None):
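        """Export to ONNX (opset 14) with dynamic axes; when self.quant is
        set, also write a dynamically quantized model (per-channel uint8
        weights on MatMul nodes)."""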
        if enc_size:
            dummy_input = model.get_dummy_inputs(enc_size)
        else:
            dummy_input = model.get_dummy_inputs()

        model_script = model  # torch.onnx.export traces the model internally
        model_path = os.path.join(path, f'{model.model_name}.onnx')
        torch.onnx.export(
            model_script,
            dummy_input,
            model_path,
            verbose=verbose,
            opset_version=14,
            input_names=model.get_input_names(),
            output_names=model.get_output_names(),
            dynamic_axes=model.get_dynamic_axes(),
        )

        if self.quant:
            import onnx
            from onnxruntime.quantization import QuantType, quantize_dynamic

            quant_model_path = os.path.join(path, f'{model.model_name}_quant.onnx')
            onnx_model = onnx.load(model_path)
            # skip nodes whose names contain 'output' (e.g. output projections)
            nodes = [n.name for n in onnx_model.graph.node]
            nodes_to_exclude = [m for m in nodes if 'output' in m]
            quantize_dynamic(
                model_input=model_path,
                model_output=quant_model_path,
                op_types_to_quantize=['MatMul'],
                per_channel=True,
                reduce_range=False,
                weight_type=QuantType.QUInt8,
                nodes_to_exclude=nodes_to_exclude,
            )


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--model-name', type=str, required=True)
    parser.add_argument('--export-dir', type=str, required=True)
    parser.add_argument('--type', type=str, default='onnx', help='["onnx", "torch"]')
    parser.add_argument('--device', type=str, default='cpu', help='["cpu", "cuda"]')
    parser.add_argument('--quantize', type=str2bool, default=False, help='export quantized model')
    parser.add_argument('--fallback-num', type=int, default=0, help='amp fallback number')
    parser.add_argument('--audio_in', type=str, default=None, help='["wav", "wav.scp"]')
    parser.add_argument('--calib_num', type=int, default=200, help='calib max num')
    args = parser.parse_args()

    export_model = ModelExport(
        cache_dir=args.export_dir,
        onnx=args.type == 'onnx',
        device=args.device,
        quant=args.quantize,
        fallback_num=args.fallback_num,
        audio_in=args.audio_in,
        calib_num=args.calib_num,
    )
    export_model.export(args.model_name)
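
# Example invocation with the default Paraformer model id from export(); the
# ModelScope snapshot is downloaded into --export-dir before exporting:
#
#   python export_model.py \
#       --model-name damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch \
#       --export-dir ./export \
#       --type onnx --quantize True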