# export_model.py

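"""Export FunASR models (Paraformer ASR, VAD, punctuation) to ONNX or
TorchScript, optionally emitting quantized variants alongside the float
models."""
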
import json
from typing import Optional, Union
from pathlib import Path
from typeguard import check_argument_types
import os
import torch
from funasr.export.models import get_model
import numpy as np
import random
from funasr.utils.types import str2bool

# torch_version = float(".".join(torch.__version__.split(".")[:2]))
# assert torch_version > 1.9


class ModelExport:
    def __init__(
        self,
        cache_dir: Union[Path, str, None] = None,
        onnx: bool = True,
        device: str = "cpu",
        quant: bool = True,
        fallback_num: int = 0,
        audio_in: Optional[str] = None,
        calib_num: int = 200,
    ):
        assert check_argument_types()

        self.set_all_random_seed(0)
        self.cache_dir = cache_dir
        self.export_config = dict(
            feats_dim=560,
            onnx=False,
        )
        self.onnx = onnx
        self.device = device
        self.quant = quant
        self.fallback_num = fallback_num
        self.frontend = None
        self.audio_in = audio_in
        self.calib_num = calib_num

    def _export(
        self,
        model,
        tag_name: Optional[str] = None,
        verbose: bool = False,
    ):
        export_dir = self.cache_dir
        os.makedirs(export_dir, exist_ok=True)

        # export encoder1
        self.export_config["model_name"] = "model"
        model = get_model(
            model,
            self.export_config,
        )
        model.eval()
        # self._export_onnx(model, verbose, export_dir)
        if self.onnx:
            self._export_onnx(model, verbose, export_dir)
        else:
            self._export_torchscripts(model, verbose, export_dir)

        print("output dir: {}".format(export_dir))

    def _torch_quantize(self, model):
        def _run_calibration_data(m):
            # feed real features when audio_in is given, otherwise fall back
            # to a dummy input
            if self.audio_in is not None:
                feats, feats_len = self.load_feats(self.audio_in)
                for feat, feat_len in zip(feats, feats_len):
                    with torch.no_grad():
                        m(feat, feat_len)
            else:
                dummy_input = model.get_dummy_inputs()
                m(*dummy_input)

        from torch_quant.module import ModuleFilter
        from torch_quant.quantizer import Backend, Quantizer
        from funasr.export.models.modules.decoder_layer import DecoderLayerSANM
        from funasr.export.models.modules.encoder_layer import EncoderLayerSANM

        module_filter = ModuleFilter(include_classes=[EncoderLayerSANM, DecoderLayerSANM])
        module_filter.exclude_op_types = [torch.nn.Conv1d]
        quantizer = Quantizer(
            module_filter=module_filter,
            backend=Backend.FBGEMM,
        )
        model.eval()
        calib_model = quantizer.calib(model)
        _run_calibration_data(calib_model)
        if self.fallback_num > 0:
            # perform automatic mixed precision quantization
            amp_model = quantizer.amp(model)
            _run_calibration_data(amp_model)
            quantizer.fallback(amp_model, num=self.fallback_num)
            print('Fallback layers:')
            print('\n'.join(quantizer.module_filter.exclude_names))
        quant_model = quantizer.quantize(model)
        return quant_model

    def _export_torchscripts(self, model, verbose, path, enc_size=None):
        if enc_size:
            dummy_input = model.get_dummy_inputs(enc_size)
        else:
            dummy_input = model.get_dummy_inputs()

        if self.device == 'cuda':
            model = model.cuda()
            dummy_input = tuple([i.cuda() for i in dummy_input])

        # model_script = torch.jit.script(model)
        model_script = torch.jit.trace(model, dummy_input)
        model_script.save(os.path.join(path, f'{model.model_name}.torchscripts'))

        if self.quant:
            quant_model = self._torch_quantize(model)
            model_script = torch.jit.trace(quant_model, dummy_input)
            model_script.save(os.path.join(path, f'{model.model_name}_quant.torchscripts'))
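
    # Loading the exported TorchScript back for a quick smoke test (a sketch;
    # 'model.torchscripts' matches the model_name set in _export):
    #   loaded = torch.jit.load(os.path.join(path, 'model.torchscripts'))
    #   loaded(*dummy_input)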

    def set_all_random_seed(self, seed: int):
        random.seed(seed)
        np.random.seed(seed)
        torch.random.manual_seed(seed)

    def parse_audio_in(self, audio_in):
        wav_list, name_list = [], []
        if audio_in.endswith(".scp"):
            # Kaldi-style wav.scp: each line is "<utt-name> <wav-path>"
            with open(audio_in, 'r') as f:
                lines = f.readlines()[:self.calib_num]
            for line in lines:
                name, path = line.strip().split()
                name_list.append(name)
                wav_list.append(path)
        else:
            wav_list = [audio_in, ]
            name_list = ["test", ]
        return wav_list, name_list

    def load_feats(self, audio_in: Optional[str] = None):
        import torchaudio

        wav_list, name_list = self.parse_audio_in(audio_in)
        feats = []
        feats_len = []
        for line in wav_list:
            path = line.strip()
            waveform, sampling_rate = torchaudio.load(path)
            # resample to the frontend's expected sampling rate if needed
            if sampling_rate != self.frontend.fs:
                waveform = torchaudio.transforms.Resample(orig_freq=sampling_rate,
                                                          new_freq=self.frontend.fs)(waveform)
            fbank, fbank_len = self.frontend(waveform, [waveform.size(1)])
            feats.append(fbank)
            feats_len.append(fbank_len)
        return feats, feats_len

    def export(self,
               tag_name: str = 'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
               mode: Optional[str] = None,
               ):
        model_dir = tag_name
        if model_dir.startswith('damo'):
            from modelscope.hub.snapshot_download import snapshot_download
            model_dir = snapshot_download(model_dir, cache_dir=self.cache_dir)
        self.cache_dir = model_dir

        if mode is None:
            # infer the export mode from the ModelScope configuration file
            json_file = os.path.join(model_dir, 'configuration.json')
            with open(json_file, 'r') as f:
                config_data = json.load(f)
            if config_data['task'] == "punctuation":
                mode = config_data['model']['punc_model_config']['mode']
            else:
                mode = config_data['model']['model_config']['mode']

        if mode.startswith('paraformer'):
            from funasr.tasks.asr import ASRTaskParaformer as ASRTask
            config = os.path.join(model_dir, 'config.yaml')
            model_file = os.path.join(model_dir, 'model.pb')
            cmvn_file = os.path.join(model_dir, 'am.mvn')
            model, asr_train_args = ASRTask.build_model_from_file(
                config, model_file, cmvn_file, 'cpu'
            )
            self.frontend = model.frontend
        elif mode.startswith('offline'):
            from funasr.tasks.vad import VADTask
            config = os.path.join(model_dir, 'vad.yaml')
            model_file = os.path.join(model_dir, 'vad.pb')
            cmvn_file = os.path.join(model_dir, 'vad.mvn')
            model, vad_infer_args = VADTask.build_model_from_file(
                config, model_file, cmvn_file=cmvn_file, device='cpu'
            )
            self.export_config["feats_dim"] = 400
            self.frontend = model.frontend
        elif mode.startswith('punc'):  # also matches 'punc_VadRealtime'
            from funasr.tasks.punctuation import PunctuationTask as PUNCTask
            punc_train_config = os.path.join(model_dir, 'config.yaml')
            punc_model_file = os.path.join(model_dir, 'punc.pb')
            model, punc_train_args = PUNCTask.build_model_from_file(
                punc_train_config, punc_model_file, 'cpu'
            )

        self._export(model, tag_name)

    def _export_onnx(self, model, verbose, path, enc_size=None):
        if enc_size:
            dummy_input = model.get_dummy_inputs(enc_size)
        else:
            dummy_input = model.get_dummy_inputs()

        # model_script = torch.jit.script(model)
        model_script = model  # torch.jit.trace(model)
        model_path = os.path.join(path, f'{model.model_name}.onnx')

        torch.onnx.export(
            model_script,
            dummy_input,
            model_path,
            verbose=verbose,
            opset_version=14,
            input_names=model.get_input_names(),
            output_names=model.get_output_names(),
            dynamic_axes=model.get_dynamic_axes()
        )

        if self.quant:
            from onnxruntime.quantization import QuantType, quantize_dynamic
            import onnx
            quant_model_path = os.path.join(path, f'{model.model_name}_quant.onnx')
            onnx_model = onnx.load(model_path)
            # exclude nodes whose names contain 'output' from quantization
            nodes = [n.name for n in onnx_model.graph.node]
            nodes_to_exclude = [m for m in nodes if 'output' in m]
            quantize_dynamic(
                model_input=model_path,
                model_output=quant_model_path,
                op_types_to_quantize=['MatMul'],
                per_channel=True,
                reduce_range=False,
                weight_type=QuantType.QUInt8,
                nodes_to_exclude=nodes_to_exclude,
            )
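
    # Minimal sanity check for the exported ONNX file (a sketch, assuming
    # onnxruntime is installed and inputs are ordered as in get_input_names()):
    #   import onnxruntime as ort
    #   sess = ort.InferenceSession(model_path)
    #   feeds = {k: v.numpy() for k, v in zip(model.get_input_names(), dummy_input)}
    #   outputs = sess.run(None, feeds)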


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--model-name', type=str, required=True)
    parser.add_argument('--export-dir', type=str, required=True)
    parser.add_argument('--type', type=str, default='onnx', help='["onnx", "torch"]')
    parser.add_argument('--device', type=str, default='cpu', help='["cpu", "cuda"]')
    parser.add_argument('--quantize', type=str2bool, default=False, help='export quantized model')
    parser.add_argument('--fallback-num', type=int, default=0, help='amp fallback number')
    parser.add_argument('--audio_in', type=str, default=None, help='["wav", "wav.scp"]')
    parser.add_argument('--calib_num', type=int, default=200, help='calib max num')
    args = parser.parse_args()

    export_model = ModelExport(
        cache_dir=args.export_dir,
        onnx=args.type == 'onnx',
        device=args.device,
        quant=args.quantize,
        fallback_num=args.fallback_num,
        audio_in=args.audio_in,
        calib_num=args.calib_num,
    )
    export_model.export(args.model_name)
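
    # Example invocation (paths are illustrative):
    #   python export_model.py \
    #       --model-name damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch \
    #       --export-dir ./export --type onnx --quantize True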