Browse Source

rtf benchmark

游雁 3 years ago
parent
commit
50571828c6

+ 1 - 1
funasr/export/README.md

@@ -17,7 +17,7 @@ The installation is the same as [funasr](../../README.md)
        --model-name [model_name] \
        --export-dir [export_dir] \
        --type [onnx, torch] \
-       --quantize \
+       --quantize [true, false] \
        --fallback-num [fallback_num]
    ```
    `model-name`: the model to export. It could be a model from ModelScope, or a local finetuned model (named: model.pb).

+ 2 - 2
funasr/export/export_model.py

@@ -10,7 +10,7 @@ import torch
 from funasr.export.models import get_model
 import numpy as np
 import random
-
+from funasr.utils.types import str2bool
 # torch_version = float(".".join(torch.__version__.split(".")[:2]))
 # assert torch_version > 1.9
 
@@ -234,7 +234,7 @@ if __name__ == '__main__':
     parser.add_argument('--model-name', type=str, required=True)
     parser.add_argument('--export-dir', type=str, required=True)
     parser.add_argument('--type', type=str, default='onnx', help='["onnx", "torch"]')
-    parser.add_argument('--quantize', action='store_true', help='export quantized model')
+    parser.add_argument('--quantize', type=str2bool, default=False, help='export quantized model')
     parser.add_argument('--fallback-num', type=int, default=0, help='amp fallback number')
     parser.add_argument('--audio_in', type=str, default=None, help='["wav", "wav.scp"]')
     parser.add_argument('--calib_num', type=int, default=200, help='calib max num')

+ 2 - 2
funasr/runtime/python/utils/test_rtf.py

@@ -2,14 +2,14 @@
 import time
 import sys
 import librosa
-
+from funasr.utils.types import str2bool
 
 import argparse
 parser = argparse.ArgumentParser()
 parser.add_argument('--model_dir', type=str, required=True)
 parser.add_argument('--backend', type=str, default='onnx', help='["onnx", "torch"]')
 parser.add_argument('--wav_file', type=str, default=None, help='wav file or wav.scp for test')
-parser.add_argument('--quantize', type=bool, default=False, help='quantized model')
+parser.add_argument('--quantize', type=str2bool, default=False, help='quantized model')
 parser.add_argument('--intra_op_num_threads', type=int, default=1, help='intra_op_num_threads for onnx')
 args = parser.parse_args()
 

+ 1 - 5
funasr/runtime/python/utils/test_rtf.sh

@@ -10,7 +10,7 @@ rtf_tool=test_rtf.py
 #:<<!
 model_name="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
 backend="onnx" # "torch"
-quantize='True' # 'False'
+quantize='true' # 'false'
 tag=${model_name}/${backend}_${quantize}
 !
 
@@ -21,11 +21,7 @@ echo ${logs_outputs_dir}
 
 if [ $stage == 0 ];then
 
-  if [ $quantize == 'True' ];then
     python -m funasr.export.export_model --model-name ${model_name} --export-dir ${export_root} --type ${backend} --quantize ${quantize} --audio_in ${scp}
-  else
-    python -m funasr.export.export_model --model-name ${model_name} --export-dir ${export_root} --type ${backend}
-  fi
 
 fi
 

+ 0 - 0
push_scan.py


+ 0 - 0
scan.py