仁迷 3 года назад
Родитель
Commit
f182090cda

+ 1 - 1
egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/infer.py

@@ -23,7 +23,7 @@ def modelscope_infer_core(output_dir, split_dir, njob, idx):
         batch_size=1
     )
     audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
-    inference_pipline(audio_in=audio_in, param_dict={"decoding_model": "normal"})
+    inference_pipline(audio_in=audio_in, param_dict={"decoding_model": "offline"})
 
 
 def modelscope_infer(params):

+ 1 - 1
egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/infer_after_finetune.py

@@ -34,7 +34,7 @@ def modelscope_infer_after_finetune(params):
         batch_size=1
     )
     audio_in = os.path.join(params["data_dir"], "wav.scp")
-    inference_pipeline(audio_in=audio_in, param_dict={"decoding_model": "normal"})
+    inference_pipeline(audio_in=audio_in, param_dict={"decoding_model": "offline"})
 
     # computer CER if GT text is set
     text_in = os.path.join(params["data_dir"], "text")