Commit d16e9d8248 — by 游雁, 2 years ago (parent commit linked)

Change: set a concrete output directory (`output_dir = None` → `output_dir = "./results"`) in ten ModelScope ASR/VAD demo and inference scripts. Each file below is modified +1 −1.

+ 1 - 1
egs_modelscope/asr/conformer/speech_conformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/demo.py

@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_conformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch",

+ 1 - 1
egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/demo.py

@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch",

+ 1 - 1
egs_modelscope/asr/paraformerbert/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.py

@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch",

+ 1 - 1
egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-online/infer.py

@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-online",

+ 1 - 1
egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online/infer.py

@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online",

+ 1 - 1
egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-online/infer.py

@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-online",

+ 1 - 1
egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/demo.py

@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",

+ 1 - 1
egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/demo_online.py

@@ -7,7 +7,7 @@ logger.setLevel(logging.CRITICAL)
 import soundfile
 
 if __name__ == '__main__':
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",

+ 1 - 1
egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/demo.py

@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example_8k.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-8k-common",

+ 1 - 1
egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/demo_online.py

@@ -7,7 +7,7 @@ logger.setLevel(logging.CRITICAL)
 import soundfile
 
 if __name__ == '__main__':
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-8k-common",