@@ -142,6 +142,28 @@ class TestParaformerInferencePipelines(unittest.TestCase):
             audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
         logger.info("asr inference result: {0}".format(rec_result))
 
 
+class TestParaformerBertInferencePipelines(unittest.TestCase):
+    def test_funasr_path(self):
+        import funasr
+        import os
+        logger.info("run_dir:{0} ; funasr_path: {1}".format(os.getcwd(), funasr.__file__))
+
+    def test_aishell1(self):
+        inference_pipeline = pipeline(
+            task=Tasks.auto_speech_recognition,
+            model='damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch')
+        rec_result = inference_pipeline(
+            audio_in='https://modelscope.oss-cn-beijing.aliyuncs.com/test/audios/asr_example.wav')
+        logger.info("asr inference result: {0}".format(rec_result))
+
+    def test_aishell2(self):
+        inference_pipeline = pipeline(
+            task=Tasks.auto_speech_recognition,
+            model='damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch')
+        rec_result = inference_pipeline(
+            audio_in='https://modelscope.oss-cn-beijing.aliyuncs.com/test/audios/asr_example.wav')
+        logger.info("asr inference result: {0}".format(rec_result))
+
 if __name__ == '__main__':
     unittest.main()