|
|
@@ -395,6 +395,24 @@ class TestUniasrInferencePipelines(unittest.TestCase):
|
|
|
param_dict={"decoding_model": "normal"})
|
|
|
logger.info("asr inference result: {0}".format(rec_result))
|
|
|
|
|
|
+ def test_uniasr_2pass_vi_common_offline(self):
|
|
|
+ inference_pipeline = pipeline(
|
|
|
+ task=Tasks.auto_speech_recognition,
|
|
|
+ model='damo/speech_UniASR_asr_2pass-vi-16k-common-vocab1001-pytorch-offline')
|
|
|
+ rec_result = inference_pipeline(
|
|
|
+ audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_vi.wav',
|
|
|
+ param_dict={"decoding_model": "offline"})
|
|
|
+ logger.info("asr inference result: {0}".format(rec_result))
|
|
|
+
|
|
|
+ def test_uniasr_2pass_vi_common_online(self):
|
|
|
+ inference_pipeline = pipeline(
|
|
|
+ task=Tasks.auto_speech_recognition,
|
|
|
+ model='damo/speech_UniASR_asr_2pass-vi-16k-common-vocab1001-pytorch-online')
|
|
|
+ rec_result = inference_pipeline(
|
|
|
+ audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_vi.wav',
|
|
|
+ param_dict={"decoding_model": "normal"})
|
|
|
+ logger.info("asr inference result: {0}".format(rec_result))
|
|
|
+
|
|
|
|
|
|
# Allow running this test module directly (discovers and runs all TestCase methods).
if __name__ == '__main__':
    unittest.main()
|