# test_asr_inference_pipeline.py

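"""Unit tests for ModelScope automatic speech recognition (ASR) inference pipelines.

Each case builds pipeline(task=Tasks.auto_speech_recognition, model=...), runs it on a
hosted example audio file (audio_in=<URL>), and logs the recognition result. Suites are
grouped by model family: Conformer, Data2vec, MFCCA, Paraformer, ParaformerBert, and UniASR.
"""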
import unittest

from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.logger import get_logger

logger = get_logger()

class TestConformerInferencePipelines(unittest.TestCase):
    """Conformer ASR models trained on AISHELL-1 and AISHELL-2."""

    def test_funasr_path(self):
        import funasr
        import os
        logger.info("run_dir:{0} ; funasr_path: {1}".format(os.getcwd(), funasr.__file__))

    def test_aishell1(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_conformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
        logger.info("asr inference result: {0}".format(rec_result))

    def test_aishell2(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_conformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
        logger.info("asr inference result: {0}".format(rec_result))

class TestData2vecInferencePipelines(unittest.TestCase):
    """Data2vec pretrain-based ASR models (Transformer and Paraformer heads)."""

    def test_funasr_path(self):
        import funasr
        import os
        logger.info("run_dir:{0} ; funasr_path: {1}".format(os.getcwd(), funasr.__file__))

    def test_transformer(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_data2vec_pretrain-zh-cn-aishell2-16k-pytorch')
        rec_result = inference_pipeline(
            audio_in='https://modelscope.oss-cn-beijing.aliyuncs.com/test/audios/asr_example.wav')
        logger.info("asr inference result: {0}".format(rec_result))
        assert rec_result["text"] == "每一天都要快乐喔"

    def test_paraformer(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_data2vec_pretrain-paraformer-zh-cn-aishell2-16k')
        rec_result = inference_pipeline(
            audio_in='https://modelscope.oss-cn-beijing.aliyuncs.com/test/audios/asr_example.wav')
        logger.info("asr inference result: {0}".format(rec_result))
        assert rec_result["text"] == "每一天都要快乐喔"

class TestMfccaInferencePipelines(unittest.TestCase):
    """MFCCA multi-channel ASR model from NPU-ASLP, trained on AliMeeting."""

    def test_funasr_path(self):
        import funasr
        import os
        logger.info("run_dir:{0} ; funasr_path: {1}".format(os.getcwd(), funasr.__file__))

    def test_alimeeting(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='NPU-ASLP/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950',
            model_revision='v3.0.0')
        rec_result = inference_pipeline(
            audio_in='https://pre.modelscope.cn/api/v1/models/NPU-ASLP/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/repo?Revision=master&FilePath=example/asr_example_mc.wav')
        logger.info("asr inference result: {0}".format(rec_result))

class TestParaformerInferencePipelines(unittest.TestCase):
    """Paraformer ASR models: large/tiny variants, 8k/16k sample rates, several corpora."""

    def test_funasr_path(self):
        import funasr
        import os
        logger.info("run_dir:{0} ; funasr_path: {1}".format(os.getcwd(), funasr.__file__))

    def test_paraformer_large_contextual_common(self):
        # Contextual (hotword) model: the hotword list is passed via param_dict.
        param_dict = dict()
        param_dict['hotword'] = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/hotword.txt"
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404',
            param_dict=param_dict)
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_hotword.wav')
        logger.info("asr inference result: {0}".format(rec_result))

    def test_paraformer_large_aishell1(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
        logger.info("asr inference result: {0}".format(rec_result))

    def test_paraformer_large_aishell2(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
        logger.info("asr inference result: {0}".format(rec_result))

    def test_paraformer_large_common(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
        logger.info("asr inference result: {0}".format(rec_result))
    def test_paraformer_tiny_commandword(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh_command.wav')
        logger.info("asr inference result: {0}".format(rec_result))

    def test_paraformer_8k(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_paraformer_asr_nat-zh-cn-8k-common-vocab8358-tensorflow1')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_8K.wav')
        logger.info("asr inference result: {0}".format(rec_result))

    def test_paraformer_aishell1(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
        logger.info("asr inference result: {0}".format(rec_result))

    def test_paraformer_aishell2(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
        logger.info("asr inference result: {0}".format(rec_result))

class TestParaformerBertInferencePipelines(unittest.TestCase):
    """ParaformerBert ASR models trained on AISHELL-1 and AISHELL-2."""

    def test_funasr_path(self):
        import funasr
        import os
        logger.info("run_dir:{0} ; funasr_path: {1}".format(os.getcwd(), funasr.__file__))

    def test_aishell1(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch')
        rec_result = inference_pipeline(
            audio_in='https://modelscope.oss-cn-beijing.aliyuncs.com/test/audios/asr_example.wav')
        logger.info("asr inference result: {0}".format(rec_result))

    def test_aishell2(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch')
        rec_result = inference_pipeline(
            audio_in='https://modelscope.oss-cn-beijing.aliyuncs.com/test/audios/asr_example.wav')
        logger.info("asr inference result: {0}".format(rec_result))

class TestUniasrInferencePipelines(unittest.TestCase):
    """UniASR 2-pass ASR models; param_dict={"decoding_model": ...} switches between
    "offline" and "normal" (online) decoding."""

    def test_funasr_path(self):
        import funasr
        import os
        logger.info("run_dir:{0} ; funasr_path: {1}".format(os.getcwd(), funasr.__file__))

    def test_uniasr_2pass_cantonese_chs_16k_common_offline(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-online')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_cantonese-CHS.wav',
            param_dict={"decoding_model": "offline"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_cantonese_chs_16k_common_online(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-online')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_cantonese-CHS.wav',
            param_dict={"decoding_model": "normal"})
        logger.info("asr inference result: {0}".format(rec_result))
    def test_uniasr_2pass_cn_dialect_offline(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-offline')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_cn_dialect_online(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-online')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_de_common_offline(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-de-16k-common-vocab3690-tensorflow1-offline')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_de.wav',
            param_dict={"decoding_model": "offline"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_de_common_online(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-de-16k-common-vocab3690-tensorflow1-online')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_de.wav',
            param_dict={"decoding_model": "normal"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_en_common_offline(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-en-16k-common-vocab1080-tensorflow1-offline')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_en.wav',
            param_dict={"decoding_model": "offline"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_en_common_online(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-en-16k-common-vocab1080-tensorflow1-online')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_en.wav',
            param_dict={"decoding_model": "normal"})
        logger.info("asr inference result: {0}".format(rec_result))
    def test_uniasr_2pass_es_common_offline(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-es-16k-common-vocab3445-tensorflow1-offline')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_es.wav',
            param_dict={"decoding_model": "offline"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_es_common_online(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-es-16k-common-vocab3445-tensorflow1-online')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_es.wav',
            param_dict={"decoding_model": "normal"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_fa_common_offline(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-fa-16k-common-vocab1257-pytorch-offline')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_fa.wav',
            param_dict={"decoding_model": "offline"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_fa_common_online(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-fa-16k-common-vocab1257-pytorch-online')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_fa.wav',
            param_dict={"decoding_model": "normal"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_fr_common_offline(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-fr-16k-common-vocab3472-tensorflow1-offline')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_fr.wav',
            param_dict={"decoding_model": "offline"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_fr_common_online(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-fr-16k-common-vocab3472-tensorflow1-online')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_fr.wav',
            param_dict={"decoding_model": "normal"})
        logger.info("asr inference result: {0}".format(rec_result))
    def test_uniasr_2pass_id_common_offline(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-id-16k-common-vocab1067-tensorflow1-online')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_id.wav',
            param_dict={"decoding_model": "offline"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_id_common_online(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-id-16k-common-vocab1067-tensorflow1-online')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_id.wav',
            param_dict={"decoding_model": "normal"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_ja_common_offline(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-offline')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_ja.wav',
            param_dict={"decoding_model": "offline"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_ja_common_online(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-online')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_ja.wav',
            param_dict={"decoding_model": "normal"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_ko_common_offline(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-ko-16k-common-vocab6400-tensorflow1-offline')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_ko.wav',
            param_dict={"decoding_model": "offline"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_ko_common_online(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-ko-16k-common-vocab6400-tensorflow1-online')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_ko.wav',
            param_dict={"decoding_model": "normal"})
        logger.info("asr inference result: {0}".format(rec_result))
    def test_uniasr_2pass_minnan_common_offline(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-minnan-16k-common-vocab3825')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav',
            param_dict={"decoding_model": "offline"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_pt_common_offline(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-offline')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_pt.wav',
            param_dict={"decoding_model": "offline"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_pt_common_online(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-online')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_pt.wav',
            param_dict={"decoding_model": "normal"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_ru_common_offline(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-ru-16k-common-vocab1664-tensorflow1-offline')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_ru.wav',
            param_dict={"decoding_model": "offline"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_ru_common_online(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-ru-16k-common-vocab1664-tensorflow1-online')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_ru.wav',
            param_dict={"decoding_model": "normal"})
        logger.info("asr inference result: {0}".format(rec_result))
    def test_uniasr_2pass_vi_common_offline(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-vi-16k-common-vocab1001-pytorch-offline')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_vi.wav',
            param_dict={"decoding_model": "offline"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_vi_common_online(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-vi-16k-common-vocab1001-pytorch-online')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_vi.wav',
            param_dict={"decoding_model": "normal"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_zhcn_8k_common_vocab3445_offline(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav',
            param_dict={"decoding_model": "offline"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_zhcn_8k_common_vocab3445_online(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav',
            param_dict={"decoding_model": "normal"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_zhcn_8k_common_vocab8358_offline(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-offline')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav',
            param_dict={"decoding_model": "offline"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_zhcn_8k_common_vocab8358_online(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-online')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav',
            param_dict={"decoding_model": "normal"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_zhcn_16k_common_vocab8358_offline(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav',
            param_dict={"decoding_model": "offline"})
        logger.info("asr inference result: {0}".format(rec_result))

    def test_uniasr_2pass_zhcn_16k_common_vocab8358_online(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav',
            param_dict={"decoding_model": "normal"})
        logger.info("asr inference result: {0}".format(rec_result))
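
# To run a single suite or case locally (module path below is an assumption based on this
# file's name; standard unittest CLI):
#   python -m unittest test_asr_inference_pipeline.TestParaformerInferencePipelines -v
#   python -m unittest test_asr_inference_pipeline.TestUniasrInferencePipelines.test_uniasr_2pass_en_common_offline -v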

if __name__ == '__main__':
    unittest.main()