# infer_from_local.sh
  1. # Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
  2. # MIT License (https://opensource.org/licenses/MIT)
  3. # method2, inference from local model
  4. # for more input type, please ref to readme.md
  5. input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav"
  6. output_dir="./outputs/debug"
  7. workspace=`pwd`
  8. # download model
  9. local_path_root=${workspace}/modelscope_models
  10. mkdir -p ${local_path_root}
  11. local_path=${local_path_root}/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
  12. git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git ${local_path}
  13. device="cuda:0" # "cuda:0" for gpu0, "cuda:1" for gpu1, "cpu"
  14. tokens="${local_path}/tokens.json"
  15. cmvn_file="${local_path}/am.mvn"
  16. config="config.yaml"
  17. init_param="${local_path}/model.pt"
  18. python -m funasr.bin.inference \
  19. --config-path "${local_path}" \
  20. --config-name "${config}" \
  21. ++init_param="${init_param}" \
  22. ++tokenizer_conf.token_list="${tokens}" \
  23. ++frontend_conf.cmvn_file="${cmvn_file}" \
  24. ++input="${input}" \
  25. ++output_dir="${output_dir}" \
  26. ++device="${device}" \