#!/usr/bin/env bash
# demo.sh — LCB-NET inference on the SlideSpeech dev set, parallelized
# across visible GPUs, followed by WER / BWER scoring.
  1. file_dir="/home/yf352572/.cache/modelscope/hub/iic/LCB-NET/"
  2. CUDA_VISIBLE_DEVICES="0,1"
  3. inference_device="cuda"
  4. if [ ${inference_device} == "cuda" ]; then
  5. nj=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
  6. else
  7. inference_batch_size=1
  8. CUDA_VISIBLE_DEVICES=""
  9. for JOB in $(seq ${nj}); do
  10. CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"-1,"
  11. done
  12. fi
  13. inference_dir="outputs/slidespeech_dev"
  14. _logdir="${inference_dir}/logdir"
  15. echo "inference_dir: ${inference_dir}"
  16. mkdir -p "${_logdir}"
  17. key_file1=${file_dir}/dev/wav.scp
  18. key_file2=${file_dir}/dev/ocr.txt
  19. split_scps1=
  20. split_scps2=
  21. for JOB in $(seq "${nj}"); do
  22. split_scps1+=" ${_logdir}/wav.${JOB}.scp"
  23. split_scps2+=" ${_logdir}/ocr.${JOB}.txt"
  24. done
  25. utils/split_scp.pl "${key_file1}" ${split_scps1}
  26. utils/split_scp.pl "${key_file2}" ${split_scps2}
  27. gpuid_list_array=(${CUDA_VISIBLE_DEVICES//,/ })
  28. for JOB in $(seq ${nj}); do
  29. {
  30. id=$((JOB-1))
  31. gpuid=${gpuid_list_array[$id]}
  32. export CUDA_VISIBLE_DEVICES=${gpuid}
  33. python -m funasr.bin.inference \
  34. --config-path=${file_dir} \
  35. --config-name="config.yaml" \
  36. ++init_param=${file_dir}/model.pt \
  37. ++tokenizer_conf.token_list=${file_dir}/tokens.txt \
  38. ++input=[${_logdir}/wav.${JOB}.scp,${_logdir}/ocr.${JOB}.txt] \
  39. +data_type='["kaldi_ark", "text"]' \
  40. ++tokenizer_conf.bpemodel=${file_dir}/bpe.pt \
  41. ++normalize_conf.stats_file=${file_dir}/am.mvn \
  42. ++output_dir="${inference_dir}/${JOB}" \
  43. ++device="${inference_device}" \
  44. ++ncpu=1 \
  45. ++disable_log=true &> ${_logdir}/log.${JOB}.txt
  46. }&
  47. done
  48. wait
  49. mkdir -p ${inference_dir}/1best_recog
  50. for JOB in $(seq "${nj}"); do
  51. cat "${inference_dir}/${JOB}/1best_recog/token" >> "${inference_dir}/1best_recog/token"
  52. done
  53. echo "Computing WER ..."
  54. sed -e 's/ /\t/' -e 's/ //g' -e 's/▁/ /g' -e 's/\t /\t/' ${inference_dir}/1best_recog/token > ${inference_dir}/1best_recog/token.proc
  55. cp ${file_dir}/dev/text ${inference_dir}/1best_recog/token.ref
  56. cp ${file_dir}/dev/ocr.list ${inference_dir}/1best_recog/ocr.list
  57. python utils/compute_wer.py ${inference_dir}/1best_recog/token.ref ${inference_dir}/1best_recog/token.proc ${inference_dir}/1best_recog/token.cer
  58. tail -n 3 ${inference_dir}/1best_recog/token.cer
  59. ./run_bwer_recall.sh ${inference_dir}/1best_recog/
  60. tail -n 6 ${inference_dir}/1best_recog/BWER-UWER.results |head -n 5