#!/usr/bin/env bash
# demo_pdb.sh — FunASR LCBNet contextual-ASR demo.
# Transcribes the bundled example audio, using its OCR text file as biasing
# context, on CPU; results go to ./outputs/debug.
set -euo pipefail

# Pretrained model export dir: holds config.yaml, model.pb, tokens.txt,
# bpe.model and the example/ inputs used below.
file_dir="/nfs/yufan.yf/workspace/github/FunASR/examples/industrial_data_pretraining/lcbnet/exp/speech_lcbnet_contextual_asr-en-16k-bpe-vocab5002-pytorch"

# Uncomment to also hide all GPUs (++device="cpu" below already forces CPU):
#CUDA_VISIBLE_DEVICES="" \
python -m funasr.bin.inference \
  --config-path="${file_dir}" \
  --config-name="config.yaml" \
  ++init_param="${file_dir}/model.pb" \
  ++tokenizer_conf.token_list="${file_dir}/tokens.txt" \
  +input="[${file_dir}/example/asr_example.wav,${file_dir}/example/ocr.txt]" \
  +data_type='["sound","text"]' \
  ++tokenizer_conf.bpemodel="${file_dir}/bpe.model" \
  ++output_dir="./outputs/debug" \
  ++device="cpu"
# NOTE(review): the command above must NOT end with a trailing '\' — the
# original continued onto the commented-out lines below, silently folding
# them into the command line.

# Alternative inputs kept for reference (audio-only, and audio + ocr2.txt):
#++input=["/nfs/yufan.yf/workspace/espnet/egs2/youtube_ppt/asr/dump/raw/dev_oracle_v1_new/data/format.1/YTB+--tMoLpQI-w+00322.wav"] \
#+data_type='["sound"]' \
#++input=["/nfs/yufan.yf/workspace/espnet/egs2/youtube_ppt/asr/dump/raw/dev_oracle_v1_new/data/format.1/YTB+--tMoLpQI-w+00322.wav","/nfs/yufan.yf/workspace/github/FunASR/examples/industrial_data_pretraining/lcbnet/exp/speech_lcbnet_contextual_asr-en-16k-bpe-vocab5002-pytorch/example/ocr2.txt"] \
#+data_type='["sound","text"]'