#!/usr/bin/env bash
# Run LCBNet contextual-ASR inference (FunASR) on the bundled example:
# an audio file plus an OCR hotword text file.
#
# Expects ${file_dir} to contain: config.yaml, model.pb, tokens.txt, bpe.model,
# and an example/ subdirectory with asr_example.wav and ocr.txt.
set -euo pipefail

# Directory of the pretrained LCBNet model and its example inputs.
file_dir="/nfs/yufan.yf/workspace/github/FunASR/examples/industrial_data_pretraining/lcbnet/exp/speech_lcbnet_contextual_asr-en-16k-bpe-vocab5002-pytorch"

# Uncomment to hide all GPUs from CUDA (prefix it to the python command):
#CUDA_VISIBLE_DEVICES=""
python -m funasr.bin.inference \
  --config-path="${file_dir}" \
  --config-name="config.yaml" \
  ++init_param="${file_dir}/model.pb" \
  ++tokenizer_conf.token_list="${file_dir}/tokens.txt" \
  +input="[${file_dir}/example/asr_example.wav,${file_dir}/example/ocr.txt]" \
  +data_type='["sound","text"]' \
  ++tokenizer_conf.bpemodel="${file_dir}/bpe.model" \
  ++output_dir="./outputs/debug" \
  ++device="cpu"
# NOTE: the last real override above must NOT end with '\'; a trailing
# backslash before a comment line silently splices the lines together.

# Alternative inputs kept for reference (audio only, or audio + a second OCR file):
#++input="[/nfs/yufan.yf/workspace/espnet/egs2/youtube_ppt/asr/dump/raw/dev_oracle_v1_new/data/format.1/YTB+--tMoLpQI-w+00322.wav]" \
#+data_type='["sound"]' \
#++input="[/nfs/yufan.yf/workspace/espnet/egs2/youtube_ppt/asr/dump/raw/dev_oracle_v1_new/data/format.1/YTB+--tMoLpQI-w+00322.wav,/nfs/yufan.yf/workspace/github/FunASR/examples/industrial_data_pretraining/lcbnet/exp/speech_lcbnet_contextual_asr-en-16k-bpe-vocab5002-pytorch/example/ocr2.txt]" \
#+data_type='["sound","text"]' \