#!/usr/bin/env bash
# Run FunASR LCBNet contextual-ASR inference (en, 16k, BPE vocab 5002) using the
# exported experiment config. Output goes to ./outputs/debug.
#
# NOTE(review): this file was recovered from a corrupted diff view. The diff's
# hunk headers (@@ -1,6 @@ / @@ -10,4 @@) show that original lines 7-9 — three
# additional arguments between --config-name and +data_type (likely the input
# scp/key files and/or ++init_param checkpoint) — were NOT visible in the
# recovered chunk. Restore them from the upstream FunASR lcbnet example before
# running; as written this command is missing those arguments.

# Experiment directory holding config.yaml and bpe.model (referenced below).
file_dir="/nfs/yufan.yf/workspace/github/FunASR/examples/industrial_data_pretraining/lcbnet/exp/speech_lcbnet_contextual_asr-en-16k-bpe-vocab5002-pytorch"

# Kept commented out, matching the recovered diff's "after" state: device
# selection is done via the ++device override below instead of hiding GPUs.
#CUDA_VISIBLE_DEVICES="" \
python -m funasr.bin.inference \
--config-path=${file_dir} \
--config-name="config.yaml" \
+data_type='["kaldi_ark", "text"]' \
++tokenizer_conf.bpemodel=${file_dir}/bpe.model \
++output_dir="./outputs/debug" \
++device="cpu"