#!/usr/bin/env bash
# finetune.sh — fine-tune the Paraformer-large Mandarin ASR model with FunASR.
#
# Launches funasr/bin/train.py with Hydra-style overrides:
#   +key=value    adds a new config key
#   ++key=value   adds-or-overrides an existing config key
#
# Run from the FunASR repository root (train.py is referenced by relative path).

set -euo pipefail

## download model (optional: uncomment to clone the model weights locally,
## then point +model= at ${local_path} instead of the ModelScope model id)
#local_path_root=../modelscope_models
#mkdir -p ${local_path_root}
#local_path=${local_path_root}/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
#git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git ${local_path}

# NOTE(review): train/valid lists are hard-coded developer-local paths and point
# at the same debug jsonl — adjust both before any real fine-tuning run.
python funasr/bin/train.py \
  +model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" \
  +model_revision="v2.0.2" \
  +train_data_set_list="/Users/zhifu/funasr_github/test_local/aishell2_dev_ios/asr_task_debug_len_10.jsonl" \
  +valid_data_set_list="/Users/zhifu/funasr_github/test_local/aishell2_dev_ios/asr_task_debug_len_10.jsonl" \
  ++dataset_conf.batch_size=64 \
  ++dataset_conf.batch_type="example" \
  ++train_conf.max_epoch=2 \
  ++dataset_conf.num_workers=4 \
  +output_dir="outputs/debug/ckpt/funasr2/exp2" \
  +debug="true"