
Dev gzf (#1379)

* update train recipe

* v1.0.8

* llm

* update trainer

* update trainer

* update trainer

* train finetune demo

* train finetune demo
zhifu gao 1 year ago
parent
commit
20c35cdbc7
37 files changed, with 233 additions and 103 deletions
  1. .gitignore (+1 -0)
  2. README.md (+1 -1)
  3. README_zh.md (+1 -1)
  4. examples/aishell/branchformer/demo_infer.sh (+1 -0)
  5. examples/aishell/branchformer/demo_train_or_finetune.sh (+1 -0)
  6. examples/aishell/conformer/demo_infer.sh (+1 -0)
  7. examples/aishell/conformer/demo_train_or_finetune.sh (+1 -0)
  8. examples/aishell/e_branchformer/demo_infer.sh (+1 -0)
  9. examples/aishell/e_branchformer/demo_train_or_finetune.sh (+1 -0)
  10. examples/aishell/e_branchformer/infer.sh (+0 -12)
  11. examples/aishell/paraformer/demo_infer.sh (+3 -0)
  12. examples/aishell/paraformer/demo_train_or_finetune.sh (+51 -0)
  13. examples/aishell/paraformer/infer.sh (+0 -12)
  14. examples/aishell/transformer/demo_infer.sh (+1 -0)
  15. examples/aishell/transformer/demo_train_or_finetune.sh (+1 -0)
  16. examples/aishell/transformer/infer.sh (+0 -12)
  17. examples/industrial_data_pretraining/bicif_paraformer/demo.sh (+0 -0)
  18. examples/industrial_data_pretraining/conformer/demo.sh (+0 -0)
  19. examples/industrial_data_pretraining/contextual_paraformer/demo.sh (+0 -0)
  20. examples/industrial_data_pretraining/ct_transformer/demo.sh (+0 -0)
  21. examples/industrial_data_pretraining/ct_transformer_streaming/demo.sh (+0 -0)
  22. examples/industrial_data_pretraining/emotion2vec/infer.sh (+0 -12)
  23. examples/industrial_data_pretraining/fsmn_vad_streaming/demo.sh (+0 -0)
  24. examples/industrial_data_pretraining/monotonic_aligner/demo.sh (+0 -0)
  25. examples/industrial_data_pretraining/paraformer-zh-spk/demo.sh (+0 -0)
  26. examples/industrial_data_pretraining/paraformer/demo.sh (+0 -14)
  27. examples/industrial_data_pretraining/paraformer/finetune.sh (+33 -16)
  28. examples/industrial_data_pretraining/paraformer/finetune_from_local.sh (+61 -0)
  29. examples/industrial_data_pretraining/paraformer/infer.sh (+21 -0)
  30. examples/industrial_data_pretraining/paraformer/infer_after_finetune.sh (+0 -12)
  31. examples/industrial_data_pretraining/paraformer/infer_from_local.sh (+39 -0)
  32. examples/industrial_data_pretraining/paraformer_streaming/demo.sh (+0 -0)
  33. examples/industrial_data_pretraining/scama/demo.sh (+0 -0)
  34. examples/industrial_data_pretraining/seaco_paraformer/demo.sh (+0 -0)
  35. examples/industrial_data_pretraining/uniasr/demo.sh (+0 -0)
  36. funasr/bin/train.py (+12 -9)
  37. funasr/train_utils/trainer.py (+2 -2)

+ 1 - 0
.gitignore

@@ -24,3 +24,4 @@ samples
 outputs*
 emotion2vec*
 GPT-SoVITS*
+modelscope_models

+ 1 - 1
README.md

@@ -95,7 +95,7 @@ Below is a quick start tutorial. Test audio files ([Mandarin](https://isv-data.o
 ### Command-line usage
 
 ```shell
-funasr +model=paraformer-zh +vad_model="fsmn-vad" +punc_model="ct-punc" +input=asr_example_zh.wav
+funasr ++model=paraformer-zh ++vad_model="fsmn-vad" ++punc_model="ct-punc" ++input=asr_example_zh.wav
 ```
 
 Notes: Support recognition of single audio file, as well as file list in Kaldi-style wav.scp format: `wav_id wav_pat`
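
Editor's note: a minimal sketch of the wav.scp usage mentioned in the note above, with placeholder utterance IDs and paths (not part of this commit):

```shell
# Hypothetical wav.scp: one "wav_id wav_path" pair per line
cat > wav.scp <<EOF
utt_001 /path/to/audio_001.wav
utt_002 /path/to/audio_002.wav
EOF

# Same CLI as in the README snippet above, pointed at the list instead of a single file
funasr ++model=paraformer-zh ++vad_model="fsmn-vad" ++punc_model="ct-punc" ++input=wav.scp
```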

+ 1 - 1
README_zh.md

@@ -91,7 +91,7 @@ FunASR开源了大量在工业数据上预训练模型,您可以在[模型许
 ### 可执行命令行
 
 ```shell
-funasr +model=paraformer-zh +vad_model="fsmn-vad" +punc_model="ct-punc" +input=asr_example_zh.wav
+funasr ++model=paraformer-zh ++vad_model="fsmn-vad" ++punc_model="ct-punc" ++input=asr_example_zh.wav
 ```
 
 注:支持单条音频文件识别,也支持文件列表,列表为kaldi风格wav.scp:`wav_id   wav_path`

+ 1 - 0
examples/aishell/branchformer/demo_infer.sh

@@ -0,0 +1 @@
+../paraformer/demo_infer.sh

+ 1 - 0
examples/aishell/branchformer/demo_train_or_finetune.sh

@@ -0,0 +1 @@
+../paraformer/demo_train_or_finetune.sh

+ 1 - 0
examples/aishell/conformer/demo_infer.sh

@@ -0,0 +1 @@
+../paraformer/demo_infer.sh

+ 1 - 0
examples/aishell/conformer/demo_train_or_finetune.sh

@@ -0,0 +1 @@
+../paraformer/demo_train_or_finetune.sh

+ 1 - 0
examples/aishell/e_branchformer/demo_infer.sh

@@ -0,0 +1 @@
+../paraformer/demo_infer.sh

+ 1 - 0
examples/aishell/e_branchformer/demo_train_or_finetune.sh

@@ -0,0 +1 @@
+../paraformer/demo_train_or_finetune.sh

+ 0 - 12
examples/aishell/e_branchformer/infer.sh

@@ -1,12 +0,0 @@
-
-
-python -m funasr.bin.inference \
---config-path="/mnt/workspace/FunASR/examples/aishell/paraformer/exp/baseline_paraformer_conformer_12e_6d_2048_256_zh_char_exp3" \
---config-name="config.yaml" \
-++init_param="/mnt/workspace/FunASR/examples/aishell/paraformer/exp/baseline_paraformer_conformer_12e_6d_2048_256_zh_char_exp3/model.pt.ep38" \
-++tokenizer_conf.token_list="/mnt/nfs/zhifu.gzf/data/AISHELL-1-feats/DATA/data/zh_token_list/char/tokens.txt" \
-++frontend_conf.cmvn_file="/mnt/nfs/zhifu.gzf/data/AISHELL-1-feats/DATA/data/train/am.mvn" \
-++input="/mnt/nfs/zhifu.gzf/data/AISHELL-1/data_aishell/wav/train/S0002/BAC009S0002W0122.wav" \
-++output_dir="./outputs/debug" \
-++device="cuda:0" \
-

+ 3 - 0
examples/aishell/conformer/infer.sh → examples/aishell/paraformer/demo_infer.sh

@@ -1,3 +1,6 @@
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
+
 
 
 python -m funasr.bin.inference \

+ 51 - 0
examples/aishell/paraformer/demo_train_or_finetune.sh

@@ -0,0 +1,51 @@
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
+
+
+# which gpu to train or finetune
+export CUDA_VISIBLE_DEVICES="0,1"
+gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
+
+# data dir, which contains: train.json, val.json, tokens.jsonl/tokens.txt, am.mvn
+data_dir="/Users/zhifu/funasr1.0/data/list"
+
+## generate jsonl from wav.scp and text.txt
+#python -m funasr.datasets.audio_datasets.scp2jsonl \
+#++scp_file_list='["/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"]' \
+#++data_type_list='["source", "target"]' \
+#++jsonl_file_out=/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl
+
+train_data="${data_dir}/train.jsonl"
+val_data="${data_dir}/val.jsonl"
+tokens="${data_dir}/tokens.json"
+cmvn_file="${data_dir}/am.mvn"
+
+# exp output dir
+output_dir="/Users/zhifu/exp"
+log_file="${output_dir}/log.txt"
+
+workspace=`pwd`
+config="paraformer_conformer_12e_6d_2048_256.yaml"
+
+init_param="${output_dir}/model.pt"
+
+mkdir -p ${output_dir}
+echo "log_file: ${log_file}"
+
+torchrun \
+--nnodes 1 \
+--nproc_per_node ${gpu_num} \
+../../../funasr/bin/train.py \
+--config-path "${workspace}/conf" \
+--config-name "${config}" \
+++train_data_set_list="${train_data}" \
+++valid_data_set_list="${val_data}" \
+++tokenizer_conf.token_list="${tokens}" \
+++frontend_conf.cmvn_file="${cmvn_file}" \
+++dataset_conf.batch_size=32 \
+++dataset_conf.batch_type="example" \
+++dataset_conf.num_workers=4 \
+++train_conf.max_epoch=150 \
+++optim_conf.lr=0.0002 \
+++init_param="${init_param}" \
+++output_dir="${output_dir}" &> ${log_file}
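Editor's note: the jsonl lists consumed by this script can be produced with the scp2jsonl helper that the script keeps commented out; a sketch with placeholder paths (run once per split and point train_data/val_data above at the outputs):

```shell
# Sketch (placeholder paths): build a jsonl list from Kaldi-style wav.scp and text.txt
python -m funasr.datasets.audio_datasets.scp2jsonl \
++scp_file_list='["/path/to/train/wav.scp", "/path/to/train/text.txt"]' \
++data_type_list='["source", "target"]' \
++jsonl_file_out=/path/to/list/train.jsonl
```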

+ 0 - 12
examples/aishell/paraformer/infer.sh

@@ -1,12 +0,0 @@
-
-
-python -m funasr.bin.inference \
---config-path="/mnt/workspace/FunASR/examples/aishell/paraformer/exp/baseline_paraformer_conformer_12e_6d_2048_256_zh_char_exp3" \
---config-name="config.yaml" \
-++init_param="/mnt/workspace/FunASR/examples/aishell/paraformer/exp/baseline_paraformer_conformer_12e_6d_2048_256_zh_char_exp3/model.pt.ep38" \
-++tokenizer_conf.token_list="/mnt/nfs/zhifu.gzf/data/AISHELL-1-feats/DATA/data/zh_token_list/char/tokens.txt" \
-++frontend_conf.cmvn_file="/mnt/nfs/zhifu.gzf/data/AISHELL-1-feats/DATA/data/train/am.mvn" \
-++input="/mnt/nfs/zhifu.gzf/data/AISHELL-1/data_aishell/wav/train/S0002/BAC009S0002W0122.wav" \
-++output_dir="./outputs/debug" \
-++device="cuda:0" \
-

+ 1 - 0
examples/aishell/transformer/demo_infer.sh

@@ -0,0 +1 @@
+../paraformer/demo_infer.sh

+ 1 - 0
examples/aishell/transformer/demo_train_or_finetune.sh

@@ -0,0 +1 @@
+../paraformer/demo_train_or_finetune.sh

+ 0 - 12
examples/aishell/transformer/infer.sh

@@ -1,12 +0,0 @@
-
-
-python -m funasr.bin.inference \
---config-path="/mnt/workspace/FunASR/examples/aishell/paraformer/exp/baseline_paraformer_conformer_12e_6d_2048_256_zh_char_exp3" \
---config-name="config.yaml" \
-++init_param="/mnt/workspace/FunASR/examples/aishell/paraformer/exp/baseline_paraformer_conformer_12e_6d_2048_256_zh_char_exp3/model.pt.ep38" \
-++tokenizer_conf.token_list="/mnt/nfs/zhifu.gzf/data/AISHELL-1-feats/DATA/data/zh_token_list/char/tokens.txt" \
-++frontend_conf.cmvn_file="/mnt/nfs/zhifu.gzf/data/AISHELL-1-feats/DATA/data/train/am.mvn" \
-++input="/mnt/nfs/zhifu.gzf/data/AISHELL-1/data_aishell/wav/train/S0002/BAC009S0002W0122.wav" \
-++output_dir="./outputs/debug" \
-++device="cuda:0" \
-

+ 0 - 0
examples/industrial_data_pretraining/bicif_paraformer/infer.sh → examples/industrial_data_pretraining/bicif_paraformer/demo.sh


+ 0 - 0
examples/industrial_data_pretraining/conformer/infer.sh → examples/industrial_data_pretraining/conformer/demo.sh


+ 0 - 0
examples/industrial_data_pretraining/contextual_paraformer/infer.sh → examples/industrial_data_pretraining/contextual_paraformer/demo.sh


+ 0 - 0
examples/industrial_data_pretraining/ct_transformer/infer.sh → examples/industrial_data_pretraining/ct_transformer/demo.sh


+ 0 - 0
examples/industrial_data_pretraining/ct_transformer_streaming/infer.sh → examples/industrial_data_pretraining/ct_transformer_streaming/demo.sh


+ 0 - 12
examples/industrial_data_pretraining/emotion2vec/infer.sh

@@ -1,12 +0,0 @@
-
-#model="damo/emotion2vec_base"
-model="iic/emotion2vec_base_finetuned"
-model_revision="v2.0.4"
-
-python funasr/bin/inference.py \
-+model=${model} \
-+model_revision=${model_revision} \
-+input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav" \
-+output_dir="./outputs/debug" \
-+extract_embedding=False \
-+device="cpu" \

+ 0 - 0
examples/industrial_data_pretraining/fsmn_vad_streaming/infer.sh → examples/industrial_data_pretraining/fsmn_vad_streaming/demo.sh


+ 0 - 0
examples/industrial_data_pretraining/monotonic_aligner/infer.sh → examples/industrial_data_pretraining/monotonic_aligner/demo.sh


+ 0 - 0
examples/industrial_data_pretraining/paraformer-zh-spk/infer.sh → examples/industrial_data_pretraining/paraformer-zh-spk/demo.sh


+ 0 - 14
examples/industrial_data_pretraining/paraformer/demo.sh

@@ -1,14 +0,0 @@
-
-model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model_revision="v2.0.4"
-
-python funasr/bin/inference.py \
-+model=${model} \
-+model_revision=${model_revision} \
-+input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav" \
-+output_dir="./outputs/debug" \
-+device="cpu" \
-
-
-
-

+ 33 - 16
examples/industrial_data_pretraining/paraformer/finetune.sh

@@ -1,9 +1,14 @@
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
 
-## download model
-#local_path_root=../modelscope_models
-#mkdir -p ${local_path_root}
-#local_path=${local_path_root}/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
-#git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git ${local_path}
+# method1, finetune from model hub
+
+# which gpu to train or finetune
+export CUDA_VISIBLE_DEVICES="0,1"
+gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
+
+# data dir, which contains: train.json, val.json
+data_dir="/Users/zhifu/funasr1.0/data/list"
 
 ## generate jsonl from wav.scp and text.txt
 #python -m funasr.datasets.audio_datasets.scp2jsonl \
@@ -11,17 +16,29 @@
 #++data_type_list='["source", "target"]' \
 #++jsonl_file_out=/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl
 
+train_data="${data_dir}/train.jsonl"
+val_data="${data_dir}/val.jsonl"
+
+
+# exp output dir
+output_dir="/Users/zhifu/exp"
+log_file="${output_dir}/log.txt"
+
+
+mkdir -p ${output_dir}
+echo "log_file: ${log_file}"
 
-# torchrun \
-# --nnodes 1 \
-# --nproc_per_node 1 \
-python funasr/bin/train.py \
-+model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" \
-+model_revision="v2.0.4" \
-+train_data_set_list="/Users/zhifu/funasr_github/test_local/aishell2_dev_ios/asr_task_debug_len_10.jsonl" \
-+valid_data_set_list="/Users/zhifu/funasr_github/test_local/aishell2_dev_ios/asr_task_debug_len_10.jsonl" \
-++dataset_conf.batch_size=64 \
+torchrun \
+--nnodes 1 \
+--nproc_per_node ${gpu_num} \
+funasr/bin/train.py \
+++model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" \
+++model_revision="v2.0.4" \
+++train_data_set_list="${train_data}" \
+++valid_data_set_list="${val_data}" \
+++dataset_conf.batch_size=32 \
 ++dataset_conf.batch_type="example" \
-++train_conf.max_epoch=2 \
 ++dataset_conf.num_workers=4 \
-+output_dir="outputs/debug/ckpt/funasr2/exp2"
+++train_conf.max_epoch=20 \
+++optim_conf.lr=0.0002 \
+++output_dir="${output_dir}" &> ${log_file}
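Editor's note: a short usage sketch for the hub-based script above. Since it references funasr/bin/train.py with a relative path, launch it from the repository root; the log path follows the script's own output_dir/log_file settings, so adjust both to your environment:

```shell
# Sketch: launch finetuning from the model hub and follow the training log
bash examples/industrial_data_pretraining/paraformer/finetune.sh
tail -f /Users/zhifu/exp/log.txt
```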

+ 61 - 0
examples/industrial_data_pretraining/paraformer/finetune_from_local.sh

@@ -0,0 +1,61 @@
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
+
+# method2, finetune from local model
+
+workspace=`pwd`
+
+# download model
+local_path_root=${workspace}/modelscope_models
+mkdir -p ${local_path_root}
+local_path=${local_path_root}/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
+git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git ${local_path}
+
+
+# which gpu to train or finetune
+export CUDA_VISIBLE_DEVICES="0,1"
+gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
+
+# data dir, which contains: train.json, val.json
+data_dir="/Users/zhifu/funasr1.0/data/list"
+
+## generate jsonl from wav.scp and text.txt
+#python -m funasr.datasets.audio_datasets.scp2jsonl \
+#++scp_file_list='["/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"]' \
+#++data_type_list='["source", "target"]' \
+#++jsonl_file_out=/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl
+
+train_data="${data_dir}/train.jsonl"
+val_data="${data_dir}/val.jsonl"
+
+tokens="${local_path}/tokens.json"
+cmvn_file="${local_path}/am.mvn"
+
+# exp output dir
+output_dir="/Users/zhifu/exp"
+log_file="${output_dir}/log.txt"
+
+config="config.yaml"
+
+init_param="${local_path}/model.pt"
+
+mkdir -p ${output_dir}
+echo "log_file: ${log_file}"
+
+torchrun \
+--nnodes 1 \
+--nproc_per_node ${gpu_num} \
+../../../funasr/bin/train.py \
+--config-path "${local_path}" \
+--config-name "${config}" \
+++train_data_set_list="${train_data}" \
+++valid_data_set_list="${val_data}" \
+++tokenizer_conf.token_list="${tokens}" \
+++frontend_conf.cmvn_file="${cmvn_file}" \
+++dataset_conf.batch_size=32 \
+++dataset_conf.batch_type="example" \
+++dataset_conf.num_workers=4 \
+++train_conf.max_epoch=20 \
+++optim_conf.lr=0.0002 \
+++init_param="${init_param}" \
+++output_dir="${output_dir}" &> ${log_file}
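Editor's note: with only one GPU available, the launch above needs no other change than restricting CUDA_VISIBLE_DEVICES, because gpu_num is derived from that variable; a minimal sketch:

```shell
# Sketch: single-GPU variant; awk counts the comma-separated device list, so gpu_num becomes 1
export CUDA_VISIBLE_DEVICES="0"
gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "launching ${gpu_num} worker(s)"
```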

+ 21 - 0
examples/industrial_data_pretraining/paraformer/infer.sh

@@ -0,0 +1,21 @@
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
+
+# method1, inference from model hub
+
+# for more input type, please ref to readme.md
+input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav"
+
+output_dir="./outputs/debug"
+
+model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+model_revision="v2.0.4"
+
+device="cuda:0" # "cuda:0" for gpu0, "cuda:1" for gpu1, "cpu"
+
+python -m funasr.bin.inference \
+++model=${model} \
+++model_revision=${model_revision} \
+++input="${input}" \
+++output_dir="${output_dir}" \
+++device="${device}" \
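Editor's note: a hedged variant of the script above for decoding a local audio file on CPU; the input path is a placeholder, while the model name and revision are taken from the script:

```shell
# Sketch: same entry point, local wav input, CPU decoding
python -m funasr.bin.inference \
++model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" \
++model_revision="v2.0.4" \
++input="/path/to/asr_example_zh.wav" \
++output_dir="./outputs/debug" \
++device="cpu"
```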

+ 0 - 12
examples/industrial_data_pretraining/paraformer/infer_after_finetune.sh

@@ -1,12 +0,0 @@
-
-
-python funasr/bin/inference.py \
---config-path="/Users/zhifu/funasr_github/test_local/funasr_cli_egs" \
---config-name="config.yaml" \
-++init_param="/Users/zhifu/funasr_github/test_local/funasr_cli_egs/model.pt" \
-++tokenizer_conf.token_list="/Users/zhifu/funasr_github/test_local/funasr_cli_egs/tokens.txt" \
-++frontend_conf.cmvn_file="/Users/zhifu/funasr_github/test_local/funasr_cli_egs/am.mvn" \
-++input="data/wav.scp" \
-++output_dir="./outputs/debug" \
-++device="cuda" \
-

+ 39 - 0
examples/industrial_data_pretraining/paraformer/infer_from_local.sh

@@ -0,0 +1,39 @@
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
+
+# method2, inference from local model
+
+# for more input type, please ref to readme.md
+input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav"
+
+output_dir="./outputs/debug"
+
+workspace=`pwd`
+
+# download model
+local_path_root=${workspace}/modelscope_models
+mkdir -p ${local_path_root}
+local_path=${local_path_root}/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
+git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git ${local_path}
+
+device="cuda:0" # "cuda:0" for gpu0, "cuda:1" for gpu1, "cpu"
+
+tokens="${local_path}/tokens.json"
+cmvn_file="${local_path}/am.mvn"
+
+config="config.yaml"
+init_param="${local_path}/model.pt"
+
+python -m funasr.bin.inference \
+--config-path "${local_path}" \
+--config-name "${config}" \
+++init_param="${init_param}" \
+++tokenizer_conf.token_list="${tokens}" \
+++frontend_conf.cmvn_file="${cmvn_file}" \
+++input="${input}" \
+++output_dir="${output_dir}" \
+++device="${device}" \
+
+
+
+

+ 0 - 0
examples/industrial_data_pretraining/paraformer_streaming/infer.sh → examples/industrial_data_pretraining/paraformer_streaming/demo.sh


+ 0 - 0
examples/industrial_data_pretraining/scama/infer.sh → examples/industrial_data_pretraining/scama/demo.sh


+ 0 - 0
examples/industrial_data_pretraining/seaco_paraformer/infer.sh → examples/industrial_data_pretraining/seaco_paraformer/demo.sh


+ 0 - 0
examples/industrial_data_pretraining/uniasr/infer.sh → examples/industrial_data_pretraining/uniasr/demo.sh


+ 12 - 9
funasr/bin/train.py

@@ -96,15 +96,18 @@ def main(**kwargs):
             init_param = (init_param,)
         logging.info("init_param is not None: %s", init_param)
         for p in init_param:
-            logging.info(f"Loading pretrained params from {p}")
-            load_pretrained_model(
-                model=model,
-                path=p,
-                ignore_init_mismatch=kwargs.get("ignore_init_mismatch", True),
-                oss_bucket=kwargs.get("oss_bucket", None),
-                scope_map=kwargs.get("scope_map", None),
-                excludes=kwargs.get("excludes", None),
-            )
+            if os.path.exists(p):
+                logging.info(f"Loading pretrained params from {p}")
+                load_pretrained_model(
+                    model=model,
+                    path=p,
+                    ignore_init_mismatch=kwargs.get("ignore_init_mismatch", True),
+                    oss_bucket=kwargs.get("oss_bucket", None),
+                    scope_map=kwargs.get("scope_map", None),
+                    excludes=kwargs.get("excludes", None),
+                )
+            else:
+                logging.info(f"Checkpoint does not exist, init randomly: {p}")
     else:
         initialize(model, kwargs.get("init", "kaiming_normal"))
 
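Editor's note: the change above makes a missing init_param checkpoint non-fatal. A hedged sketch of what that permits, with illustrative paths and overrides mirroring the finetune scripts in this commit:

```shell
# Sketch: if the ++init_param path does not exist, train.py now logs
# "Checkpoint does not exist, init randomly" and continues with random initialization
torchrun --nnodes 1 --nproc_per_node 1 funasr/bin/train.py \
++model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" \
++model_revision="v2.0.4" \
++train_data_set_list="/path/to/train.jsonl" \
++valid_data_set_list="/path/to/val.jsonl" \
++init_param="/path/that/does/not/exist/model.pt" \
++output_dir="./outputs/debug"
```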

+ 2 - 2
funasr/train_utils/trainer.py

@@ -181,7 +181,7 @@ class Trainer:
 
             time2 = time.perf_counter()
             time_escaped = (time2 - time1)/3600.0
-            print(f"\nrank: {self.local_rank}, time_escaped_epoch: {time_escaped:.3f} hours, estimated to finish {self.max_epoch} epoch: {(self.max_epoch-epoch)*time_escaped:.3f}\n")
+            print(f"\nrank: {self.local_rank}, time_escaped_epoch: {time_escaped:.3f} hours, estimated to finish {self.max_epoch} epoch: {(self.max_epoch-epoch)*time_escaped:.3f} hours\n")
 
         if self.rank == 0:
             average_checkpoints(self.output_dir, self.avg_nbest_model)
@@ -293,7 +293,7 @@ class Trainer:
                     f"{time_now}, "
                     f"rank: {self.local_rank}, "
                     f"epoch: {epoch}/{self.max_epoch}, "
-                    f"step: {batch_idx+1}/{len(self.dataloader_train)}, total: {self.batch_total}, "
+                    f"step: {batch_idx+1}/{len(self.dataloader_train)}, total step: {self.batch_total}, "
                     f"(loss: {loss.detach().cpu().item():.3f}), "
                     f"(lr: {lr:.3e}), "
                     f"{[(k, round(v.cpu().item(), 3)) for k, v in stats.items()]}, "