Quellcode durchsuchen

modify paraformer train doc (#1427)

Co-authored-by: zhangzc <2608882093@qq.com>
seanzhang-zhichen vor 2 Jahren
Ursprung
Commit
9595a9432f

+ 0 - 2
data/list/audio_datasets.jsonl

@@ -1,2 +0,0 @@
-{"key": "ID0012W0013", "prompt": "<ASR>", "source": "/Users/zhifu/funasr_github/test_local/aishell2_dev_ios/wav/D0012/ID0012W0013.wav", "target": "当客户风险承受能力评估依据发生变化时", "source_len": 454, "target_len": 19}
-{"key":"ID0012W0014", "prompt": "<ASR>", "source": "/Users/zhifu/funasr_github/test_local/aishell2_dev_ios/wav/D0012/ID0012W0014.wav", "target": "杨涛不得不将工厂关掉", "source_len": 211, "target_len": 11}

+ 0 - 0
data/list/text.txt → data/list/train_text.txt


+ 0 - 0
data/list/wav.scp → data/list/train_wav.scp


+ 2 - 0
data/list/val_text.txt

@@ -0,0 +1,2 @@
+ID0012W0013 当客户风险承受能力评估依据发生变化时
+ID0012W0014 杨涛不得不将工厂关掉

+ 2 - 0
data/list/val_wav.scp

@@ -0,0 +1,2 @@
+ID0012W0013 /Users/zhifu/funasr_github/test_local/aishell2_dev_ios/wav/D0012/ID0012W0013.wav
+ID0012W0014 /Users/zhifu/funasr_github/test_local/aishell2_dev_ios/wav/D0012/ID0012W0014.wav

+ 39 - 0
examples/industrial_data_pretraining/paraformer/README_zh.md

@@ -40,3 +40,42 @@ print(res)
   ```[audio_sample1, audio_sample2, ..., audio_sampleN]```
   - fbank输入,支持组batch。shape为[batch, frames, dim],类型为torch.Tensor,例如
 - `output_dir`: None (默认),如果设置,输出结果的输出路径
+
+
+## 微调
+
+#### 准备数据
+
+`train_text.txt`
+
+左边为数据唯一ID,需与`train_wav.scp`中的`ID`一一对应
+右边为音频文件标注文本
+
+```bash
+ID0012W0013 当客户风险承受能力评估依据发生变化时
+ID0012W0014 杨涛不得不将工厂关掉
+```
+
+
+`train_wav.scp`
+
+左边为数据唯一ID,需与`train_text.txt`中的`ID`一一对应
+右边为音频文件的绝对路径
+
+```bash
+ID0012W0013 /Users/zhifu/funasr_github/test_local/aishell2_dev_ios/wav/D0012/ID0012W0013.wav
+ID0012W0014 /Users/zhifu/funasr_github/test_local/aishell2_dev_ios/wav/D0012/ID0012W0014.wav
+```
+
+#### 训练
+
+```bash
+cd examples/industrial_data_pretraining/paraformer
+sh finetune_from_local.sh
+```
+
+**查看训练日志**
+
+```bash
+tensorboard --logdir /xxxx/FunASR/examples/industrial_data_pretraining/paraformer/outputs/log/tensorboard
+```

+ 21 - 11
examples/industrial_data_pretraining/paraformer/finetune_from_local.sh

@@ -5,6 +5,8 @@
 
 workspace=`pwd`
 
+echo "current path: ${workspace}" # /xxxx/funasr/examples/industrial_data_pretraining/paraformer
+
 # download model
 local_path_root=${workspace}/modelscope_models
 mkdir -p ${local_path_root}
@@ -17,25 +19,32 @@ export CUDA_VISIBLE_DEVICES="0,1"
 gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
 
 # data dir, which contains: train.jsonl, val.jsonl
-data_dir="/Users/zhifu/funasr1.0/data/list"
-
-## generate jsonl from wav.scp and text.txt
-#python -m funasr.datasets.audio_datasets.scp2jsonl \
-#++scp_file_list='["/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"]' \
-#++data_type_list='["source", "target"]' \
-#++jsonl_file_out=/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl
+data_dir="../../../data/list"
 
 train_data="${data_dir}/train.jsonl"
 val_data="${data_dir}/val.jsonl"
 
+
+# generate train.jsonl and val.jsonl from wav.scp and text.txt
+python -m funasr.datasets.audio_datasets.scp2jsonl \
+++scp_file_list='["../../../data/list/train_wav.scp", "../../../data/list/train_text.txt"]' \
+++data_type_list='["source", "target"]' \
+++jsonl_file_out="${train_data}"
+
+python -m funasr.datasets.audio_datasets.scp2jsonl \
+++scp_file_list='["../../../data/list/val_wav.scp", "../../../data/list/val_text.txt"]' \
+++data_type_list='["source", "target"]' \
+++jsonl_file_out="${val_data}"
+
+
 tokens="${local_path}/tokens.json"
 cmvn_file="${local_path}/am.mvn"
 
-# exp output dir
-output_dir="/Users/zhifu/exp"
+# output dir
+output_dir="./outputs"
 log_file="${output_dir}/log.txt"
 
-config="config.yaml"
+config_name="config.yaml"
 
 init_param="${local_path}/model.pt"
 
@@ -47,7 +56,7 @@ torchrun \
 --nproc_per_node ${gpu_num} \
 ../../../funasr/bin/train.py \
 --config-path "${local_path}" \
---config-name "${config}" \
+--config-name "${config_name}" \
 ++train_data_set_list="${train_data}" \
 ++valid_data_set_list="${val_data}" \
 ++tokenizer_conf.token_list="${tokens}" \
@@ -57,5 +66,6 @@ torchrun \
 ++dataset_conf.num_workers=4 \
 ++train_conf.max_epoch=20 \
 ++optim_conf.lr=0.0002 \
+++train_conf.log_interval=1 \
 ++init_param="${init_param}" \
 ++output_dir="${output_dir}" &> ${log_file}