shixian.shi 2 years ago
Parent
Commit
c91430542e

+ 36 - 0
egs_modelscope/asr/paraformer/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/finetune.py

@@ -0,0 +1,36 @@
+import os
+
+from modelscope.metainfo import Trainers
+from modelscope.trainers import build_trainer
+
+from funasr.datasets.ms_dataset import MsDataset
+from funasr.utils.modelscope_param import modelscope_args
+
+
+def modelscope_finetune(params):
+    if not os.path.exists(params.output_dir):
+        os.makedirs(params.output_dir, exist_ok=True)
+    # dataset split ["train", "validation"]
+    ds_dict = MsDataset.load(params.data_path)
+    kwargs = dict(
+        model=params.model,
+        data_dir=ds_dict,
+        dataset_type=params.dataset_type,
+        work_dir=params.output_dir,
+        batch_bins=params.batch_bins,
+        max_epoch=params.max_epoch,
+        lr=params.lr)
+    trainer = build_trainer(Trainers.speech_asr_trainer, default_args=kwargs)
+    trainer.train()
+
+
+if __name__ == '__main__':
+    params = modelscope_args(model="damo/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404", data_path="./data")
+    params.output_dir = "./checkpoint"              # path to save the fine-tuned model
+    params.data_path = "./example_data/"            # path to the training data
+    params.dataset_type = "small"                   # use "small" for small datasets; if the data exceeds 1000 hours, use "large"
+    params.batch_bins = 2000                        # batch size: with dataset_type="small" the unit is fbank feature frames, with dataset_type="large" it is milliseconds
+    params.max_epoch = 50                           # maximum number of training epochs
+    params.lr = 0.00005                             # learning rate
+    
+    modelscope_finetune(params)
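MsDataset.load(params.data_path) expects a dataset already prepared on disk. As a minimal sketch, assuming the common FunASR egs convention of one directory per split holding Kaldi-style wav.scp and text files (the file names and the write_split helper are assumptions, not part of this commit), such a layout could be generated like this:

import os

def write_split(split_dir, utts):
    # utts: iterable of (utt_id, wav_path, transcript) tuples (hypothetical input).
    os.makedirs(split_dir, exist_ok=True)
    with open(os.path.join(split_dir, "wav.scp"), "w", encoding="utf-8") as f_wav, \
         open(os.path.join(split_dir, "text"), "w", encoding="utf-8") as f_txt:
        for utt_id, wav_path, transcript in utts:
            f_wav.write(f"{utt_id} {wav_path}\n")    # one "utt_id /abs/path.wav" per line
            f_txt.write(f"{utt_id} {transcript}\n")  # one "utt_id transcript" per line

# write_split("./example_data/train", train_utts)
# write_split("./example_data/validation", validation_utts)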

+ 4 - 0
egs_modelscope/asr/paraformer/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/infer_aishell1_subtest_demo.py

@@ -19,11 +19,15 @@ if __name__ == '__main__':
         os.makedirs(work_dir)
     wav_file_path = os.path.join(work_dir, "wav.scp")
     
+    counter = 0
     with codecs.open(wav_file_path, 'w') as fin: 
         for line in ds_dict:
+            counter += 1
             wav = line["Audio:FILE"]
             idx = wav.split("/")[-1].split(".")[0]
             fin.writelines(idx + " " + wav + "\n")
+            if counter == 50:  # keep only the first 50 utterances for this demo
+                break
     audio_in = wav_file_path         
 
     inference_pipeline = pipeline(
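Since the added lines only cap the demo at the first 50 utterances, itertools.islice expresses the same loop without manual counter bookkeeping; a hedged alternative sketch (ds_dict and wav_file_path as in the script above):

import codecs
from itertools import islice

with codecs.open(wav_file_path, 'w') as fout:
    # islice stops the iteration after 50 items, replacing the counter and break.
    for line in islice(ds_dict, 50):
        wav = line["Audio:FILE"]
        idx = wav.split("/")[-1].split(".")[0]
        fout.write(idx + " " + wav + "\n")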

+ 2 - 1
funasr/bin/asr_inference_paraformer.py

@@ -41,6 +41,7 @@ from funasr.utils.types import str_or_none
 from funasr.utils import asr_utils, wav_utils, postprocess_utils
 from funasr.models.frontend.wav_frontend import WavFrontend
 from funasr.models.e2e_asr_paraformer import BiCifParaformer, ContextualParaformer
+from funasr.models.e2e_asr_contextual_paraformer import NeatContextualParaformer
 from funasr.export.models.e2e_asr_paraformer import Paraformer as Paraformer_export
 from funasr.utils.timestamp_tools import ts_prediction_lfr6_standard
 from funasr.bin.tp_inference import SpeechText2Timestamp
@@ -236,7 +237,7 @@ class Speech2Text:
         pre_token_length = pre_token_length.round().long()
         if torch.max(pre_token_length) < 1:
             return []
-        if not isinstance(self.asr_model, ContextualParaformer):
+        if not isinstance(self.asr_model, (ContextualParaformer, NeatContextualParaformer)):
             if self.hotword_list:
                 logging.warning("Hotword is given but asr model is not a ContextualParaformer.")
             decoder_outs = self.asr_model.cal_decoder_with_predictor(enc, enc_len, pre_acoustic_embeds, pre_token_length)
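isinstance accepts a tuple of classes, which is why the two model checks collapse into the single test above; a minimal illustration of the equivalence (class names are placeholders):

class A: pass
class B: pass

x = B()
# One tuple check is equivalent to OR-ing the individual checks.
assert isinstance(x, (A, B)) == (isinstance(x, A) or isinstance(x, B))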

+ 2 - 4
funasr/models/e2e_asr_contextual_paraformer.py

@@ -68,7 +68,7 @@ class NeatContextualParaformer(Paraformer):
         target_buffer_length: int = -1,
         inner_dim: int = 256, 
         bias_encoder_type: str = 'lstm',
-        use_decoder_embedding: bool = True,
+        use_decoder_embedding: bool = False,
         crit_attn_weight: float = 0.0,
         crit_attn_smooth: float = 0.0,
         bias_encoder_dropout_rate: float = 0.0,
@@ -340,7 +340,7 @@ class NeatContextualParaformer(Paraformer):
             input_mask_expand_dim, 0)
         return sematic_embeds * tgt_mask, decoder_out * tgt_mask
 
-    def cal_decoder_with_predictor_with_hwlist_advanced(self, encoder_out, encoder_out_lens, sematic_embeds, ys_pad_lens, hw_list=None):
+    def cal_decoder_with_predictor(self, encoder_out, encoder_out_lens, sematic_embeds, ys_pad_lens, hw_list=None):
         if hw_list is None:
             hw_list = [torch.Tensor([1]).long().to(encoder_out.device)]  # empty hotword list
             hw_list_pad = pad_list(hw_list, 0)
@@ -350,7 +350,6 @@ class NeatContextualParaformer(Paraformer):
                 hw_embed = self.bias_embed(hw_list_pad)
             hw_embed, (h_n, _) = self.bias_encoder(hw_embed)
         else:
-            # hw_list = hw_list[1:] + [hw_list[0]]  # reorder
             hw_lengths = [len(i) for i in hw_list]
             hw_list_pad = pad_list([torch.Tensor(i).long() for i in hw_list], 0).to(encoder_out.device)
             if self.use_decoder_embedding:
@@ -366,7 +365,6 @@ class NeatContextualParaformer(Paraformer):
                 if _h_n is not None:
                     h_n = _h_n
             hw_embed = h_n.repeat(encoder_out.shape[0], 1, 1)
-        # import pdb; pdb.set_trace()
         
         decoder_outs = self.decoder(
             encoder_out, encoder_out_lens, sematic_embeds, ys_pad_lens, contextual_info=hw_embed
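pad_list above batches variable-length hotword id lists into one tensor before they are embedded and fed through the bias LSTM encoder; it behaves like torch's pad_sequence with batch_first=True (an assumption based on the usual ESPnet-style helper, not verified against this commit). A toy illustration with made-up token ids:

import torch
from torch.nn.utils.rnn import pad_sequence

# Three hypothetical hotwords of different token lengths.
hw_list = [[12, 7, 301], [45, 9], [88]]
hw_tensors = [torch.tensor(ids, dtype=torch.long) for ids in hw_list]

# Right-pad with 0 so all hotwords share one (num_hotwords, max_len) batch,
# mirroring pad_list(hw_list_pad, 0) in the diff above.
hw_pad = pad_sequence(hw_tensors, batch_first=True, padding_value=0)
print(hw_pad.shape)  # torch.Size([3, 3])
print(hw_pad[2])     # tensor([88,  0,  0])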