
update trainer

游雁, 2 years ago
current commit: 2bc330a599

+ 0 - 0
examples/industrial_data_pretraining/paraformer/infer_demo.sh → examples/industrial_data_pretraining/paraformer/demo.sh


+ 1 - 1
examples/industrial_data_pretraining/paraformer/finetune.sh

@@ -6,7 +6,7 @@
 #git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git ${local_path}
 
 ## generate jsonl from wav.scp and text.txt
-#python funasr/datasets/audio_datasets/scp2jsonl.py \
+#python -m funasr.datasets.audio_datasets.scp2jsonl \
 #++scp_file_list='["/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"]' \
 #++data_type_list='["source", "target"]' \
 #++jsonl_file_out=/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl
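Note: the commented-out step above converts a Kaldi-style wav.scp plus a text.txt into one json object per line. A minimal sketch of the idea, assuming entries are keyed by utterance id and that the "source"/"target" field names follow data_type_list (the real funasr.datasets.audio_datasets.scp2jsonl may use a different schema):

import json

def scp_to_jsonl(wav_scp, text_txt, jsonl_out):
    # wav.scp lines look like "utt_id /path/to/utt.wav"; text.txt like "utt_id transcript"
    wavs = dict(line.strip().split(maxsplit=1) for line in open(wav_scp))
    texts = dict(line.strip().split(maxsplit=1) for line in open(text_txt))
    with open(jsonl_out, "w") as f:
        for key, wav in wavs.items():
            if key in texts:  # keep only utterances present in both files
                f.write(json.dumps({"key": key, "source": wav, "target": texts[key]},
                                   ensure_ascii=False) + "\n")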

+ 8 - 8
funasr/datasets/audio_datasets/scp2jsonl.py

@@ -72,14 +72,7 @@ def parse_context_length(data_list: list, data_type: str):
 
 @hydra.main(config_name=None, version_base=None)
 def main_hydra(cfg: DictConfig):
-    """
-    python funasr/datasets/audio_datasets/scp2jsonl.py \
-    ++scp_file_list='["/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"]' \
-    ++data_type_list='["source", "target"]' \
-    ++jsonl_file_out=/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl
-
-    """
-    
+ 
     kwargs = OmegaConf.to_container(cfg, resolve=True)
 
     scp_file_list = kwargs.get("scp_file_list", ("/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"))
@@ -90,6 +83,13 @@ def main_hydra(cfg: DictConfig):
     gen_jsonl_from_wav_text_list(scp_file_list, data_type_list=data_type_list, jsonl_file_out=jsonl_file_out)
     
 
+"""
+python -m funasr.datasets.audio_datasets.scp2jsonl \
+++scp_file_list='["/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"]' \
+++data_type_list='["source", "target"]' \
+++jsonl_file_out=/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl
+"""
+
 if __name__ == "__main__":
     main_hydra()
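Note: the usage example moves from the function docstring to a module-level string, presumably so it no longer surfaces in Hydra's --help output (which prints the task function's docstring). The ++key=value tokens are Hydra's append-or-override syntax; with config_name=None there is no base config, so every override lands directly in cfg. A minimal sketch of the pattern, with illustrative defaults:

import hydra
from omegaconf import DictConfig, OmegaConf

@hydra.main(config_name=None, version_base=None)
def main_hydra(cfg: DictConfig):
    # everything passed as ++key=value on the command line shows up in cfg
    kwargs = OmegaConf.to_container(cfg, resolve=True)
    print(kwargs.get("jsonl_file_out", "audio_datasets.jsonl"))

if __name__ == "__main__":
    main_hydra()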
 

+ 1 - 1
funasr/models/mossformer/mossformer_encoder.py

@@ -4,7 +4,7 @@ import torch.nn.functional as F
 try:
     from rotary_embedding_torch import RotaryEmbedding
 except:
-    print("Please install rotary_embedding_torch by: \n pip install -U rotary_embedding_torch")
+    print("If you want use mossformer, lease install rotary_embedding_torch by: \n pip install -U rotary_embedding_torch")
 from funasr.models.transformer.layer_norm import GlobalLayerNorm, CumulativeLayerNorm, ScaleNorm
 from funasr.models.transformer.embedding import ScaledSinuEmbedding
 from funasr.models.transformer.mossformer import FLASH_ShareA_FFConvM
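Note: only the message changes here; the guard itself is the usual optional-dependency pattern. A minimal sketch of a slightly stricter variant (catching ImportError rather than a bare except, and failing loudly only when the feature is actually used; helper names illustrative):

try:
    from rotary_embedding_torch import RotaryEmbedding
    HAS_ROTARY = True
except ImportError:  # a bare except would also swallow KeyboardInterrupt etc.
    HAS_ROTARY = False
    print("If you want to use mossformer, please install rotary_embedding_torch by:\n"
          " pip install -U rotary_embedding_torch")

def build_rotary(dim: int):
    if not HAS_ROTARY:
        raise ImportError("mossformer requires rotary_embedding_torch")
    return RotaryEmbedding(dim)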

+ 9 - 9
funasr/train_utils/trainer.py

@@ -302,17 +302,14 @@ class Trainer:
                 )
                 pbar.set_description(description)
                 if self.writer:
-                    self.writer.add_scalar(f'rank{self.local_rank}_Loss/train', loss.item(),
-                                           epoch*len(self.dataloader_train) + batch_idx)
+                    self.writer.add_scalar(f'rank{self.local_rank}_Loss/train', loss.item(), self.batch_total)
+                    self.writer.add_scalar(f'rank{self.local_rank}_lr/train', lr, self.batch_total)
                     for key, var in stats.items():
-                        self.writer.add_scalar(f'rank{self.local_rank}_{key}/train', var.item(),
-                                               epoch * len(self.dataloader_train) + batch_idx)
+                        self.writer.add_scalar(f'rank{self.local_rank}_{key}/train', var.item(), self.batch_total)
                     for key, var in speed_stats.items():
-                        self.writer.add_scalar(f'rank{self.local_rank}_{key}/train', eval(var),
-                                               epoch * len(self.dataloader_train) + batch_idx)
-                    
-            # if batch_idx == 2:
-            #     break
+                        self.writer.add_scalar(f'rank{self.local_rank}_{key}/train', eval(var), self.batch_total)
+
+
         pbar.close()
 
     def _validate_epoch(self, epoch):
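Note: the TensorBoard x-axis switches from epoch * len(self.dataloader_train) + batch_idx to self.batch_total, a running counter assumed (from this diff) to be maintained elsewhere in Trainer. A single global counter keeps steps monotonic even when epochs differ in length or the dataloader has no stable len(). Self-contained sketch of the pattern:

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("exp/tb")                        # illustrative log dir
batch_total = 0
for epoch in range(2):
    for batch_idx, loss in enumerate([0.9, 0.7, 0.5]):  # stand-in losses
        batch_total += 1                                # one tick per step, across epochs
        writer.add_scalar("rank0_Loss/train", loss, batch_total)
writer.close()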
@@ -356,7 +353,10 @@ class Trainer:
                 
                 if (batch_idx+1) % self.log_interval == 0 or (batch_idx+1) == len(self.dataloader_val):
                     pbar.update(self.log_interval)
+                    time_now = datetime.now()
+                    time_now = time_now.strftime("%Y-%m-%d %H:%M:%S")
                     description = (
+                        f"{time_now}, "
                         f"rank: {self.local_rank}, "
                         f"validation epoch: {epoch}/{self.max_epoch}, "
                         f"step: {batch_idx+1}/{len(self.dataloader_val)}, "