
Merge pull request #370 from alibaba-damo-academy/dev_lhn2

update
zhifu gao, 2 years ago
parent
commit cfc8c117bd
2 changed files with 3 additions and 3 deletions
  1. funasr/datasets/large_datasets/utils/tokenize.py (+2 -2)
  2. funasr/datasets/preprocessor.py (+1 -1)

funasr/datasets/large_datasets/utils/tokenize.py  +2 -2

@@ -19,6 +19,7 @@ def forward_segment(text, seg_dict):
 def seg_tokenize(txt, seg_dict):
     out_txt = ""
     for word in txt:
+        word = word.lower()
         if word in seg_dict:
             out_txt += seg_dict[word] + " "
         else:
@@ -41,8 +42,7 @@ def tokenize(data,
 
     if seg_dict is not None:
         assert isinstance(seg_dict, dict)
-        txt = forward_segment("".join(text).lower(), seg_dict)
-        text = seg_tokenize(txt, seg_dict)
+        text = seg_tokenize(text, seg_dict)
 
     length = len(text)
     for i in range(length):
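For context, here is a minimal, self-contained sketch of seg_tokenize as it reads after this change. The "<unk>" fallback in the else branch and the final split back into a token list are assumptions (the hunk truncates the else body, and the split is inferred from len(text) being used as a token count just below), and the toy seg_dict is illustrative only:

    def seg_tokenize(txt, seg_dict):
        out_txt = ""
        for word in txt:
            word = word.lower()  # new in this commit: dictionary lookup is case-insensitive
            if word in seg_dict:
                out_txt += seg_dict[word] + " "
            else:
                out_txt += "<unk> "  # assumed fallback; the else body is cut off in the hunk
        return out_txt.strip().split(" ")  # assumed return; the caller treats the result as a token list

    # Illustrative use with a hypothetical BPE-style mapping:
    seg_dict = {"hello": "he@@ llo", "world": "wor@@ ld"}
    print(seg_tokenize(["Hello", "WORLD"], seg_dict))
    # ['he@@', 'llo', 'wor@@', 'ld'] -- mixed-case input now hits the dictionary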

funasr/datasets/preprocessor.py  +1 -1

@@ -48,6 +48,7 @@ def forward_segment(text, dic):
 def seg_tokenize(txt, seg_dict):
     out_txt = ""
     for word in txt:
+        word = word.lower()
         if word in seg_dict:
             out_txt += seg_dict[word] + " "
         else:
@@ -359,7 +360,6 @@ class CommonPreprocessor(AbsPreprocessor):
             if self.split_with_space:
                 tokens = text.strip().split(" ")
                 if self.seg_dict is not None:
-                    tokens = forward_segment("".join(tokens), self.seg_dict)
                     tokens = seg_tokenize(tokens, self.seg_dict)
             else:
                 tokens = self.tokenizer.text2tokens(text)
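Both files make the same move: the forward_segment pre-pass (presumably a greedy longest-match re-segmentation, going by its name) is dropped, and lowercasing shifts into seg_tokenize. The call-path difference, taken directly from the two hunks above:

    # Before (tokenize.py; preprocessor.py was the same minus the .lower()):
    txt = forward_segment("".join(text).lower(), seg_dict)
    text = seg_tokenize(txt, seg_dict)

    # After (both files): trust the existing whitespace split; lowercase per word
    # inside seg_tokenize instead.
    text = seg_tokenize(text, seg_dict)

Two consequences visible in the diff itself: "".join(...) used to fuse the tokens with no separators, forcing forward_segment to rediscover word boundaries, whereas the new path keeps the boundaries from the original split; and the old preprocessor.py call site never lowercased at all, so mixed-case input could miss the dictionary entirely. Moving lower() into seg_tokenize fixes both call sites at once.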