Merge pull request #417 from alibaba-damo-academy/dev_lhn

update text tokenize
Lizerui9926, 2 years ago
parent commit f01a646b71
2 changed files with 18 additions and 3 deletions
  1. funasr/datasets/large_datasets/utils/tokenize.py (+9 -1)
  2. funasr/datasets/preprocessor.py (+9 -2)

funasr/datasets/large_datasets/utils/tokenize.py (+9 -1)

@@ -17,13 +17,21 @@ def forward_segment(text, seg_dict):
     return word_list
 
 def seg_tokenize(txt, seg_dict):
+    pattern = re.compile(r'^[\u4E00-\u9FA50-9]+$')
     out_txt = ""
     for word in txt:
         word = word.lower()
         if word in seg_dict:
             out_txt += seg_dict[word] + " "
         else:
-            out_txt += "<unk>" + " "
+            if pattern.match(word):
+                for char in word:
+                    if char in seg_dict:
+                        out_txt += seg_dict[char] + " "
+                    else:
+                        out_txt += "<unk>" + " "
+            else:
+                out_txt += "<unk>" + " "
     return out_txt.strip().split()
 
 def tokenize(data,

funasr/datasets/preprocessor.py (+9 -2)

@@ -44,15 +44,22 @@ def forward_segment(text, dic):
         i += len(longest_word)
     return word_list
 
-
 def seg_tokenize(txt, seg_dict):
+    pattern = re.compile(r'^[\u4E00-\u9FA50-9]+$')
     out_txt = ""
     for word in txt:
         word = word.lower()
         if word in seg_dict:
             out_txt += seg_dict[word] + " "
         else:
-            out_txt += "<unk>" + " "
+            if pattern.match(word):
+                for char in word:
+                    if char in seg_dict:
+                        out_txt += seg_dict[char] + " "
+                    else:
+                        out_txt += "<unk>" + " "
+            else:
+                out_txt += "<unk>" + " "
     return out_txt.strip().split()
 
 def seg_tokenize_wo_pattern(txt, seg_dict):
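
Both hunks apply the same change: previously, any word missing from seg_dict was emitted as a single <unk>; the patched seg_tokenize first checks whether the word consists entirely of CJK ideographs (U+4E00-U+9FA5) and digits, and if so backs off to per-character lookup, so only the characters that are truly absent from the dictionary become <unk>. Below is a minimal, self-contained sketch of the patched function; the toy seg_dict and the example call are hypothetical, and the patch assumes re is already imported in both modules:

import re

def seg_tokenize(txt, seg_dict):
    # Matches words made only of CJK ideographs (U+4E00-U+9FA5) and digits.
    pattern = re.compile(r'^[\u4E00-\u9FA50-9]+$')
    out_txt = ""
    for word in txt:
        word = word.lower()
        if word in seg_dict:
            out_txt += seg_dict[word] + " "
        elif pattern.match(word):
            # Unknown Chinese/digit word: back off to per-character lookup.
            for char in word:
                if char in seg_dict:
                    out_txt += seg_dict[char] + " "
                else:
                    out_txt += "<unk>" + " "
        else:
            out_txt += "<unk>" + " "
    return out_txt.strip().split()

# Hypothetical toy dictionary, for illustration only.
seg_dict = {"你": "你", "好": "好", "hello": "hel@@ lo"}
print(seg_tokenize(["你好", "hello", "world"], seg_dict))
# -> ['你', '好', 'hel@@', 'lo', '<unk>']

With the old code, "你好" would have collapsed to a single <unk>; after the patch it is tokenized character by character, and only "world" (which fails the CJK/digit pattern) still maps to <unk>.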