@@ -11,6 +11,7 @@ from typing import Union
 import numpy as np
 import scipy.signal
 import soundfile
+import jieba
 
 from funasr.text.build_tokenizer import build_tokenizer
 from funasr.text.cleaner import TextCleaner
@@ -628,6 +629,7 @@ class CodeMixTokenizerCommonPreprocessor(CommonPreprocessor):
         text_name: str = "text",
         split_text_name: str = "split_text",
         split_with_space: bool = False,
+        seg_jieba: bool = False,
         seg_dict_file: str = None,
     ):
         super().__init__(
@@ -655,6 +657,11 @@ class CodeMixTokenizerCommonPreprocessor(CommonPreprocessor):
         )
         # The data field name for split text.
         self.split_text_name = split_text_name
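+        # When seg_jieba is enabled, load the user dictionary once at
+        # construction time rather than on every __call__.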
+        self.seg_jieba = seg_jieba
+        if self.seg_jieba:
+            jieba.load_userdict(seg_dict_file)
 
     @classmethod
     def split_words(cls, text: str):
@@ -677,12 +684,79 @@ class CodeMixTokenizerCommonPreprocessor(CommonPreprocessor):
             words.append(current_word)
         return words
 
+    @classmethod
+    def is_english(cls, text: str):
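+        # True when the token consists only of ASCII letters and
+        # apostrophes, i.e. looks like an English word.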
+        return re.fullmatch(r"[a-zA-Z']+", text) is not None
+
+    @classmethod
+    def join_chinese_and_english(cls, input_list):
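+        # Rebuild a string from a token run: English words keep a space
+        # on each side, Chinese tokens are concatenated directly.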
+        line = ''
+        for token in input_list:
+            if cls.is_english(token):
+                line = line + ' ' + token
+            else:
+                line = line + token
+        return line.strip()
+
+    @classmethod
+    def split_words_jieba(cls, text: str):
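+        # Group consecutive tokens into runs of the same language, then
+        # re-segment the Chinese runs with jieba while English runs pass
+        # through unchanged. Illustrative example (the exact split
+        # depends on jieba's dictionary):
+        #   "我爱北京 hello world" -> ["我", "爱", "北京", "hello", "world"]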
+        input_list = text.split()
+        token_list_all = []
+        language_list = []
+        token_list_tmp = []
+        language_flag = None
+        for token in input_list:
+            if cls.is_english(token) and language_flag == 'Chinese':
+                token_list_all.append(token_list_tmp)
+                language_list.append('Chinese')
+                token_list_tmp = []
+            elif not cls.is_english(token) and language_flag == 'English':
+                token_list_all.append(token_list_tmp)
+                language_list.append('English')
+                token_list_tmp = []
+
+            token_list_tmp.append(token)
+
+            if cls.is_english(token):
+                language_flag = 'English'
+            else:
+                language_flag = 'Chinese'
+
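+        # Flush the final run, if any.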
+        if token_list_tmp:
+            token_list_all.append(token_list_tmp)
+            language_list.append(language_flag)
+
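+        # Re-segment each run according to its language.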
+        result_list = []
+        for token_list_tmp, language_flag in zip(token_list_all, language_list):
+            if language_flag == 'English':
+                result_list.extend(token_list_tmp)
+            else:
+                seg_list = jieba.cut(cls.join_chinese_and_english(token_list_tmp), HMM=False)
+                result_list.extend(seg_list)
+
+        return result_list
+
     def __call__(
         self, uid: str, data: Dict[str, Union[list, str, np.ndarray]]
     ) -> Dict[str, Union[list, np.ndarray]]:
         # Split words.
         if isinstance(data[self.text_name], str):
-            split_text = self.split_words(data[self.text_name])
+            if self.seg_jieba:
+                # The jieba user dictionary was already loaded in __init__.
+                split_text = self.split_words_jieba(data[self.text_name])
+            else:
+                split_text = self.split_words(data[self.text_name])
         else:
             split_text = data[self.text_name]
         data[self.text_name] = " ".join(split_text)
@@ -782,7 +856,6 @@ class PuncTrainTokenizerCommonPreprocessor(CommonPreprocessor):
     ) -> Dict[str, np.ndarray]:
         for i in range(self.num_tokenizer):
             text_name = self.text_name[i]
-            #import pdb; pdb.set_trace()
             if text_name in data and self.tokenizer[i] is not None:
                 text = data[text_name]
                 text = self.text_cleaner(text)