Просмотр исходного кода

Merge pull request #61 from alibaba-damo-academy/dev_lhn

Dev lhn
hnluo 3 года назад
Родитель
Commit
450ed4f344

+ 2 - 0
funasr/bin/asr_inference.py

@@ -534,6 +534,7 @@ def inference_modelscope(
     def _forward(data_path_and_name_and_type,
     def _forward(data_path_and_name_and_type,
                  raw_inputs: Union[np.ndarray, torch.Tensor] = None,
                  raw_inputs: Union[np.ndarray, torch.Tensor] = None,
                  output_dir_v2: Optional[str] = None,
                  output_dir_v2: Optional[str] = None,
+                 fs: dict = None,
                  param_dict: dict = None,
                  param_dict: dict = None,
                  ):
                  ):
         # 3. Build data-iterator
         # 3. Build data-iterator
@@ -544,6 +545,7 @@ def inference_modelscope(
         loader = ASRTask.build_streaming_iterator(
         loader = ASRTask.build_streaming_iterator(
             data_path_and_name_and_type,
             data_path_and_name_and_type,
             dtype=dtype,
             dtype=dtype,
+            fs=fs,
             batch_size=batch_size,
             batch_size=batch_size,
             key_file=key_file,
             key_file=key_file,
             num_workers=num_workers,
             num_workers=num_workers,

+ 2 - 0
funasr/bin/asr_inference_paraformer.py

@@ -579,6 +579,7 @@ def inference_modelscope(
             data_path_and_name_and_type,
             data_path_and_name_and_type,
             raw_inputs: Union[np.ndarray, torch.Tensor] = None,
             raw_inputs: Union[np.ndarray, torch.Tensor] = None,
             output_dir_v2: Optional[str] = None,
             output_dir_v2: Optional[str] = None,
+            fs: dict = None,
             param_dict: dict = None,
             param_dict: dict = None,
     ):
     ):
         # 3. Build data-iterator
         # 3. Build data-iterator
@@ -589,6 +590,7 @@ def inference_modelscope(
         loader = ASRTask.build_streaming_iterator(
         loader = ASRTask.build_streaming_iterator(
             data_path_and_name_and_type,
             data_path_and_name_and_type,
             dtype=dtype,
             dtype=dtype,
+            fs=fs,
             batch_size=batch_size,
             batch_size=batch_size,
             key_file=key_file,
             key_file=key_file,
             num_workers=num_workers,
             num_workers=num_workers,

+ 2 - 0
funasr/bin/asr_inference_paraformer_vad_punc.py

@@ -548,6 +548,7 @@ def inference_modelscope(
     def _forward(data_path_and_name_and_type,
     def _forward(data_path_and_name_and_type,
                  raw_inputs: Union[np.ndarray, torch.Tensor] = None,
                  raw_inputs: Union[np.ndarray, torch.Tensor] = None,
                  output_dir_v2: Optional[str] = None,
                  output_dir_v2: Optional[str] = None,
+                 fs: dict = None,
                  param_dict: dict = None,
                  param_dict: dict = None,
                  ):
                  ):
         # 3. Build data-iterator
         # 3. Build data-iterator
@@ -558,6 +559,7 @@ def inference_modelscope(
         loader = ASRTask.build_streaming_iterator(
         loader = ASRTask.build_streaming_iterator(
             data_path_and_name_and_type,
             data_path_and_name_and_type,
             dtype=dtype,
             dtype=dtype,
+            fs=fs,
             batch_size=1,
             batch_size=1,
             key_file=key_file,
             key_file=key_file,
             num_workers=num_workers,
             num_workers=num_workers,

+ 2 - 0
funasr/bin/asr_inference_uniasr.py

@@ -575,6 +575,7 @@ def inference_modelscope(
     def _forward(data_path_and_name_and_type,
     def _forward(data_path_and_name_and_type,
                  raw_inputs: Union[np.ndarray, torch.Tensor] = None,
                  raw_inputs: Union[np.ndarray, torch.Tensor] = None,
                  output_dir_v2: Optional[str] = None,
                  output_dir_v2: Optional[str] = None,
+                 fs: dict = None,
                  param_dict: dict = None,
                  param_dict: dict = None,
                  ):
                  ):
         # 3. Build data-iterator
         # 3. Build data-iterator
@@ -585,6 +586,7 @@ def inference_modelscope(
         loader = ASRTask.build_streaming_iterator(
         loader = ASRTask.build_streaming_iterator(
             data_path_and_name_and_type,
             data_path_and_name_and_type,
             dtype=dtype,
             dtype=dtype,
+            fs=fs,
             batch_size=batch_size,
             batch_size=batch_size,
             key_file=key_file,
             key_file=key_file,
             num_workers=num_workers,
             num_workers=num_workers,

+ 3 - 0
funasr/bin/vad_inference.py

@@ -251,6 +251,7 @@ def inference_modelscope(
         dtype: str = "float32",
         dtype: str = "float32",
         seed: int = 0,
         seed: int = 0,
         num_workers: int = 1,
         num_workers: int = 1,
+        param_dict: dict = None,
         **kwargs,
         **kwargs,
 ):
 ):
     assert check_argument_types()
     assert check_argument_types()
@@ -287,6 +288,8 @@ def inference_modelscope(
         data_path_and_name_and_type,
         data_path_and_name_and_type,
         raw_inputs: Union[np.ndarray, torch.Tensor] = None,
         raw_inputs: Union[np.ndarray, torch.Tensor] = None,
         output_dir_v2: Optional[str] = None,
         output_dir_v2: Optional[str] = None,
+        fs: dict = None,
+        param_dict: dict = None,
     ):
     ):
         # 3. Build data-iterator
         # 3. Build data-iterator
         loader = VADTask.build_streaming_iterator(
         loader = VADTask.build_streaming_iterator(

+ 29 - 2
funasr/datasets/iterable_dataset.py

@@ -11,7 +11,6 @@ from typing import Union
 
 
 import kaldiio
 import kaldiio
 import numpy as np
 import numpy as np
-import soundfile
 import torch
 import torch
 import torchaudio
 import torchaudio
 from torch.utils.data.dataset import IterableDataset
 from torch.utils.data.dataset import IterableDataset
@@ -101,6 +100,7 @@ class IterableESPnetDataset(IterableDataset):
                 [str, Dict[str, np.ndarray]], Dict[str, np.ndarray]
                 [str, Dict[str, np.ndarray]], Dict[str, np.ndarray]
             ] = None,
             ] = None,
             float_dtype: str = "float32",
             float_dtype: str = "float32",
+            fs: dict = None,
             int_dtype: str = "long",
             int_dtype: str = "long",
             key_file: str = None,
             key_file: str = None,
     ):
     ):
@@ -116,6 +116,7 @@ class IterableESPnetDataset(IterableDataset):
         self.float_dtype = float_dtype
         self.float_dtype = float_dtype
         self.int_dtype = int_dtype
         self.int_dtype = int_dtype
         self.key_file = key_file
         self.key_file = key_file
+        self.fs = fs
 
 
         self.debug_info = {}
         self.debug_info = {}
         non_iterable_list = []
         non_iterable_list = []
@@ -175,6 +176,15 @@ class IterableESPnetDataset(IterableDataset):
             _type = self.path_name_type_list[0][2]
             _type = self.path_name_type_list[0][2]
             func = DATA_TYPES[_type]
             func = DATA_TYPES[_type]
             array = func(value)
             array = func(value)
+            if self.fs is not None and name == "speech":
+                audio_fs = self.fs["audio_fs"]
+                model_fs = self.fs["model_fs"]
+                if audio_fs is not None and model_fs is not None:
+                    array = torch.from_numpy(array)
+                    array = array.unsqueeze(0)
+                    array = torchaudio.transforms.Resample(orig_freq=audio_fs,
+                                                   new_freq=model_fs)(array)
+                    array = array.squeeze(0).numpy()
             data[name] = array
             data[name] = array
 
 
             if self.preprocess is not None:
             if self.preprocess is not None:
@@ -211,6 +221,15 @@ class IterableESPnetDataset(IterableDataset):
                         f'Not supported audio type: {audio_type}')
                         f'Not supported audio type: {audio_type}')
             func = DATA_TYPES[_type]
             func = DATA_TYPES[_type]
             array = func(value)
             array = func(value)
+            if self.fs is not None and name == "speech":
+                audio_fs = self.fs["audio_fs"]
+                model_fs = self.fs["model_fs"]
+                if audio_fs is not None and model_fs is not None:
+                    array = torch.from_numpy(array)
+                    array = array.unsqueeze(0)
+                    array = torchaudio.transforms.Resample(orig_freq=audio_fs,
+                                                           new_freq=model_fs)(array)
+                    array = array.squeeze(0).numpy()
             data[name] = array
             data[name] = array
 
 
             if self.preprocess is not None:
             if self.preprocess is not None:
@@ -302,6 +321,15 @@ class IterableESPnetDataset(IterableDataset):
                     func = DATA_TYPES[_type]
                     func = DATA_TYPES[_type]
                     # Load entry
                     # Load entry
                     array = func(value)
                     array = func(value)
+                    if self.fs is not None and name == "speech":
+                        audio_fs = self.fs["audio_fs"]
+                        model_fs = self.fs["model_fs"]
+                        if audio_fs is not None and model_fs is not None:
+                            array = torch.from_numpy(array)
+                            array = array.unsqueeze(0)
+                            array = torchaudio.transforms.Resample(orig_freq=audio_fs,
+                                                                   new_freq=model_fs)(array)
+                            array = array.squeeze(0).numpy()
                     data[name] = array
                     data[name] = array
                 if self.non_iterable_dataset is not None:
                 if self.non_iterable_dataset is not None:
                     # 2.b. Load data from non-iterable dataset
                     # 2.b. Load data from non-iterable dataset
@@ -335,4 +363,3 @@ class IterableESPnetDataset(IterableDataset):
 
 
         if count == 0:
         if count == 0:
             raise RuntimeError("No iteration")
             raise RuntimeError("No iteration")
-

+ 2 - 0
funasr/tasks/abs_task.py

@@ -1783,6 +1783,7 @@ class AbsTask(ABC):
             collate_fn,
             collate_fn,
             key_file: str = None,
             key_file: str = None,
             batch_size: int = 1,
             batch_size: int = 1,
+            fs: dict = None,
             dtype: str = np.float32,
             dtype: str = np.float32,
             num_workers: int = 1,
             num_workers: int = 1,
             allow_variable_data_keys: bool = False,
             allow_variable_data_keys: bool = False,
@@ -1800,6 +1801,7 @@ class AbsTask(ABC):
         dataset = IterableESPnetDataset(
         dataset = IterableESPnetDataset(
             data_path_and_name_and_type,
             data_path_and_name_and_type,
             float_dtype=dtype,
             float_dtype=dtype,
+            fs=fs,
             preprocess=preprocess_fn,
             preprocess=preprocess_fn,
             key_file=key_file,
             key_file=key_file,
         )
         )