Sfoglia il codice sorgente

Merge branch 'main' of github.com:alibaba-damo-academy/FunASR
add

游雁 2 anni fa
parent
commit
7da5b31e25

+ 127 - 127
docs/benchmark/benchmark_pipeline_cer.md

@@ -45,156 +45,156 @@ bash infer.sh
 ### Chinese Dataset
 
-<table>
+<table border="1">
     <tr align="center">
     <tr align="center">
-        <td>Model</td>
-        <td>Offline/Online</td>
-        <td colspan="2">Aishell1</td>
-        <td colspan="4">Aishell2</td>
-        <td colspan="3">WenetSpeech</td>
+        <td style="border: 1px solid">Model</td>
+        <td style="border: 1px solid">Offline/Online</td>
+        <td colspan="2" style="border: 1px solid">Aishell1</td>
+        <td colspan="4" style="border: 1px solid">Aishell2</td>
+        <td colspan="3" style="border: 1px solid">WenetSpeech</td>
     </tr>
     </tr>
     <tr align="center">
     <tr align="center">
-        <td></td>
-        <td></td>
-        <td>dev</td> 
-        <td>test</td>
-        <td>dev_ios</td>
-        <td>test_ios</td>
-        <td>test_android</td>
-        <td>test_mic</td>
-        <td>dev</td>
-        <td>test_meeting</td>
-        <td>test_net</td>
+        <td style="border: 1px solid"></td>
+        <td style="border: 1px solid"></td>
+        <td style="border: 1px solid">dev</td> 
+        <td style="border: 1px solid">test</td>
+        <td style="border: 1px solid">dev_ios</td>
+        <td style="border: 1px solid">test_ios</td>
+        <td style="border: 1px solid">test_android</td>
+        <td style="border: 1px solid">test_mic</td>
+        <td style="border: 1px solid">dev</td>
+        <td style="border: 1px solid">test_meeting</td>
+        <td style="border: 1px solid">test_net</td>
     </tr>
     </tr>
     <tr align="center">
     <tr align="center">
-        <td> <a href="https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary">Paraformer-large</a> </td>
-        <td>Offline</td>
-        <td>1.76</td>
-        <td>1.94</td>
-        <td>2.79</td>
-        <td>2.84</td>
-        <td>3.08</td>
-        <td>3.03</td>
-        <td>3.43</td>
-        <td>7.01</td>
-        <td>6.66</td>
+        <td style="border: 1px solid"> <a href="https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary">Paraformer-large</a> </td>
+        <td style="border: 1px solid">Offline</td>
+        <td style="border: 1px solid">1.76</td>
+        <td style="border: 1px solid">1.94</td>
+        <td style="border: 1px solid">2.79</td>
+        <td style="border: 1px solid">2.84</td>
+        <td style="border: 1px solid">3.08</td>
+        <td style="border: 1px solid">3.03</td>
+        <td style="border: 1px solid">3.43</td>
+        <td style="border: 1px solid">7.01</td>
+        <td style="border: 1px solid">6.66</td>
     </tr>
     </tr>
     <tr align="center">
     <tr align="center">
-        <td> <a href="https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary">Paraformer-large-long</a> </td> 
-        <td>Offline</td>      
-        <td>1.80</td>
-        <td>2.10</td>
-        <td>2.78</td>
-        <td>2.87</td>
-        <td>3.12</td>
-        <td>3.11</td>
-        <td>3.44</td>
-        <td>13.28</td>
-        <td>7.08</td>
+        <td style="border: 1px solid"> <a href="https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary">Paraformer-large-long</a> </td> 
+        <td style="border: 1px solid">Offline</td>      
+        <td style="border: 1px solid">1.80</td>
+        <td style="border: 1px solid">2.10</td>
+        <td style="border: 1px solid">2.78</td>
+        <td style="border: 1px solid">2.87</td>
+        <td style="border: 1px solid">3.12</td>
+        <td style="border: 1px solid">3.11</td>
+        <td style="border: 1px solid">3.44</td>
+        <td style="border: 1px solid">13.28</td>
+        <td style="border: 1px solid">7.08</td>
     </tr>
     </tr>
     <tr align="center">
     <tr align="center">
-        <td> <a href="https://www.modelscope.cn/models/damo/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/summary">Paraformer-large-contextual</a> </td>
-        <td>Offline</td>
-        <td>1.76</td>
-        <td>2.02</td>
-        <td>2.73</td>
-        <td>2.85</td>
-        <td>2.98</td>
-        <td>2.95</td>
-        <td>3.42</td>
-        <td>7.16</td>
-        <td>6.72</td>
+        <td style="border: 1px solid"> <a href="https://www.modelscope.cn/models/damo/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/summary">Paraformer-large-contextual</a> </td>
+        <td style="border: 1px solid">Offline</td>
+        <td style="border: 1px solid">1.76</td>
+        <td style="border: 1px solid">2.02</td>
+        <td style="border: 1px solid">2.73</td>
+        <td style="border: 1px solid">2.85</td>
+        <td style="border: 1px solid">2.98</td>
+        <td style="border: 1px solid">2.95</td>
+        <td style="border: 1px solid">3.42</td>
+        <td style="border: 1px solid">7.16</td>
+        <td style="border: 1px solid">6.72</td>
     </tr>
     </tr>
     <tr align="center">
     <tr align="center">
-        <td> <a href="https://modelscope.cn/models/damo/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8358-tensorflow1/summary">Paraformer</a> </td> 
-        <td>Offline</td>
-        <td>3.24</td>
-        <td>3.69</td>
-        <td>4.58</td>
-        <td>4.63</td>
-        <td>4.83</td>
-        <td>4.71</td>
-        <td>4.19</td>
-        <td>8.32</td>
-        <td>9.19</td>
+        <td style="border: 1px solid"> <a href="https://modelscope.cn/models/damo/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8358-tensorflow1/summary">Paraformer</a> </td> 
+        <td style="border: 1px solid">Offline</td>
+        <td style="border: 1px solid">3.24</td>
+        <td style="border: 1px solid">3.69</td>
+        <td style="border: 1px solid">4.58</td>
+        <td style="border: 1px solid">4.63</td>
+        <td style="border: 1px solid">4.83</td>
+        <td style="border: 1px solid">4.71</td>
+        <td style="border: 1px solid">4.19</td>
+        <td style="border: 1px solid">8.32</td>
+        <td style="border: 1px solid">9.19</td>
     </tr>
     </tr>
    <tr align="center">
    <tr align="center">
-        <td> <a href="https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online/summary">UniASR</a> </td> 
-        <td>Online</td>
-        <td>3.34</td>
-        <td>3.99</td>
-        <td>4.62</td>
-        <td>4.52</td>
-        <td>4.77</td>
-        <td>4.73</td>
-        <td>4.51</td>
-        <td>10.63</td>
-        <td>9.70</td>
+        <td style="border: 1px solid"> <a href="https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online/summary">UniASR</a> </td> 
+        <td style="border: 1px solid">Online</td>
+        <td style="border: 1px solid">3.34</td>
+        <td style="border: 1px solid">3.99</td>
+        <td style="border: 1px solid">4.62</td>
+        <td style="border: 1px solid">4.52</td>
+        <td style="border: 1px solid">4.77</td>
+        <td style="border: 1px solid">4.73</td>
+        <td style="border: 1px solid">4.51</td>
+        <td style="border: 1px solid">10.63</td>
+        <td style="border: 1px solid">9.70</td>
     </tr>
     </tr>
    <tr align="center">
    <tr align="center">
-        <td> <a href="https://modelscope.cn/models/damo/speech_UniASR-large_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline/summary">UniASR-large</a> </td> 
-        <td>Offline</td>      
-        <td>2.93</td>
-        <td>3.48</td>
-        <td>3.95</td>
-        <td>3.87</td>
-        <td>4.11</td>
-        <td>4.11</td>
-        <td>4.16</td>
-        <td>10.09</td>
-        <td>8.69</td>
+        <td style="border: 1px solid"> <a href="https://modelscope.cn/models/damo/speech_UniASR-large_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline/summary">UniASR-large</a> </td> 
+        <td style="border: 1px solid">Offline</td>      
+        <td style="border: 1px solid">2.93</td>
+        <td style="border: 1px solid">3.48</td>
+        <td style="border: 1px solid">3.95</td>
+        <td style="border: 1px solid">3.87</td>
+        <td style="border: 1px solid">4.11</td>
+        <td style="border: 1px solid">4.11</td>
+        <td style="border: 1px solid">4.16</td>
+        <td style="border: 1px solid">10.09</td>
+        <td style="border: 1px solid">8.69</td>
     </tr>
     </tr>
     <tr align="center">
     <tr align="center">
-        <td> <a href="https://www.modelscope.cn/models/damo/speech_paraformer_asr_nat-aishell1-pytorch/summary">Paraformer-aishell</a> </td>
-        <td>Offline</td>
-        <td>4.88</td>
-        <td>5.43</td>
-        <td>-</td>
-        <td>-</td>
-        <td>-</td>
-        <td>-</td>
-        <td>-</td>
-        <td>-</td>
-        <td>-</td>
+        <td style="border: 1px solid"> <a href="https://www.modelscope.cn/models/damo/speech_paraformer_asr_nat-aishell1-pytorch/summary">Paraformer-aishell</a> </td>
+        <td style="border: 1px solid">Offline</td>
+        <td style="border: 1px solid">4.88</td>
+        <td style="border: 1px solid">5.43</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">-</td>
     </tr>
     </tr>
    <tr align="center">
    <tr align="center">
-        <td> <a href="https://modelscope.cn/models/damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/summary">ParaformerBert-aishell</a> </td>
-        <td>Offline</td>
-        <td>6.14</td>
-        <td>7.01</td>
-        <td>-</td>
-        <td>-</td>
-        <td>-</td>
-        <td>-</td>
-        <td>-</td>
-        <td>-</td>
-        <td>-</td>
+        <td style="border: 1px solid"> <a href="https://modelscope.cn/models/damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/summary">ParaformerBert-aishell</a> </td>
+        <td style="border: 1px solid">Offline</td>
+        <td style="border: 1px solid">6.14</td>
+        <td style="border: 1px solid">7.01</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">-</td>
     </tr>
     </tr>
    <tr align="center">
    <tr align="center">
-        <td> <a href="https://www.modelscope.cn/models/damo/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/summary">Paraformer-aishell2</a> </td> 
-        <td>Offline</td>
-        <td>-</td>
-        <td>-</td>
-        <td>5.82</td>
-        <td>6.30</td>
-        <td>6.60</td>
-        <td>5.83</td>
-        <td>-</td>
-        <td>-</td>
-        <td>-</td>
+        <td style="border: 1px solid"> <a href="https://www.modelscope.cn/models/damo/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/summary">Paraformer-aishell2</a> </td> 
+        <td style="border: 1px solid">Offline</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">5.82</td>
+        <td style="border: 1px solid">6.30</td>
+        <td style="border: 1px solid">6.60</td>
+        <td style="border: 1px solid">5.83</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">-</td>
     </tr>
     </tr>
    <tr align="center">
    <tr align="center">
-        <td> <a href="https://www.modelscope.cn/models/damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/summary">ParaformerBert-aishell2</a> </td> 
-        <td>Offline</td>
-        <td>-</td>
-        <td>-</td>
-        <td>4.95</td>
-        <td>5.45</td>
-        <td>5.59</td>
-        <td>5.83</td>
-        <td>-</td>
-        <td>-</td>
-        <td>-</td>
+        <td style="border: 1px solid"> <a href="https://www.modelscope.cn/models/damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/summary">ParaformerBert-aishell2</a> </td> 
+        <td style="border: 1px solid">Offline</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">4.95</td>
+        <td style="border: 1px solid">5.45</td>
+        <td style="border: 1px solid">5.59</td>
+        <td style="border: 1px solid">5.83</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">-</td>
+        <td style="border: 1px solid">-</td>
     </tr>
     </tr>
 </table>
 

+ 4 - 1
funasr/bin/asr_inference_launch.py

@@ -867,7 +867,10 @@ def inference_paraformer_online(
             try:
             try:
                 raw_inputs = torchaudio.load(data_path_and_name_and_type[0])[0][0]
                 raw_inputs = torchaudio.load(data_path_and_name_and_type[0])[0][0]
             except:
             except:
-                raw_inputs = torch.tensor(soundfile.read(data_path_and_name_and_type[0])[0])
+                raw_inputs = soundfile.read(data_path_and_name_and_type[0], dtype='float32')[0]
+                if raw_inputs.ndim == 2:
+                    raw_inputs = raw_inputs[:, 0]
+                raw_inputs = torch.tensor(raw_inputs)
         if data_path_and_name_and_type is None and raw_inputs is not None:
         if data_path_and_name_and_type is None and raw_inputs is not None:
             if isinstance(raw_inputs, np.ndarray):
             if isinstance(raw_inputs, np.ndarray):
                 raw_inputs = torch.tensor(raw_inputs)
                 raw_inputs = torch.tensor(raw_inputs)

+ 4 - 1
funasr/datasets/iterable_dataset.py

@@ -71,7 +71,10 @@ def load_wav(input):
     try:
     try:
         return torchaudio.load(input)[0].numpy()
         return torchaudio.load(input)[0].numpy()
     except:
     except:
-        return np.expand_dims(soundfile.read(input)[0], axis=0)
+        waveform, _ = soundfile.read(input, dtype='float32')
+        if waveform.ndim == 2:
+            waveform = waveform[:, 0]
+        return np.expand_dims(waveform, axis=0)
 
 
 DATA_TYPES = {
 DATA_TYPES = {
     "sound": load_wav,
     "sound": load_wav,

+ 3 - 1
funasr/datasets/large_datasets/dataset.py

@@ -128,7 +128,9 @@ class AudioDataset(IterableDataset):
                         try:
                         try:
                             waveform, sampling_rate = torchaudio.load(path)
                             waveform, sampling_rate = torchaudio.load(path)
                         except:
                         except:
-                            waveform, sampling_rate = soundfile.read(path)
+                            waveform, sampling_rate = soundfile.read(path, dtype='float32')
+                            if waveform.ndim == 2:
+                                waveform = waveform[:, 0]
                             waveform = np.expand_dims(waveform, axis=0)
                             waveform = np.expand_dims(waveform, axis=0)
                             waveform = torch.tensor(waveform)
                             waveform = torch.tensor(waveform)
                         if self.frontend_conf is not None:
                         if self.frontend_conf is not None:

+ 5 - 3
funasr/utils/wav_utils.py

@@ -166,7 +166,9 @@ def compute_fbank(wav_file,
         try:
         try:
             waveform, audio_sr = torchaudio.load(wav_file)
             waveform, audio_sr = torchaudio.load(wav_file)
         except:
         except:
-            waveform, audio_sr = soundfile.read(wav_file)
+            waveform, audio_sr = soundfile.read(wav_file, dtype='float32')
+            if waveform.ndim == 2:
+                waveform = waveform[:, 0]
             waveform = torch.tensor(np.expand_dims(waveform, axis=0))
             waveform = torch.tensor(np.expand_dims(waveform, axis=0))
         waveform = waveform * (1 << 15)
         waveform = waveform * (1 << 15)
         waveform = torch_resample(waveform, audio_sr, model_sr)
         waveform = torch_resample(waveform, audio_sr, model_sr)
@@ -187,9 +189,9 @@ def compute_fbank(wav_file,
 
 
 def wav2num_frame(wav_path, frontend_conf):
 def wav2num_frame(wav_path, frontend_conf):
     try:
     try:
-        waveform, audio_sr = torchaudio.load(wav_file)
+        waveform, sampling_rate = torchaudio.load(wav_path)
     except:
     except:
-        waveform, audio_sr = soundfile.read(wav_file)
+        waveform, sampling_rate = soundfile.read(wav_path)
         waveform = torch.tensor(np.expand_dims(waveform, axis=0))
         waveform = torch.tensor(np.expand_dims(waveform, axis=0))
     speech_length = (waveform.shape[1] / sampling_rate) * 1000.
     speech_length = (waveform.shape[1] / sampling_rate) * 1000.
     n_frames = (waveform.shape[1] * 1000.0) / (sampling_rate * frontend_conf["frame_shift"] * frontend_conf["lfr_n"])
     n_frames = (waveform.shape[1] * 1000.0) / (sampling_rate * frontend_conf["frame_shift"] * frontend_conf["lfr_n"])