Просмотр исходного кода

Merge pull request #565 from alibaba-damo-academy/dev_zly2

support in_cache
hnluo 2 года назад
Родитель
Commit
94bee90292
3 изменённых файла: 8 добавлений и 5 удалений
  1. 2 1
      funasr/bin/vad_infer.py
  2. 2 2
      funasr/models/e2e_vad.py
  3. 4 2
      funasr/models/frontend/wav_frontend.py

+ 2 - 1
funasr/bin/vad_infer.py

@@ -175,7 +175,8 @@ class Speech2VadSegmentOnline(Speech2VadSegment):
         batch_size = speech.shape[0]
         segments = [[]] * batch_size
         if self.frontend is not None:
-            feats, feats_len = self.frontend.forward(speech, speech_lengths, is_final)
+            reset = in_cache == dict()
+            feats, feats_len = self.frontend.forward(speech, speech_lengths, is_final, reset)
             fbanks, _ = self.frontend.get_fbank()
         else:
             raise Exception("Need to extract feats first, please configure frontend configuration")

+ 2 - 2
funasr/models/e2e_vad.py

@@ -226,7 +226,6 @@ class E2EVadModel(nn.Module):
                                                self.vad_opts.frame_in_ms)
         self.encoder = encoder
         # init variables
-        self.is_final = False
         self.data_buf_start_frame = 0
         self.frm_cnt = 0
         self.latest_confirmed_speech_frame = 0
@@ -257,7 +256,6 @@ class E2EVadModel(nn.Module):
         self.frontend = frontend
 
     def AllResetDetection(self):
-        self.is_final = False
         self.data_buf_start_frame = 0
         self.frm_cnt = 0
         self.latest_confirmed_speech_frame = 0
@@ -473,6 +471,8 @@ class E2EVadModel(nn.Module):
     def forward(self, feats: torch.Tensor, waveform: torch.tensor, in_cache: Dict[str, torch.Tensor] = dict(),
                 is_final: bool = False
                 ) -> Tuple[List[List[List[int]]], Dict[str, torch.Tensor]]:
+        if not in_cache:
+            self.AllResetDetection()
         self.waveform = waveform  # compute decibel for each frame
         self.ComputeDecibel()
         self.ComputeScores(feats, in_cache)

+ 4 - 2
funasr/models/frontend/wav_frontend.py

@@ -395,8 +395,10 @@ class WavFrontendOnline(AbsFrontend):
         return feats_pad, feats_lens, lfr_splice_frame_idxs
 
     def forward(
-            self, input: torch.Tensor, input_lengths: torch.Tensor, is_final: bool = False
+        self, input: torch.Tensor, input_lengths: torch.Tensor, is_final: bool = False, reset: bool = False
     ) -> Tuple[torch.Tensor, torch.Tensor]:
+        if reset:
+            self.cache_reset()
         batch_size = input.shape[0]
         assert batch_size == 1, 'we support to extract feature online only when the batch size is equal to 1 now'
         waveforms, feats, feats_lengths = self.forward_fbank(input, input_lengths)  # input shape: B T D
@@ -500,4 +502,4 @@ class WavFrontendMel23(AbsFrontend):
         feats_pad = pad_sequence(feats,
                                  batch_first=True,
                                  padding_value=0.0)
-        return feats_pad, feats_lens
+        return feats_pad, feats_lens