
support vad_inference_online

凌匀 3 years ago
parent commit 6da711a8d3

+ 32 - 0
egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/infer_online.py

@@ -0,0 +1,32 @@
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+import soundfile
+
+
+if __name__ == '__main__':
+    output_dir = None
+    inference_pipeline = pipeline(
+        task=Tasks.voice_activity_detection,
+        model="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",
+        model_revision='v1.1.9',
+        output_dir=output_dir,
+        batch_size=1,
+    )
+    speech, sample_rate = soundfile.read("./vad_example_16k.wav")
+    speech_length = speech.shape[0]
+
+    sample_offset = 0
+    # 100 ms per chunk: 1600 samples at 16 kHz
+    step = 160 * 10
+    param_dict = {'in_cache': dict()}
+    # range() fixes its stride at creation; shrinking `step` inside the loop
+    # only shortens the final slice, not the iteration stride
+    for sample_offset in range(0, speech_length, min(step, speech_length - sample_offset)):
+        if sample_offset + step >= speech_length - 1:
+            step = speech_length - sample_offset
+            is_final = True
+        else:
+            is_final = False
+        param_dict['is_final'] = is_final
+        segments_result = inference_pipeline(audio_in=speech[sample_offset: sample_offset + step],
+                                             param_dict=param_dict)
+        print(segments_result)
+
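
The `range()` pattern above works only because Python fixes a range's stride when the object is created: reassigning `step` inside the body shortens the final slice but never changes the stride. The same 100 ms chunking reads more directly as a generator; a sketch with a hypothetical helper name (not part of this commit):

def stream_chunks(speech, step):
    """Yield (chunk, is_final) pairs of at most `step` samples each."""
    offset = 0
    while offset < len(speech):
        end = min(offset + step, len(speech))
        yield speech[offset:end], end == len(speech)
        offset = end

# equivalent demo loop:
# for chunk, is_final in stream_chunks(speech, 160 * 10):
#     param_dict['is_final'] = is_final
#     print(inference_pipeline(audio_in=chunk, param_dict=param_dict))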

+ 32 - 0
egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/infer_online.py

@@ -0,0 +1,32 @@
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+import soundfile
+
+
+if __name__ == '__main__':
+    output_dir = './output_dir'
+    inference_pipeline = pipeline(
+        task=Tasks.voice_activity_detection,
+        model="damo/speech_fsmn_vad_zh-cn-8k-common",
+        model_revision='v1.1.9',
+        output_dir=output_dir,
+        batch_size=1,
+    )
+    speech, sample_rate = soundfile.read("./vad_example_8k.wav")
+    speech_length = speech.shape[0]
+
+    sample_offset = 0
+    # 100 ms per chunk: 800 samples at 8 kHz
+    step = 80 * 10
+    param_dict = {'in_cache': dict()}
+    for sample_offset in range(0, speech_length, min(step, speech_length - sample_offset)):
+        if sample_offset + step >= speech_length - 1:
+            step = speech_length - sample_offset
+            is_final = True
+        else:
+            is_final = False
+        param_dict['is_final'] = is_final
+        segments_result = inference_pipeline(audio_in=speech[sample_offset: sample_offset + step],
+                                             param_dict=param_dict)
+        print(segments_result)
+
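
The only differences from the 16 kHz demo are the model ID and the chunk size: both scripts feed 100 ms of audio per call, so the step is just the sample rate times 0.1 s. A one-line sketch of that relationship (hypothetical helper, assuming the 100 ms chunking used by both scripts):

def chunk_step(sample_rate_hz: int, chunk_ms: int = 100) -> int:
    # 16000 Hz -> 160 * 10 = 1600 samples; 8000 Hz -> 80 * 10 = 800 samples
    return sample_rate_hz * chunk_ms // 1000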

+ 344 - 0
funasr/bin/vad_inference_online.py

@@ -0,0 +1,344 @@
+import argparse
+import logging
+import sys
+import json
+from pathlib import Path
+from typing import Any
+from typing import List
+from typing import Optional
+from typing import Sequence
+from typing import Tuple
+from typing import Union
+from typing import Dict
+
+import numpy as np
+import torch
+from typeguard import check_argument_types
+from typeguard import check_return_type
+
+from funasr.fileio.datadir_writer import DatadirWriter
+from funasr.tasks.vad import VADTask
+from funasr.torch_utils.device_funcs import to_device
+from funasr.torch_utils.set_all_random_seed import set_all_random_seed
+from funasr.utils import config_argparse
+from funasr.utils.cli_utils import get_commandline_args
+from funasr.utils.types import str2bool
+from funasr.utils.types import str2triple_str
+from funasr.utils.types import str_or_none
+from funasr.models.frontend.wav_frontend import WavFrontendOnline
+from funasr.models.frontend.wav_frontend import WavFrontend
+from funasr.bin.vad_inference import Speech2VadSegment
+
+header_colors = '\033[95m'
+end_colors = '\033[0m'
+
+global_asr_language: str = 'zh-cn'
+global_sample_rate: Union[int, Dict[Any, int]] = {
+    'audio_fs': 16000,
+    'model_fs': 16000
+}
+
+
+class Speech2VadSegmentOnline(Speech2VadSegment):
+    """Speech2VadSegmentOnline class
+
+    Examples:
+        >>> import soundfile
+        >>> speech2segment = Speech2VadSegmentOnline("vad_config.yml", "vad.pt")
+        >>> audio, rate = soundfile.read("speech.wav")
+        >>> speech2segment(audio)
+        [[10, 230], [245, 450], ...]
+
+    """
+    def __init__(self, **kwargs):
+        super(Speech2VadSegmentOnline, self).__init__(**kwargs)
+        vad_cmvn_file = kwargs.get('vad_cmvn_file', None)
+        self.frontend = None
+        if self.vad_infer_args.frontend is not None:
+            self.frontend = WavFrontendOnline(cmvn_file=vad_cmvn_file, **self.vad_infer_args.frontend_conf)
+
+    @torch.no_grad()
+    def __call__(
+            self, speech: Union[torch.Tensor, np.ndarray], speech_lengths: Union[torch.Tensor, np.ndarray] = None,
+            in_cache: Dict[str, torch.Tensor] = dict(), is_final: bool = False
+    ) -> Tuple[torch.Tensor, List[List[int]], torch.Tensor]:
+        """Inference
+
+        Args:
+            speech: Input speech data
+        Returns:
+            fbanks, segments, in_cache
+
+        """
+        assert check_argument_types()
+
+        # Input as audio signal
+        if isinstance(speech, np.ndarray):
+            speech = torch.tensor(speech)
+        batch_size = speech.shape[0]
+        segments = [[] for _ in range(batch_size)]  # independent lists, no aliasing
+        if self.frontend is not None:
+            feats, feats_len = self.frontend.forward(speech, speech_lengths, is_final)
+            fbanks, _ = self.frontend.get_fbank()
+        else:
+            raise Exception("Need to extract feats first, please configure frontend configuration")
+        if feats.shape[0]:
+            feats = to_device(feats, device=self.device)
+            feats_len = feats_len.int()
+            waveforms = self.frontend.get_waveforms()
+
+            batch = {
+                "feats": feats,
+                "waveform": waveforms,
+                "in_cache": in_cache,
+                "is_final": is_final
+            }
+            # a. To device
+            batch = to_device(batch, device=self.device)
+            segments, in_cache = self.vad_model(**batch)
+            # in_cache.update(batch['in_cache'])
+            # in_cache = {key: value for key, value in batch['in_cache'].items()}
+        return fbanks, segments, in_cache
+
+
+def inference(
+        batch_size: int,
+        ngpu: int,
+        log_level: Union[int, str],
+        data_path_and_name_and_type,
+        vad_infer_config: Optional[str],
+        vad_model_file: Optional[str],
+        vad_cmvn_file: Optional[str] = None,
+        raw_inputs: Union[np.ndarray, torch.Tensor] = None,
+        key_file: Optional[str] = None,
+        allow_variable_data_keys: bool = False,
+        output_dir: Optional[str] = None,
+        dtype: str = "float32",
+        seed: int = 0,
+        num_workers: int = 1,
+        **kwargs,
+):
+    inference_pipeline = inference_modelscope(
+        batch_size=batch_size,
+        ngpu=ngpu,
+        log_level=log_level,
+        vad_infer_config=vad_infer_config,
+        vad_model_file=vad_model_file,
+        vad_cmvn_file=vad_cmvn_file,
+        key_file=key_file,
+        allow_variable_data_keys=allow_variable_data_keys,
+        output_dir=output_dir,
+        dtype=dtype,
+        seed=seed,
+        num_workers=num_workers,
+        **kwargs,
+    )
+    return inference_pipeline(data_path_and_name_and_type, raw_inputs)
+
+
+def inference_modelscope(
+        batch_size: int,
+        ngpu: int,
+        log_level: Union[int, str],
+        # data_path_and_name_and_type,
+        vad_infer_config: Optional[str],
+        vad_model_file: Optional[str],
+        vad_cmvn_file: Optional[str] = None,
+        # raw_inputs: Union[np.ndarray, torch.Tensor] = None,
+        key_file: Optional[str] = None,
+        allow_variable_data_keys: bool = False,
+        output_dir: Optional[str] = None,
+        dtype: str = "float32",
+        seed: int = 0,
+        num_workers: int = 1,
+        **kwargs,
+):
+    assert check_argument_types()
+    if batch_size > 1:
+        raise NotImplementedError("batch decoding is not implemented")
+    if ngpu > 1:
+        raise NotImplementedError("only single GPU decoding is supported")
+
+    logging.basicConfig(
+        level=log_level,
+        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
+    )
+
+    if ngpu >= 1 and torch.cuda.is_available():
+        device = "cuda"
+    else:
+        device = "cpu"
+
+    # 1. Set random-seed
+    set_all_random_seed(seed)
+
+    # 2. Build speech2vadsegment
+    speech2vadsegment_kwargs = dict(
+        vad_infer_config=vad_infer_config,
+        vad_model_file=vad_model_file,
+        vad_cmvn_file=vad_cmvn_file,
+        device=device,
+        dtype=dtype,
+    )
+    logging.info("speech2vadsegment_kwargs: {}".format(speech2vadsegment_kwargs))
+    speech2vadsegment = Speech2VadSegmentOnline(**speech2vadsegment_kwargs)
+
+    def _forward(
+            data_path_and_name_and_type,
+            raw_inputs: Union[np.ndarray, torch.Tensor] = None,
+            output_dir_v2: Optional[str] = None,
+            fs: dict = None,
+            param_dict: dict = None,
+    ):
+        # 3. Build data-iterator
+        if data_path_and_name_and_type is None and raw_inputs is not None:
+            if isinstance(raw_inputs, torch.Tensor):
+                raw_inputs = raw_inputs.numpy()
+            data_path_and_name_and_type = [raw_inputs, "speech", "waveform"]
+        loader = VADTask.build_streaming_iterator(
+            data_path_and_name_and_type,
+            dtype=dtype,
+            batch_size=batch_size,
+            key_file=key_file,
+            num_workers=num_workers,
+            preprocess_fn=VADTask.build_preprocess_fn(speech2vadsegment.vad_infer_args, False),
+            collate_fn=VADTask.build_collate_fn(speech2vadsegment.vad_infer_args, False),
+            allow_variable_data_keys=allow_variable_data_keys,
+            inference=True,
+        )
+
+        # Start the decoding loop
+        # FIXME(kamo): The output format should be discussed
+        output_path = output_dir_v2 if output_dir_v2 is not None else output_dir
+        if output_path is not None:
+            writer = DatadirWriter(output_path)
+            ibest_writer = writer["1best_recog"]
+        else:
+            writer = None
+            ibest_writer = None
+
+        vad_results = []
+        if param_dict is None:
+            param_dict = {}  # guard: the loop below writes param_dict['in_cache']
+        batch_in_cache = param_dict.get('in_cache', dict())
+        is_final = param_dict.get('is_final', False)
+        for keys, batch in loader:
+            assert isinstance(batch, dict), type(batch)
+            assert all(isinstance(s, str) for s in keys), keys
+            _bs = len(next(iter(batch.values())))
+            assert len(keys) == _bs, f"{len(keys)} != {_bs}"
+            batch['in_cache'] = batch_in_cache
+            batch['is_final'] = is_final
+
+            # do vad segment
+            _, results, param_dict['in_cache'] = speech2vadsegment(**batch)
+            # param_dict['in_cache'] = batch['in_cache']
+            if results:
+                for i, key in enumerate(keys):
+                    item = {'key': key, 'value': json.dumps(results[i])}
+                    vad_results.append(item)
+                    if writer is not None:
+                        ibest_writer["text"][key] = "{}".format(results[i])
+
+        return vad_results
+
+    return _forward
+
+
+def get_parser():
+    parser = config_argparse.ArgumentParser(
+        description="VAD Decoding",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+    )
+
+    # Note(kamo): Use '_' instead of '-' as separator.
+    # '-' is confusing if written in yaml.
+    parser.add_argument(
+        "--log_level",
+        type=lambda x: x.upper(),
+        default="INFO",
+        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
+        help="The verbose level of logging",
+    )
+
+    parser.add_argument("--output_dir", type=str, required=False)
+    parser.add_argument(
+        "--ngpu",
+        type=int,
+        default=0,
+        help="The number of gpus. 0 indicates CPU mode",
+    )
+    parser.add_argument(
+        "--gpuid_list",
+        type=str,
+        default="",
+        help="The visible gpus",
+    )
+    parser.add_argument("--seed", type=int, default=0, help="Random seed")
+    parser.add_argument(
+        "--dtype",
+        default="float32",
+        choices=["float16", "float32", "float64"],
+        help="Data type",
+    )
+    parser.add_argument(
+        "--num_workers",
+        type=int,
+        default=1,
+        help="The number of workers used for DataLoader",
+    )
+
+    group = parser.add_argument_group("Input data related")
+    group.add_argument(
+        "--data_path_and_name_and_type",
+        type=str2triple_str,
+        required=False,
+        action="append",
+    )
+    group.add_argument("--raw_inputs", type=list, default=None)
+    # example=[{'key':'EdevDEWdIYQ_0021','file':'/mnt/data/jiangyu.xzy/test_data/speech_io/SPEECHIO_ASR_ZH00007_zhibodaihuo/wav/EdevDEWdIYQ_0021.wav'}])
+    group.add_argument("--key_file", type=str_or_none)
+    group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
+
+    group = parser.add_argument_group("The model configuration related")
+    group.add_argument(
+        "--vad_infer_config",
+        type=str,
+        help="VAD infer configuration",
+    )
+    group.add_argument(
+        "--vad_model_file",
+        type=str,
+        help="VAD model parameter file",
+    )
+    group.add_argument(
+        "--vad_cmvn_file",
+        type=str,
+        help="Global cmvn file",
+    )
+
+    group = parser.add_argument_group("infer related")
+    group.add_argument(
+        "--batch_size",
+        type=int,
+        default=1,
+        help="The batch size for inference",
+    )
+
+    return parser
+
+
+def main(cmd=None):
+    print(get_commandline_args(), file=sys.stderr)
+    parser = get_parser()
+    args = parser.parse_args(cmd)
+    kwargs = vars(args)
+    kwargs.pop("config", None)
+    inference(**kwargs)
+
+
+if __name__ == "__main__":
+    main()
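
Because `inference_modelscope` builds the model once and returns the `_forward` closure, a caller can stream chunks through repeated calls while threading the cache via `param_dict`. A minimal sketch of programmatic use (all file paths are hypothetical and must point at a downloaded FSMN VAD model):

import numpy as np

forward = inference_modelscope(
    batch_size=1,
    ngpu=0,
    log_level="INFO",
    vad_infer_config="vad_config.yml",   # hypothetical local paths
    vad_model_file="vad.pt",
    vad_cmvn_file="vad.mvn",
)
param_dict = {'in_cache': dict(), 'is_final': False}
chunk = np.zeros(1600, dtype=np.float32)  # one 100 ms chunk at 16 kHz
results = forward(None, raw_inputs=chunk, param_dict=param_dict)
print(results)  # list of {'key': ..., 'value': ...} dicts, possibly empty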

+ 39 - 1
funasr/models/e2e_vad.py

@@ -215,6 +215,7 @@ class E2EVadModel(nn.Module):
         self.sil_pdf_ids = self.vad_opts.sil_pdf_ids
         self.noise_average_decibel = -100.0
         self.pre_end_silence_detected = False
+        self.next_seg = True
 
         self.output_data_buf = []
         self.output_data_buf_offset = 0
@@ -244,6 +245,7 @@ class E2EVadModel(nn.Module):
         self.sil_pdf_ids = self.vad_opts.sil_pdf_ids
         self.noise_average_decibel = -100.0
         self.pre_end_silence_detected = False
+        self.next_seg = True
 
         self.output_data_buf = []
         self.output_data_buf_offset = 0
@@ -441,7 +443,7 @@ class E2EVadModel(nn.Module):
                         - 1)) / self.vad_opts.noise_frame_num_used_for_snr
 
         return frame_state
-
+
     def forward(self, feats: torch.Tensor, waveform: torch.tensor, in_cache: Dict[str, torch.Tensor] = dict(),
                 is_final: bool = False
                 ) -> Tuple[List[List[List[int]]], Dict[str, torch.Tensor]]:
@@ -470,6 +472,42 @@ class E2EVadModel(nn.Module):
             self.AllResetDetection()
         return segments, in_cache
 
+    def forward_online(self, feats: torch.Tensor, waveform: torch.Tensor,
+                       in_cache: Dict[str, torch.Tensor] = dict(),
+                       is_final: bool = False
+                       ) -> Tuple[List[List[List[int]]], Dict[str, torch.Tensor]]:
+        self.waveform = waveform  # compute decibel for each frame
+        self.ComputeDecibel()
+        self.ComputeScores(feats, in_cache)
+        if not is_final:
+            self.DetectCommonFrames()
+        else:
+            self.DetectLastFrames()
+        segments = []
+        for batch_num in range(0, feats.shape[0]):  # only support batch_size = 1 now
+            segment_batch = []
+            if len(self.output_data_buf) > 0:
+                for i in range(self.output_data_buf_offset, len(self.output_data_buf)):
+                    if not self.output_data_buf[i].contain_seg_start_point:
+                        continue
+                    if not self.next_seg and not self.output_data_buf[i].contain_seg_end_point:
+                        continue
+                    start_ms = self.output_data_buf[i].start_ms if self.next_seg else -1
+                    if self.output_data_buf[i].contain_seg_end_point:
+                        end_ms = self.output_data_buf[i].end_ms
+                        self.next_seg = True
+                        self.output_data_buf_offset += 1
+                    else:
+                        end_ms = -1
+                        self.next_seg = False
+                    segment = [start_ms, end_ms]
+                    segment_batch.append(segment)
+            if segment_batch:
+                segments.append(segment_batch)
+        if is_final:
+            # reset class variables and clear the dict for the next query
+            self.AllResetDetection()
+        return segments, in_cache
+
     def DetectCommonFrames(self) -> int:
         if self.vad_state_machine == VadStateMachine.kVadInStateEndPointDetected:
             return 0
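
`forward_online` reports segments incrementally: `start_ms` is real only in the chunk where a segment opens and `end_ms` only where it closes; -1 marks the boundary that is still unknown, and `self.next_seg` remembers whether the current segment has already reported its start. A hedged consumer-side sketch that stitches the stream back into closed segments (helper name invented, example values too):

def merge_streaming_segments(chunk_outputs):
    """chunk_outputs: per-chunk lists of [start_ms, end_ms], -1 = not yet known."""
    merged, open_start = [], None
    for segment_batch in chunk_outputs:
        for start_ms, end_ms in segment_batch:
            if start_ms != -1:
                open_start = start_ms            # a segment opened in this chunk
            if end_ms != -1:
                merged.append([open_start, end_ms])
                open_start = None                # the open segment closed
    return merged

# e.g. [[[70, -1]], [[-1, 2340]], [[2620, 2980]]] -> [[70, 2340], [2620, 2980]]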

+ 261 - 21
funasr/models/frontend/wav_frontend.py

@@ -1,6 +1,6 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 # Part of the implementation is borrowed from espnet/espnet.
-
+from abc import ABC
 from typing import Tuple
 
 import numpy as np
@@ -33,9 +33,9 @@ def load_cmvn(cmvn_file):
     means = np.array(means_list).astype(np.float)
     vars = np.array(vars_list).astype(np.float)
     cmvn = np.array([means, vars])
-    cmvn = torch.as_tensor(cmvn) 
-    return cmvn 
-          
+    cmvn = torch.as_tensor(cmvn)
+    return cmvn
+
 
 def apply_cmvn(inputs, cmvn_file):  # noqa
     """
@@ -78,21 +78,22 @@ def apply_lfr(inputs, lfr_m, lfr_n):
 class WavFrontend(AbsFrontend):
     """Conventional frontend structure for ASR.
     """
+
     def __init__(
-        self,
-        cmvn_file: str = None,
-        fs: int = 16000,
-        window: str = 'hamming',
-        n_mels: int = 80,
-        frame_length: int = 25,
-        frame_shift: int = 10,
-        filter_length_min: int = -1,
-        filter_length_max: int = -1,
-        lfr_m: int = 1,
-        lfr_n: int = 1,
-        dither: float = 1.0,
-        snip_edges: bool = True,
-        upsacle_samples: bool = True,
+            self,
+            cmvn_file: str = None,
+            fs: int = 16000,
+            window: str = 'hamming',
+            n_mels: int = 80,
+            frame_length: int = 25,
+            frame_shift: int = 10,
+            filter_length_min: int = -1,
+            filter_length_max: int = -1,
+            lfr_m: int = 1,
+            lfr_n: int = 1,
+            dither: float = 1.0,
+            snip_edges: bool = True,
+            upsacle_samples: bool = True,
     ):
         assert check_argument_types()
         super().__init__()
@@ -135,11 +136,11 @@ class WavFrontend(AbsFrontend):
                               window_type=self.window,
                               sample_frequency=self.fs,
                               snip_edges=self.snip_edges)
-     
+
             if self.lfr_m != 1 or self.lfr_n != 1:
                 mat = apply_lfr(mat, self.lfr_m, self.lfr_n)
             if self.cmvn_file is not None:
-                mat = apply_cmvn(mat, self.cmvn_file) 
+                mat = apply_cmvn(mat, self.cmvn_file)
             feat_length = mat.size(0)
             feats.append(mat)
             feats_lens.append(feat_length)
@@ -171,7 +172,6 @@ class WavFrontend(AbsFrontend):
                               window_type=self.window,
                               sample_frequency=self.fs)
 
-
             feat_length = mat.size(0)
             feats.append(mat)
             feats_lens.append(feat_length)
@@ -204,3 +204,243 @@ class WavFrontend(AbsFrontend):
                                  batch_first=True,
                                  padding_value=0.0)
         return feats_pad, feats_lens
+
+
+class WavFrontendOnline(AbsFrontend):
+    """Conventional frontend structure for streaming ASR/VAD.
+    """
+
+    def __init__(
+            self,
+            cmvn_file: str = None,
+            fs: int = 16000,
+            window: str = 'hamming',
+            n_mels: int = 80,
+            frame_length: int = 25,
+            frame_shift: int = 10,
+            filter_length_min: int = -1,
+            filter_length_max: int = -1,
+            lfr_m: int = 1,
+            lfr_n: int = 1,
+            dither: float = 1.0,
+            snip_edges: bool = True,
+            upsacle_samples: bool = True,
+    ):
+        assert check_argument_types()
+        super().__init__()
+        self.fs = fs
+        self.window = window
+        self.n_mels = n_mels
+        self.frame_length = frame_length
+        self.frame_shift = frame_shift
+        self.frame_sample_length = int(self.frame_length * self.fs / 1000)
+        self.frame_shift_sample_length = int(self.frame_shift * self.fs / 1000)
+        self.filter_length_min = filter_length_min
+        self.filter_length_max = filter_length_max
+        self.lfr_m = lfr_m
+        self.lfr_n = lfr_n
+        self.cmvn_file = cmvn_file
+        self.dither = dither
+        self.snip_edges = snip_edges
+        self.upsacle_samples = upsacle_samples
+        self.waveforms = None
+        self.reserve_waveforms = None
+        self.fbanks = None
+        self.fbanks_lens = None
+        self.cmvn = None if self.cmvn_file is None else load_cmvn(self.cmvn_file)
+        self.input_cache = None
+        self.lfr_splice_cache = []
+
+    def output_size(self) -> int:
+        return self.n_mels * self.lfr_m
+
+    @staticmethod
+    def apply_cmvn(inputs: torch.Tensor, cmvn: torch.Tensor) -> torch.Tensor:
+        """
+        Apply CMVN with mvn data
+        """
+
+        device = inputs.device
+        dtype = inputs.dtype
+        frame, dim = inputs.shape
+
+        means = np.tile(cmvn[0:1, :dim], (frame, 1))
+        vars = np.tile(cmvn[1:2, :dim], (frame, 1))
+        inputs += torch.from_numpy(means).type(dtype).to(device)
+        inputs *= torch.from_numpy(vars).type(dtype).to(device)
+
+        return inputs.type(torch.float32)
+
+    @staticmethod
+    # inputs tensor has catted the cache tensor
+    # def apply_lfr(inputs: torch.Tensor, lfr_m: int, lfr_n: int, inputs_lfr_cache: torch.Tensor = None,
+    #               is_final: bool = False) -> Tuple[torch.Tensor, torch.Tensor, int]:
+    def apply_lfr(inputs: torch.Tensor, lfr_m: int, lfr_n: int, is_final: bool = False) -> Tuple[torch.Tensor, torch.Tensor, int]:
+        """
+        Apply lfr with data
+        """
+
+        LFR_inputs = []
+        # inputs = torch.vstack((inputs_lfr_cache, inputs))
+        T = inputs.shape[0]  # include the right context
+        T_lfr = int(np.ceil((T - (lfr_m - 1) // 2) / lfr_n))  # minus the right context: (lfr_m - 1) // 2
+        splice_idx = T_lfr
+        for i in range(T_lfr):
+            if lfr_m <= T - i * lfr_n:
+                LFR_inputs.append((inputs[i * lfr_n:i * lfr_n + lfr_m]).view(1, -1))
+            else:  # process last LFR frame
+                if is_final:
+                    num_padding = lfr_m - (T - i * lfr_n)
+                    frame = (inputs[i * lfr_n:]).view(-1)
+                    for _ in range(num_padding):
+                        frame = torch.hstack((frame, inputs[-1]))
+                    LFR_inputs.append(frame)
+                else:
+                    # update splice_idx and break the loop
+                    splice_idx = i
+                    break
+        splice_idx = min(T - 1, splice_idx * lfr_n)
+        lfr_splice_cache = inputs[splice_idx:, :]
+        LFR_outputs = torch.vstack(LFR_inputs)
+        return LFR_outputs.type(torch.float32), lfr_splice_cache, splice_idx
+
+    @staticmethod
+    def compute_frame_num(sample_length: int, frame_sample_length: int, frame_shift_sample_length: int) -> int:
+        frame_num = int((sample_length - frame_sample_length) / frame_shift_sample_length + 1)
+        return frame_num if frame_num >= 1 and sample_length >= frame_sample_length else 0
+
+    def forward_fbank(
+            self,
+            input: torch.Tensor,
+            input_lengths: torch.Tensor
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        batch_size = input.size(0)
+        if self.input_cache is None:
+            self.input_cache = torch.empty(0)
+        input = torch.cat((self.input_cache, input), dim=1)
+        frame_num = self.compute_frame_num(input.shape[-1], self.frame_sample_length, self.frame_shift_sample_length)
+        # cache the trailing samples that do not yet fill a whole frame
+        self.input_cache = input[:, -(input.shape[-1] - frame_num * self.frame_shift_sample_length):]
+        waveforms = torch.empty(0)
+        feats_pad = torch.empty(0)
+        feats_lens = torch.empty(0)
+        if frame_num:
+            waveforms = []
+            feats = []
+            feats_lens = []
+            for i in range(batch_size):
+                waveform = input[i]
+                # keep exactly the wave samples consumed by fbank extraction
+                waveforms.append(
+                    waveform[:((frame_num - 1) * self.frame_shift_sample_length + self.frame_sample_length)])
+                waveform = waveform * (1 << 15)
+                waveform = waveform.unsqueeze(0)
+                mat = kaldi.fbank(waveform,
+                                  num_mel_bins=self.n_mels,
+                                  frame_length=self.frame_length,
+                                  frame_shift=self.frame_shift,
+                                  dither=self.dither,
+                                  energy_floor=0.0,
+                                  window_type=self.window,
+                                  sample_frequency=self.fs)
+
+                feat_length = mat.size(0)
+                feats.append(mat)
+                feats_lens.append(feat_length)
+
+            waveforms = torch.stack(waveforms)
+            feats_lens = torch.as_tensor(feats_lens)
+            feats_pad = pad_sequence(feats,
+                                     batch_first=True,
+                                     padding_value=0.0)
+        self.fbanks = feats_pad
+        # keep an independent copy; the returned feats_lens may be modified downstream
+        self.fbanks_lens = feats_lens.clone()
+        return waveforms, feats_pad, feats_lens
+
+    def get_fbank(self) -> Tuple[torch.Tensor, torch.Tensor]:
+        return self.fbanks, self.fbanks_lens
+
+    def forward_lfr_cmvn(
+            self,
+            input: torch.Tensor,
+            input_lengths: torch.Tensor,
+            is_final: bool = False
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        batch_size = input.size(0)
+        feats = []
+        feats_lens = []
+        lfr_splice_frame_idxs = []
+        for i in range(batch_size):
+            mat = input[i, :input_lengths[i], :]
+            if self.lfr_m != 1 or self.lfr_n != 1:
+                # update self.lfr_splice_cache in self.apply_lfr
+                # mat, self.lfr_splice_cache[i], lfr_splice_frame_idx = self.apply_lfr(mat, self.lfr_m, self.lfr_n, self.lfr_splice_cache[i],
+                mat, self.lfr_splice_cache[i], lfr_splice_frame_idx = self.apply_lfr(mat, self.lfr_m, self.lfr_n, is_final)
+            if self.cmvn_file is not None:
+                mat = self.apply_cmvn(mat, self.cmvn)
+            feat_length = mat.size(0)
+            feats.append(mat)
+            feats_lens.append(feat_length)
+            lfr_splice_frame_idxs.append(lfr_splice_frame_idx)
+
+        feats_lens = torch.as_tensor(feats_lens)
+        feats_pad = pad_sequence(feats,
+                                 batch_first=True,
+                                 padding_value=0.0)
+        lfr_splice_frame_idxs = torch.as_tensor(lfr_splice_frame_idxs)
+        return feats_pad, feats_lens, lfr_splice_frame_idxs
+
+    def forward(
+            self, input: torch.Tensor, input_lengths: torch.Tensor, is_final: bool = False
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        batch_size = input.shape[0]
+        assert batch_size == 1, 'online feature extraction currently supports batch_size == 1 only'
+        waveforms, feats, feats_lengths = self.forward_fbank(input, input_lengths)  # input shape: B T D
+        if feats.shape[0]:
+            #if self.reserve_waveforms is None and self.lfr_m > 1:
+            #    self.reserve_waveforms = waveforms[:, :(self.lfr_m - 1) // 2 * self.frame_shift_sample_length]
+            self.waveforms = waveforms if self.reserve_waveforms is None else torch.cat((self.reserve_waveforms, waveforms), dim=1)
+            if not self.lfr_splice_cache:  # initialize the LFR splice cache
+                for i in range(batch_size):
+                    self.lfr_splice_cache.append(feats[i][0, :].unsqueeze(dim=0).repeat((self.lfr_m - 1) // 2, 1))
+            # proceed only once cached frames + incoming frames reach at least lfr_m
+            if feats_lengths[0] + self.lfr_splice_cache[0].shape[0] >= self.lfr_m:
+                lfr_splice_cache_tensor = torch.stack(self.lfr_splice_cache)  # B T D
+                feats = torch.cat((lfr_splice_cache_tensor, feats), dim=1)
+                feats_lengths += lfr_splice_cache_tensor[0].shape[0]
+                frame_from_waveforms = int((self.waveforms.shape[1] - self.frame_sample_length) / self.frame_shift_sample_length + 1)
+                minus_frame = (self.lfr_m - 1) // 2 if self.reserve_waveforms is None else 0
+                feats, feats_lengths, lfr_splice_frame_idxs = self.forward_lfr_cmvn(feats, feats_lengths, is_final)
+                if self.lfr_m == 1:
+                    self.reserve_waveforms = None
+                else:
+                    reserve_frame_idx = lfr_splice_frame_idxs[0] - minus_frame
+                    self.reserve_waveforms = self.waveforms[:, reserve_frame_idx * self.frame_shift_sample_length:frame_from_waveforms * self.frame_shift_sample_length]
+                    sample_length = (frame_from_waveforms - 1) * self.frame_shift_sample_length + self.frame_sample_length
+                    self.waveforms = self.waveforms[:, :sample_length]
+            else:
+                # update self.reserve_waveforms and self.lfr_splice_cache
+                self.reserve_waveforms = self.waveforms[:, :-(self.frame_sample_length - self.frame_shift_sample_length)]
+                for i in range(batch_size):
+                    self.lfr_splice_cache[i] = torch.cat((self.lfr_splice_cache[i], feats[i]), dim=0)
+                return torch.empty(0), feats_lengths
+        else:
+            if is_final:
+                self.waveforms = waveforms if self.reserve_waveforms is None else self.reserve_waveforms
+                feats = torch.stack(self.lfr_splice_cache)
+                feats_lengths = torch.zeros(batch_size, dtype=torch.int) + feats.shape[1]
+                feats, feats_lengths, _ = self.forward_lfr_cmvn(feats, feats_lengths, is_final)
+        if is_final:
+            self.cache_reset()
+        return feats, feats_lengths
+
+    def get_waveforms(self):
+        return self.waveforms
+
+    def cache_reset(self):
+        self.reserve_waveforms = None
+        self.input_cache = None
+        self.lfr_splice_cache = []
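
`WavFrontendOnline` keeps three pieces of state between calls: `input_cache` (samples that do not yet fill a frame), `lfr_splice_cache` (fbank frames waiting for LFR splicing), and `reserve_waveforms` (the waveform tail matching frames not yet consumed); `forward` returns an empty tensor until enough frames accumulate, and `is_final=True` flushes and resets all three. A minimal sketch of chunked feature extraction under those semantics (parameter values are illustrative, CMVN disabled; batch size must be 1):

import torch

frontend = WavFrontendOnline(cmvn_file=None, fs=16000, n_mels=80, lfr_m=5, lfr_n=1)
wav = torch.randn(1, 16000)                # 1 s of synthetic 16 kHz audio
chunks = wav.split(1600, dim=1)            # 100 ms per chunk
for i, chunk in enumerate(chunks):
    is_final = i == len(chunks) - 1
    feats, feats_lens = frontend.forward(chunk, torch.tensor([chunk.shape[1]]), is_final)
    if feats.shape[0]:                     # empty until the caches fill
        print(i, tuple(feats.shape), int(feats_lens[0]))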