speech_asr 2 года назад
Родитель
Commit
7161312271

+ 0 - 14
funasr/layers/abs_normalize.py

@@ -1,14 +0,0 @@
-from abc import ABC
-from abc import abstractmethod
-from typing import Tuple
-
-import torch
-
-
-class AbsNormalize(torch.nn.Module, ABC):
-    @abstractmethod
-    def forward(
-        self, input: torch.Tensor, input_lengths: torch.Tensor = None
-    ) -> Tuple[torch.Tensor, torch.Tensor]:
-        # return output, output_lengths
-        raise NotImplementedError

+ 1 - 4
funasr/layers/global_mvn.py

@@ -7,11 +7,8 @@ import torch
 from typeguard import check_argument_types
 
 from funasr.modules.nets_utils import make_pad_mask
-from funasr.layers.abs_normalize import AbsNormalize
-from funasr.layers.inversible_interface import InversibleInterface
 
-
-class GlobalMVN(AbsNormalize, InversibleInterface):
+class GlobalMVN(torch.nn.Module):
     """Apply global mean and variance normalization
 
     TODO(kamo): Make this class portable somehow

+ 0 - 14
funasr/layers/inversible_interface.py

@@ -1,14 +0,0 @@
-from abc import ABC
-from abc import abstractmethod
-from typing import Tuple
-
-import torch
-
-
-class InversibleInterface(ABC):
-    @abstractmethod
-    def inverse(
-        self, input: torch.Tensor, input_lengths: torch.Tensor = None
-    ) -> Tuple[torch.Tensor, torch.Tensor]:
-        # return output, output_lengths
-        raise NotImplementedError

+ 5 - 8
funasr/models/data2vec.py

@@ -13,12 +13,9 @@ import torch
 from typeguard import check_argument_types
 
 from funasr.layers.abs_normalize import AbsNormalize
-from funasr.models.encoder.abs_encoder import AbsEncoder
-from funasr.models.frontend.abs_frontend import AbsFrontend
 from funasr.models.preencoder.abs_preencoder import AbsPreEncoder
-from funasr.models.specaug.abs_specaug import AbsSpecAug
 from funasr.torch_utils.device_funcs import force_gatherable
-from funasr.train.abs_espnet_model import AbsESPnetModel
+from funasr.models.base_model import FunASRModel
 
 if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
     from torch.cuda.amp import autocast
@@ -29,16 +26,16 @@ else:
         yield
 
 
-class Data2VecPretrainModel(AbsESPnetModel):
+class Data2VecPretrainModel(FunASRModel):
     """Data2Vec Pretrain model"""
 
     def __init__(
             self,
-            frontend: Optional[AbsFrontend],
-            specaug: Optional[AbsSpecAug],
+            frontend: Optional[torch.nn.Module],
+            specaug: Optional[torch.nn.Module],
             normalize: Optional[AbsNormalize],
             preencoder: Optional[AbsPreEncoder],
-            encoder: AbsEncoder,
+            encoder: torch.nn.Module,
     ):
         assert check_argument_types()