
funasr1.0 fsmn-vad streaming

游雁 2 years ago
parent commit 247c763286

+ 2 - 2
examples/industrial_data_pretraining/paraformer/demo.py

@@ -5,7 +5,7 @@
 
 from funasr import AutoModel
 
-model = AutoModel(model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch", model_revison="v2.0.0")
+model = AutoModel(model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch", model_revision="v2.0.0")
 
 res = model(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav")
 print(res)
@@ -13,7 +13,7 @@ print(res)
 
 from funasr import AutoFrontend
 
-frontend = AutoFrontend(model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch", model_revison="v2.0.0")
+frontend = AutoFrontend(model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch", model_revision="v2.0.0")
 
 fbanks = frontend(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav", batch_size=2)
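Both hunks in this file fix the same typo: the keyword argument was misspelled model_revison, so the v2.0.0 revision pin most likely never took effect. As a sketch, the demo reads like this after the change (imports combined for brevity; everything else is taken from the diff):

from funasr import AutoModel, AutoFrontend

# Offline recognition with the pinned model revision (keyword now spelled correctly).
model = AutoModel(model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
                  model_revision="v2.0.0")
res = model(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav")
print(res)

# Feature extraction only: AutoFrontend returns fbank batches for the same audio.
frontend = AutoFrontend(model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
                        model_revision="v2.0.0")
fbanks = frontend(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
                  batch_size=2)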

+ 1 - 1
examples/industrial_data_pretraining/paraformer_streaming/demo.py

@@ -9,7 +9,7 @@ chunk_size = [0, 10, 5] #[0, 10, 5] 600ms, [0, 8, 4] 480ms
 encoder_chunk_look_back = 4 #number of chunks to lookback for encoder self-attention
 decoder_chunk_look_back = 1 #number of encoder chunks to lookback for decoder cross-attention
 
-model = AutoModel(model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online", model_revison="v2.0.0")
+model = AutoModel(model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online", model_revision="v2.0.0")
 cache = {}
 res = model(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
             chunk_size=chunk_size,
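Per the comment in the hunk header, chunk_size maps directly to latency: [0, 10, 5] means 600 ms of audio per streaming chunk and [0, 8, 4] means 480 ms, while the two look-back values bound how much history the encoder and decoder attend to. A rough sketch of driving this configuration chunk by chunk, reusing the cache dict defined above; the local wav path, the 960-samples-per-60-ms stride at 16 kHz, and the keyword set beyond chunk_size are assumptions, not taken from this commit:

import soundfile  # assumption: audio is read from a local file instead of the URL above

chunk_size = [0, 10, 5]        # 10 x 60 ms = 600 ms of audio per chunk
encoder_chunk_look_back = 4    # chunks of history for encoder self-attention
decoder_chunk_look_back = 1    # encoder chunks of history for decoder cross-attention

speech, sample_rate = soundfile.read("asr_example_zh.wav")  # hypothetical local copy of the test wav
chunk_stride = chunk_size[1] * 960                          # samples per chunk, assuming 16 kHz input

cache = {}  # carries streaming state between successive calls
for start in range(0, len(speech), chunk_stride):
    speech_chunk = speech[start:start + chunk_stride]
    is_final = start + chunk_stride >= len(speech)
    # keyword set below mirrors the variables defined in the demo; treat it as an assumption
    res = model(input=speech_chunk,
                cache=cache,
                is_final=is_final,
                chunk_size=chunk_size,
                encoder_chunk_look_back=encoder_chunk_look_back,
                decoder_chunk_look_back=decoder_chunk_look_back)
    print(res)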

+ 1 - 1
funasr/bin/inference.py

@@ -83,7 +83,7 @@ def prepare_data_iterator(data_in, input_len=None, data_type=None, key=None):
     
     return key_list, data_list
 
-@hydra.main(config_name=None, version_base=None)
+@hydra.main(config_name=None)
 def main_hydra(cfg: DictConfig):
     def to_plain_list(cfg_item):
         if isinstance(cfg_item, ListConfig):
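Dropping version_base from @hydra.main() leaves Hydra's default compatibility handling in place, which presumably goes together with the hydra-core>=1.3.2 floor added in setup.py below. A minimal, self-contained sketch of the same entry-point pattern; the script name and the +key=value overrides in the comment are hypothetical, not from this repo:

import hydra
from omegaconf import DictConfig, OmegaConf

@hydra.main(config_name=None)
def main_hydra(cfg: DictConfig):
    # With no config file, cfg holds only the command-line overrides,
    # e.g.  python demo_cli.py +model=paraformer +input=asr_example_zh.wav
    print(OmegaConf.to_yaml(cfg))

if __name__ == "__main__":
    main_hydra()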

+ 1 - 1
funasr/bin/train.py

@@ -23,7 +23,7 @@ from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
 from funasr.download.download_from_hub import download_model
 from funasr.register import tables
 
-@hydra.main(config_name=None, version_base=None)
+@hydra.main(config_name=None)
 def main_hydra(kwargs: DictConfig):
 	if kwargs.get("debug", False):
 		import pdb; pdb.set_trace()

+ 1 - 1
setup.py

@@ -40,7 +40,7 @@ requirements = {
         "hdbscan",
         "umap",
         "jaconv",
-        "hydra-core",
+        "hydra-core>=1.3.2",
     ],
     # train: The modules invoked when training only.
     "train": [