Author: 游雁 — committed 2 years ago
Parent commit
Commit: 813027835e

+ 2 - 0
funasr/bin/asr_inference.py

@@ -346,6 +346,8 @@ def inference_modelscope(
     **kwargs,
 ):
     assert check_argument_types()
+    ncpu = kwargs.get("ncpu", 1)
+    torch.set_num_threads(ncpu)
     if batch_size > 1:
         raise NotImplementedError("batch decoding is not implemented")
     if word_lm_train_config is not None:

+ 0 - 5
funasr/bin/asr_inference_launch.py

@@ -1,9 +1,4 @@
 #!/usr/bin/env python3
-# Copyright ESPnet (https://github.com/espnet/espnet). All Rights Reserved.
-#  Apache 2.0  (http://www.apache.org/licenses/LICENSE-2.0)
-
-import torch
-torch.set_num_threads(1)
 
 import argparse
 import logging

+ 2 - 0
funasr/bin/asr_inference_mfcca.py

@@ -472,6 +472,8 @@ def inference_modelscope(
     **kwargs,
 ):
     assert check_argument_types()
+    ncpu = kwargs.get("ncpu", 1)
+    torch.set_num_threads(ncpu)
     if batch_size > 1:
         raise NotImplementedError("batch decoding is not implemented")
     if word_lm_train_config is not None:

+ 3 - 1
funasr/bin/asr_inference_paraformer.py

@@ -612,7 +612,9 @@ def inference_modelscope(
         **kwargs,
 ):
     assert check_argument_types()
-
+    ncpu = kwargs.get("ncpu", 1)
+    torch.set_num_threads(ncpu)
+    
     if word_lm_train_config is not None:
         raise NotImplementedError("Word LM is not implemented")
     if ngpu > 1:

+ 2 - 0
funasr/bin/asr_inference_paraformer_streaming.py

@@ -536,6 +536,8 @@ def inference_modelscope(
         **kwargs,
 ):
     assert check_argument_types()
+    ncpu = kwargs.get("ncpu", 1)
+    torch.set_num_threads(ncpu)
 
     if word_lm_train_config is not None:
         raise NotImplementedError("Word LM is not implemented")

+ 2 - 0
funasr/bin/asr_inference_paraformer_vad.py

@@ -157,6 +157,8 @@ def inference_modelscope(
     **kwargs,
 ):
     assert check_argument_types()
+    ncpu = kwargs.get("ncpu", 1)
+    torch.set_num_threads(ncpu)
     
     if word_lm_train_config is not None:
         raise NotImplementedError("Word LM is not implemented")

+ 2 - 0
funasr/bin/asr_inference_paraformer_vad_punc.py

@@ -484,6 +484,8 @@ def inference_modelscope(
         **kwargs,
 ):
     assert check_argument_types()
+    ncpu = kwargs.get("ncpu", 1)
+    torch.set_num_threads(ncpu)
 
     if word_lm_train_config is not None:
         raise NotImplementedError("Word LM is not implemented")

+ 2 - 0
funasr/bin/asr_inference_uniasr.py

@@ -379,6 +379,8 @@ def inference_modelscope(
         **kwargs,
 ):
     assert check_argument_types()
+    ncpu = kwargs.get("ncpu", 1)
+    torch.set_num_threads(ncpu)
     if batch_size > 1:
         raise NotImplementedError("batch decoding is not implemented")
     if word_lm_train_config is not None:

+ 0 - 2
funasr/bin/diar_inference_launch.py

@@ -2,8 +2,6 @@
 # Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
 #  MIT License  (https://opensource.org/licenses/MIT)
 
-import torch
-torch.set_num_threads(1)
 
 import argparse
 import logging

+ 2 - 0
funasr/bin/eend_ola_inference.py

@@ -158,6 +158,8 @@ def inference_modelscope(
         **kwargs,
 ):
     assert check_argument_types()
+    ncpu = kwargs.get("ncpu", 1)
+    torch.set_num_threads(ncpu)
     if batch_size > 1:
         raise NotImplementedError("batch decoding is not implemented")
     if ngpu > 1:

+ 3 - 4
funasr/bin/lm_inference.py

@@ -89,10 +89,9 @@ def inference_modelscope(
     **kwargs,
 ):
     assert check_argument_types()
-    logging.basicConfig(
-        level=log_level,
-        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
-    )
+    ncpu = kwargs.get("ncpu", 1)
+    torch.set_num_threads(ncpu)
+
 
     if ngpu >= 1 and torch.cuda.is_available():
         device = "cuda"

+ 1 - 4
funasr/bin/lm_inference_launch.py

@@ -1,9 +1,6 @@
 #!/usr/bin/env python3
-# Copyright ESPnet (https://github.com/espnet/espnet). All Rights Reserved.
-#  Apache 2.0  (http://www.apache.org/licenses/LICENSE-2.0)
 
-import torch
-torch.set_num_threads(1)
+
 
 import argparse
 import logging

+ 0 - 4
funasr/bin/punc_inference_launch.py

@@ -1,9 +1,5 @@
 #!/usr/bin/env python3
-# Copyright ESPnet (https://github.com/espnet/espnet). All Rights Reserved.
-#  Apache 2.0  (http://www.apache.org/licenses/LICENSE-2.0)
 
-import torch
-torch.set_num_threads(1)
 
 import argparse
 import logging

+ 2 - 4
funasr/bin/punctuation_infer_vadrealtime.py

@@ -203,10 +203,8 @@ def inference_modelscope(
     **kwargs,
 ):
     assert check_argument_types()
-    logging.basicConfig(
-        level=log_level,
-        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
-    )
+    ncpu = kwargs.get("ncpu", 1)
+    torch.set_num_threads(ncpu)
 
     if ngpu >= 1 and torch.cuda.is_available():
         device = "cuda"

+ 2 - 0
funasr/bin/sond_inference.py

@@ -252,6 +252,8 @@ def inference_modelscope(
         **kwargs,
 ):
     assert check_argument_types()
+    ncpu = kwargs.get("ncpu", 1)
+    torch.set_num_threads(ncpu)
     if batch_size > 1:
         raise NotImplementedError("batch decoding is not implemented")
     if ngpu > 1:

+ 3 - 0
funasr/bin/sv_inference.py

@@ -179,6 +179,9 @@ def inference_modelscope(
         **kwargs,
 ):
     assert check_argument_types()
+    ncpu = kwargs.get("ncpu", 1)
+    torch.set_num_threads(ncpu)
+    
     if batch_size > 1:
         raise NotImplementedError("batch decoding is not implemented")
     if ngpu > 1:

+ 0 - 2
funasr/bin/sv_inference_launch.py

@@ -2,8 +2,6 @@
 # Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
 #  MIT License  (https://opensource.org/licenses/MIT)
 
-import torch
-torch.set_num_threads(1)
 
 import argparse
 import logging

+ 3 - 0
funasr/bin/tp_inference.py

@@ -179,6 +179,9 @@ def inference_modelscope(
         **kwargs,
 ):
     assert check_argument_types()
+    ncpu = kwargs.get("ncpu", 1)
+    torch.set_num_threads(ncpu)
+    
     if batch_size > 1:
         raise NotImplementedError("batch decoding is not implemented")
     if ngpu > 1:

+ 0 - 4
funasr/bin/tp_inference_launch.py

@@ -1,9 +1,5 @@
 #!/usr/bin/env python3
-# Copyright ESPnet (https://github.com/espnet/espnet). All Rights Reserved.
-#  Apache 2.0  (http://www.apache.org/licenses/LICENSE-2.0)
 
-import torch
-torch.set_num_threads(1)
 
 import argparse
 import logging

+ 3 - 0
funasr/bin/vad_inference.py

@@ -192,6 +192,9 @@ def inference_modelscope(
         **kwargs,
 ):
     assert check_argument_types()
+    ncpu = kwargs.get("ncpu", 1)
+    torch.set_num_threads(ncpu)
+    
     if batch_size > 1:
         raise NotImplementedError("batch decoding is not implemented")
     if ngpu > 1:

+ 0 - 5
funasr/bin/vad_inference_launch.py

@@ -1,9 +1,4 @@
 #!/usr/bin/env python3
-# Copyright ESPnet (https://github.com/espnet/espnet). All Rights Reserved.
-#  Apache 2.0  (http://www.apache.org/licenses/LICENSE-2.0)
-
-import torch
-torch.set_num_threads(1)
 
 import argparse
 import logging

+ 3 - 0
funasr/bin/vad_inference_online.py

@@ -151,6 +151,9 @@ def inference_modelscope(
         **kwargs,
 ):
     assert check_argument_types()
+    ncpu = kwargs.get("ncpu", 1)
+    torch.set_num_threads(ncpu)
+    
     if batch_size > 1:
         raise NotImplementedError("batch decoding is not implemented")
     if ngpu > 1: