Просмотр исходного кода

Merge pull request #58 from alibaba-damo-academy/dev

Dev
zhifu gao 3 года назад
Родитель
Commit
79bd015ab0

+ 2 - 0
funasr/bin/asr_inference.py

@@ -483,6 +483,7 @@ def inference_modelscope(
     ngram_weight: float = 0.9,
     nbest: int = 1,
     num_workers: int = 1,
+    param_dict: dict = None,
     **kwargs,
 ):
     assert check_argument_types()
@@ -533,6 +534,7 @@ def inference_modelscope(
     def _forward(data_path_and_name_and_type,
                  raw_inputs: Union[np.ndarray, torch.Tensor] = None,
                  output_dir_v2: Optional[str] = None,
+                 param_dict: dict = None,
                  ):
         # 3. Build data-iterator
         if data_path_and_name_and_type is None and raw_inputs is not None:

+ 26 - 1
funasr/bin/asr_inference_launch.py

@@ -223,6 +223,31 @@ def inference_launch(**kwargs):
         logging.info("Unknown decoding mode: {}".format(mode))
         return None
 
+def inference_launch_funasr(**kwargs):
+    if 'mode' in kwargs:
+        mode = kwargs['mode']
+    else:
+        logging.info("Unknown decoding mode.")
+        return None
+    if mode == "asr":
+        from funasr.bin.asr_inference import inference
+        return inference(**kwargs)
+    elif mode == "uniasr":
+        from funasr.bin.asr_inference_uniasr import inference
+        return inference(**kwargs)
+    elif mode == "paraformer":
+        from funasr.bin.asr_inference_paraformer import inference
+        return inference(**kwargs)
+    elif mode == "paraformer_vad_punc":
+        from funasr.bin.asr_inference_paraformer_vad_punc import inference
+        return inference(**kwargs)
+    elif mode == "vad":
+        from funasr.bin.vad_inference import inference
+        return inference(**kwargs)
+    else:
+        logging.info("Unknown decoding mode: {}".format(mode))
+        return None
+
 
 def main(cmd=None):
     print(get_commandline_args(), file=sys.stderr)
@@ -251,7 +276,7 @@ def main(cmd=None):
         os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
         os.environ["CUDA_VISIBLE_DEVICES"] = gpuid
 
-    inference_launch(**kwargs)
+    inference_launch_funasr(**kwargs)
 
 
 if __name__ == "__main__":

+ 2 - 0
funasr/bin/asr_inference_paraformer.py

@@ -529,6 +529,7 @@ def inference_modelscope(
         nbest: int = 1,
         num_workers: int = 1,
         output_dir: Optional[str] = None,
+        param_dict: dict = None,
         **kwargs,
 ):
     assert check_argument_types()
@@ -578,6 +579,7 @@ def inference_modelscope(
             data_path_and_name_and_type,
             raw_inputs: Union[np.ndarray, torch.Tensor] = None,
             output_dir_v2: Optional[str] = None,
+            param_dict: dict = None,
     ):
         # 3. Build data-iterator
         if data_path_and_name_and_type is None and raw_inputs is not None:

+ 2 - 6
funasr/bin/asr_inference_paraformer_vad_punc.py

@@ -659,12 +659,8 @@ def inference_modelscope(
                         punc_id_list = ""
                         text_postprocessed_punc = ""
 
-                    item = {'key': key, 'value': text_postprocessed_punc_time_stamp, 'text': text_postprocessed,
-                            'time_stamp': time_stamp_postprocessed, 'punc': punc_id_list, 'token': token}
-                    if outputs_dict:
-                        item = {'text_punc': text_postprocessed_punc, 'text': text_postprocessed,
-                                'punc_id': punc_id_list, 'token': token, 'time_stamp': time_stamp_postprocessed}
-                        item = {'key': key, 'value': item}
+                    item = {'key': key, 'value': text_postprocessed_punc, 'text_postprocessed': text_postprocessed,
+                            'time_stamp': time_stamp_postprocessed, 'token': token}
                     asr_result_list.append(item)
                     finish_count += 1
                     # asr_utils.print_progress(finish_count / file_count)

+ 2 - 0
funasr/bin/asr_inference_uniasr.py

@@ -521,6 +521,7 @@ def inference_modelscope(
         token_num_relax: int = 1,
         decoding_ind: int = 0,
         decoding_mode: str = "model1",
+        param_dict: dict = None,
         **kwargs,
 ):
     assert check_argument_types()
@@ -574,6 +575,7 @@ def inference_modelscope(
     def _forward(data_path_and_name_and_type,
                  raw_inputs: Union[np.ndarray, torch.Tensor] = None,
                  output_dir_v2: Optional[str] = None,
+                 param_dict: dict = None,
                  ):
         # 3. Build data-iterator
         if data_path_and_name_and_type is None and raw_inputs is not None:

+ 17 - 15
funasr/bin/sv_inference.py

@@ -171,6 +171,7 @@ def inference_modelscope(
         streaming: bool = False,
         embedding_node: str = "resnet1_dense",
         sv_threshold: float = 0.9465,
+        param_dict: Optional[dict] = None,
         **kwargs,
 ):
     assert check_argument_types()
@@ -183,6 +184,7 @@ def inference_modelscope(
         level=log_level,
         format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
     )
+    logging.info("param_dict: {}".format(param_dict))
 
     if ngpu >= 1 and torch.cuda.is_available():
         device = "cuda"
@@ -212,7 +214,9 @@ def inference_modelscope(
             data_path_and_name_and_type: Sequence[Tuple[str, str, str]] = None,
             raw_inputs: Union[np.ndarray, torch.Tensor] = None,
             output_dir_v2: Optional[str] = None,
+            param_dict: Optional[dict] = None,
     ):
+        logging.info("param_dict: {}".format(param_dict))
         if data_path_and_name_and_type is None and raw_inputs is not None:
             if isinstance(raw_inputs, torch.Tensor):
                 raw_inputs = raw_inputs.numpy()
@@ -233,11 +237,10 @@ def inference_modelscope(
 
         # 7 .Start for-loop
         output_path = output_dir_v2 if output_dir_v2 is not None else output_dir
-        embd_fd, ref_emb_fd, score_fd = None, None, None
+        embd_writer, ref_embd_writer, score_writer = None, None, None
         if output_path is not None:
             os.makedirs(output_path, exist_ok=True)
-            embd_writer = WriteHelper("ark:{}/xvector.ark".format(output_path))
-            # embd_fd = open(os.path.join(output_path, "xvector.ark"), "wb")
+            embd_writer = WriteHelper("ark,scp:{}/xvector.ark,{}/xvector.scp".format(output_path, output_path))
         sv_result_list = []
         for keys, batch in loader:
             assert isinstance(batch, dict), type(batch)
@@ -249,6 +252,7 @@ def inference_modelscope(
             embedding, ref_embedding, score = speech2xvector(**batch)
             # Only supporting batch_size==1
             key = keys[0]
+            normalized_score = 0.0
             if score is not None:
                 score = score.item()
                 normalized_score = max(score - sv_threshold, 0.0) / (1.0 - sv_threshold) * 100.0
@@ -257,23 +261,21 @@ def inference_modelscope(
                 item = {"key": key, "value": embedding.squeeze(0).cpu().numpy()}
             sv_result_list.append(item)
             if output_path is not None:
-                # kaldiio.save_mat(embd_fd, embedding[0].cpu().numpy(), key)
                 embd_writer(key, embedding[0].cpu().numpy())
                 if ref_embedding is not None:
-                    if ref_emb_fd is None:
-                        # ref_emb_fd = open(os.path.join(output_path, "ref_xvector.ark"), "wb")
-                        ref_embd_writer = WriteHelper("ark:{}/ref_xvector.ark".format(output_path))
-                        score_fd = open(os.path.join(output_path, "score.txt"), "w")
-                    # kaldiio.save_mat(ref_emb_fd, ref_embedding[0].cpu().numpy(), key)
+                    if ref_embd_writer is None:
+                        ref_embd_writer = WriteHelper(
+                            "ark,scp:{}/ref_xvector.ark,{}/ref_xvector.scp".format(output_path, output_path)
+                        )
+                        score_writer = open(os.path.join(output_path, "score.txt"), "w")
                     ref_embd_writer(key, ref_embedding[0].cpu().numpy())
-                    score_fd.write("{:.6f}\n".format(score.item()))
+                    score_writer.write("{} {:.6f}\n".format(key, normalized_score))
+
         if output_path is not None:
-            # embd_fd.close()
             embd_writer.close()
-            if ref_emb_fd is not None:
-                # ref_emb_fd.close()
-                ref_emb_fd.close()
-                score_fd.close()
+            if ref_embd_writer is not None:
+                ref_embd_writer.close()
+                score_writer.close()
 
         return sv_result_list