语帆 2 years ago
parent commit e59ec16e6a
2 changed files with 1 addition and 9 deletions
  1. funasr/auto/auto_model.py (+0 -2)
  2. funasr/models/lcbnet/model.py (+1 -7)

+ 0 - 2
funasr/auto/auto_model.py

@@ -234,11 +234,9 @@ class AutoModel:
         
             time1 = time.perf_counter()
             with torch.no_grad():
-                pdb.set_trace()
                 results, meta_data = model.inference(**batch, **kwargs)
             time2 = time.perf_counter()
             
-            pdb.set_trace()
             asr_result_list.extend(results)
 
             # batch_data_time = time_per_frame_s * data_batch_i["speech_lengths"].sum().item()

+ 1 - 7
funasr/models/lcbnet/model.py

@@ -181,8 +181,7 @@ class LCBNet(nn.Module):
                 text: (Batch, Length)
                 text_lengths: (Batch,)
         """
-        # import pdb;
-        # pdb.set_trace()
+
         if len(text_lengths.size()) > 1:
             text_lengths = text_lengths[:, 0]
         if len(speech_lengths.size()) > 1:
@@ -449,7 +448,6 @@ class LCBNet(nn.Module):
         ocr, ocr_lens, _ = self.text_encoder(ocr, ocr_lengths)
         fusion_out, _, _, _ = self.fusion_encoder(encoder_out,None, ocr, None)
         encoder_out = encoder_out + fusion_out
-        pdb.set_trace()
         # c. Passed the encoder result and the beam search
         nbest_hyps = self.beam_search(
             x=encoder_out[0], maxlenratio=kwargs.get("maxlenratio", 0.0), minlenratio=kwargs.get("minlenratio", 0.0)
@@ -457,7 +455,6 @@ class LCBNet(nn.Module):
         
         nbest_hyps = nbest_hyps[: self.nbest]
 
-        pdb.set_trace()
         results = []
         b, n, d = encoder_out.size()
         for i in range(b):
@@ -479,12 +476,9 @@ class LCBNet(nn.Module):
                 # remove blank symbol id, which is assumed to be 0
                 token_int = list(filter(lambda x: x != self.eos and x != self.sos and x != self.blank_id, token_int))
                 
-                pdb.set_trace()
                 # Change integer-ids to tokens
                 token = tokenizer.ids2tokens(token_int)
-                pdb.set_trace()
                 text = tokenizer.tokens2text(token)
-                pdb.set_trace()
 
                 text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)
                 result_i = {"key": key[i], "token": token, "text": text_postprocessed}
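For context: pdb.set_trace() suspends the process and waits for interactive input, so any of the breakpoints removed here would hang a non-interactive run of model.inference(**batch, **kwargs). If a breakpoint needs to stay in the tree, a common pattern is to gate it behind an environment variable. A minimal sketch, assuming a hypothetical FUNASR_DEBUG variable and maybe_breakpoint helper (neither is part of FunASR):

import os
import pdb

def maybe_breakpoint():
    # Hypothetical helper, not part of FunASR: drop into the debugger
    # only when FUNASR_DEBUG=1 is set, so a stray call can never block
    # batch inference in production.
    if os.environ.get("FUNASR_DEBUG") == "1":
        pdb.set_trace()

With such a guard, the deleted lines would have been inert maybe_breakpoint() calls by default rather than hard stops.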