# model.py
import logging
from typing import Union, Dict, List, Tuple, Optional
import time

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda.amp import autocast

from funasr.models.scama.utils import sequence_mask
from funasr.losses.label_smoothing_loss import LabelSmoothingLoss
from funasr.models.ctc.ctc import CTC
from funasr.models.transformer.utils.add_sos_eos import add_sos_eos
from funasr.metrics.compute_acc import th_accuracy, compute_accuracy

# from funasr.models.e2e_asr_common import ErrorCalculator
from funasr.train_utils.device_funcs import force_gatherable
from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
from funasr.utils import postprocess_utils
from funasr.utils.datadir_writer import DatadirWriter
from funasr.register import tables


@tables.register("model_classes", "LLMASRNAR")
class LLMASRNAR(nn.Module):
    """Non-autoregressive LLM-based ASR: an audio encoder feeds an adaptor whose outputs
    are spliced into the prompt embeddings of an (optionally frozen) causal LLM.
    """

    def __init__(
        self,
        specaug: str = None,
        specaug_conf: dict = None,
        normalize: str = None,
        normalize_conf: dict = None,
        encoder: str = None,
        encoder_conf: dict = None,
        decoder: str = None,
        decoder_conf: dict = None,
        ctc: str = None,
        ctc_conf: dict = None,
        ctc_weight: float = 0.5,
        llm: str = None,
        llm_conf: dict = None,
        adaptor: str = None,
        adaptor_conf: dict = None,
        input_size: int = 80,
        vocab_size: int = -1,
        ignore_id: int = -1,
        blank_id: int = 0,
        sos: int = 1,
        eos: int = 2,
        lsm_weight: float = 0.0,
        length_normalized_loss: bool = False,
        report_cer: bool = True,
        report_wer: bool = True,
        sym_space: str = "<space>",
        sym_blank: str = "<blank>",
        # extract_feats_in_collect_stats: bool = True,
        share_embedding: bool = False,
        # preencoder: Optional[AbsPreEncoder] = None,
        # postencoder: Optional[AbsPostEncoder] = None,
        **kwargs,
    ):
        super().__init__()

        if specaug is not None:
            specaug_class = tables.specaug_classes.get(specaug)
            specaug = specaug_class(**specaug_conf)
        if normalize is not None:
            normalize_class = tables.normalize_classes.get(normalize)
            normalize = normalize_class(**normalize_conf)

        # audio encoder
        hub = encoder_conf.get("hub", None)
        if hub == "funasr":
            from funasr import AutoModel

            init_param_path = encoder_conf.get(
                "init_param_path",
                "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
            )
            model = AutoModel(model=init_param_path, model_revision="v2.0.4")
            # frontend = model.kwargs.get("frontend")
            model.model.decoder = None  # the Paraformer decoder is not used; the LLM does the decoding
            self.audio_encoder = model.model
            # self.frontend = frontend
        elif hub == "hf":
            pass
        else:
            encoder_class = tables.encoder_classes.get(encoder)
            encoder = encoder_class(input_size=input_size, **encoder_conf)
            encoder_output_size = encoder.output_size()

        # llm
        hub = llm_conf.get("hub", "hf")
        self.llm = None
        if hub == "hf":
            from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig

            init_param_path = llm_conf.get("init_param_path", "vicuna-7b-v1.5")
            model = AutoModelForCausalLM.from_pretrained(
                init_param_path,
                load_in_8bit=None,
                device_map=None,
                use_cache=None,
            )
            freeze = llm_conf.get("freeze", True)
            if freeze:
                for name, param in model.named_parameters():
                    param.requires_grad = False
                model.eval()
            self.llm = model

        # adaptor
        adaptor_class = tables.adaptor_classes.get(adaptor)
        adaptor = adaptor_class(**adaptor_conf)
        self.adaptor = adaptor

        self.blank_id = blank_id
        self.sos = sos if sos is not None else vocab_size - 1
        self.eos = eos if eos is not None else vocab_size - 1
        self.vocab_size = vocab_size
        self.ignore_id = ignore_id
        self.specaug = specaug
        self.normalize = normalize
        self.encoder = encoder

        self.criterion_att = LabelSmoothingLoss(
            size=vocab_size,
            padding_idx=ignore_id,
            smoothing=lsm_weight,
            normalize_length=length_normalized_loss,
        )
        #
        # if report_cer or report_wer:
        #     self.error_calculator = ErrorCalculator(
        #         token_list, sym_space, sym_blank, report_cer, report_wer
        #     )
        #
        self.error_calculator = None
        self.length_normalized_loss = length_normalized_loss
        self.beam_search = None

    def forward(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor,
        labels_ids: torch.Tensor,
        label_mask: torch.Tensor,
        audio_mask: torch.Tensor,
        **kwargs,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Audio encoder + adaptor + LLM forward pass; returns the training loss.

        Args:
            speech: (Batch, Length, ...)
            speech_lengths: (Batch,)
            text: (Batch, Length)
            text_lengths: (Batch,)
            input_ids: (Batch, TokenNum) LLM prompt token ids
            attention_mask: (Batch, TokenNum) attention mask for the LLM
            labels_ids: (Batch, TokenNum) LLM training labels (-100 marks ignored positions)
            label_mask: (Batch, TokenNum) mask over the label positions
            audio_mask: (Batch, TokenNum) True at positions to be replaced by audio embeddings
        """
        if len(text_lengths.size()) > 1:
            text_lengths = text_lengths[:, 0]
        if len(speech_lengths.size()) > 1:
            speech_lengths = speech_lengths[:, 0]
        batch_size = speech.shape[0]

        # audio encoder
        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths, audio_mask=audio_mask)
        # adaptor: project encoder outputs into the LLM embedding space
        encoder_out = self.adaptor(encoder_out)

        if input_ids is not None:
            input_ids[input_ids == -1] = 0
            # the embedding table sits at different depths depending on how the
            # HF causal LM wraps its backbone
            if hasattr(self.llm.model, "embed_tokens"):
                inputs_embeds = self.llm.model.embed_tokens(input_ids)
            elif hasattr(self.llm.model.model, "embed_tokens"):
                inputs_embeds = self.llm.model.model.embed_tokens(input_ids)
            else:
                inputs_embeds = self.llm.model.model.model.embed_tokens(input_ids)

        if audio_mask is not None:
            batch_size, token_num, dims = inputs_embeds.shape
            _, l, _ = encoder_out.shape
            # right-align the audio embeddings (leaving one trailing slot), overwrite the
            # masked positions, then drop the first position and append a zero vector so
            # the sequence length stays token_num
            encoder_outs_pad = F.pad(encoder_out, (0, 0, token_num - l - 1, 1, 0, 0), value=0.0)
            inputs_embeds = encoder_outs_pad * audio_mask[:, :, None] + inputs_embeds * (~audio_mask[:, :, None])
            inputs_embeds = F.pad(inputs_embeds[:, 1:, :], (0, 0, 0, 1, 0, 0), value=0.0)

        model_outputs = self.llm(inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids)
        loss = model_outputs.loss

        stats = {}
        if getattr(self, "metric", True):  # report token accuracy unless self.metric is set to False
            with torch.no_grad():
                preds = torch.argmax(model_outputs.logits, -1)
                acc_att = compute_accuracy(preds[:, :-1], labels_ids[:, 1:], ignore_label=-100)
                stats["acc"] = acc_att
        stats["loss"] = torch.clone(loss.detach())

        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        if self.length_normalized_loss:
            batch_size = int((text_lengths + 1).sum())
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        return loss, stats, weight

    def encode(
        self, speech: torch.Tensor, speech_lengths: torch.Tensor, **kwargs,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        audio_mask = kwargs.get("audio_mask")
        # number of audio placeholder tokens per utterance; may be None when no audio_mask is given
        audio_token_lengths = audio_mask.sum(-1) if audio_mask is not None else None
        batch = {"speech": speech, "speech_lengths": speech_lengths}
        enc, enc_lens = self.audio_encoder.encode(**batch)
        enc_mask = sequence_mask(enc_lens, enc.size(1), device=enc.device)[:, None, :]
        # the Paraformer predictor turns frame-level encodings into token-level acoustic embeddings
        pre_acoustic_embeds, pre_token_length, _, _ = self.audio_encoder.predictor(
            enc,
            mask=enc_mask,
            target_label_length=audio_token_lengths,
        )
        return pre_acoustic_embeds, pre_token_length

    def inference(
        self,
        data_in,
        data_lengths=None,
        key: list = None,
        tokenizer=None,
        frontend=None,
        **kwargs,
    ):
        if kwargs.get("batch_size", 1) > 1:
            raise NotImplementedError("batch decoding is not implemented")

        # init beam search
        if self.beam_search is None:
            logging.info("enable beam_search")
            self.init_beam_search(**kwargs)
            self.nbest = kwargs.get("nbest", 1)

        meta_data = {}
        if isinstance(data_in, torch.Tensor) and kwargs.get("data_type", "sound") == "fbank":  # fbank input
            speech, speech_lengths = data_in, data_lengths
            if len(speech.shape) < 3:
                speech = speech[None, :, :]
            if speech_lengths is None:
                # keep lengths as a tensor so the .to(device) call below works
                speech_lengths = torch.tensor([speech.shape[1]], dtype=torch.int32)
        else:
            # extract fbank feats
            time1 = time.perf_counter()
            audio_sample_list = load_audio_text_image_video(
                data_in,
                fs=frontend.fs,
                audio_fs=kwargs.get("fs", 16000),
                data_type=kwargs.get("data_type", "sound"),
                tokenizer=tokenizer,
            )
            time2 = time.perf_counter()
            meta_data["load_data"] = f"{time2 - time1:0.3f}"
            speech, speech_lengths = extract_fbank(
                audio_sample_list, data_type=kwargs.get("data_type", "sound"), frontend=frontend
            )
            time3 = time.perf_counter()
            meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
            meta_data["batch_data_time"] = speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000

        speech = speech.to(device=kwargs["device"])
        speech_lengths = speech_lengths.to(device=kwargs["device"])

        # Encoder
        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
        if isinstance(encoder_out, tuple):
            encoder_out = encoder_out[0]

        # c. Pass the encoder output to the beam search
        nbest_hyps = self.beam_search(
            x=encoder_out[0], maxlenratio=kwargs.get("maxlenratio", 0.0), minlenratio=kwargs.get("minlenratio", 0.0)
        )
        nbest_hyps = nbest_hyps[: self.nbest]

        results = []
        b, n, d = encoder_out.size()
        for i in range(b):
            for nbest_idx, hyp in enumerate(nbest_hyps):
                ibest_writer = None
                if kwargs.get("output_dir") is not None:
                    if not hasattr(self, "writer"):
                        self.writer = DatadirWriter(kwargs.get("output_dir"))
                    ibest_writer = self.writer[f"{nbest_idx + 1}best_recog"]

                # remove sos/eos and get results
                last_pos = -1
                if isinstance(hyp.yseq, list):
                    token_int = hyp.yseq[1:last_pos]
                else:
                    token_int = hyp.yseq[1:last_pos].tolist()
                # remove blank symbol id, which is assumed to be 0
                token_int = list(filter(lambda x: x != self.eos and x != self.sos and x != self.blank_id, token_int))

                # change integer ids to tokens
                token = tokenizer.ids2tokens(token_int)
                text = tokenizer.tokens2text(token)
                text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)

                result_i = {"key": key[i], "token": token, "text": text_postprocessed}
                results.append(result_i)

                if ibest_writer is not None:
                    ibest_writer["token"][key[i]] = " ".join(token)
                    ibest_writer["text"][key[i]] = text_postprocessed

        return results, meta_data
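

if __name__ == "__main__":
    # Minimal self-contained sketch (not part of FunASR): it only illustrates the
    # embedding-splice step from LLMASRNAR.forward, where adaptor outputs are written
    # into the prompt-embedding positions flagged by `audio_mask` and the sequence is
    # then shifted left by one with a zero vector appended. All sizes are toy values.
    torch.manual_seed(0)
    batch, token_num, dims, num_audio_tokens = 2, 8, 16, 4
    inputs_embeds = torch.randn(batch, token_num, dims)       # LLM embeddings of the prompt
    encoder_out = torch.randn(batch, num_audio_tokens, dims)  # adaptor output (audio embeddings)
    audio_mask = torch.zeros(batch, token_num, dtype=torch.bool)
    audio_mask[:, token_num - num_audio_tokens - 1 : token_num - 1] = True  # audio placeholder slots

    # same three operations as in forward()
    encoder_outs_pad = F.pad(encoder_out, (0, 0, token_num - num_audio_tokens - 1, 1, 0, 0), value=0.0)
    fused = encoder_outs_pad * audio_mask[:, :, None] + inputs_embeds * (~audio_mask[:, :, None])
    fused = F.pad(fused[:, 1:, :], (0, 0, 0, 1, 0, 0), value=0.0)
    print(fused.shape)  # torch.Size([2, 8, 16])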