# batch_beam_search.py
  1. """Parallel beam search module."""
  2. import logging
  3. from typing import Any
  4. from typing import Dict
  5. from typing import List
  6. from typing import NamedTuple
  7. from typing import Tuple
  8. import torch
  9. from torch.nn.utils.rnn import pad_sequence
  10. from funasr.modules.beam_search.beam_search import BeamSearch
  11. from funasr.modules.beam_search.beam_search import Hypothesis
  12. class BatchHypothesis(NamedTuple):
  13. """Batchfied/Vectorized hypothesis data type."""
  14. yseq: torch.Tensor = torch.tensor([]) # (batch, maxlen)
  15. score: torch.Tensor = torch.tensor([]) # (batch,)
  16. length: torch.Tensor = torch.tensor([]) # (batch,)
  17. scores: Dict[str, torch.Tensor] = dict() # values: (batch,)
  18. states: Dict[str, Dict] = dict()
  19. def __len__(self) -> int:
  20. """Return a batch size."""
  21. return len(self.length)
  22. class BatchBeamSearch(BeamSearch):
  23. """Batch beam search implementation."""
  24. def batchfy(self, hyps: List[Hypothesis]) -> BatchHypothesis:
  25. """Convert list to batch."""
  26. if len(hyps) == 0:
  27. return BatchHypothesis()
  28. return BatchHypothesis(
  29. yseq=pad_sequence(
  30. [h.yseq for h in hyps], batch_first=True, padding_value=self.eos
  31. ),
  32. length=torch.tensor([len(h.yseq) for h in hyps], dtype=torch.int64),
  33. score=torch.tensor([h.score for h in hyps]),
  34. scores={k: torch.tensor([h.scores[k] for h in hyps]) for k in self.scorers},
  35. states={k: [h.states[k] for h in hyps] for k in self.scorers},
  36. )
  37. def _batch_select(self, hyps: BatchHypothesis, ids: List[int]) -> BatchHypothesis:
  38. return BatchHypothesis(
  39. yseq=hyps.yseq[ids],
  40. score=hyps.score[ids],
  41. length=hyps.length[ids],
  42. scores={k: v[ids] for k, v in hyps.scores.items()},
  43. states={
  44. k: [self.scorers[k].select_state(v, i) for i in ids]
  45. for k, v in hyps.states.items()
  46. },
  47. )
  48. def _select(self, hyps: BatchHypothesis, i: int) -> Hypothesis:
  49. return Hypothesis(
  50. yseq=hyps.yseq[i, : hyps.length[i]],
  51. score=hyps.score[i],
  52. scores={k: v[i] for k, v in hyps.scores.items()},
  53. states={
  54. k: self.scorers[k].select_state(v, i) for k, v in hyps.states.items()
  55. },
  56. )
  57. def unbatchfy(self, batch_hyps: BatchHypothesis) -> List[Hypothesis]:
  58. """Revert batch to list."""
  59. return [
  60. Hypothesis(
  61. yseq=batch_hyps.yseq[i][: batch_hyps.length[i]],
  62. score=batch_hyps.score[i],
  63. scores={k: batch_hyps.scores[k][i] for k in self.scorers},
  64. states={
  65. k: v.select_state(batch_hyps.states[k], i)
  66. for k, v in self.scorers.items()
  67. },
  68. )
  69. for i in range(len(batch_hyps.length))
  70. ]
  71. def batch_beam(
  72. self, weighted_scores: torch.Tensor, ids: torch.Tensor
  73. ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
  74. """Batch-compute topk full token ids and partial token ids.
  75. Args:
  76. weighted_scores (torch.Tensor): The weighted sum scores for each tokens.
  77. Its shape is `(n_beam, self.vocab_size)`.
  78. ids (torch.Tensor): The partial token ids to compute topk.
  79. Its shape is `(n_beam, self.pre_beam_size)`.
  80. Returns:
  81. Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
  82. The topk full (prev_hyp, new_token) ids
  83. and partial (prev_hyp, new_token) ids.
  84. Their shapes are all `(self.beam_size,)`
  85. """
  86. top_ids = weighted_scores.view(-1).topk(self.beam_size)[1]
  87. # Because of the flatten above, `top_ids` is organized as:
  88. # [hyp1 * V + token1, hyp2 * V + token2, ..., hypK * V + tokenK],
  89. # where V is `self.n_vocab` and K is `self.beam_size`
  90. prev_hyp_ids = top_ids // self.n_vocab
  91. new_token_ids = top_ids % self.n_vocab
  92. return prev_hyp_ids, new_token_ids, prev_hyp_ids, new_token_ids
  93. def init_hyp(self, x: torch.Tensor) -> BatchHypothesis:
  94. """Get an initial hypothesis data.
  95. Args:
  96. x (torch.Tensor): The encoder output feature
  97. Returns:
  98. Hypothesis: The initial hypothesis.
  99. """
  100. init_states = dict()
  101. init_scores = dict()
  102. for k, d in self.scorers.items():
  103. init_states[k] = d.batch_init_state(x)
  104. init_scores[k] = 0.0
  105. return self.batchfy(
  106. [
  107. Hypothesis(
  108. score=0.0,
  109. scores=init_scores,
  110. states=init_states,
  111. yseq=torch.tensor([self.sos], device=x.device),
  112. )
  113. ]
  114. )
  115. def score_full(
  116. self, hyp: BatchHypothesis, x: torch.Tensor
  117. ) -> Tuple[Dict[str, torch.Tensor], Dict[str, Any]]:
  118. """Score new hypothesis by `self.full_scorers`.
  119. Args:
  120. hyp (Hypothesis): Hypothesis with prefix tokens to score
  121. x (torch.Tensor): Corresponding input feature
  122. Returns:
  123. Tuple[Dict[str, torch.Tensor], Dict[str, Any]]: Tuple of
  124. score dict of `hyp` that has string keys of `self.full_scorers`
  125. and tensor score values of shape: `(self.n_vocab,)`,
  126. and state dict that has string keys
  127. and state values of `self.full_scorers`
  128. """
  129. scores = dict()
  130. states = dict()
  131. for k, d in self.full_scorers.items():
  132. scores[k], states[k] = d.batch_score(hyp.yseq, hyp.states[k], x)
  133. return scores, states
  134. def score_partial(
  135. self, hyp: BatchHypothesis, ids: torch.Tensor, x: torch.Tensor
  136. ) -> Tuple[Dict[str, torch.Tensor], Dict[str, Any]]:
  137. """Score new hypothesis by `self.full_scorers`.
  138. Args:
  139. hyp (Hypothesis): Hypothesis with prefix tokens to score
  140. ids (torch.Tensor): 2D tensor of new partial tokens to score
  141. x (torch.Tensor): Corresponding input feature
  142. Returns:
  143. Tuple[Dict[str, torch.Tensor], Dict[str, Any]]: Tuple of
  144. score dict of `hyp` that has string keys of `self.full_scorers`
  145. and tensor score values of shape: `(self.n_vocab,)`,
  146. and state dict that has string keys
  147. and state values of `self.full_scorers`
  148. """
  149. scores = dict()
  150. states = dict()
  151. for k, d in self.part_scorers.items():
  152. scores[k], states[k] = d.batch_score_partial(
  153. hyp.yseq, ids, hyp.states[k], x
  154. )
  155. return scores, states
  156. def merge_states(self, states: Any, part_states: Any, part_idx: int) -> Any:
  157. """Merge states for new hypothesis.
  158. Args:
  159. states: states of `self.full_scorers`
  160. part_states: states of `self.part_scorers`
  161. part_idx (int): The new token id for `part_scores`
  162. Returns:
  163. Dict[str, torch.Tensor]: The new score dict.
  164. Its keys are names of `self.full_scorers` and `self.part_scorers`.
  165. Its values are states of the scorers.
  166. """
  167. new_states = dict()
  168. for k, v in states.items():
  169. new_states[k] = v
  170. for k, v in part_states.items():
  171. new_states[k] = v
  172. return new_states
    def search(self, running_hyps: BatchHypothesis, x: torch.Tensor) -> BatchHypothesis:
        """Search new tokens for running hypotheses and encoded speech x.

        Args:
            running_hyps (BatchHypothesis): Running hypotheses on beam
            x (torch.Tensor): Encoded speech feature (T, D)

        Returns:
            BatchHypothesis: Best sorted hypotheses
        """
        n_batch = len(running_hyps)
        part_ids = None  # no pre-beam
        # batch scoring
        weighted_scores = torch.zeros(
            n_batch, self.n_vocab, dtype=x.dtype, device=x.device
        )
        # x is shared by every hypothesis; expand adds a batch dim without copying.
        scores, states = self.score_full(running_hyps, x.expand(n_batch, *x.shape))
        for k in self.full_scorers:
            weighted_scores += self.weights[k] * scores[k]
        # partial scoring: pre-select the top pre_beam_size tokens per hypothesis
        if self.do_pre_beam:
            pre_beam_scores = (
                weighted_scores
                if self.pre_beam_score_key == "full"
                else scores[self.pre_beam_score_key]
            )
            part_ids = torch.topk(pre_beam_scores, self.pre_beam_size, dim=-1)[1]
        # NOTE(takaaki-hori): Unlike BeamSearch, we assume that score_partial returns
        # full-size score matrices, which has non-zero scores for part_ids and zeros
        # for others.
        part_scores, part_states = self.score_partial(running_hyps, part_ids, x)
        for k in self.part_scorers:
            weighted_scores += self.weights[k] * part_scores[k]
        # add previous hyp scores (broadcast over the vocab dimension)
        weighted_scores += running_hyps.score.to(
            dtype=x.dtype, device=x.device
        ).unsqueeze(1)
        # TODO(karita): do not use list. use batch instead
        # see also https://github.com/espnet/espnet/pull/1402#discussion_r354561029
        # update hyps: for each of the beam_size winners, rebuild a Hypothesis
        # from its source hypothesis plus the chosen new token.
        best_hyps = []
        prev_hyps = self.unbatchfy(running_hyps)
        for (
            full_prev_hyp_id,
            full_new_token_id,
            part_prev_hyp_id,
            part_new_token_id,
        ) in zip(*self.batch_beam(weighted_scores, part_ids)):
            prev_hyp = prev_hyps[full_prev_hyp_id]
            best_hyps.append(
                Hypothesis(
                    score=weighted_scores[full_prev_hyp_id, full_new_token_id],
                    yseq=self.append_token(prev_hyp.yseq, full_new_token_id),
                    scores=self.merge_scores(
                        prev_hyp.scores,
                        {k: v[full_prev_hyp_id] for k, v in scores.items()},
                        full_new_token_id,
                        {k: v[part_prev_hyp_id] for k, v in part_scores.items()},
                        part_new_token_id,
                    ),
                    states=self.merge_states(
                        {
                            k: self.full_scorers[k].select_state(v, full_prev_hyp_id)
                            for k, v in states.items()
                        },
                        {
                            k: self.part_scorers[k].select_state(
                                v, part_prev_hyp_id, part_new_token_id
                            )
                            for k, v in part_states.items()
                        },
                        part_new_token_id,
                    ),
                )
            )
        return self.batchfy(best_hyps)
    def post_process(
        self,
        i: int,
        maxlen: int,
        maxlenratio: float,
        running_hyps: BatchHypothesis,
        ended_hyps: List[Hypothesis],
    ) -> BatchHypothesis:
        """Perform post-processing of beam search iterations.

        Args:
            i (int): The length of hypothesis tokens.
            maxlen (int): The maximum length of tokens in beam search.
            maxlenratio (float): The maximum length ratio in beam search
                (unused in this implementation).
            running_hyps (BatchHypothesis): The running hypotheses in beam search.
            ended_hyps (List[Hypothesis]): The ended hypotheses in beam search.
                Mutated in place: newly finished hypotheses are appended.

        Returns:
            BatchHypothesis: The new running hypotheses.
        """
        n_batch = running_hyps.yseq.shape[0]
        logging.debug(f"the number of running hypothes: {n_batch}")
        if self.token_list is not None:
            # Log the current best hypothesis, skipping the leading sos token.
            logging.debug(
                "best hypo: "
                + "".join(
                    [
                        self.token_list[x]
                        for x in running_hyps.yseq[0, 1 : running_hyps.length[0]]
                    ]
                )
            )
        # add eos in the final loop to avoid that there are no ended hyps
        if i == maxlen - 1:
            logging.info("adding <eos> in the last position in the loop")
            yseq_eos = torch.cat(
                (
                    running_hyps.yseq,
                    torch.full(
                        (n_batch, 1),
                        self.eos,
                        device=running_hyps.yseq.device,
                        dtype=torch.int64,
                    ),
                ),
                1,
            )
            # NOTE(review): in-place mutation of the (nominally immutable)
            # NamedTuple's tensors; resize_as_ grows yseq by one eos column.
            running_hyps.yseq.resize_as_(yseq_eos)
            running_hyps.yseq[:] = yseq_eos
            running_hyps.length[:] = yseq_eos.shape[1]
        # add ended hypotheses to a final list, and remove them from current
        # hypotheses (this will be a problem when the number of hyps < beam)
        is_eos = (
            running_hyps.yseq[torch.arange(n_batch), running_hyps.length - 1]
            == self.eos
        )
        for b in torch.nonzero(is_eos, as_tuple=False).view(-1):
            hyp = self._select(running_hyps, b)
            ended_hyps.append(hyp)
        remained_ids = torch.nonzero(is_eos == 0, as_tuple=False).view(-1)
        return self._batch_select(running_hyps, remained_ids)