rnn_decoder.py

import random

import torch
import torch.nn.functional as F
from typeguard import check_argument_types

from funasr.modules.nets_utils import make_pad_mask
from funasr.modules.nets_utils import to_device
from funasr.modules.rnn.attentions import initial_att
from funasr.models.decoder.abs_decoder import AbsDecoder
from funasr.utils.get_default_kwargs import get_default_kwargs


def build_attention_list(
    eprojs: int,
    dunits: int,
    atype: str = "location",
    num_att: int = 1,
    num_encs: int = 1,
    aheads: int = 4,
    adim: int = 320,
    awin: int = 5,
    aconv_chans: int = 10,
    aconv_filts: int = 100,
    han_mode: bool = False,
    han_type=None,
    han_heads: int = 4,
    han_dim: int = 320,
    han_conv_chans: int = -1,
    han_conv_filts: int = 100,
    han_win: int = 5,
):
    att_list = torch.nn.ModuleList()
    if num_encs == 1:
        for _ in range(num_att):
            att = initial_att(
                atype,
                eprojs,
                dunits,
                aheads,
                adim,
                awin,
                aconv_chans,
                aconv_filts,
            )
            att_list.append(att)
    elif num_encs > 1:  # no multi-speaker mode
        if han_mode:
            att = initial_att(
                han_type,
                eprojs,
                dunits,
                han_heads,
                han_dim,
                han_win,
                han_conv_chans,
                han_conv_filts,
                han_mode=True,
            )
            return att
        else:
            att_list = torch.nn.ModuleList()
            for idx in range(num_encs):
                att = initial_att(
                    atype[idx],
                    eprojs,
                    dunits,
                    aheads[idx],
                    adim[idx],
                    awin[idx],
                    aconv_chans[idx],
                    aconv_filts[idx],
                )
                att_list.append(att)
    else:
        raise ValueError(
            "Number of encoders needs to be one or more: {}".format(num_encs)
        )
    return att_list
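
# A minimal usage sketch (the values below are illustrative, not from the
# original file): with the defaults above, a single-encoder setup yields one
# location-aware attention module, e.g.
#
#     att_list = build_attention_list(eprojs=256, dunits=320)
#     assert len(att_list) == 1
#
# Note that the han_mode branch returns a bare attention module rather than a
# ModuleList, so callers must handle both return types.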


class RNNDecoder(AbsDecoder):
    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        rnn_type: str = "lstm",
        num_layers: int = 1,
        hidden_size: int = 320,
        sampling_probability: float = 0.0,
        dropout: float = 0.0,
        context_residual: bool = False,
        replace_sos: bool = False,
        num_encs: int = 1,
        att_conf: dict = get_default_kwargs(build_attention_list),
    ):
        # FIXME(kamo): The parts of num_spk should be refactored more
        assert check_argument_types()
        if rnn_type not in {"lstm", "gru"}:
            raise ValueError(f"Not supported: rnn_type={rnn_type}")
        super().__init__()
        eprojs = encoder_output_size
        self.dtype = rnn_type
        self.dunits = hidden_size
        self.dlayers = num_layers
        self.context_residual = context_residual
        self.sos = vocab_size - 1
        self.eos = vocab_size - 1
        self.odim = vocab_size
        self.sampling_probability = sampling_probability
        self.dropout = dropout
        self.num_encs = num_encs
        # for multilingual translation
        self.replace_sos = replace_sos
        self.embed = torch.nn.Embedding(vocab_size, hidden_size)
        self.dropout_emb = torch.nn.Dropout(p=dropout)
        self.decoder = torch.nn.ModuleList()
        self.dropout_dec = torch.nn.ModuleList()
        self.decoder += [
            torch.nn.LSTMCell(hidden_size + eprojs, hidden_size)
            if self.dtype == "lstm"
            else torch.nn.GRUCell(hidden_size + eprojs, hidden_size)
        ]
        self.dropout_dec += [torch.nn.Dropout(p=dropout)]
        for _ in range(1, self.dlayers):
            self.decoder += [
                torch.nn.LSTMCell(hidden_size, hidden_size)
                if self.dtype == "lstm"
                else torch.nn.GRUCell(hidden_size, hidden_size)
            ]
            self.dropout_dec += [torch.nn.Dropout(p=dropout)]
            # NOTE: dropout is applied only for the vertical connections
            # see https://arxiv.org/pdf/1409.2329.pdf
        if context_residual:
            self.output = torch.nn.Linear(hidden_size + eprojs, vocab_size)
        else:
            self.output = torch.nn.Linear(hidden_size, vocab_size)
        self.att_list = build_attention_list(
            eprojs=eprojs, dunits=hidden_size, **att_conf
        )

    def zero_state(self, hs_pad):
        return hs_pad.new_zeros(hs_pad.size(0), self.dunits)

    def rnn_forward(self, ey, z_list, c_list, z_prev, c_prev):
        # advance every stacked cell by one step; dropout is applied only on
        # the vertical (layer-to-layer) connections
        if self.dtype == "lstm":
            z_list[0], c_list[0] = self.decoder[0](ey, (z_prev[0], c_prev[0]))
            for i in range(1, self.dlayers):
                z_list[i], c_list[i] = self.decoder[i](
                    self.dropout_dec[i - 1](z_list[i - 1]),
                    (z_prev[i], c_prev[i]),
                )
        else:
            z_list[0] = self.decoder[0](ey, z_prev[0])
            for i in range(1, self.dlayers):
                z_list[i] = self.decoder[i](
                    self.dropout_dec[i - 1](z_list[i - 1]), z_prev[i]
                )
        return z_list, c_list

    def forward(self, hs_pad, hlens, ys_in_pad, ys_in_lens, strm_idx=0):
        # to support multiple encoder asr mode; in single encoder mode,
        # convert torch.Tensor to List of torch.Tensor
        if self.num_encs == 1:
            hs_pad = [hs_pad]
            hlens = [hlens]
        # attention index for the attention module
        # in SPA (speaker parallel attention),
        # att_idx is used to select attention module. In other cases, it is 0.
        att_idx = min(strm_idx, len(self.att_list) - 1)
        # hlens should be list of list of integer
        hlens = [list(map(int, hlens[idx])) for idx in range(self.num_encs)]
        # get dim, length info
        olength = ys_in_pad.size(1)
        # initialization
        c_list = [self.zero_state(hs_pad[0])]
        z_list = [self.zero_state(hs_pad[0])]
        for _ in range(1, self.dlayers):
            c_list.append(self.zero_state(hs_pad[0]))
            z_list.append(self.zero_state(hs_pad[0]))
        z_all = []
        if self.num_encs == 1:
            att_w = None
            self.att_list[att_idx].reset()  # reset pre-computation of h
        else:
            att_w_list = [None] * (self.num_encs + 1)  # atts + han
            att_c_list = [None] * self.num_encs  # atts
            for idx in range(self.num_encs + 1):
                # reset pre-computation of h in atts and han
                self.att_list[idx].reset()
        # pre-computation of embedding
        eys = self.dropout_emb(self.embed(ys_in_pad))  # utt x olen x zdim
        # loop for an output sequence
        for i in range(olength):
            if self.num_encs == 1:
                att_c, att_w = self.att_list[att_idx](
                    hs_pad[0], hlens[0], self.dropout_dec[0](z_list[0]), att_w
                )
            else:
                for idx in range(self.num_encs):
                    att_c_list[idx], att_w_list[idx] = self.att_list[idx](
                        hs_pad[idx],
                        hlens[idx],
                        self.dropout_dec[0](z_list[0]),
                        att_w_list[idx],
                    )
                hs_pad_han = torch.stack(att_c_list, dim=1)
                hlens_han = [self.num_encs] * len(ys_in_pad)
                att_c, att_w_list[self.num_encs] = self.att_list[self.num_encs](
                    hs_pad_han,
                    hlens_han,
                    self.dropout_dec[0](z_list[0]),
                    att_w_list[self.num_encs],
                )
            if i > 0 and random.random() < self.sampling_probability:
                # scheduled sampling: feed back the decoder's own prediction
                # instead of the ground-truth token
                z_out = self.output(z_all[-1])
                z_out = z_out.detach().argmax(dim=-1)
                z_out = self.dropout_emb(self.embed(to_device(self, z_out)))
                ey = torch.cat((z_out, att_c), dim=1)  # utt x (zdim + hdim)
            else:
                # utt x (zdim + hdim)
                ey = torch.cat((eys[:, i, :], att_c), dim=1)
            z_list, c_list = self.rnn_forward(ey, z_list, c_list, z_list, c_list)
            if self.context_residual:
                z_all.append(
                    torch.cat((self.dropout_dec[-1](z_list[-1]), att_c), dim=-1)
                )  # utt x (zdim + hdim)
            else:
                z_all.append(self.dropout_dec[-1](z_list[-1]))  # utt x (zdim)
        z_all = torch.stack(z_all, dim=1)
        z_all = self.output(z_all)
        z_all.masked_fill_(
            make_pad_mask(ys_in_lens, z_all, 1),
            0,
        )
        return z_all, ys_in_lens

    def init_state(self, x):
        # to support multiple encoder asr mode; in single encoder mode,
        # convert torch.Tensor to List of torch.Tensor
        if self.num_encs == 1:
            x = [x]
        c_list = [self.zero_state(x[0].unsqueeze(0))]
        z_list = [self.zero_state(x[0].unsqueeze(0))]
        for _ in range(1, self.dlayers):
            c_list.append(self.zero_state(x[0].unsqueeze(0)))
            z_list.append(self.zero_state(x[0].unsqueeze(0)))
        # TODO(karita): support strm_index for `asr_mix`
        strm_index = 0
        att_idx = min(strm_index, len(self.att_list) - 1)
        if self.num_encs == 1:
            a = None
            self.att_list[att_idx].reset()  # reset pre-computation of h
        else:
            a = [None] * (self.num_encs + 1)  # atts + han
            for idx in range(self.num_encs + 1):
                # reset pre-computation of h in atts and han
                self.att_list[idx].reset()
        return dict(
            c_prev=c_list[:],
            z_prev=z_list[:],
            a_prev=a,
            workspace=(att_idx, z_list, c_list),
        )

    def score(self, yseq, state, x):
        # to support multiple encoder asr mode; in single encoder mode,
        # convert torch.Tensor to List of torch.Tensor
        if self.num_encs == 1:
            x = [x]
        att_idx, z_list, c_list = state["workspace"]
        vy = yseq[-1].unsqueeze(0)
        ey = self.dropout_emb(self.embed(vy))  # utt list (1) x zdim
        if self.num_encs == 1:
            att_c, att_w = self.att_list[att_idx](
                x[0].unsqueeze(0),
                [x[0].size(0)],
                self.dropout_dec[0](state["z_prev"][0]),
                state["a_prev"],
            )
        else:
            att_w = [None] * (self.num_encs + 1)  # atts + han
            att_c_list = [None] * self.num_encs  # atts
            for idx in range(self.num_encs):
                att_c_list[idx], att_w[idx] = self.att_list[idx](
                    x[idx].unsqueeze(0),
                    [x[idx].size(0)],
                    self.dropout_dec[0](state["z_prev"][0]),
                    state["a_prev"][idx],
                )
            h_han = torch.stack(att_c_list, dim=1)
            att_c, att_w[self.num_encs] = self.att_list[self.num_encs](
                h_han,
                [self.num_encs],
                self.dropout_dec[0](state["z_prev"][0]),
                state["a_prev"][self.num_encs],
            )
        ey = torch.cat((ey, att_c), dim=1)  # utt(1) x (zdim + hdim)
        z_list, c_list = self.rnn_forward(
            ey, z_list, c_list, state["z_prev"], state["c_prev"]
        )
        if self.context_residual:
            logits = self.output(
                torch.cat((self.dropout_dec[-1](z_list[-1]), att_c), dim=-1)
            )
        else:
            logits = self.output(self.dropout_dec[-1](z_list[-1]))
        logp = F.log_softmax(logits, dim=1).squeeze(0)
        return (
            logp,
            dict(
                c_prev=c_list[:],
                z_prev=z_list[:],
                a_prev=att_w,
                workspace=(att_idx, z_list, c_list),
            ),
        )
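

if __name__ == "__main__":
    # Minimal smoke test (a sketch added for illustration, not part of the
    # original module): build a single-encoder decoder with the default
    # location-aware attention and run one teacher-forced forward pass on
    # random dummy data. All sizes below are illustrative.
    batch, frames, eprojs, vocab = 2, 50, 256, 100
    decoder = RNNDecoder(vocab_size=vocab, encoder_output_size=eprojs)
    hs_pad = torch.randn(batch, frames, eprojs)  # padded encoder outputs
    hlens = torch.tensor([frames, frames - 10])  # true lengths per utterance
    ys_in_pad = torch.randint(0, vocab, (batch, 15))  # padded target tokens
    ys_in_lens = torch.tensor([15, 12])  # true target lengths
    z_all, _ = decoder(hs_pad, hlens, ys_in_pad, ys_in_lens)
    print(z_all.shape)  # expected: (batch, olength, vocab) == (2, 15, 100)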