# rnn_decoder.py

import random

import numpy as np
import torch
import torch.nn.functional as F

from funasr.modules.nets_utils import make_pad_mask
from funasr.modules.nets_utils import to_device
from funasr.modules.rnn.attentions import initial_att
from funasr.models.decoder.abs_decoder import AbsDecoder
from funasr.utils.get_default_kwargs import get_default_kwargs


def build_attention_list(
    eprojs: int,
    dunits: int,
    atype: str = "location",
    num_att: int = 1,
    num_encs: int = 1,
    aheads: int = 4,
    adim: int = 320,
    awin: int = 5,
    aconv_chans: int = 10,
    aconv_filts: int = 100,
    han_mode: bool = False,
    han_type=None,
    han_heads: int = 4,
    han_dim: int = 320,
    han_conv_chans: int = -1,
    han_conv_filts: int = 100,
    han_win: int = 5,
):
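    """Build the attention module(s) for the decoder.

    With a single encoder, ``num_att`` attention modules of type ``atype``
    are created. With multiple encoders, either a single hierarchical
    attention (HAN) module is returned (``han_mode=True``), or one attention
    module per encoder is built from the per-encoder ``atype``/``aheads``/
    ``adim``/... lists.
    """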
    att_list = torch.nn.ModuleList()
    if num_encs == 1:
        for i in range(num_att):
            att = initial_att(
                atype,
                eprojs,
                dunits,
                aheads,
                adim,
                awin,
                aconv_chans,
                aconv_filts,
            )
            att_list.append(att)
    elif num_encs > 1:  # no multi-speaker mode
        if han_mode:
            att = initial_att(
                han_type,
                eprojs,
                dunits,
                han_heads,
                han_dim,
                han_win,
                han_conv_chans,
                han_conv_filts,
                han_mode=True,
            )
            return att
        else:
            att_list = torch.nn.ModuleList()
            for idx in range(num_encs):
                att = initial_att(
                    atype[idx],
                    eprojs,
                    dunits,
                    aheads[idx],
                    adim[idx],
                    awin[idx],
                    aconv_chans[idx],
                    aconv_filts[idx],
                )
                att_list.append(att)
    else:
        raise ValueError(
            "Number of encoders needs to be at least one. {}".format(num_encs)
        )
    return att_list


class RNNDecoder(AbsDecoder):
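    """Attention-based RNN (LSTM/GRU) decoder.

    Supports single- and multi-encoder inputs (the latter via hierarchical
    attention), optional context-residual connections from the attention
    context to the output layer, and scheduled sampling during training.
    """
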
    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        rnn_type: str = "lstm",
        num_layers: int = 1,
        hidden_size: int = 320,
        sampling_probability: float = 0.0,
        dropout: float = 0.0,
        context_residual: bool = False,
        replace_sos: bool = False,
        num_encs: int = 1,
        att_conf: dict = get_default_kwargs(build_attention_list),
    ):
        # FIXME(kamo): The parts of num_spk should be refactored more
        if rnn_type not in {"lstm", "gru"}:
            raise ValueError(f"Not supported: rnn_type={rnn_type}")
        super().__init__()
        eprojs = encoder_output_size
        self.dtype = rnn_type
        self.dunits = hidden_size
        self.dlayers = num_layers
        self.context_residual = context_residual
        self.sos = vocab_size - 1
        self.eos = vocab_size - 1
        self.odim = vocab_size
        self.sampling_probability = sampling_probability
        self.dropout = dropout
        self.num_encs = num_encs

        # for multilingual translation
        self.replace_sos = replace_sos

        self.embed = torch.nn.Embedding(vocab_size, hidden_size)
        self.dropout_emb = torch.nn.Dropout(p=dropout)

        self.decoder = torch.nn.ModuleList()
        self.dropout_dec = torch.nn.ModuleList()
        self.decoder += [
            torch.nn.LSTMCell(hidden_size + eprojs, hidden_size)
            if self.dtype == "lstm"
            else torch.nn.GRUCell(hidden_size + eprojs, hidden_size)
        ]
        self.dropout_dec += [torch.nn.Dropout(p=dropout)]
        for _ in range(1, self.dlayers):
            self.decoder += [
                torch.nn.LSTMCell(hidden_size, hidden_size)
                if self.dtype == "lstm"
                else torch.nn.GRUCell(hidden_size, hidden_size)
            ]
            self.dropout_dec += [torch.nn.Dropout(p=dropout)]
        # NOTE: dropout is applied only for the vertical connections
        # see https://arxiv.org/pdf/1409.2329.pdf

        if context_residual:
            self.output = torch.nn.Linear(hidden_size + eprojs, vocab_size)
        else:
            self.output = torch.nn.Linear(hidden_size, vocab_size)

        self.att_list = build_attention_list(
            eprojs=eprojs, dunits=hidden_size, **att_conf
        )

    def zero_state(self, hs_pad):
        return hs_pad.new_zeros(hs_pad.size(0), self.dunits)

    def rnn_forward(self, ey, z_list, c_list, z_prev, c_prev):
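        """Advance the stacked LSTM/GRU cells by one step.

        ``ey`` is the concatenated [embedding; attention context] input;
        ``z_prev``/``c_prev`` hold the previous per-layer hidden/cell states.
        Returns the updated ``z_list``/``c_list`` (``c_list`` is only
        meaningful for LSTM cells).
        """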
        if self.dtype == "lstm":
            z_list[0], c_list[0] = self.decoder[0](ey, (z_prev[0], c_prev[0]))
            for i in range(1, self.dlayers):
                z_list[i], c_list[i] = self.decoder[i](
                    self.dropout_dec[i - 1](z_list[i - 1]),
                    (z_prev[i], c_prev[i]),
                )
        else:
            z_list[0] = self.decoder[0](ey, z_prev[0])
            for i in range(1, self.dlayers):
                z_list[i] = self.decoder[i](
                    self.dropout_dec[i - 1](z_list[i - 1]), z_prev[i]
                )
        return z_list, c_list

    def forward(self, hs_pad, hlens, ys_in_pad, ys_in_lens, strm_idx=0):
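        """Compute decoder output logits with teacher forcing.

        hs_pad: encoder output(s), (B, T, eprojs), or a list of such tensors.
        hlens: encoder output lengths, (B,), or a list of such tensors.
        ys_in_pad: input token ids including <sos>, (B, Lmax).
        ys_in_lens: lengths of ``ys_in_pad``, (B,).
        strm_idx: stream index used to pick the attention module in
            speaker-parallel-attention (SPA) mode.
        """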
        # to support multiple-encoder ASR mode; in single-encoder mode,
        # convert torch.Tensor to a list of torch.Tensor
        if self.num_encs == 1:
            hs_pad = [hs_pad]
            hlens = [hlens]

        # attention index for the attention module
        # in SPA (speaker parallel attention),
        # att_idx is used to select the attention module. In other cases, it is 0.
        att_idx = min(strm_idx, len(self.att_list) - 1)

        # hlens should be a list of lists of integers
        hlens = [list(map(int, hlens[idx])) for idx in range(self.num_encs)]

        # get dim, length info
        olength = ys_in_pad.size(1)

        # initialization
        c_list = [self.zero_state(hs_pad[0])]
        z_list = [self.zero_state(hs_pad[0])]
        for _ in range(1, self.dlayers):
            c_list.append(self.zero_state(hs_pad[0]))
            z_list.append(self.zero_state(hs_pad[0]))
        z_all = []
        if self.num_encs == 1:
            att_w = None
            self.att_list[att_idx].reset()  # reset pre-computation of h
        else:
            att_w_list = [None] * (self.num_encs + 1)  # atts + han
            att_c_list = [None] * self.num_encs  # atts
            for idx in range(self.num_encs + 1):
                # reset pre-computation of h in atts and han
                self.att_list[idx].reset()

        # pre-computation of embedding
        eys = self.dropout_emb(self.embed(ys_in_pad))  # utt x olen x zdim

        # loop for an output sequence
        for i in range(olength):
            if self.num_encs == 1:
                att_c, att_w = self.att_list[att_idx](
                    hs_pad[0], hlens[0], self.dropout_dec[0](z_list[0]), att_w
                )
            else:
                for idx in range(self.num_encs):
                    att_c_list[idx], att_w_list[idx] = self.att_list[idx](
                        hs_pad[idx],
                        hlens[idx],
                        self.dropout_dec[0](z_list[0]),
                        att_w_list[idx],
                    )
                hs_pad_han = torch.stack(att_c_list, dim=1)
                hlens_han = [self.num_encs] * len(ys_in_pad)
                att_c, att_w_list[self.num_encs] = self.att_list[self.num_encs](
                    hs_pad_han,
                    hlens_han,
                    self.dropout_dec[0](z_list[0]),
                    att_w_list[self.num_encs],
                )
            if i > 0 and random.random() < self.sampling_probability:
                # scheduled sampling: feed back the model's own prediction
                z_out = self.output(z_all[-1])
                z_out = np.argmax(z_out.detach().cpu(), axis=1)
                z_out = self.dropout_emb(self.embed(to_device(self, z_out)))
                ey = torch.cat((z_out, att_c), dim=1)  # utt x (zdim + hdim)
            else:
                # utt x (zdim + hdim)
                ey = torch.cat((eys[:, i, :], att_c), dim=1)
            z_list, c_list = self.rnn_forward(ey, z_list, c_list, z_list, c_list)
            if self.context_residual:
                z_all.append(
                    torch.cat((self.dropout_dec[-1](z_list[-1]), att_c), dim=-1)
                )  # utt x (zdim + hdim)
            else:
                z_all.append(self.dropout_dec[-1](z_list[-1]))  # utt x (zdim)

        z_all = torch.stack(z_all, dim=1)
        z_all = self.output(z_all)
        z_all.masked_fill_(
            make_pad_mask(ys_in_lens, z_all, 1),
            0,
        )
        return z_all, ys_in_lens

    def init_state(self, x):
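        """Initialize the per-utterance decoder state consumed by ``score``."""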
        # to support multiple-encoder ASR mode; in single-encoder mode,
        # convert torch.Tensor to a list of torch.Tensor
        if self.num_encs == 1:
            x = [x]

        c_list = [self.zero_state(x[0].unsqueeze(0))]
        z_list = [self.zero_state(x[0].unsqueeze(0))]
        for _ in range(1, self.dlayers):
            c_list.append(self.zero_state(x[0].unsqueeze(0)))
            z_list.append(self.zero_state(x[0].unsqueeze(0)))
        # TODO(karita): support strm_index for `asr_mix`
        strm_index = 0
        att_idx = min(strm_index, len(self.att_list) - 1)
        if self.num_encs == 1:
            a = None
            self.att_list[att_idx].reset()  # reset pre-computation of h
        else:
            a = [None] * (self.num_encs + 1)  # atts + han
            for idx in range(self.num_encs + 1):
                # reset pre-computation of h in atts and han
                self.att_list[idx].reset()
        return dict(
            c_prev=c_list[:],
            z_prev=z_list[:],
            a_prev=a,
            workspace=(att_idx, z_list, c_list),
        )

    def score(self, yseq, state, x):
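        """Score the next token given the hypothesis ``yseq`` and ``state``.

        Returns log-probabilities over the vocabulary and the updated state.
        """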
        # to support multiple-encoder ASR mode; in single-encoder mode,
        # convert torch.Tensor to a list of torch.Tensor
        if self.num_encs == 1:
            x = [x]

        att_idx, z_list, c_list = state["workspace"]
        vy = yseq[-1].unsqueeze(0)
        ey = self.dropout_emb(self.embed(vy))  # utt list (1) x zdim
        if self.num_encs == 1:
            att_c, att_w = self.att_list[att_idx](
                x[0].unsqueeze(0),
                [x[0].size(0)],
                self.dropout_dec[0](state["z_prev"][0]),
                state["a_prev"],
            )
        else:
            att_w = [None] * (self.num_encs + 1)  # atts + han
            att_c_list = [None] * self.num_encs  # atts
            for idx in range(self.num_encs):
                att_c_list[idx], att_w[idx] = self.att_list[idx](
                    x[idx].unsqueeze(0),
                    [x[idx].size(0)],
                    self.dropout_dec[0](state["z_prev"][0]),
                    state["a_prev"][idx],
                )
            h_han = torch.stack(att_c_list, dim=1)
            att_c, att_w[self.num_encs] = self.att_list[self.num_encs](
                h_han,
                [self.num_encs],
                self.dropout_dec[0](state["z_prev"][0]),
                state["a_prev"][self.num_encs],
            )
        ey = torch.cat((ey, att_c), dim=1)  # utt(1) x (zdim + hdim)
        z_list, c_list = self.rnn_forward(
            ey, z_list, c_list, state["z_prev"], state["c_prev"]
        )
        if self.context_residual:
            logits = self.output(
                torch.cat((self.dropout_dec[-1](z_list[-1]), att_c), dim=-1)
            )
        else:
            logits = self.output(self.dropout_dec[-1](z_list[-1]))
        logp = F.log_softmax(logits, dim=1).squeeze(0)
        return (
            logp,
            dict(
                c_prev=c_list[:],
                z_prev=z_list[:],
                a_prev=att_w,
                workspace=(att_idx, z_list, c_list),
            ),
        )
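

if __name__ == "__main__":
    # Minimal smoke-test sketch (not part of the original module). It assumes
    # funasr is installed and that the default "location" attention accepts
    # the shapes below; all sizes and token ids are illustrative only.
    batch, frames, eprojs, vocab = 2, 30, 4, 10
    decoder = RNNDecoder(vocab_size=vocab, encoder_output_size=eprojs)

    hs_pad = torch.randn(batch, frames, eprojs)       # fake encoder outputs
    hlens = torch.tensor([frames, frames - 5])        # valid lengths per utterance
    ys_in_pad = torch.randint(0, vocab, (batch, 7))   # <sos>-prefixed target ids
    ys_in_lens = torch.tensor([7, 5])

    logits, olens = decoder(hs_pad, hlens, ys_in_pad, ys_in_lens)
    print(logits.shape)  # expected: (batch, 7, vocab)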