# sanm_decoder.py
from typing import List
from typing import Tuple

import logging
import torch
import torch.nn as nn
import numpy as np

from funasr.modules.streaming_utils import utils as myutils
from funasr.models.decoder.transformer_decoder import BaseTransformerDecoder
from funasr.modules.attention import MultiHeadedAttentionSANMDecoder, MultiHeadedAttentionCrossAtt
from funasr.modules.embedding import PositionalEncoding
from funasr.modules.layer_norm import LayerNorm
from funasr.modules.positionwise_feed_forward import PositionwiseFeedForwardDecoderSANM
from funasr.modules.repeat import repeat


class DecoderLayerSANM(nn.Module):
    """Single decoder layer module.

    Args:
        size (int): Input dimension.
        self_attn (torch.nn.Module): Self-attention module instance.
            `MultiHeadedAttention` instance can be used as the argument.
        src_attn (torch.nn.Module): Source-attention module instance.
            `MultiHeadedAttention` instance can be used as the argument.
        feed_forward (torch.nn.Module): Feed-forward module instance.
            `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance
            can be used as the argument.
        dropout_rate (float): Dropout rate.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            If True, an additional linear layer will be applied,
            i.e. x -> x + linear(concat(x, att(x))).
            If False, no additional linear layer will be applied, i.e. x -> x + att(x).
    """

    def __init__(
        self,
        size,
        self_attn,
        src_attn,
        feed_forward,
        dropout_rate,
        normalize_before=True,
        concat_after=False,
    ):
        """Construct a DecoderLayerSANM object."""
        super(DecoderLayerSANM, self).__init__()
        self.size = size
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        self.norm1 = LayerNorm(size)
        if self_attn is not None:
            self.norm2 = LayerNorm(size)
        if src_attn is not None:
            self.norm3 = LayerNorm(size)
        self.dropout = nn.Dropout(dropout_rate)
        self.normalize_before = normalize_before
        self.concat_after = concat_after
        if self.concat_after:
            self.concat_linear1 = nn.Linear(size + size, size)
            self.concat_linear2 = nn.Linear(size + size, size)

    def forward(self, tgt, tgt_mask, memory, memory_mask=None, cache=None):
        """Compute decoded features.

        Args:
            tgt (torch.Tensor): Input tensor (#batch, maxlen_out, size).
            tgt_mask (torch.Tensor): Mask for input tensor (#batch, maxlen_out).
            memory (torch.Tensor): Encoded memory, float32 (#batch, maxlen_in, size).
            memory_mask (torch.Tensor): Encoded memory mask (#batch, maxlen_in).
            cache (List[torch.Tensor]): List of cached tensors.
                Each tensor shape should be (#batch, maxlen_out - 1, size).

        Returns:
            torch.Tensor: Output tensor (#batch, maxlen_out, size).
            torch.Tensor: Mask for output tensor (#batch, maxlen_out).
            torch.Tensor: Encoded memory (#batch, maxlen_in, size).
            torch.Tensor: Encoded memory mask (#batch, maxlen_in).
            List[torch.Tensor]: The cache, passed through unchanged.
        """
        # tgt = self.dropout(tgt)
        residual = tgt
        if self.normalize_before:
            tgt = self.norm1(tgt)
        tgt = self.feed_forward(tgt)

        x = tgt
        if self.self_attn:
            if self.normalize_before:
                tgt = self.norm2(tgt)
            x, _ = self.self_attn(tgt, tgt_mask)
            x = residual + self.dropout(x)

        if self.src_attn is not None:
            residual = x
            if self.normalize_before:
                x = self.norm3(x)
            x = residual + self.dropout(self.src_attn(x, memory, memory_mask))

        return x, tgt_mask, memory, memory_mask, cache

    def forward_chunk(self, tgt, tgt_mask, memory, memory_mask=None, cache=None):
        """Compute decoded features (streaming/chunk variant with FSMN cache).

        Args:
            tgt (torch.Tensor): Input tensor (#batch, maxlen_out, size).
            tgt_mask (torch.Tensor): Mask for input tensor (#batch, maxlen_out).
            memory (torch.Tensor): Encoded memory, float32 (#batch, maxlen_in, size).
            memory_mask (torch.Tensor): Encoded memory mask (#batch, maxlen_in).
            cache (List[torch.Tensor]): List of cached tensors.
                Each tensor shape should be (#batch, maxlen_out - 1, size).

        Returns:
            torch.Tensor: Output tensor (#batch, maxlen_out, size).
            torch.Tensor: Mask for output tensor (#batch, maxlen_out).
            torch.Tensor: Encoded memory (#batch, maxlen_in, size).
            torch.Tensor: Encoded memory mask (#batch, maxlen_in).
            Updated cache from the self-attention (FSMN) block.
        """
        # tgt = self.dropout(tgt)
        residual = tgt
        if self.normalize_before:
            tgt = self.norm1(tgt)
        tgt = self.feed_forward(tgt)

        x = tgt
        if self.self_attn:
            if self.normalize_before:
                tgt = self.norm2(tgt)
            if self.training:
                cache = None
            x, cache = self.self_attn(tgt, tgt_mask, cache=cache)
            x = residual + self.dropout(x)

        if self.src_attn is not None:
            residual = x
            if self.normalize_before:
                x = self.norm3(x)
            x = residual + self.dropout(self.src_attn(x, memory, memory_mask))

        return x, tgt_mask, memory, memory_mask, cache
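

def _example_decoder_layer_usage():
    """Hedged usage sketch for DecoderLayerSANM.

    Builds one layer with the same submodule signatures the decoder classes
    below use, then runs a single forward pass on dummy tensors. The concrete
    sizes (batch=2, tgt_len=5, mem_len=30, dim=256, 4 heads, kernel_size=11,
    sanm_shfit=5) are illustrative assumptions, not values from a trained model.
    """
    layer = DecoderLayerSANM(
        256,
        MultiHeadedAttentionSANMDecoder(256, 0.1, 11, sanm_shfit=5),
        MultiHeadedAttentionCrossAtt(4, 256, 0.1),
        PositionwiseFeedForwardDecoderSANM(256, 2048, 0.1),
        dropout_rate=0.1,
    )
    tgt = torch.randn(2, 5, 256)        # (batch, maxlen_out, size)
    tgt_mask = torch.ones(2, 5, 1)      # built as in forward() below: mask[:, :, None]
    memory = torch.randn(2, 30, 256)    # (batch, maxlen_in, size)
    memory_mask = torch.ones(2, 1, 30)  # built as in forward() below: mask[:, None, :]
    x, *_ = layer(tgt, tgt_mask, memory, memory_mask)
    return x  # (2, 5, 256)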


class FsmnDecoderSCAMAOpt(BaseTransformerDecoder):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    SCAMA: Streaming chunk-aware multihead attention for online end-to-end speech recognition
    https://arxiv.org/abs/2006.01713
    """

    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        self_attention_dropout_rate: float = 0.0,
        src_attention_dropout_rate: float = 0.0,
        input_layer: str = "embed",
        use_output_layer: bool = True,
        pos_enc_class=PositionalEncoding,
        normalize_before: bool = True,
        concat_after: bool = False,
        att_layer_num: int = 6,
        kernel_size: int = 21,
        sanm_shfit: int = None,
        concat_embeds: bool = False,
        attention_dim: int = None,
        tf2torch_tensor_name_prefix_torch: str = "decoder",
        tf2torch_tensor_name_prefix_tf: str = "seq2seq/decoder",
        embed_tensor_name_prefix_tf: str = None,
    ):
        super().__init__(
            vocab_size=vocab_size,
            encoder_output_size=encoder_output_size,
            dropout_rate=dropout_rate,
            positional_dropout_rate=positional_dropout_rate,
            input_layer=input_layer,
            use_output_layer=use_output_layer,
            pos_enc_class=pos_enc_class,
            normalize_before=normalize_before,
        )
        if attention_dim is None:
            attention_dim = encoder_output_size

        if input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(vocab_size, attention_dim),
            )
        elif input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(vocab_size, attention_dim),
                torch.nn.LayerNorm(attention_dim),
                torch.nn.Dropout(dropout_rate),
                torch.nn.ReLU(),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        else:
            raise ValueError(f"only 'embed' or 'linear' is supported: {input_layer}")

        self.normalize_before = normalize_before
        if self.normalize_before:
            self.after_norm = LayerNorm(attention_dim)
        if use_output_layer:
            self.output_layer = torch.nn.Linear(attention_dim, vocab_size)
        else:
            self.output_layer = None

        self.att_layer_num = att_layer_num
        self.num_blocks = num_blocks
        if sanm_shfit is None:
            sanm_shfit = (kernel_size - 1) // 2
        self.decoders = repeat(
            att_layer_num,
            lambda lnum: DecoderLayerSANM(
                attention_dim,
                MultiHeadedAttentionSANMDecoder(
                    attention_dim, self_attention_dropout_rate, kernel_size, sanm_shfit=sanm_shfit
                ),
                MultiHeadedAttentionCrossAtt(
                    attention_heads, attention_dim, src_attention_dropout_rate,
                    encoder_output_size=encoder_output_size,
                ),
                PositionwiseFeedForwardDecoderSANM(attention_dim, linear_units, dropout_rate),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
        if num_blocks - att_layer_num <= 0:
            self.decoders2 = None
        else:
            self.decoders2 = repeat(
                num_blocks - att_layer_num,
                lambda lnum: DecoderLayerSANM(
                    attention_dim,
                    MultiHeadedAttentionSANMDecoder(
                        attention_dim, self_attention_dropout_rate, kernel_size, sanm_shfit=sanm_shfit
                    ),
                    None,
                    PositionwiseFeedForwardDecoderSANM(attention_dim, linear_units, dropout_rate),
                    dropout_rate,
                    normalize_before,
                    concat_after,
                ),
            )
        self.decoders3 = repeat(
            1,
            lambda lnum: DecoderLayerSANM(
                attention_dim,
                None,
                None,
                PositionwiseFeedForwardDecoderSANM(attention_dim, linear_units, dropout_rate),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
        if concat_embeds:
            self.embed_concat_ffn = repeat(
                1,
                lambda lnum: DecoderLayerSANM(
                    attention_dim + encoder_output_size,
                    None,
                    None,
                    PositionwiseFeedForwardDecoderSANM(
                        attention_dim + encoder_output_size, linear_units, dropout_rate,
                        adim=attention_dim,
                    ),
                    dropout_rate,
                    normalize_before,
                    concat_after,
                ),
            )
        else:
            self.embed_concat_ffn = None
        self.concat_embeds = concat_embeds
        self.tf2torch_tensor_name_prefix_torch = tf2torch_tensor_name_prefix_torch
        self.tf2torch_tensor_name_prefix_tf = tf2torch_tensor_name_prefix_tf
        self.embed_tensor_name_prefix_tf = embed_tensor_name_prefix_tf

    def forward(
        self,
        hs_pad: torch.Tensor,
        hlens: torch.Tensor,
        ys_in_pad: torch.Tensor,
        ys_in_lens: torch.Tensor,
        chunk_mask: torch.Tensor = None,
        pre_acoustic_embeds: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Forward decoder.

        Args:
            hs_pad: encoded memory, float32 (batch, maxlen_in, feat)
            hlens: (batch)
            ys_in_pad:
                input token ids, int64 (batch, maxlen_out)
                if input_layer == "embed";
                input tensor (batch, maxlen_out, #mels) in the other cases
            ys_in_lens: (batch)

        Returns:
            (tuple): tuple containing:
                x: decoded token score before softmax (batch, maxlen_out, token)
                    if use_output_layer is True
                olens: (batch,)
        """
        tgt = ys_in_pad
        tgt_mask = myutils.sequence_mask(ys_in_lens, device=tgt.device)[:, :, None]

        memory = hs_pad
        memory_mask = myutils.sequence_mask(hlens, device=memory.device)[:, None, :]
        if chunk_mask is not None:
            memory_mask = memory_mask * chunk_mask
            if tgt_mask.size(1) != memory_mask.size(1):
                memory_mask = torch.cat((memory_mask, memory_mask[:, -2:-1, :]), dim=1)

        x = self.embed(tgt)
        if pre_acoustic_embeds is not None and self.concat_embeds:
            x = torch.cat((x, pre_acoustic_embeds), dim=-1)
            x, _, _, _, _ = self.embed_concat_ffn(x, None, None, None, None)

        x, tgt_mask, memory, memory_mask, _ = self.decoders(
            x, tgt_mask, memory, memory_mask
        )
        if self.decoders2 is not None:
            x, tgt_mask, memory, memory_mask, _ = self.decoders2(
                x, tgt_mask, memory, memory_mask
            )
        x, tgt_mask, memory, memory_mask, _ = self.decoders3(
            x, tgt_mask, memory, memory_mask
        )
        if self.normalize_before:
            x = self.after_norm(x)
        if self.output_layer is not None:
            x = self.output_layer(x)
        olens = tgt_mask.sum(1)
        return x, olens

    def score(self, ys, state, x, x_mask=None, pre_acoustic_embeds: torch.Tensor = None):
        """Score."""
        ys_mask = myutils.sequence_mask(
            torch.tensor([len(ys)], dtype=torch.int32), device=x.device
        )[:, :, None]
        logp, state = self.forward_one_step(
            ys.unsqueeze(0), ys_mask, x.unsqueeze(0),
            memory_mask=x_mask, pre_acoustic_embeds=pre_acoustic_embeds, cache=state,
        )
        return logp.squeeze(0), state

    def forward_one_step(
        self,
        tgt: torch.Tensor,
        tgt_mask: torch.Tensor,
        memory: torch.Tensor,
        memory_mask: torch.Tensor = None,
        pre_acoustic_embeds: torch.Tensor = None,
        cache: List[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        """Forward one step.

        Args:
            tgt: input token ids, int64 (batch, maxlen_out)
            tgt_mask: input token mask, (batch, maxlen_out)
                dtype=torch.uint8 before PyTorch 1.2,
                dtype=torch.bool in PyTorch 1.2+ (including 1.2)
            memory: encoded memory, float32 (batch, maxlen_in, feat)
            cache: cached output list of (batch, max_time_out-1, size)

        Returns:
            y, cache: NN output value and cache per `self.decoders`.
                `y.shape` is (batch, maxlen_out, token)
        """
        x = tgt[:, -1:]
        tgt_mask = None
        x = self.embed(x)
        if pre_acoustic_embeds is not None and self.concat_embeds:
            x = torch.cat((x, pre_acoustic_embeds), dim=-1)
            x, _, _, _, _ = self.embed_concat_ffn(x, None, None, None, None)

        if cache is None:
            cache_layer_num = len(self.decoders)
            if self.decoders2 is not None:
                cache_layer_num += len(self.decoders2)
            cache = [None] * cache_layer_num
        new_cache = []
        # for c, decoder in zip(cache, self.decoders):
        for i in range(self.att_layer_num):
            decoder = self.decoders[i]
            c = cache[i]
            x, tgt_mask, memory, memory_mask, c_ret = decoder.forward_chunk(
                x, tgt_mask, memory, memory_mask, cache=c
            )
            new_cache.append(c_ret)

        if self.num_blocks - self.att_layer_num >= 1:
            for i in range(self.num_blocks - self.att_layer_num):
                j = i + self.att_layer_num
                decoder = self.decoders2[i]
                c = cache[j]
                x, tgt_mask, memory, memory_mask, c_ret = decoder.forward_chunk(
                    x, tgt_mask, memory, memory_mask, cache=c
                )
                new_cache.append(c_ret)

        for decoder in self.decoders3:
            x, tgt_mask, memory, memory_mask, _ = decoder.forward_chunk(
                x, tgt_mask, memory, None, cache=None
            )

        if self.normalize_before:
            y = self.after_norm(x[:, -1])
        else:
            y = x[:, -1]
        if self.output_layer is not None:
            y = self.output_layer(y)
            y = torch.log_softmax(y, dim=-1)
        return y, new_cache

    def gen_tf2torch_map_dict(self):
        tensor_name_prefix_torch = self.tf2torch_tensor_name_prefix_torch
        tensor_name_prefix_tf = self.tf2torch_tensor_name_prefix_tf
        embed_tensor_name_prefix_tf = (
            self.embed_tensor_name_prefix_tf
            if self.embed_tensor_name_prefix_tf is not None
            else tensor_name_prefix_tf
        )
        map_dict_local = {
            ## decoder
            # ffn
            "{}.decoders.layeridx.norm1.weight".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_fsmn_layer_layeridx/decoder_ffn/LayerNorm/gamma".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (256,),(256,)
            "{}.decoders.layeridx.norm1.bias".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_fsmn_layer_layeridx/decoder_ffn/LayerNorm/beta".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (256,),(256,)
            "{}.decoders.layeridx.feed_forward.w_1.weight".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_fsmn_layer_layeridx/decoder_ffn/conv1d/kernel".format(tensor_name_prefix_tf),
                "squeeze": 0,
                "transpose": (1, 0),
            },  # (1024,256),(1,256,1024)
            "{}.decoders.layeridx.feed_forward.w_1.bias".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_fsmn_layer_layeridx/decoder_ffn/conv1d/bias".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (1024,),(1024,)
            "{}.decoders.layeridx.feed_forward.norm.weight".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_fsmn_layer_layeridx/decoder_ffn/LayerNorm_1/gamma".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (1024,),(1024,)
            "{}.decoders.layeridx.feed_forward.norm.bias".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_fsmn_layer_layeridx/decoder_ffn/LayerNorm_1/beta".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (1024,),(1024,)
            "{}.decoders.layeridx.feed_forward.w_2.weight".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_fsmn_layer_layeridx/decoder_ffn/conv1d_1/kernel".format(tensor_name_prefix_tf),
                "squeeze": 0,
                "transpose": (1, 0),
            },  # (256,1024),(1,1024,256)
            # fsmn
            "{}.decoders.layeridx.norm2.weight".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_fsmn_layer_layeridx/decoder_memory_block/LayerNorm/gamma".format(
                    tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (256,),(256,)
            "{}.decoders.layeridx.norm2.bias".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_fsmn_layer_layeridx/decoder_memory_block/LayerNorm/beta".format(
                    tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (256,),(256,)
            "{}.decoders.layeridx.self_attn.fsmn_block.weight".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_fsmn_layer_layeridx/decoder_memory_block/depth_conv_w".format(
                    tensor_name_prefix_tf),
                "squeeze": 0,
                "transpose": (1, 2, 0),
            },  # (256,1,31),(1,31,256,1)
            # src att
            "{}.decoders.layeridx.norm3.weight".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_fsmn_layer_layeridx/multi_head/LayerNorm/gamma".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (256,),(256,)
            "{}.decoders.layeridx.norm3.bias".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_fsmn_layer_layeridx/multi_head/LayerNorm/beta".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (256,),(256,)
            "{}.decoders.layeridx.src_attn.linear_q.weight".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_fsmn_layer_layeridx/multi_head/conv1d/kernel".format(tensor_name_prefix_tf),
                "squeeze": 0,
                "transpose": (1, 0),
            },  # (256,256),(1,256,256)
            "{}.decoders.layeridx.src_attn.linear_q.bias".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_fsmn_layer_layeridx/multi_head/conv1d/bias".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (256,),(256,)
            "{}.decoders.layeridx.src_attn.linear_k_v.weight".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_fsmn_layer_layeridx/multi_head/conv1d_1/kernel".format(tensor_name_prefix_tf),
                "squeeze": 0,
                "transpose": (1, 0),
            },  # (1024,256),(1,256,1024)
            "{}.decoders.layeridx.src_attn.linear_k_v.bias".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_fsmn_layer_layeridx/multi_head/conv1d_1/bias".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (1024,),(1024,)
            "{}.decoders.layeridx.src_attn.linear_out.weight".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_fsmn_layer_layeridx/multi_head/conv1d_2/kernel".format(tensor_name_prefix_tf),
                "squeeze": 0,
                "transpose": (1, 0),
            },  # (256,256),(1,256,256)
            "{}.decoders.layeridx.src_attn.linear_out.bias".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_fsmn_layer_layeridx/multi_head/conv1d_2/bias".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (256,),(256,)
            # dnn
            "{}.decoders3.layeridx.norm1.weight".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_dnn_layer_layeridx/LayerNorm/gamma".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (256,),(256,)
            "{}.decoders3.layeridx.norm1.bias".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_dnn_layer_layeridx/LayerNorm/beta".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (256,),(256,)
            "{}.decoders3.layeridx.feed_forward.w_1.weight".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_dnn_layer_layeridx/conv1d/kernel".format(tensor_name_prefix_tf),
                "squeeze": 0,
                "transpose": (1, 0),
            },  # (1024,256),(1,256,1024)
            "{}.decoders3.layeridx.feed_forward.w_1.bias".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_dnn_layer_layeridx/conv1d/bias".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (1024,),(1024,)
            "{}.decoders3.layeridx.feed_forward.norm.weight".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_dnn_layer_layeridx/LayerNorm_1/gamma".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (1024,),(1024,)
            "{}.decoders3.layeridx.feed_forward.norm.bias".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_dnn_layer_layeridx/LayerNorm_1/beta".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (1024,),(1024,)
            "{}.decoders3.layeridx.feed_forward.w_2.weight".format(tensor_name_prefix_torch): {
                "name": "{}/decoder_dnn_layer_layeridx/conv1d_1/kernel".format(tensor_name_prefix_tf),
                "squeeze": 0,
                "transpose": (1, 0),
            },  # (256,1024),(1,1024,256)
            # embed_concat_ffn
            "{}.embed_concat_ffn.layeridx.norm1.weight".format(tensor_name_prefix_torch): {
                "name": "{}/cif_concat/LayerNorm/gamma".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (256,),(256,)
            "{}.embed_concat_ffn.layeridx.norm1.bias".format(tensor_name_prefix_torch): {
                "name": "{}/cif_concat/LayerNorm/beta".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (256,),(256,)
            "{}.embed_concat_ffn.layeridx.feed_forward.w_1.weight".format(tensor_name_prefix_torch): {
                "name": "{}/cif_concat/conv1d/kernel".format(tensor_name_prefix_tf),
                "squeeze": 0,
                "transpose": (1, 0),
            },  # (1024,256),(1,256,1024)
            "{}.embed_concat_ffn.layeridx.feed_forward.w_1.bias".format(tensor_name_prefix_torch): {
                "name": "{}/cif_concat/conv1d/bias".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (1024,),(1024,)
            "{}.embed_concat_ffn.layeridx.feed_forward.norm.weight".format(tensor_name_prefix_torch): {
                "name": "{}/cif_concat/LayerNorm_1/gamma".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (1024,),(1024,)
            "{}.embed_concat_ffn.layeridx.feed_forward.norm.bias".format(tensor_name_prefix_torch): {
                "name": "{}/cif_concat/LayerNorm_1/beta".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (1024,),(1024,)
            "{}.embed_concat_ffn.layeridx.feed_forward.w_2.weight".format(tensor_name_prefix_torch): {
                "name": "{}/cif_concat/conv1d_1/kernel".format(tensor_name_prefix_tf),
                "squeeze": 0,
                "transpose": (1, 0),
            },  # (256,1024),(1,1024,256)
            # out norm
            "{}.after_norm.weight".format(tensor_name_prefix_torch): {
                "name": "{}/LayerNorm/gamma".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (256,),(256,)
            "{}.after_norm.bias".format(tensor_name_prefix_torch): {
                "name": "{}/LayerNorm/beta".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (256,),(256,)
            # in embed
            "{}.embed.0.weight".format(tensor_name_prefix_torch): {
                "name": "{}/w_embs".format(embed_tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (4235,256),(4235,256)
            # out layer
            "{}.output_layer.weight".format(tensor_name_prefix_torch): {
                "name": ["{}/dense/kernel".format(tensor_name_prefix_tf),
                         "{}/w_embs".format(embed_tensor_name_prefix_tf)],
                "squeeze": [None, None],
                "transpose": [(1, 0), None],
            },  # (4235,256),(256,4235)
            "{}.output_layer.bias".format(tensor_name_prefix_torch): {
                "name": ["{}/dense/bias".format(tensor_name_prefix_tf),
                         "seq2seq/2bias" if tensor_name_prefix_tf == "seq2seq/decoder/inputter_1" else "seq2seq/bias"],
                "squeeze": [None, None],
                "transpose": [None, None],
            },  # (4235,),(4235,)
        }
        return map_dict_local
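
    # Sketch of how convert_tf2torch (below) applies one map entry, using the
    # FFN kernel "decoders.layeridx.feed_forward.w_1.weight" as an example:
    # the TF variable ".../decoder_ffn/conv1d/kernel" has shape (1, 256, 1024);
    # squeezing axis 0 gives (256, 1024) and transposing (1, 0) yields the
    # (1024, 256) layout torch's nn.Linear expects. Dummy-shape sketch:
    #
    #     entry = {"squeeze": 0, "transpose": (1, 0)}
    #     data_tf = np.zeros((1, 256, 1024), dtype=np.float32)  # stand-in for var_dict_tf[name_tf]
    #     if entry["squeeze"] is not None:
    #         data_tf = np.squeeze(data_tf, axis=entry["squeeze"])
    #     if entry["transpose"] is not None:
    #         data_tf = np.transpose(data_tf, entry["transpose"])
    #     weight = torch.from_numpy(data_tf).type(torch.float32)  # torch.Size([1024, 256])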

    def convert_tf2torch(
        self,
        var_dict_tf,
        var_dict_torch,
    ):
        map_dict = self.gen_tf2torch_map_dict()
        var_dict_torch_update = dict()
        decoder_layeridx_sets = set()
        for name in sorted(var_dict_torch.keys(), reverse=False):
            names = name.split('.')
            if names[0] == self.tf2torch_tensor_name_prefix_torch:
                if names[1] == "decoders":
                    layeridx = int(names[2])
                    name_q = name.replace(".{}.".format(layeridx), ".layeridx.")
                    layeridx_bias = 0
                    layeridx += layeridx_bias
                    decoder_layeridx_sets.add(layeridx)
                    if name_q in map_dict.keys():
                        name_v = map_dict[name_q]["name"]
                        name_tf = name_v.replace("layeridx", "{}".format(layeridx))
                        data_tf = var_dict_tf[name_tf]
                        if map_dict[name_q]["squeeze"] is not None:
                            data_tf = np.squeeze(data_tf, axis=map_dict[name_q]["squeeze"])
                        if map_dict[name_q]["transpose"] is not None:
                            data_tf = np.transpose(data_tf, map_dict[name_q]["transpose"])
                        data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
                        assert var_dict_torch[name].size() == data_tf.size(), \
                            "{}, {}, {} != {}".format(name, name_tf, var_dict_torch[name].size(), data_tf.size())
                        var_dict_torch_update[name] = data_tf
                        logging.info(
                            "torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
                                name, data_tf.size(), name_v, var_dict_tf[name_tf].shape))
                elif names[1] == "decoders2":
                    layeridx = int(names[2])
                    name_q = name.replace(".{}.".format(layeridx), ".layeridx.")
                    name_q = name_q.replace("decoders2", "decoders")
                    # decoders2 layers continue the TF layer numbering after decoders
                    layeridx_bias = len(decoder_layeridx_sets)
                    layeridx += layeridx_bias
                    if "decoders." in name:
                        decoder_layeridx_sets.add(layeridx)
                    if name_q in map_dict.keys():
                        name_v = map_dict[name_q]["name"]
                        name_tf = name_v.replace("layeridx", "{}".format(layeridx))
                        data_tf = var_dict_tf[name_tf]
                        if map_dict[name_q]["squeeze"] is not None:
                            data_tf = np.squeeze(data_tf, axis=map_dict[name_q]["squeeze"])
                        if map_dict[name_q]["transpose"] is not None:
                            data_tf = np.transpose(data_tf, map_dict[name_q]["transpose"])
                        data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
                        assert var_dict_torch[name].size() == data_tf.size(), \
                            "{}, {}, {} != {}".format(name, name_tf, var_dict_torch[name].size(), data_tf.size())
                        var_dict_torch_update[name] = data_tf
                        logging.info(
                            "torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
                                name, data_tf.size(), name_v, var_dict_tf[name_tf].shape))
                elif names[1] == "decoders3":
                    layeridx = int(names[2])
                    name_q = name.replace(".{}.".format(layeridx), ".layeridx.")
                    layeridx_bias = 0
                    layeridx += layeridx_bias
                    if "decoders." in name:
                        decoder_layeridx_sets.add(layeridx)
                    if name_q in map_dict.keys():
                        name_v = map_dict[name_q]["name"]
                        name_tf = name_v.replace("layeridx", "{}".format(layeridx))
                        data_tf = var_dict_tf[name_tf]
                        if map_dict[name_q]["squeeze"] is not None:
                            data_tf = np.squeeze(data_tf, axis=map_dict[name_q]["squeeze"])
                        if map_dict[name_q]["transpose"] is not None:
                            data_tf = np.transpose(data_tf, map_dict[name_q]["transpose"])
                        data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
                        assert var_dict_torch[name].size() == data_tf.size(), \
                            "{}, {}, {} != {}".format(name, name_tf, var_dict_torch[name].size(), data_tf.size())
                        var_dict_torch_update[name] = data_tf
                        logging.info(
                            "torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
                                name, data_tf.size(), name_v, var_dict_tf[name_tf].shape))
                elif names[1] == "embed" or names[1] == "output_layer":
                    name_tf = map_dict[name]["name"]
                    if isinstance(name_tf, list):
                        # pick the first candidate TF name that exists in the checkpoint
                        idx_list = 0
                        if name_tf[idx_list] in var_dict_tf.keys():
                            pass
                        else:
                            idx_list = 1
                        data_tf = var_dict_tf[name_tf[idx_list]]
                        if map_dict[name]["squeeze"][idx_list] is not None:
                            data_tf = np.squeeze(data_tf, axis=map_dict[name]["squeeze"][idx_list])
                        if map_dict[name]["transpose"][idx_list] is not None:
                            data_tf = np.transpose(data_tf, map_dict[name]["transpose"][idx_list])
                        data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
                        assert var_dict_torch[name].size() == data_tf.size(), \
                            "{}, {}, {} != {}".format(name, name_tf, var_dict_torch[name].size(), data_tf.size())
                        var_dict_torch_update[name] = data_tf
                        logging.info(
                            "torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
                                name, data_tf.size(), name_tf[idx_list], var_dict_tf[name_tf[idx_list]].shape))
                    else:
                        data_tf = var_dict_tf[name_tf]
                        if map_dict[name]["squeeze"] is not None:
                            data_tf = np.squeeze(data_tf, axis=map_dict[name]["squeeze"])
                        if map_dict[name]["transpose"] is not None:
                            data_tf = np.transpose(data_tf, map_dict[name]["transpose"])
                        data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
                        assert var_dict_torch[name].size() == data_tf.size(), \
                            "{}, {}, {} != {}".format(name, name_tf, var_dict_torch[name].size(), data_tf.size())
                        var_dict_torch_update[name] = data_tf
                        logging.info(
                            "torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
                                name, data_tf.size(), name_tf, var_dict_tf[name_tf].shape))
                elif names[1] == "after_norm":
                    name_tf = map_dict[name]["name"]
                    data_tf = var_dict_tf[name_tf]
                    data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
                    var_dict_torch_update[name] = data_tf
                    logging.info(
                        "torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
                            name, data_tf.size(), name_tf, var_dict_tf[name_tf].shape))
                elif names[1] == "embed_concat_ffn":
                    layeridx = int(names[2])
                    name_q = name.replace(".{}.".format(layeridx), ".layeridx.")
                    layeridx_bias = 0
                    layeridx += layeridx_bias
                    if "decoders." in name:
                        decoder_layeridx_sets.add(layeridx)
                    if name_q in map_dict.keys():
                        name_v = map_dict[name_q]["name"]
                        name_tf = name_v.replace("layeridx", "{}".format(layeridx))
                        data_tf = var_dict_tf[name_tf]
                        if map_dict[name_q]["squeeze"] is not None:
                            data_tf = np.squeeze(data_tf, axis=map_dict[name_q]["squeeze"])
                        if map_dict[name_q]["transpose"] is not None:
                            data_tf = np.transpose(data_tf, map_dict[name_q]["transpose"])
                        data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
                        assert var_dict_torch[name].size() == data_tf.size(), \
                            "{}, {}, {} != {}".format(name, name_tf, var_dict_torch[name].size(), data_tf.size())
                        var_dict_torch_update[name] = data_tf
                        logging.info(
                            "torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
                                name, data_tf.size(), name_v, var_dict_tf[name_tf].shape))
        return var_dict_torch_update
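

def _example_scama_one_step_decoding():
    """Hedged sketch of incremental decoding with FsmnDecoderSCAMAOpt.

    Mirrors what score() does: repeatedly call forward_one_step with the token
    history and the encoder memory, threading the returned per-layer FSMN
    cache into the next step. The vocabulary size, encoder dim, lengths, and
    the start token id are illustrative assumptions.
    """
    decoder = FsmnDecoderSCAMAOpt(vocab_size=100, encoder_output_size=256)
    decoder.eval()  # forward_chunk drops the cache in training mode
    memory = torch.randn(1, 30, 256)             # encoder output (batch, maxlen_in, feat)
    ys = torch.tensor([[1]], dtype=torch.int64)  # running hypothesis, assumed <sos>=1
    cache = None
    with torch.no_grad():
        for _ in range(3):
            logp, cache = decoder.forward_one_step(ys, None, memory, cache=cache)
            next_token = logp.argmax(dim=-1, keepdim=True)  # greedy pick, (1, 1)
            ys = torch.cat((ys, next_token), dim=1)
    return ys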


class ParaformerSANMDecoder(BaseTransformerDecoder):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    Paraformer: Fast and Accurate Parallel Transformer for Non-autoregressive End-to-End Speech Recognition
    https://arxiv.org/abs/2206.08317
    """

    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        self_attention_dropout_rate: float = 0.0,
        src_attention_dropout_rate: float = 0.0,
        input_layer: str = "embed",
        use_output_layer: bool = True,
        pos_enc_class=PositionalEncoding,
        normalize_before: bool = True,
        concat_after: bool = False,
        att_layer_num: int = 6,
        kernel_size: int = 21,
        sanm_shfit: int = 0,
        tf2torch_tensor_name_prefix_torch: str = "decoder",
        tf2torch_tensor_name_prefix_tf: str = "seq2seq/decoder",
    ):
        super().__init__(
            vocab_size=vocab_size,
            encoder_output_size=encoder_output_size,
            dropout_rate=dropout_rate,
            positional_dropout_rate=positional_dropout_rate,
            input_layer=input_layer,
            use_output_layer=use_output_layer,
            pos_enc_class=pos_enc_class,
            normalize_before=normalize_before,
        )
        attention_dim = encoder_output_size

        if input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(vocab_size, attention_dim),
                # pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(vocab_size, attention_dim),
                torch.nn.LayerNorm(attention_dim),
                torch.nn.Dropout(dropout_rate),
                torch.nn.ReLU(),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        else:
            raise ValueError(f"only 'embed' or 'linear' is supported: {input_layer}")

        self.normalize_before = normalize_before
        if self.normalize_before:
            self.after_norm = LayerNorm(attention_dim)
        if use_output_layer:
            self.output_layer = torch.nn.Linear(attention_dim, vocab_size)
        else:
            self.output_layer = None

        self.att_layer_num = att_layer_num
        self.num_blocks = num_blocks
        if sanm_shfit is None:
            sanm_shfit = (kernel_size - 1) // 2
        self.decoders = repeat(
            att_layer_num,
            lambda lnum: DecoderLayerSANM(
                attention_dim,
                MultiHeadedAttentionSANMDecoder(
                    attention_dim, self_attention_dropout_rate, kernel_size, sanm_shfit=sanm_shfit
                ),
                MultiHeadedAttentionCrossAtt(
                    attention_heads, attention_dim, src_attention_dropout_rate
                ),
                PositionwiseFeedForwardDecoderSANM(attention_dim, linear_units, dropout_rate),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
        if num_blocks - att_layer_num <= 0:
            self.decoders2 = None
        else:
            self.decoders2 = repeat(
                num_blocks - att_layer_num,
                lambda lnum: DecoderLayerSANM(
                    attention_dim,
                    MultiHeadedAttentionSANMDecoder(
                        attention_dim, self_attention_dropout_rate, kernel_size, sanm_shfit=0
                    ),
                    None,
                    PositionwiseFeedForwardDecoderSANM(attention_dim, linear_units, dropout_rate),
                    dropout_rate,
                    normalize_before,
                    concat_after,
                ),
            )
        self.decoders3 = repeat(
            1,
            lambda lnum: DecoderLayerSANM(
                attention_dim,
                None,
                None,
                PositionwiseFeedForwardDecoderSANM(attention_dim, linear_units, dropout_rate),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
        self.tf2torch_tensor_name_prefix_torch = tf2torch_tensor_name_prefix_torch
        self.tf2torch_tensor_name_prefix_tf = tf2torch_tensor_name_prefix_tf

    def forward(
        self,
        hs_pad: torch.Tensor,
        hlens: torch.Tensor,
        ys_in_pad: torch.Tensor,
        ys_in_lens: torch.Tensor,
        chunk_mask: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Forward decoder.

        Args:
            hs_pad: encoded memory, float32 (batch, maxlen_in, feat)
            hlens: (batch)
            ys_in_pad: decoder input (batch, maxlen_out, feat); note that no
                embedding layer is applied here (x = tgt), unlike
                FsmnDecoderSCAMAOpt.forward
            ys_in_lens: (batch)

        Returns:
            (tuple): tuple containing:
                x: decoded token score before softmax (batch, maxlen_out, token)
                    if use_output_layer is True
                olens: (batch,)
        """
        tgt = ys_in_pad
        tgt_mask = myutils.sequence_mask(ys_in_lens, device=tgt.device)[:, :, None]

        memory = hs_pad
        memory_mask = myutils.sequence_mask(hlens, device=memory.device)[:, None, :]
        if chunk_mask is not None:
            memory_mask = memory_mask * chunk_mask
            if tgt_mask.size(1) != memory_mask.size(1):
                memory_mask = torch.cat((memory_mask, memory_mask[:, -2:-1, :]), dim=1)

        x = tgt
        x, tgt_mask, memory, memory_mask, _ = self.decoders(
            x, tgt_mask, memory, memory_mask
        )
        if self.decoders2 is not None:
            x, tgt_mask, memory, memory_mask, _ = self.decoders2(
                x, tgt_mask, memory, memory_mask
            )
        x, tgt_mask, memory, memory_mask, _ = self.decoders3(
            x, tgt_mask, memory, memory_mask
        )
        if self.normalize_before:
            x = self.after_norm(x)
        if self.output_layer is not None:
            x = self.output_layer(x)
        olens = tgt_mask.sum(1)
        return x, olens

    def score(self, ys, state, x):
        """Score."""
        ys_mask = myutils.sequence_mask(
            torch.tensor([len(ys)], dtype=torch.int32), device=x.device
        )[:, :, None]
        logp, state = self.forward_one_step(
            ys.unsqueeze(0), ys_mask, x.unsqueeze(0), cache=state
        )
        return logp.squeeze(0), state

    def forward_chunk(
        self,
        memory: torch.Tensor,
        tgt: torch.Tensor,
        cache: dict = None,
    ) -> torch.Tensor:
        """Forward decoder for one streaming chunk.

        Args:
            memory: encoded memory for this chunk, float32 (batch, maxlen_in, feat)
            tgt: decoder input for this chunk (batch, maxlen_out, feat); no
                embedding layer is applied here
            cache: dict whose "decode_fsmn" entry holds the per-layer FSMN
                caches; it is created on the first chunk and updated in place
                on every call

        Returns:
            x: decoded token score before softmax (batch, maxlen_out, token)
                if use_output_layer is True
        """
        x = tgt
        if cache["decode_fsmn"] is None:
            cache_layer_num = len(self.decoders)
            if self.decoders2 is not None:
                cache_layer_num += len(self.decoders2)
            new_cache = [None] * cache_layer_num
        else:
            new_cache = cache["decode_fsmn"]
        for i in range(self.att_layer_num):
            decoder = self.decoders[i]
            x, tgt_mask, memory, memory_mask, c_ret = decoder.forward_chunk(
                x, None, memory, None, cache=new_cache[i]
            )
            new_cache[i] = c_ret
        # run decoders2 whenever it has at least one layer
        if self.num_blocks - self.att_layer_num >= 1:
            for i in range(self.num_blocks - self.att_layer_num):
                j = i + self.att_layer_num
                decoder = self.decoders2[i]
                x, tgt_mask, memory, memory_mask, c_ret = decoder.forward_chunk(
                    x, None, memory, None, cache=new_cache[j]
                )
                new_cache[j] = c_ret
        for decoder in self.decoders3:
            x, tgt_mask, memory, memory_mask, _ = decoder.forward_chunk(
                x, None, memory, None, cache=None
            )
        if self.normalize_before:
            x = self.after_norm(x)
        if self.output_layer is not None:
            x = self.output_layer(x)
        cache["decode_fsmn"] = new_cache
        return x
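
    # Streaming cache contract for forward_chunk above, as a hedged sketch:
    # the caller owns a dict with a "decode_fsmn" key, initialized lazily on
    # the first chunk and updated in place on every call, e.g.
    #
    #     cache = {"decode_fsmn": None}
    #     logits = decoder.forward_chunk(memory_chunk, tgt_chunk, cache=cache)
    #     # ... the next chunk reuses the same dict:
    #     logits = decoder.forward_chunk(next_memory_chunk, next_tgt, cache=cache)
    #
    # where `decoder`, `memory_chunk`, `tgt_chunk`, etc. are illustrative names.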

    def forward_one_step(
        self,
        tgt: torch.Tensor,
        tgt_mask: torch.Tensor,
        memory: torch.Tensor,
        cache: List[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        """Forward one step.

        Args:
            tgt: input token ids, int64 (batch, maxlen_out)
            tgt_mask: input token mask, (batch, maxlen_out)
                dtype=torch.uint8 before PyTorch 1.2,
                dtype=torch.bool in PyTorch 1.2+ (including 1.2)
            memory: encoded memory, float32 (batch, maxlen_in, feat)
            cache: cached output list of (batch, max_time_out-1, size)

        Returns:
            y, cache: NN output value and cache per `self.decoders`.
                `y.shape` is (batch, maxlen_out, token)
        """
        x = self.embed(tgt)
        if cache is None:
            cache_layer_num = len(self.decoders)
            if self.decoders2 is not None:
                cache_layer_num += len(self.decoders2)
            cache = [None] * cache_layer_num
        new_cache = []
        # for c, decoder in zip(cache, self.decoders):
        for i in range(self.att_layer_num):
            decoder = self.decoders[i]
            c = cache[i]
            x, tgt_mask, memory, memory_mask, c_ret = decoder.forward_chunk(
                x, tgt_mask, memory, None, cache=c
            )
            new_cache.append(c_ret)

        # run decoders2 whenever it has at least one layer
        if self.num_blocks - self.att_layer_num >= 1:
            for i in range(self.num_blocks - self.att_layer_num):
                j = i + self.att_layer_num
                decoder = self.decoders2[i]
                c = cache[j]
                x, tgt_mask, memory, memory_mask, c_ret = decoder.forward_chunk(
                    x, tgt_mask, memory, None, cache=c
                )
                new_cache.append(c_ret)

        for decoder in self.decoders3:
            x, tgt_mask, memory, memory_mask, _ = decoder.forward_chunk(
                x, tgt_mask, memory, None, cache=None
            )

        if self.normalize_before:
            y = self.after_norm(x[:, -1])
        else:
            y = x[:, -1]
        if self.output_layer is not None:
            y = torch.log_softmax(self.output_layer(y), dim=-1)
        return y, new_cache
  1013. def gen_tf2torch_map_dict(self):
  1014. tensor_name_prefix_torch = self.tf2torch_tensor_name_prefix_torch
  1015. tensor_name_prefix_tf = self.tf2torch_tensor_name_prefix_tf
  1016. map_dict_local = {
  1017. ## decoder
  1018. # ffn
  1019. "{}.decoders.layeridx.norm1.weight".format(tensor_name_prefix_torch):
  1020. {"name": "{}/decoder_fsmn_layer_layeridx/decoder_ffn/LayerNorm/gamma".format(tensor_name_prefix_tf),
  1021. "squeeze": None,
  1022. "transpose": None,
  1023. }, # (256,),(256,)
  1024. "{}.decoders.layeridx.norm1.bias".format(tensor_name_prefix_torch):
  1025. {"name": "{}/decoder_fsmn_layer_layeridx/decoder_ffn/LayerNorm/beta".format(tensor_name_prefix_tf),
  1026. "squeeze": None,
  1027. "transpose": None,
  1028. }, # (256,),(256,)
  1029. "{}.decoders.layeridx.feed_forward.w_1.weight".format(tensor_name_prefix_torch):
  1030. {"name": "{}/decoder_fsmn_layer_layeridx/decoder_ffn/conv1d/kernel".format(tensor_name_prefix_tf),
  1031. "squeeze": 0,
  1032. "transpose": (1, 0),
  1033. }, # (1024,256),(1,256,1024)
  1034. "{}.decoders.layeridx.feed_forward.w_1.bias".format(tensor_name_prefix_torch):
  1035. {"name": "{}/decoder_fsmn_layer_layeridx/decoder_ffn/conv1d/bias".format(tensor_name_prefix_tf),
  1036. "squeeze": None,
  1037. "transpose": None,
  1038. }, # (1024,),(1024,)
  1039. "{}.decoders.layeridx.feed_forward.norm.weight".format(tensor_name_prefix_torch):
  1040. {"name": "{}/decoder_fsmn_layer_layeridx/decoder_ffn/LayerNorm_1/gamma".format(tensor_name_prefix_tf),
  1041. "squeeze": None,
  1042. "transpose": None,
  1043. }, # (1024,),(1024,)
  1044. "{}.decoders.layeridx.feed_forward.norm.bias".format(tensor_name_prefix_torch):
  1045. {"name": "{}/decoder_fsmn_layer_layeridx/decoder_ffn/LayerNorm_1/beta".format(tensor_name_prefix_tf),
  1046. "squeeze": None,
  1047. "transpose": None,
  1048. }, # (1024,),(1024,)
  1049. "{}.decoders.layeridx.feed_forward.w_2.weight".format(tensor_name_prefix_torch):
  1050. {"name": "{}/decoder_fsmn_layer_layeridx/decoder_ffn/conv1d_1/kernel".format(tensor_name_prefix_tf),
  1051. "squeeze": 0,
  1052. "transpose": (1, 0),
  1053. }, # (256,1024),(1,1024,256)
  1054. # fsmn
  1055. "{}.decoders.layeridx.norm2.weight".format(tensor_name_prefix_torch):
  1056. {"name": "{}/decoder_fsmn_layer_layeridx/decoder_memory_block/LayerNorm/gamma".format(
  1057. tensor_name_prefix_tf),
  1058. "squeeze": None,
  1059. "transpose": None,
  1060. }, # (256,),(256,)
  1061. "{}.decoders.layeridx.norm2.bias".format(tensor_name_prefix_torch):
  1062. {"name": "{}/decoder_fsmn_layer_layeridx/decoder_memory_block/LayerNorm/beta".format(
  1063. tensor_name_prefix_tf),
  1064. "squeeze": None,
  1065. "transpose": None,
  1066. }, # (256,),(256,)
  1067. "{}.decoders.layeridx.self_attn.fsmn_block.weight".format(tensor_name_prefix_torch):
  1068. {"name": "{}/decoder_fsmn_layer_layeridx/decoder_memory_block/depth_conv_w".format(
  1069. tensor_name_prefix_tf),
  1070. "squeeze": 0,
  1071. "transpose": (1, 2, 0),
  1072. }, # (256,1,31),(1,31,256,1)
  1073. # src att
  1074. "{}.decoders.layeridx.norm3.weight".format(tensor_name_prefix_torch):
  1075. {"name": "{}/decoder_fsmn_layer_layeridx/multi_head/LayerNorm/gamma".format(tensor_name_prefix_tf),
  1076. "squeeze": None,
  1077. "transpose": None,
  1078. }, # (256,),(256,)
  1079. "{}.decoders.layeridx.norm3.bias".format(tensor_name_prefix_torch):
  1080. {"name": "{}/decoder_fsmn_layer_layeridx/multi_head/LayerNorm/beta".format(tensor_name_prefix_tf),
  1081. "squeeze": None,
  1082. "transpose": None,
  1083. }, # (256,),(256,)
  1084. "{}.decoders.layeridx.src_attn.linear_q.weight".format(tensor_name_prefix_torch):
  1085. {"name": "{}/decoder_fsmn_layer_layeridx/multi_head/conv1d/kernel".format(tensor_name_prefix_tf),
  1086. "squeeze": 0,
  1087. "transpose": (1, 0),
  1088. }, # (256,256),(1,256,256)
  1089. "{}.decoders.layeridx.src_attn.linear_q.bias".format(tensor_name_prefix_torch):
  1090. {"name": "{}/decoder_fsmn_layer_layeridx/multi_head/conv1d/bias".format(tensor_name_prefix_tf),
  1091. "squeeze": None,
  1092. "transpose": None,
  1093. }, # (256,),(256,)
  1094. "{}.decoders.layeridx.src_attn.linear_k_v.weight".format(tensor_name_prefix_torch):
  1095. {"name": "{}/decoder_fsmn_layer_layeridx/multi_head/conv1d_1/kernel".format(tensor_name_prefix_tf),
  1096. "squeeze": 0,
  1097. "transpose": (1, 0),
  1098. }, # (1024,256),(1,256,1024)
  1099. "{}.decoders.layeridx.src_attn.linear_k_v.bias".format(tensor_name_prefix_torch):
  1100. {"name": "{}/decoder_fsmn_layer_layeridx/multi_head/conv1d_1/bias".format(tensor_name_prefix_tf),
  1101. "squeeze": None,
  1102. "transpose": None,
  1103. }, # (1024,),(1024,)
  1104. "{}.decoders.layeridx.src_attn.linear_out.weight".format(tensor_name_prefix_torch):
  1105. {"name": "{}/decoder_fsmn_layer_layeridx/multi_head/conv1d_2/kernel".format(tensor_name_prefix_tf),
  1106. "squeeze": 0,
  1107. "transpose": (1, 0),
  1108. }, # (256,256),(1,256,256)
  1109. "{}.decoders.layeridx.src_attn.linear_out.bias".format(tensor_name_prefix_torch):
  1110. {"name": "{}/decoder_fsmn_layer_layeridx/multi_head/conv1d_2/bias".format(tensor_name_prefix_tf),
  1111. "squeeze": None,
  1112. "transpose": None,
  1113. }, # (256,),(256,)
  1114. # dnn
  1115. "{}.decoders3.layeridx.norm1.weight".format(tensor_name_prefix_torch):
  1116. {"name": "{}/decoder_dnn_layer_layeridx/LayerNorm/gamma".format(tensor_name_prefix_tf),
  1117. "squeeze": None,
  1118. "transpose": None,
  1119. }, # (256,),(256,)
  1120. "{}.decoders3.layeridx.norm1.bias".format(tensor_name_prefix_torch):
  1121. {"name": "{}/decoder_dnn_layer_layeridx/LayerNorm/beta".format(tensor_name_prefix_tf),
  1122. "squeeze": None,
  1123. "transpose": None,
  1124. }, # (256,),(256,)
  1125. "{}.decoders3.layeridx.feed_forward.w_1.weight".format(tensor_name_prefix_torch):
  1126. {"name": "{}/decoder_dnn_layer_layeridx/conv1d/kernel".format(tensor_name_prefix_tf),
  1127. "squeeze": 0,
  1128. "transpose": (1, 0),
  1129. }, # (1024,256),(1,256,1024)
  1130. "{}.decoders3.layeridx.feed_forward.w_1.bias".format(tensor_name_prefix_torch):
  1131. {"name": "{}/decoder_dnn_layer_layeridx/conv1d/bias".format(tensor_name_prefix_tf),
  1132. "squeeze": None,
  1133. "transpose": None,
  1134. }, # (1024,),(1024,)
  1135. "{}.decoders3.layeridx.feed_forward.norm.weight".format(tensor_name_prefix_torch):
  1136. {"name": "{}/decoder_dnn_layer_layeridx/LayerNorm_1/gamma".format(tensor_name_prefix_tf),
  1137. "squeeze": None,
  1138. "transpose": None,
  1139. }, # (1024,),(1024,)
  1140. "{}.decoders3.layeridx.feed_forward.norm.bias".format(tensor_name_prefix_torch):
  1141. {"name": "{}/decoder_dnn_layer_layeridx/LayerNorm_1/beta".format(tensor_name_prefix_tf),
  1142. "squeeze": None,
  1143. "transpose": None,
  1144. }, # (1024,),(1024,)
  1145. "{}.decoders3.layeridx.feed_forward.w_2.weight".format(tensor_name_prefix_torch):
  1146. {"name": "{}/decoder_dnn_layer_layeridx/conv1d_1/kernel".format(tensor_name_prefix_tf),
  1147. "squeeze": 0,
  1148. "transpose": (1, 0),
  1149. }, # (256,1024),(1,1024,256)
  1150. # embed_concat_ffn
  1151. "{}.embed_concat_ffn.layeridx.norm1.weight".format(tensor_name_prefix_torch):
  1152. {"name": "{}/cif_concat/LayerNorm/gamma".format(tensor_name_prefix_tf),
  1153. "squeeze": None,
  1154. "transpose": None,
  1155. }, # (256,),(256,)
  1156. "{}.embed_concat_ffn.layeridx.norm1.bias".format(tensor_name_prefix_torch):
  1157. {"name": "{}/cif_concat/LayerNorm/beta".format(tensor_name_prefix_tf),
  1158. "squeeze": None,
  1159. "transpose": None,
  1160. }, # (256,),(256,)
  1161. "{}.embed_concat_ffn.layeridx.feed_forward.w_1.weight".format(tensor_name_prefix_torch):
  1162. {"name": "{}/cif_concat/conv1d/kernel".format(tensor_name_prefix_tf),
  1163. "squeeze": 0,
  1164. "transpose": (1, 0),
  1165. }, # (1024,256),(1,256,1024)
  1166. "{}.embed_concat_ffn.layeridx.feed_forward.w_1.bias".format(tensor_name_prefix_torch):
  1167. {"name": "{}/cif_concat/conv1d/bias".format(tensor_name_prefix_tf),
  1168. "squeeze": None,
  1169. "transpose": None,
  1170. }, # (1024,),(1024,)
  1171. "{}.embed_concat_ffn.layeridx.feed_forward.norm.weight".format(tensor_name_prefix_torch):
  1172. {"name": "{}/cif_concat/LayerNorm_1/gamma".format(tensor_name_prefix_tf),
  1173. "squeeze": None,
  1174. "transpose": None,
  1175. }, # (1024,),(1024,)
  1176. "{}.embed_concat_ffn.layeridx.feed_forward.norm.bias".format(tensor_name_prefix_torch):
  1177. {"name": "{}/cif_concat/LayerNorm_1/beta".format(tensor_name_prefix_tf),
  1178. "squeeze": None,
  1179. "transpose": None,
  1180. }, # (1024,),(1024,)
  1181. "{}.embed_concat_ffn.layeridx.feed_forward.w_2.weight".format(tensor_name_prefix_torch):
  1182. {"name": "{}/cif_concat/conv1d_1/kernel".format(tensor_name_prefix_tf),
  1183. "squeeze": 0,
  1184. "transpose": (1, 0),
  1185. }, # (256,1024),(1,1024,256)
  1186. # out norm
  1187. "{}.after_norm.weight".format(tensor_name_prefix_torch):
  1188. {"name": "{}/LayerNorm/gamma".format(tensor_name_prefix_tf),
  1189. "squeeze": None,
  1190. "transpose": None,
  1191. }, # (256,),(256,)
  1192. "{}.after_norm.bias".format(tensor_name_prefix_torch):
  1193. {"name": "{}/LayerNorm/beta".format(tensor_name_prefix_tf),
  1194. "squeeze": None,
  1195. "transpose": None,
  1196. }, # (256,),(256,)
  1197. # in embed
  1198. "{}.embed.0.weight".format(tensor_name_prefix_torch):
  1199. {"name": "{}/w_embs".format(tensor_name_prefix_tf),
  1200. "squeeze": None,
  1201. "transpose": None,
  1202. }, # (4235,256),(4235,256)
  1203. # out layer
  1204. "{}.output_layer.weight".format(tensor_name_prefix_torch):
  1205. {"name": ["{}/dense/kernel".format(tensor_name_prefix_tf), "{}/w_embs".format(tensor_name_prefix_tf)],
  1206. "squeeze": [None, None],
  1207. "transpose": [(1, 0), None],
  1208. }, # (4235,256),(256,4235)
  1209. "{}.output_layer.bias".format(tensor_name_prefix_torch):
  1210. {"name": ["{}/dense/bias".format(tensor_name_prefix_tf),
  1211. "seq2seq/2bias" if tensor_name_prefix_tf == "seq2seq/decoder/inputter_1" else "seq2seq/bias"],
  1212. "squeeze": [None, None],
  1213. "transpose": [None, None],
  1214. }, # (4235,),(4235,)
  1215. }
  1216. return map_dict_local
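
    # Worked example (illustrative only): each map entry above pairs one torch
    # parameter with a TF variable name plus the "squeeze"/"transpose" needed
    # to reconcile layouts; the trailing comments record (torch shape),(tf shape).
    # E.g. a TF conv1d kernel of shape (1, 1024, 256) becomes the (256, 1024)
    # weight of a torch Linear via:
    #
    #     data = np.squeeze(kernel_tf, axis=0)   # (1, 1024, 256) -> (1024, 256)
    #     data = np.transpose(data, (1, 0))      # (1024, 256) -> (256, 1024)
    #     weight = torch.from_numpy(data).float()
    #
    # where kernel_tf is a hypothetical numpy array standing in for the
    # checkpoint value; convert_tf2torch below applies exactly these steps.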
    def convert_tf2torch(self,
                         var_dict_tf,
                         var_dict_torch,
                         ):
        """Convert TF checkpoint variables into this decoder's torch parameters.

        Args:
            var_dict_tf: dict mapping TF variable names to numpy arrays.
            var_dict_torch: the torch state dict whose entries are to be filled.

        Returns:
            dict mapping torch parameter names to converted torch tensors.
        """
        map_dict = self.gen_tf2torch_map_dict()
        var_dict_torch_update = dict()
        decoder_layeridx_sets = set()
        # Walk the torch state dict and, for each parameter, fetch the matching
        # TF variable and apply the squeeze/transpose rules from the map dict.
        for name in sorted(var_dict_torch.keys(), reverse=False):
            names = name.split('.')
            if names[0] == self.tf2torch_tensor_name_prefix_torch:
                if names[1] == "decoders":
                    layeridx = int(names[2])
                    name_q = name.replace(".{}.".format(layeridx), ".layeridx.")
                    layeridx_bias = 0
                    layeridx += layeridx_bias
                    decoder_layeridx_sets.add(layeridx)
                    if name_q in map_dict.keys():
                        name_v = map_dict[name_q]["name"]
                        name_tf = name_v.replace("layeridx", "{}".format(layeridx))
                        data_tf = var_dict_tf[name_tf]
                        if map_dict[name_q]["squeeze"] is not None:
                            data_tf = np.squeeze(data_tf, axis=map_dict[name_q]["squeeze"])
                        if map_dict[name_q]["transpose"] is not None:
                            data_tf = np.transpose(data_tf, map_dict[name_q]["transpose"])
                        data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
                        assert var_dict_torch[name].size() == data_tf.size(), \
                            "{}, {}, {} != {}".format(name, name_tf, var_dict_torch[name].size(), data_tf.size())
                        var_dict_torch_update[name] = data_tf
                        logging.info(
                            "torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
                                name, data_tf.size(), name_v, var_dict_tf[name_tf].shape))
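                # Note: "decoders2" parameters reuse the same TF "decoders"
                # templates, but their TF layer numbering continues after the
                # first stack, hence the offset by the number of distinct
                # "decoders" layer indices collected above.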
                elif names[1] == "decoders2":
                    layeridx = int(names[2])
                    name_q = name.replace(".{}.".format(layeridx), ".layeridx.")
                    name_q = name_q.replace("decoders2", "decoders")
                    layeridx_bias = len(decoder_layeridx_sets)
                    layeridx += layeridx_bias
                    if "decoders." in name:
                        decoder_layeridx_sets.add(layeridx)
                    if name_q in map_dict.keys():
                        name_v = map_dict[name_q]["name"]
                        name_tf = name_v.replace("layeridx", "{}".format(layeridx))
                        data_tf = var_dict_tf[name_tf]
                        if map_dict[name_q]["squeeze"] is not None:
                            data_tf = np.squeeze(data_tf, axis=map_dict[name_q]["squeeze"])
                        if map_dict[name_q]["transpose"] is not None:
                            data_tf = np.transpose(data_tf, map_dict[name_q]["transpose"])
                        data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
                        assert var_dict_torch[name].size() == data_tf.size(), \
                            "{}, {}, {} != {}".format(name, name_tf, var_dict_torch[name].size(), data_tf.size())
                        var_dict_torch_update[name] = data_tf
                        logging.info(
                            "torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
                                name, data_tf.size(), name_v, var_dict_tf[name_tf].shape))
                elif names[1] == "decoders3":
                    layeridx = int(names[2])
                    name_q = name.replace(".{}.".format(layeridx), ".layeridx.")
                    layeridx_bias = 0
                    layeridx += layeridx_bias
                    if "decoders." in name:
                        decoder_layeridx_sets.add(layeridx)
                    if name_q in map_dict.keys():
                        name_v = map_dict[name_q]["name"]
                        name_tf = name_v.replace("layeridx", "{}".format(layeridx))
                        data_tf = var_dict_tf[name_tf]
                        if map_dict[name_q]["squeeze"] is not None:
                            data_tf = np.squeeze(data_tf, axis=map_dict[name_q]["squeeze"])
                        if map_dict[name_q]["transpose"] is not None:
                            data_tf = np.transpose(data_tf, map_dict[name_q]["transpose"])
                        data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
                        assert var_dict_torch[name].size() == data_tf.size(), \
                            "{}, {}, {} != {}".format(name, name_tf, var_dict_torch[name].size(), data_tf.size())
                        var_dict_torch_update[name] = data_tf
                        logging.info(
                            "torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
                                name, data_tf.size(), name_v, var_dict_tf[name_tf].shape))
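                # Note: "embed"/"output_layer" entries may carry a list of
                # candidate TF names (with per-candidate squeeze/transpose);
                # the first candidate present in var_dict_tf is used.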
                elif names[1] == "embed" or names[1] == "output_layer":
                    name_tf = map_dict[name]["name"]
                    if isinstance(name_tf, list):
                        # Fall back to the second candidate if the first is
                        # missing from the checkpoint.
                        idx_list = 0 if name_tf[0] in var_dict_tf else 1
                        data_tf = var_dict_tf[name_tf[idx_list]]
                        if map_dict[name]["squeeze"][idx_list] is not None:
                            data_tf = np.squeeze(data_tf, axis=map_dict[name]["squeeze"][idx_list])
                        if map_dict[name]["transpose"][idx_list] is not None:
                            data_tf = np.transpose(data_tf, map_dict[name]["transpose"][idx_list])
                        data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
                        assert var_dict_torch[name].size() == data_tf.size(), \
                            "{}, {}, {} != {}".format(name, name_tf, var_dict_torch[name].size(), data_tf.size())
                        var_dict_torch_update[name] = data_tf
                        logging.info(
                            "torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
                                name, data_tf.size(), name_tf[idx_list], var_dict_tf[name_tf[idx_list]].shape))
                    else:
                        data_tf = var_dict_tf[name_tf]
                        if map_dict[name]["squeeze"] is not None:
                            data_tf = np.squeeze(data_tf, axis=map_dict[name]["squeeze"])
                        if map_dict[name]["transpose"] is not None:
                            data_tf = np.transpose(data_tf, map_dict[name]["transpose"])
                        data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
                        assert var_dict_torch[name].size() == data_tf.size(), \
                            "{}, {}, {} != {}".format(name, name_tf, var_dict_torch[name].size(), data_tf.size())
                        var_dict_torch_update[name] = data_tf
                        logging.info(
                            "torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
                                name, data_tf.size(), name_tf, var_dict_tf[name_tf].shape))
                elif names[1] == "after_norm":
                    # LayerNorm gamma/beta are loaded as-is, no reshaping needed.
                    name_tf = map_dict[name]["name"]
                    data_tf = var_dict_tf[name_tf]
                    data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
                    var_dict_torch_update[name] = data_tf
                    logging.info(
                        "torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
                            name, data_tf.size(), name_tf, var_dict_tf[name_tf].shape))
                elif names[1] == "embed_concat_ffn":
                    layeridx = int(names[2])
                    name_q = name.replace(".{}.".format(layeridx), ".layeridx.")
                    layeridx_bias = 0
                    layeridx += layeridx_bias
                    if "decoders." in name:
                        decoder_layeridx_sets.add(layeridx)
                    if name_q in map_dict.keys():
                        name_v = map_dict[name_q]["name"]
                        name_tf = name_v.replace("layeridx", "{}".format(layeridx))
                        data_tf = var_dict_tf[name_tf]
                        if map_dict[name_q]["squeeze"] is not None:
                            data_tf = np.squeeze(data_tf, axis=map_dict[name_q]["squeeze"])
                        if map_dict[name_q]["transpose"] is not None:
                            data_tf = np.transpose(data_tf, map_dict[name_q]["transpose"])
                        data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
                        assert var_dict_torch[name].size() == data_tf.size(), \
                            "{}, {}, {} != {}".format(name, name_tf, var_dict_torch[name].size(), data_tf.size())
                        var_dict_torch_update[name] = data_tf
                        logging.info(
                            "torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
                                name, data_tf.size(), name_v, var_dict_tf[name_tf].shape))
        return var_dict_torch_update
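
    # Usage sketch (illustrative; `decoder` and the checkpoint path are
    # hypothetical, not defined in this file). var_dict_tf maps TF variable
    # names to numpy arrays, e.g. read via tf.train.load_checkpoint, and
    # var_dict_torch is this decoder's torch state dict:
    #
    #     reader = tf.train.load_checkpoint("/path/to/tf_checkpoint")
    #     var_dict_tf = {name: reader.get_tensor(name)
    #                    for name in reader.get_variable_to_shape_map()}
    #     var_dict_torch = decoder.state_dict()
    #     update = decoder.convert_tf2torch(var_dict_tf, var_dict_torch)
    #     # strict=False: the update covers only the mapped decoder parameters.
    #     decoder.load_state_dict(update, strict=False)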