build_dataloader.py

import logging
from pathlib import Path
from typing import Iterable
from typing import List
from typing import Union

import sentencepiece as spm
from torch.utils.data import DataLoader
from typeguard import check_argument_types

from funasr.datasets.large_datasets.dataset import Dataset
from funasr.iterators.abs_iter_factory import AbsIterFactory
from funasr.text.abs_tokenizer import AbsTokenizer

def read_symbol_table(symbol_table_file):
    """Build a symbol -> index map from a vocabulary file (one token per line) or a token list."""
    if isinstance(symbol_table_file, str):
        symbol_table = {}
        with open(symbol_table_file, "r", encoding="utf8") as fin:
            for i, line in enumerate(fin):
                char = line.strip()
                symbol_table[char] = i
    else:
        assert isinstance(symbol_table_file, list)
        symbol_table = {}
        for i, char in enumerate(symbol_table_file):
            symbol_table[char] = i
    return symbol_table
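
# Illustrative example (hypothetical file contents): a vocabulary file
#
#     <blank>
#     <s>
#     hello
#
# yields {"<blank>": 0, "<s>": 1, "hello": 2}; passing the list
# ["<blank>", "<s>", "hello"] produces the same mapping.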

def load_seg_dict(seg_dict_file):
    """Load a segmentation dictionary: each line maps a key to its space-joined pieces."""
    seg_dict = {}
    assert isinstance(seg_dict_file, str)
    with open(seg_dict_file, "r", encoding="utf8") as f:
        lines = f.readlines()
        for line in lines:
            s = line.strip().split()
            key = s[0]
            value = s[1:]
            seg_dict[key] = " ".join(value)
    return seg_dict
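
# Illustrative example (hypothetical file contents): the line
#
#     hello h e l l o
#
# yields seg_dict["hello"] == "h e l l o".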

class SentencepiecesTokenizer(AbsTokenizer):
    def __init__(self, model: Union[Path, str]):
        assert check_argument_types()
        self.model = str(model)
        # Build the SentencePieceProcessor lazily rather than here, so the
        # tokenizer stays picklable when handed to dataloader worker processes.
        self.sp = None

    def __repr__(self):
        return f'{self.__class__.__name__}(model="{self.model}")'

    def _build_sentence_piece_processor(self):
        if self.sp is None:
            self.sp = spm.SentencePieceProcessor()
            self.sp.load(self.model)

    def text2tokens(self, line: str) -> List[str]:
        self._build_sentence_piece_processor()
        return self.sp.EncodeAsPieces(line)

    def tokens2text(self, tokens: Iterable[str]) -> str:
        self._build_sentence_piece_processor()
        return self.sp.DecodePieces(list(tokens))
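
# Usage sketch (illustrative; "bpe.model" is a hypothetical trained
# SentencePiece model path, and the pieces shown are model-dependent):
#
#     tokenizer = SentencepiecesTokenizer("bpe.model")
#     pieces = tokenizer.text2tokens("hello world")  # e.g. ['▁hello', '▁world']
#     text = tokenizer.tokens2text(pieces)           # "hello world"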

class LargeDataLoader(AbsIterFactory):
    def __init__(self, args, mode="train"):
        symbol_table, seg_dict, punc_dict, bpe_tokenizer = None, None, None, None
        if hasattr(args, "token_list") and args.token_list is not None:
            symbol_table = read_symbol_table(args.token_list)
        if hasattr(args, "seg_dict_file") and args.seg_dict_file is not None:
            seg_dict = load_seg_dict(args.seg_dict_file)
        if hasattr(args, "punc_dict_file") and args.punc_dict_file is not None:
            punc_dict = read_symbol_table(args.punc_dict_file)
        if hasattr(args, "bpemodel") and args.bpemodel is not None:
            bpe_tokenizer = SentencepiecesTokenizer(args.bpemodel)
        self.dataset_conf = args.dataset_conf
        self.frontend_conf = args.frontend_conf
        logging.info("dataloader config: {}".format(self.dataset_conf))
        batch_mode = self.dataset_conf.get("batch_mode", "padding")
        data_list = args.train_data_file if mode == "train" else args.valid_data_file
        # Speed perturbation is a training-time augmentation only.
        self.dataset = Dataset(data_list, symbol_table, seg_dict, punc_dict, bpe_tokenizer,
                               self.dataset_conf, self.frontend_conf,
                               speed_perturb=args.speed_perturb if mode == "train" else None,
                               mode=mode, batch_mode=batch_mode)

    def build_iter(self, epoch, shuffle=True):
        # `shuffle` is unused here; ordering is reseeded inside the Dataset
        # via set_epoch().
        self.dataset.set_epoch(epoch)
        # batch_size=None disables automatic batching: the Dataset already
        # yields fully formed batches, so the DataLoader only provides worker
        # processes and pinned memory.
        data_loader = DataLoader(self.dataset,
                                 batch_size=None,
                                 pin_memory=True,
                                 num_workers=self.dataset_conf.get("num_workers", 8))
        return data_loader
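
# Usage sketch (illustrative; the attributes on `args` mirror the fields read
# in __init__ above, and all file paths are hypothetical):
#
#     from argparse import Namespace
#     args = Namespace(
#         token_list="tokens.txt",
#         seg_dict_file=None,
#         punc_dict_file=None,
#         bpemodel=None,
#         dataset_conf={"batch_mode": "padding", "num_workers": 4},
#         frontend_conf=None,
#         train_data_file="train.list",
#         valid_data_file="valid.list",
#         speed_perturb=None,
#     )
#     loader_factory = LargeDataLoader(args, mode="train")
#     for batch in loader_factory.build_iter(epoch=0):
#         ...  # each batch arrives pre-collated from Dataset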