chunk_iter_factory.py

import logging
from typing import Any, Dict, Iterator, List, Optional, Sequence, Tuple, Union

import numpy as np
import torch

from funasr.iterators.abs_iter_factory import AbsIterFactory
from funasr.iterators.sequence_iter_factory import SequenceIterFactory
from funasr.samplers.abs_sampler import AbsSampler


class ChunkIterFactory(AbsIterFactory):
    """Creates chunks from a sequence.

    Examples:
        >>> batches = [["id1"], ["id2"], ...]
        >>> batch_size = 128
        >>> chunk_length = 1000
        >>> iter_factory = ChunkIterFactory(dataset, batches, batch_size, chunk_length)
        >>> it = iter_factory.build_iter(epoch)
        >>> for ids, batch in it:
        ...     ...

    - The number of mini-batches varies from epoch to epoch and cannot be
      known in advance, because no length information is given to the
      IterFactory.
    - For the same reason, "num_iters_per_epoch" cannot be implemented for
      this iterator; "num_samples_per_epoch" is provided instead.
    """

    def __init__(
        self,
        dataset,
        batch_size: int,
        batches: Union[AbsSampler, Sequence[Sequence[Any]]],
        chunk_length: Union[int, str],
        chunk_shift_ratio: float = 0.5,
        num_cache_chunks: int = 1024,
        num_samples_per_epoch: Optional[int] = None,
        seed: int = 0,
        shuffle: bool = False,
        num_workers: int = 0,
        collate_fn=None,
        pin_memory: bool = False,
    ):
        assert all(len(x) == 1 for x in batches), "batch-size must be 1"

        # The underlying loader yields one sample at a time; this factory
        # cuts chunks from each sample and re-batches them.
        self.per_sample_iter_factory = SequenceIterFactory(
            dataset=dataset,
            batches=batches,
            num_iters_per_epoch=num_samples_per_epoch,
            seed=seed,
            shuffle=shuffle,
            num_workers=num_workers,
            collate_fn=collate_fn,
            pin_memory=pin_memory,
        )

        self.num_cache_chunks = max(num_cache_chunks, batch_size)
        if isinstance(chunk_length, str):
            if len(chunk_length) == 0:
                raise ValueError("e.g. 5,8 or 3-5: but got empty string")

            self.chunk_lengths = []
            for x in chunk_length.split(","):
                try:
                    sps = list(map(int, x.split("-")))
                except ValueError:
                    raise ValueError(f"e.g. 5,8 or 3-5: but got {chunk_length}")

                if len(sps) > 2:
                    raise ValueError(f"e.g. 5,8 or 3-5: but got {chunk_length}")
                elif len(sps) == 2:
                    # Append every length within the range to the candidates
                    self.chunk_lengths += list(range(sps[0], sps[1] + 1))
                else:
                    self.chunk_lengths += [sps[0]]
        else:
            # Single candidate: fixed chunk length
            self.chunk_lengths = [chunk_length]
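        # For example, chunk_length="3-5,8" yields chunk_lengths=[3, 4, 5, 8].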

        self.chunk_shift_ratio = chunk_shift_ratio
        self.batch_size = batch_size
        self.seed = seed
        self.shuffle = shuffle

    def build_iter(
        self,
        epoch: int,
        shuffle: Optional[bool] = None,
    ) -> Iterator[Tuple[List[str], Dict[str, torch.Tensor]]]:
        per_sample_loader = self.per_sample_iter_factory.build_iter(epoch, shuffle)

        if shuffle is None:
            shuffle = self.shuffle
        state = np.random.RandomState(epoch + self.seed)

        # NOTE(kamo):
        # This iterator supports multiple chunk lengths and keeps the chunks
        # of each length here until the specified number has been collected.
        cache_chunks_dict = {}
        cache_id_list_dict = {}
        for ids, batch in per_sample_loader:
            # Must be per-sample-loader
            assert len(ids) == 1, f"Must be per-sample-loader: {len(ids)}"
            assert all(len(x) == 1 for x in batch.values())

            # Get keys of sequence data
            sequence_keys = []
            for key in batch:
                if key + "_lengths" in batch:
                    sequence_keys.append(key)
            # Remove lengths data and get the first sample
            batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
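            # e.g. {"speech": [x], "speech_lengths": [n]} becomes
            # {"speech": x} (key names here are hypothetical, for illustration)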
            id_ = ids[0]

            for key in sequence_keys:
                if len(batch[key]) != len(batch[sequence_keys[0]]):
                    raise RuntimeError(
                        f"All sequences must have the same length: "
                        f"{len(batch[key])} != {len(batch[sequence_keys[0]])}"
                    )

            L = len(batch[sequence_keys[0]])
            # Select chunk length
            chunk_lengths = [lg for lg in self.chunk_lengths if lg < L]
            if len(chunk_lengths) == 0:
                logging.warning(
                    f"The length of '{id_}' is {L}, which is shorter than "
                    f"all of the chunk-length candidates: {self.chunk_lengths}"
                )
                continue

            W = int(state.choice(chunk_lengths, 1))
            cache_id_list = cache_id_list_dict.setdefault(W, [])
            cache_chunks = cache_chunks_dict.setdefault(W, {})

            # Shift width to the next chunk
            S = int(W * self.chunk_shift_ratio)
            # Number of chunks
            N = (L - W) // S + 1
            if shuffle:
                # Random start offset so the chunk grid differs per sample
                Z = state.randint(0, (L - W) % S + 1)
            else:
                Z = 0
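            # Worked example: L=1000, W=300, chunk_shift_ratio=0.5 gives
            # S=150 and N=(1000-300)//150+1 = 5 chunks; with shuffle=True, Z
            # is drawn from [0, (L-W)%S] = [0, 100].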

            # Split the sequence into chunks.
            # Note that the leftover frames that don't fill a chunk are
            # discarded.
            for k, v in batch.items():
                if k not in cache_chunks:
                    cache_chunks[k] = []
                if k in sequence_keys:
                    # Shift chunks with overlapping regions for data
                    # augmentation
                    cache_chunks[k] += [v[Z + i * S : Z + i * S + W] for i in range(N)]
                else:
                    # If not a sequence, use the whole data instead of a chunk
                    cache_chunks[k] += [v for _ in range(N)]
            cache_id_list += [id_ for _ in range(N)]

            if len(cache_id_list) > self.num_cache_chunks:
                cache_id_list, cache_chunks = yield from self._generate_mini_batches(
                    cache_id_list,
                    cache_chunks,
                    shuffle,
                    state,
                )

            cache_id_list_dict[W] = cache_id_list
            cache_chunks_dict[W] = cache_chunks
        else:
            # for-else: runs once the loop finishes normally; flush the
            # chunks still remaining in the caches
            for W in cache_id_list_dict:
                cache_id_list = cache_id_list_dict.setdefault(W, [])
                cache_chunks = cache_chunks_dict.setdefault(W, {})

                yield from self._generate_mini_batches(
                    cache_id_list,
                    cache_chunks,
                    shuffle,
                    state,
                )

    def _generate_mini_batches(
        self,
        id_list: List[str],
        batches: Dict[str, List[torch.Tensor]],
        shuffle: bool,
        state: np.random.RandomState,
    ):
        if shuffle:
            indices = np.arange(0, len(id_list))
            state.shuffle(indices)
            batches = {k: [v[i] for i in indices] for k, v in batches.items()}
            id_list = [id_list[i] for i in indices]

        bs = self.batch_size
        while len(id_list) >= bs:
            # Make a mini-batch and yield it
            yield (
                id_list[:bs],
                {k: torch.stack(v[:bs], 0) for k, v in batches.items()},
            )
            id_list = id_list[bs:]
            batches = {k: v[bs:] for k, v in batches.items()}

        # Leftovers (fewer than batch_size chunks) are returned to the caller
        # and kept in its cache
        return id_list, batches
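

# A minimal sketch (not part of the original module): it reproduces the
# chunking arithmetic from build_iter on a toy tensor, so the shift/offset
# logic above can be checked in isolation.
if __name__ == "__main__":
    v = torch.arange(10)  # toy sequence, L = 10
    L, W = len(v), 4      # chunk length W = 4
    S = int(W * 0.5)      # shift S = W * chunk_shift_ratio = 2
    N = (L - W) // S + 1  # number of chunks = 4
    Z = 0                 # no random start offset (the shuffle=False case)
    for i in range(N):
        print(v[Z + i * S : Z + i * S + W].tolist())
    # Prints [0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7], [6, 7, 8, 9]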