# trainer.py

import os
import time
import torch
import logging
from tqdm import tqdm
import torch.distributed as dist
from contextlib import nullcontext
# from torch.utils.tensorboard import SummaryWriter
from tensorboardX import SummaryWriter
from pathlib import Path

from funasr.train_utils.device_funcs import to_device
from funasr.train_utils.recursive_op import recursive_average
from funasr.train_utils.average_nbest_models import average_checkpoints


class Trainer:
    """
    A simple trainer class for training a PyTorch model, saving checkpoints at
    the end of each epoch, and optionally resuming from a saved checkpoint.

    Attributes:
        max_epoch (int): Maximum number of epochs for training.
        model (torch.nn.Module): The model to be trained.
        optim (torch.optim.Optimizer): The optimizer to use for training.
        scheduler (torch.optim.lr_scheduler._LRScheduler): The learning rate scheduler.
        dataloader_train (torch.utils.data.DataLoader): DataLoader for the training dataset.
        dataloader_val (torch.utils.data.DataLoader): DataLoader for the validation dataset.
        output_dir (str): Directory where model checkpoints will be saved.
        resume (bool): Whether to resume training from the latest checkpoint in output_dir.
    """

    def __init__(self, model,
                 optim,
                 scheduler,
                 dataloader_train,
                 dataloader_val,
                 local_rank,
                 use_ddp=False,
                 use_fsdp=False,
                 output_dir: str = "./",
                 **kwargs):
        """
        Initializes the Trainer with the model, optimizer, scheduler, dataloaders, and other settings.

        Args:
            model (torch.nn.Module): The model to be trained.
            optim (torch.optim.Optimizer): The optimizer to use for training.
            scheduler (torch.optim.lr_scheduler._LRScheduler): The learning rate scheduler.
            dataloader_train (torch.utils.data.DataLoader): The DataLoader for the training dataset.
            dataloader_val (torch.utils.data.DataLoader): The DataLoader for the validation dataset.
            local_rank (int): The local rank of this process (used for logging and progress bars).
            use_ddp (bool): Whether the model is wrapped with DistributedDataParallel.
            use_fsdp (bool): Whether the model is wrapped with FullyShardedDataParallel.
            output_dir (str): The directory where model checkpoints will be saved. Default is "./".
            **kwargs: Additional keyword arguments:
                max_epoch (int): The maximum number of epochs for training. Default is 100.
                resume (bool): Whether to resume from the latest checkpoint in output_dir. Default is True.
                avg_nbest_model (int): Number of checkpoints to average after training. Default is 5.
                log_interval (int): Number of batches between progress-bar/TensorBoard updates. Default is 50.
                accum_grad (int): Number of batches over which to accumulate gradients. Default is 1.
                grad_clip (float, optional): Max gradient norm for clipping; no clipping if unset.
                grad_clip_type (float): Norm type used for gradient clipping. Default is 2.0.
        """
        self.model = model
        self.optim = optim
        self.scheduler = scheduler
        self.dataloader_train = dataloader_train
        self.dataloader_val = dataloader_val
        self.output_dir = output_dir
        self.resume = kwargs.get('resume', True)
        self.start_epoch = 0
        self.max_epoch = kwargs.get('max_epoch', 100)
        self.local_rank = local_rank
        self.use_ddp = use_ddp
        self.use_fsdp = use_fsdp
        self.device = next(model.parameters()).device
        self.avg_nbest_model = kwargs.get("avg_nbest_model", 5)
        self.kwargs = kwargs
        self.log_interval = kwargs.get("log_interval", 50)
        self.batch_total = 0

        # Query the process group if torch.distributed has been initialized;
        # otherwise fall back to a single-process (single-shard) setup.
        if dist.is_available() and dist.is_initialized():
            rank = dist.get_rank()
            world_size = dist.get_world_size()
        else:
            rank = 0
            world_size = 1
            logging.warning("distributed is not initialized, only single shard")
        self.rank = rank
        self.world_size = world_size

        os.makedirs(os.path.join(self.output_dir, "tensorboard"), exist_ok=True)
        self.writer = SummaryWriter(os.path.join(self.output_dir, "tensorboard")) if rank == 0 else None

    def _save_checkpoint(self, epoch):
        """
        Saves a checkpoint containing the model's state, the optimizer's state,
        and the scheduler's state at the end of the given epoch. This method is
        intended to be called at the end of each epoch to save the training progress.

        Args:
            epoch (int): The epoch number at which the checkpoint is being saved.
        """
        state = {
            'epoch': epoch,
            'state_dict': self.model.state_dict(),
            'optimizer': self.optim.state_dict(),
            'scheduler': self.scheduler.state_dict(),
        }
        # Create output directory if it does not exist
        os.makedirs(self.output_dir, exist_ok=True)
        filename = os.path.join(self.output_dir, f'model.pt.ep{epoch}')
        torch.save(state, filename)
        print(f'Checkpoint saved to {filename}')
        # Refresh the "latest checkpoint" symlink; link to the basename so the
        # symlink stays valid regardless of how output_dir was specified.
        latest = Path(os.path.join(self.output_dir, 'model.pt'))
        if latest.is_symlink() or latest.exists():
            latest.unlink()
        latest.symlink_to(f'model.pt.ep{epoch}')

    def _resume_checkpoint(self, resume_path):
        """
        Resumes training from a checkpoint in the given directory.
        Loads the model's state, the optimizer's state, and the scheduler's state.

        Args:
            resume_path (str): The directory containing the 'model.pt' checkpoint to resume from.
        """
        ckpt = os.path.join(resume_path, "model.pt")
        if os.path.isfile(ckpt):
            # Map the checkpoint onto the model's current device so resuming
            # works even if the checkpoint was saved from a different device.
            checkpoint = torch.load(ckpt, map_location=self.device)
            self.start_epoch = checkpoint['epoch'] + 1
            self.model.load_state_dict(checkpoint['state_dict'])
            self.optim.load_state_dict(checkpoint['optimizer'])
            self.scheduler.load_state_dict(checkpoint['scheduler'])
            print(f"Checkpoint loaded successfully from '{ckpt}'")
        else:
            print(f"No checkpoint found at '{ckpt}', starting from scratch")
        if self.use_ddp or self.use_fsdp:
            dist.barrier()

    def run(self):
        """
        Starts the training process, iterating over epochs, training the model,
        and saving checkpoints at the end of each epoch.
        """
        if self.resume:
            self._resume_checkpoint(self.output_dir)

        for epoch in range(self.start_epoch, self.max_epoch + 1):
            self._train_epoch(epoch)

            if self.use_ddp or self.use_fsdp:
                dist.barrier()

            self._validate_epoch(epoch)

            if self.use_ddp or self.use_fsdp:
                dist.barrier()

            if self.rank == 0:
                self._save_checkpoint(epoch)

            if self.use_ddp or self.use_fsdp:
                dist.barrier()

            self.scheduler.step()

        if self.rank == 0:
            average_checkpoints(self.output_dir, self.avg_nbest_model)

        if self.use_ddp or self.use_fsdp:
            dist.barrier()

        if self.writer:
            self.writer.close()

    def _train_epoch(self, epoch):
        """
        Defines the training process for a single epoch with gradient accumulation.

        Args:
            epoch (int): The current epoch number.
        """
        self.model.train()
        pbar = tqdm(colour="blue",
                    desc=f"rank: {self.local_rank}, Training Epoch: {epoch + 1}",
                    total=len(self.dataloader_train),
                    dynamic_ncols=True)
        # Set the number of steps for gradient accumulation
        accum_grad = self.kwargs.get("accum_grad", 1)
        # Initialize the gradient accumulation
        self.optim.zero_grad()
        speed_stats = {}
        time5 = time.perf_counter()
        for batch_idx, batch in enumerate(self.dataloader_train):
            self.batch_total += 1
            time1 = time.perf_counter()
            speed_stats["data_load"] = f"{time1 - time5:0.3f}"

            batch = to_device(batch, self.device)

            # Skip gradient synchronization while accumulating: no_sync() exists
            # only on DDP/FSDP-wrapped models, and the optimizer step fires when
            # (batch_idx + 1) % accum_grad == 0, so that is the batch that must sync.
            is_accum_step = (batch_idx + 1) % accum_grad != 0
            my_context = self.model.no_sync if (self.use_ddp or self.use_fsdp) and is_accum_step else nullcontext
            with my_context():
                time2 = time.perf_counter()
                retval = self.model(**batch)
                torch.cuda.empty_cache()
                time3 = time.perf_counter()
                speed_stats["forward_time"] = f"{time3 - time2:0.3f}"
                loss, stats, weight = retval
                stats = {k: v for k, v in stats.items() if v is not None}
                if self.use_ddp or self.use_fsdp:
                    # Apply weighted averaging for loss and stats
                    loss = (loss * weight.type(loss.dtype)).sum()
                    # if distributed, this method can also apply all_reduce()
                    stats, weight = recursive_average(stats, weight, distributed=True)
                    # Now weight is summation over all workers
                    loss /= weight
                    # Multiply world_size because DistributedDataParallel
                    # automatically normalizes the gradient by world_size.
                    loss *= self.world_size

                # Scale the loss since we're not updating for every mini-batch
                loss = loss / accum_grad
                loss.backward()
                time4 = time.perf_counter()
                speed_stats["backward_time"] = f"{time4 - time3:0.3f}"
            # Perform an optimizer step only after accumulating enough gradients
            if (batch_idx + 1) % accum_grad == 0 or (batch_idx + 1) == len(self.dataloader_train):
                # Perform gradient clipping if it is set
                if self.kwargs.get("grad_clip", None) is not None:
                    grad_norm = torch.nn.utils.clip_grad_norm_(
                        self.model.parameters(),
                        max_norm=self.kwargs.get("grad_clip", 10.0),
                        norm_type=self.kwargs.get("grad_clip_type", 2.0),
                    )
                    if not torch.isfinite(grad_norm):
                        logging.warning(
                            f"The grad norm is {grad_norm}. Skipping updating the model."
                        )
                        self.optim.zero_grad()  # Reset gradients
                        continue

                # Execute an optimization step (update model parameters)
                if self.use_ddp or self.use_fsdp:
                    dist.barrier()
                self.optim.step()
                self.scheduler.step()
                # Clear gradients for the next accumulation stage
                self.optim.zero_grad()

            total_time = f"{time.perf_counter() - time5:0.3f}"
            time5 = time.perf_counter()
            speed_stats["optim_time"] = f"{time5 - time4:0.3f}"
            speed_stats["total_time"] = total_time
            if (batch_idx + 1) % self.log_interval == 0 or (batch_idx + 1) == len(self.dataloader_train):
                pbar.update(self.log_interval)
                if torch.cuda.is_available():
                    gpu_info = "GPU, memory: {:.3f} GB, " \
                               "{:.3f} GB, " \
                               "{:.3f} GB, " \
                               "{:.3f} GB".format(torch.cuda.memory_allocated() / 1024 / 1024 / 1024,
                                                  torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024,
                                                  torch.cuda.memory_reserved() / 1024 / 1024 / 1024,
                                                  torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024,
                                                  )
                else:
                    gpu_info = ""
                lr = self.scheduler.get_last_lr()[0]
                description = (
                    f"rank: {self.local_rank}, "
                    f"epoch: {epoch}/{self.max_epoch}, "
                    f"step: {batch_idx + 1}/{len(self.dataloader_train)}, total: {self.batch_total}, "
                    f"(loss: {loss.detach().cpu().item():.3f}), "
                    f"(lr: {lr:.3e}), "
                    f"{[(k, round(v.cpu().item(), 3)) for k, v in stats.items()]}, "
                    f"{speed_stats}, "
                    f"{gpu_info}"
                )
                pbar.set_description(description)
                if self.writer:
                    self.writer.add_scalar(f'rank{self.local_rank}_Loss/train', loss.item(),
                                           epoch * len(self.dataloader_train) + batch_idx)
                    for key, var in stats.items():
                        self.writer.add_scalar(f'rank{self.local_rank}_{key}/train', var.item(),
                                               epoch * len(self.dataloader_train) + batch_idx)
                    for key, var in speed_stats.items():
                        # speed_stats values are formatted strings; parse them with
                        # float() instead of eval() before logging.
                        self.writer.add_scalar(f'rank{self.local_rank}_{key}/train', float(var),
                                               epoch * len(self.dataloader_train) + batch_idx)

            # if batch_idx == 2:
            #     break
        pbar.close()

    def _validate_epoch(self, epoch):
        """
        Runs the validation loop for a single epoch, logging the validation
        loss and stats without updating the model.

        Args:
            epoch (int): The current epoch number.
        """
        self.model.eval()

        with torch.no_grad():
            pbar = tqdm(colour="red",
                        desc=f"rank: {self.local_rank}, Validation Epoch: {epoch + 1}",
                        total=len(self.dataloader_val),
                        dynamic_ncols=True)
            speed_stats = {}
            time5 = time.perf_counter()
            for batch_idx, batch in enumerate(self.dataloader_val):
                time1 = time.perf_counter()
                speed_stats["data_load"] = f"{time1 - time5:0.3f}"

                batch = to_device(batch, self.device)
                time2 = time.perf_counter()
                retval = self.model(**batch)
                time3 = time.perf_counter()
                speed_stats["forward_time"] = f"{time3 - time2:0.3f}"

                loss, stats, weight = retval
                stats = {k: v for k, v in stats.items() if v is not None}
                if self.use_ddp or self.use_fsdp:
                    # Apply weighted averaging for loss and stats
                    loss = (loss * weight.type(loss.dtype)).sum()
                    # if distributed, this method can also apply all_reduce()
                    stats, weight = recursive_average(stats, weight, distributed=True)
                    # Now weight is summation over all workers
                    loss /= weight
                    # No backward pass here, so unlike the training loop there is
                    # no DDP gradient normalization to compensate for and no
                    # accum_grad scaling to apply.
                time4 = time.perf_counter()
                if (batch_idx + 1) % self.log_interval == 0 or (batch_idx + 1) == len(self.dataloader_val):
                    pbar.update(self.log_interval)
                    description = (
                        f"rank: {self.local_rank}, "
                        f"validation epoch: {epoch}/{self.max_epoch}, "
                        f"step: {batch_idx + 1}/{len(self.dataloader_val)}, "
                        f"(loss: {loss.detach().cpu().item():.3f}), "
                        f"{[(k, round(v.cpu().item(), 3)) for k, v in stats.items()]}, "
                        f"{speed_stats}, "
                    )
                    pbar.set_description(description)
                    if self.writer:
                        self.writer.add_scalar(f"rank{self.local_rank}_Loss/val", loss.item(),
                                               epoch * len(self.dataloader_val) + batch_idx)
                        for key, var in stats.items():
                            self.writer.add_scalar(f'rank{self.local_rank}_{key}/val', var.item(),
                                                   epoch * len(self.dataloader_val) + batch_idx)
                        for key, var in speed_stats.items():
                            # speed_stats values are formatted strings; parse them with
                            # float() instead of eval() before logging.
                            self.writer.add_scalar(f'rank{self.local_rank}_{key}/val', float(var),
                                                   epoch * len(self.dataloader_val) + batch_idx)
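

# ---------------------------------------------------------------------------
# Usage sketch: a minimal, single-process example of how this Trainer might be
# driven end to end. ToyDataset, ToyModel, the output directory and every
# hyper-parameter below are illustrative assumptions for demonstration only;
# in FunASR the model, optimizer, scheduler and dataloaders are normally built
# from configuration files by the training entry point before being handed to
# the Trainer.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from torch.utils.data import DataLoader, Dataset

    class ToyDataset(Dataset):
        """Random regression data packaged as the dict batches Trainer expects."""

        def __len__(self):
            return 32

        def __getitem__(self, idx):
            return {"x": torch.randn(8), "y": torch.randn(1)}

    class ToyModel(torch.nn.Module):
        """Minimal model whose forward returns a (loss, stats, weight) tuple,
        mirroring the interface the training and validation loops rely on."""

        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(8, 1)

        def forward(self, x, y):
            loss = torch.nn.functional.mse_loss(self.linear(x), y)
            stats = {"loss": loss.detach()}
            weight = torch.tensor(float(x.shape[0]))
            return loss, stats, weight

    model = ToyModel()
    optim = torch.optim.Adam(model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=10, gamma=0.9)

    trainer = Trainer(
        model=model,
        optim=optim,
        scheduler=scheduler,
        dataloader_train=DataLoader(ToyDataset(), batch_size=4),
        dataloader_val=DataLoader(ToyDataset(), batch_size=4),
        local_rank=0,
        output_dir="./exp_toy",  # hypothetical output directory
        max_epoch=1,             # runs epochs 0 and 1
        log_interval=2,
        accum_grad=2,            # exercises gradient accumulation
        avg_nbest_model=2,       # assumes average_checkpoints can average the two saved checkpoints
    )
    trainer.run()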