# Copyright ESPnet (https://github.com/espnet/espnet). All Rights Reserved.
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Trainer module."""
import argparse
from contextlib import contextmanager
import dataclasses
from dataclasses import is_dataclass
from distutils.version import LooseVersion
import logging
from pathlib import Path
import time
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union

import humanfriendly
import oss2
from io import BytesIO
import os
import numpy as np
import torch
import torch.nn
import torch.optim
from typeguard import check_argument_types

from funasr.iterators.abs_iter_factory import AbsIterFactory
from funasr.main_funcs.average_nbest_models import average_nbest_models
from funasr.main_funcs.calculate_all_attentions import calculate_all_attentions
from funasr.schedulers.abs_scheduler import AbsBatchStepScheduler
from funasr.schedulers.abs_scheduler import AbsEpochStepScheduler
from funasr.schedulers.abs_scheduler import AbsScheduler
from funasr.schedulers.abs_scheduler import AbsValEpochStepScheduler
from funasr.torch_utils.add_gradient_noise import add_gradient_noise
from funasr.torch_utils.device_funcs import to_device
from funasr.torch_utils.recursive_op import recursive_average
from funasr.torch_utils.set_all_random_seed import set_all_random_seed
from funasr.train.abs_espnet_model import AbsESPnetModel
from funasr.train.distributed_utils import DistributedOption
from funasr.train.reporter import Reporter
from funasr.train.reporter import SubReporter
from funasr.utils.build_dataclass import build_dataclass

if torch.distributed.is_available():
    from torch.distributed import ReduceOp

if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
    from torch.cuda.amp import autocast
    from torch.cuda.amp import GradScaler
else:
    # Nothing to do if torch<1.6.0
    @contextmanager
    def autocast(enabled=True):
        yield

    GradScaler = None

try:
    import fairscale
except ImportError:
    fairscale = None


@dataclasses.dataclass
class TrainerOptions:
    ngpu: int
    resume: bool
    use_amp: bool
    train_dtype: str
    grad_noise: bool
    accum_grad: int
    grad_clip: float
    grad_clip_type: float
    log_interval: Optional[int]
    no_forward_run: bool
    use_tensorboard: bool
    use_wandb: bool
    output_dir: Union[Path, str]
    max_epoch: int
    max_update: int
    seed: int
    sharded_ddp: bool
    patience: Optional[int]
    keep_nbest_models: Union[int, List[int]]
    nbest_averaging_interval: int
    early_stopping_criterion: Sequence[str]
    best_model_criterion: Sequence[Sequence[str]]
    val_scheduler_criterion: Sequence[str]
    unused_parameters: bool
    wandb_model_log_interval: int
    use_pai: bool
    oss_bucket: Union[oss2.Bucket, None]
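

# NOTE: Illustrative sketch, not part of the upstream API. TrainerOptions is normally
# built from a parsed argparse.Namespace via Trainer.build_options() below, which copies
# the same-named attributes with build_dataclass (the exact behavior of build_dataclass
# is assumed here), e.g.:
#
#     args = parser.parse_args()                # namespace carrying ngpu, resume, ...
#     options = Trainer.build_options(args)     # -> TrainerOptions(ngpu=..., resume=..., ...)
#
# Every field declared above is typically expected to be present on the namespace.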


class Trainer:
    """Trainer having an optimizer.

    If you'd like to use multiple optimizers, then inherit this class
    and override the methods if necessary - at least "train_one_epoch()"

    >>> class TwoOptimizerTrainer(Trainer):
    ...     @classmethod
    ...     def add_arguments(cls, parser):
    ...         ...
    ...
    ...     @classmethod
    ...     def train_one_epoch(cls, model, optimizers, ...):
    ...         loss1 = model.model1(...)
    ...         loss1.backward()
    ...         optimizers[0].step()
    ...
    ...         loss2 = model.model2(...)
    ...         loss2.backward()
    ...         optimizers[1].step()
    """

    def __init__(self):
        raise RuntimeError("This class can't be instantiated.")

    @classmethod
    def build_options(cls, args: argparse.Namespace) -> TrainerOptions:
        """Build options consumed by train(), eval()"""
        assert check_argument_types()
        return build_dataclass(TrainerOptions, args)

    @classmethod
    def add_arguments(cls, parser: argparse.ArgumentParser):
        """Reserved for future development of another Trainer"""
        pass

    @staticmethod
    def resume(
        checkpoint: Union[str, Path],
        model: torch.nn.Module,
        reporter: Reporter,
        optimizers: Sequence[torch.optim.Optimizer],
        schedulers: Sequence[Optional[AbsScheduler]],
        scaler: Optional[GradScaler],
        ngpu: int = 0,
    ):
        states = torch.load(
            checkpoint,
            map_location=f"cuda:{torch.cuda.current_device()}" if ngpu > 0 else "cpu",
        )
        model.load_state_dict(states["model"])
        reporter.load_state_dict(states["reporter"])
        for optimizer, state in zip(optimizers, states["optimizers"]):
            optimizer.load_state_dict(state)
        for scheduler, state in zip(schedulers, states["schedulers"]):
            if scheduler is not None:
                scheduler.load_state_dict(state)
        if scaler is not None:
            if states["scaler"] is None:
                logging.warning("scaler state is not found")
            else:
                scaler.load_state_dict(states["scaler"])

        logging.info(f"The training was resumed using {checkpoint}")
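
    # The checkpoint read by resume() is the dict written by run() below, roughly:
    #
    #     {
    #         "model": ...,         # model.state_dict()
    #         "reporter": ...,      # Reporter state
    #         "optimizers": [...],  # one state_dict per optimizer
    #         "schedulers": [...],  # one state_dict (or None) per scheduler
    #         "scaler": ...,        # GradScaler state, or None when AMP is off
    #     }
    #
    # The PAI/OSS branch additionally stores an "ema_model" entry, which resume()
    # does not load back.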

    @classmethod
    def run(
        cls,
        model: AbsESPnetModel,
        optimizers: Sequence[torch.optim.Optimizer],
        schedulers: Sequence[Optional[AbsScheduler]],
        train_iter_factory: AbsIterFactory,
        valid_iter_factory: AbsIterFactory,
        trainer_options,
        distributed_option: DistributedOption,
    ) -> None:
        """Perform training. This method performs the main process of training."""
        assert check_argument_types()
        # NOTE(kamo): Don't check the type of trainer_options more strictly here
        assert is_dataclass(trainer_options), type(trainer_options)
        assert len(optimizers) == len(schedulers), (len(optimizers), len(schedulers))

        if isinstance(trainer_options.keep_nbest_models, int):
            keep_nbest_models = [trainer_options.keep_nbest_models]
        else:
            if len(trainer_options.keep_nbest_models) == 0:
                logging.warning("No keep_nbest_models is given. Change to [1]")
                trainer_options.keep_nbest_models = [1]
            keep_nbest_models = trainer_options.keep_nbest_models

        output_dir = Path(trainer_options.output_dir)
        reporter = Reporter()

        if trainer_options.use_amp:
            if LooseVersion(torch.__version__) < LooseVersion("1.6.0"):
                raise RuntimeError(
                    "Require torch>=1.6.0 for Automatic Mixed Precision"
                )
            if trainer_options.sharded_ddp:
                if fairscale is None:
                    raise RuntimeError(
                        "Requiring fairscale. Do 'pip install fairscale'"
                    )
                scaler = fairscale.optim.grad_scaler.ShardedGradScaler()
            else:
                scaler = GradScaler()
        else:
            scaler = None

        if trainer_options.resume and (output_dir / "checkpoint.pb").exists():
            cls.resume(
                checkpoint=output_dir / "checkpoint.pb",
                model=model,
                optimizers=optimizers,
                schedulers=schedulers,
                reporter=reporter,
                scaler=scaler,
                ngpu=trainer_options.ngpu,
            )

        start_epoch = reporter.get_epoch() + 1
        if start_epoch == trainer_options.max_epoch + 1:
            logging.warning(
                f"The training has already reached max_epoch: {start_epoch}"
            )

        if distributed_option.distributed:
            if trainer_options.sharded_ddp:
                dp_model = fairscale.nn.data_parallel.ShardedDataParallel(
                    module=model,
                    sharded_optimizer=optimizers,
                )
            else:
                dp_model = torch.nn.parallel.DistributedDataParallel(
                    model, find_unused_parameters=trainer_options.unused_parameters
                )
        elif distributed_option.ngpu > 1:
            dp_model = torch.nn.parallel.DataParallel(
                model,
                device_ids=list(range(distributed_option.ngpu)),
            )
        else:
            # NOTE(kamo): DataParallel also should work with ngpu=1,
            # but for debuggability it's better to keep this block.
            dp_model = model

        if trainer_options.use_tensorboard and (
            not distributed_option.distributed or distributed_option.dist_rank == 0
        ):
            from torch.utils.tensorboard import SummaryWriter

            if trainer_options.use_pai:
                train_summary_writer = SummaryWriter(
                    os.path.join(trainer_options.output_dir, "tensorboard/train")
                )
                valid_summary_writer = SummaryWriter(
                    os.path.join(trainer_options.output_dir, "tensorboard/valid")
                )
            else:
                train_summary_writer = SummaryWriter(
                    str(output_dir / "tensorboard" / "train")
                )
                valid_summary_writer = SummaryWriter(
                    str(output_dir / "tensorboard" / "valid")
                )
        else:
            train_summary_writer = None

        start_time = time.perf_counter()
        for iepoch in range(start_epoch, trainer_options.max_epoch + 1):
            if iepoch != start_epoch:
                logging.info(
                    "{}/{}epoch started. Estimated time to finish: {}".format(
                        iepoch,
                        trainer_options.max_epoch,
                        humanfriendly.format_timespan(
                            (time.perf_counter() - start_time)
                            / (iepoch - start_epoch)
                            * (trainer_options.max_epoch - iepoch + 1)
                        ),
                    )
                )
            else:
                logging.info(f"{iepoch}/{trainer_options.max_epoch}epoch started")
            set_all_random_seed(trainer_options.seed + iepoch)

            reporter.set_epoch(iepoch)
            # 1. Train and validation for one-epoch
            with reporter.observe("train") as sub_reporter:
                all_steps_are_invalid, max_update_stop = cls.train_one_epoch(
                    model=dp_model,
                    optimizers=optimizers,
                    schedulers=schedulers,
                    iterator=train_iter_factory.build_iter(iepoch),
                    reporter=sub_reporter,
                    scaler=scaler,
                    summary_writer=train_summary_writer,
                    options=trainer_options,
                    distributed_option=distributed_option,
                )

            with reporter.observe("valid") as sub_reporter:
                cls.validate_one_epoch(
                    model=dp_model,
                    iterator=valid_iter_factory.build_iter(iepoch),
                    reporter=sub_reporter,
                    options=trainer_options,
                    distributed_option=distributed_option,
                )

            # 2. LR Scheduler step
            for scheduler in schedulers:
                if isinstance(scheduler, AbsValEpochStepScheduler):
                    scheduler.step(
                        reporter.get_value(*trainer_options.val_scheduler_criterion)
                    )
                elif isinstance(scheduler, AbsEpochStepScheduler):
                    scheduler.step()
            if trainer_options.sharded_ddp:
                for optimizer in optimizers:
                    if isinstance(optimizer, fairscale.optim.oss.OSS):
                        optimizer.consolidate_state_dict()

            if not distributed_option.distributed or distributed_option.dist_rank == 0:
                # 3. Report the results
                logging.info(reporter.log_message())
                if train_summary_writer is not None:
                    reporter.tensorboard_add_scalar(train_summary_writer, key1="train")
                    reporter.tensorboard_add_scalar(valid_summary_writer, key1="valid")
                if trainer_options.use_wandb:
                    reporter.wandb_log()

                # save tensorboard on oss
                if trainer_options.use_pai and train_summary_writer is not None:

                    def write_tensorboard_summary(summary_writer_path, oss_bucket):
                        file_list = []
                        for root, dirs, files in os.walk(summary_writer_path, topdown=False):
                            for name in files:
                                file_full_path = os.path.join(root, name)
                                file_list.append(file_full_path)
                        for file_full_path in file_list:
                            with open(file_full_path, "rb") as f:
                                oss_bucket.put_object(file_full_path, f)

                    write_tensorboard_summary(
                        os.path.join(trainer_options.output_dir, "tensorboard/train"),
                        trainer_options.oss_bucket,
                    )
                    write_tensorboard_summary(
                        os.path.join(trainer_options.output_dir, "tensorboard/valid"),
                        trainer_options.oss_bucket,
                    )

                # 4. Save/Update the checkpoint
                if trainer_options.use_pai:
                    buffer = BytesIO()
                    torch.save(
                        {
                            "model": model.state_dict(),
                            "reporter": reporter.state_dict(),
                            "optimizers": [o.state_dict() for o in optimizers],
                            "schedulers": [
                                s.state_dict() if s is not None else None
                                for s in schedulers
                            ],
                            "scaler": scaler.state_dict() if scaler is not None else None,
                            "ema_model": model.encoder.ema.model.state_dict()
                            if hasattr(model.encoder, "ema") and model.encoder.ema is not None
                            else None,
                        },
                        buffer,
                    )
                    trainer_options.oss_bucket.put_object(
                        os.path.join(trainer_options.output_dir, "checkpoint.pb"),
                        buffer.getvalue(),
                    )
                else:
                    torch.save(
                        {
                            "model": model.state_dict(),
                            "reporter": reporter.state_dict(),
                            "optimizers": [o.state_dict() for o in optimizers],
                            "schedulers": [
                                s.state_dict() if s is not None else None
                                for s in schedulers
                            ],
                            "scaler": scaler.state_dict() if scaler is not None else None,
                        },
                        output_dir / "checkpoint.pb",
                    )

                # 5. Save and log the model and update the link to the best model
                if trainer_options.use_pai:
                    buffer = BytesIO()
                    torch.save(model.state_dict(), buffer)
                    trainer_options.oss_bucket.put_object(
                        os.path.join(trainer_options.output_dir, f"{iepoch}epoch.pb"),
                        buffer.getvalue(),
                    )
                else:
                    torch.save(model.state_dict(), output_dir / f"{iepoch}epoch.pb")

                # Creates a sym link latest.pb -> {iepoch}epoch.pb
                if trainer_options.use_pai:
                    p = os.path.join(trainer_options.output_dir, "latest.pb")
                    if trainer_options.oss_bucket.object_exists(p):
                        trainer_options.oss_bucket.delete_object(p)
                    trainer_options.oss_bucket.copy_object(
                        trainer_options.oss_bucket.bucket_name,
                        os.path.join(trainer_options.output_dir, f"{iepoch}epoch.pb"),
                        p,
                    )
                else:
                    p = output_dir / "latest.pb"
                    if p.is_symlink() or p.exists():
                        p.unlink()
                    p.symlink_to(f"{iepoch}epoch.pb")

                _improved = []
                for _phase, k, _mode in trainer_options.best_model_criterion:
                    # e.g. _phase, k, _mode = "train", "loss", "min"
                    if reporter.has(_phase, k):
                        best_epoch = reporter.get_best_epoch(_phase, k, _mode)
                        # Creates sym links if it's the best result
                        if best_epoch == iepoch:
                            if trainer_options.use_pai:
                                p = os.path.join(
                                    trainer_options.output_dir, f"{_phase}.{k}.best.pb"
                                )
                                if trainer_options.oss_bucket.object_exists(p):
                                    trainer_options.oss_bucket.delete_object(p)
                                trainer_options.oss_bucket.copy_object(
                                    trainer_options.oss_bucket.bucket_name,
                                    os.path.join(
                                        trainer_options.output_dir, f"{iepoch}epoch.pb"
                                    ),
                                    p,
                                )
                            else:
                                p = output_dir / f"{_phase}.{k}.best.pb"
                                if p.is_symlink() or p.exists():
                                    p.unlink()
                                p.symlink_to(f"{iepoch}epoch.pb")
                            _improved.append(f"{_phase}.{k}")
                if len(_improved) == 0:
                    logging.info("There are no improvements in this epoch")
                else:
                    logging.info(
                        "The best model has been updated: " + ", ".join(_improved)
                    )

                log_model = (
                    trainer_options.wandb_model_log_interval > 0
                    and iepoch % trainer_options.wandb_model_log_interval == 0
                )
                if log_model and trainer_options.use_wandb:
                    import wandb

                    logging.info("Logging Model on this epoch :::::")
                    artifact = wandb.Artifact(
                        name=f"model_{wandb.run.id}",
                        type="model",
                        metadata={"improved": _improved},
                    )
                    artifact.add_file(str(output_dir / f"{iepoch}epoch.pb"))
                    aliases = [
                        f"epoch-{iepoch}",
                        "best" if best_epoch == iepoch else "",
                    ]
                    wandb.log_artifact(artifact, aliases=aliases)

                # 6. Remove the model files excluding n-best epoch and latest epoch
                _removed = []
                # Get the union set of the n-best among multiple criterion
                nbests = set().union(
                    *[
                        set(reporter.sort_epochs(ph, k, m)[: max(keep_nbest_models)])
                        for ph, k, m in trainer_options.best_model_criterion
                        if reporter.has(ph, k)
                    ]
                )

                # Generated n-best averaged model
                if (
                    trainer_options.nbest_averaging_interval > 0
                    and iepoch % trainer_options.nbest_averaging_interval == 0
                ):
                    average_nbest_models(
                        reporter=reporter,
                        output_dir=output_dir,
                        best_model_criterion=trainer_options.best_model_criterion,
                        nbest=keep_nbest_models,
                        suffix=f"till{iepoch}epoch",
                        oss_bucket=trainer_options.oss_bucket,
                        pai_output_dir=trainer_options.output_dir,
                    )

                for e in range(1, iepoch):
                    if trainer_options.use_pai:
                        p = os.path.join(trainer_options.output_dir, f"{e}epoch.pb")
                        if trainer_options.oss_bucket.object_exists(p) and e not in nbests:
                            trainer_options.oss_bucket.delete_object(p)
                            _removed.append(str(p))
                    else:
                        p = output_dir / f"{e}epoch.pb"
                        if p.exists() and e not in nbests:
                            p.unlink()
                            _removed.append(str(p))
                if len(_removed) != 0:
                    logging.info("The model files were removed: " + ", ".join(_removed))

            # 7. If no updates happened, stop the training
            if all_steps_are_invalid:
                logging.warning(
                    f"The gradients at all steps are invalid in this epoch. "
                    f"Something seems wrong. This training was stopped at {iepoch}epoch"
                )
                break

            if max_update_stop:
                logging.info(
                    f"Stopping training due to "
                    f"num_updates: {trainer_options.num_updates} >= "
                    f"max_update: {trainer_options.max_update}"
                )
                break

            # 8. Check early stopping
            if trainer_options.patience is not None:
                if reporter.check_early_stopping(
                    trainer_options.patience, *trainer_options.early_stopping_criterion
                ):
                    break

        else:
            logging.info(
                f"The training was finished at {trainer_options.max_epoch} epochs"
            )

        # Generated n-best averaged model
        if not distributed_option.distributed or distributed_option.dist_rank == 0:
            average_nbest_models(
                reporter=reporter,
                output_dir=output_dir,
                best_model_criterion=trainer_options.best_model_criterion,
                nbest=keep_nbest_models,
                oss_bucket=trainer_options.oss_bucket,
                pai_output_dir=trainer_options.output_dir,
            )
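
    # Note on gradient accumulation in train_one_epoch() below (an informal reading of
    # the code, not extra behavior): the loss is divided by accum_grad and backward()
    # runs on every mini-batch, while optimizer.step()/zero_grad() only run every
    # accum_grad iterations, so one parameter update effectively averages gradients
    # over accum_grad mini-batches. E.g., with accum_grad=4 and a per-GPU batch of 8
    # utterances (numbers chosen only for illustration), each update corresponds to
    # 32 utterances per GPU.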

    @classmethod
    def train_one_epoch(
        cls,
        model: torch.nn.Module,
        iterator: Iterable[Tuple[List[str], Dict[str, torch.Tensor]]],
        optimizers: Sequence[torch.optim.Optimizer],
        schedulers: Sequence[Optional[AbsScheduler]],
        scaler: Optional[GradScaler],
        reporter: SubReporter,
        summary_writer,
        options: TrainerOptions,
        distributed_option: DistributedOption,
    ) -> Tuple[bool, bool]:
        assert check_argument_types()

        grad_noise = options.grad_noise
        accum_grad = options.accum_grad
        grad_clip = options.grad_clip
        grad_clip_type = options.grad_clip_type
        log_interval = options.log_interval
        no_forward_run = options.no_forward_run
        ngpu = options.ngpu
        use_wandb = options.use_wandb
        distributed = distributed_option.distributed

        if log_interval is None:
            try:
                log_interval = max(len(iterator) // 20, 10)
            except TypeError:
                log_interval = 100

        model.train()
        all_steps_are_invalid = True
        max_update_stop = False
        # [For distributed] Because iteration counts are not always equal between
        # processes, send a stop-flag to the other processes if the iterator is finished
        iterator_stop = torch.tensor(0).to("cuda" if ngpu > 0 else "cpu")

        start_time = time.perf_counter()
        for iiter, (_, batch) in enumerate(
            reporter.measure_iter_time(iterator, "iter_time"), 1
        ):
            assert isinstance(batch, dict), type(batch)

            if distributed:
                torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)
                if iterator_stop > 0:
                    break

            batch = to_device(batch, "cuda" if ngpu > 0 else "cpu")
            if no_forward_run:
                all_steps_are_invalid = False
                continue

            with autocast(scaler is not None):
                with reporter.measure_time("forward_time"):
                    retval = model(**batch)

                    # Note(kamo):
                    # Supporting two patterns for the returned value from the model
                    #   a. dict type
                    if isinstance(retval, dict):
                        loss = retval["loss"]
                        stats = retval["stats"]
                        weight = retval["weight"]
                        optim_idx = retval.get("optim_idx")
                        if optim_idx is not None and not isinstance(optim_idx, int):
                            if not isinstance(optim_idx, torch.Tensor):
                                raise RuntimeError(
                                    "optim_idx must be int or 1dim torch.Tensor, "
                                    f"but got {type(optim_idx)}"
                                )
                            if optim_idx.dim() >= 2:
                                raise RuntimeError(
                                    "optim_idx must be int or 1dim torch.Tensor, "
                                    f"but got {optim_idx.dim()}dim tensor"
                                )
                            if optim_idx.dim() == 1:
                                for v in optim_idx:
                                    if v != optim_idx[0]:
                                        raise RuntimeError(
                                            "optim_idx must be 1dim tensor "
                                            "having same values for all entries"
                                        )
                                optim_idx = optim_idx[0].item()
                            else:
                                optim_idx = optim_idx.item()
                    #   b. tuple or list type
                    else:
                        loss, stats, weight = retval
                        optim_idx = None

                stats = {k: v for k, v in stats.items() if v is not None}
                if ngpu > 1 or distributed:
                    # Apply weighted averaging for loss and stats
                    loss = (loss * weight.type(loss.dtype)).sum()
                    # if distributed, this method can also apply all_reduce()
                    stats, weight = recursive_average(stats, weight, distributed)
                    # Now weight is summation over all workers
                    loss /= weight
                    if distributed:
                        # NOTE(kamo): Multiply world_size because DistributedDataParallel
                        # automatically normalizes the gradient by world_size.
                        loss *= torch.distributed.get_world_size()

                loss /= accum_grad

            reporter.register(stats, weight)

            with reporter.measure_time("backward_time"):
                if scaler is not None:
                    # Scales loss.  Calls backward() on scaled loss
                    # to create scaled gradients.
                    # Backward passes under autocast are not recommended.
                    # Backward ops run in the same dtype autocast chose
                    # for corresponding forward ops.
                    scaler.scale(loss).backward()
                else:
                    loss.backward()

            if iiter % accum_grad == 0:
                if scaler is not None:
                    # Unscales the gradients of optimizer's assigned params in-place
                    for iopt, optimizer in enumerate(optimizers):
                        if optim_idx is not None and iopt != optim_idx:
                            continue
                        scaler.unscale_(optimizer)

                # gradient noise injection
                if grad_noise:
                    add_gradient_noise(
                        model,
                        reporter.get_total_count(),
                        duration=100,
                        eta=1.0,
                        scale_factor=0.55,
                    )

                # compute the gradient norm to check if it is normal or not
                grad_norm = torch.nn.utils.clip_grad_norm_(
                    model.parameters(),
                    max_norm=grad_clip,
                    norm_type=grad_clip_type,
                )
                # PyTorch<=1.4, clip_grad_norm_ returns float value
                if not isinstance(grad_norm, torch.Tensor):
                    grad_norm = torch.tensor(grad_norm)

                if not torch.isfinite(grad_norm):
                    logging.warning(
                        f"The grad norm is {grad_norm}. Skipping updating the model."
                    )
                    # Must invoke scaler.update() if unscale_() is used in the iteration
                    # to avoid the following error:
                    #   RuntimeError: unscale_() has already been called
                    #   on this optimizer since the last update().
                    # Note that if the gradient has inf/nan values,
                    # scaler.step skips optimizer.step().
                    if scaler is not None:
                        for iopt, optimizer in enumerate(optimizers):
                            if optim_idx is not None and iopt != optim_idx:
                                continue
                            scaler.step(optimizer)
                            scaler.update()
                else:
                    all_steps_are_invalid = False
                    with reporter.measure_time("optim_step_time"):
                        for iopt, (optimizer, scheduler) in enumerate(
                            zip(optimizers, schedulers)
                        ):
                            if optim_idx is not None and iopt != optim_idx:
                                continue
                            if scaler is not None:
                                # scaler.step() first unscales the gradients of
                                # the optimizer's assigned params.
                                scaler.step(optimizer)
                                # Updates the scale for next iteration.
                                scaler.update()
                            else:
                                optimizer.step()
                            if isinstance(scheduler, AbsBatchStepScheduler):
                                scheduler.step()

                for iopt, optimizer in enumerate(optimizers):
                    if optim_idx is not None and iopt != optim_idx:
                        continue
                    optimizer.zero_grad()

                # Register lr and train/load time[sec/step],
                # where step refers to accum_grad * mini-batch
                reporter.register(
                    dict(
                        {
                            f"optim{i}_lr{j}": pg["lr"]
                            for i, optimizer in enumerate(optimizers)
                            for j, pg in enumerate(optimizer.param_groups)
                            if "lr" in pg
                        },
                        train_time=time.perf_counter() - start_time,
                    ),
                )
                start_time = time.perf_counter()

                # update num_updates
                if distributed:
                    if hasattr(model.module, "num_updates"):
                        model.module.set_num_updates(model.module.get_num_updates() + 1)
                        options.num_updates = model.module.get_num_updates()
                        if model.module.get_num_updates() >= options.max_update:
                            max_update_stop = True
                else:
                    if hasattr(model, "num_updates"):
                        model.set_num_updates(model.get_num_updates() + 1)
                        options.num_updates = model.get_num_updates()
                        if model.get_num_updates() >= options.max_update:
                            max_update_stop = True

            # NOTE(kamo): Call log_message() after next()
            reporter.next()
            if iiter % log_interval == 0:
                num_updates = (
                    options.num_updates if hasattr(options, "num_updates") else None
                )
                logging.info(reporter.log_message(-log_interval, num_updates=num_updates))
                if summary_writer is not None:
                    reporter.tensorboard_add_scalar(summary_writer, -log_interval)
                if use_wandb:
                    reporter.wandb_log()

            if max_update_stop:
                break

        else:
            if distributed:
                iterator_stop.fill_(1)
                torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)

        return all_steps_are_invalid, max_update_stop
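
    # About the for/else blocks in train_one_epoch() above and validate_one_epoch()
    # below: under distributed training each rank may see a different number of
    # batches, so every iteration all-reduces iterator_stop. A rank whose iterator is
    # exhausted reaches the else branch, sets its flag to 1 and joins one final
    # all_reduce; the remaining ranks then observe a positive sum on their next
    # all_reduce and break, which keeps the collective calls aligned across workers.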

    @classmethod
    @torch.no_grad()
    def validate_one_epoch(
        cls,
        model: torch.nn.Module,
        iterator: Iterable[Dict[str, torch.Tensor]],
        reporter: SubReporter,
        options: TrainerOptions,
        distributed_option: DistributedOption,
    ) -> None:
        assert check_argument_types()
        ngpu = options.ngpu
        no_forward_run = options.no_forward_run
        distributed = distributed_option.distributed

        model.eval()

        # [For distributed] Because iteration counts are not always equal between
        # processes, send a stop-flag to the other processes if the iterator is finished
        iterator_stop = torch.tensor(0).to("cuda" if ngpu > 0 else "cpu")
        for (_, batch) in iterator:
            assert isinstance(batch, dict), type(batch)
            if distributed:
                torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)
                if iterator_stop > 0:
                    break

            batch = to_device(batch, "cuda" if ngpu > 0 else "cpu")
            if no_forward_run:
                continue

            retval = model(**batch)
            if isinstance(retval, dict):
                stats = retval["stats"]
                weight = retval["weight"]
            else:
                _, stats, weight = retval
            if ngpu > 1 or distributed:
                # Apply weighted averaging for stats.
                # if distributed, this method can also apply all_reduce()
                stats, weight = recursive_average(stats, weight, distributed)

            reporter.register(stats, weight)
            reporter.next()

        else:
            if distributed:
                iterator_stop.fill_(1)
                torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)