# abs_task.py
# Copyright ESPnet (https://github.com/espnet/espnet). All Rights Reserved.
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Abstract task module."""
import argparse
import functools
import logging
import os
import sys
from abc import ABC
from abc import abstractmethod
from dataclasses import dataclass
from distutils.version import LooseVersion
from io import BytesIO
from pathlib import Path
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union

import humanfriendly
import numpy as np
import torch
import torch.multiprocessing
import torch.nn
import torch.optim
import yaml
from torch.utils.data import DataLoader
from typeguard import check_argument_types
from typeguard import check_return_type

from funasr import __version__
from funasr.datasets.dataset import AbsDataset
from funasr.datasets.dataset import DATA_TYPES
from funasr.datasets.dataset import ESPnetDataset
from funasr.datasets.iterable_dataset import IterableESPnetDataset
from funasr.iterators.abs_iter_factory import AbsIterFactory
from funasr.iterators.chunk_iter_factory import ChunkIterFactory
from funasr.iterators.multiple_iter_factory import MultipleIterFactory
from funasr.iterators.sequence_iter_factory import SequenceIterFactory
from funasr.optimizers.sgd import SGD
from funasr.samplers.build_batch_sampler import BATCH_TYPES
from funasr.samplers.build_batch_sampler import build_batch_sampler
from funasr.samplers.unsorted_batch_sampler import UnsortedBatchSampler
from funasr.schedulers.noam_lr import NoamLR
from funasr.schedulers.warmup_lr import WarmupLR
from funasr.torch_utils.load_pretrained_model import load_pretrained_model
from funasr.torch_utils.model_summary import model_summary
from funasr.torch_utils.pytorch_version import pytorch_cudnn_version
from funasr.torch_utils.set_all_random_seed import set_all_random_seed
from funasr.train.abs_espnet_model import AbsESPnetModel
from funasr.train.class_choices import ClassChoices
from funasr.train.distributed_utils import DistributedOption
from funasr.train.trainer import Trainer
from funasr.utils import config_argparse
from funasr.utils.build_dataclass import build_dataclass
from funasr.utils.cli_utils import get_commandline_args
from funasr.utils.get_default_kwargs import get_default_kwargs
from funasr.utils.nested_dict_action import NestedDictAction
from funasr.utils.types import humanfriendly_parse_size_or_none
from funasr.utils.types import int_or_none
from funasr.utils.types import str2bool
from funasr.utils.types import str2triple_str
from funasr.utils.types import str_or_int
from funasr.utils.types import str_or_none
from funasr.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump

try:
    import wandb
except Exception:
    wandb = None

if LooseVersion(torch.__version__) >= LooseVersion("1.5.0"):
    pass
else:
    pass

optim_classes = dict(
    adam=torch.optim.Adam,
    adamw=torch.optim.AdamW,
    sgd=SGD,
    adadelta=torch.optim.Adadelta,
    adagrad=torch.optim.Adagrad,
    adamax=torch.optim.Adamax,
    asgd=torch.optim.ASGD,
    lbfgs=torch.optim.LBFGS,
    rmsprop=torch.optim.RMSprop,
    rprop=torch.optim.Rprop,
)
if LooseVersion(torch.__version__) >= LooseVersion("1.10.0"):
    # From 1.10.0, RAdam is officially supported
    optim_classes.update(
        radam=torch.optim.RAdam,
    )
try:
    import torch_optimizer

    optim_classes.update(
        accagd=torch_optimizer.AccSGD,
        adabound=torch_optimizer.AdaBound,
        adamod=torch_optimizer.AdaMod,
        diffgrad=torch_optimizer.DiffGrad,
        lamb=torch_optimizer.Lamb,
        novograd=torch_optimizer.NovoGrad,
        pid=torch_optimizer.PID,
        # torch_optimizer<=0.0.1a10 doesn't support
        # qhadam=torch_optimizer.QHAdam,
        qhm=torch_optimizer.QHM,
        sgdw=torch_optimizer.SGDW,
        yogi=torch_optimizer.Yogi,
    )
    if LooseVersion(torch_optimizer.__version__) < LooseVersion("0.2.0"):
        # From 0.2.0, RAdam is dropped
        optim_classes.update(
            radam=torch_optimizer.RAdam,
        )
    del torch_optimizer
except ImportError:
    pass
try:
    import apex

    optim_classes.update(
        fusedadam=apex.optimizers.FusedAdam,
        fusedlamb=apex.optimizers.FusedLAMB,
        fusednovograd=apex.optimizers.FusedNovoGrad,
        fusedsgd=apex.optimizers.FusedSGD,
    )
    del apex
except ImportError:
    pass
try:
    import fairscale
except ImportError:
    fairscale = None

scheduler_classes = dict(
    ReduceLROnPlateau=torch.optim.lr_scheduler.ReduceLROnPlateau,
    lambdalr=torch.optim.lr_scheduler.LambdaLR,
    steplr=torch.optim.lr_scheduler.StepLR,
    multisteplr=torch.optim.lr_scheduler.MultiStepLR,
    exponentiallr=torch.optim.lr_scheduler.ExponentialLR,
    CosineAnnealingLR=torch.optim.lr_scheduler.CosineAnnealingLR,
    noamlr=NoamLR,
    warmuplr=WarmupLR,
    cycliclr=torch.optim.lr_scheduler.CyclicLR,
    onecyclelr=torch.optim.lr_scheduler.OneCycleLR,
    CosineAnnealingWarmRestarts=torch.optim.lr_scheduler.CosineAnnealingWarmRestarts,
)
# Normalize all keys to lower case
optim_classes = {k.lower(): v for k, v in optim_classes.items()}
scheduler_classes = {k.lower(): v for k, v in scheduler_classes.items()}
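
# For example, "--optim adam --optim_conf lr=0.001" is resolved through this
# table; a sketch of what build_optimizers() below effectively does:
#     optim_class = optim_classes["adam"]  # -> torch.optim.Adam
#     optim = optim_class(model.parameters(), lr=0.001)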


@dataclass
class IteratorOptions:
    preprocess_fn: callable
    collate_fn: callable
    data_path_and_name_and_type: list
    shape_files: list
    batch_size: int
    batch_bins: int
    batch_type: str
    max_cache_size: float
    max_cache_fd: int
    distributed: bool
    num_batches: Optional[int]
    num_iters_per_epoch: Optional[int]
    train: bool
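
# IteratorOptions is filled in by AbsTask.build_iter_options() and consumed by
# the build_*_iter_factory() methods below.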


class AbsTask(ABC):
    # Use @staticmethod, or @classmethod,
    # instead of instance methods, to avoid God classes.

    # If you need more than one optimizer, change this value in the subclass.
    num_optimizers: int = 1
    trainer = Trainer
    class_choices_list: List[ClassChoices] = []

    def __init__(self):
        raise RuntimeError("This class can't be instantiated.")

    @classmethod
    @abstractmethod
    def add_task_arguments(cls, parser: argparse.ArgumentParser):
        pass
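        # A subclass typically registers its task-specific options here; a
        # sketch ("--token_list" is just an illustrative option name):
        #     group = parser.add_argument_group("task related")
        #     group.add_argument("--token_list", type=str_or_none, default=None)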

    @classmethod
    @abstractmethod
    def build_collate_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Callable[[Sequence[Dict[str, np.ndarray]]], Dict[str, torch.Tensor]]:
        """Return "collate_fn", a callable object that is given to DataLoader.

        >>> from torch.utils.data import DataLoader
        >>> loader = DataLoader(collate_fn=cls.build_collate_fn(args, train=True), ...)

        In many cases, you can use our common collate_fn.
        """
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def build_preprocess_fn(
        cls, args: argparse.Namespace, train: bool
    ) -> Optional[Callable[[str, Dict[str, np.ndarray]], Dict[str, np.ndarray]]]:
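        """Return "preprocess_fn", which is applied to each example before collation.

        The callable maps an ``(uid, data)`` pair, i.e.
        ``(str, Dict[str, np.ndarray])``, to a processed ``Dict[str, np.ndarray]``.
        This method may also return None, in which case no preprocessing is applied.
        """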
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def required_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Define the data names required by the Task.

        This function is used by
        >>> cls.check_task_requirements()

        If your model is defined as follows,
        >>> from funasr.train.abs_espnet_model import AbsESPnetModel
        >>> class Model(AbsESPnetModel):
        ...     def forward(self, input, output, opt=None): pass

        then "required_data_names" should be
        >>> required_data_names = ('input', 'output')
        """
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def optional_data_names(
        cls, train: bool = True, inference: bool = False
    ) -> Tuple[str, ...]:
        """Define the optional data names used by the Task.

        This function is used by
        >>> cls.check_task_requirements()

        If your model is defined as follows,
        >>> from funasr.train.abs_espnet_model import AbsESPnetModel
        >>> class Model(AbsESPnetModel):
        ...     def forward(self, input, output, opt=None): pass

        then "optional_data_names" should be
        >>> optional_data_names = ('opt',)
        """
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def build_model(cls, args: argparse.Namespace) -> AbsESPnetModel:
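        """Build and return the model instance for this task.

        The returned object must inherit AbsESPnetModel;
        main_worker() checks this before training starts.
        """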
        raise NotImplementedError

    @classmethod
    def get_parser(cls) -> config_argparse.ArgumentParser:
        assert check_argument_types()

        class ArgumentDefaultsRawTextHelpFormatter(
            argparse.RawTextHelpFormatter,
            argparse.ArgumentDefaultsHelpFormatter,
        ):
            pass

        parser = config_argparse.ArgumentParser(
            description="base parser",
            formatter_class=ArgumentDefaultsRawTextHelpFormatter,
        )

        # NOTE(kamo): Use '_' instead of '-' to avoid confusion.
        # I think '-' looks really confusing if it's written in yaml.

        # NOTE(kamo): add_arguments(..., required=True) can't be used
        # to provide --print_config mode. Instead of it, do as
        parser.set_defaults(required=["output_dir"])

        group = parser.add_argument_group("Common configuration")
        group.add_argument(
            "--print_config",
            action="store_true",
            help="Print the config file and exit",
        )
        group.add_argument(
            "--log_level",
            type=lambda x: x.upper(),
            default="INFO",
            choices=("ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
            help="The verbosity level of logging",
        )
        group.add_argument(
            "--dry_run",
            type=str2bool,
            default=False,
            help="Perform the process without training",
        )
        group.add_argument(
            "--iterator_type",
            type=str,
            choices=["sequence", "chunk", "task", "none"],
            default="sequence",
            help="Specify iterator type",
        )
        group.add_argument("--output_dir", type=str_or_none, default=None)
        group.add_argument(
            "--ngpu",
            type=int,
            default=0,
            help="The number of gpus. 0 indicates CPU mode",
        )
        group.add_argument("--seed", type=int, default=0, help="Random seed")
        group.add_argument(
            "--num_workers",
            type=int,
            default=1,
            help="The number of workers used for DataLoader",
        )
        group.add_argument(
            "--num_att_plot",
            type=int,
            default=3,
            help="The number of images to plot the outputs from attention. "
            "This option makes sense only for attention-based models. "
            "The attention plot can also be disabled by setting it to 0",
        )

        group = parser.add_argument_group("distributed training related")
        group.add_argument(
            "--dist_backend",
            default="nccl",
            type=str,
            help="distributed backend",
        )
        group.add_argument(
            "--dist_init_method",
            type=str,
            default="env://",
            help='If init_method="env://", the environment values of "MASTER_PORT", '
            '"MASTER_ADDR", "WORLD_SIZE", and "RANK" are referred to.',
        )
        group.add_argument(
            "--dist_world_size",
            default=None,
            type=int_or_none,
            help="number of nodes for distributed training",
        )
        group.add_argument(
            "--dist_rank",
            type=int_or_none,
            default=None,
            help="node rank for distributed training",
        )
        group.add_argument(
            # Not starting with "dist_" for compatibility with launch.py
            "--local_rank",
            type=int_or_none,
            default=None,
            help="local rank for distributed training. This option is used if "
            "--multiprocessing_distributed=false",
        )
        group.add_argument(
            "--dist_master_addr",
            default=None,
            type=str_or_none,
            help="The master address for distributed training. "
            "This value is used when dist_init_method == 'env://'",
        )
        group.add_argument(
            "--dist_master_port",
            default=None,
            type=int_or_none,
            help="The master port for distributed training. "
            "This value is used when dist_init_method == 'env://'",
        )
        group.add_argument(
            "--dist_launcher",
            default=None,
            type=str_or_none,
            choices=["slurm", "mpi", None],
            help="The launcher type for distributed training",
        )
        group.add_argument(
            "--multiprocessing_distributed",
            default=False,
            type=str2bool,
            help="Use multi-processing distributed training to launch "
            "N processes per node, which has N GPUs. This is the "
            "fastest way to use PyTorch for either single node or "
            "multi node data parallel training",
        )
        group.add_argument(
            "--unused_parameters",
            type=str2bool,
            default=False,
            help="Whether to use find_unused_parameters in "
            "torch.nn.parallel.DistributedDataParallel",
        )
        group.add_argument(
            "--sharded_ddp",
            default=False,
            type=str2bool,
            help="Enable sharded training provided by fairscale",
        )

        group = parser.add_argument_group("cudnn mode related")
        group.add_argument(
            "--cudnn_enabled",
            type=str2bool,
            default=torch.backends.cudnn.enabled,
            help="Enable CUDNN",
        )
        group.add_argument(
            "--cudnn_benchmark",
            type=str2bool,
            default=torch.backends.cudnn.benchmark,
            help="Enable cudnn-benchmark mode",
        )
        group.add_argument(
            "--cudnn_deterministic",
            type=str2bool,
            default=True,
            help="Enable cudnn-deterministic mode",
        )

        group = parser.add_argument_group("collect stats mode related")
        group.add_argument(
            "--collect_stats",
            type=str2bool,
            default=False,
            help='Perform in "collect stats" mode',
        )
        group.add_argument(
            "--write_collected_feats",
            type=str2bool,
            default=False,
            help='Write the output features from the model in "collect stats" mode',
        )

        group = parser.add_argument_group("Trainer related")
        group.add_argument(
            "--max_epoch",
            type=int,
            default=40,
            help="The maximum number of epochs to train",
        )
        group.add_argument(
            "--max_update",
            type=int,
            default=sys.maxsize,
            help="The maximum number of update steps to train",
        )
        group.add_argument(
            "--patience",
            type=int_or_none,
            default=None,
            help="Number of epochs to wait without improvement "
            "before stopping the training",
        )
        group.add_argument(
            "--val_scheduler_criterion",
            type=str,
            nargs=2,
            default=("valid", "loss"),
            help="The criterion used for the value given to the lr scheduler. "
            'Give a pair of the phase, "train" or "valid", '
            'and the criterion name. The mode, "min" or "max", can '
            "be changed by --scheduler_conf",
        )
        group.add_argument(
            "--early_stopping_criterion",
            type=str,
            nargs=3,
            default=("valid", "loss", "min"),
            help="The criterion used for judging early stopping. "
            'Give three values: the phase, "train" or "valid", '
            'the criterion name, and the mode, "min" or "max", '
            'e.g. "valid acc max".',
        )
        group.add_argument(
            "--best_model_criterion",
            type=str2triple_str,
            nargs="+",
            default=[
                ("train", "loss", "min"),
                ("valid", "loss", "min"),
                ("train", "acc", "max"),
                ("valid", "acc", "max"),
            ],
            help="The criterion used for judging the best model. "
            'Give triples of the phase, "train" or "valid", '
            'the criterion name, and the mode, "min" or "max", '
            'e.g. "valid,acc,max".',
        )
        group.add_argument(
            "--keep_nbest_models",
            type=int,
            nargs="+",
            default=[10],
            help="Remove previous snapshots excluding the n-best scored epochs",
        )
        group.add_argument(
            "--nbest_averaging_interval",
            type=int,
            default=0,
            help="The epoch interval to apply model averaging and save nbest models",
        )
        group.add_argument(
            "--grad_clip",
            type=float,
            default=5.0,
            help="Gradient norm threshold to clip",
        )
        group.add_argument(
            "--grad_clip_type",
            type=float,
            default=2.0,
            help="The p-norm type used for gradient clipping. Can be inf",
        )
        group.add_argument(
            "--grad_noise",
            type=str2bool,
            default=False,
            help="Whether to inject noise into gradients during training",
        )
        group.add_argument(
            "--accum_grad",
            type=int,
            default=1,
            help="The number of gradient accumulation steps",
        )
        group.add_argument(
            "--no_forward_run",
            type=str2bool,
            default=False,
            help="Only iterate over the data loader, without "
            "model forwarding and training",
        )
        group.add_argument(
            "--resume",
            type=str2bool,
            default=False,
            help="Enable resuming if a checkpoint exists",
        )
        group.add_argument(
            "--train_dtype",
            default="float32",
            choices=["float16", "float32", "float64"],
            help="Data type for training.",
        )
        group.add_argument(
            "--use_amp",
            type=str2bool,
            default=False,
            help="Enable Automatic Mixed Precision. This feature requires pytorch>=1.6",
        )
        group.add_argument(
            "--log_interval",
            type=int_or_none,
            default=None,
            help="Log every N iterations within each training epoch. "
            "If None is given, it is decided automatically according to "
            "the number of training samples.",
        )
        group.add_argument(
            "--use_tensorboard",
            type=str2bool,
            default=True,
            help="Enable tensorboard logging",
        )
        group.add_argument(
            "--use_wandb",
            type=str2bool,
            default=False,
            help="Enable wandb logging",
        )
        group.add_argument(
            "--wandb_project",
            type=str,
            default=None,
            help="Specify wandb project",
        )
        group.add_argument(
            "--wandb_id",
            type=str,
            default=None,
            help="Specify wandb id",
        )
        group.add_argument(
            "--wandb_entity",
            type=str,
            default=None,
            help="Specify wandb entity",
        )
        group.add_argument(
            "--wandb_name",
            type=str,
            default=None,
            help="Specify wandb run name",
        )
        group.add_argument(
            "--wandb_model_log_interval",
            type=int,
            default=-1,
            help="Set the model log period",
        )
        group.add_argument(
            "--detect_anomaly",
            type=str2bool,
            default=False,
            help="Set torch.autograd.set_detect_anomaly",
        )

        group = parser.add_argument_group("Pretraining model related")
        group.add_argument("--pretrain_path", help="This option is obsolete")
        group.add_argument(
            "--init_param",
            type=str,
            default=[],
            nargs="*",
            help="Specify the file path used for initialization of parameters. "
            "The format is '<file_path>:<src_key>:<dst_key>:<exclude_keys>', "
            "where file_path is the model file path, "
            "src_key specifies the key of model states to be used in the model file, "
            "dst_key specifies the attribute of the model to be initialized, "
            "and exclude_keys excludes keys of model states for the initialization. "
            "e.g.\n"
            "  # Load all parameters\n"
            "  --init_param some/where/model.pth\n"
            "  # Load only decoder parameters\n"
            "  --init_param some/where/model.pth:decoder:decoder\n"
            "  # Load only decoder parameters excluding decoder.embed\n"
            "  --init_param some/where/model.pth:decoder:decoder:decoder.embed\n",
        )
        group.add_argument(
            "--ignore_init_mismatch",
            type=str2bool,
            default=False,
            help="Ignore size mismatch when loading pre-trained model",
        )
        group.add_argument(
            "--freeze_param",
            type=str,
            default=[],
            nargs="*",
            help="Freeze parameters",
        )

        group = parser.add_argument_group("BatchSampler related")
        group.add_argument(
            "--num_iters_per_epoch",
            type=int_or_none,
            default=None,
            help="Restrict the number of iterations for training per epoch",
        )
        group.add_argument(
            "--batch_size",
            type=int,
            default=20,
            help="The mini-batch size used for training. Used if batch_type='unsorted',"
            " 'sorted', or 'folded'.",
        )
        group.add_argument(
            "--valid_batch_size",
            type=int_or_none,
            default=None,
            help="If not given, the value of --batch_size is used",
        )
        group.add_argument(
            "--batch_bins",
            type=int,
            default=1000000,
            help="The number of batch bins. Used if batch_type='length' or 'numel'",
        )
        group.add_argument(
            "--valid_batch_bins",
            type=int_or_none,
            default=None,
            help="If not given, the value of --batch_bins is used",
        )
        group.add_argument("--train_shape_file", type=str, action="append", default=[])
        group.add_argument("--valid_shape_file", type=str, action="append", default=[])

        group = parser.add_argument_group("Sequence iterator related")
        _batch_type_help = ""
        for key, value in BATCH_TYPES.items():
            _batch_type_help += f'"{key}":\n{value}\n'
        group.add_argument(
            "--batch_type",
            type=str,
            default="folded",
            choices=list(BATCH_TYPES),
            help=_batch_type_help,
        )
        group.add_argument(
            "--valid_batch_type",
            type=str_or_none,
            default=None,
            choices=list(BATCH_TYPES) + [None],
            help="If not given, the value of --batch_type is used",
        )
        group.add_argument("--fold_length", type=int, action="append", default=[])
        group.add_argument(
            "--sort_in_batch",
            type=str,
            default="descending",
            choices=["descending", "ascending"],
            help="Sort the samples in each mini-batch by their "
            'lengths. To enable this, "shape_file" must have the length information.',
        )
        group.add_argument(
            "--sort_batch",
            type=str,
            default="descending",
            choices=["descending", "ascending"],
            help="Sort mini-batches by the sample lengths",
        )
        group.add_argument(
            "--multiple_iterator",
            type=str2bool,
            default=False,
            help="Use multiple iterator mode",
        )

        group = parser.add_argument_group("Chunk iterator related")
        group.add_argument(
            "--chunk_length",
            type=str_or_int,
            default=500,
            help="Specify chunk length. e.g. '300', '300,400,500', or '300-400'. "
            "If multiple numbers separated by commas are given, "
            "one of them is selected randomly for each sample. "
            "If two numbers are given with '-', it indicates the range of the choices. "
            "Note that if the sequence length is shorter than all of the chunk "
            "lengths, the sample is discarded.",
        )
        group.add_argument(
            "--chunk_shift_ratio",
            type=float,
            default=0.5,
            help="Specify the shift width of chunks. If it is less than 1, "
            "chunks overlap; if it is larger than 1, there are gaps "
            "between the chunks.",
        )
        group.add_argument(
            "--num_cache_chunks",
            type=int,
            default=1024,
            help="Shuffle in the specified number of chunks and generate mini-batches. "
            "The larger this value, the more randomness is obtained.",
        )

        group = parser.add_argument_group("Dataset related")
        _data_path_and_name_and_type_help = (
            "Give three words separated by commas. It's used for the training data. "
            "e.g. '--train_data_path_and_name_and_type some/path/a.scp,foo,sound'. "
            "The first value, some/path/a.scp, indicates the file path, "
            "the second, foo, is the key name used for the mini-batch data, "
            "and the last, sound, decides the file type. "
            "This option is repeatable, so you can input any number of features "
            "for your task. Supported file types are as follows:\n\n"
        )
        for key, dic in DATA_TYPES.items():
            _data_path_and_name_and_type_help += f'"{key}":\n{dic["help"]}\n\n'

        # for large dataset
        group.add_argument(
            "--dataset_type",
            type=str,
            default="small",
            help="Whether to use the dataloader for large datasets",
        )
        group.add_argument(
            "--dataset_conf",
            action=NestedDictAction,
            default=dict(),
            help="The keyword arguments for the dataset",
        )
        group.add_argument(
            "--train_data_file",
            type=str,
            default=None,
            help="train_list for large dataset",
        )
        group.add_argument(
            "--valid_data_file",
            type=str,
            default=None,
            help="valid_list for large dataset",
        )
        group.add_argument(
            "--train_data_path_and_name_and_type",
            type=str2triple_str,
            action="append",
            default=[],
            help=_data_path_and_name_and_type_help,
        )
        group.add_argument(
            "--valid_data_path_and_name_and_type",
            type=str2triple_str,
            action="append",
            default=[],
        )
        group.add_argument(
            "--allow_variable_data_keys",
            type=str2bool,
            default=False,
            help="Allow arbitrary keys for the mini-batch, ignoring "
            "the task requirements",
        )
        group.add_argument(
            "--max_cache_size",
            type=humanfriendly.parse_size,
            default=0.0,
            help="The maximum cache size for the data loader. e.g. 10MB, 20GB.",
        )
        group.add_argument(
            "--max_cache_fd",
            type=int,
            default=32,
            help="The maximum number of file descriptors to be kept "
            "open for ark files. "
            "This feature is only valid when the data type is 'kaldi_ark'.",
        )
        group.add_argument(
            "--valid_max_cache_size",
            type=humanfriendly_parse_size_or_none,
            default=None,
            help="The maximum cache size for the validation data loader. "
            "e.g. 10MB, 20GB. "
            "If None, 5 percent of --max_cache_size is used.",
        )

        group = parser.add_argument_group("Optimizer related")
        for i in range(1, cls.num_optimizers + 1):
            suf = "" if i == 1 else str(i)
            group.add_argument(
                f"--optim{suf}",
                type=lambda x: x.lower(),
                default="adadelta",
                choices=list(optim_classes),
                help="The optimizer type",
            )
            group.add_argument(
                f"--optim{suf}_conf",
                action=NestedDictAction,
                default=dict(),
                help="The keyword arguments for the optimizer",
            )
            group.add_argument(
                f"--scheduler{suf}",
                type=lambda x: str_or_none(x.lower()),
                default=None,
                choices=list(scheduler_classes) + [None],
                help="The lr scheduler type",
            )
            group.add_argument(
                f"--scheduler{suf}_conf",
                action=NestedDictAction,
                default=dict(),
                help="The keyword arguments for the lr scheduler",
            )

        # for training on PAI
        group = parser.add_argument_group("PAI training related")
        group.add_argument(
            "--use_pai",
            type=str2bool,
            default=False,
            help="Flag to indicate whether training runs on PAI",
        )
        group.add_argument(
            "--num_worker_count",
            type=int,
            default=1,
            help="The number of machines on PAI.",
        )
        group.add_argument(
            "--access_key_id",
            type=str,
            default=None,
            help="The username for OSS.",
        )
        group.add_argument(
            "--access_key_secret",
            type=str,
            default=None,
            help="The password for OSS.",
        )
        group.add_argument(
            "--endpoint",
            type=str,
            default=None,
            help="The endpoint for OSS.",
        )
        group.add_argument(
            "--bucket_name",
            type=str,
            default=None,
            help="The bucket name for OSS.",
        )
        group.add_argument(
            "--oss_bucket",
            default=None,
            help="OSS bucket.",
        )

        cls.trainer.add_arguments(parser)
        cls.add_task_arguments(parser)

        assert check_return_type(parser)
        return parser
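
    # A typical invocation of the resulting CLI (illustrative sketch; the
    # entry-point script name depends on the concrete task):
    #     python train.py --output_dir exp/train --ngpu 1 \
    #         --train_data_path_and_name_and_type data/train/wav.scp,speech,sound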

    @classmethod
    def build_optimizers(
        cls,
        args: argparse.Namespace,
        model: torch.nn.Module,
    ) -> List[torch.optim.Optimizer]:
        if cls.num_optimizers != 1:
            raise RuntimeError(
                "build_optimizers() must be overridden if num_optimizers != 1"
            )

        optim_class = optim_classes.get(args.optim)
        if optim_class is None:
            raise ValueError(f"must be one of {list(optim_classes)}: {args.optim}")
        if args.sharded_ddp:
            if fairscale is None:
                raise RuntimeError("fairscale is required: do 'pip install fairscale'")
            optim = fairscale.optim.oss.OSS(
                params=model.parameters(), optim=optim_class, **args.optim_conf
            )
        else:
            optim = optim_class(model.parameters(), **args.optim_conf)
        optimizers = [optim]
        return optimizers
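
    # A task with num_optimizers == 2 overrides this method; a rough sketch
    # ("generator"/"discriminator" are hypothetical submodule names):
    #     @classmethod
    #     def build_optimizers(cls, args, model):
    #         optim_g = optim_classes[args.optim](
    #             model.generator.parameters(), **args.optim_conf)
    #         optim_d = optim_classes[args.optim2](
    #             model.discriminator.parameters(), **args.optim2_conf)
    #         return [optim_g, optim_d]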

    @classmethod
    def exclude_opts(cls) -> Tuple[str, ...]:
        """The options not to be shown by --print_config"""
        return "required", "print_config", "config", "ngpu"

    @classmethod
    def get_default_config(cls) -> Dict[str, Any]:
        """Return the configuration as a dict.

        This method is used by print_config().
        """

        def get_class_type(name: str, classes: dict):
            _cls = classes.get(name)
            if _cls is None:
                raise ValueError(f"must be one of {list(classes)}: {name}")
            return _cls

        # This method is used only for --print_config
        assert check_argument_types()
        parser = cls.get_parser()
        args, _ = parser.parse_known_args()
        config = vars(args)
        # Exclude the options not to be shown
        for k in AbsTask.exclude_opts():
            config.pop(k)

        for i in range(1, cls.num_optimizers + 1):
            suf = "" if i == 1 else str(i)
            name = config[f"optim{suf}"]
            optim_class = get_class_type(name, optim_classes)
            conf = get_default_kwargs(optim_class)
            # Overwrite the default by the arguments,
            conf.update(config[f"optim{suf}_conf"])
            # and set it again
            config[f"optim{suf}_conf"] = conf

            name = config[f"scheduler{suf}"]
            if name is not None:
                scheduler_class = get_class_type(name, scheduler_classes)
                conf = get_default_kwargs(scheduler_class)
                # Overwrite the default by the arguments,
                conf.update(config[f"scheduler{suf}_conf"])
                # and set it again
                config[f"scheduler{suf}_conf"] = conf

        for class_choices in cls.class_choices_list:
            if getattr(args, class_choices.name) is not None:
                class_obj = class_choices.get_class(getattr(args, class_choices.name))
                conf = get_default_kwargs(class_obj)
                name = class_choices.name
                # Overwrite the default by the arguments,
                conf.update(config[f"{name}_conf"])
                # and set it again
                config[f"{name}_conf"] = conf
        return config

    @classmethod
    def check_required_command_args(cls, args: argparse.Namespace):
        assert check_argument_types()
        for k in vars(args):
            if "-" in k:
                raise RuntimeError(f'Use "_" instead of "-": parser.get_parser("{k}")')

        required = ", ".join(
            f"--{a}" for a in args.required if getattr(args, a) is None
        )

        if len(required) != 0:
            parser = cls.get_parser()
            parser.print_help(file=sys.stderr)
            p = Path(sys.argv[0]).name
            print(file=sys.stderr)
            print(
                f"{p}: error: the following arguments are required: {required}",
                file=sys.stderr,
            )
            sys.exit(2)

    @classmethod
    def check_task_requirements(
        cls,
        dataset: Union[AbsDataset, IterableESPnetDataset],
        allow_variable_data_keys: bool,
        train: bool,
        inference: bool = False,
    ) -> None:
        """Check whether the dataset satisfies the requirements of the current Task."""
        assert check_argument_types()
        mes = (
            f"If you intend to use an additional input, modify "
            f'"{cls.__name__}.required_data_names()" or '
            f'"{cls.__name__}.optional_data_names()". '
            f"Otherwise you need to set --allow_variable_data_keys true "
        )

        for k in cls.required_data_names(train, inference):
            if not dataset.has_name(k):
                raise RuntimeError(
                    f'"{cls.required_data_names(train, inference)}" are required for'
                    f' {cls.__name__}, but "{dataset.names()}" are input.\n{mes}'
                )
        if not allow_variable_data_keys:
            task_keys = cls.required_data_names(
                train, inference
            ) + cls.optional_data_names(train, inference)
            for k in dataset.names():
                if k not in task_keys:
                    raise RuntimeError(
                        f"The data-name must be one of {task_keys} "
                        f'for {cls.__name__}: "{k}" is not allowed.\n{mes}'
                    )

    @classmethod
    def print_config(cls, file=sys.stdout) -> None:
        assert check_argument_types()
        # Shows the config: e.g. python train.py asr --print_config
        config = cls.get_default_config()
        file.write(yaml_no_alias_safe_dump(config, indent=4, sort_keys=False))
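
    # Usage sketch: dump a template config, edit it, and pass it back in
    # via --config, e.g.:
    #     python train.py --print_config --optim adam > conf/train.yaml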

    @classmethod
    def main(cls, args: argparse.Namespace = None, cmd: Sequence[str] = None):
        assert check_argument_types()
        print(get_commandline_args(), file=sys.stderr)
        if args is None:
            parser = cls.get_parser()
            args = parser.parse_args(cmd)
        args.version = __version__
        if args.pretrain_path is not None:
            raise RuntimeError("--pretrain_path is deprecated. Use --init_param")
        if args.print_config:
            cls.print_config()
            sys.exit(0)
        cls.check_required_command_args(args)

        if not args.distributed or not args.multiprocessing_distributed:
            cls.main_worker(args)
        else:
            assert args.ngpu > 1
            cls.main_worker(args)
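
    # main() is the CLI entry point. A concrete task script typically runs
    # (sketch; the task class name is illustrative):
    #     ASRTask.main(cmd=sys.argv[1:])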

    @classmethod
    def main_worker(cls, args: argparse.Namespace):
        assert check_argument_types()

        # 0. Init distributed process
        distributed_option = build_dataclass(DistributedOption, args)
        # Setting distributed_option.dist_rank, etc.
        if args.use_pai:
            distributed_option.init_options_pai()
        else:
            distributed_option.init_options()

        # NOTE(kamo): Don't use logging before invoking logging.basicConfig()
        if not distributed_option.distributed or distributed_option.dist_rank == 0:
            if not distributed_option.distributed:
                _rank = ""
            else:
                _rank = (
                    f":{distributed_option.dist_rank}/"
                    f"{distributed_option.dist_world_size}"
                )

            # NOTE(kamo):
            # logging.basicConfig() is invoked in main_worker() instead of main()
            # because it can be invoked only once in a process.
            # FIXME(kamo): Should we use logging.getLogger()?
            logging.basicConfig(
                level=args.log_level,
                format=f"[{os.uname()[1].split('.')[0]}{_rank}]"
                f" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
            )
        else:
            # Suppress logging if RANK != 0
            logging.basicConfig(
                level="ERROR",
                format=f"[{os.uname()[1].split('.')[0]}]"
                f" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
            )
        # Invoking torch.distributed.init_process_group
        if args.use_pai:
            distributed_option.init_torch_distributed_pai(args)
        else:
            distributed_option.init_torch_distributed(args)

        # 1. Set random-seed
        set_all_random_seed(args.seed)
        torch.backends.cudnn.enabled = args.cudnn_enabled
        torch.backends.cudnn.benchmark = args.cudnn_benchmark
        torch.backends.cudnn.deterministic = args.cudnn_deterministic
        if args.detect_anomaly:
            logging.info("Invoking torch.autograd.set_detect_anomaly(True)")
            torch.autograd.set_detect_anomaly(args.detect_anomaly)

        # 2. Build model
        model = cls.build_model(args=args)
        if not isinstance(model, AbsESPnetModel):
            raise RuntimeError(
                f"model must inherit {AbsESPnetModel.__name__}, but got {type(model)}"
            )
        model = model.to(
            dtype=getattr(torch, args.train_dtype),
            device="cuda" if args.ngpu > 0 else "cpu",
        )
        for t in args.freeze_param:
            for k, p in model.named_parameters():
                if k.startswith(t + ".") or k == t:
                    logging.info(f"Setting {k}.requires_grad = False")
                    p.requires_grad = False

        # 3. Build optimizer
        optimizers = cls.build_optimizers(args, model=model)

        # 4. Build schedulers
        schedulers = []
        for i, optim in enumerate(optimizers, 1):
            suf = "" if i == 1 else str(i)
            name = getattr(args, f"scheduler{suf}")
            conf = getattr(args, f"scheduler{suf}_conf")
            if name is not None:
                cls_ = scheduler_classes.get(name)
                if cls_ is None:
                    raise ValueError(
                        f"must be one of {list(scheduler_classes)}: {name}"
                    )
                scheduler = cls_(optim, **conf)
            else:
                scheduler = None
            schedulers.append(scheduler)

        logging.info(pytorch_cudnn_version())
        logging.info(model_summary(model))
        for i, (o, s) in enumerate(zip(optimizers, schedulers), 1):
            suf = "" if i == 1 else str(i)
            logging.info(f"Optimizer{suf}:\n{o}")
            logging.info(f"Scheduler{suf}: {s}")

        # 5. Dump "args" to config.yaml
        # NOTE(kamo): "args" should be saved after object-buildings are done
        # because they are allowed to modify "args".
        output_dir = Path(args.output_dir)
        if not distributed_option.distributed or distributed_option.dist_rank == 0:
            output_dir.mkdir(parents=True, exist_ok=True)
            with (output_dir / "config.yaml").open("w", encoding="utf-8") as f:
                logging.info(
                    f'Saving the configuration in {output_dir / "config.yaml"}'
                )
                if args.use_pai:
                    buffer = BytesIO()
                    torch.save({"config": vars(args)}, buffer)
                    args.oss_bucket.put_object(
                        os.path.join(args.output_dir, "config.dict"),
                        buffer.getvalue(),
                    )
                else:
                    yaml_no_alias_safe_dump(vars(args), f, indent=4, sort_keys=False)

        if not args.dry_run:
            logging.info("Training args: {}".format(args))

        # 6. Load pre-trained model
        for p in args.init_param:
            logging.info(f"Loading pretrained params from {p}")
            load_pretrained_model(
                model=model,
                init_param=p,
                ignore_init_mismatch=args.ignore_init_mismatch,
                # NOTE(kamo): "cuda" for torch.load always indicates cuda:0
                # in PyTorch<=1.4
                map_location=f"cuda:{torch.cuda.current_device()}"
                if args.ngpu > 0
                else "cpu",
                oss_bucket=args.oss_bucket,
            )

        # 7. Build iterator factories
        if args.dataset_type == "large":
            from funasr.datasets.large_datasets.build_dataloader import ArkDataLoader

            train_iter_factory = ArkDataLoader(
                args.train_data_file, args.token_list, args.config, mode="train"
            )
            valid_iter_factory = ArkDataLoader(
                args.valid_data_file, args.token_list, args.config, mode="eval"
            )
        elif args.dataset_type == "small":
            train_iter_factory = cls.build_iter_factory(
                args=args,
                distributed_option=distributed_option,
                mode="train",
            )
            valid_iter_factory = cls.build_iter_factory(
                args=args,
                distributed_option=distributed_option,
                mode="valid",
            )
        else:
            raise ValueError(f"Not supported dataset_type={args.dataset_type}")

        if args.scheduler == "tri_stage":
            for scheduler in schedulers:
                scheduler.init_tri_stage_scheudler(max_update=args.max_update)

        # 8. Start training
        if args.use_wandb:
            if wandb is None:
                raise RuntimeError("Please install wandb")

            try:
                wandb.login()
            except wandb.errors.UsageError:
                logging.info("wandb is not configured! Run `wandb login` to enable it")
                args.use_wandb = False

        if args.use_wandb:
            if (
                not distributed_option.distributed
                or distributed_option.dist_rank == 0
            ):
                if args.wandb_project is None:
                    project = "FunASR_" + cls.__name__
                else:
                    project = args.wandb_project
                if args.wandb_name is None:
                    name = str(Path(".").resolve()).replace("/", "_")
                else:
                    name = args.wandb_name

                wandb.init(
                    entity=args.wandb_entity,
                    project=project,
                    name=name,
                    dir=output_dir,
                    id=args.wandb_id,
                    resume="allow",
                )
                wandb.config.update(args)
            else:
                # wandb also supports grouping for distributed training,
                # but we only log aggregated data,
                # so it's enough to perform it on the rank-0 node.
                args.use_wandb = False

        # Don't give args to trainer.run() directly!!!
        # Instead, define an "Options" object and build it here.
        trainer_options = cls.trainer.build_options(args)

        cls.trainer.run(
            model=model,
            optimizers=optimizers,
            schedulers=schedulers,
            train_iter_factory=train_iter_factory,
            valid_iter_factory=valid_iter_factory,
            trainer_options=trainer_options,
            distributed_option=distributed_option,
        )

        if args.use_wandb and wandb.run:
            wandb.finish()

    @classmethod
    def build_iter_options(
        cls,
        args: argparse.Namespace,
        distributed_option: DistributedOption,
        mode: str,
    ):
        if mode == "train":
            preprocess_fn = cls.build_preprocess_fn(args, train=True)
            collate_fn = cls.build_collate_fn(args, train=True)
            data_path_and_name_and_type = args.train_data_path_and_name_and_type
            shape_files = args.train_shape_file
            batch_size = args.batch_size
            batch_bins = args.batch_bins
            batch_type = args.batch_type
            max_cache_size = args.max_cache_size
            max_cache_fd = args.max_cache_fd
            distributed = distributed_option.distributed
            num_batches = None
            num_iters_per_epoch = args.num_iters_per_epoch
            train = True

        elif mode == "valid":
            preprocess_fn = cls.build_preprocess_fn(args, train=False)
            collate_fn = cls.build_collate_fn(args, train=False)
            data_path_and_name_and_type = args.valid_data_path_and_name_and_type
            shape_files = args.valid_shape_file

            if args.valid_batch_type is None:
                batch_type = args.batch_type
            else:
                batch_type = args.valid_batch_type
            if args.valid_batch_size is None:
                batch_size = args.batch_size
            else:
                batch_size = args.valid_batch_size
            if args.valid_batch_bins is None:
                batch_bins = args.batch_bins
            else:
                batch_bins = args.valid_batch_bins
            if args.valid_max_cache_size is None:
                # Cache 5% of the maximum size for the validation loader
                max_cache_size = 0.05 * args.max_cache_size
            else:
                max_cache_size = args.valid_max_cache_size
            max_cache_fd = args.max_cache_fd
            distributed = distributed_option.distributed
            num_batches = None
            num_iters_per_epoch = None
            train = False

        else:
            raise NotImplementedError(f"mode={mode}")

        return IteratorOptions(
            preprocess_fn=preprocess_fn,
            collate_fn=collate_fn,
            data_path_and_name_and_type=data_path_and_name_and_type,
            shape_files=shape_files,
            batch_type=batch_type,
            batch_size=batch_size,
            batch_bins=batch_bins,
            num_batches=num_batches,
            max_cache_size=max_cache_size,
            max_cache_fd=max_cache_fd,
            distributed=distributed,
            num_iters_per_epoch=num_iters_per_epoch,
            train=train,
        )

    @classmethod
    def build_iter_factory(
        cls,
        args: argparse.Namespace,
        distributed_option: DistributedOption,
        mode: str,
        kwargs: dict = None,
    ) -> AbsIterFactory:
        """Build a factory object of mini-batch iterator.

        This object is invoked every epoch to build the iterator for that
        epoch, as follows:

        >>> iter_factory = cls.build_iter_factory(...)
        >>> for epoch in range(1, max_epoch):
        ...     for keys, batch in iter_factory.build_iter(epoch):
        ...         model(**batch)

        The mini-batches for each epoch are fully controlled by this class.
        Note that the random seed used for shuffling is decided as "seed + epoch",
        so the generated mini-batches can be reproduced when resuming.

        Note that the definition of "epoch" doesn't always mean
        one pass over the whole training corpus.
        The "--num_iters_per_epoch" option restricts the number of iterations
        per epoch, and the remaining samples of the original epoch are left
        for the next epoch.

        e.g. If the number of mini-batches equals 4, the following two are the same:

        - 1 epoch without "--num_iters_per_epoch"
        - 4 epochs with "--num_iters_per_epoch" == 1
        """
        assert check_argument_types()
        iter_options = cls.build_iter_options(args, distributed_option, mode)

        # Overwrite iter_options if any kwargs are given
        if kwargs is not None:
            for k, v in kwargs.items():
                setattr(iter_options, k, v)
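        # e.g. kwargs=dict(num_batches=10) caps the epoch at 10 mini-batches
        # (an illustrative override; any IteratorOptions field can be set here).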

        if args.iterator_type == "sequence":
            return cls.build_sequence_iter_factory(
                args=args,
                iter_options=iter_options,
                mode=mode,
            )
        elif args.iterator_type == "chunk":
            return cls.build_chunk_iter_factory(
                args=args,
                iter_options=iter_options,
                mode=mode,
            )
        elif args.iterator_type == "task":
            return cls.build_task_iter_factory(
                args=args,
                iter_options=iter_options,
                mode=mode,
            )
        else:
            raise RuntimeError(f"Not supported: iterator_type={args.iterator_type}")

    @classmethod
    def build_sequence_iter_factory(
        cls, args: argparse.Namespace, iter_options: IteratorOptions, mode: str
    ) -> AbsIterFactory:
        assert check_argument_types()

        dataset = ESPnetDataset(
            iter_options.data_path_and_name_and_type,
            float_dtype=args.train_dtype,
            preprocess=iter_options.preprocess_fn,
            max_cache_size=iter_options.max_cache_size,
            max_cache_fd=iter_options.max_cache_fd,
        )
        cls.check_task_requirements(
            dataset, args.allow_variable_data_keys, train=iter_options.train
        )

        if Path(
            Path(iter_options.data_path_and_name_and_type[0][0]).parent, "utt2category"
        ).exists():
            utt2category_file = str(
                Path(
                    Path(iter_options.data_path_and_name_and_type[0][0]).parent,
                    "utt2category",
                )
            )
        else:
            utt2category_file = None
        batch_sampler = build_batch_sampler(
            type=iter_options.batch_type,
            shape_files=iter_options.shape_files,
            fold_lengths=args.fold_length,
            batch_size=iter_options.batch_size,
            batch_bins=iter_options.batch_bins,
            sort_in_batch=args.sort_in_batch,
            sort_batch=args.sort_batch,
            drop_last=False,
            min_batch_size=torch.distributed.get_world_size()
            if iter_options.distributed
            else 1,
            utt2category_file=utt2category_file,
        )

        batches = list(batch_sampler)
        if iter_options.num_batches is not None:
            batches = batches[: iter_options.num_batches]

        bs_list = [len(batch) for batch in batches]

        logging.info(f"[{mode}] dataset:\n{dataset}")
        logging.info(f"[{mode}] Batch sampler: {batch_sampler}")
        logging.info(
            f"[{mode}] mini-batch sizes summary: N-batch={len(bs_list)}, "
            f"mean={np.mean(bs_list):.1f}, min={np.min(bs_list)}, max={np.max(bs_list)}"
        )
        if args.scheduler == "tri_stage" and mode == "train":
            args.max_update = len(bs_list) * args.max_epoch
            logging.info("Max update: {}".format(args.max_update))

        if iter_options.distributed:
            world_size = torch.distributed.get_world_size()
            rank = torch.distributed.get_rank()
            for batch in batches:
                if len(batch) < world_size:
                    raise RuntimeError(
                        f"The batch-size must be equal to or greater than world_size: "
                        f"{len(batch)} < {world_size}"
                    )
            batches = [batch[rank::world_size] for batch in batches]
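            # e.g. with world_size=2, a batch [u0, u1, u2, u3] becomes
            # [u0, u2] on rank 0 and [u1, u3] on rank 1.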

        return SequenceIterFactory(
            dataset=dataset,
            batches=batches,
            seed=args.seed,
            num_iters_per_epoch=iter_options.num_iters_per_epoch,
            shuffle=iter_options.train,
            num_workers=args.num_workers,
            collate_fn=iter_options.collate_fn,
            pin_memory=args.ngpu > 0,
        )
  1397. @classmethod
  1398. def build_chunk_iter_factory(
  1399. cls,
  1400. args: argparse.Namespace,
  1401. iter_options: IteratorOptions,
  1402. mode: str,
  1403. ) -> AbsIterFactory:
        assert check_argument_types()

        dataset = ESPnetDataset(
            iter_options.data_path_and_name_and_type,
            float_dtype=args.train_dtype,
            preprocess=iter_options.preprocess_fn,
            max_cache_size=iter_options.max_cache_size,
            max_cache_fd=iter_options.max_cache_fd,
        )
        cls.check_task_requirements(
            dataset, args.allow_variable_data_keys, train=iter_options.train
        )

        if len(iter_options.shape_files) == 0:
            key_file = iter_options.data_path_and_name_and_type[0][0]
        else:
            key_file = iter_options.shape_files[0]

        batch_sampler = UnsortedBatchSampler(batch_size=1, key_file=key_file)
        batches = list(batch_sampler)
        if iter_options.num_batches is not None:
            batches = batches[: iter_options.num_batches]
        logging.info(f"[{mode}] dataset:\n{dataset}")

        if iter_options.distributed:
            world_size = torch.distributed.get_world_size()
            rank = torch.distributed.get_rank()
            if len(batches) < world_size:
                raise RuntimeError("Number of samples is smaller than world_size")
            if iter_options.batch_size < world_size:
                raise RuntimeError(
                    "batch_size must be equal to or larger than world_size"
                )
            if rank < iter_options.batch_size % world_size:
                batch_size = iter_options.batch_size // world_size + 1
            else:
                batch_size = iter_options.batch_size // world_size
            num_cache_chunks = args.num_cache_chunks // world_size
            # NOTE(kamo): The corpus is split by sample counts without
            # considering each sample's length, so the number of iterations
            # is not necessarily equal across ranks and training is limited
            # by the rank with the fewest iterations,
            # i.e. the samples beyond that count are discarded.
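            # e.g. with 10 samples and world_size=3, rank 0 gets 4 samples and
            # ranks 1-2 get 3 each, so every rank stops after 3 iterations.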
            batches = batches[rank::world_size]
        else:
            batch_size = iter_options.batch_size
            num_cache_chunks = args.num_cache_chunks

        return ChunkIterFactory(
            dataset=dataset,
            batches=batches,
            seed=args.seed,
            batch_size=batch_size,
            # For the chunk iterator, --num_iters_per_epoch indicates
            # the number of samples per epoch, not the number of iterations.
            num_samples_per_epoch=iter_options.num_iters_per_epoch,
            shuffle=iter_options.train,
            num_workers=args.num_workers,
            collate_fn=iter_options.collate_fn,
            pin_memory=args.ngpu > 0,
            chunk_length=args.chunk_length,
            chunk_shift_ratio=args.chunk_shift_ratio,
            num_cache_chunks=num_cache_chunks,
        )

    # NOTE(kamo): This is deliberately not an abstractmethod: only tasks that
    # set iterator_type="task" need to override it.
    @classmethod
    def build_task_iter_factory(
        cls,
        args: argparse.Namespace,
        iter_options: IteratorOptions,
        mode: str,
    ) -> AbsIterFactory:
  1470. """Build task specific iterator factory
  1471. Example:
  1472. >>> class YourTask(AbsTask):
  1473. ... @classmethod
  1474. ... def add_task_arguments(cls, parser: argparse.ArgumentParser):
  1475. ... parser.set_defaults(iterator_type="task")
  1476. ...
  1477. ... @classmethod
  1478. ... def build_task_iter_factory(
  1479. ... cls,
  1480. ... args: argparse.Namespace,
  1481. ... iter_options: IteratorOptions,
  1482. ... mode: str,
  1483. ... ):
  1484. ... return FooIterFactory(...)
  1485. ...
  1486. ... @classmethod
  1487. ... def build_iter_options(
  1488. .... args: argparse.Namespace,
  1489. ... distributed_option: DistributedOption,
  1490. ... mode: str
  1491. ... ):
  1492. ... # if you need to customize options object
  1493. """
  1494. raise NotImplementedError

    @classmethod
    def build_multiple_iter_factory(
        cls, args: argparse.Namespace, distributed_option: DistributedOption, mode: str
    ):
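        """Build a MultipleIterFactory over pre-split data directories.

        Every path in data_path_and_name_and_type and shape_files must be a
        directory containing a "num_splits" file and "split.0" ...
        "split.{num_splits - 1}" entries; one sub-factory is then built
        lazily per split.
        """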
        assert check_argument_types()

        iter_options = cls.build_iter_options(args, distributed_option, mode)
        assert len(iter_options.data_path_and_name_and_type) > 0, len(
            iter_options.data_path_and_name_and_type
        )

        # 1. Sanity check
        num_splits = None
        for path in [
            path for path, _, _ in iter_options.data_path_and_name_and_type
        ] + list(iter_options.shape_files):
            if not Path(path).is_dir():
                raise RuntimeError(f"{path} is not a directory")
            p = Path(path) / "num_splits"
            if not p.exists():
                raise FileNotFoundError(f"{p} does not exist")
            with p.open() as f:
                _num_splits = int(f.read())
                if num_splits is not None and num_splits != _num_splits:
                    raise RuntimeError(
                        f"Number of splits is mismatched: "
                        f"{iter_options.data_path_and_name_and_type[0][0]} and {path}"
                    )
                num_splits = _num_splits

            for i in range(num_splits):
                p = Path(path) / f"split.{i}"
                if not p.exists():
                    raise FileNotFoundError(f"{p} does not exist")

        # 2. Create functions to build an iter factory for each split
        data_path_and_name_and_type_list = [
            [
                (str(Path(p) / f"split.{i}"), n, t)
                for p, n, t in iter_options.data_path_and_name_and_type
            ]
            for i in range(num_splits)
        ]
        shape_files_list = [
            [str(Path(s) / f"split.{i}") for s in iter_options.shape_files]
            for i in range(num_splits)
        ]
        num_iters_per_epoch_list = [
            (iter_options.num_iters_per_epoch + i) // num_splits
            if iter_options.num_iters_per_epoch is not None
            else None
            for i in range(num_splits)
        ]
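        # This spreads the iterations as evenly as possible across the splits,
        # e.g. num_iters_per_epoch=10 with num_splits=4 yields [2, 2, 3, 3],
        # which sums back to 10.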
        max_cache_size = iter_options.max_cache_size / num_splits

        # Note that iter-factories are built for each epoch at runtime lazily.
        build_funcs = [
            functools.partial(
                cls.build_iter_factory,
                args,
                distributed_option,
                mode,
                kwargs=dict(
                    data_path_and_name_and_type=_data_path_and_name_and_type,
                    shape_files=_shape_files,
                    num_iters_per_epoch=_num_iters_per_epoch,
                    max_cache_size=max_cache_size,
                ),
            )
            for (
                _data_path_and_name_and_type,
                _shape_files,
                _num_iters_per_epoch,
            ) in zip(
                data_path_and_name_and_type_list,
                shape_files_list,
                num_iters_per_epoch_list,
            )
        ]

        # 3. Build MultipleIterFactory
        return MultipleIterFactory(
            build_funcs=build_funcs, shuffle=iter_options.train, seed=args.seed
        )

    @classmethod
    def build_streaming_iterator(
        cls,
        data_path_and_name_and_type,
        preprocess_fn,
        collate_fn,
        key_file: Optional[str] = None,
        batch_size: int = 1,
        dtype: str = "float32",
        num_workers: int = 1,
        allow_variable_data_keys: bool = False,
        ngpu: int = 0,
        inference: bool = False,
    ) -> DataLoader:
  1587. """Build DataLoader using iterable dataset"""
        assert check_argument_types()

        # For backward compatibility with the pytorch DataLoader
        if collate_fn is not None:
            kwargs = dict(collate_fn=collate_fn)
        else:
            kwargs = {}

        dataset = IterableESPnetDataset(
            data_path_and_name_and_type,
            float_dtype=dtype,
            preprocess=preprocess_fn,
            key_file=key_file,
        )
        if dataset.apply_utt2category:
            kwargs.update(batch_size=1)
        else:
            kwargs.update(batch_size=batch_size)

        cls.check_task_requirements(
            dataset, allow_variable_data_keys, train=False, inference=inference
        )

        return DataLoader(
            dataset=dataset,
            pin_memory=ngpu > 0,
            num_workers=num_workers,
            **kwargs,
        )

    # ~~~~~~~~~ The methods below are mainly used for inference ~~~~~~~~~
    @classmethod
    def build_model_from_file(
        cls,
        config_file: Optional[Union[Path, str]] = None,
        model_file: Optional[Union[Path, str]] = None,
        device: str = "cpu",
    ) -> Tuple[AbsESPnetModel, argparse.Namespace]:
  1621. """Build model from the files.
  1622. This method is used for inference or fine-tuning.
  1623. Args:
  1624. config_file: The yaml file saved when training.
  1625. model_file: The model file saved when training.
  1626. device: Device type, "cpu", "cuda", or "cuda:N".
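
        Example (a hedged sketch; "exp/config.yaml" and "exp/model.pth" are
        placeholder paths, and YourTask stands for a concrete AbsTask
        subclass):

            >>> model, train_args = YourTask.build_model_from_file(
            ...     config_file="exp/config.yaml",
            ...     model_file="exp/model.pth",
            ...     device="cpu",
            ... )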
  1627. """
  1628. assert check_argument_types()
  1629. if config_file is None:
  1630. assert model_file is not None, (
  1631. "The argument 'model_file' must be provided "
  1632. "if the argument 'config_file' is not specified."
  1633. )
  1634. config_file = Path(model_file).parent / "config.yaml"
  1635. else:
  1636. config_file = Path(config_file)
  1637. with config_file.open("r", encoding="utf-8") as f:
  1638. args = yaml.safe_load(f)
  1639. args = argparse.Namespace(**args)
  1640. model = cls.build_model(args)
  1641. if not isinstance(model, AbsESPnetModel):
  1642. raise RuntimeError(
  1643. f"model must inherit {AbsESPnetModel.__name__}, but got {type(model)}"
  1644. )
  1645. model.to(device)
  1646. if model_file is not None:
  1647. if device == "cuda":
  1648. # NOTE(kamo): "cuda" for torch.load always indicates cuda:0
  1649. # in PyTorch<=1.4
  1650. device = f"cuda:{torch.cuda.current_device()}"
  1651. model.load_state_dict(torch.load(model_file, map_location=device))
  1652. return model, args