template.yaml

# This is an example that demonstrates how to configure a model file.
# You can modify the configuration according to your own requirements.
#
# To print the register table:
#   from funasr.register import tables
#   tables.print()

# network architecture
model: LLMASRNAR
model_conf:
    lsm_weight: 0.1 # label smoothing option
    length_normalized_loss: true
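
# lsm_weight: 0.1 smooths the one-hot cross-entropy targets, roughly
#   q = (1 - lsm_weight) * one_hot + lsm_weight * uniform
# (the exact off-target distribution depends on the loss implementation);
# length_normalized_loss: true typically normalizes the loss by the number of
# target tokens rather than by the number of utterances.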

# encoder
encoder: Paraformer
encoder_conf:
    hub: funasr
    init_param_path: "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
    freeze: false

# LLM
llm: Vicuna
llm_conf:
    hub: hf
    init_param_path: "/nfs/maziyang.mzy/models/vicuna-7b-v1.5"
    freeze: true

# adaptor
adaptor: linear
adaptor_conf:
    downsample_rate: 1
    llm_dim: 4096
    encoder_dim: 2048
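
# The "linear" adaptor bridges encoder features (encoder_dim) into the LLM
# embedding space (llm_dim). A minimal PyTorch sketch of an adaptor with this
# shape (illustrative only, not FunASR's actual implementation):
#   import torch.nn as nn
#
#   class LinearAdaptor(nn.Module):
#       def __init__(self, encoder_dim=2048, llm_dim=4096, downsample_rate=1):
#           super().__init__()
#           self.r = downsample_rate
#           self.proj = nn.Linear(encoder_dim * downsample_rate, llm_dim)
#
#       def forward(self, x):                # x: (batch, time, encoder_dim)
#           b, t, d = x.shape
#           t = t - t % self.r               # drop frames that don't fill a group
#           x = x[:, :t, :].reshape(b, t // self.r, d * self.r)
#           return self.proj(x)              # (batch, time // r, llm_dim)
# With downsample_rate: 1 this reduces to a single Linear(2048, 4096).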

# frontend related
frontend: WavFrontend
frontend_conf:
    fs: 16000
    window: hamming
    n_mels: 80
    frame_length: 25 # ms
    frame_shift: 10 # ms
    dither: 0.0
    lfr_m: 1
    lfr_n: 1
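
# Quick sanity check of the frame math at these settings (assuming frame_length
# and frame_shift are in milliseconds, as is conventional for fbank frontends):
#   fs, frame_length, frame_shift = 16000, 25, 10
#   win = fs * frame_length // 1000   # 400 samples per analysis window
#   hop = fs * frame_shift // 1000    # 160 samples per hop -> ~100 frames/s
# With lfr_m = lfr_n = 1, low-frame-rate (LFR) stacking is disabled, so the
# frontend emits ~100 frames per second of 80-dim (n_mels) fbank features.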

specaug: SpecAug
specaug_conf:
    apply_time_warp: true
    time_warp_window: 5
    time_warp_mode: bicubic
    apply_freq_mask: true
    freq_mask_width_range:
    - 0
    - 30
    num_freq_mask: 2
    apply_time_mask: true
    time_mask_width_range:
    - 0
    - 40
    num_time_mask: 2
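
# SpecAug applies SpecAugment-style regularization: time warping plus random
# masks over the fbank features. With the settings above, each utterance gets
# 2 frequency masks of width 0-30 mel bins and 2 time masks of width 0-40
# frames. Illustrative sketch of one frequency mask in numpy (not FunASR's
# implementation):
#   import numpy as np
#
#   def freq_mask(feat, max_width=30):       # feat: (time, n_mels)
#       width = np.random.randint(0, max_width + 1)
#       start = np.random.randint(0, feat.shape[1] - width + 1)
#       feat[:, start:start + width] = 0.0   # zero out a band of mel bins
#       return feat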

train_conf:
    accum_grad: 1
    grad_clip: 5
    max_epoch: 150
    keep_nbest_models: 10
    log_interval: 50

optim: adam
optim_conf:
    lr: 0.001
    weight_decay: 0.000001
scheduler: warmuplr
scheduler_conf:
    warmup_steps: 35000
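
# warmuplr typically follows the Noam-style schedule (as in ESPnet's WarmupLR,
# which this scheduler name is assumed to mirror):
#   lr(step) = lr * warmup_steps**0.5 * min(step**-0.5, step * warmup_steps**-1.5)
# i.e. the rate ramps up roughly linearly for the first 35000 steps, peaks at
# the configured lr (0.001) at step = warmup_steps, then decays as 1/sqrt(step).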

dataset: AudioLLMDataset
dataset_conf:
    index_ds: IndexDSJsonl
    batch_sampler: RankFullLocalShuffleBatchSampler
    batch_type: example # "example" or "length"
    batch_size: 4 # if batch_type is "example", batch_size is the number of samples; if "length", it is the total source_token_len + target_token_len
    max_token_length: 2048 # filter out samples whose source_token_len + target_token_len exceeds max_token_length
    buffer_size: 500
    shuffle: true
    num_workers: 4
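
# IndexDSJsonl reads a jsonl index: one JSON object per line describing a
# sample. The fields below follow the output of FunASR's scp2jsonl tool and
# are an assumption here; verify the schema against your FunASR version:
#   import json
#
#   with open("train.jsonl", "w") as f:
#       f.write(json.dumps({"key": "utt1", "source": "/data/utt1.wav",
#                           "source_len": 88747, "target": "hello world",
#                           "target_len": 2}) + "\n")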

tokenizer: HuggingfaceTokenizer
tokenizer_conf:
    unk_symbol: <unk>
    init_param_path: null
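
# HuggingfaceTokenizer wraps a transformers tokenizer. To sanity-check the
# vocabulary it would load (assuming the standard transformers AutoTokenizer
# API and the Vicuna path configured above):
#   from transformers import AutoTokenizer
#
#   tok = AutoTokenizer.from_pretrained("/nfs/maziyang.mzy/models/vicuna-7b-v1.5")
#   print(tok.unk_token)   # should match unk_symbol above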