# This is an example that demonstrates how to configure a model file.
# You can modify the configuration according to your own requirements.

# To print the register_table:
#   from funasr.register import tables
#   tables.print()
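
# A config in this format is consumed by FunASR's hydra-based training
# entrypoint. A typical launch looks roughly like the following; the paths,
# data lists, and GPU count are placeholders, and the exact flags can vary
# between FunASR versions, so check the example scripts shipped with yours:
#   torchrun --nproc_per_node 1 funasr/bin/train.py \
#       --config-path ./conf --config-name config.yaml \
#       ++train_data_set_list=train.jsonl \
#       ++valid_data_set_list=val.jsonl \
#       ++output_dir=./exp/llm_asr_nar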

# network architecture
model: LLMASRNAR
model_conf:
    lsm_weight: 0.1 # label smoothing option
    length_normalized_loss: true
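
# For reference, lsm_weight: 0.1 smooths each one-hot CE target to
#   q(k) = 0.9 * 1[k == y] + 0.1 / V
# over vocabulary size V, and length_normalized_loss: true averages the
# loss over target length instead of summing it (standard definitions of
# these options; FunASR's exact reduction may differ in detail).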

# encoder
encoder: Paraformer
encoder_conf:
    hub: funasr
    init_param_path: "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
    freeze: false

llm: Vicuna
llm_conf:
    hub: hf
    init_param_path: "/nfs/maziyang.mzy/models/vicuna-7b-v1.5"
    freeze: true

adaptor: linear
adaptor_conf:
    downsample_rate: 1
    llm_dim: 4096
    encoder_dim: 2048
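
# A minimal sketch of what a "linear" adaptor with these dimensions does
# (an assumed illustration, not FunASR's actual class): stack
# downsample_rate adjacent encoder frames and project
# encoder_dim * downsample_rate -> llm_dim. With downsample_rate: 1 it is
# a single Linear(2048, 4096) mapping encoder outputs into the frozen
# LLM's embedding space, so only the encoder and this projection train.
#
#   import torch.nn as nn
#
#   class LinearAdaptor(nn.Module):  # hypothetical name
#       def __init__(self, encoder_dim=2048, llm_dim=4096, downsample_rate=1):
#           super().__init__()
#           self.k = downsample_rate
#           self.proj = nn.Linear(encoder_dim * self.k, llm_dim)
#
#       def forward(self, x):  # x: (batch, frames, encoder_dim)
#           b, t, d = x.shape
#           t = t - t % self.k  # drop frames that don't fill a group
#           x = x[:, :t].reshape(b, t // self.k, d * self.k)
#           return self.proj(x)  # (batch, frames // k, llm_dim)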

# frontend related
frontend: WavFrontend
frontend_conf:
    fs: 16000
    window: hamming
    n_mels: 80
    frame_length: 25
    frame_shift: 10
    dither: 0.0
    lfr_m: 1
    lfr_n: 1
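
# For reference: at fs: 16000, frame_length: 25 (ms) and frame_shift: 10 (ms)
# give 400-sample windows with a 160-sample hop, i.e. 100 fbank frames per
# second; lfr_m: 1 / lfr_n: 1 disables low-frame-rate stacking, so the
# encoder receives 80-dim features at the full 100 Hz frame rate.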

specaug: SpecAug
specaug_conf:
    apply_time_warp: true
    time_warp_window: 5
    time_warp_mode: bicubic
    apply_freq_mask: true
    freq_mask_width_range:
    - 0
    - 30
    num_freq_mask: 2
    apply_time_mask: true
    time_mask_width_range:
    - 0
    - 40
    num_time_mask: 2
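
# With these settings, SpecAug applies time warping (5-frame window) first,
# then 2 frequency masks of random width in [0, 30] of the 80 mel bins and
# 2 time masks of up to 40 frames (~0.4 s at 100 frames/s) per utterance.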

train_conf:
    accum_grad: 1
    grad_clip: 5
    max_epoch: 150
    keep_nbest_models: 10
    log_interval: 50

optim: adam
optim_conf:
    lr: 0.001
    weight_decay: 0.000001
scheduler: warmuplr
scheduler_conf:
    warmup_steps: 35000
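
# Assuming FunASR's warmuplr matches ESPnet's WarmupLR, the effective lr is
#   lr * warmup_steps^0.5 * min(step^-0.5, step * warmup_steps^-1.5),
# i.e. a linear ramp over the first 35000 steps up to the configured peak
# of 0.001, followed by inverse-square-root decay.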

dataset: AudioLLMDataset
dataset_conf:
    index_ds: IndexDSJsonl
    batch_sampler: RankFullLocalShuffleBatchSampler
    batch_type: example # example or length
    batch_size: 4 # if batch_type is example, batch_size is the number of samples; if length, it is the total source_token_len + target_token_len per batch
    max_token_length: 2048 # filter out samples whose source_token_len + target_token_len exceeds max_token_length
    buffer_size: 500
    shuffle: true
    num_workers: 4
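
# IndexDSJsonl reads one JSON object per line. An illustrative entry (field
# names follow FunASR's scp2jsonl output; verify against your version):
#   {"key": "utt_001", "source": "/data/wav/utt_001.wav", "source_len": 388,
#    "target": "hello world", "target_len": 2}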

tokenizer: HuggingfaceTokenizer
tokenizer_conf:
    unk_symbol: <unk>
    init_param_path: null
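
# HuggingfaceTokenizer wraps a transformers tokenizer. With init_param_path
# left null here, it is presumably pointed at a vocabulary elsewhere at
# runtime, typically the Vicuna checkpoint above so token ids match the LLM;
# roughly equivalent to (illustrative, not FunASR internals):
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained(
#       "/nfs/maziyang.mzy/models/vicuna-7b-v1.5")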