@@ -23,22 +23,28 @@ decoder_conf:
     self_attention_dropout_rate: 0.0
     src_attention_dropout_rate: 0.0
 
+# frontend related
+frontend: wav_frontend
+frontend_conf:
+    fs: 16000
+    window: hamming
+    n_mels: 80
+    frame_length: 25
+    frame_shift: 10
+    lfr_m: 1
+    lfr_n: 1
+
 # hybrid CTC/attention
 model_conf:
     ctc_weight: 0.3
     lsm_weight: 0.1 # label smoothing option
     length_normalized_loss: false
 
-# minibatch related
-batch_type: length
-batch_bins: 32000
-num_workers: 8
-
 # optimization related
 accum_grad: 1
 grad_clip: 5
 patience: 3
-max_epoch: 20
+max_epoch: 60
 val_scheduler_criterion:
     - valid
     - acc
@@ -66,5 +72,15 @@ scheduler: warmuplr # pytorch v1.1.0+ required
 scheduler_conf:
     warmup_steps: 25000
 
+dataset_conf:
+    shuffle: True
+    shuffle_conf:
+        shuffle_size: 2048
+        sort_size: 500
+    batch_conf:
+        batch_type: token
+        batch_size: 25000
+    num_workers: 8
+
 log_interval: 50
 normalize: None