-
Notifications
You must be signed in to change notification settings - Fork 12
/
Copy path: rnnlm_libri.yaml
31 lines (29 loc) · 1.16 KB
/
rnnlm_libri.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
# Configuration for training an RNN language model on LibriSpeech
# (subword-unit features under data/libri_fbank80_subword5000).
rnn_lm:
  optimizer:
    type: 'Adam'                  # Optimizer used for training
    learning_rate: 0.0001         # Learning rate for optimizer
  model_para:
    emb_dim: 650                  # Token embedding dimension
    h_dim: 650                    # RNN hidden-state dimension
    layers: 2                     # Number of stacked RNN layers
    rnn: 'LSTM'                   # RNN cell type
    dropout_rate: 0.5             # Dropout applied between layers

solver:
  # Data options
  dataset: 'librispeech'
  data_path: 'data/libri_fbank80_subword5000'  # Source data path
  n_jobs: 4                       # Subprocesses used for torch DataLoader
  max_label_len: 400              # Max length for output sequence (0 for no restriction)
  # Training options
  train_set: ['train-clean-100', 'train-clean-360', 'train-other-500']
  batch_size: 32                  # Training batch size
  apex: true                      # Use APEX (see https://github.com/NVIDIA/apex for more details)
  total_steps: 3000000            # Total steps for training
  # Validation options
  dev_set: ['dev-clean']
  dev_batch_size: 32
  dev_step: 1000                  # Validate every this many training steps
  dev_metric: 'dev_loss'          # Metric monitored for model selection
  # Misc
  max_timestep: 0                 # 0 = no limit on input timesteps
  test_set: []                    # No held-out test set configured