# distributed training
nodes: 1
gpus: 1 # I recommend assigning 1 GPU per node
nr: 0 # rank of this machine among the nodes (0 to nodes - 1); see the sketch below
dataparallel: 0 # set to 1 to use DataParallel instead of DistributedDataParallel
workers: 8
dataset_dir: "/Users/gaurav/Desktop/thesis-work/Datasets/T-1/train-faces/all/train"
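# A minimal sketch of how the distributed options above typically feed
# torch.distributed (hypothetical wiring, not necessarily this repo's code;
# `args` holds this config, `gpu` is the local GPU index, and MASTER_ADDR /
# MASTER_PORT are assumed set for the default env:// init):
#
#   import torch.distributed as dist
#   args.world_size = args.gpus * args.nodes
#   rank = args.nr * args.gpus + gpu
#   dist.init_process_group("nccl", rank=rank, world_size=args.world_size)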
# train options
seed: 42 # sacred handles automatic seeding when passed in the config (manual fallback sketched below)
batch_size: 128
image_size: [108, 124]
start_epoch: 0
epochs: 100
dataset: "TFIW" # alternatives: "CIFAR10", "STL10"
pretrain: True
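# If sacred is not in play, the seed above can be applied by hand; a hedged
# sketch using standard PyTorch/NumPy calls, not this repo's exact code:
#
#   import random
#   import numpy as np
#   import torch
#   random.seed(args.seed)
#   np.random.seed(args.seed)
#   torch.manual_seed(args.seed)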
# model options
resnet: "resnet18"
projection_dim: 64 # paper: "[...] to project the representation to a 128-dimensional latent space" (128 in the paper, 64 here)
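# The SimCLR paper's projection head g(.) is an MLP with one hidden layer,
# z = W2 * ReLU(W1 * h). A hedged sketch (biases omitted to match that form;
# `n_features` is the encoder output size, 512 for resnet18, and `args` is
# this config):
#
#   import torch.nn as nn
#   projector = nn.Sequential(
#       nn.Linear(n_features, n_features, bias=False),
#       nn.ReLU(),
#       nn.Linear(n_features, args.projection_dim, bias=False),
#   )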
# loss options
optimizer: "Adam" # or LARS (experimental)
weight_decay: 1.0e-6 # "optimized using LARS [...] and weight decay of 10^-6"
temperature: 0.5 # see appendix B.7: "Optimal temperature under different batch sizes"
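# The temperature enters the paper's NT-Xent loss; for a positive pair
# (i, j) in a batch of 2N augmented views:
#
#   l(i, j) = -log( exp(sim(z_i, z_j) / temperature)
#                   / sum_{k=1, k != i}^{2N} exp(sim(z_i, z_k) / temperature) )
#
# where sim(u, v) = u.v / (||u|| * ||v||) is cosine similarity.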
# reload options
model_path: "save" # set to the directory containing `checkpoint_##.tar`
epoch_num: 100 # set to checkpoint number
reload: False
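# A hedged sketch of how these reload options resolve to a file, given the
# `checkpoint_##.tar` naming above (`model` and `device` are assumed to
# exist already; not necessarily this repo's exact code):
#
#   import os
#   import torch
#   if args.reload:
#       fp = os.path.join(args.model_path, "checkpoint_{}.tar".format(args.epoch_num))
#       model.load_state_dict(torch.load(fp, map_location=device))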
# logistic regression options
logistic_batch_size: 256
logistic_epochs: 500
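# Minimal usage sketch for this file (hypothetical; assumes PyYAML is
# installed and the values are exposed as attributes, e.g. via
# argparse.Namespace):
#
#   import yaml
#   from argparse import Namespace
#
#   with open("config.yaml") as f:
#       args = Namespace(**yaml.safe_load(f))
#   print(args.batch_size)  # 128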