# Local: `make baseline METHOD=head_only` (METHOD defaults to head_only; alternatives: lora, fullft)
# Slurm GPU cluster: `make sbaseline METHOD=head_only` (same METHOD options)
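# Other knobs can be overridden on the command line; the values below are
# illustrative, not tuned recommendations:
#   make baseline METHOD=lora EPOCHS=12 LR=5.0e-4 BATCH_SIZE=128
#   make sbaseline METHOD=fullft SLURM_TIME=12:00:00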
PY=python
DATA_DIR=src/data/pcam
# Backbone model
MODEL_ID=facebook/dinov3-vits16-pretrain-lvd1689m
# Cluster-specific overrides
CLUSTER_BASHRC?=$(HOME)/.bashrc # path to bashrc to source on cluster (set empty to skip)
CLUSTER_DIR?=$(HOME)/Tiny-DINOv3-PCam # project path on the cluster
VENV?=.venv # virtualenv path relative to $(CLUSTER_DIR)
CLUSTER_ACTIVATE?=source $(VENV)/bin/activate # command to activate environment on cluster
# Sanitize accidental padding/trailing spaces from variables above
CLUSTER_BASHRC := $(strip $(CLUSTER_BASHRC))
CLUSTER_DIR := $(strip $(CLUSTER_DIR))
VENV := $(strip $(VENV))
CLUSTER_ACTIVATE := $(strip $(CLUSTER_ACTIVATE))
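# Example of why the strip matters: with an inline comment like
# `CLUSTER_BASHRC?=$(HOME)/.bashrc # note`, GNU Make keeps the space before `#`
# in the value, so the quoted test [ -f "$(CLUSTER_BASHRC)" ] in WRAP_CMD below
# would fail on the trailing space.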
# Download PCam, run once before other commands
get-data:
$(PY) -m scripts.download_pcam --out $(DATA_DIR)
METHOD?=head_only
SELECT_METRIC?=auroc
WANDB?=--wandb
NUM_WORKERS?=4
EPOCHS?=8
RESOLUTION?=224
WARMUP_STEPS?=200
GRAD_CLIP?=1.0
LABEL_SMOOTHING?=0.05
BATCH_SIZE?=256
VAL_BATCH_SIZE?=512
LR?=1.0e-3
WEIGHT_DECAY?=1.0e-4
LORA_R?=8
LORA_ALPHA?=16
LORA_DROPOUT?=0.05
LORA_TARGETS?=q_proj,k_proj,v_proj,o_proj
LR_HEAD?=1.0e-3
LR_LORA?=5.0e-4
LR_NORMS_BIAS?=5.0e-4
VAL_EVAL_FRAC?=0.5
# Validation cadence flags: --val_mid_epoch, --val_epoch_end, --val_heavy_end, --val_heavy_mid
VAL_FLAGS=--val_mid_epoch --val_epoch_end --val_heavy_end
VAL_FLAGS_HUGE=--val_mid_epoch --val_epoch_end --val_heavy_end --val_heavy_mid
VAL_FLAGS_NO_MID=--val_epoch_end --val_heavy_end
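# The targets below use $(VAL_FLAGS_NO_MID); command-line overrides win over `=`
# assignments in Make, so an illustrative way to change the cadence is:
#   make baseline VAL_FLAGS_NO_MID="--val_mid_epoch --val_epoch_end --val_heavy_end"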
# Train LayerNorm/bias params alongside head_only/LoRA methods: [none, norms, bias, both]
TRAIN_NORMS_BIAS?=none
# Quantization mode: [none, int8, bf16]
QUANTIZE?=none
CHECKPOINT?=lora.pt
PRUNE_METHOD?=attention_heads
PRUNE_AMOUNT?=0.8
PRUNE_TARGETS?=all
COMPUTE_FLOPS=--compute_flops
WANDB_RUN_NAME?=eval_run
eval:
$(PY) -m src.train.pruning \
--checkpoint $(CHECKPOINT) \
--data_dir $(DATA_DIR) \
--model_id $(MODEL_ID) \
--resolution $(RESOLUTION) \
--val_batch_size $(VAL_BATCH_SIZE) \
--num_workers $(NUM_WORKERS) \
--prune_method $(PRUNE_METHOD) --prune_amount $(PRUNE_AMOUNT) --prune_targets $(PRUNE_TARGETS) \
$(COMPUTE_FLOPS) \
--quantize $(QUANTIZE) \
--tta_eval
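# Illustrative local pruning/quantization run (the checkpoint path is an
# example, not a shipped artifact):
#   make eval CHECKPOINT=runs/lora.pt PRUNE_METHOD=attention_heads PRUNE_AMOUNT=0.5 QUANTIZE=int8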
baseline:
$(PY) -m src.train.finetune \
--data_dir $(DATA_DIR) \
--model_id $(MODEL_ID) \
--select_metric $(SELECT_METRIC) \
--resolution $(RESOLUTION) \
--batch_size $(BATCH_SIZE) \
--val_batch_size $(VAL_BATCH_SIZE) \
--epochs $(EPOCHS) \
--lr $(LR) \
--weight_decay $(WEIGHT_DECAY) \
--num_workers $(NUM_WORKERS) \
--method $(METHOD) \
--train_log_every_steps 4 \
--val_eval_frac $(VAL_EVAL_FRAC) \
$(VAL_FLAGS_NO_MID) \
--lora_r $(LORA_R) --lora_alpha $(LORA_ALPHA) --lora_dropout $(LORA_DROPOUT) \
--lora_targets $(LORA_TARGETS) \
--lora_include_mlp \
--lr_head $(LR_HEAD) --lr_lora $(LR_LORA) \
--train_norms_bias $(TRAIN_NORMS_BIAS) --lr_norms_bias $(LR_NORMS_BIAS) \
--warmup_steps $(WARMUP_STEPS) --grad_clip $(GRAD_CLIP) \
--label_smoothing $(LABEL_SMOOTHING) \
--aug_histology --tta_eval \
--save_best
debug:
$(PY) -m src.train.finetune \
--method $(METHOD) \
--data_dir $(DATA_DIR) \
--model_id $(MODEL_ID) \
--resolution 96 \
--batch_size 64 \
--val_batch_size 64 \
--epochs 1 \
--lr 1e-3 \
--weight_decay 1e-4 \
--num_workers $(NUM_WORKERS) \
--max_train_batches 1 \
--max_eval_batches 1 \
--skip_bench \
--warmup_steps 0 --grad_clip $(GRAD_CLIP) \
--label_smoothing $(LABEL_SMOOTHING)
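# Quick smoke test: trains and evaluates a single batch at 96px, e.g. `make debug METHOD=lora`.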
SLURM_PARTITION ?= tau
SLURM_TIME ?= 24:00:00
SLURM_GPUS ?= 1
SLURM_CPUS ?= 11
SLURM_JOB_NAME ?= tiny-dino-pcam
SBATCH = sbatch \
--job-name=$(SLURM_JOB_NAME) \
--partition=$(SLURM_PARTITION) \
--gres=gpu:$(SLURM_GPUS) \
--cpus-per-task=$(SLURM_CPUS) \
--time=$(SLURM_TIME) \
--output=slurm/%x_%j.out \
--error=slurm/%x_%j.out \
--nodes=1 \
--ntasks-per-node=1
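# Slurm resources are all overridable per submission; the partition and time
# below are illustrative, site-specific values:
#   make sbaseline SLURM_PARTITION=gpu SLURM_TIME=06:00:00 SLURM_CPUS=8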
CLUSTER_TRAIN = $(PY) -m src.train.finetune \
--data_dir $(DATA_DIR) \
--model_id $(MODEL_ID) \
--select_metric $(SELECT_METRIC) \
--method $(METHOD) \
--resolution $(RESOLUTION) \
--num_workers $(NUM_WORKERS) \
--batch_size $(BATCH_SIZE) --val_batch_size $(VAL_BATCH_SIZE) \
--epochs $(EPOCHS) --lr $(LR) --weight_decay $(WEIGHT_DECAY) \
$(WANDB) --wandb_project dinov3-pcam-compress \
--train_log_every_steps 4 \
--val_eval_frac $(VAL_EVAL_FRAC) \
$(VAL_FLAGS_NO_MID) \
--lora_r $(LORA_R) --lora_alpha $(LORA_ALPHA) --lora_dropout $(LORA_DROPOUT) \
--lora_targets $(LORA_TARGETS) \
--lora_include_mlp \
--lr_head $(LR_HEAD) --lr_lora $(LR_LORA) \
--train_norms_bias $(TRAIN_NORMS_BIAS) --lr_norms_bias $(LR_NORMS_BIAS) \
--warmup_steps $(WARMUP_STEPS) --grad_clip $(GRAD_CLIP) \
--label_smoothing $(LABEL_SMOOTHING) \
--aug_histology --tta_eval \
--save_best
CLUSTER_PRUNE = $(PY) -m src.train.pruning \
--checkpoint $(CHECKPOINT) \
$(WANDB) --wandb_project dinov3-pcam-compress \
--wandb_run_name $(WANDB_RUN_NAME) \
--data_dir $(DATA_DIR) \
--model_id $(MODEL_ID) \
--resolution $(RESOLUTION) \
--val_batch_size $(VAL_BATCH_SIZE) \
--num_workers $(NUM_WORKERS) \
--prune_method $(PRUNE_METHOD) --prune_amount $(PRUNE_AMOUNT) --prune_targets $(PRUNE_TARGETS) \
$(COMPUTE_FLOPS) \
--quantize $(QUANTIZE) \
--tta_eval
.PHONY: get-data eval baseline debug sbaseline seval
define WRAP_CMD
bash -lc 'if [ -n "$(CLUSTER_BASHRC)" ] && [ -f "$(CLUSTER_BASHRC)" ]; then source "$(CLUSTER_BASHRC)"; fi; \
cd "$(CLUSTER_DIR)"; \
$(CLUSTER_ACTIVATE); \
which python; python --version; \
echo Running on partition: $$SLURM_JOB_PARTITION; \
echo Running: $(1); \
$(1)'
endef
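# $(call WRAP_CMD,<cmd>) expands to a single `bash -lc` invocation that sources
# the bashrc (if set and present), cds into $(CLUSTER_DIR), activates the venv,
# logs the active Python, then runs <cmd>.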
sbaseline:
mkdir -p slurm
$(SBATCH) --wrap="$(call WRAP_CMD,$(CLUSTER_TRAIN))"
seval:
mkdir -p slurm
$(SBATCH) --wrap="$(call WRAP_CMD,$(CLUSTER_PRUNE))"
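# Illustrative submissions (checkpoint path and run name are examples):
#   make sbaseline METHOD=lora EPOCHS=10
#   make seval CHECKPOINT=runs/lora.pt WANDB_RUN_NAME=prune_heads_080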