forked from OpenRLHF/OpenRLHF
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: train_kto_llama.sh
More file actions
31 lines (28 loc) · 798 Bytes
/
train_kto_llama.sh
File metadata and controls
31 lines (28 loc) · 798 Bytes
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
set -x

# Build the trainer invocation as one multi-line string in $training_commands.
# `read -r -d ''` reads until a NUL byte; since the heredoc contains none, read
# consumes everything and then exits non-zero at EOF.  That exit status is
# harmless here, but it would abort the script if a caller ever enables
# `set -e` — the `|| true` guard makes the idiom safe either way.
# The heredoc delimiter is unquoted on purpose (no expansions occur in the
# body, and this matches the sibling OpenRLHF launch scripts).
read -r -d '' training_commands <<EOF || true
openrlhf.cli.train_kto \
--save_path ./checkpoint/llama3-8b-kto \
--save_steps -1 \
--logging_steps 1 \
--eval_steps -1 \
--train_batch_size 64 \
--micro_train_batch_size 1 \
--pretrain OpenRLHF/Llama-3-8b-sft-mixture \
--param_dtype bf16 \
--max_epochs 1 \
--max_len 8192 \
--zero_stage 3 \
--learning_rate 5e-7 \
--dataset Dylan2048/ultrafeedback-unpaired-preferences \
--input_key instruction \
--output_key response \
--label_key score \
--attn_implementation flash_attention_2 \
--beta 0.1 \
--max_samples 1024 \
--gradient_checkpointing
EOF
# --use_wandb [WANDB_TOKENS] or True (use wandb login command)
# Launch locally via deepspeed unless the first argument is "slurm" (in which
# case an outer sbatch wrapper is expected to do the launching instead).
# "${1:-}" defaults to the empty string so the script also works when invoked
# with no arguments under `set -u`.
if [[ "${1:-}" != "slurm" ]]; then
    # $training_commands is intentionally unquoted: word splitting is what
    # turns the multi-line string into separate deepspeed arguments.
    # shellcheck disable=SC2086
    deepspeed --module $training_commands
fi