-
Notifications
You must be signed in to change notification settings - Fork 2.2k
Expand file tree
/
Copy pathdeepseek_ocr2_full_8k_config.yaml
More file actions
72 lines (66 loc) · 1.6 KB
/
deepseek_ocr2_full_8k_config.yaml
File metadata and controls
72 lines (66 loc) · 1.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
### data
# Dataset format/template wiring for VL-SFT; paths are JSONL "messages"-style
# datasets (Bengali OCR split).
train_dataset_type: messages
eval_dataset_type: messages
train_dataset_path: ./ocr_vl_sft-train_Bengali.jsonl
train_dataset_prob: "1.0"  # quoted so it stays a string, not a float
eval_dataset_path: ./ocr_vl_sft-test_Bengali.jsonl
eval_dataset_prob: "1.0"
max_seq_len: 8192
# Canonical lowercase booleans (yamllint `truthy`). `False` parses identically
# under the YAML 1.1 and 1.2-core schemas, but was inconsistent with the
# lowercase `true` used elsewhere in this file.
padding_free: false
packing: false
truncate_packing: false
dataset_type: map
dataloader_num_workers: 8
mix_strategy: concat
# Custom chat template backend/name — NOTE(review): presumably registered by
# the copied custom model files; confirm against the trainer's template registry.
template_backend: custom
template: deepseek_ocr2
### model
# Model id or local path to load weights/config from.
model_name_or_path: deepseek-ai/DeepSeek-OCR-2
# Attention backend selector — NOTE(review): "flashmask" looks like a
# framework-specific implementation name; confirm it is valid for this trainer.
_attn_implementation: flashmask
# Custom modeling/conversation files copied alongside the checkpoint so the
# model can be loaded with its bespoke architecture code (space-separated list,
# quoted to keep it a single string value).
copy_custom_file_list: "configuration_deepseek_v2.py conversation.py deepencoderv2.py modeling_deepseekocr2.py modeling_deepseekv2.py"
### finetuning
# base
stage: VL-SFT
fine_tuning: full        # full-parameter fine-tuning (not LoRA)
seed: 42
do_train: true
do_eval: true
per_device_eval_batch_size: 8
per_device_train_batch_size: 8
num_train_epochs: 2
max_steps: -1            # -1 = derive steps from epochs
max_estimate_samples: 500
eval_steps: 400
evaluation_strategy: steps
save_steps: 400
save_strategy: steps
logging_steps: 1
gradient_accumulation_steps: 8
logging_dir: ./Deepseek-OCR2-Bengali/visualdl_logs/
output_dir: ./Deepseek-OCR2-SFT-Bengali
disable_tqdm: true
eval_accumulation_steps: 16
# train
lr_scheduler_type: cosine
warmup_ratio: 0.01
learning_rate: 5.0e-6
min_lr: 5.0e-7           # cosine floor = learning_rate / 10
# optimizer
weight_decay: 0.1
adam_epsilon: 1.0e-8
adam_beta1: 0.9
adam_beta2: 0.95
# performance
tensor_model_parallel_size: 1
pipeline_model_parallel_size: 1
sharding: stage1
recompute_granularity: full
recompute_method: uniform
recompute_num_layers: 1
bf16: true
fp16_opt_level: O2
# pre_alloc_memory: 42
# save
# Lowercase boolean for consistency with `true` used above (yamllint `truthy`);
# `False` parses to the same value but is non-idiomatic YAML.
unified_checkpoint: false
save_checkpoint_format: "flex_checkpoint"
load_checkpoint_format: "flex_checkpoint"