-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: arguments.py
More file actions
81 lines (65 loc) · 4.44 KB
/
arguments.py
File metadata and controls
81 lines (65 loc) · 4.44 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
import argparse
import yaml
import ast
import os
def parse_arguments():
    """Parse command-line arguments for the downstream-task evaluation script.

    Parsing is deliberately done in two passes: the first pass only needs
    ``--helmet_config``; the YAML contents of that file are then installed as
    new parser defaults and the command line is re-parsed, so explicit CLI
    flags still override config-file values.

    Returns:
        argparse.Namespace: fully resolved arguments. ``helmet_output_dir``
        is defaulted to ``./helmet/output/<model basename>`` when unset.
    """
    parser = argparse.ArgumentParser(description="Evaluation script for downstream tasks")
    # Device and model configuration
    parser.add_argument("--device", "-d", type=str, default='0', help="CUDA device ID")
    parser.add_argument("--model", type=str, default="state-spaces/mamba2-1.3b",
                        help="Model name or path from Hugging Face")
    parser.add_argument("--config", type=str, default=None,
                        help="Path to remapping configuration file (None for vanilla model)")
    # Evaluation mode settings
    parser.add_argument("--keep_last_space", action='store_true',
                        help="Preserve the last space character in predictions")
    # PPL task configuration
    parser.add_argument("--ppl", action="store_true",
                        help="Enable perplexity calculation on custom input")
    parser.add_argument("--sample_path", type=str, default="subseq_lambada.txt",
                        help="Path to input file for perplexity calculation")
    # LongBench evaluation settings
    parser.add_argument("--only_eval", action='store_true',
                        help="Skip prediction phase and only perform evaluation")
    parser.add_argument("--long_eval_task", "-lt", type=str, default='no',
                        choices=["no", "yes", "e", "c"],
                        help="LongBench evaluation task type: no/yes/e/c")
    # Helmet task configuration
    parser.add_argument("--helmet_config", type=str, default=None,
                        help="Path to Helmet configuration file")
    parser.add_argument("--tag", type=str, default="eval",
                        help="Tag to add to output files")
    # Helmet data paths
    parser.add_argument("--datasets", type=str, default=None)
    parser.add_argument("--demo_files", type=str, default=None)
    parser.add_argument("--test_files", type=str, default=None)
    parser.add_argument("--helmet_output_dir", type=str, default=None, help="path to save the predictions")
    parser.add_argument("--overwrite", action="store_true", help="whether to overwrite the saved file")
    parser.add_argument("--max_test_samples", type=int, default=None)
    parser.add_argument("--num_workers", type=int, default=4)
    # Helmet dataset specific settings
    parser.add_argument("--popularity_threshold", type=int, default=3)
    # Helmet evaluation settings
    parser.add_argument("--shots", type=int, default=5, help="total number of demos (encoder + decoder)")
    parser.add_argument("--input_max_length", type=str, default='8192', help="the maximum number of tokens of the input, we truncate the end of the context; can be separated by comma to match the specified datasets")
    # Helmet generation settings
    # NOTE: ast.literal_eval lets users write True/False on the CLI and get
    # real booleans (a plain bool() type would make "False" truthy).
    parser.add_argument("--do_sample", type=ast.literal_eval, choices=[True, False], default=False, help="whether to use sampling (false is greedy), overwrites temperature")
    parser.add_argument("--generation_max_length", type=str, default='10', help="max number of tokens to generate, can be separated by comma to match the specified datasets")
    parser.add_argument("--generation_min_length", type=int, default=0, help="min number of tokens to generate")
    parser.add_argument("--temperature", type=float, default=1.0, help="generation temperature")
    parser.add_argument("--top_p", type=float, default=1.0, help="top-p parameter for nucleus sampling")
    parser.add_argument("--stop_newline", type=ast.literal_eval, choices=[True, False], default=False, help="whether to stop generation at newline")
    # Helmet model specific settings
    parser.add_argument("--seed", type=int, default=42, help="random seed")
    parser.add_argument("--use_chat_template", type=ast.literal_eval, choices=[True, False], default=False, help="whether to use chat template")
    # misc
    parser.add_argument("--debug", action="store_true", help="for debugging")
    parser.add_argument("--count_tokens", action="store_true", help="instead of running generation, just count the number of tokens (only for HF models not API)")
    # First pass: discover --helmet_config so its values can become defaults.
    args = parser.parse_args()
    if args.helmet_config is not None:
        # Use a context manager so the config file handle is closed promptly
        # (the original bare open() leaked it); an empty YAML file yields
        # None from safe_load, so fall back to {} before ** expansion.
        with open(args.helmet_config) as config_file:
            helmet_config = yaml.safe_load(config_file)
        parser.set_defaults(**(helmet_config or {}))
    # Second pass: re-parse so explicit CLI flags override config defaults.
    args = parser.parse_args()
    # Set default output directory if not specified
    if args.helmet_output_dir is None:
        args.helmet_output_dir = f"./helmet/output/{os.path.basename(args.model)}"
    return args