-
Notifications
You must be signed in to change notification settings - Fork 28
/
Copy pathconfigs.py
58 lines (54 loc) · 1.32 KB
/
configs.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import torch
# TODO: add your custom model config here:
# Per-deployment Azure OpenAI request settings, keyed by model alias.
# All entries share the same sampling parameters (greedy: temperature 0,
# top_p 1, no penalties); only the deployment name and token budget vary.
gpt_configs = {
    alias: {
        "engine": engine,
        "temperature": 0.0,
        "max_tokens": max_tokens,
        "top_p": 1.0,
        "frequency_penalty": 0.0,
        "presence_penalty": 0.0,
        "stop": None,
    }
    for alias, engine, max_tokens in (
        ("gpt4-32k", "devgpt4-32k", 5000),
        ("gpt35-turbo", "mtutor-openai-dev", 3999),
    )
}
# HuggingFace pipeline kwargs for the supported Llama-2 chat checkpoints,
# keyed by model id. Every entry is identical except for the model id itself,
# so the table is generated from the list of supported checkpoints.
llama_configs = {
    model_id: {
        "task": "text-generation",
        "model": model_id,
        "torch_dtype": torch.float16,  # half precision to fit GPU memory
        "device_map": "auto",          # let accelerate place the layers
        "do_sample": False,            # deterministic (greedy) decoding
    }
    for model_id in (
        "meta-llama/Llama-2-7b-chat-hf",
        "meta-llama/Llama-2-13b-chat-hf",
    )
}
# Fallback HuggingFace pipeline kwargs for Llama models not listed in
# llama_configs; the caller is expected to fill in "model" before use.
default_llama_config = dict(
    task="text-generation",
    model=None,                 # placeholder — set to a concrete checkpoint id
    torch_dtype=torch.float16,  # half precision, same as the named configs
    device_map="auto",
    do_sample=False,            # deterministic (greedy) decoding
)
# Fallback Azure OpenAI request settings for deployments not listed in
# gpt_configs; the caller is expected to fill in "engine" before use.
default_gpt_config = dict(
    engine=None,            # placeholder — set to a concrete deployment name
    temperature=0.0,        # greedy decoding
    max_tokens=5000,
    top_p=1.0,
    frequency_penalty=0.0,
    presence_penalty=0.0,
    stop=None,
)