Skip to content

Commit bd8f220

Browse files
authored
feat: enhance OpenAI-compatible model support with role-specific configurations (#356)
2 parents 106a169 + 2e034d9 commit bd8f220

File tree

7 files changed

+99
-31
lines changed

7 files changed

+99
-31
lines changed

.gitignore

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ venv/
2727
env/
2828
ENV/
2929
.env
30+
.venv
3031

3132
# IDE
3233
.idea/
@@ -58,3 +59,4 @@ coverage.xml
5859
owl/camel/types/__pycache__/
5960
owl/camel/__pycache__/
6061
owl/camel/utils/__pycache_/
62+
tmp/

README.md

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -364,8 +364,10 @@ python examples/run_qwen_zh.py
364364
# Run with Deepseek model
365365
python examples/run_deepseek_zh.py
366366

367-
# Run with other OpenAI-compatible models
367+
# Run with other OpenAI-compatible models, supporting different models for different roles
368368
python examples/run_openai_compatiable_model.py
369+
# Example with question
370+
python examples/run_openai_compatiable_model.py "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."
369371

370372
# Run with Azure OpenAI
371373
python examples/run_azure_openai.py

README_zh.md

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -363,8 +363,10 @@ python examples/run_qwen_zh.py
363363
# 使用 Deepseek 模型运行
364364
python examples/run_deepseek_zh.py
365365

366-
# 使用其他 OpenAI 兼容模型运行
366+
# 使用其他 OpenAI 兼容模型运行,支持不同的 role 使用不同的模型
367367
python examples/run_openai_compatiable_model.py
368+
# 带问题的示例
369+
python examples/run_openai_compatiable_model.py "浏览京东并找出一款对程序员有吸引力的产品。请提供产品名称和价格。"
368370

369371
# 使用 Azure OpenAI模型运行
370372
python examples/run_azure_openai.py

examples/run_openai_compatiable_model.py

Lines changed: 50 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -53,38 +53,56 @@ def construct_society(question: str) -> RolePlaying:
5353
models = {
5454
"user": ModelFactory.create(
5555
model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
56-
model_type="qwen-max",
57-
api_key=os.getenv("QWEN_API_KEY"),
58-
url="https://dashscope.aliyuncs.com/compatible-mode/v1",
59-
model_config_dict={"temperature": 0.4, "max_tokens": 128000},
56+
57+
model_type=os.getenv("USER_ROLE_API_MODEL_TYPE", os.getenv("LLM_ROLE_API_MODEL_TYPE", "qwen-max")),
58+
api_key=os.getenv("USER_ROLE_API_KEY", os.getenv("LLM_ROLE_API_KEY", os.getenv("QWEN_API_KEY", "Your_Key"))),
59+
url=os.getenv("USER_ROLE_API_BASE_URL", os.getenv("LLM_ROLE_API_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")),
60+
model_config_dict={
61+
"temperature": float(os.getenv("USER_ROLE_API_MODEL_TEMPERATURE", os.getenv("LLM_ROLE_API_MODEL_TEMPERATURE", "0.4"))),
62+
"max_tokens": int(os.getenv("USER_ROLE_API_MODEL_MAX_TOKENS", os.getenv("LLM_ROLE_API_MODEL_MAX_TOKENS", "4096")))
63+
},
6064
),
6165
"assistant": ModelFactory.create(
6266
model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
63-
model_type="qwen-max",
64-
api_key=os.getenv("QWEN_API_KEY"),
65-
url="https://dashscope.aliyuncs.com/compatible-mode/v1",
66-
model_config_dict={"temperature": 0.4, "max_tokens": 128000},
67+
model_type=os.getenv("ASSISTANT_ROLE_API_MODEL_TYPE", os.getenv("LLM_ROLE_API_MODEL_TYPE", "qwen-max")),
68+
api_key=os.getenv("ASSISTANT_ROLE_API_KEY", os.getenv("LLM_ROLE_API_KEY", os.getenv("QWEN_API_KEY", "Your_Key"))),
69+
url=os.getenv("ASSISTANT_ROLE_API_BASE_URL", os.getenv("LLM_ROLE_API_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")),
70+
model_config_dict={
71+
"temperature": float(os.getenv("ASSISTANT_ROLE_API_MODEL_TEMPERATURE", os.getenv("LLM_ROLE_API_MODEL_TEMPERATURE", "0.4"))),
72+
"max_tokens": int(os.getenv("ASSISTANT_ROLE_API_MODEL_MAX_TOKENS", os.getenv("LLM_ROLE_API_MODEL_MAX_TOKENS", "4096")))
73+
},
74+
6775
),
6876
"browsing": ModelFactory.create(
6977
model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
70-
model_type="qwen-vl-max",
71-
api_key=os.getenv("QWEN_API_KEY"),
72-
url="https://dashscope.aliyuncs.com/compatible-mode/v1",
73-
model_config_dict={"temperature": 0.4, "max_tokens": 128000},
78+
79+
model_type=os.getenv("WEB_ROLE_API_MODEL_TYPE", os.getenv("VLLM_ROLE_API_MODEL_TYPE", "qwen-vl-max")),
80+
api_key=os.getenv("WEB_ROLE_API_KEY", os.getenv("VLLM_ROLE_API_KEY", os.getenv("QWEN_API_KEY", "Your_Key"))),
81+
url=os.getenv("WEB_ROLE_API_BASE_URL", os.getenv("VLLM_ROLE_API_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")),
82+
model_config_dict={
83+
"temperature": float(os.getenv("WEB_ROLE_API_MODEL_TEMPERATURE", os.getenv("VLLM_ROLE_API_MODEL_TEMPERATURE", "0.4"))),
84+
"max_tokens": int(os.getenv("WEB_ROLE_API_MODEL_MAX_TOKENS", os.getenv("VLLM_ROLE_API_MODEL_MAX_TOKENS", "4096")))
85+
},
7486
),
7587
"planning": ModelFactory.create(
7688
model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
77-
model_type="qwen-max",
78-
api_key=os.getenv("QWEN_API_KEY"),
79-
url="https://dashscope.aliyuncs.com/compatible-mode/v1",
80-
model_config_dict={"temperature": 0.4, "max_tokens": 128000},
89+
model_type=os.getenv("PLANNING_ROLE_API_MODEL_TYPE", os.getenv("LLM_ROLE_API_MODEL_TYPE", "qwen-max")),
90+
api_key=os.getenv("PLANNING_ROLE_API_KEY", os.getenv("LLM_ROLE_API_KEY", os.getenv("QWEN_API_KEY", "Your_Key"))),
91+
url=os.getenv("PLANNING_ROLE_API_BASE_URL", os.getenv("LLM_ROLE_API_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")),
92+
model_config_dict={
93+
"temperature": float(os.getenv("PLANNING_ROLE_API_MODEL_TEMPERATURE", os.getenv("LLM_ROLE_API_MODEL_TEMPERATURE", "0.4"))),
94+
"max_tokens": int(os.getenv("PLANNING_ROLE_API_MODEL_MAX_TOKENS", os.getenv("LLM_ROLE_API_MODEL_MAX_TOKENS", "4096")))
95+
},
8196
),
8297
"image": ModelFactory.create(
8398
model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
84-
model_type="qwen-vl-max",
85-
api_key=os.getenv("QWEN_API_KEY"),
86-
url="https://dashscope.aliyuncs.com/compatible-mode/v1",
87-
model_config_dict={"temperature": 0.4, "max_tokens": 128000},
99+
model_type=os.getenv("IMAGE_ROLE_API_MODEL_TYPE", os.getenv("VLLM_ROLE_API_MODEL_TYPE", "qwen-vl-max")),
100+
api_key=os.getenv("IMAGE_ROLE_API_KEY", os.getenv("VLLM_ROLE_API_KEY", os.getenv("QWEN_API_KEY", "Your_Key"))),
101+
url=os.getenv("IMAGE_ROLE_API_BASE_URL", os.getenv("VLLM_ROLE_API_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")),
102+
model_config_dict={
103+
"temperature": float(os.getenv("IMAGE_ROLE_API_MODEL_TEMPERATURE", os.getenv("VLLM_ROLE_API_MODEL_TEMPERATURE", "0.4"))),
104+
"max_tokens": int(os.getenv("IMAGE_ROLE_API_MODEL_MAX_TOKENS", os.getenv("VLLM_ROLE_API_MODEL_MAX_TOKENS", "4096")))
105+
},
88106
),
89107
}
90108

@@ -126,13 +144,16 @@ def construct_society(question: str) -> RolePlaying:
126144
return society
127145

128146

129-
def main():
130-
r"""Main function to run the OWL system with an example question."""
131-
# Example research question
132-
default_task = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."
133147

134-
# Override default task if command line argument is provided
135-
task = sys.argv[1] if len(sys.argv) > 1 else default_task
148+
def main(question: str = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."):
149+
r"""Main function to run the OWL system with an example question.
150+
Args:
151+
question (str): The task or question to be addressed by the society.
152+
If not provided, a default question will be used.
153+
Defaults to "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."
154+
Returns:
155+
None
156+
"""
136157

137158
# Construct and run the society
138159
society = construct_society(question)
@@ -141,7 +162,9 @@ def main():
141162

142163
# Output the result
143164
print(f"\033[94mAnswer: {answer}\033[0m")
165+
# Output the token count
166+
print(f"\033[94mToken count: {token_count}\033[0m")
144167

145168

146169
if __name__ == "__main__":
147-
main()
170+
main(sys.argv[1]) if len(sys.argv) > 1 else main()

owl/.env_template

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,45 @@ QWEN_API_KEY='Your_Key'
2424
# DeepSeek API (https://platform.deepseek.com/api_keys)
2525
DEEPSEEK_API_KEY='Your_Key'
2626

27+
# Multi-platform LLM/VLLM API: default values shared by the user, assistant, planning, web, and image roles
28+
# LLM_ROLE_API_BASE_URL=''
29+
# LLM_ROLE_API_KEY='Your_Key'
30+
# LLM_ROLE_API_MODEL_TYPE=''
31+
# LLM_ROLE_API_MODEL_TEMPERATURE='0.0'
32+
# LLM_ROLE_API_MODEL_MAX_TOKENS='0'
33+
# VLLM_ROLE_API_BASE_URL=''
34+
# VLLM_ROLE_API_KEY='Your_Key'
35+
# VLLM_ROLE_API_MODEL_TYPE=''
36+
# VLLM_ROLE_API_MODEL_TEMPERATURE='0.0'
37+
# VLLM_ROLE_API_MODEL_MAX_TOKENS='0'
38+
39+
# Multi-platform LLM/VLLM API: per-role overrides for the user, assistant, planning, web, and image roles
40+
# USER_ROLE_API_BASE_URL=''
41+
# USER_ROLE_API_KEY='Your_Key'
42+
# USER_ROLE_API_MODEL_TYPE=''
43+
# USER_ROLE_API_MODEL_TEMPERATURE='0.8'
44+
# USER_ROLE_API_MODEL_MAX_TOKENS='4096'
45+
# ASSISTANT_ROLE_API_BASE_URL=''
46+
# ASSISTANT_ROLE_API_KEY='Your_Key'
47+
# ASSISTANT_ROLE_API_MODEL_TYPE=''
48+
# ASSISTANT_ROLE_API_MODEL_TEMPERATURE='0.2'
49+
# ASSISTANT_ROLE_API_MODEL_MAX_TOKENS='4096'
50+
# PLANNING_ROLE_API_BASE_URL=''
51+
# PLANNING_ROLE_API_KEY='Your_Key'
52+
# PLANNING_ROLE_API_MODEL_TYPE=''
53+
# PLANNING_ROLE_API_MODEL_TEMPERATURE='0.4'
54+
# PLANNING_ROLE_API_MODEL_MAX_TOKENS='8192'
55+
# WEB_ROLE_API_BASE_URL=''
56+
# WEB_ROLE_API_KEY='Your_Key'
57+
# WEB_ROLE_API_MODEL_TYPE=''
58+
# WEB_ROLE_API_MODEL_TEMPERATURE='0.0'
59+
# WEB_ROLE_API_MODEL_MAX_TOKENS='0'
60+
# IMAGE_ROLE_API_BASE_URL=''
61+
# IMAGE_ROLE_API_KEY='Your_Key'
62+
# IMAGE_ROLE_API_MODEL_TYPE=''
63+
# IMAGE_ROLE_API_MODEL_TEMPERATURE='0.0'
64+
# IMAGE_ROLE_API_MODEL_MAX_TOKENS='0'
65+
2766
#===========================================
2867
# Tools & Services API
2968
#===========================================

owl/webapp.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -245,7 +245,7 @@ def process_message(role, content):
245245
"run": "Default mode: Using OpenAI model's default agent collaboration mode, suitable for most tasks.",
246246
"run_mini": "Using OpenAI model with minimal configuration to process tasks",
247247
"run_deepseek_zh": "Using deepseek model to process Chinese tasks",
248-
"run_openai_compatiable_model": "Using openai compatible model to process tasks",
248+
"run_openai_compatiable_model": "Using multiple openai compatible models to process tasks",
249249
"run_ollama": "Using local ollama model to process tasks",
250250
"run_qwen_mini_zh": "Using qwen model with minimal configuration to process tasks",
251251
"run_qwen_zh": "Using qwen model to process tasks",

owl/webapp_zh.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -245,7 +245,7 @@ def process_message(role, content):
245245
"run": "默认模式:使用OpenAI模型的默认的智能体协作模式,适合大多数任务。",
246246
"run_mini": "使用使用OpenAI模型最小化配置处理任务",
247247
"run_deepseek_zh": "使用deepseek模型处理中文任务",
248-
"run_openai_compatiable_model": "使用openai兼容模型处理任务",
248+
"run_openai_compatiable_model": "使用多个openai兼容模型处理任务",
249249
"run_ollama": "使用本地ollama模型处理任务",
250250
"run_qwen_mini_zh": "使用qwen模型最小化配置处理任务",
251251
"run_qwen_zh": "使用qwen模型处理任务",

0 commit comments

Comments
 (0)