Commit 265facf

Revert "feat: enhance OpenAI-compatible model support with role-specific configurations (#356)"
This reverts commit bd8f220, reversing changes made to 106a169.
1 parent bd8f220 commit 265facf

7 files changed: +31 -99 lines changed

.gitignore

Lines changed: 0 additions & 2 deletions
@@ -27,7 +27,6 @@ venv/
 env/
 ENV/
 .env
-.venv
 
 # IDE
 .idea/
@@ -59,4 +58,3 @@ coverage.xml
 owl/camel/types/__pycache__/
 owl/camel/__pycache__/
 owl/camel/utils/__pycache_/
-tmp/

README.md

Lines changed: 1 addition & 3 deletions
@@ -364,10 +364,8 @@ python examples/run_qwen_zh.py
 # Run with Deepseek model
 python examples/run_deepseek_zh.py
 
-# Run with other OpenAI-compatible models, supporting different models for different roles
+# Run with other OpenAI-compatible models
 python examples/run_openai_compatiable_model.py
-# Example with question
-python examples/run_openai_compatiable_model.py "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."
 
 # Run with Azure OpenAI
 python examples/run_azure_openai.py

README_zh.md

Lines changed: 1 addition & 3 deletions
@@ -363,10 +363,8 @@ python examples/run_qwen_zh.py
 # 使用 Deepseek 模型运行
 python examples/run_deepseek_zh.py
 
-# 使用其他 OpenAI 兼容模型运行,支持不同的 role 使用不同的模型
+# 使用其他 OpenAI 兼容模型运行
 python examples/run_openai_compatiable_model.py
-# 带问题的示例
-python examples/run_openai_compatiable_model.py "浏览京东并找出一款对程序员有吸引力的产品。请提供产品名称和价格。"
 
 # 使用 Azure OpenAI模型运行
 python examples/run_azure_openai.py

examples/run_openai_compatiable_model.py

Lines changed: 27 additions & 50 deletions
@@ -53,56 +53,38 @@ def construct_society(question: str) -> RolePlaying:
     models = {
         "user": ModelFactory.create(
             model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
-
-            model_type=os.getenv("USER_ROLE_API_MODEL_TYPE", os.getenv("LLM_ROLE_API_MODEL_TYPE", "qwen-max")),
-            api_key=os.getenv("USER_ROLE_API_KEY", os.getenv("LLM_ROLE_API_KEY", os.getenv("QWEN_API_KEY", "Your_Key"))),
-            url=os.getenv("USER_ROLE_API_BASE_URL", os.getenv("LLM_ROLE_API_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")),
-            model_config_dict={
-                "temperature": float(os.getenv("USER_ROLE_API_MODEL_TEMPERATURE", os.getenv("LLM_ROLE_API_MODEL_TEMPERATURE", "0.4"))),
-                "max_tokens": int(os.getenv("USER_ROLE_API_MODEL_MAX_TOKENS", os.getenv("LLM_ROLE_API_MODEL_MAX_TOKENS", "4096")))
-            },
+            model_type="qwen-max",
+            api_key=os.getenv("QWEN_API_KEY"),
+            url="https://dashscope.aliyuncs.com/compatible-mode/v1",
+            model_config_dict={"temperature": 0.4, "max_tokens": 128000},
         ),
         "assistant": ModelFactory.create(
             model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
-            model_type=os.getenv("ASSISTANT_ROLE_API_MODEL_TYPE", os.getenv("LLM_ROLE_API_MODEL_TYPE", "qwen-max")),
-            api_key=os.getenv("ASSISTANT_ROLE_API_KEY", os.getenv("LLM_ROLE_API_KEY", os.getenv("QWEN_API_KEY", "Your_Key"))),
-            url=os.getenv("ASSISTANT_ROLE_API_BASE_URL", os.getenv("LLM_ROLE_API_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")),
-            model_config_dict={
-                "temperature": float(os.getenv("ASSISTANT_ROLE_API_MODEL_TEMPERATURE", os.getenv("LLM_ROLE_API_MODEL_TEMPERATURE", "0.4"))),
-                "max_tokens": int(os.getenv("ASSISTANT_ROLE_API_MODEL_MAX_TOKENS", os.getenv("LLM_ROLE_API_MODEL_MAX_TOKENS", "4096")))
-            },
-
+            model_type="qwen-max",
+            api_key=os.getenv("QWEN_API_KEY"),
+            url="https://dashscope.aliyuncs.com/compatible-mode/v1",
+            model_config_dict={"temperature": 0.4, "max_tokens": 128000},
         ),
         "browsing": ModelFactory.create(
             model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
-
-            model_type=os.getenv("WEB_ROLE_API_BASE_URL", os.getenv("VLLM_ROLE_API_MODEL_TYPE", "qwen-vl-max")),
-            api_key=os.getenv("WEB_ROLE_API_KEY", os.getenv("VLLM_ROLE_API_KEY", os.getenv("QWEN_API_KEY", "Your_Key"))),
-            url=os.getenv("USER_ROLE_API_BASE_URL", os.getenv("VLLM_ROLE_API_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")),
-            model_config_dict={
-                "temperature": float(os.getenv("WEB_ROLE_API_MODEL_TEMPERATURE", os.getenv("VLLM_ROLE_API_MODEL_TEMPERATURE", "0.4"))),
-                "max_tokens": int(os.getenv("WEB_ROLE_API_MODEL_MAX_TOKENS", os.getenv("VLLM_ROLE_API_MODEL_MAX_TOKENS", "4096")))
-            },
+            model_type="qwen-vl-max",
+            api_key=os.getenv("QWEN_API_KEY"),
+            url="https://dashscope.aliyuncs.com/compatible-mode/v1",
+            model_config_dict={"temperature": 0.4, "max_tokens": 128000},
         ),
         "planning": ModelFactory.create(
             model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
-            model_type=os.getenv("PLANNING_ROLE_API_MODEL_TYPE", os.getenv("LLM_ROLE_API_MODEL_TYPE", "qwen-max")),
-            api_key=os.getenv("PLANNING_ROLE_API_KEY", os.getenv("LLM_ROLE_API_KEY", os.getenv("QWEN_API_KEY", "Your_Key"))),
-            url=os.getenv("PLANNING_ROLE_API_BASE_URL", os.getenv("LLM_ROLE_API_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")),
-            model_config_dict={
-                "temperature": float(os.getenv("PLANNING_ROLE_API_MODEL_TEMPERATURE", os.getenv("LLM_ROLE_API_MODEL_TEMPERATURE", "0.4"))),
-                "max_tokens": int(os.getenv("PLANNING_ROLE_API_MODEL_MAX_TOKENS", os.getenv("LLM_ROLE_API_MODEL_MAX_TOKENS", "4096")))
-            },
+            model_type="qwen-max",
+            api_key=os.getenv("QWEN_API_KEY"),
+            url="https://dashscope.aliyuncs.com/compatible-mode/v1",
+            model_config_dict={"temperature": 0.4, "max_tokens": 128000},
         ),
         "image": ModelFactory.create(
             model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
-            model_type=os.getenv("IMAGE_ROLE_API_MODEL_TYPE", os.getenv("VLLM_ROLE_API_MODEL_TYPE", "qwen-vl-max")),
-            api_key=os.getenv("IMAGE_ROLE_API_KEY", os.getenv("VLLM_ROLE_API_KEY", os.getenv("QWEN_API_KEY", "Your_Key"))),
-            url=os.getenv("IMAGE_ROLE_API_BASE_URL", os.getenv("VLLM_ROLE_API_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")),
-            model_config_dict={
-                "temperature": float(os.getenv("IMAGE_ROLE_API_MODEL_TEMPERATURE", os.getenv("VLLM_ROLE_API_MODEL_TEMPERATURE", "0.4"))),
-                "max_tokens": int(os.getenv("IMAGE_ROLE_API_MODEL_MAX_TOKENS", os.getenv("VLLM_ROLE_API_MODEL_MAX_TOKENS", "4096")))
-            },
+            model_type="qwen-vl-max",
+            api_key=os.getenv("QWEN_API_KEY"),
+            url="https://dashscope.aliyuncs.com/compatible-mode/v1",
+            model_config_dict={"temperature": 0.4, "max_tokens": 128000},
         ),
     }
 
@@ -144,16 +126,13 @@ def construct_society(question: str) -> RolePlaying:
     return society
 
 
+def main():
+    r"""Main function to run the OWL system with an example question."""
+    # Example research question
+    default_task = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."
 
-def main(question: str = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."):
-    r"""Main function to run the OWL system with an example question.
-    Args:
-        question (str): The task or question to be addressed by the society.
-            If not provided, a default question will be used.
-            Defaults to "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."
-    Returns:
-        None
-    """
+    # Override default task if command line argument is provided
+    task = sys.argv[1] if len(sys.argv) > 1 else default_task
 
     # Construct and run the society
     society = construct_society(task)
@@ -162,9 +141,7 @@ def main(question: str = "Navigate to Amazon.com and identify one product that i
 
     # Output the result
     print(f"\033[94mAnswer: {answer}\033[0m")
-    # Output the token count
-    print(f"\033[94mToken count: {token_count}\033[0m")
 
 
 if __name__ == "__main__":
-    main(sys.argv[1] if len(sys.argv) > 1 else "")
+    main()
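
Note: after this revert, all five role models in the example share the same endpoint, API key, and sampling settings and differ only in model_type (qwen-max for the text roles, qwen-vl-max for the browsing and image roles). The following is a minimal sketch of how those repeated ModelFactory.create calls could be collapsed into one place; the make_model helper is illustrative and not part of the repository, while the import paths and keyword arguments mirror the ones shown in the diff above.

import os

from camel.models import ModelFactory
from camel.types import ModelPlatformType


# Hypothetical helper (not in the repo): one place to change the endpoint,
# key, or sampling settings for every role after the revert.
def make_model(model_type: str = "qwen-max"):
    return ModelFactory.create(
        model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
        model_type=model_type,
        api_key=os.getenv("QWEN_API_KEY"),
        url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        model_config_dict={"temperature": 0.4, "max_tokens": 128000},
    )


# Text roles use qwen-max; the browsing and image roles use qwen-vl-max.
models = {
    "user": make_model(),
    "assistant": make_model(),
    "browsing": make_model("qwen-vl-max"),
    "planning": make_model(),
    "image": make_model("qwen-vl-max"),
}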

owl/.env_template

Lines changed: 0 additions & 39 deletions
@@ -24,45 +24,6 @@ QWEN_API_KEY='Your_Key'
 # DeepSeek API (https://platform.deepseek.com/api_keys)
 DEEPSEEK_API_KEY='Your_Key'
 
-# Multi-platform LLM/VLLM API, default values for user assistant planning web image roles
-# LLM_ROLE_API_BASE_URL=''
-# LLM_ROLE_API_KEY='Your_Key'
-# LLM_ROLE_API_MODEL_TYPE=''
-# LLM_ROLE_API_MODEL_TEMPERATURE='0.0'
-# LLM_ROLE_API_MODEL_MAX_TOKENS='0'
-# VLLM_ROLE_API_BASE_URL=''
-# VLLM_ROLE_API_KEY='Your_Key'
-# VLLM_ROLE_API_MODEL_TYPE=''
-# VLLM_ROLE_API_MODEL_TEMPERATURE='0.0'
-# VLLM_ROLE_API_MODEL_MAX_TOKENS='0'
-
-# Multi-platform LLM/VLLM API for user assistant planning web image roles
-# USER_ROLE_API_BASE_URL=''
-# USER_ROLE_API_KEY='Your_Key'
-# USER_ROLE_API_MODEL_TYPE=''
-# USER_ROLE_API_MODEL_TEMPERATURE='0.8'
-# USER_ROLE_API_MODEL_MAX_TOKENS='4096'
-# ASSISTANT_ROLE_API_BASE_URL=''
-# ASSISTANT_ROLE_API_KEY='Your_Key'
-# ASSISTANT_ROLE_API_MODEL_TYPE=''
-# ASSISTANT_ROLE_API_MODEL_TEMPERATURE='0.2'
-# ASSISTANT_ROLE_API_MODEL_MAX_TOKENS='4096'
-# PLANNING_ROLE_API_BASE_URL=''
-# PLANNING_ROLE_API_KEY='Your_Key'
-# PLANNING_ROLE_API_MODEL_TYPE=''
-# PLANNING_ROLE_API_MODEL_TEMPERATURE='0.4'
-# PLANNING_ROLE_API_MODEL_MAX_TOKENS='8192'
-# WEB_ROLE_API_BASE_URL=''
-# WEB_ROLE_API_KEY='Your_Key'
-# WEB_ROLE_API_MODEL_TYPE=''
-# WEB_ROLE_API_MODEL_TEMPERATURE='0.0'
-# WEB_ROLE_API_MODEL_MAX_TOKENS='0'
-# IMAGE_ROLE_API_BASE_URL=''
-# IMAGE_ROLE_API_KEY='Your_Key'
-# IMAGE_ROLE_API_MODEL_TYPE=''
-# IMAGE_ROLE_API_MODEL_TEMPERATURE='0.0'
-# IMAGE_ROLE_API_MODEL_MAX_TOKENS='0'
-
 #===========================================
 # Tools & Services API
 #===========================================
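
For reference, the reverted example read these variables through nested os.getenv fallbacks: the role-specific name first (e.g. USER_ROLE_API_MODEL_TEMPERATURE), then the shared LLM_ROLE_/VLLM_ROLE_ family default, then a hard-coded value, as visible in the deleted lines of examples/run_openai_compatiable_model.py above. A minimal sketch of that lookup chain, with a hypothetical read_setting helper that is not part of the repository:

import os


# Hypothetical helper (not in the repo): reproduces the fallback order used by
# the reverted code, e.g. USER_ROLE_API_MODEL_TEMPERATURE ->
# LLM_ROLE_API_MODEL_TEMPERATURE -> "0.4".
def read_setting(role_var: str, family_var: str, default: str) -> str:
    return os.getenv(role_var, os.getenv(family_var, default))


user_temperature = float(
    read_setting("USER_ROLE_API_MODEL_TEMPERATURE", "LLM_ROLE_API_MODEL_TEMPERATURE", "0.4")
)
user_max_tokens = int(
    read_setting("USER_ROLE_API_MODEL_MAX_TOKENS", "LLM_ROLE_API_MODEL_MAX_TOKENS", "4096")
)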

owl/webapp.py

Lines changed: 1 addition & 1 deletion
@@ -245,7 +245,7 @@ def process_message(role, content):
     "run": "Default mode: Using OpenAI model's default agent collaboration mode, suitable for most tasks.",
     "run_mini": "Using OpenAI model with minimal configuration to process tasks",
     "run_deepseek_zh": "Using deepseek model to process Chinese tasks",
-    "run_openai_compatiable_model": "Using multiple openai compatible model to process tasks",
+    "run_openai_compatiable_model": "Using openai compatible model to process tasks",
     "run_ollama": "Using local ollama model to process tasks",
     "run_qwen_mini_zh": "Using qwen model with minimal configuration to process tasks",
     "run_qwen_zh": "Using qwen model to process tasks",

owl/webapp_zh.py

Lines changed: 1 addition & 1 deletion
@@ -245,7 +245,7 @@ def process_message(role, content):
     "run": "默认模式:使用OpenAI模型的默认的智能体协作模式,适合大多数任务。",
     "run_mini": "使用使用OpenAI模型最小化配置处理任务",
     "run_deepseek_zh": "使用deepseek模型处理中文任务",
-    "run_openai_compatiable_model": "使用多个openai兼容模型处理任务",
+    "run_openai_compatiable_model": "使用openai兼容模型处理任务",
     "run_ollama": "使用本地ollama模型处理任务",
     "run_qwen_mini_zh": "使用qwen模型最小化配置处理任务",
     "run_qwen_zh": "使用qwen模型处理任务",
